diff --git a/spaces/123Kumar/vits-uma-genshin-honkai123/text/__init__.py b/spaces/123Kumar/vits-uma-genshin-honkai123/text/__init__.py
deleted file mode 100644
index 663c4b6416affb53c9dc56dddbc8b2b65d4bf518..0000000000000000000000000000000000000000
--- a/spaces/123Kumar/vits-uma-genshin-honkai123/text/__init__.py
+++ /dev/null
@@ -1,57 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-from text import cleaners
-from text.symbols import symbols
-
-
-# Mappings from symbol to numeric ID and vice versa:
-_symbol_to_id = {s: i for i, s in enumerate(symbols)}
-_id_to_symbol = {i: s for i, s in enumerate(symbols)}
-
-
-def text_to_sequence(text, symbols, cleaner_names):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
- text: string to convert to a sequence
- cleaner_names: names of the cleaner functions to run the text through
- Returns:
- List of integers corresponding to the symbols in the text
- '''
- _symbol_to_id = {s: i for i, s in enumerate(symbols)}
- sequence = []
-
- clean_text = _clean_text(text, cleaner_names)
- for symbol in clean_text:
- if symbol not in _symbol_to_id.keys():
- continue
- symbol_id = _symbol_to_id[symbol]
- sequence += [symbol_id]
- return sequence, clean_text
-
-
-def cleaned_text_to_sequence(cleaned_text):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
- text: string to convert to a sequence
- Returns:
- List of integers corresponding to the symbols in the text
- '''
- sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()]
- return sequence
-
-
-def sequence_to_text(sequence):
- '''Converts a sequence of IDs back to a string'''
- result = ''
- for symbol_id in sequence:
- s = _id_to_symbol[symbol_id]
- result += s
- return result
-
-
-def _clean_text(text, cleaner_names):
- for name in cleaner_names:
- cleaner = getattr(cleaners, name)
- if not cleaner:
- raise Exception('Unknown cleaner: %s' % name)
- text = cleaner(text)
- return text
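A minimal usage sketch of the helpers this deleted `text/__init__.py` exposed, for reference. It assumes the package is importable as `text`, that `text.symbols.symbols` lists the model's symbol set, and that `text.cleaners` provides a cleaner named `english_cleaners` (the cleaner name is an assumption; the cleaners shipped with this Space may differ):

```python
# Hypothetical usage of the deleted text/__init__.py helpers (not part of the diff).
# Assumptions: `text` is importable, `text.symbols.symbols` defines the symbol set,
# and an `english_cleaners` function exists in text.cleaners (name assumed).
from text import text_to_sequence, sequence_to_text
from text.symbols import symbols

sequence, cleaned = text_to_sequence("Hello world!", symbols, ["english_cleaners"])
print(sequence)                    # list of integer symbol IDs for the cleaned text
print(sequence_to_text(sequence))  # maps the IDs back to the retained, cleaned symbols
```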
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/ARK SURVIVAL EVOLVED TRAINER The Ultimate Guide to Infinite Health and Unlimited Food.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/ARK SURVIVAL EVOLVED TRAINER The Ultimate Guide to Infinite Health and Unlimited Food.md
deleted file mode 100644
index e255dee0aae930b27c6536bc15ad0f43d58a5a8a..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/ARK SURVIVAL EVOLVED TRAINER The Ultimate Guide to Infinite Health and Unlimited Food.md
+++ /dev/null
@@ -1,133 +0,0 @@
-
-
Are you a fan of ARK Survival Evolved, the popular action-adventure game that lets you explore a massive island full of dinosaurs and other creatures? Do you want to make your gameplay more enjoyable and exciting by having access to unlimited resources, abilities, and options? If so, then you might be interested in using a trainer for ARK Survival Evolved.
A trainer is a software program that modifies the game's memory and code to give you various advantages and cheats. With a trainer, you can have infinite health, unlimited food, no reload, easy crafting, super speed, and more. You can also customize your trainer settings to suit your preferences and needs.
-
In this article, we will show you how to install and use a trainer for ARK Survival Evolved, what features it offers, what benefits it brings, and what risks it entails. By the end of this article, you will be able to decide whether using a trainer is worth it for you or not.
-
How to install and use the trainer
-
Before you can use a trainer for ARK Survival Evolved, you need to download it from a reliable source. There are many websites that offer trainers for various games, but not all of them are safe and trustworthy. Some of them may contain malware or viruses that can harm your computer or steal your personal information. Therefore, you should always do some research before downloading any file from the internet.
-
One of the most reputable sources for trainers is Steam, which is the official platform for ARK Survival Evolved. Steam has a community workshop where users can upload and download mods, trainers, maps, skins, and other content for various games. You can browse through the workshop and find a trainer that suits your needs. You can also read the reviews and ratings from other users to see if the trainer works well or not.
-
Downloading the trainer from a reliable source
-
To download a trainer from Steam, you need to have an account and own ARK Survival Evolved on Steam. If you don't have an account, you can create one for free on their website. If you don't own ARK Survival Evolved on Steam, you can buy it from their store or use another platform that supports trainers.
-
How to get infinite health in ARK SURVIVAL EVOLVED with trainer
-ARK SURVIVAL EVOLVED trainer unlimited food and health cheat
-Best trainer for ARK SURVIVAL EVOLVED infinite health and food hack
-Download ARK SURVIVAL EVOLVED trainer with unlimited health and food features
-ARK SURVIVAL EVOLVED trainer infinite health and food mod for PC
-Cheat codes for ARK SURVIVAL EVOLVED trainer with infinite health and unlimited food
-ARK SURVIVAL EVOLVED trainer guide for infinite health and food
-Tips and tricks for using ARK SURVIVAL EVOLVED trainer with unlimited health and food
-ARK SURVIVAL EVOLVED trainer review: infinite health and food option
-How to install ARK SURVIVAL EVOLVED trainer with infinite health and unlimited food
-ARK SURVIVAL EVOLVED trainer infinite health and food gameplay video
-ARK SURVIVAL EVOLVED trainer unlimited health and food update
-ARK SURVIVAL EVOLVED trainer infinite health and food patch notes
-How to use ARK SURVIVAL EVOLVED trainer with unlimited health and food safely
-ARK SURVIVAL EVOLVED trainer infinite health and food bug fixes
-How to uninstall ARK SURVIVAL EVOLVED trainer with unlimited health and food
-ARK SURVIVAL EVOLVED trainer infinite health and food compatibility issues
-How to activate ARK SURVIVAL EVOLVED trainer with unlimited health and food
-ARK SURVIVAL EVOLVED trainer infinite health and food keyboard shortcuts
-How to customize ARK SURVIVAL EVOLVED trainer with unlimited health and food settings
-ARK SURVIVAL EVOLVED trainer infinite health and food pros and cons
-How to get free ARK SURVIVAL EVOLVED trainer with unlimited health and food
-ARK SURVIVAL EVOLVED trainer infinite health and food vs other trainers
-How to troubleshoot ARK SURVIVAL EVOLVED trainer with unlimited health and food errors
-ARK SURVIVAL EVOLVED trainer infinite health and food FAQs
-How to backup ARK SURVIVAL EVOLVED trainer with unlimited health and food files
-How to restore ARK SURVIVAL EVOLVED trainer with infinite health and food files
-How to upgrade ARK SURVIVAL EVOLVED trainer with unlimited health and food version
-How to downgrade ARK SURVIVAL EVOLVED trainer with infinite health and food version
-How to disable ARK SURVIVAL EVOLVED trainer with unlimited health and food features
-How to enable ARK SURVIVAL EVOLVED trainer with infinite health and food features
-How to test ARK SURVIVAL EVOLVED trainer with unlimited health and food performance
-How to improve ARK SURVIVAL EVOLVED trainer with infinite health and food performance
-How to optimize ARK SURVIVAL EVOLVED trainer with unlimited health and food performance
-How to verify ARK SURVIVAL EVOLVED trainer with infinite health and food authenticity
-How to report ARK SURVIVAL EVOLVED trainer with unlimited health and food issues
-How to contact ARK SURVIVAL EVOLVED trainer with infinite health and food support team
-How to rate ARK SURVIVAL EVOLVED trainer with unlimited health and food quality
-How to share ARK SURVIVAL EVOLVED trainer with infinite health and food feedback
-How to recommend ARK SURVIVAL EVOLVED trainer with unlimited health and food to others
-
Once you have an account and own ARK Survival Evolved on Steam, you can follow these steps to download a trainer:
-
-
Open Steam and log in to your account.
-
Go to Library and select ARK Survival Evolved from your games list.
-
Click on Workshop under Community Hub on the right side of the screen.
-
Type "trainer" in the search box and press Enter.
-
Choose a trainer that has good ratings, reviews, and compatibility with your game version.
-
Click on Subscribe to download the trainer to your computer.
-
-
Extracting the trainer files and running the program
-
After downloading the trainer from Steam, you need to extract it from its compressed file format. Most trainers come in ZIP or RAR files that need to be unpacked using a program like WinRAR or 7-Zip. You can download these programs for free from their official websites.
-
To extract the trainer files, you need to follow these steps:
-
-
Locate the downloaded file on your computer. It should be in your Steam folder under steamapps > workshop > content > 346110 > [trainer ID].
-
Right-click on the file and select Extract Here or Extract to [trainer name].
-
A new folder with the same name as the file should appear in the same location.
-
Open the folder and look for an executable file with the name of the trainer or something similar.
-
Double-click on the file to run the program.
-
-
Launching the game and activating the trainer
-
The final step is to launch ARK Survival Evolved and activate the trainer. To do this, you need to follow these steps:
-
-
Run ARK Survival Evolved from Steam or your preferred platform.
-
Wait for the game to load and start a new game or load an existing one.
-
Alt-tab to switch back to your desktop or use Windows key + D.
-
Run the trainer program if it's not already running.
-
A window with various options should appear on your screen.
-
Select the options that you want to enable by clicking on them or pressing their corresponding keys.
-
You should hear a confirmation sound if an option is activated successfully.
-
Switch back to ARK Survival Evolved by alt-tabbing or using Windows key + D again.
-
You should see some indicators on your screen showing that the options are enabled.
-
You can now enjoy playing ARK Survival Evolved with cheats!
-
-
Features of the trainer
-
A typical trainer for ARK Survival Evolved offers many features that can enhance your gameplay experience. Some of these features are:
-
Infinite health and stamina
-
This feature allows you to have unlimited health points (HP) and stamina points (SP). You will never die or get exhausted from any damage or activity. You can fight any enemy, fall from any height, swim underwater indefinitely, run forever, etc. without worrying about losing health or stamina.
-
Unlimited food and water
-
This feature allows you to have unlimited food points (FP) and water points (WP). You will never starve or dehydrate from any condition or environment. You can eat anything, drink anything, stay in any temperature zone, etc. without worrying about losing food or water.
-
Infinite weight and oxygen
-
This feature allows you to have unlimited weight capacity (WC) and oxygen capacity (OC). You will never be encumbered or suffocated by any item or situation. You can carry anything, breathe anywhere, dive deep underwater indefinitely, etc. without worrying about losing weight or oxygen.
-
No reload and unlimited ammo
-
This feature allows you to have no reload time (RT) and unlimited ammunition (AM) for any weapon or tool. You will never run out of bullets or arrows or need to reload your gun or bow. You can shoot anything continuously without worrying about losing ammo or wasting time reloading.
-
Easy crafting and taming
-
This feature allows you to have easy crafting requirements (CR) and easy taming effectiveness (TE) for any item or creature. You will need only one resource of any type to craft any item or tool. You will also tame any creature instantly with one food item of any type. You can craft anything quickly without worrying about gathering resources or wasting time crafting. You can also tame anything easily without worrying about feeding them properly or waiting for them to be tamed.
-
Super speed and jump
-
This feature allows you to have super speed and super jump. You will be able to outrun any enemy, reach any location, jump over any obstacle, etc. without worrying about speed or height.
-
Other options and customizations
-
Depending on the trainer you use, you may have access to other options and customizations that can further enhance your gameplay experience. For example, some trainers may allow you to:
-
-
Change your character's appearance, level, stats, skills, etc.
-
Spawn any item, weapon, tool, resource, etc. in your inventory or on the ground.
-
Spawn any creature, tame or wild, friendly or hostile, on the island.
-
Teleport to any location on the map or to your cursor position.
-
Freeze the time of day or change the weather conditions.
-
Enable god mode or ghost mode for yourself or your mount.
-
And more!
-
-
To access these options and customizations, you may need to use different keys or buttons on your keyboard or controller. You may also need to open a console window or a menu screen to enter commands or codes. You should always read the instructions and notes that come with the trainer to learn how to use it properly and safely.
-
Benefits of using the trainer
-
Using a trainer for ARK Survival Evolved can bring you many benefits that can make your gameplay more enjoyable and exciting. Some of these benefits are:
-
Enhance your gaming experience and have more fun
-
With a trainer, you can have more freedom and flexibility to play ARK Survival Evolved the way you want. You can experiment with different items, weapons, tools, creatures, etc. without worrying about their costs or consequences. You can also try out different scenarios and challenges without risking your progress or reputation. You can have more fun and satisfaction from playing ARK Survival Evolved with cheats.
-
Explore the island and its secrets without limitations
-
With a trainer, you can explore the island and its secrets without limitations. You can travel to any location on the map without being hindered by terrain, distance, enemies, etc. You can also discover hidden areas, caves, ruins, etc. that may contain valuable loot or clues. You can uncover the mysteries and secrets of ARK Survival Evolved without missing anything.
-
Survive and dominate the dinosaurs and other players
-
With a trainer, you can survive and dominate the dinosaurs and other players on the island. You can fight any dinosaur or creature without fear of death or injury. You can also tame any dinosaur or creature without difficulty or delay. You can also compete with other players online without being at a disadvantage. You can be the ultimate survivor and ruler of ARK Survival Evolved with cheats.
-
Customize your gameplay according to your preferences
-
With a trainer, you can customize your gameplay according to your preferences. You can adjust the difficulty level, game speed, graphics quality, sound volume, etc. according to your liking. You can also enable or disable certain options or features according to your needs. You can make ARK Survival Evolved suit your personal taste and style with cheats.
-
Risks of using the trainer
-
While using a trainer for ARK Survival Evolved can bring you many benefits, it can also entail some risks that you should be aware of before using it. Some of these risks are:
-
Potential malware and viruses from untrusted sources
-
As mentioned earlier, not all trainers are safe and trustworthy. Some of them may contain malware or viruses that can harm your computer or steal your personal information. Therefore, you should always do some research before downloading any file from the internet. You should also scan any file with an antivirus program before opening it, and back up your game files and system files before using any trainer.
-
Possible bans and penalties from online servers
-
its compressed file format, run the program, launch the game, and activate the options that you want to enable.
-
What features does a trainer for ARK Survival Evolved offer? A typical trainer for ARK Survival Evolved offers many features that can enhance your gameplay experience, such as infinite health, unlimited food, no reload, easy crafting, super speed, and more. You can also customize your trainer settings to suit your preferences and needs.
-
What are the benefits of using a trainer for ARK Survival Evolved? Using a trainer for ARK Survival Evolved can bring you many benefits that can make your gameplay more enjoyable and exciting, such as enhancing your gaming experience and having more fun, exploring the island and its secrets without limitations, surviving and dominating the dinosaurs and other players, and customizing your gameplay according to your preferences.
-
What are the risks of using a trainer for ARK Survival Evolved? Using a trainer for ARK Survival Evolved can also entail some risks that you should be aware of before using it, such as potential malware and viruses from untrusted sources, possible bans and penalties from online servers, and loss of challenge and immersion from cheating.
-
- 0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Free Download Psikey.dll Coreldraw X5 Serial Number LINK.md b/spaces/1gistliPinn/ChatGPT4/Examples/Free Download Psikey.dll Coreldraw X5 Serial Number LINK.md
deleted file mode 100644
index 83765bc8a22454eb276448a9d7018f5405d3f342..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Free Download Psikey.dll Coreldraw X5 Serial Number LINK.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
Download latest build for psikey.dll corel draw x5 download. Download - WebHostingHub. a R61 store the Coreldraw x5. to a free rectangular parallelepiped. Given a rectangle of sides $a_1$ and $a_2$, there are three elementary ways to subdivide it: create three smaller rectangles, create four smaller rectangles, or first chop off one edge and then create two rectangles. The process of chopping a rectangle $a_1 \times a_2$ into smaller rectangles $a_1 \times a_i \times a_2$ is described in Fig. [fig:rectangles]. For each of the three directions along the positive sides, we create a smaller rectangle in the direction indicated by the shaded rectangles. The goal is to create three smaller rectangles of area $a_1a_2$. Such a subdivision is possible if and only if $a_2>a_1$. From now on, we will assume that $a_2>a_1$. In the case that $a_2=a_1$, this lemma does not apply. In this case, one of the three smaller rectangles will have a square hole, so the lemma can be re-stated as follows: a subdivision of a rectangle into smaller rectangles with $a_2>a_1$ is possible if and only if $a_2>a_1+1$. Let us now assume that $a_2>a_1+1$. We can subdivide the rectangle in three different ways as shown in Fig. [fig:rectangles]. The first possibility is to subdivide as described in Fig. [fig:subdivide]. As can be seen, this results in three smaller rectangles with the same area as the original rectangle. The second possibility is to subdivide as shown in Fig. [fig:subdivide2]. This leaves an empty space, which is divided into four smaller rectangles, see Fig. [fig:squares]. The third possibility is to subdivide as in Fig. [fig:subdivide3]. This results in a subtraction of a square from the original rectangle. If the square is in the upper left corner, the result is two smaller rectangles. If the square is in the lower right corner, the result is one smaller rectangle. Let $a_1 \times a_2 \times a_3$ be any rectangle, and divide it along the positive sides. Then the number of rectangles created during this process does not exceed $a_3-a_2-a_1$. We start with the rectangle of sides $a_1,a_2,a_3$. We can apply the previous lemma to each of the three directions in the positive $x,y,z$-directions. The result of this lemma is three smaller rectangles with the same area as the original rectangle. From now on, we will assume that $a_3>a_2>a_1$. In the case that $a_3=a_2=a_1$, this lemma does not apply.
-
free download psikey.dll coreldraw x5 serial number
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bubble Shooter Classic How to Download and Play the Most Addictive Game for Free.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bubble Shooter Classic How to Download and Play the Most Addictive Game for Free.md
deleted file mode 100644
index 74abf0e0e22c0b6bed8e7cb77590d8a707356485..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bubble Shooter Classic How to Download and Play the Most Addictive Game for Free.md
+++ /dev/null
@@ -1,225 +0,0 @@
-
-
Bubble Shooter Classic Kostenlos Download: How to Play and Enjoy this Fun Game
-
If you are looking for a fun and relaxing game that can keep you entertained for hours, you might want to try Bubble Shooter Classic. This is a classic and addictive bubble pop game that is easy to play but hard to master. In this article, we will tell you everything you need to know about Bubble Shooter Classic, including what it is, how to download and install it for free, how to play and win it, and why it is a fun and relaxing game. Let's get started!
-
What is Bubble Shooter Classic?
-
Bubble Shooter Classic is a version of the popular bubble shooter game that has been around for decades. It is a puzzle game where you have to match three or more bubbles of the same color to pop them and clear the board. The game has thousands of levels with different layouts, obstacles, and challenges. You can also choose from three game modes: puzzle, arcade, and classic. The game is suitable for all ages and can be played online or offline.
The origin of the bubble shooter game can be traced back to 1994, when the Japanese company Taito released an arcade game called Puzzle Bobble (also known as Bust-a-Move). This game featured cute dinosaurs shooting bubbles at the top of the screen. The game was a huge hit and spawned many sequels and spin-offs. In 2002, a company called Absolutist released a web-based version of the game called Bubble Shooter, which became one of the most popular online games ever. Since then, many variations and clones of the game have been created, including Bubble Shooter Classic.
-
The main features and gameplay of the game
-
Bubble Shooter Classic has many features that make it an enjoyable and addictive game. Some of them are:
-
-
The game has colorful graphics and sound effects that create a pleasant atmosphere.
-
The game has simple and intuitive controls. You just need to tap on the screen to aim and shoot the bubbles.
-
The game has three difficulty levels: easy, medium, and hard. You can choose the one that suits your skill level.
-
The game has three game modes: puzzle, arcade, and classic. Each mode has its own rules and objectives.
-
The puzzle mode has over 1800 levels with different challenges and puzzles. You have to clear the board with a limited number of shots.
-
The arcade mode has over 1750 levels with increasing difficulty. You have to pop as many bubbles as you can before they reach the bottom of the screen.
-
The classic mode has three difficulty levels: easy, medium, and hard. You have to pop all the bubbles in this retro mode.
-
The game has various power-ups and boosters that can help you clear the board faster. You can earn coins by playing the game and use them to buy these items.
-
The game has a leaderboard and achievements system that tracks your progress and performance. You can compete with your friends and other players around the world.
-
The game has a daily bonus feature that rewards you with coins and power-ups every day.
-
-
How to download and install Bubble Shooter Classic for free?
-
If you want to play Bubble Shooter Classic on your device, you can download it for free from various sources. Here are some of them:
-
The requirements and steps for downloading the game on different devices
-
Bubble Shooter Classic is compatible with various devices, such as smartphones, tablets, laptops, and desktops. You can download it for free from different sources, depending on your device. Here are some of the requirements and steps for downloading the game on different devices:
-
| Device | Requirements | Steps |
| --- | --- | --- |
| Android smartphone or tablet | You need to have Android 4.1 or higher and at least 40 MB of free space on your device. | You can download the game from the Google Play Store by searching for "Bubble Shooter Classic" or by clicking on this link. You can also scan the QR code below to access the download page. Once you download the game, you can install it and start playing. |
| iOS smartphone or tablet | You need to have iOS 9.0 or later and at least 70 MB of free space on your device. | You can download the game from the App Store by searching for "Bubble Shooter Classic" or by clicking on this link. You can also scan the QR code below to access the download page. Once you download the game, you can install it and start playing. |
| Windows laptop or desktop | You need to have Windows 10 and at least 100 MB of free space on your device. | You can download the game from the Microsoft Store by searching for "Bubble Shooter Classic" or by clicking on this link. You can also scan the QR code below to access the download page. Once you download the game, you can install it and start playing. |
bubble shooter classic free download
-bubble shooter classic game download
-bubble shooter classic apk download
-bubble shooter classic android download
-bubble shooter classic app download
-bubble shooter classic online kostenlos spielen
-bubble shooter classic offline kostenlos spielen
-bubble shooter classic ohne anmeldung kostenlos spielen
-bubble shooter classic ohne download kostenlos spielen
-bubble shooter classic ohne werbung kostenlos spielen
-bubble shooter classic for pc free download
-bubble shooter classic for windows free download
-bubble shooter classic for mac free download
-bubble shooter classic for laptop free download
-bubble shooter classic for desktop free download
-bubble shooter classic mod apk download
-bubble shooter classic hack apk download
-bubble shooter classic unlimited coins apk download
-bubble shooter classic premium apk download
-bubble shooter classic pro apk download
-bubble shooter classic ilyon free download
-bubble shooter classic ilyon game download
-bubble shooter classic ilyon apk download
-bubble shooter classic ilyon android download
-bubble shooter classic ilyon app download
-bubble popper game free download
-bubble popper game online kostenlos spielen
-bubble popper game offline kostenlos spielen
-bubble popper game ohne anmeldung kostenlos spielen
-bubble popper game ohne download kostenlos spielen
-bubble popper game for pc free download
-bubble popper game for windows free download
-bubble popper game for mac free download
-bubble popper game for laptop free download
-bubble popper game for desktop free download
-bubble popper game mod apk download
-bubble popper game hack apk download
-bubble popper game unlimited coins apk download
-bubble popper game premium apk download
-bubble popper game pro apk download
-balloon shooting game free download
-balloon shooting game online kostenlos spielen
-balloon shooting game offline kostenlos spielen
-balloon shooting game ohne anmeldung kostenlos spielen
-balloon shooting game ohne download kostenlos spielen
-balloon shooting game for pc free download
-balloon shooting game for windows free download
-
The benefits and drawbacks of downloading the game for free
-
Downloading Bubble Shooter Classic for free has its pros and cons. Here are some of them:
-
-
The benefits of downloading the game for free are:
-
You can enjoy a fun and relaxing game without spending any money.
-
You can play the game anytime and anywhere, even without an internet connection.
-
You can access thousands of levels and modes with different challenges and puzzles.
-
You can earn coins and power-ups by playing the game and use them to boost your performance.
-
You can compete with your friends and other players around the world and see who can get the highest score and the most stars.
-
-
-
The drawbacks of downloading the game for free are:
-
You might encounter some ads and pop-ups that can interrupt your gameplay.
-
You might need to update the game regularly to get new features and bug fixes.
-
You might run out of coins and power-ups if you use them too often or if you don't play enough.
-
You might get addicted to the game and spend too much time on it.
-
-
-
-
Overall, downloading Bubble Shooter Classic for free is a great way to have some fun and relax with a classic bubble pop game. However, you should also be aware of the potential drawbacks and manage your time and resources wisely.
How to play and win Bubble Shooter Classic?
-
Playing Bubble Shooter Classic is easy and fun, but winning it can be challenging and rewarding. Here are some of the basic rules and controls of the game, as well as some tips and tricks for matching and popping bubbles.
-
The basic rules and controls of the game
-
The basic rules and controls of Bubble Shooter Classic are simple and intuitive. Here are some of them:
-
-
The game has a cannon at the bottom of the screen that shoots bubbles. You can tap on the screen to aim and shoot the bubbles.
-
The game has a board at the top of the screen that is filled with bubbles of different colors. You have to match three or more bubbles of the same color to pop them and clear the board.
-
The game has a score counter at the top left corner of the screen that shows your current score. You can earn points by popping bubbles, making combos, and clearing levels.
-
The game has a star counter at the top right corner of the screen that shows your current star rating. You can earn stars by completing levels with a high score and a low number of shots.
-
The game has a level indicator at the bottom left corner of the screen that shows your current level and mode. You can switch between puzzle, arcade, and classic modes by tapping on it.
-
The game has a coin counter at the bottom right corner of the screen that shows your current coin balance. You can earn coins by playing the game and use them to buy power-ups and boosters.
-
The game has a pause button at the top center of the screen that allows you to pause and resume the game. You can also access the settings, sound, and help menus from there.
-
-
The tips and tricks for matching and popping bubbles
-
Matching and popping bubbles is the core of Bubble Shooter Classic, but it can also be tricky and strategic. Here are some tips and tricks for matching and popping bubbles:
-
-
Aim carefully and try to hit the bubbles that are close to each other. This will create bigger clusters and combos that will pop more bubbles and earn more points.
-
Use the walls to bounce your bubbles and reach difficult spots. This will help you clear the board faster and avoid wasting shots.
-
Pay attention to the color of the next bubble in your cannon. This will help you plan your moves ahead and avoid mismatching bubbles.
-
Try to pop the bubbles that are hanging from the top or from other bubbles. This will cause them to fall down and pop more bubbles along the way.
-
Use power-ups and boosters wisely. They can help you clear the board faster, but they also cost coins or shots. Some of them are:
-
The fireball: This power-up can burn through any bubble it touches, regardless of its color.
-
The bomb: This power-up can explode and pop all the bubbles around it, regardless of their color.
-
The rainbow: This power-up can change its color to match any bubble it touches, creating a big combo.
-
The aim: This booster can help you aim more precisely by showing you a dotted line that indicates where your bubble will go.
-
The swap: This booster can help you swap your current bubble with the next one in your cannon, giving you more options.
-
The extra shot: This booster can give you an extra shot for your current level, allowing you to pop more bubbles.
-
-
-
-
The different game modes and levels of the game
-
Bubble Shooter Classic has three game modes: puzzle, arcade, and classic. Each mode has its own rules and objectives, as well as different levels of difficulty and fun. Here are some of them:
-
| Mode | Description | Objective | Difficulty | Fun |
| --- | --- | --- | --- | --- |
| Puzzle | This mode has over 1800 levels with different challenges and puzzles. You have to clear the board with a limited number of shots. | To clear all the bubbles on the board with as few shots as possible. | Hard | High |
| Arcade | This mode has over 1750 levels with increasing difficulty. You have to pop as many bubbles as you can before they reach the bottom of the screen. | To pop as many bubbles as possible before they touch the bottom line. | Medium | Medium |
| Classic | This mode has three difficulty levels: easy, medium, and hard. You have to pop all the bubbles in this retro mode. | To pop all the bubbles on the board with no time or shot limit. | Easy | Low |
You can switch between the modes by tapping on the level indicator at the bottom left corner of the screen. You can also see your progress and performance in each mode by tapping on the star counter at the top right corner of the screen.
-
Why is Bubble Shooter Classic a fun and relaxing game?
-
Bubble Shooter Classic is a fun and relaxing game for many reasons. Here are some of them:
-
The advantages of playing Bubble Shooter Classic for your brain and mood
-
Playing Bubble Shooter Classic can have positive effects on your brain and mood. Some of them are:
-
-
Playing Bubble Shooter Classic can improve your concentration and focus. You have to pay attention to the colors, patterns, and movements of the bubbles and plan your moves ahead.
-
Playing Bubble Shooter Classic can enhance your memory and recall. You have to remember the color of the next bubble in your cannon and the position of the bubbles on the board.
-
Playing Bubble Shooter Classic can boost your problem-solving and logical thinking skills. You have to find the best way to match and pop the bubbles and overcome the obstacles and challenges.
-
Playing Bubble Shooter Classic can reduce your stress and anxiety levels. You can enjoy a soothing and calming game that can distract you from your worries and troubles.
-
Playing Bubble Shooter Classic can increase your happiness and satisfaction levels. You can feel a sense of achievement and reward when you pop the bubbles, clear the levels, and earn stars and coins.
-
-
The challenges and rewards of playing Bubble Shooter Classic
-
Playing Bubble Shooter Classic can also be challenging and rewarding. Some of them are:
-
-
Playing Bubble Shooter Classic can test your patience and perseverance. You have to deal with difficult levels, limited shots, moving bubbles, and other obstacles that can make you frustrated or angry.
-
Playing Bubble Shooter Classic can motivate you to improve your skills and performance. You have to try harder, practice more, and learn from your mistakes to get better scores, more stars, and higher rankings.
-
Playing Bubble Shooter Classic can inspire you to explore new possibilities and strategies. You have to experiment with different angles, power-ups, boosters, and game modes to find the most effective and fun way to play the game.
-
Playing Bubble Shooter Classic can reward you with coins and power-ups that can help you progress faster and easier. You can earn them by playing the game, completing achievements, or claiming daily bonuses.
-
Playing Bubble Shooter Classic can reward you with a sense of accomplishment and pride when you complete a level, especially a hard one. You can also share your results with your friends and other players online.
-
-
The best ways to enjoy Bubble Shooter Classic with friends and family
-
Bubble Shooter Classic is not only a solo game, but also a social game. You can enjoy it with your friends and family in many ways. Some of them are:
-
-
You can play Bubble Shooter Classic online with your friends and family who have the same game on their devices. You can invite them to join you in a multiplayer mode where you can cooperate or compete with each other.
-
You can play Bubble Shooter Classic offline with your friends and family who are near you. You can take turns playing the game on one device or use multiple devices to play together.
-
You can compare your scores, stars, levels, achievements, and rankings with your friends and family who play Bubble Shooter Classic. You can see who is the best player among you and challenge them to beat your records.
-
You can chat with your friends and family who play Bubble Shooter Classic. You can exchange tips, tricks, feedback, compliments, jokes, or anything else related to the game.
-
You can have fun with your friends and family who play Bubble Shooter Classic. You can make bets, jokes, pranks, or games based on the game outcomes or events.
-
-
Conclusion
-
Bubble Shooter Classic is a classic and addictive bubble pop game that is easy to play but hard to master. It has thousands of levels with different layouts, obstacles, challenges, modes, difficulty levels, power-ups, boosters, coins, stars, achievements, leaderboards, online multiplayer features, offline single-player features, colorful graphics, sound effects, simple controls, daily bonuses, and more. It is a fun and relaxing game that can improve your brain function and mood,
reduce your stress levels, and boost your happiness. It is also a challenging and rewarding game that can test your patience and perseverance, sharpen your skills and performance, and inspire you to explore new possibilities and strategies. It is also a social game that can be enjoyed with your friends and family in various ways. If you are looking for a fun and relaxing game that can keep you entertained for hours, you might want to try Bubble Shooter Classic. You can download it for free from various sources and play it on your device anytime and anywhere. Have fun and good luck!
-
FAQs
-
Here are some of the frequently asked questions about Bubble Shooter Classic:
-
-
How do I get more coins and power-ups in Bubble Shooter Classic?
-
You can get more coins and power-ups in Bubble Shooter Classic by playing the game, completing achievements, claiming daily bonuses, watching ads, or buying them with real money.
-
How do I change the difficulty level or the game mode in Bubble Shooter Classic?
-
You can change the difficulty level or the game mode in Bubble Shooter Classic by tapping on the level indicator at the bottom left corner of the screen. You can choose from easy, medium, or hard difficulty levels and from puzzle, arcade, or classic game modes.
-
How do I play online multiplayer mode in Bubble Shooter Classic?
-
You can play online multiplayer mode in Bubble Shooter Classic by tapping on the multiplayer button at the top center of the screen. You can invite your friends or join random players in a cooperative or competitive mode.
-
How do I pause or resume the game in Bubble Shooter Classic?
-
You can pause or resume the game in Bubble Shooter Classic by tapping on the pause button at the top center of the screen. You can also access the settings, sound, and help menus from there.
-
How do I contact the support team or report a bug in Bubble Shooter Classic?
-
You can contact the support team or report a bug in Bubble Shooter Classic by tapping on the help button at the top center of the screen. You can also email them at support@bubbleshooter.com or visit their website at www.bubbleshooter.com.
- 197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Parking Multiplayer Mod APK How to Experience the Open World with Real Gas Stations and Car Services.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Parking Multiplayer Mod APK How to Experience the Open World with Real Gas Stations and Car Services.md
deleted file mode 100644
index 4550fa7bed400fa5b227805fbe8e40b34ba32a1c..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Parking Multiplayer Mod APK How to Experience the Open World with Real Gas Stations and Car Services.md
+++ /dev/null
@@ -1,100 +0,0 @@
-
-
Download Car Parking Multiplayer Mod APK Happymod - A Guide for Car Lovers
-
If you are a car lover and you enjoy parking games, you might want to try out Car Parking Multiplayer, a realistic and fun game that lets you drive, park, and customize your own cars. And if you want to make the game even more exciting, you can download Car Parking Multiplayer Mod APK Happymod, a modified version of the game that gives you unlimited money, resources, and access to all cars and upgrades. In this article, we will tell you everything you need to know about Car Parking Multiplayer Mod APK Happymod, including what it is, what are its benefits, how to download and install it, and whether it is safe and legal.
-
What is Car Parking Multiplayer?
-
Car Parking Multiplayer is a game that supports open-world multiplayer mode, car tuning, police mode, and free walking. Plus, you can decide to jump out of the car and walk around. There are several areas that you can explore in the game and you can choose to play either single-player mode or online mode if you want a more chaotic (fun) scene.
The main feature of Car Parking Multiplayer is, of course, the car parking gameplay. You can choose from over 100 cars with real interiors and drive them in various parking scenarios. You can also adjust the suspension, wheel angle, engine, turbo, gearbox, exhaust, and more to suit your driving style. The game has realistic physics and graphics that make the parking experience more challenging and enjoyable.
-
A multiplayer open world mode with real players
-
Another feature of Car Parking Multiplayer is the multiplayer open world mode. In this mode, you can join thousands of real players from all over the world in a huge map with real gas stations and car services. You can compete against them in races, exchange cars with them, chat with them using voice chat, or even become a police officer and chase them. You can also make friends with other players and add them to your friend list.
-
A car customization and tuning feature
-
The last feature of Car Parking Multiplayer is the car customization and tuning feature. In this feature, you can make your car stand out from the crowd by applying dynamic vinyls, car body parts, stickers, neon lights, spoilers, rims, tires, and more. You can also change the color of your car or paint it with different patterns. You can also upgrade your car's performance by swapping engines, adding turbos, changing gearboxes, or installing exhausts.
-
What is Happymod?
-
Happymod is a platform for downloading modded APK files for Android games and apps. Modded APK files are modified versions of the original files that have some features changed or added to enhance the user's experience. For example, some modded APK files may have unlimited money, unlocked features, menu options, or cheats. Happymod is a popular source for modded APK files because it has several advantages over other platforms.
-
A safe and reliable source for mods
-
One of the advantages of Happymod is that it is safe and reliable. All the modded APK files on Happymod are tested by users and verified by editors before they are uploaded to the platform. This means that you can download the mods without worrying about viruses, malware, or corrupted files. You can also read the reviews and ratings from other users to see if the mod works well and meets your expectations.
-
A community of modders and users
-
Another advantage of Happymod is that it is a community of modders and users. Happymod allows users to request mods for their favorite games and apps, and modders can upload their mods to the platform for others to enjoy. You can also join the discussion forum and chat with other users and modders about your feedback, suggestions, or questions. You can also share your modded APK files with your friends via social media or email.
-
What are the benefits of downloading Car Parking Multiplayer Mod APK Happymod?
-
If you are a fan of Car Parking Multiplayer, you might want to download Car Parking Multiplayer Mod APK Happymod to get some extra benefits that will make your game more fun and easy. Here are some of the benefits of downloading Car Parking Multiplayer Mod APK Happymod:
-
Unlimited money and resources
-
One of the benefits of downloading Car Parking Multiplayer Mod APK Happymod is that you will get unlimited money and resources in the game. This means that you can buy any car you want, upgrade it to the max, and customize it to your liking. You can also buy any item or service you need in the game, such as gas, car wash, repair, or insurance. You will never run out of money or resources in the game.
-
download car parking multiplayer mod apk happymod with unlimited money
-download car parking multiplayer mod apk happymod with mod menu
-download car parking multiplayer mod apk happymod latest version
-download car parking multiplayer mod apk happymod for android
-download car parking multiplayer mod apk happymod free
-download car parking multiplayer mod apk happymod 4.8.9.4.1
-download car parking multiplayer mod apk happymod 4.8.9.3.6
-download car parking multiplayer mod apk happymod with voice chat
-download car parking multiplayer mod apk happymod with police mode
-download car parking multiplayer mod apk happymod with free walking
-download car parking multiplayer mod apk happymod with real gas stations
-download car parking multiplayer mod apk happymod with real players
-download car parking multiplayer mod apk happymod with friend list
-download car parking multiplayer mod apk happymod with open world
-download car parking multiplayer mod apk happymod with 70+ cars
-download car parking multiplayer mod apk happymod with HD graphics
-download car parking multiplayer mod apk happymod with realistic physics
-download car parking multiplayer mod apk happymod with missions and goals
-download car parking multiplayer mod apk happymod with online racing
-download car parking multiplayer mod apk happymod with car customization
-download car parking multiplayer mod apk happymod with car exchange
-download car parking multiplayer mod apk happymod with simulation category
-download car parking multiplayer mod apk happymod by ayanbiswas2489
-download car parking multiplayer mod apk happymod by olzhas
-download car parking multiplayer mod apk happymod from new scientist website[^1^]
-download car parking multiplayer mod apk happymod from the sun website[^2^] [^3^]
-download car parking multiplayer mod apk happymod from yahoo news website
-how to download car parking multiplayer mod apk happymod safely
-how to download car parking multiplayer mod apk happymod without virus
-how to download car parking multiplayer mod apk happymod without root
-how to install car parking multiplayer mod apk happymod on android device
-how to play car parking multiplayer mod apk happymod online
-how to update car parking multiplayer mod apk happymod to latest version
-how to uninstall car parking multiplayer mod apk happymod from android device
-how to fix car parking multiplayer mod apk happymod not working issue
-what is the difference between car parking multiplayer and car parking multiplayer mod apk happymod
-what are the benefits of using car parking multiplayer mod apk happymod over original game
-what are the drawbacks of using car parking multiplayer mod apk happymod over original game
-what are the reviews of users who downloaded car parking multiplayer mod apk happymod
-what are the alternatives to car parking multiplayer mod apk happymod
-
Access to all cars and upgrades
-
Another benefit of downloading Car Parking Multiplayer Mod APK Happymod is that you will get access to all cars and upgrades in the game. This means that you can drive any car you want, from sports cars to trucks, from classic cars to supercars. You can also unlock all the upgrades for your cars, such as engines, turbos, gearboxes, exhausts, suspensions, wheels, tires, and more. You will have the best cars in the game.
-
Menu with various options and features
-
The last benefit of downloading Car Parking Multiplayer Mod APK Happymod is that you will get a menu with various options and features in the game. This means that you can activate or deactivate different mods according to your preference. For example, you can enable or disable god mode, speed hack, teleportation, invisibility, free camera, no ads, and more. You can also change the weather, time, traffic, police mode, and other settings in the game. You will have full control over the game.
-
How to download and install Car Parking Multiplayer Mod APK Happymod?
-
If you are interested in downloading Car Parking Multiplayer Mod APK Happymod, you can follow these simple steps:
-
Step 1: Visit the Happymod website and search for Car Parking Multiplayer Mod APK
-
The first step is to visit the Happymod website at https://www.happymod.com/ and search for Car Parking Multiplayer Mod APK in the search bar. You will see a list of results with different versions and features of the mod. Choose the one that suits your needs and click on the download button.
-
Step 2: Download the APK file and enable unknown sources on your device
-
The second step is to download the APK file to your device and enable unknown sources on your device. To do this, go to your device settings > security > unknown sources and toggle it on. This will allow you to install apps from sources other than Google Play Store.
-
Step 3: Install the APK file and launch the game
-
The third step is to install the APK file and launch the game. To do this, locate the downloaded APK file on your device storage and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to finish. Once done, launch the game from your app drawer or home screen and enjoy.
-
Is Car Parking Multiplayer Mod APK Happymod safe and legal?
-
Before you download Car Parking Multiplayer Mod APK Happymod, you might wonder if it is safe and legal to use. Here are some answers to these questions:
-
The mod APK is safe from viruses and malware
-
As we mentioned earlier, Happymod is a safe and reliable platform for downloading modded APK files. All the mods on Happymod are tested by users and verified by editors before they are uploaded to the platform. This means that you can download Car Parking Multiplayer Mod APK Happymod without worrying about viruses, malware, or corrupted files. However, you should always be careful when downloading any file from the internet and scan it with a reputable antivirus software before installing it.
-
The mod APK is legal as long as you use it for personal and educational purposes
-
Another question you might have is whether Car Parking Multiplayer Mod APK Happymod is legal to use. The answer is that it depends on how you use it and where you live. Generally speaking, modding games and apps is not illegal as long as you use it for personal and educational purposes and do not distribute or sell it to others. However, some countries or regions may have different laws or regulations regarding modding or hacking games and apps. Therefore, you should always check the local laws and rules before downloading or using any modded APK file.
-
The mod APK may not be compatible with the latest version of the game or the original developer's policies
-
The last thing you should know is that Car Parking Multiplayer Mod APK Happymod may not be compatible with the latest version of the game or the original developer's policies. This means that the mod may not work properly or cause some errors or glitches in the game. It also means that the mod may violate the terms of service or privacy policy of the game or the app store. This could result in your account being banned or suspended by the game developer or the app store. Therefore, you should always use the mod at your own risk and discretion.
-
Conclusion
-
Car Parking Multiplayer is a realistic and fun car parking game that supports open-world multiplayer mode, car tuning, police mode, and free walking. You can drive, park, and customize over 100 cars with real interiors in various parking scenarios. You can also join thousands of real players from all over the world in a huge map with real gas stations and car services. You can compete against them in races, exchange cars with them, chat with them using voice chat, or even become a police officer and chase them.
-
If you want to make the game even more exciting, you can download Car Parking Multiplayer Mod APK Happymod, a modified version of the game that gives you unlimited money, resources, and access to all cars and upgrades. You can also get a menu with various options and features that let you control the game settings and activate different mods. You can download Car Parking Multiplayer Mod APK Happymod from Happymod, a safe and reliable platform for downloading modded APK files for Android games and apps.
-
However, you should be aware that Car Parking Multiplayer Mod APK Happymod may not be compatible with the latest version of the game or the original developer's policies. It may also not be legal to use in some countries or regions depending on how you use it and where you live. Therefore, you should always check the local laws and rules before downloading or using any modded APK file. You should also use the mod at your own risk and discretion.
-
FAQs
-
Here are some frequently asked questions about Car Parking Multiplayer Mod APK Happymod:
-
Q: Can I play online mode with Car Parking Multiplayer Mod APK Happymod?
-
A: Yes, you can play online mode with Car Parking Multiplayer Mod APK Happymod. However, you may encounter some problems or issues when playing online mode with other players who are using the original version of the game or a different version of the mod. Therefore, we recommend that you play online mode with other players who are using the same version of the mod as you.
-
Q: Can I update Car Parking Multiplayer Mod APK Happymod?
-
A: No, you cannot update Car Parking Multiplayer Mod APK Happymod from Google Play Store or any other app store. If you want to update Car Parking Multiplayer Mod APK Happymod, you have to visit Happymod website again and download the latest version of the mod from there. However, you should note that updating Car Parking Multiplayer Mod APK Happymod may erase your previous data or progress in the game. Therefore, we suggest that you backup your data or progress before updating Car Parking Multiplayer Mod APK Happymod.
-
Q: Can I uninstall Car Parking Multiplayer Mod APK Happymod and reinstall the original version of the game?
-
A: Yes, you can uninstall Car Parking Multiplayer Mod APK Happymod and reinstall the original version of the game from Google Play Store or any other app store. However, you should note that uninstalling Car Parking Multiplayer Mod APK Happymod may erase your previous data or progress in the game. Therefore, we suggest that you backup your data or progress before uninstalling Car Parking Multiplayer Mod APK Happymod.
-
Q: Can I use Car Parking Multiplayer Mod APK Happymod on iOS devices?
-
A: No, you cannot use Car Parking Multiplayer Mod APK Happymod on iOS devices. Car Parking Multiplayer Mod APK Happymod is only compatible with Android devices. If you want to play Car Parking Multiplayer on iOS devices, you have to download the original version of the game from App Store or any other app store.
-
Q: Can I share Car Parking Multiplayer Mod APK Happymod with my friends?
-
A: Yes, you can share Car Parking Multiplayer Mod APK Happymod with your friends via social media or email. However, you should respect the intellectual property rights of the game developer and the modder and do not distribute or sell Car Parking Multiplayer Mod APK Happymod to others for commercial purposes. You should also inform your friends about the risks and responsibilities of using Car Parking Multiplayer Mod APK Happymod.
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash Royale Dinheiro Infinito baixe o apk e entre na arena com vantagem!.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash Royale Dinheiro Infinito baixe o apk e entre na arena com vantagem!.md
deleted file mode 100644
index 6e7c6c58eca332def07645965c28774a678e9c12..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash Royale Dinheiro Infinito baixe o apk e entre na arena com vantagem!.md
+++ /dev/null
@@ -1,139 +0,0 @@
-
-
Clash Royale Dinheiro Infinito Apk: How to Download and Play
-
Are you a fan of Clash Royale, the strategy and card game that has won over millions of players around the world? Would you like unlimited resources to build your deck, buy new cards, upgrade your troops, and challenge your opponents? Then you need to know about Clash Royale Dinheiro Infinito Apk (literally "infinite money"), a mod apk that offers unlimited money and gems so you can enjoy the game without limits. In this article, we explain what Clash Royale is, what the mod apk is, how to download and install the game, and how to play with infinite money. Curious? Then keep reading and find out everything about this game.
-
What is Clash Royale?
-
Clash Royale is a strategy and card game developed by Supercell, the same company behind other hits such as Clash of Clans, Brawl Stars, and Hay Day. The game was released in 2016 for Android and iOS and has since become one of the most popular and profitable games in the world.
The gameplay of Clash Royale is simple and intuitive. At the start of a match there are three towers: one in the middle and two on the sides. Your goal is to lead your army to destroy the enemy's central tower while also protecting your own. To do that, you use cards that represent your troops, spells, and buildings. Each card has an elixir cost, and elixir regenerates over time. You can have up to eight cards in your deck, but only four in hand at a time. A match lasts three minutes and can go into overtime in case of a tie.
-
A worldwide success
-
Clash Royale mixes elements of several genres, such as strategy, tower defense, collectible cards, and online multiplayer. On top of that, the game has colorful visuals, charismatic characters, fun sound effects, and an engaging soundtrack. All of this makes Clash Royale addictive, challenging, and fun. It is no surprise that the game has more than 500 million downloads on the Google Play Store, an average rating of 4.3 stars, and an active community of players who take part in tournaments, leagues, clans, and special events.
-
What is Clash Royale Dinheiro Infinito Apk?
-
Clash Royale Dinheiro Infinito Apk is a mod apk, that is, a modified version of the original game that offers unlimited resources to players.
With this mod apk you can have infinite money and gems in the game, which means you can buy every card you want, upgrade your troops to the maximum, open every chest you find, and take part in every event and challenge without worrying about your balance. That way you can build your ideal deck, try out new strategies, and have a lot more fun in the game.
-
The benefits and risks of using the mod apk
-
Clash Royale Dinheiro Infinito Apk has some benefits for players who want a freer and more varied experience in the game. Some of these benefits are:
-
-
You get access to every card in the game, including legendary, epic, rare, and common cards.
-
You can upgrade your cards quickly, without waiting for chests or spending gems.
-
You can open as many chests as you want, without waiting for unlock timers or using gems.
-
You can join every event and challenge the game offers, without worrying about entry costs or the number of attempts.
-
You can test different card combinations and see which one works best for your playstyle.
-
You can have more fun in the game, without stressing over a lack of resources or the difficulty of your opponents.
-
-
However, Clash Royale Dinheiro Infinito Apk also has some risks you should consider before downloading and installing the game. Some of these risks are:
-
-
You can lose your original Clash Royale account, because the mod apk is not official and is not recognized by Supercell. This means you can be banned from the game for violating the terms of service and the privacy policy.
-
You can run into compatibility, update, and security problems, because the mod apk is not updated with the same frequency and quality as the original game. This means you may face bugs, errors, crashes, and even viruses on your device.
-
You can lose the fun of the game, because the mod apk removes the challenge and the thrill of earning resources and victories. This means you may end up bored and unmotivated.
-
-
How to download and install Clash Royale Dinheiro Infinito Apk?
-
If you have decided to download and install Clash Royale Dinheiro Infinito Apk, you need to follow a few simple steps. But first, check whether your device meets the minimum requirements to run the game.
-
The minimum requirements to run the game
-
To download and install Clash Royale Dinheiro Infinito Apk, you need an Android device with the following specifications:
-
clash royale mod apk gemas infinitas 2023
-como baixar clash royale com dinheiro infinito
-clash royale hack apk download mediafire
-clash royale atualizado 2023 dinheiro infinito
-clash royale apk mod tudo infinito 2023
-clash royale apk mod menu dinheiro infinito
-baixar clash royale hackeado para android
-clash royale apk mod servidor privado 2023
-clash royale apk mod moedas e gemas infinitas
-clash royale apk mod desbloqueado tudo 2023
-como instalar clash royale com dinheiro infinito
-clash royale hack apk sem verificação humana
-clash royale apk mod funcionando 2023
-clash royale apk mod online dinheiro infinito
-clash royale apk mod offline dinheiro infinito
-baixar clash royale hackeado atualizado 2023
-clash royale apk mod servidor brasileiro 2023
-clash royale apk mod cartas lendárias infinitas
-clash royale apk mod anti ban dinheiro infinito
-clash royale apk mod sem root dinheiro infinito
-como jogar clash royale com dinheiro infinito
-clash royale hack apk unlimited gems and coins
-clash royale apk mod novas cartas 2023
-clash royale apk mod pass royale infinito
-clash royale apk mod elixir infinito 2023
-baixar clash royale hackeado para ios
-clash royale apk mod mega cavaleiro infinito
-clash royale apk mod dragão elétrico infinito
-clash royale apk mod princesa do gelo infinita
-clash royale apk mod rei fantasma infinito
-como atualizar clash royale com dinheiro infinito
-clash royale hack apk sem root dinheiro infinito
-clash royale apk mod torneios infinitos 2023
-clash royale apk mod bau lendário infinito
-clash royale apk mod troféus infinitos 2023
-baixar clash royale hackeado para pc
-clash royale apk mod corredor da morte infinito
-clash royale apk mod bruxa sombria infinita
-clash royale apk mod esqueleto gigante infinito
-clash royale apk mod goblin gigante infinito
-como desinstalar clash royale com dinheiro infinito
-clash royale hack apk sem anúncios dinheiro infinito
-clash royale apk mod clãs infinitos 2023
-clash royale apk mod guerras de clãs infinitas
-clash royale apk mod desafios especiais infinitos
-baixar clash royale hackeado para celular
-clash royale apk mod dragão infernal infinito
-clash royale apk mod lenhador infinito
-clash royale apk mod mineiro infinito
-
-
Requirement / Specification
-
Android version: 4.1 or higher
-
Free storage: at least 150 MB
-
Internet connection: Wi-Fi or mobile data
-
Permissions: access to storage, camera, microphone, and location
-
-
Step by step: how to download and install the mod apk
-
Once you have checked that your device meets the minimum requirements, you can follow the steps below to download and install Clash Royale Dinheiro Infinito Apk:
-
-
Go to a trustworthy site that offers the mod apk download. You can search Google for "Clash Royale Dinheiro Infinito Apk" or use one of the links below:
-
-
[Clash Royale Dinheiro Infinito Apk 2023]
-
[Clash Royale Mod Apk Unlimited Money and Gems]
-
[Clash Royale Dinheiro Infinito Apk Download]
-
-
Tap the mod apk download button and wait for the file to finish downloading to your device. The file should be in .apk format and around 150 MB in size (a quick way to double-check the download is sketched right after this list).
-
Before installing the mod apk, you need to enable the option to install apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and turn the option on.
-
After enabling the option, locate the mod apk file in your file manager and tap it to start the installation. Follow the on-screen instructions and wait for the installation to finish.
-
After installation, you can open the game and enjoy the infinite money and gems. You do not need to register or log in; just tap Play and start having fun.
-
-
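Because this article repeatedly warns about fake or tampered files, it can be worth checking the download on a computer before copying it to your phone. The short Python sketch below is only an illustration: the file name is a placeholder, and you would compare the printed hash against whatever checksum the download page publishes, if it publishes one.

```python
import hashlib
from pathlib import Path

# Placeholder name -- point this at the file you actually downloaded.
apk_path = Path("clash-royale-dinheiro-infinito.apk")

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Hash the file in chunks so even a ~150 MB APK stays cheap on memory."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

size_mb = apk_path.stat().st_size / (1024 * 1024)
print(f"size: {size_mb:.1f} MB")       # the article says roughly 150 MB
print("sha256:", sha256_of(apk_path))  # compare against the site's checksum, if any
```

If the size is far off or the checksum does not match, delete the file and try another source.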
How to play Clash Royale Dinheiro Infinito Apk?
-
Now that you have downloaded and installed Clash Royale Dinheiro Infinito Apk, you can play the game with unlimited resources. First, though, you should know a few differences between the original game and the mod apk, plus some tips and tricks to do well in the game.
-
The main differences between the original game and the mod apk
-
Clash Royale Dinheiro Infinito Apk is very similar to the original game, but it has some differences you should be aware of. Some of them are:
-
-
The mod apk does not connect to Supercell's servers, but to private servers that host the modified game. This means you cannot play with your friends or with other players who use the original game, only with other players who use the mod apk.
-
The mod apk does not get the same updates and new content as the original game, since it depends on the mod developers to update it. This means you may miss out on features, cards, events, and fixes that the original game offers.
-
The mod apk does not have the same balance and difficulty as the original game, since every player has unlimited resources. This means you may face opponents who are far stronger or far weaker than you, depending on luck or skill.
-
-
Tips and tricks to do well in the game
-
Despite the differences, Clash Royale Dinheiro Infinito Apk is still a strategy and card game that requires thinking and planning. With that in mind, here are some tips and tricks to help you do well:
-
-
Take advantage of the unlimited resources to experiment with different cards and combinations. You can test the cards you don't own in the original game, or the ones you own but rarely use. That way you can discover new ways to play and surprise your opponents.
-
Don't forget to upgrade your cards whenever possible. Even with infinite money and gems, you still need to upgrade your cards to increase their stats and abilities. The stronger your card, the better your chances of winning.
-
Don't get carried away by greed and waste. Even with unlimited resources, you still need to use your cards intelligently and efficiently. There is no point in spending all your elixir on a single card or a single push, because that can leave you vulnerable to your opponent's defense or counterattack.
-
Don't underestimate or overestimate your opponents. Even with unlimited resources, you still need to respect your opponents and analyze their strategies. Don't assume you will win easily just because you have infinite money and gems, as that can make you lose focus or confidence. Likewise, don't assume you will lose easily just because your opponent has stronger or rarer cards, as that can make you lose hope or creativity.
-
Have fun and learn from the game. Even with unlimited resources, you still need to enjoy the game and learn from it. Don't get frustrated if you lose or make mistakes; that is part of improving. Also, don't get complacent if you win or get everything right, because that can hold back your growth. The important thing is to have fun, learn from the game, and use the mod apk as a way to experiment with new possibilities.
-
-
Conclusion
-
Clash Royale is a strategy and card game that is a hit all over the world. With Clash Royale Dinheiro Infinito Apk you can have infinite money and gems in the game, which lets you buy and upgrade every card you want, open every chest you find, and join every event and challenge without limits. However, you also need to know the benefits and risks of using the mod apk, how to download and install the game, and how to play with unlimited resources. With this information, you can decide whether Clash Royale Dinheiro Infinito Apk is worth downloading and installing, and how to make the most of the game.
-
So, did you enjoy the article? Have you already played Clash Royale Dinheiro Infinito Apk? What did you think of the game? Leave a comment below and share your opinion with us. And if you liked the article, don't forget to share it with your friends on social media. Thanks for reading and see you next time!
-
FAQs
-
Here are some frequently asked questions about Clash Royale Dinheiro Infinito Apk:
-
-
What is Clash Royale?
-
Clash Royale is a strategy and card game developed by Supercell, the same company behind other hits such as Clash of Clans, Brawl Stars, and Hay Day. The game was released in 2016 for Android and iOS and has since become one of the most popular and profitable games in the world.
-
What is Clash Royale Dinheiro Infinito Apk?
-
Clash Royale Dinheiro Infinito Apk is a mod apk, that is, a modified version of the original game that offers unlimited resources to players. With this mod apk you can have infinite money and gems in the game, which means you can buy every card you want, upgrade your troops to the maximum, open every chest you find, and take part in every event and challenge without worrying about your balance.
-
How do I download and install Clash Royale Dinheiro Infinito Apk?
-
To download and install Clash Royale Dinheiro Infinito Apk, you need to follow a few simple steps: 1) Go to a trustworthy site that offers the mod apk download; 2) Tap the download button and wait for the file to finish downloading to your device; 3) Before installing the mod apk, enable the option to install apps from unknown sources on your device; 4) After enabling the option, locate the mod apk file in your file manager and tap it to start the installation; 5) After installation, open the game and enjoy the infinite money and gems.
-
How do I play Clash Royale Dinheiro Infinito Apk?
-
To play Clash Royale Dinheiro Infinito Apk, you should know a few differences between the original game and the mod apk, plus some tips and tricks to do well in the game: 1) The mod apk does not connect to Supercell's servers, but to private servers that host the modified game; 2) The mod apk does not get the same updates and new content as the original game; 3) The mod apk does not have the same balance and difficulty as the original game, since every player has unlimited resources; 4) Take advantage of the unlimited resources to experiment with different cards and combinations; 5) Don't forget to upgrade your cards whenever possible; 6) Don't get carried away by greed and waste; 7) Don't underestimate or overestimate your opponents; 8) Have fun and learn from the game.
-
Is Clash Royale Dinheiro Infinito Apk safe?
-
Clash Royale Dinheiro Infinito Apk is not an official Supercell app, so it does not carry the same guarantees of safety and quality as the original game. By downloading and installing the mod apk, you may be putting your game account, your device, and your personal data at risk. For that reason, use the mod apk with caution and back up your files before installing the game. It is also advisable to download the mod apk only from trustworthy sites that verify the origin and integrity of the file.
-
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Ball Run Merge 2048 APK A New Twist on the Classic 2048 Game.md b/spaces/1phancelerku/anime-remove-background/Ball Run Merge 2048 APK A New Twist on the Classic 2048 Game.md
deleted file mode 100644
index 49ee025f48178d4b158069b9db9981cfe4da632b..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Ball Run Merge 2048 APK A New Twist on the Classic 2048 Game.md
+++ /dev/null
@@ -1,129 +0,0 @@
-
-
Ball Run Merge 2048 APK: A Fun and Relaxing Game for Android Users
-
Are you looking for a new game to play on your Android device? Do you enjoy casual games that are easy to learn but hard to master? If yes, then you might want to try Ball Run Merge 2048 APK, a fun and relaxing game that combines the concepts of ball rolling and number merging. In this article, we will tell you what Ball Run Merge 2048 APK is, how to play it, why you should download it, how to download it, and some tips and tricks for playing it.
Ball Run Merge 2048 APK is a game developed by Kayac Inc., a Japanese company that specializes in creating innovative and entertaining games. The game was released in June 2021 and has received positive reviews from users and critics alike. The game is inspired by the popular puzzle game 2048, where you have to merge tiles with the same number until you reach 2048. However, instead of tiles, you have balls that roll on a track. You have to control the balls with your finger and merge them with other balls of the same number and color. The more balls you merge, the bigger they get and the higher the number they show. The goal is to reach 2048 or higher before reaching the finish line.
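The scoring idea behind the game is the classic 2048 doubling rule: two balls showing the same number fuse into one ball showing twice that number. The toy Python sketch below only illustrates that arithmetic; the real game layers colors, obstacles, and timing on top of it.

```python
def merge(a: int, b: int):
    """Two balls fuse only when they show the same number; the result doubles."""
    return a * 2 if a == b else None

# Starting from a 2, each successful equal-pair merge doubles the value,
# so the chain 2 -> 4 -> 8 -> ... -> 2048 takes exactly 10 merges.
value, merges = 2, 0
while value < 2048:
    value = merge(value, value)
    merges += 1
print(value, merges)  # 2048 10
```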
-
How to play Ball Run Merge 2048 APK
-
The gameplay of Ball Run Merge 2048 APK is simple and intuitive. Here are the basic steps:
-
-
Tap and swipe to control the rolling ball.
-
While the ball is rolling, merge it with balls of the same number and color to eventually reach 2048.
-
Be careful of the obstacles, which will lower your ball number.
-
The big prize is waiting for you at the finish line!
-
-
Features of Ball Run Merge 2048 APK
-
Ball Run Merge 2048 APK has many features that make it an enjoyable and relaxing game. Some of them are:
-
-
It has various levels with different tracks, themes, and difficulties.
-
It has different types of balls with different shapes, colors, and numbers.
-
It has power-ups that can help you boost your speed, increase your number, or clear obstacles.
-
It has leaderboards and achievements that let you compete with other players and challenge yourself.
-
It has a simple and elegant design that is easy on the eyes.
-
-
Why you should download Ball Run Merge 2048 APK
-
If you are still not convinced that Ball Run Merge 2048 APK is a game worth playing, here are some reasons why you should download it:
-
It is free and easy to install
-
Ball Run Merge 2048 APK is a free game that does not require any registration or subscription. You can download it from the Google Play Store or from other websites that offer APK files. The installation process is quick and straightforward; you just need to grant the app the storage and other permissions it requests.
-
It is addictive and challenging
-
Ball Run Merge 2048 APK is a game that will keep you hooked for hours. It is not as easy as it looks. You have to be fast and accurate in swiping the balls and merging them with the right ones. You also have to avoid the obstacles and traps that can lower your number or make you lose the game. The game gets harder as you progress, so you have to be smart and strategic in your moves. The game is a great way to test your reflexes, concentration, and math skills.
-
ball run merge 2048 game
-ball run merge 2048 download
-ball run merge 2048 mod apk
-ball run merge 2048 online
-ball run merge 2048 free
-ball run merge 2048 app
-ball run merge 2048 hack
-ball run merge 2048 cheats
-ball run merge 2048 tips
-ball run merge 2048 tricks
-ball run merge 2048 review
-ball run merge 2048 gameplay
-ball run merge 2048 android
-ball run merge 2048 ios
-ball run merge 2048 pc
-ball run merge 2048 windows
-ball run merge 2048 mac
-ball run merge 2048 linux
-ball run merge 2048 chromebook
-ball run merge 2048 emulator
-ball run merge 2048 bluestacks
-ball run merge 2048 noxplayer
-ball run merge 2048 memu
-ball run merge 2048 ldplayer
-ball run merge 2048 apkcombo [^1^]
-ball run merge 2048 kayac [^2^]
-ball run - merge 2048 apk [^1^]
-ballrun2048 apk [^2^]
-ballrun - 2048 game apk [^2^]
-download ballrun - 2048 game [^2^]
-download ballrun - merge 2048 [^1^]
-play ballrun - merge 2048 online [^1^]
-play ballrun - 2048 game online [^2^]
-how to play ballrun - merge 2048 [^1^]
-how to play ballrun - 2048 game [^2^]
-how to win ballrun - merge 2048 [^1^]
-how to win ballrun - 2048 game [^2^]
-best strategy for ballrun - merge 2048 [^1^]
-best strategy for ballrun - 2048 game [^2^]
-best score in ballrun - merge 2048 [^1^]
-best score in ballrun - 2048 game [^2^]
-fun and addictive ballrun - merge 2048 [^1^]
-fun and addictive ballrun - 2048 game [^2^]
-relaxing and challenging ballrun - merge 2048 [^1^]
-relaxing and challenging ballrun - 2048 game [^2^]
-colorful and smooth graphics in ballrun - merge 2048 [^1^]
-colorful and smooth graphics in ballrun - 2048 game [^2^]
-easy and simple controls in ballrun - merge 2048 [^1^]
-easy and simple controls in ballrun - 2048 game [^2^]
-new and unique gameplay in ballrun - merge 2048 [^1^]
-
It has colorful graphics and sound effects
-
Ball Run Merge 2048 APK is a game that will delight your senses. It has bright and vivid graphics that create a cheerful and lively atmosphere. The balls are colorful and shiny, and the tracks are diverse and dynamic. The game also has upbeat and catchy sound effects that match the mood of the game. The game is a feast for your eyes and ears.
-
How to download Ball Run Merge 2048 APK
-
If you are interested in playing Ball Run Merge 2048 APK, you have several options to download it. Here are some of them:
-
From Google Play Store
-
The easiest and safest way to download Ball Run Merge 2048 APK is from Google Play Store, the official app store for Android devices. You just need to follow these steps:
-
-
Open Google Play Store on your device.
-
Search for "Ball Run Merge 2048" or use this link: (https://play.google.com/store/apps/details?id=com.kayac.ball_run_merge_2048).
-
Tap on the "Install" button and wait for the download to finish.
-
Enjoy playing Ball Run Merge 2048 APK!
-
-
From APKCombo website
-
If you want to download Ball Run Merge 2048 APK from a third-party website, you can use APKCombo, a reliable and secure website that offers APK files for various apps and games. You just need to follow these steps:
-
-
Open your browser and go to this link: (https://apkcombo.com/ball-run-merge-2048/com.kayac.ball_run_merge_2048/).
-
Select the version of Ball Run Merge 2048 APK that you want to download.
-
Tap on the "Download APK" button and wait for the download to finish.
-
Before installing the app, make sure you enable "Unknown Sources" in your device's settings.
-
Locate the downloaded file in your device's storage and tap on it to install it (a quick way to sanity-check the file first on a computer is sketched below).
-
Enjoy playing Ball Run Merge 2048 APK!
-
-
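Before copying the file to your phone, you can quickly confirm on a computer that what you downloaded really is an intact APK: an APK is just a ZIP archive that contains an AndroidManifest.xml entry. A small Python sketch, with the file name as a placeholder:

```python
import zipfile
from pathlib import Path

apk_path = Path("ball-run-merge-2048.apk")   # placeholder -- use your downloaded file

def looks_like_apk(path: Path) -> bool:
    """An APK is a ZIP archive; check the container and the manifest entry."""
    if not zipfile.is_zipfile(path):
        return False
    with zipfile.ZipFile(path) as z:
        return "AndroidManifest.xml" in z.namelist() and z.testzip() is None

print(f"{apk_path.name}: {'looks intact' if looks_like_apk(apk_path) else 'not a valid APK'}")
```

This only catches truncated or mislabelled downloads; it says nothing about whether the file is safe, so the antivirus advice below still applies.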
From other sources
-
You can also download Ball Run Merge 2048 APK from other websites that offer APK files, such as APKPure, Uptodown, or APKMirror. However, you should be careful when downloading from these sources, as they may contain malware or viruses that can harm your device or compromise your privacy. You should always check the reviews and ratings of the apps before downloading them, and scan them with antivirus software before installing them. You should also avoid downloading modded or hacked versions of the apps, as they may not work properly or cause problems with your device.
-
Tips and tricks for playing Ball Run Merge 2048 APK
-
To help you enjoy playing Ball Run Merge 2048 APK more, here are some tips and tricks that you can use:
-
Swipe fast and accurately
-
The key to playing Ball Run Merge 2048 APK is to swipe fast and accurately. You have to swipe the balls quickly to merge them with other balls of the same number and color. You also have to swipe them accurately to avoid hitting the wrong balls or missing the right ones. Swiping fast and accurately will help you increase your score and reach higher numbers faster.
-
Avoid obstacles and traps
-
Another important thing to remember when playing Ball Run Merge 2048 APK is to avoid obstacles and traps. These are things that can lower your ball number or make you lose the game. Some examples of obstacles are spikes, walls, holes, or other balls with different numbers or colors. Some examples of traps are bombs, magnets, or switches that can change the direction or speed of the balls. You have to be careful and alert when swiping the balls, as some obstacles and traps may appear suddenly or unexpectedly.
-
Collect coins and rewards
-
A fun part of playing Ball Run Merge 2048 APK is collecting coins and rewards. These are things that can help you boost your speed, increase your number, or clear obstacles. Some examples of coins are gold coins, silver coins, or rainbow coins. Some examples of rewards are stars, hearts, diamonds, or keys. You can use these coins and rewards to buy new balls, unlock new levels, or activate power-ups. You can find these coins and rewards on the track, or you can get them from chests, daily bonuses, or watching ads. You should try to collect as many coins and rewards as you can, as they can make your game more fun and exciting.
-
Conclusion
-
Ball Run Merge 2048 APK is a fun and relaxing game that you can play on your Android device. It is a game that combines the concepts of ball rolling and number merging, where you have to swipe the balls and merge them with other balls of the same number and color. The game has various levels, different types of balls, power-ups, leaderboards, achievements, and more. The game is free and easy to download from Google Play Store or other websites that offer APK files. The game is addictive and challenging, but also colorful and cheerful. The game is a great way to pass the time, exercise your brain, and have fun.
-
FAQs
-
Here are some frequently asked questions about Ball Run Merge 2048 APK:
-
-
What is the highest number you can reach in Ball Run Merge 2048 APK?
-
The highest number you can reach in Ball Run Merge 2048 APK is 8192, which is the maximum number that can fit on a ball. However, reaching this number is very difficult and rare, as you have to merge many balls of the same number and color without hitting any obstacles or traps.
-
How do you get more coins and rewards in Ball Run Merge 2048 APK?
-
You can get more coins and rewards in Ball Run Merge 2048 APK by collecting them on the track, opening chests, getting daily bonuses, or watching ads. You can also get more coins and rewards by completing achievements or ranking high on the leaderboards.
-
How do you use power-ups in Ball Run Merge 2048 APK?
-
You can use power-ups in Ball Run Merge 2048 APK by tapping on them when they appear on the screen. Some power-ups are activated automatically, while others require you to tap on them again to use them. Some examples of power-ups are speed boost, number increase, obstacle clear, or magnet.
-
How do you unlock new levels and balls in Ball Run Merge 2048 APK?
-
You can unlock new levels and balls in Ball Run Merge 2048 APK by earning stars, hearts, diamonds, or keys. You can earn these items by completing levels, collecting rewards, or buying them with coins. You can use these items to unlock new tracks, themes, or balls with different shapes, colors, and numbers.
-
Is Ball Run Merge 2048 APK safe to download and play?
-
Yes, Ball Run Merge 2048 APK is safe to download and play. The game does not contain any malware or viruses that can harm your device or compromise your privacy. The game also does not require any registration or subscription. However, you should always download the game from trusted sources such as Google Play Store or APKCombo website.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download 20 Minutes Till Dawn and Experience a Thrilling Roguelike Shooter.md b/spaces/1phancelerku/anime-remove-background/Download 20 Minutes Till Dawn and Experience a Thrilling Roguelike Shooter.md
deleted file mode 100644
index 1c964d9d6454564ba3ca89f4411aa0f408118d3b..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download 20 Minutes Till Dawn and Experience a Thrilling Roguelike Shooter.md
+++ /dev/null
@@ -1,90 +0,0 @@
-
-
20 Minutes Till Dawn: A Survival Roguelite Game Review
-
If you are looking for a fast-paced, action-packed, and challenging game that will test your skills and reflexes, then you might want to check out 20 Minutes Till Dawn. This is a survival roguelite game where you have to fight against endless hordes of Lovecraftian monsters and survive the night. In this article, we will review the game's features, gameplay, graphics, sound, pros, cons, and more.
20 Minutes Till Dawn is a roguelike shoot 'em up video game developed and published by flanne. The game was released in early access on Steam on June 8, 2022, and was ported to Android and iOS by Erabit Studios on September 9, 2022. The game exited early access on Steam with version 1.0 on June 8th, 2023.
-
The game belongs to the genre of survival roguelite, which means that it features permadeath, randomization, and progression across runs. The goal of the game is to survive for 20 minutes until dawn breaks, while facing an onslaught of monsters that become stronger and more numerous as time passes. The game is inspired by Vampire Survivors, but with more active combat and customization options.
-
The game is available on Steam for $4.99, as well as on Google Play, App Store, and TapTap for free. The game has received very positive reviews from players and critics alike, with over 20,000 reviews on Steam and over 6 million downloads on mobile platforms. The game has also been featured by IGN, TheGamer, Level Winner, and other media outlets.
-
Gameplay
-
The gameplay of 20 Minutes Till Dawn is simple but challenging. You control a character who can move around with WASD keys or a virtual joystick, aim with the mouse or touch screen, and fire with left click or tap. You can also use right click or double tap to activate your special ability, which varies depending on your character.
-
You start each run by choosing one of several characters, each with their own unique skill, bonus, and weapon. You can unlock more characters by spending gems, which are earned by killing monsters or completing achievements. You can also choose your starting weapon from a variety of guns, melee weapons, or magic items. You can unlock more weapons by spending gems as well.
-
As you kill monsters, you gain experience points that allow you to level up. Each time you level up, you can choose one of four randomly generated upgrades that enhance your stats or abilities. These upgrades can range from increasing your damage or health, to adding effects like fire, poison, or stun to your attacks, to unlocking new abilities like dash, shield, or summon. The upgrades are permanent for the current run, but they are lost when you die or restart.
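To picture how that level-up flow works, here is a toy Python sketch of "draw four random upgrades, keep one". The upgrade names and pool are made up for illustration; they are not the game's actual data or drop weights.

```python
import random

# Illustrative pool only -- the real game's upgrade list and weights are not known here.
UPGRADE_POOL = [
    "+damage", "+max health", "fire shots", "poison shots",
    "stun chance", "dash", "shield", "summon",
]

def level_up_offer(pool, k=4):
    """Draw k distinct upgrades at random, mirroring the 'pick one of four' screen."""
    return random.sample(pool, k)

run_upgrades = []            # wiped on death or restart, as in any roguelite run
for _ in range(3):           # pretend we level up three times this run
    offered = level_up_offer(UPGRADE_POOL)
    choice = offered[0]      # a player would pick; this toy run just takes the first
    run_upgrades.append(choice)
    print("offered:", offered, "-> picked:", choice)
```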
-
20 Minutes Till Dawn roguelike shooting game
-How to play 20 Minutes Till Dawn on PC
-20 Minutes Till Dawn full version download
-20 Minutes Till Dawn best weapons and runes
-20 Minutes Till Dawn tips and tricks
-20 Minutes Till Dawn review and rating
-20 Minutes Till Dawn APK download for Android
-20 Minutes Till Dawn cheats and hacks
-20 Minutes Till Dawn gameplay and walkthrough
-20 Minutes Till Dawn online multiplayer mode
-20 Minutes Till Dawn free steam key
-20 Minutes Till Dawn system requirements and specs
-20 Minutes Till Dawn mod apk unlimited money
-20 Minutes Till Dawn soundtrack and music
-20 Minutes Till Dawn developer and publisher
-20 Minutes Till Dawn release date and updates
-20 Minutes Till Dawn trailer and screenshots
-20 Minutes Till Dawn similar games and alternatives
-20 Minutes Till Dawn wiki and guide
-20 Minutes Till Dawn achievements and trophies
-20 Minutes Till Dawn horror survival game
-20 Minutes Till Dawn lovecraftian monsters and enemies
-20 Minutes Till Dawn special abilities and skills
-20 Minutes Till Dawn how to survive the night
-20 Minutes Till Dawn endless hordes and waves
-20 Minutes Till Dawn action video game genre
-20 Minutes Till Dawn Google Play store link
-20 Minutes Till Dawn BlueStacks emulator download
-20 Minutes Till Dawn PC game free torrent
-20 Minutes Till Dawn crack and patch
-
To survive the night, you have to keep moving and shooting, while avoiding the enemies' attacks and environmental hazards. The enemies come in different shapes and sizes, each with their own behavior and attack pattern. Some of them are fast and agile, some are slow and tanky, some are ranged and explosive, and some are stealthy and deadly. You will also encounter bosses every few minutes, which are much stronger and tougher than regular enemies. The bosses have unique abilities and weaknesses that you have to exploit to defeat them.
-
The game has four different game modes: Normal, Hardcore, Endless, and Custom. Normal mode is the default mode, where you have to survive for 20 minutes with three lives. Hardcore mode is similar to Normal mode, but you only have one life and the enemies are more aggressive. Endless mode is where you can play as long as you want, but the enemies become harder and more frequent as time goes on. Custom mode is where you can create your own rules and settings for the game, such as changing the time limit, the enemy spawn rate, the difficulty level, and more.
-
Graphics and Sound
-
The graphics of 20 Minutes Till Dawn are colorful and pixelated, giving the game a retro and nostalgic feel. The game has a dark and gloomy atmosphere, with a night sky full of stars and a moon that changes phases as time passes. The game also has dynamic lighting and shadows, which create a contrast between the dark background and the bright projectiles and explosions. The game has a variety of environments, such as forests, deserts, cities, caves, and more. Each environment has its own theme and features, such as trees, rocks, buildings, traps, and secrets.
-
The sound of 20 Minutes Till Dawn is immersive and engaging, with a soundtrack that matches the mood and intensity of the game. The game has a synthwave style music that is catchy and energetic, with different tracks for each environment and boss. The game also has sound effects that are realistic and satisfying, such as the sound of gunfire, explosions, screams, footsteps, and more. The game has no voice acting or dialogue, but it does have text messages that appear on the screen to give you hints or warnings.
-
The game performs well on most devices and platforms, with smooth gameplay and minimal lag or glitches. The game has low system requirements for PC users, as well as options to adjust the graphics quality and resolution for mobile users. The game also supports cloud saving, controller support, leaderboards, achievements, and multiplayer co-op.
-
Pros and Cons
-
20 Minutes Till Dawn is a fun and addictive game that will keep you entertained for hours. However, like any other game, it also has its pros and cons. Here are some of them:
-
-
Pros
Cons
-
- Fast-paced and challenging gameplay that requires skill and strategy
- Permadeath can be frustrating and discouraging for some players
-
- Variety of characters, weapons, upgrades, enemies, bosses, environments, and game modes that offer replay value
- Randomization can be unfair or unbalanced at times
-
- Retro-style graphics that are colorful and atmospheric
- Pixelated graphics might not appeal to everyone
-
- Synthwave-style music that is catchy and energetic
- Music can get repetitive or annoying after a while
-
- Low system requirements and cross-platform compatibility
- Some bugs or glitches might occur occasionally
-
-
Conclusion
-
20 Minutes Till Dawn is a survival roguelite game that will test your skills and reflexes as you fight against endless hordes of Lovecraftian monsters and survive the night. The game has a simple but challenging gameplay, a variety of features and options, a retro-style graphics and sound, and a low system requirements and cross-platform compatibility. The game is suitable for anyone who enjoys action, horror, or roguelite games, and who is looking for a thrilling and rewarding experience. The game is also affordable and accessible, as it costs only $4.99 on Steam and is free on mobile platforms.
-
If you are interested in playing 20 Minutes Till Dawn, you can find more information or download the game from the following links:
-
-
Steam: [20 Minutes Till Dawn on Steam]
-
Google Play: [20 Minutes Till Dawn - Apps on Google Play]
-
App Store: [20 Minutes Till Dawn on the App Store]
-
TapTap: [20 Minutes Till Dawn - TapTap]
-
-
You can also watch some gameplay videos or read some reviews from the following sources:
-
-
IGN: [20 Minutes Till Dawn Review - IGN]
-
TheGamer: [20 Minutes Till Dawn Review: A Roguelite That Keeps You On Your Toes]
-
Level Winner: [20 Minutes Till Dawn Beginner's Guide: Tips, Tricks & Strategies to Survive the Night]
-
-
FAQs
-
Here are some of the frequently asked questions about 20 Minutes Till Dawn:
-
-
How do I unlock more characters and weapons?
-
You can unlock more characters and weapons by spending gems, which are earned by killing monsters or completing achievements. You can also find some weapons as loot drops from enemies or chests.
-
How do I save my progress?
-
You can save your progress by using the cloud saving feature, which is available on all platforms. You can also use the local saving feature, which is available on PC and mobile platforms. However, keep in mind that your progress is only saved between runs, not during runs. If you die or restart, you will lose your current upgrades and items.
-
How do I play with my friends?
-
You can play with your friends by using the multiplayer co-op feature, which is available on all platforms. You can join or host a game with up to four players online or locally. You can also chat with your friends using the voice or text chat feature.
-
How do I change the game settings?
-
You can change the game settings by using the options menu, which is available on all platforms. You can adjust the graphics quality, resolution, sound volume, language, controls, and more.
-
How do I contact the developers or report a bug?
-
You can contact the developers or report a bug by using the feedback feature, which is available on all platforms. You can also visit the official website, Discord server, Twitter page, or Facebook page of the game.
-
-
I hope you enjoyed this article and found it helpful. If you have any questions or comments, feel free to leave them below. Thank you for reading and have a great day!
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Epic Conquest 2 Mod Apk 1.7.5 and Enjoy Unlimited Gold and Rubies.md b/spaces/1phancelerku/anime-remove-background/Download Epic Conquest 2 Mod Apk 1.7.5 and Enjoy Unlimited Gold and Rubies.md
deleted file mode 100644
index ab9723cfe8e1de6b543fd397780dfd2c36ff7011..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Epic Conquest 2 Mod Apk 1.7.5 and Enjoy Unlimited Gold and Rubies.md
+++ /dev/null
@@ -1,102 +0,0 @@
-
-
Epic Conquest 2 Mod Apk 1.7.5: Everything You Need to Know
-
If you are a fan of action RPG games and anime, you might have heard of Epic Conquest 2, a game created by a small indie team of 4 with passion and love for the genre. Epic Conquest 2 is inspired by the classics, with a special touch in the combat and story, giving you an experience that's hard to find in similar games.
But what if you want to enhance your gaming experience even more? What if you want to unlock all the costumes, skills, items, and features that the game has to offer? Well, that's where a mod apk comes in handy.
-
A mod apk is a modified version of an original app that allows you to access features that are normally locked or restricted in the official app. For example, a mod apk can give you unlimited money, gems, resources, or premium items that you would otherwise have to pay for or grind for hours.
-
One of the most popular mod apks for Epic Conquest 2 is the version 1.7.5, which has many amazing features such as:
-
-
Unlimited gold and ruby
-
Unlocked all costumes and accessories
-
Unlocked all skills and masteries
-
Unlocked all equipment and materials
-
Unlocked all runes and keys
-
Unlocked all chapters and areas
-
No ads
-
-
With these features, you can enjoy Epic Conquest 2 without any limitations or interruptions. You can customize your character's appearance, skills, and equipment to suit your playstyle. You can explore the open world with all kinds of treasures and resources to strengthen your character. You can experience the anime-style story with cutscenes and character expressions that will keep you hooked.
-
How to Download and Install Epic Conquest 2 Mod Apk 1.7.5
-
If you are interested in trying out this mod apk, here are the steps you need to follow:
-
-
Download the mod apk file from a reliable source. You can use this link as an example.
-
Before installing the mod apk, make sure you have enough storage space on your device (a quick way to check this over adb is sketched after this list) and that you have enabled the option to install apps from unknown sources in your settings.
-
Locate the downloaded file in your file manager and tap on it to start the installation process.
-
Follow the instructions on the screen and wait for the installation to finish.
-
Launch the game and enjoy!
-
-
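If your phone is connected to a PC, you can check the free space mentioned in the second step over adb instead of digging through the settings app. This is a best-effort sketch: it assumes adb and USB debugging are set up, and that your device's `df` output follows the usual column layout, which can vary between devices.

```python
import subprocess

def device_free_mb(path="/sdcard"):
    """Return the free space (in MB) reported by the connected device for `path`."""
    result = subprocess.run(
        ["adb", "shell", "df", path],
        check=True, capture_output=True, text=True,
    )
    # Typical output: a header line, then one data row whose 4th column is the
    # "Available" figure in 1K blocks. Column order can differ between devices.
    fields = result.stdout.strip().splitlines()[-1].split()
    return float(fields[3]) / 1024

free = device_free_mb()
print(f"Free space on /sdcard: {free:.0f} MB")
if free < 500:  # arbitrary safety margin for the APK plus its data
    print("Consider freeing up some space before installing.")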
However, before installing the mod apk, there are some precautions you need to take:
-
epic conquest 2 mod apk 1.7.5 free shopping
-epic conquest 2 mod apk 1.7.5 unlock all character
-epic conquest 2 mod apk 1.7.5 latest version
-epic conquest 2 mod apk 1.7.5 unlimited gold and rubies
-epic conquest 2 mod apk 1.7.5 offline
-epic conquest 2 mod apk 1.7.5 download for android
-epic conquest 2 mod apk 1.7.5 no root
-epic conquest 2 mod apk 1.7.5 hack
-epic conquest 2 mod apk 1.7.5 mega mod
-epic conquest 2 mod apk 1.7.5 gameplay
-epic conquest 2 mod apk 1.7.5 review
-epic conquest 2 mod apk 1.7.5 cheats
-epic conquest 2 mod apk 1.7.5 tips and tricks
-epic conquest 2 mod apk 1.7.5 guide
-epic conquest 2 mod apk 1.7.5 walkthrough
-epic conquest 2 mod apk 1.7.5 best build
-epic conquest 2 mod apk 1.7.5 blacksmith system
-epic conquest 2 mod apk 1.7.5 character customization
-epic conquest 2 mod apk 1.7.5 skills and masteries
-epic conquest 2 mod apk 1.7.5 costumes and outfits
-epic conquest 2 mod apk 1.7.5 items and crafting
-epic conquest 2 mod apk 1.7.5 dungeons and secrets
-epic conquest 2 mod apk 1.7.5 story and lore
-epic conquest 2 mod apk 1.7.5 update and patch notes
-epic conquest 2 mod apk 1.7.5 features and benefits
-epic conquest 2 mod apk 1.7.5 comparison and alternatives
-epic conquest 2 mod apk 1.7.5 pros and cons
-epic conquest 2 mod apk 1.7.5 ratings and reviews
-epic conquest 2 mod apk 1.7.5 download link and instructions
-epic conquest 2 mod apk 1.7.5 installation and setup
-epic conquest 2 mod apk 1.7.5 troubleshooting and support
-epic conquest 2 mod apk 1.7.5 faq and answers
-epic conquest 2 mod apk 1.7.5 forum and community
-epic conquest 2 mod apk 1.7.5 developer and contact info
-epic conquest 2 mod apk 1.7.5 news and updates
-
-
Make sure you have backed up your original game data in case something goes wrong or you want to switch back to the official version.
-
Make sure you have scanned the mod apk file with an antivirus or malware detector to ensure it is safe and clean.
-
Make sure you have read the terms and conditions of the mod apk provider and that you are aware of any risks or consequences of using it.
-
-
How to Play Epic Conquest 2 Mod Apk 1.7.5
-
Now that you have installed the mod apk, you are ready to play Epic Conquest 2 with all the benefits and advantages. Here are some tips and tricks to help you get started:
-
-
Choose your character wisely. There are four playable characters in Epic Conquest 2, each with their own unique skills, weapons, and costumes. You can switch between them anytime in the game, but you need to level them up separately. You can use the unlimited gold and ruby to buy all the skills and masteries for each character, as well as the costumes and accessories that suit your taste.
-
Explore the world freely. Epic Conquest 2 has a huge open world with various regions, biomes, dungeons, and secrets. You can use the unlocked runes and keys to access all the areas and loot all the chests and resources. You can also use the fast travel feature to move around quickly and easily.
-
Follow the story and quests. Epic Conquest 2 has a rich and immersive story with multiple endings and choices that affect the outcome. You can use the unlocked chapters to play through all the scenarios and see how your decisions impact the characters and the world. You can also complete various side quests and challenges to earn extra rewards and learn more about the lore and history of the game.
-
Enjoy the combat and customization. Epic Conquest 2 has a dynamic and fluid combat system that lets you unleash your skills and combos with ease. You can use the unlimited equipment and materials to craft and upgrade your weapons and armor to suit your playstyle. You can also use the mod features to adjust the difficulty level, enemy stats, drop rates, and other settings to make the game more fun and challenging.
-
-
However, using the mod apk also has some drawbacks that you need to be aware of:
-
-
You might encounter some bugs or glitches that affect the game performance or stability. If this happens, you can try reinstalling the mod apk or clearing the cache data.
-
You might lose your progress or data if you uninstall the mod apk or switch back to the official version. To avoid this, you can back up your data using a cloud service or a third-party app.
-
You might get banned or suspended from the online features or services of the game if you use the mod apk in an inappropriate or abusive way. To avoid this, you can play offline or use a VPN service.
-
-
Conclusion
-
Epic Conquest 2 is a great game for fans of action RPGs and anime, with a lot of content and features to enjoy. However, if you want to experience the game in a different way, you can try using the epic conquest 2 mod apk 1.7.5, which gives you unlimited access to everything in the game.
-
This mod apk is easy to download and install, as long as you follow the steps and precautions mentioned above. The tips and tricks above should also help you play the game better and have more fun.
-
However, you should also be aware of the risks and consequences of using this mod apk, such as bugs, data loss, or bans. You should also respect the developers and creators of the game by not using this mod apk for illegal or unethical purposes.
-
If you are interested in trying out this mod apk, you can use this link to download it. If you have any feedback or questions about this mod apk, feel free to leave a comment below or contact us through our website.
-
FAQs
-
Here are some frequently asked questions about epic conquest 2 mod apk 1.7.5:
-
-
Is this mod apk safe and virus-free?
-
Yes, this mod apk is safe and virus-free, as long as you download it from a reliable source like this link. However, you should always scan any file you download with an antivirus or malware detector before installing it on your device.
-
Does this mod apk work on any device or platform?
-
-
Does this mod apk require root access or any special permissions?
-
No, this mod apk does not require root access or any special permissions to run on your device. You just need to enable the option to install apps from unknown sources in your settings.
-
Can I update this mod apk to the latest version of the game?
-
No, this mod apk is based on the version 1.7.5 of the game, which is not the latest version. If you update this mod apk, you might lose the mod features or encounter compatibility issues. You should wait for a new version of the mod apk to be released before updating.
-
Can I play online or multiplayer with this mod apk?
-
No, this mod apk is not compatible with the online or multiplayer features of the game. If you try to play online or multiplayer with this mod apk, you might get banned or suspended from the game servers. You should only play offline or solo with this mod apk.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Mortal Kombat X MOD APK Data for Android and iOS Devices.md b/spaces/1phancelerku/anime-remove-background/Download Mortal Kombat X MOD APK Data for Android and iOS Devices.md
deleted file mode 100644
index 807d7b45d7cf5529e47f80d4e7688bf7103c388e..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Mortal Kombat X MOD APK Data for Android and iOS Devices.md
+++ /dev/null
@@ -1,98 +0,0 @@
-
-
Mortal Kombat X Mod APK + Data Download: How to Install and Play the Ultimate Fighting Game on Your Android Device
-
Mortal Kombat X is one of the most popular and acclaimed fighting games of all time. It is the tenth installment in the Mortal Kombat franchise and a sequel to Mortal Kombat (2011). It features stunning graphics, cinematic presentation, brutal gameplay, and a rich story mode that takes place 25 years after the events of its predecessor. It also introduces new characters, variations, modes, and features that make it a must-play for any fan of the genre.
But what if you want to play Mortal Kombat X on your Android device? Unfortunately, the official version of the game is not available on Google Play Store due to its high requirements and compatibility issues. However, there is a way to enjoy this amazing game on your smartphone or tablet by using a mod apk + data download. A mod apk is a modified version of an application that allows you to access premium features, unlock content, bypass restrictions, and enhance performance. A data download is a file that contains additional information and resources for an application, such as graphics, sounds, levels, etc.
-
By using a mod apk + data download for Mortal Kombat X, you can experience the full game on your Android device without any limitations or problems. You can play as any character you want, use any variation you like, customize your fighters, access all game modes, complete all challenges, and enjoy all the updates and DLCs that have been released for the game. You can also play online with other players around the world, or offline with your friends using local multiplayer. All you need is a compatible device, enough storage space, and a reliable internet connection.
-
How to Download and Install Mortal Kombat X Mod APK + Data on Your Android Device
-
If you are interested in playing Mortal Kombat X with mod apk + data on your Android device, here are the steps you need to follow:
-
Step 1: Enable unknown sources on your device settings
-
Before you can install any mod apk or data file on your device, you need to enable unknown sources on your device settings. This will allow you to install applications from sources other than Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may also need to confirm this action by tapping OK or Allow.
-
Step 2: Download the mod apk + data file from a trusted source
-
Next, you need to download the mod apk + data file for Mortal Kombat X from a trusted source. There are many websites that offer this file, but not all of them are safe and reliable. Some may contain viruses, malware, or fake or outdated links. Therefore, you should always do some research and check the reviews and ratings of the source before downloading anything. One of the best sources for Mortal Kombat X mod apk + data download is [this website]. It offers a safe, fast, and easy download link that works for most Android devices. You can also find more information and instructions on how to use the mod apk + data file on the website.
-
mortal kombat x unlimited coins and souls apk + data
-mortal kombat x mega mod apk + obb data
-mortal kombat x hacked version apk + data offline
-mortal kombat x latest mod apk + data for android
-mortal kombat x mod apk + data highly compressed
-mortal kombat x mod apk + data revdl
-mortal kombat x mod apk + data android 1
-mortal kombat x mod apk + data rexdl
-mortal kombat x mod apk + data free download
-mortal kombat x mod apk + data unlimited everything
-mortal kombat x mod apk + data all characters unlocked
-mortal kombat x mod apk + data no root
-mortal kombat x mod apk + data offline mode
-mortal kombat x mod apk + data anti ban
-mortal kombat x mod apk + data all gpu
-mortal kombat x mod apk + data mali
-mortal kombat x mod apk + data adreno
-mortal kombat x mod apk + data power vr
-mortal kombat x mod apk + data 1.18.0
-mortal kombat x mod apk + data 1.21.0
-mortal kombat x mod apk + data 1.22.0
-mortal kombat x mod apk + data 1.23.0
-mortal kombat x mod apk + data 1.24.0
-mortal kombat x mod apk + data 1.25.0
-mortal kombat x mod apk + data 2.0.0
-mortal kombat x mod apk + data 2.1.0
-mortal kombat x mod apk + data 2.2.0
-mortal kombat x mod apk + data 2.3.0
-mortal kombat x mod apk + data 2.4.0
-mortal kombat x mod apk + data 2.5.0
-mortal kombat x god mode apk + data download
-mortal kombat x unlimited health apk + data download
-mortal kombat x one hit kill apk + data download
-mortal kombat x unlocked skins apk + data download
-mortal kombat x unlocked fatalities apk + data download
-mortal kombat x unlocked brutalities apk + data download
-mortal kombat x unlocked cards apk + data download
-mortal kombat x unlocked levels apk + data download
-mortal kombat x unlocked features apk + data download
-mortal kombat x premium edition apk + data download
-how to download and install mortal kombat x mod apk + data on android device
-how to download and install mortal kombat x mod apk + data on pc using bluestacks
-how to download and install mortal kombat x mod apk + data on ios device
-how to download and install mortal kombat x mod apk + data on mac using nox player
-how to fix error while downloading or installing mortal kombat x mod apk + data
-how to update or uninstall mortal kombat x mod apk + data
-how to play online or offline with friends using mortal kombat x mod apk + data
-how to backup or restore your progress in mortal kombat x mod apk + data
-how to get more coins and souls in mortal kombat x mod apk + data
-
Step 3: Extract the data file to the obb folder on your device storage
-
After you have downloaded the mod apk + data file, you need to extract the data file to the obb folder on your device storage. The obb folder is where the game stores its additional data and resources. To do this, you need a file manager app that can handle zip files, such as ES File Explorer, ZArchiver, or RAR. You can download any of these apps from Google Play Store for free. Once you have installed a file manager app, follow these steps:
- Locate the mod apk + data file that you have downloaded on your device storage. It should have a name like Mortal-Kombat-X-Mod-APK-Data.zip or something similar.
- Tap and hold on the file and select Extract or Unzip from the menu that appears.
- Choose a destination folder where you want to extract the file. You can create a new folder or use an existing one.
- Tap OK or Extract to start the extraction process. It may take a few minutes depending on the size of the file and your device speed.
- Once the extraction is done, you should see a folder named com.wb.goog.mkx or something similar inside the destination folder. This is the data folder for Mortal Kombat X.
- Move or copy this folder to the obb folder on your device storage. The obb folder is usually located in Android > obb. If you don't see it, you may need to create it manually.
- Make sure that the data folder is inside the obb folder and has the correct name.
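If you prefer doing the extraction on a computer, the same step can be scripted with Python and adb instead of a file manager app. This is only a sketch: the archive name comes from the example above, it assumes adb and USB debugging are set up, and the folder name inside your archive may differ from the one shown.

```python
import subprocess
import zipfile
from pathlib import Path

DATA_ZIP = Path("Mortal-Kombat-X-Mod-APK-Data.zip")   # name from the step above; adjust
OBB_DIR = "/sdcard/Android/obb"                        # where Android looks for obb data
PACKAGE = "com.wb.goog.mkx"                            # folder name mentioned in step 3

# 1) Unpack the archive locally; the data folder should appear inside `extracted/`.
with zipfile.ZipFile(DATA_ZIP) as archive:
    archive.extractall("extracted")

data_folder = Path("extracted") / PACKAGE
if not data_folder.is_dir():
    raise SystemExit(f"Expected a '{PACKAGE}' folder inside the archive, found none.")

# 2) Copy it to the obb folder on the phone over adb (USB debugging must be on).
subprocess.run(["adb", "shell", "mkdir", "-p", OBB_DIR], check=True)
subprocess.run(["adb", "push", str(data_folder), OBB_DIR], check=True)
print(f"Pushed {data_folder} to {OBB_DIR}/{PACKAGE}")
```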
Step 4: Install the mod apk file and launch the game
-
The final step is to install the mod apk file and launch the game. To do this, follow these steps:
- Locate the mod apk file that you have downloaded on your device storage. It should have a name like Mortal-Kombat-X-Mod-APK.apk or something similar.
- Tap on the file and select Install from the menu that appears.
- Wait for the installation process to finish. It may take a few seconds or minutes depending on your device speed and compatibility.
- Once the installation is done, tap Open or Launch to start the game.
- Enjoy Mortal Kombat X with mod apk + data on your Android device!
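This step can also be done from a PC with adb, which is handy if tapping the file on the device fails. A minimal sketch, assuming adb is installed, USB debugging is enabled, and your device allows sideloading:

```python
import subprocess

APK_PATH = "Mortal-Kombat-X-Mod-APK.apk"   # file name from the step above; adjust to yours
PACKAGE = "com.wb.goog.mkx"                # data folder name from step 3

# Sideload the APK from the PC; -r reinstalls/updates if a version is already present.
subprocess.run(["adb", "install", "-r", APK_PATH], check=True)

# Fire the app's launcher activity without needing to know its exact activity name.
subprocess.run(
    ["adb", "shell", "monkey", "-p", PACKAGE,
     "-c", "android.intent.category.LAUNCHER", "1"],
    check=True,
)
print("Installed and launched", PACKAGE)
```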
How to Play Mortal Kombat X with Mod APK + Data
-
Now that you have installed and launched Mortal Kombat X with mod apk + data on your Android device, you are ready to play and have fun. Here are some tips and tricks on how to play Mortal Kombat X with mod apk + data:
-
Choose your character and variation
-
Mortal Kombat X features a roster of over 30 characters, each with their own unique skills, abilities, and fatalities. You can choose from classic characters like Scorpion, Sub-Zero, Raiden, Liu Kang, Sonya Blade, and Johnny Cage, as well as new characters like Cassie Cage, D'Vorah, Kotal Kahn, Erron Black, and Jacqui Briggs. You can also unlock and play as guest characters like Jason Voorhees, Predator, Alien, Leatherface, and Kratos.
-
Each character has three variations that change their appearance, moveset, and strategy. For example, Scorpion has Ninjutsu, Hellfire, and Inferno variations; Sub-Zero has Cryomancer, Grandmaster, and Unbreakable variations; Raiden has Thunder God, Displacer, and Master of Storms variations; etc. You can choose your variation before each match or change it during gameplay by pressing L1 (or equivalent button) on your controller.
-
You can also customize your character's appearance by changing their costume, accessory, weapon, taunt, victory pose, etc. You can unlock new costumes by completing challenges, playing the story mode, or using the mod apk + data. You can also create your own custom character by using the Kreate a Fighter feature in the Extras menu.
-
Learn the basic and advanced techniques
-
Mortal Kombat X is a game that requires skill, timing, and strategy to master. You need to learn the basic and advanced techniques to survive and win against your opponents. Some of the basic techniques are:
- - Punch: Press Square (or equivalent button) to perform a quick and weak attack.
- - Kick: Press X (or equivalent button) to perform a fast and moderate attack.
- - Block: Press R2 (or equivalent button) to defend yourself from incoming attacks. You can also use directional buttons to block high, mid, or low attacks.
- - Throw: Press L1 (or equivalent button) or Square + X (or equivalent buttons) to grab and toss your opponent. You can also press directional buttons to change the direction of the throw.
- - Run: Press R2 + Forward (or equivalent buttons) to sprint towards your opponent. You can use this to close the distance or surprise them with an attack.
- - Jump: Press Up (or equivalent button) to leap into the air. You can also press directional buttons to jump forward, backward, or sideways.
- - Crouch: Press Down (or equivalent button) to duck under high attacks or avoid projectiles. You can also press Square or X (or equivalent buttons) to perform a low punch or kick.
Some of the advanced techniques are:
- - Combo: Press a sequence of buttons to perform a series of attacks that deal more damage and stun your opponent. You can find the list of combos for each character in the Moves List menu or on the screen during gameplay.
- - Special Move: Press a combination of buttons to perform a unique and powerful attack that uses some of your energy meter. You can find the list of special moves for each character in the Moves List menu or on the screen during gameplay.
- - X-Ray Move: Press L2 + R2 (or equivalent buttons) when your energy meter is full to perform a devastating attack that shows the internal damage inflicted on your opponent. This move can deal up to 30% damage and break your opponent's bones and organs.
- - Fatality: Press a specific sequence of buttons at the end of the match when your opponent is in a dizzy state to execute a gruesome finishing move that kills them in a brutal way. You can find the list of fatalities for each character in the Moves List menu or on the screen during gameplay. You can also use the mod apk + data to unlock all fatalities for all characters.
- - Brutality: Perform a certain requirement during the match, such as using a specific move or variation, and end the match with a specific attack to trigger a violent finishing move that kills your opponent instantly. You can find the list of brutalities for each character in the Moves List menu or on the screen during gameplay. You can also use the mod apk + data to unlock all brutalities for all characters.
Use the environment and special attacks
-
Mortal Kombat X features interactive environments that you can use to your advantage or disadvantage during gameplay. You can use objects, weapons, traps, animals, and even people in the background to damage, stun, or escape from your opponent. To use an environmental interaction, press R1 (or equivalent button) when you are near an object that has a white outline. Some examples of environmental interactions are:
- - Throwing barrels, rocks, skulls, spears, etc. at your opponent
- - Jumping off walls, pillars, statues, etc. to evade or attack your opponent
- - Grabbing branches, chains, hooks, etc. to swing or pull yourself towards or away from your opponent
- - Activating traps, such as spikes, flames, lasers, etc. to hurt your opponent
- - Using animals, such as crocodiles, wolves, dragons, etc. to bite or claw your opponent
- - Using people, such as monks, soldiers, civilians, etc. to hit or distract your opponent
Mortal Kombat X also features special attacks that you can use once per match to turn the tide of battle. These are:
- - Quitality: If you quit the match online before it ends, your character's head will explode and you will lose automatically.
- - Faction Kill: If you belong to one of the five factions in Mortal Kombat X (Lin Kuei, Special Forces, Black Dragon, Brotherhood of Shadow, or White Lotus), you can perform a faction-specific finishing move that shows your allegiance and earns you faction points.
- - Stage Brutality: If you end the match with an environmental interaction, you can trigger a stage-specific finishing move that kills your opponent in a creative way.
Enjoy the different game modes and challenges
-
Mortal Kombat X offers a variety of game modes and challenges that you can enjoy with mod apk + data. These are:
- - Story Mode: Follow the epic story of Mortal Kombat X that spans 25 years and features multiple characters and events. You can play as different characters in each chapter and make choices that affect the outcome of the story. You can also unlock rewards and secrets by completing the story mode.
- - Tower Mode: Fight your way through different towers that have different rules, modifiers, and opponents. You can choose from traditional towers, such as Klassic, Test Your Luck, Test Your Might, etc., or dynamic towers, such as Living Towers, Faction Towers, Premier Towers, etc. You can also create your own custom tower by using the mod apk + data.
- - Online Mode: Compete with other players online in various modes, such as Ranked, Player, King of the Hill, Survivor, etc. You can also join or create a room to chat and play with other players. You can also participate in online events and tournaments that have special rewards and prizes.
- - Local Mode: Play with your friends offline in various modes, such as Versus, Tag Team, Co-op Arcade, etc. You can also use the mod apk + data to enable local multiplayer on one device by using a split-screen feature.
- - Challenge Mode: Complete various challenges that test your skills and knowledge of Mortal Kombat X. You can choose from daily challenges, weekly challenges, character challenges, faction challenges, etc. You can also use the mod apk + data to unlock all challenges and rewards.
- - Krypt Mode: Explore the mysterious and dangerous Krypt that contains secrets, puzzles, traps, and treasures. You can use koins, souls, and hearts to unlock items, such as costumes, fatalities, brutalities, concept art, music, etc. You can also use the mod apk + data to unlock all items and areas in the Krypt.
Conclusion
-
Mortal Kombat X is a game that you should not miss if you are a fan of fighting games or Mortal Kombat franchise. It is a game that offers stunning graphics, cinematic presentation, brutal gameplay, and a rich story mode that will keep you entertained for hours. It is also a game that you can enjoy on your Android device by using a mod apk + data download that gives you access to all features, content, and updates of the game.
-
By following the steps above, you can download and install Mortal Kombat X mod apk + data on your Android device easily and safely. You can also learn how to play Mortal Kombat X with mod apk + data by using the tips and tricks above. You can also enjoy the different game modes and challenges that Mortal Kombat X offers with mod apk + data.
-
We hope that this article has helped you to play Mortal Kombat X with mod apk + data on your Android device. If you have any questions or feedback, please feel free to share them in the comments section below. We would love to hear from you and help you out.
-
FAQs
-
Here are some frequently asked questions about Mortal Kombat X with mod apk + data:
-
What are the system requirements for Mortal Kombat X on Android?
-
The minimum system requirements for Mortal Kombat X on Android are:
- - Android 4.0 or higher
- - 1 GB of RAM
- - 1.5 GB of free storage space
- - A stable internet connection
The recommended system requirements for Mortal Kombat X on Android are:
- - Android 5.0 or higher
- - 2 GB of RAM
- - 2 GB of free storage space
- - A fast internet connection
What are the differences between Mortal Kombat X and Mortal Kombat XL?
-
Mortal Kombat XL is an enhanced version of Mortal Kombat X that includes all the updates and DLCs that have been released for the game. It features new characters, costumes, stages, modes, and features that make it the ultimate Mortal Kombat experience. However, Mortal Kombat XL is only available for PlayStation 4, Xbox One, and PC platforms. It is not available for Android devices. Therefore, if you want to play Mortal Kombat XL on your Android device, you need to use a mod apk + data download that includes all the content and updates of Mortal Kombat XL.
-
How can I unlock all the characters and costumes in Mortal Kombat X with mod apk + data?
-
One of the benefits of using a mod apk + data download for Mortal Kombat X is that you can unlock all the characters and costumes in the game without spending any money or time. You can access all the characters and costumes from the character selection screen or the customization menu. You can also change your character and costume during gameplay by pressing L1 (or equivalent button) on your controller. You can also use the mod apk + data download to unlock new characters and costumes that are not available in the official version of the game, such as Kratos, Freddy Krueger, Michael Myers, etc.
-
How can I update Mortal Kombat X with mod apk + data?
-
Another benefit of using a mod apk + data download for Mortal Kombat X is that you can update the game with the latest patches and DLCs without any hassle or delay. You can update the game by downloading and installing the latest version of the mod apk + data file from the same source that you used before. You can also check for updates on the website or the app itself. You do not need to uninstall or delete the previous version of the game. You can simply overwrite it with the new version and enjoy the new features and content.
-
Is Mortal Kombat X with mod apk + data safe and legal?
-
The answer to this question depends on your perspective and preference. On one hand, using a mod apk + data download for Mortal Kombat X is safe and legal as long as you download it from a trusted source that does not contain any viruses, malware, or fake links. You also need to make sure that your device is compatible and has enough storage space and internet connection to run the game smoothly. On the other hand, using a mod apk + data download for Mortal Kombat X is unsafe and illegal as it violates the terms and conditions of the game developer and publisher. You also risk getting banned or suspended from online services and features if you use a mod apk + data download for Mortal Kombat X. Therefore, you should use a mod apk + data download for Mortal Kombat X at your own risk and discretion.
-
-
\ No newline at end of file
diff --git a/spaces/AIConsultant/MusicGen/audiocraft/models/lm.py b/spaces/AIConsultant/MusicGen/audiocraft/models/lm.py
deleted file mode 100644
index 8cefd2c58c3a337378579d6cd6469fd038cbb1ee..0000000000000000000000000000000000000000
--- a/spaces/AIConsultant/MusicGen/audiocraft/models/lm.py
+++ /dev/null
@@ -1,531 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from dataclasses import dataclass
-from functools import partial
-import logging
-import math
-import typing as tp
-
-import torch
-from torch import nn
-
-from ..utils import utils
-from ..modules.streaming import StreamingModule, State
-from ..modules.transformer import StreamingTransformer, create_norm_fn
-from ..modules.conditioners import (
- ConditionFuser,
- ClassifierFreeGuidanceDropout,
- AttributeDropout,
- ConditioningProvider,
- ConditioningAttributes,
- ConditionType,
-)
-from ..modules.codebooks_patterns import CodebooksPatternProvider
-from ..modules.activations import get_activation_fn
-
-
-logger = logging.getLogger(__name__)
-ConditionTensors = tp.Dict[str, ConditionType]
-CFGConditions = tp.Union[ConditionTensors, tp.Tuple[ConditionTensors, ConditionTensors]]
-
-
-def get_init_fn(method: str, input_dim: int, init_depth: tp.Optional[int] = None):
- """LM layer initialization.
- Inspired from xlformers: https://github.com/fairinternal/xlformers
-
- Args:
- method (str): Method name for init function. Valid options are:
- 'gaussian', 'uniform'.
- input_dim (int): Input dimension of the initialized module.
- init_depth (int, optional): Optional init depth value used to rescale
- the standard deviation if defined.
- """
- # Compute std
- std = 1 / math.sqrt(input_dim)
- # Rescale with depth
- if init_depth is not None:
- std = std / math.sqrt(2 * init_depth)
-
- if method == 'gaussian':
- return partial(
- torch.nn.init.trunc_normal_, mean=0.0, std=std, a=-3 * std, b=3 * std
- )
- elif method == 'uniform':
- bound = math.sqrt(3) * std # ensure the standard deviation is `std`
- return partial(torch.nn.init.uniform_, a=-bound, b=bound)
- else:
- raise ValueError("Unsupported layer initialization method")
-
-
-def init_layer(m: nn.Module,
- method: str,
- init_depth: tp.Optional[int] = None,
- zero_bias_init: bool = False):
- """Wrapper around ``get_init_fn`` for proper initialization of LM modules.
-
- Args:
- m (nn.Module): Module to initialize.
- method (str): Method name for the init function.
- init_depth (int, optional): Optional init depth value used to rescale
- the standard deviation if defined.
- zero_bias_init (bool): Whether to initialize the bias to 0 or not.
- """
- if isinstance(m, nn.Linear):
- init_fn = get_init_fn(method, m.in_features, init_depth=init_depth)
- if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16:
- weight = m.weight.float()
- init_fn(weight)
- m.weight.data[:] = weight.half()
- else:
- init_fn(m.weight)
- if zero_bias_init and m.bias is not None:
- nn.init.constant_(m.bias, 0)
- elif isinstance(m, nn.Embedding):
- init_fn = get_init_fn(method, m.embedding_dim, init_depth=None)
- if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16:
- weight = m.weight.float()
- init_fn(weight)
- m.weight.data[:] = weight.half()
- else:
- init_fn(m.weight)
-
-
-class ScaledEmbedding(nn.Embedding):
- """Boost learning rate for embeddings (with `scale`).
- """
- def __init__(self, *args, lr=None, **kwargs):
- super().__init__(*args, **kwargs)
- self.lr = lr
-
- def make_optim_group(self):
- group = {"params": list(self.parameters())}
- if self.lr is not None:
- group["lr"] = self.lr
- return group
-
-
-@dataclass
-class LMOutput:
- # The logits are already re-aligned with the input codes
- # hence no extra shift is required, e.g. when computing CE
- logits: torch.Tensor # [B, K, T, card]
- mask: torch.Tensor # [B, K, T]
-
-
-class LMModel(StreamingModule):
- """Transformer-based language model on multiple streams of codes.
-
- Args:
- pattern_provider (CodebooksPatternProvider): Pattern provider for codebook interleaving.
- condition_provider (ConditioningProvider): Conditioning provider from metadata.
- fuser (ConditionFuser): Fuser handling the fusing of conditions with language model input.
- n_q (int): Number of parallel streams to model.
- card (int): Cardinality, vocabulary size.
- dim (int): Dimension of the transformer encoder.
- num_heads (int): Number of heads for the transformer encoder.
- hidden_scale (int): Scale for hidden feed forward dimension of the transformer encoder.
- norm (str): Normalization method.
- norm_first (bool): Use pre-norm instead of post-norm.
- emb_lr (float, optional): Embedding-specific learning rate.
- bias_proj (bool): Use bias for output projections.
- weight_init (str, optional): Method for weight initialization.
- depthwise_init (str, optional): Method for depthwise weight initialization.
- zero_bias_init (bool): If true and bias in Linears, initialize bias to zeros.
- cfg_dropout (float): Classifier-free guidance dropout.
- cfg_coef (float): Classifier-free guidance coefficient.
- attribute_dropout (dict): Attribute dropout probabilities.
- two_step_cfg (bool): Whether to run classifier free-guidance with 2 distinct steps.
- **kwargs: Additional parameters for the transformer encoder.
- """
- def __init__(self, pattern_provider: CodebooksPatternProvider, condition_provider: ConditioningProvider,
- fuser: ConditionFuser, n_q: int = 8, card: int = 1024, dim: int = 128, num_heads: int = 8,
- hidden_scale: int = 4, norm: str = 'layer_norm', norm_first: bool = False,
- emb_lr: tp.Optional[float] = None, bias_proj: bool = True,
- weight_init: tp.Optional[str] = None, depthwise_init: tp.Optional[str] = None,
- zero_bias_init: bool = False, cfg_dropout: float = 0, cfg_coef: float = 1.0,
- attribute_dropout: tp.Dict[str, tp.Dict[str, float]] = {}, two_step_cfg: bool = False,
- **kwargs):
- super().__init__()
- self.cfg_coef = cfg_coef
- self.cfg_dropout = ClassifierFreeGuidanceDropout(p=cfg_dropout)
- self.att_dropout = AttributeDropout(p=attribute_dropout)
- self.condition_provider = condition_provider
- self.fuser = fuser
- self.card = card
- embed_dim = self.card + 1
- self.n_q = n_q
- self.dim = dim
- self.pattern_provider = pattern_provider
- self.two_step_cfg = two_step_cfg
- self.emb = nn.ModuleList([ScaledEmbedding(embed_dim, dim, lr=emb_lr) for _ in range(n_q)])
- if 'activation' in kwargs:
- kwargs['activation'] = get_activation_fn(kwargs['activation'])
- self.transformer = StreamingTransformer(
- d_model=dim, num_heads=num_heads, dim_feedforward=int(hidden_scale * dim),
- norm=norm, norm_first=norm_first, **kwargs)
- self.out_norm: tp.Optional[nn.Module] = None
- if norm_first:
- self.out_norm = create_norm_fn(norm, dim)
- self.linears = nn.ModuleList([nn.Linear(dim, self.card, bias=bias_proj) for _ in range(n_q)])
- self._init_weights(weight_init, depthwise_init, zero_bias_init)
- self._fsdp: tp.Optional[nn.Module]
- self.__dict__['_fsdp'] = None
-
- def _init_weights(self, weight_init: tp.Optional[str], depthwise_init: tp.Optional[str], zero_bias_init: bool):
- """Initialization of the transformer module weights.
-
- Args:
- weight_init (str, optional): Weight initialization strategy. See ``get_init_fn`` for valid options.
- depthwise_init (str, optional): Depthwise initialization strategy. The following options are valid:
- 'current' where the depth corresponds to the current layer index or 'global' where the total number
- of layer is used as depth. If not set, no depthwise initialization strategy is used.
- zero_bias_init (bool): Whether to initialize bias to zero or not.
- """
- assert depthwise_init is None or depthwise_init in ['current', 'global']
- assert depthwise_init is None or weight_init is not None, \
- "If 'depthwise_init' is defined, a 'weight_init' method should be provided."
- assert not zero_bias_init or weight_init is not None, \
- "If 'zero_bias_init', a 'weight_init' method should be provided"
-
- if weight_init is None:
- return
-
- for emb_layer in self.emb:
- init_layer(emb_layer, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init)
-
- for layer_idx, tr_layer in enumerate(self.transformer.layers):
- depth = None
- if depthwise_init == 'current':
- depth = layer_idx + 1
- elif depthwise_init == 'global':
- depth = len(self.transformer.layers)
- init_fn = partial(init_layer, method=weight_init, init_depth=depth, zero_bias_init=zero_bias_init)
- tr_layer.apply(init_fn)
-
- for linear in self.linears:
- init_layer(linear, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init)
-
- @property
- def special_token_id(self) -> int:
- return self.card
-
- @property
- def num_codebooks(self) -> int:
- return self.n_q
-
- def forward(self, sequence: torch.Tensor,
- conditions: tp.List[ConditioningAttributes],
- condition_tensors: tp.Optional[ConditionTensors] = None) -> torch.Tensor:
- """Apply language model on sequence and conditions.
- Given a tensor of sequence of shape [B, K, S] with K the number of codebooks and
- S the sequence steps, return the logits with shape [B, K, S, card].
-
- Args:
- sequence (torch.Tensor): Sequence of codes to model, of shape [B, K, S].
- conditions (list of ConditioningAttributes): Conditions to use when modeling
- the given codes. Note that when evaluating multiple times with the same conditioning
- you should pre-compute those and pass them as `condition_tensors`.
- condition_tensors (dict[str, ConditionType], optional): Pre-computed conditioning
- tensors, see `conditions`.
- Returns:
- torch.Tensor: Logits.
- """
- B, K, S = sequence.shape
- assert K == self.num_codebooks, "Sequence shape must match the specified number of codebooks"
- input_ = sum([self.emb[k](sequence[:, k]) for k in range(K)])
- if condition_tensors is None:
- assert not self._is_streaming, "Conditions tensors should be precomputed when streaming."
- # apply dropout modules
- conditions = self.cfg_dropout(conditions)
- conditions = self.att_dropout(conditions)
- tokenized = self.condition_provider.tokenize(conditions)
- # encode conditions and fuse, both have a streaming cache to not recompute when generating.
- condition_tensors = self.condition_provider(tokenized)
- else:
- assert not conditions, "Shouldn't pass both conditions and condition_tensors."
-
- input_, cross_attention_input = self.fuser(input_, condition_tensors)
-
- out = self.transformer(input_, cross_attention_src=cross_attention_input)
- if self.out_norm:
- out = self.out_norm(out)
- logits = torch.stack([self.linears[k](out) for k in range(K)], dim=1) # [B, K, S, card]
-
- # remove the prefix from the model outputs
- if len(self.fuser.fuse2cond['prepend']) > 0:
- logits = logits[:, :, -S:]
-
- return logits # [B, K, S, card]
-
- def compute_predictions(
- self, codes: torch.Tensor,
- conditions: tp.List[ConditioningAttributes],
- condition_tensors: tp.Optional[ConditionTensors] = None) -> LMOutput:
- """Given an input tensor of codes [B, K, T] and list of conditions, runs the model
- forward using the specified codes interleaving pattern.
-
- Args:
- codes (torch.Tensor): Input codes of shape [B, K, T] with B the batch size,
- K the number of codebooks and T the number of timesteps.
- conditions (list of ConditioningAttributes): conditionings to use when modeling
- the given codes. Note that when evaluating multiple times with the same conditioning
- you should pre-compute those and pass them as `condition_tensors`.
- condition_tensors (dict[str, ConditionType], optional): pre-computed conditioning
- tensors, see `conditions`.
- Returns:
- LMOutput: Language model outputs
- logits (torch.Tensor) of shape [B, K, T, card] corresponding to the provided codes,
- i.e. the first item corresponds to logits to predict the first code, meaning that
- no additional shifting of codes and logits is required.
- mask (torch.Tensor) of shape [B, K, T], mask over valid and invalid positions.
- Given the specified interleaving strategies, parts of the logits and codes should
- not be considered as valid predictions because of invalid context.
- """
- B, K, T = codes.shape
- codes = codes.contiguous()
- # map codes [B, K, T] into pattern sequence [B, K, S] using special_token_id for masked tokens
- pattern = self.pattern_provider.get_pattern(T)
- sequence_codes, sequence_indexes, sequence_mask = pattern.build_pattern_sequence(
- codes, self.special_token_id, keep_only_valid_steps=True
- )
- # apply model on pattern sequence
- model = self if self._fsdp is None else self._fsdp
- logits = model(sequence_codes, conditions, condition_tensors) # [B, K, S, card]
- # map back the logits on pattern sequence to logits on original codes: [B, K, S, card] -> [B, K, T, card]
- # and provide the corresponding mask over invalid positions of tokens
- logits = logits.permute(0, 3, 1, 2) # [B, card, K, S]
- # note: we use nans as special token to make it obvious if we feed unexpected logits
- logits, logits_indexes, logits_mask = pattern.revert_pattern_logits(
- logits, float('nan'), keep_only_valid_steps=True
- )
- logits = logits.permute(0, 2, 3, 1) # [B, K, T, card]
- logits_mask = logits_mask[None, :, :].expand(B, -1, -1) # [K, T] -> [B, K, T]
- return LMOutput(logits, logits_mask)
-
- def _sample_next_token(self,
- sequence: torch.Tensor,
- cfg_conditions: CFGConditions,
- unconditional_state: State,
- use_sampling: bool = False,
- temp: float = 1.0,
- top_k: int = 0,
- top_p: float = 0.0,
- cfg_coef: tp.Optional[float] = None) -> torch.Tensor:
- """Sample next token from the model given a sequence and a set of conditions. The model supports
- multiple sampling strategies (greedy sampling, softmax, top-k, top-p...).
-
- Args:
- sequence (torch.Tensor): Current sequence of shape [B, K, S]
- with K corresponding to the number of codebooks and S the number of sequence steps.
- S = 1 in streaming mode, except for the first step that contains a bigger prompt.
- cfg_conditions (CFGConditions): Set of conditions. If CFG is used,
- should be twice the batch size, being the concatenation of the conditions + null conditions.
- unconditional_state (State): Streaming state of the unconditional branch, used when two-step CFG is enabled.
- use_sampling (bool): Whether to use a sampling strategy or not.
- temp (float): Sampling temperature.
- top_k (int): K for "top-k" sampling.
- top_p (float): P for "top-p" sampling.
- cfg_coef (float, optional): classifier free guidance coefficient
- Returns:
- next_token (torch.Tensor): Next token tensor of shape [B, K, 1].
- """
- B = sequence.shape[0]
- cfg_coef = self.cfg_coef if cfg_coef is None else cfg_coef
- model = self if self._fsdp is None else self._fsdp
- if self.two_step_cfg and cfg_conditions != {}:
- assert isinstance(cfg_conditions, tuple), type(cfg_conditions)
- condition_tensors, null_condition_tensors = cfg_conditions
- cond_logits = model(sequence, conditions=[], condition_tensors=condition_tensors)
- state = self.get_streaming_state()
- self.set_streaming_state(unconditional_state)
- uncond_logits = model(sequence, conditions=[], condition_tensors=null_condition_tensors)
- unconditional_state.update(self.get_streaming_state())
- self.set_streaming_state(state)
- logits = uncond_logits + (cond_logits - uncond_logits) * cfg_coef
- else:
- assert isinstance(cfg_conditions, dict)
- condition_tensors = cfg_conditions
- if condition_tensors:
- # Preparing for CFG, predicting both conditional and unconditional logits.
- sequence = torch.cat([sequence, sequence], dim=0)
- all_logits = model(
- sequence,
- conditions=[], condition_tensors=condition_tensors)
- if condition_tensors:
- cond_logits, uncond_logits = all_logits.split(B, dim=0) # [B, K, T, card]
- logits = uncond_logits + (cond_logits - uncond_logits) * cfg_coef
- else:
- logits = all_logits
-
- logits = logits.permute(0, 1, 3, 2) # [B, K, card, T]
- logits = logits[..., -1] # [B x K x card]
-
- # Apply softmax for sampling if temp > 0. Else, do greedy sampling to avoid zero division error.
- if use_sampling and temp > 0.0:
- probs = torch.softmax(logits / temp, dim=-1)
- if top_p > 0.0:
- next_token = utils.sample_top_p(probs, p=top_p)
- elif top_k > 0:
- next_token = utils.sample_top_k(probs, k=top_k)
- else:
- next_token = utils.multinomial(probs, num_samples=1)
- else:
- next_token = torch.argmax(logits, dim=-1, keepdim=True)
-
- return next_token
-
- @torch.no_grad()
- def generate(self,
- prompt: tp.Optional[torch.Tensor] = None,
- conditions: tp.List[ConditioningAttributes] = [],
- num_samples: tp.Optional[int] = None,
- max_gen_len: int = 256,
- use_sampling: bool = True,
- temp: float = 1.0,
- top_k: int = 250,
- top_p: float = 0.0,
- cfg_coef: tp.Optional[float] = None,
- two_step_cfg: tp.Optional[bool] = None,
- remove_prompts: bool = False,
- check: bool = False,
- callback: tp.Optional[tp.Callable[[int, int], None]] = None) -> torch.Tensor:
- """Generate tokens sampling from the model given a prompt or unconditionally. Generation can
- be performed in a greedy fashion or using sampling with top K and top P strategies.
-
- Args:
- prompt (torch.Tensor, optional): Prompt tokens of shape [B, K, T].
- conditions (list of ConditioningAttributes, optional): List of conditions.
- num_samples (int, optional): Number of samples to generate when no prompt and no conditions are given.
- max_gen_len (int): Maximum generation length.
- use_sampling (bool): Whether to use a sampling strategy or not.
- temp (float): Sampling temperature.
- top_k (int): K for "top-k" sampling.
- top_p (float): P for "top-p" sampling.
- cfg_coef (float, optional): Classifier-free guidance coefficient.
- two_step_cfg (bool, optional): Whether to perform classifier-free guidance with two steps generation.
- remove_prompts (bool): Whether to remove prompts from generation or not.
- check (bool): Whether to apply further checks on generated sequence.
- callback (Callback, optional): Callback function to report generation progress.
- Returns:
- torch.Tensor: Generated tokens.
- """
- assert not self.training, "generation shouldn't be used in training mode."
- first_param = next(iter(self.parameters()))
- device = first_param.device
-
- # Checking all input shapes are consistent.
- possible_num_samples = []
- if num_samples is not None:
- possible_num_samples.append(num_samples)
- elif prompt is not None:
- possible_num_samples.append(prompt.shape[0])
- elif conditions:
- possible_num_samples.append(len(conditions))
- else:
- possible_num_samples.append(1)
- assert all(x == possible_num_samples[0] for x in possible_num_samples), "Inconsistent input shapes"
- num_samples = possible_num_samples[0]
-
- # below we create set of conditions: one conditional and one unconditional
- # to do that we merge the regular condition together with the null condition
- # we then do 1 forward pass instead of 2.
- # the reason for that is two-fold:
- # 1. it is about x2 faster than doing 2 forward passes
- # 2. avoid the streaming API treating the 2 passes as part of different time steps
- # We also support doing two different passes, in particular to ensure that
- # the padding structure is exactly the same between train and test.
- # With a batch size of 1, this can be slower though.
- cfg_conditions: CFGConditions
- two_step_cfg = self.two_step_cfg if two_step_cfg is None else two_step_cfg
- if conditions:
- null_conditions = ClassifierFreeGuidanceDropout(p=1.0)(conditions)
- if two_step_cfg:
- cfg_conditions = (
- self.condition_provider(self.condition_provider.tokenize(conditions)),
- self.condition_provider(self.condition_provider.tokenize(null_conditions)),
- )
- else:
- conditions = conditions + null_conditions
- tokenized = self.condition_provider.tokenize(conditions)
- cfg_conditions = self.condition_provider(tokenized)
- else:
- cfg_conditions = {}
-
- if prompt is None:
- assert num_samples > 0
- prompt = torch.zeros((num_samples, self.num_codebooks, 0), dtype=torch.long, device=device)
-
- B, K, T = prompt.shape
- start_offset = T
- assert start_offset < max_gen_len
-
- pattern = self.pattern_provider.get_pattern(max_gen_len)
- # this token is used as default value for codes that are not generated yet
- unknown_token = -1
-
- # we generate codes up to the max_gen_len that will be mapped to the pattern sequence
- gen_codes = torch.full((B, K, max_gen_len), unknown_token, dtype=torch.long, device=device)
- # filling the gen_codes with the prompt if needed
- gen_codes[..., :start_offset] = prompt
- # create the gen_sequence with proper interleaving from the pattern: [B, K, S]
- gen_sequence, indexes, mask = pattern.build_pattern_sequence(gen_codes, self.special_token_id)
- # retrieve the start_offset in the sequence:
- # it is the first sequence step that contains the `start_offset` timestep
- start_offset_sequence = pattern.get_first_step_with_timesteps(start_offset)
- assert start_offset_sequence is not None
-
- with self.streaming():
- unconditional_state = self.get_streaming_state()
- prev_offset = 0
- gen_sequence_len = gen_sequence.shape[-1] # gen_sequence shape is [B, K, S]
- for offset in range(start_offset_sequence, gen_sequence_len):
- # get current sequence (note that the streaming API is providing the caching over previous offsets)
- curr_sequence = gen_sequence[..., prev_offset:offset]
- curr_mask = mask[None, ..., prev_offset:offset].expand(B, -1, -1)
- if check:
- # check coherence between mask and sequence
- assert (curr_sequence == torch.where(curr_mask, curr_sequence, self.special_token_id)).all()
- # should never happen as gen_sequence is filled progressively
- assert not (curr_sequence == unknown_token).any()
- # sample next token from the model, next token shape is [B, K, 1]
- next_token = self._sample_next_token(
- curr_sequence, cfg_conditions, unconditional_state, use_sampling, temp, top_k, top_p,
- cfg_coef=cfg_coef)
- # ensure the tokens that should be masked are properly set to special_token_id
- # as the model never output special_token_id
- valid_mask = mask[..., offset:offset+1].expand(B, -1, -1)
- next_token[~valid_mask] = self.special_token_id
- # ensure we don't overwrite prompt tokens, we only write over unknown tokens
- # (then mask tokens should be left as is as well, which is correct)
- gen_sequence[..., offset:offset+1] = torch.where(
- gen_sequence[..., offset:offset+1] == unknown_token,
- next_token, gen_sequence[..., offset:offset+1]
- )
- prev_offset = offset
- if callback is not None:
- callback(1 + offset - start_offset_sequence, gen_sequence_len - start_offset_sequence)
- unconditional_state.clear()
-
- # ensure sequence has been entirely filled
- assert not (gen_sequence == unknown_token).any()
- # ensure gen_sequence pattern and mask are matching
- # which means the gen_sequence is valid according to the pattern
- assert (
- gen_sequence == torch.where(mask[None, ...].expand(B, -1, -1), gen_sequence, self.special_token_id)
- ).all()
- # get back the codes, trimming the prompt if needed and cutting potentially incomplete timesteps
- out_codes, out_indexes, out_mask = pattern.revert_pattern_sequence(gen_sequence, special_token=unknown_token)
-
- # sanity checks over the returned codes and corresponding masks
- assert (out_codes[..., :max_gen_len] != unknown_token).all()
- assert (out_mask[..., :max_gen_len] == 1).all()
-
- out_start_offset = start_offset if remove_prompts else 0
- out_codes = out_codes[..., out_start_offset:max_gen_len]
-
- # ensure the returned codes are all valid
- assert (out_codes >= 0).all() and (out_codes <= self.card).all()
- return out_codes
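The classifier-free guidance step in `_sample_next_token` above boils down to a linear blend of conditional and unconditional logits computed in a single duplicated-batch forward pass. A minimal sketch on dummy tensors (shapes follow the `[B, K, S, card]` convention used in the code; the model call is replaced by random logits):

```python
import torch

B, K, card = 2, 4, 1024
cfg_coef = 3.0

# Stand-in for the model output on the duplicated batch: the first B rows
# are the conditional logits, the last B rows the unconditional ones.
all_logits = torch.randn(2 * B, K, 1, card)

cond_logits, uncond_logits = all_logits.split(B, dim=0)
logits = uncond_logits + (cond_logits - uncond_logits) * cfg_coef

# With cfg_coef == 1.0 the blend reduces to the purely conditional logits.
assert torch.allclose(uncond_logits + (cond_logits - uncond_logits) * 1.0, cond_logits)
```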
diff --git a/spaces/AIConsultant/MusicGen/audiocraft/utils/export_legacy.py b/spaces/AIConsultant/MusicGen/audiocraft/utils/export_legacy.py
deleted file mode 100644
index 52f145f3148c3e9fdba436273bc45480fbae6481..0000000000000000000000000000000000000000
--- a/spaces/AIConsultant/MusicGen/audiocraft/utils/export_legacy.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-"""
-Legacy functions used at the time of the first release, kept for reference.
-"""
-
-from pathlib import Path
-import typing as tp
-
-from omegaconf import OmegaConf, DictConfig
-import torch
-
-
-def _clean_lm_cfg(cfg: DictConfig):
- OmegaConf.set_struct(cfg, False)
- # This used to be set automatically in the LM solver, need a more robust solution
- # for the future.
- cfg['transformer_lm']['card'] = 2048
- cfg['transformer_lm']['n_q'] = 4
- # Experimental params no longer supported.
- bad_params = ['spectral_norm_attn_iters', 'spectral_norm_ff_iters',
- 'residual_balancer_attn', 'residual_balancer_ff', 'layer_drop']
- for name in bad_params:
- del cfg['transformer_lm'][name]
- OmegaConf.set_struct(cfg, True)
- return cfg
-
-
-def export_encodec(checkpoint_path: tp.Union[Path, str], out_folder: tp.Union[Path, str]):
- sig = Path(checkpoint_path).parent.name
- assert len(sig) == 8, "Not a valid Dora signature"
- pkg = torch.load(checkpoint_path, 'cpu')
- new_pkg = {
- 'best_state': pkg['ema']['state']['model'],
- 'xp.cfg': OmegaConf.to_yaml(pkg['xp.cfg']),
- }
- out_file = Path(out_folder) / f'{sig}.th'
- torch.save(new_pkg, out_file)
- return out_file
-
-
-def export_lm(checkpoint_path: tp.Union[Path, str], out_folder: tp.Union[Path, str]):
- sig = Path(checkpoint_path).parent.name
- assert len(sig) == 8, "Not a valid Dora signature"
- pkg = torch.load(checkpoint_path, 'cpu')
- new_pkg = {
- 'best_state': pkg['fsdp_best_state']['model'],
- 'xp.cfg': OmegaConf.to_yaml(_clean_lm_cfg(pkg['xp.cfg']))
- }
- out_file = Path(out_folder) / f'{sig}.th'
- torch.save(new_pkg, out_file)
- return out_file
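Both exporters expect the checkpoint to sit inside a directory whose name is an 8-character Dora signature, and they write `<signature>.th` containing `best_state` and `xp.cfg` into the output folder. A hedged usage sketch with hypothetical paths (`0a1b2c3d` stands in for a real signature):

```python
from audiocraft.utils.export_legacy import export_encodec, export_lm

# Hypothetical checkpoint layout: /checkpoints/<8-char Dora signature>/checkpoint.th
export_encodec('/checkpoints/0a1b2c3d/checkpoint.th', '/exported')
export_lm('/checkpoints/0a1b2c3d/checkpoint.th', '/exported')
# Each call writes /exported/0a1b2c3d.th with 'best_state' and 'xp.cfg' keys.
```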
diff --git a/spaces/AIFILMS/generate_human_motion/pyrender/README.md b/spaces/AIFILMS/generate_human_motion/pyrender/README.md
deleted file mode 100644
index ae88ed1c5e78f247e38291ed83cf4c81230bf976..0000000000000000000000000000000000000000
--- a/spaces/AIFILMS/generate_human_motion/pyrender/README.md
+++ /dev/null
@@ -1,92 +0,0 @@
-# Pyrender
-
-[](https://travis-ci.org/mmatl/pyrender)
-[](https://pyrender.readthedocs.io/en/latest/?badge=latest)
-[](https://coveralls.io/github/mmatl/pyrender?branch=master)
-[](https://badge.fury.io/py/pyrender)
-[](https://pepy.tech/project/pyrender)
-
-Pyrender is a pure Python (2.7, 3.4, 3.5, 3.6) library for physically-based
-rendering and visualization.
-It is designed to meet the [glTF 2.0 specification from Khronos](https://www.khronos.org/gltf/).
-
-Pyrender is lightweight, easy to install, and simple to use.
-It comes packaged with both an intuitive scene viewer and a headache-free
-offscreen renderer with support for GPU-accelerated rendering on headless
-servers, which makes it perfect for machine learning applications.
-
-Extensive documentation, including a quickstart guide, is provided [here](https://pyrender.readthedocs.io/en/latest/).
-
-For a minimal working example of GPU-accelerated offscreen rendering using EGL,
-check out the [EGL Google CoLab Notebook](https://colab.research.google.com/drive/1pcndwqeY8vker3bLKQNJKr3B-7-SYenE?usp=sharing).
-
-
-
-
-
-
-
-## Installation
-You can install pyrender directly from pip.
-
-```bash
-pip install pyrender
-```
-
-## Features
-
-Despite being lightweight, pyrender has lots of features, including:
-
-* Simple interoperation with the amazing [trimesh](https://github.com/mikedh/trimesh) project,
-which enables out-of-the-box support for dozens of mesh types, including OBJ,
-STL, DAE, OFF, PLY, and GLB.
-* An easy-to-use scene viewer with support for animation, showing face and vertex
-normals, toggling lighting conditions, and saving images and GIFs.
-* An offscreen rendering module that supports OSMesa and EGL backends.
-* Shadow mapping for directional and spot lights.
-* Metallic-roughness materials for physically-based rendering, including several
-types of texture and normal mapping.
-* Transparency.
-* Depth and color image generation.
-
-## Sample Usage
-
-For sample usage, check out the [quickstart
-guide](https://pyrender.readthedocs.io/en/latest/examples/index.html) or one of
-the Google CoLab Notebooks:
-
-* [EGL Google CoLab Notebook](https://colab.research.google.com/drive/1pcndwqeY8vker3bLKQNJKr3B-7-SYenE?usp=sharing)
-
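For orientation, here is a minimal offscreen rendering sketch; the mesh path and viewport size are placeholders, and the quickstart guide above has complete, tested examples.

```python
import numpy as np
import trimesh
import pyrender

# Load any mesh format trimesh understands (placeholder path).
tm = trimesh.load('path/to/model.obj', force='mesh')
scene = pyrender.Scene()
scene.add(pyrender.Mesh.from_trimesh(tm))

# Simple camera and light looking down -Z from two units away.
pose = np.array([
    [1.0, 0.0, 0.0, 0.0],
    [0.0, 1.0, 0.0, 0.0],
    [0.0, 0.0, 1.0, 2.0],
    [0.0, 0.0, 0.0, 1.0],
])
scene.add(pyrender.PerspectiveCamera(yfov=np.pi / 3.0), pose=pose)
scene.add(pyrender.DirectionalLight(color=np.ones(3), intensity=3.0), pose=pose)

renderer = pyrender.OffscreenRenderer(viewport_width=640, viewport_height=480)
color, depth = renderer.render(scene)  # color: HxWx3 uint8, depth: HxW float
renderer.delete()
```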
-## Viewer Keyboard and Mouse Controls
-
-When using the viewer, the basic controls for moving about the scene are as follows:
-
-* To rotate the camera about the center of the scene, hold the left mouse button and drag the cursor.
-* To rotate the camera about its viewing axis, hold `CTRL` left mouse button and drag the cursor.
-* To pan the camera, do one of the following:
- * Hold `SHIFT`, then hold the left mouse button and drag the cursor.
- * Hold the middle mouse button and drag the cursor.
-* To zoom the camera in or out, do one of the following:
- * Scroll the mouse wheel.
- * Hold the right mouse button and drag the cursor.
-
-The available keyboard commands are as follows:
-
-* `a`: Toggles rotational animation mode.
-* `c`: Toggles backface culling.
-* `f`: Toggles fullscreen mode.
-* `h`: Toggles shadow rendering.
-* `i`: Toggles axis display mode (no axes, world axis, mesh axes, all axes).
-* `l`: Toggles lighting mode (scene lighting, Raymond lighting, or direct lighting).
-* `m`: Toggles face normal visualization.
-* `n`: Toggles vertex normal visualization.
-* `o`: Toggles orthographic camera mode.
-* `q`: Quits the viewer.
-* `r`: Starts recording a GIF, and pressing again stops recording and opens a file dialog.
-* `s`: Opens a file dialog to save the current view as an image.
-* `w`: Toggles wireframe mode (scene default, flip wireframes, all wireframe, or all solid).
-* `z`: Resets the camera to the default view.
-
-As a note, displaying shadows significantly slows down rendering, so if you're
-experiencing low framerates, just kill shadows or reduce the number of lights in
-your scene.
diff --git a/spaces/AIZero2Hero4Health/3-ChatbotBlenderbot-GR/README.md b/spaces/AIZero2Hero4Health/3-ChatbotBlenderbot-GR/README.md
deleted file mode 100644
index 5d45a17e81e1a1616182dce44cebaa9bf13ca31a..0000000000000000000000000000000000000000
--- a/spaces/AIZero2Hero4Health/3-ChatbotBlenderbot-GR/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: 3 ChatbotBlenderbot GR
-emoji: 🏢
-colorFrom: blue
-colorTo: green
-sdk: gradio
-sdk_version: 3.8.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ASJMO/freegpt/g4f/Provider/Providers/Ails.py b/spaces/ASJMO/freegpt/g4f/Provider/Providers/Ails.py
deleted file mode 100644
index 5feec9e987e3cd2590e2a72b623dc4b90e0cf53d..0000000000000000000000000000000000000000
--- a/spaces/ASJMO/freegpt/g4f/Provider/Providers/Ails.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import os
-import time
-import json
-import uuid
-import hashlib
-import requests
-
-from ...typing import sha256, Dict, get_type_hints
-from datetime import datetime
-
-url: str = 'https://ai.ls'
-model: str = 'gpt-3.5-turbo'
-supports_stream = True
-needs_auth = False
-working = True
-
-
-class Utils:
- def hash(json_data: Dict[str, str]) -> sha256:
-
- base_string: str = '%s:%s:%s:%s' % (
- json_data['t'],
- json_data['m'],
- 'WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf',
- len(json_data['m'])
- )
-
- return hashlib.sha256(base_string.encode()).hexdigest()
-
- def format_timestamp(timestamp: int) -> str:
-
- e = timestamp
- n = e % 10
- r = n + 1 if n % 2 == 0 else n
- return str(e - n + r)
-
-
-def _create_completion(model: str, messages: list, temperature: float = 0.6, stream: bool = False, **kwargs):
-
- headers = {
- 'authority': 'api.caipacity.com',
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'authorization': 'Bearer free',
- 'client-id': str(uuid.uuid4()),
- 'client-v': '0.1.249',
- 'content-type': 'application/json',
- 'origin': 'https://ai.ls',
- 'referer': 'https://ai.ls/',
- 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"Windows"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'cross-site',
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
- }
-
- timestamp = Utils.format_timestamp(int(time.time() * 1000))
-
- sig = {
- 'd': datetime.now().strftime('%Y-%m-%d'),
- 't': timestamp,
- 's': Utils.hash({
- 't': timestamp,
- 'm': messages[-1]['content']})}
-
- json_data = json.dumps(separators=(',', ':'), obj={
- 'model': 'gpt-3.5-turbo',
- 'temperature': 0.6,
- 'stream': True,
- 'messages': messages} | sig)
-
- response = requests.post('https://api.caipacity.com/v1/chat/completions',
- headers=headers, data=json_data, stream=True)
-
- for token in response.iter_lines():
- if b'content' in token:
- completion_chunk = json.loads(token.decode().replace('data: ', ''))
- token = completion_chunk['choices'][0]['delta'].get('content')
- if token is not None:
- yield token
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join(
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
diff --git a/spaces/AfrodreamsAI/afrodreams/CaffeLoader.py b/spaces/AfrodreamsAI/afrodreams/CaffeLoader.py
deleted file mode 100644
index d09ccdd752f47e1d1e03f4560a8649e9cf921adc..0000000000000000000000000000000000000000
--- a/spaces/AfrodreamsAI/afrodreams/CaffeLoader.py
+++ /dev/null
@@ -1,254 +0,0 @@
-import torch
-import torch.nn as nn
-
-
-class VGG(nn.Module):
- def __init__(self, features, num_classes=1000):
- super(VGG, self).__init__()
- self.features = features
- self.classifier = nn.Sequential(
- nn.Linear(512 * 7 * 7, 4096),
- nn.ReLU(True),
- nn.Dropout(),
- nn.Linear(4096, 4096),
- nn.ReLU(True),
- nn.Dropout(),
- nn.Linear(4096, num_classes),
- )
-
-
-class VGG_SOD(nn.Module):
- def __init__(self, features, num_classes=100):
- super(VGG_SOD, self).__init__()
- self.features = features
- self.classifier = nn.Sequential(
- nn.Linear(512 * 7 * 7, 4096),
- nn.ReLU(True),
- nn.Dropout(),
- nn.Linear(4096, 4096),
- nn.ReLU(True),
- nn.Dropout(),
- nn.Linear(4096, 100),
- )
-
-
-class VGG_FCN32S(nn.Module):
- def __init__(self, features, num_classes=1000):
- super(VGG_FCN32S, self).__init__()
- self.features = features
- self.classifier = nn.Sequential(
- nn.Conv2d(512,4096,(7, 7)),
- nn.ReLU(True),
- nn.Dropout(0.5),
- nn.Conv2d(4096,4096,(1, 1)),
- nn.ReLU(True),
- nn.Dropout(0.5),
- )
-
-
-class VGG_PRUNED(nn.Module):
- def __init__(self, features, num_classes=1000):
- super(VGG_PRUNED, self).__init__()
- self.features = features
- self.classifier = nn.Sequential(
- nn.Linear(512 * 7 * 7, 4096),
- nn.ReLU(True),
- nn.Dropout(0.5),
- nn.Linear(4096, 4096),
- nn.ReLU(True),
- nn.Dropout(0.5),
- )
-
-
-class NIN(nn.Module):
- def __init__(self, pooling):
- super(NIN, self).__init__()
- if pooling == 'max':
- pool2d = nn.MaxPool2d((3, 3),(2, 2),(0, 0),ceil_mode=True)
- elif pooling == 'avg':
- pool2d = nn.AvgPool2d((3, 3),(2, 2),(0, 0),ceil_mode=True)
-
- self.features = nn.Sequential(
- nn.Conv2d(3,96,(11, 11),(4, 4)),
- nn.ReLU(inplace=True),
- nn.Conv2d(96,96,(1, 1)),
- nn.ReLU(inplace=True),
- nn.Conv2d(96,96,(1, 1)),
- nn.ReLU(inplace=True),
- pool2d,
- nn.Conv2d(96,256,(5, 5),(1, 1),(2, 2)),
- nn.ReLU(inplace=True),
- nn.Conv2d(256,256,(1, 1)),
- nn.ReLU(inplace=True),
- nn.Conv2d(256,256,(1, 1)),
- nn.ReLU(inplace=True),
- pool2d,
- nn.Conv2d(256,384,(3, 3),(1, 1),(1, 1)),
- nn.ReLU(inplace=True),
- nn.Conv2d(384,384,(1, 1)),
- nn.ReLU(inplace=True),
- nn.Conv2d(384,384,(1, 1)),
- nn.ReLU(inplace=True),
- pool2d,
- nn.Dropout(0.5),
- nn.Conv2d(384,1024,(3, 3),(1, 1),(1, 1)),
- nn.ReLU(inplace=True),
- nn.Conv2d(1024,1024,(1, 1)),
- nn.ReLU(inplace=True),
- nn.Conv2d(1024,1000,(1, 1)),
- nn.ReLU(inplace=True),
- nn.AvgPool2d((6, 6),(1, 1),(0, 0),ceil_mode=True),
- nn.Softmax(),
- )
-
-
-
-class ModelParallel(nn.Module):
- def __init__(self, net, device_ids, device_splits):
- super(ModelParallel, self).__init__()
- self.device_list = self.name_devices(device_ids.split(','))
- self.chunks = self.chunks_to_devices(self.split_net(net, device_splits.split(',')))
-
- def name_devices(self, input_list):
- device_list = []
- for i, device in enumerate(input_list):
- if str(device).lower() != 'c':
- device_list.append("cuda:" + str(device))
- else:
- device_list.append("cpu")
- return device_list
-
- def split_net(self, net, device_splits):
- chunks, cur_chunk = [], nn.Sequential()
- for i, l in enumerate(net):
- cur_chunk.add_module(str(i), net[i])
- if str(i) in device_splits and device_splits != '':
- del device_splits[0]
- chunks.append(cur_chunk)
- cur_chunk = nn.Sequential()
- chunks.append(cur_chunk)
- return chunks
-
- def chunks_to_devices(self, chunks):
- for i, chunk in enumerate(chunks):
- chunk.to(self.device_list[i])
- return chunks
-
- def c(self, input, i):
- if input.type() == 'torch.FloatTensor' and 'cuda' in self.device_list[i]:
- input = input.type('torch.cuda.FloatTensor')
- elif input.type() == 'torch.cuda.FloatTensor' and 'cpu' in self.device_list[i]:
- input = input.type('torch.FloatTensor')
- return input
-
- def forward(self, input):
- for i, chunk in enumerate(self.chunks):
- if i < len(self.chunks) -1:
- input = self.c(chunk(self.c(input, i).to(self.device_list[i])), i+1).to(self.device_list[i+1])
- else:
- input = chunk(input)
- return input
-
-
-
-def buildSequential(channel_list, pooling):
- layers = []
- in_channels = 3
- if pooling == 'max':
- pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
- elif pooling == 'avg':
- pool2d = nn.AvgPool2d(kernel_size=2, stride=2)
- else:
- raise ValueError("Unrecognized pooling parameter")
- for c in channel_list:
- if c == 'P':
- layers += [pool2d]
- else:
- conv2d = nn.Conv2d(in_channels, c, kernel_size=3, padding=1)
- layers += [conv2d, nn.ReLU(inplace=True)]
- in_channels = c
- return nn.Sequential(*layers)
-
-
-channel_list = {
-'VGG-16p': [24, 22, 'P', 41, 51, 'P', 108, 89, 111, 'P', 184, 276, 228, 'P', 512, 512, 512, 'P'],
-'VGG-16': [64, 64, 'P', 128, 128, 'P', 256, 256, 256, 'P', 512, 512, 512, 'P', 512, 512, 512, 'P'],
-'VGG-19': [64, 64, 'P', 128, 128, 'P', 256, 256, 256, 256, 'P', 512, 512, 512, 512, 'P', 512, 512, 512, 512, 'P'],
-}
-
-nin_dict = {
-'C': ['conv1', 'cccp1', 'cccp2', 'conv2', 'cccp3', 'cccp4', 'conv3', 'cccp5', 'cccp6', 'conv4-1024', 'cccp7-1024', 'cccp8-1024'],
-'R': ['relu0', 'relu1', 'relu2', 'relu3', 'relu5', 'relu6', 'relu7', 'relu8', 'relu9', 'relu10', 'relu11', 'relu12'],
-'P': ['pool1', 'pool2', 'pool3', 'pool4'],
-'D': ['drop'],
-}
-vgg16_dict = {
-'C': ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2', 'conv3_3', 'conv4_1', 'conv4_2', 'conv4_3', 'conv5_1', 'conv5_2', 'conv5_3'],
-'R': ['relu1_1', 'relu1_2', 'relu2_1', 'relu2_2', 'relu3_1', 'relu3_2', 'relu3_3', 'relu4_1', 'relu4_2', 'relu4_3', 'relu5_1', 'relu5_2', 'relu5_3'],
-'P': ['pool1', 'pool2', 'pool3', 'pool4', 'pool5'],
-}
-vgg19_dict = {
-'C': ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2', 'conv3_3', 'conv3_4', 'conv4_1', 'conv4_2', 'conv4_3', 'conv4_4', 'conv5_1', 'conv5_2', 'conv5_3', 'conv5_4'],
-'R': ['relu1_1', 'relu1_2', 'relu2_1', 'relu2_2', 'relu3_1', 'relu3_2', 'relu3_3', 'relu3_4', 'relu4_1', 'relu4_2', 'relu4_3', 'relu4_4', 'relu5_1', 'relu5_2', 'relu5_3', 'relu5_4'],
-'P': ['pool1', 'pool2', 'pool3', 'pool4', 'pool5'],
-}
-
-
-def modelSelector(model_file, pooling):
- vgg_list = ["fcn32s", "pruning", "sod", "vgg"]
- if any(name in model_file for name in vgg_list):
- if "pruning" in model_file:
- print("VGG-16 Architecture Detected")
- print("Using The Channel Pruning Model")
- cnn, layerList = VGG_PRUNED(buildSequential(channel_list['VGG-16p'], pooling)), vgg16_dict
- elif "fcn32s" in model_file:
- print("VGG-16 Architecture Detected")
- print("Using the fcn32s-heavy-pascal Model")
- cnn, layerList = VGG_FCN32S(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict
- elif "sod" in model_file:
- print("VGG-16 Architecture Detected")
- print("Using The SOD Fintune Model")
- cnn, layerList = VGG_SOD(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict
- elif "19" in model_file:
- print("VGG-19 Architecture Detected")
- cnn, layerList = VGG(buildSequential(channel_list['VGG-19'], pooling)), vgg19_dict
- elif "16" in model_file:
- print("VGG-16 Architecture Detected")
- cnn, layerList = VGG(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict
- else:
- raise ValueError("VGG architecture not recognized.")
- elif "nin" in model_file:
- print("NIN Architecture Detected")
- cnn, layerList = NIN(pooling), nin_dict
- else:
- raise ValueError("Model architecture not recognized.")
- return cnn, layerList
-
-
-# Print like Torch7/loadcaffe
-def print_loadcaffe(cnn, layerList):
- c = 0
- for l in list(cnn):
- if "Conv2d" in str(l):
- in_c, out_c, ks = str(l.in_channels), str(l.out_channels), str(l.kernel_size)
- print(layerList['C'][c] +": " + (out_c + " " + in_c + " " + ks).replace(")",'').replace("(",'').replace(",",'') )
- c+=1
- if c == len(layerList['C']):
- break
-
-
-# Load the model, and configure pooling layer type
-def loadCaffemodel(model_file, pooling, use_gpu, disable_check):
- cnn, layerList = modelSelector(str(model_file).lower(), pooling)
-
- cnn.load_state_dict(torch.load(model_file), strict=(not disable_check))
- print("Successfully loaded " + str(model_file))
-
- # Maybe convert the model to cuda now, to avoid later issues
- if "c" not in str(use_gpu).lower() or "c" not in str(use_gpu[0]).lower():
- cnn = cnn.cuda()
- cnn = cnn.features
-
- print_loadcaffe(cnn, layerList)
-
- return cnn, layerList
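The loader above picks the architecture purely from the model filename and the pooling argument. A hedged usage sketch (the weights filename is a placeholder for converted Caffe weights; `use_gpu='c'` keeps everything on the CPU):

```python
from CaffeLoader import loadCaffemodel  # assumes CaffeLoader.py is on the path

# 'vgg19-weights.pth' is a placeholder; '19' in the name selects VGG-19.
cnn, layer_list = loadCaffemodel('models/vgg19-weights.pth', pooling='max',
                                 use_gpu='c', disable_check=False)
print(layer_list['R'])  # ReLU layer names, e.g. usable as style/content targets
```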
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/mousewheeltoupdown-plugin.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/mousewheeltoupdown-plugin.js
deleted file mode 100644
index 18f250b6b4248c631375bfe01b20d1009a3a99cb..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/mousewheeltoupdown-plugin.js
+++ /dev/null
@@ -1,20 +0,0 @@
-import MouseWheelToUpDown from './mousewheeltoupdown.js';
-
-class MouseWheelToUpDownPlugin extends Phaser.Plugins.BasePlugin {
-
- constructor(pluginManager) {
- super(pluginManager);
- }
-
- start() {
- var eventEmitter = this.game.events;
- eventEmitter.on('destroy', this.destroy, this);
- }
-
- add(scene, config) {
- return new MouseWheelToUpDown(scene, config);
- }
-
-}
-
-export default MouseWheelToUpDownPlugin;
\ No newline at end of file
diff --git a/spaces/AlirezaSM/bear_classifier/app.py b/spaces/AlirezaSM/bear_classifier/app.py
deleted file mode 100644
index b1251c65ba8bc20a57d2e4182a41b81fe5cefb8e..0000000000000000000000000000000000000000
--- a/spaces/AlirezaSM/bear_classifier/app.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from fastai.vision.all import *
-import gradio as gr
-
-learn = load_learner('export.pkl')
-
-categories = ('Black', 'Grizzly', 'Teddy')
-
-def classify_image(img):
- pred, idx, probs = learn.predict(img)
- return dict(zip(categories, map(float, probs)))
-
-image = gr.inputs.Image(shape=(192, 192))
-label = gr.outputs.Label()
-examples = ['black.jpg', 'grizzly.jpg', 'teddy.jpg']
-
-intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples)
-intf.launch(inline=False)
\ No newline at end of file
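Note that the `gr.inputs` / `gr.outputs` namespaces used in this app were removed in later Gradio releases. A hedged equivalent using the current component API (same behaviour, not tested against the version this Space pinned):

```python
from fastai.vision.all import load_learner
import gradio as gr

learn = load_learner('export.pkl')
categories = ('Black', 'Grizzly', 'Teddy')

def classify_image(img):
    pred, idx, probs = learn.predict(img)
    return dict(zip(categories, map(float, probs)))

demo = gr.Interface(
    fn=classify_image,
    inputs=gr.Image(),    # replaces gr.inputs.Image(shape=(192, 192))
    outputs=gr.Label(),   # replaces gr.outputs.Label()
    examples=['black.jpg', 'grizzly.jpg', 'teddy.jpg'],
)
demo.launch()
```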
diff --git a/spaces/Alycer/VITS-Umamusume-voice-synthesizer/attentions.py b/spaces/Alycer/VITS-Umamusume-voice-synthesizer/attentions.py
deleted file mode 100644
index 86bc73b5fe98cc7b443e9078553920346c996707..0000000000000000000000000000000000000000
--- a/spaces/Alycer/VITS-Umamusume-voice-synthesizer/attentions.py
+++ /dev/null
@@ -1,300 +0,0 @@
-import math
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-from modules import LayerNorm
-
-
-class Encoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
-
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class Decoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
- self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert t_s == t_t, "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
- rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings)
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert t_s == t_t, "Local attention is only available for self-attention."
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
- output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
- output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
-
- # Concat extra elements so to add up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
- batch, heads, length, _ = x.size()
- # pad along column
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
- x_flat = x.view([batch, heads, length**2 + length*(length -1)])
- # add 0's in the beginning that will skew the elements after reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
- def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
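
Editor's note: the relative-position handling above hinges on a pad/reshape trick. Below is a self-contained sketch of that conversion for checking shapes without the full module; the function name and dummy tensor sizes are mine.

```
import torch
import torch.nn.functional as F

def relative_to_absolute(x):
    # Same trick as _relative_position_to_absolute_position above:
    # [b, h, l, 2*l-1] relative logits -> [b, h, l, l] absolute scores.
    b, h, l, _ = x.size()
    x = F.pad(x, (0, 1))                 # pad last dim: [b, h, l, 2*l]
    x = x.view(b, h, l * 2 * l)          # flatten the last two dims
    x = F.pad(x, (0, l - 1))             # [b, h, 2*l*l + l - 1]
    x = x.view(b, h, l + 1, 2 * l - 1)   # skewed view
    return x[:, :, :l, l - 1:]           # slice out the absolute positions

scores = torch.randn(2, 4, 5, 9)         # l = 5, so 2*l-1 = 9
assert relative_to_absolute(scores).shape == (2, 4, 5, 5)
```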
diff --git a/spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/encoders/psp_encoders.py b/spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/encoders/psp_encoders.py
deleted file mode 100644
index 4bdfa8a5072770967f81ae1f8393b44368ffe42b..0000000000000000000000000000000000000000
--- a/spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/encoders/psp_encoders.py
+++ /dev/null
@@ -1,208 +0,0 @@
-from enum import Enum
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import Conv2d, BatchNorm2d, PReLU, Sequential, Module
-
-from pti.pti_models.e4e.encoders.helpers import get_blocks, bottleneck_IR, bottleneck_IR_SE, _upsample_add
-from pti.pti_models.e4e.stylegan2.model import EqualLinear
-
-
-class ProgressiveStage(Enum):
- WTraining = 0
- Delta1Training = 1
- Delta2Training = 2
- Delta3Training = 3
- Delta4Training = 4
- Delta5Training = 5
- Delta6Training = 6
- Delta7Training = 7
- Delta8Training = 8
- Delta9Training = 9
- Delta10Training = 10
- Delta11Training = 11
- Delta12Training = 12
- Delta13Training = 13
- Delta14Training = 14
- Delta15Training = 15
- Delta16Training = 16
- Delta17Training = 17
- Inference = 18
-
-
-class GradualStyleBlock(Module):
- def __init__(self, in_c, out_c, spatial):
- super(GradualStyleBlock, self).__init__()
- self.out_c = out_c
- self.spatial = spatial
- num_pools = int(np.log2(spatial))
- modules = []
- modules += [Conv2d(in_c, out_c, kernel_size=3, stride=2, padding=1),
- nn.LeakyReLU()]
- for i in range(num_pools - 1):
- modules += [
- Conv2d(out_c, out_c, kernel_size=3, stride=2, padding=1),
- nn.LeakyReLU()
- ]
- self.convs = nn.Sequential(*modules)
- self.linear = EqualLinear(out_c, out_c, lr_mul=1)
-
- def forward(self, x):
- x = self.convs(x)
- x = x.view(-1, self.out_c)
- x = self.linear(x)
- return x
-
-
-class GradualStyleEncoder(Module):
- def __init__(self, num_layers, mode='ir', opts=None):
- super(GradualStyleEncoder, self).__init__()
- assert num_layers in [
- 50, 100, 152], 'num_layers should be 50,100, or 152'
- assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
- blocks = get_blocks(num_layers)
- if mode == 'ir':
- unit_module = bottleneck_IR
- elif mode == 'ir_se':
- unit_module = bottleneck_IR_SE
- self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
- BatchNorm2d(64),
- PReLU(64))
- modules = []
- for block in blocks:
- for bottleneck in block:
- modules.append(unit_module(bottleneck.in_channel,
- bottleneck.depth,
- bottleneck.stride))
- self.body = Sequential(*modules)
-
- self.styles = nn.ModuleList()
- log_size = int(math.log(opts.stylegan_size, 2))
- self.style_count = 2 * log_size - 2
- self.coarse_ind = 3
- self.middle_ind = 7
- for i in range(self.style_count):
- if i < self.coarse_ind:
- style = GradualStyleBlock(512, 512, 16)
- elif i < self.middle_ind:
- style = GradualStyleBlock(512, 512, 32)
- else:
- style = GradualStyleBlock(512, 512, 64)
- self.styles.append(style)
- self.latlayer1 = nn.Conv2d(
- 256, 512, kernel_size=1, stride=1, padding=0)
- self.latlayer2 = nn.Conv2d(
- 128, 512, kernel_size=1, stride=1, padding=0)
-
- def forward(self, x):
- x = self.input_layer(x)
-
- latents = []
- modulelist = list(self.body._modules.values())
- for i, l in enumerate(modulelist):
- x = l(x)
- if i == 6:
- c1 = x
- elif i == 20:
- c2 = x
- elif i == 23:
- c3 = x
-
- for j in range(self.coarse_ind):
- latents.append(self.styles[j](c3))
-
- p2 = _upsample_add(c3, self.latlayer1(c2))
- for j in range(self.coarse_ind, self.middle_ind):
- latents.append(self.styles[j](p2))
-
- p1 = _upsample_add(p2, self.latlayer2(c1))
- for j in range(self.middle_ind, self.style_count):
- latents.append(self.styles[j](p1))
-
- out = torch.stack(latents, dim=1)
- return out
-
-
-class Encoder4Editing(Module):
- def __init__(self, num_layers, mode='ir', opts=None):
- super(Encoder4Editing, self).__init__()
- assert num_layers in [
- 50, 100, 152], 'num_layers should be 50,100, or 152'
- assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
- blocks = get_blocks(num_layers)
- if mode == 'ir':
- unit_module = bottleneck_IR
- elif mode == 'ir_se':
- unit_module = bottleneck_IR_SE
- self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
- BatchNorm2d(64),
- PReLU(64))
- modules = []
- for block in blocks:
- for bottleneck in block:
- modules.append(unit_module(bottleneck.in_channel,
- bottleneck.depth,
- bottleneck.stride))
- self.body = Sequential(*modules)
-
- self.styles = nn.ModuleList()
- log_size = int(math.log(opts.stylegan_size, 2))
- self.style_count = 2 * log_size - 2
- self.coarse_ind = 3
- self.middle_ind = 7
-
- for i in range(self.style_count):
- if i < self.coarse_ind:
- style = GradualStyleBlock(512, 512, 16)
- elif i < self.middle_ind:
- style = GradualStyleBlock(512, 512, 32)
- else:
- style = GradualStyleBlock(512, 512, 64)
- self.styles.append(style)
-
- self.latlayer1 = nn.Conv2d(
- 256, 512, kernel_size=1, stride=1, padding=0)
- self.latlayer2 = nn.Conv2d(
- 128, 512, kernel_size=1, stride=1, padding=0)
-
- self.progressive_stage = ProgressiveStage.Inference
-
- def get_deltas_starting_dimensions(self):
- ''' Get a list of the initial dimension of every delta from which it is applied '''
- return list(range(self.style_count)) # Each dimension has a delta applied to it
-
- def set_progressive_stage(self, new_stage: ProgressiveStage):
- self.progressive_stage = new_stage
- print('Changed progressive stage to: ', new_stage)
-
- def forward(self, x):
- x = self.input_layer(x)
-
- modulelist = list(self.body._modules.values())
- for i, l in enumerate(modulelist):
- x = l(x)
- if i == 6:
- c1 = x
- elif i == 20:
- c2 = x
- elif i == 23:
- c3 = x
-
- # Infer main W and duplicate it
- w0 = self.styles[0](c3)
- w = w0.repeat(self.style_count, 1, 1).permute(1, 0, 2)
- stage = self.progressive_stage.value
- features = c3
- for i in range(1, min(stage + 1, self.style_count)): # Infer additional deltas
- if i == self.coarse_ind:
- # FPN's middle features
- p2 = _upsample_add(c3, self.latlayer1(c2))
- features = p2
- elif i == self.middle_ind:
- # FPN's fine features
- p1 = _upsample_add(p2, self.latlayer2(c1))
- features = p1
- delta_i = self.styles[i](features)
- w[:, i] += delta_i
- return w
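
Editor's note: both encoders rely on `_upsample_add` from e4e's helpers for the FPN-style fusion. A minimal stand-in showing what that step does is sketched below; the bilinear mode and the dummy shapes are assumptions on my part.

```
import torch
import torch.nn.functional as F

def upsample_add(x, y):
    # Upsample the coarser map x to y's spatial size and add it to the
    # 1x1-projected finer map y, as in the p2/p1 computations above.
    _, _, h, w = y.shape
    return F.interpolate(x, size=(h, w), mode="bilinear", align_corners=True) + y

c3 = torch.randn(1, 512, 16, 16)        # coarsest feature map
lat_c2 = torch.randn(1, 512, 32, 32)    # latlayer1(c2) output
p2 = upsample_add(c3, lat_c2)
assert p2.shape == (1, 512, 32, 32)
```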
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/clip_guided_stable_diffusion_img2img.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/clip_guided_stable_diffusion_img2img.py
deleted file mode 100644
index a72a5a127c72785806d4bfe194fb990d4740060e..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/clip_guided_stable_diffusion_img2img.py
+++ /dev/null
@@ -1,496 +0,0 @@
-import inspect
-from typing import List, Optional, Union
-
-import numpy as np
-import PIL
-import torch
-from torch import nn
-from torch.nn import functional as F
-from torchvision import transforms
-from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
-
-from diffusers import (
- AutoencoderKL,
- DDIMScheduler,
- DiffusionPipeline,
- DPMSolverMultistepScheduler,
- LMSDiscreteScheduler,
- PNDMScheduler,
- UNet2DConditionModel,
-)
-from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
-from diffusers.utils import (
- PIL_INTERPOLATION,
- deprecate,
- randn_tensor,
-)
-
-
-EXAMPLE_DOC_STRING = """
- Examples:
- ```
- from io import BytesIO
-
- import requests
- import torch
- from diffusers import DiffusionPipeline
- from PIL import Image
- from transformers import CLIPFeatureExtractor, CLIPModel
-
- feature_extractor = CLIPFeatureExtractor.from_pretrained(
- "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
- )
- clip_model = CLIPModel.from_pretrained(
- "laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16
- )
-
-
- guided_pipeline = DiffusionPipeline.from_pretrained(
- "CompVis/stable-diffusion-v1-4",
- # custom_pipeline="clip_guided_stable_diffusion",
- custom_pipeline="/home/njindal/diffusers/examples/community/clip_guided_stable_diffusion.py",
- clip_model=clip_model,
- feature_extractor=feature_extractor,
- torch_dtype=torch.float16,
- )
- guided_pipeline.enable_attention_slicing()
- guided_pipeline = guided_pipeline.to("cuda")
-
- prompt = "fantasy book cover, full moon, fantasy forest landscape, golden vector elements, fantasy magic, dark light night, intricate, elegant, sharp focus, illustration, highly detailed, digital painting, concept art, matte, art by WLOP and Artgerm and Albert Bierstadt, masterpiece"
-
- url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
-
- response = requests.get(url)
- init_image = Image.open(BytesIO(response.content)).convert("RGB")
-
- image = guided_pipeline(
- prompt=prompt,
- num_inference_steps=30,
- image=init_image,
- strength=0.75,
- guidance_scale=7.5,
- clip_guidance_scale=100,
- num_cutouts=4,
- use_cutouts=False,
- ).images[0]
- display(image)
- ```
-"""
-
-
-def preprocess(image, w, h):
- if isinstance(image, torch.Tensor):
- return image
- elif isinstance(image, PIL.Image.Image):
- image = [image]
-
- if isinstance(image[0], PIL.Image.Image):
- image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
- image = np.concatenate(image, axis=0)
- image = np.array(image).astype(np.float32) / 255.0
- image = image.transpose(0, 3, 1, 2)
- image = 2.0 * image - 1.0
- image = torch.from_numpy(image)
- elif isinstance(image[0], torch.Tensor):
- image = torch.cat(image, dim=0)
- return image
-
-
-class MakeCutouts(nn.Module):
- def __init__(self, cut_size, cut_power=1.0):
- super().__init__()
-
- self.cut_size = cut_size
- self.cut_power = cut_power
-
- def forward(self, pixel_values, num_cutouts):
- sideY, sideX = pixel_values.shape[2:4]
- max_size = min(sideX, sideY)
- min_size = min(sideX, sideY, self.cut_size)
- cutouts = []
- for _ in range(num_cutouts):
- size = int(torch.rand([]) ** self.cut_power * (max_size - min_size) + min_size)
- offsetx = torch.randint(0, sideX - size + 1, ())
- offsety = torch.randint(0, sideY - size + 1, ())
- cutout = pixel_values[:, :, offsety : offsety + size, offsetx : offsetx + size]
- cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size))
- return torch.cat(cutouts)
-
-
-def spherical_dist_loss(x, y):
- x = F.normalize(x, dim=-1)
- y = F.normalize(y, dim=-1)
- return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
-
-
-def set_requires_grad(model, value):
- for param in model.parameters():
- param.requires_grad = value
-
-
-class CLIPGuidedStableDiffusion(DiffusionPipeline):
- """CLIP guided stable diffusion based on the amazing repo by @crowsonkb and @Jack000
- - https://github.com/Jack000/glid-3-xl
- - https://github.dev/crowsonkb/k-diffusion
- """
-
- def __init__(
- self,
- vae: AutoencoderKL,
- text_encoder: CLIPTextModel,
- clip_model: CLIPModel,
- tokenizer: CLIPTokenizer,
- unet: UNet2DConditionModel,
- scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
- feature_extractor: CLIPFeatureExtractor,
- ):
- super().__init__()
- self.register_modules(
- vae=vae,
- text_encoder=text_encoder,
- clip_model=clip_model,
- tokenizer=tokenizer,
- unet=unet,
- scheduler=scheduler,
- feature_extractor=feature_extractor,
- )
-
- self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
- self.cut_out_size = (
- feature_extractor.size
- if isinstance(feature_extractor.size, int)
- else feature_extractor.size["shortest_edge"]
- )
- self.make_cutouts = MakeCutouts(self.cut_out_size)
-
- set_requires_grad(self.text_encoder, False)
- set_requires_grad(self.clip_model, False)
-
- def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
- if slice_size == "auto":
- # half the attention head size is usually a good trade-off between
- # speed and memory
- slice_size = self.unet.config.attention_head_dim // 2
- self.unet.set_attention_slice(slice_size)
-
- def disable_attention_slicing(self):
- self.enable_attention_slicing(None)
-
- def freeze_vae(self):
- set_requires_grad(self.vae, False)
-
- def unfreeze_vae(self):
- set_requires_grad(self.vae, True)
-
- def freeze_unet(self):
- set_requires_grad(self.unet, False)
-
- def unfreeze_unet(self):
- set_requires_grad(self.unet, True)
-
- def get_timesteps(self, num_inference_steps, strength, device):
- # get the original timestep using init_timestep
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
-
- t_start = max(num_inference_steps - init_timestep, 0)
- timesteps = self.scheduler.timesteps[t_start:]
-
- return timesteps, num_inference_steps - t_start
-
- def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
- if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
- raise ValueError(
- f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
- )
-
- image = image.to(device=device, dtype=dtype)
-
- batch_size = batch_size * num_images_per_prompt
- if isinstance(generator, list) and len(generator) != batch_size:
- raise ValueError(
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
- )
-
- if isinstance(generator, list):
- init_latents = [
- self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
- ]
- init_latents = torch.cat(init_latents, dim=0)
- else:
- init_latents = self.vae.encode(image).latent_dist.sample(generator)
-
- init_latents = self.vae.config.scaling_factor * init_latents
-
- if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
- # expand init_latents for batch_size
- deprecation_message = (
- f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
- " images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
- " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
- " your script to pass as many initial images as text prompts to suppress this warning."
- )
- deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
- additional_image_per_prompt = batch_size // init_latents.shape[0]
- init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
- elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
- raise ValueError(
- f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
- )
- else:
- init_latents = torch.cat([init_latents], dim=0)
-
- shape = init_latents.shape
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
-
- # get latents
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
- latents = init_latents
-
- return latents
-
- @torch.enable_grad()
- def cond_fn(
- self,
- latents,
- timestep,
- index,
- text_embeddings,
- noise_pred_original,
- text_embeddings_clip,
- clip_guidance_scale,
- num_cutouts,
- use_cutouts=True,
- ):
- latents = latents.detach().requires_grad_()
-
- latent_model_input = self.scheduler.scale_model_input(latents, timestep)
-
- # predict the noise residual
- noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
-
- if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
- alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
- beta_prod_t = 1 - alpha_prod_t
- # compute predicted original sample from predicted noise also called
- # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
- pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
-
- fac = torch.sqrt(beta_prod_t)
- sample = pred_original_sample * (fac) + latents * (1 - fac)
- elif isinstance(self.scheduler, LMSDiscreteScheduler):
- sigma = self.scheduler.sigmas[index]
- sample = latents - sigma * noise_pred
- else:
- raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
-
- sample = 1 / self.vae.config.scaling_factor * sample
- image = self.vae.decode(sample).sample
- image = (image / 2 + 0.5).clamp(0, 1)
-
- if use_cutouts:
- image = self.make_cutouts(image, num_cutouts)
- else:
- image = transforms.Resize(self.cut_out_size)(image)
- image = self.normalize(image).to(latents.dtype)
-
- image_embeddings_clip = self.clip_model.get_image_features(image)
- image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
-
- if use_cutouts:
- dists = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip)
- dists = dists.view([num_cutouts, sample.shape[0], -1])
- loss = dists.sum(2).mean(0).sum() * clip_guidance_scale
- else:
- loss = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip).mean() * clip_guidance_scale
-
- grads = -torch.autograd.grad(loss, latents)[0]
-
- if isinstance(self.scheduler, LMSDiscreteScheduler):
- latents = latents.detach() + grads * (sigma**2)
- noise_pred = noise_pred_original
- else:
- noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
- return noise_pred, latents
-
- @torch.no_grad()
- def __call__(
- self,
- prompt: Union[str, List[str]],
- height: Optional[int] = 512,
- width: Optional[int] = 512,
- image: Union[torch.FloatTensor, PIL.Image.Image] = None,
- strength: float = 0.8,
- num_inference_steps: Optional[int] = 50,
- guidance_scale: Optional[float] = 7.5,
- num_images_per_prompt: Optional[int] = 1,
- eta: float = 0.0,
- clip_guidance_scale: Optional[float] = 100,
- clip_prompt: Optional[Union[str, List[str]]] = None,
- num_cutouts: Optional[int] = 4,
- use_cutouts: Optional[bool] = True,
- generator: Optional[torch.Generator] = None,
- latents: Optional[torch.FloatTensor] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- ):
- if isinstance(prompt, str):
- batch_size = 1
- elif isinstance(prompt, list):
- batch_size = len(prompt)
- else:
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
- if height % 8 != 0 or width % 8 != 0:
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
- # get prompt text embeddings
- text_input = self.tokenizer(
- prompt,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- )
- text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
- # duplicate text embeddings for each generation per prompt
- text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
-
- # set timesteps
- accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
- extra_set_kwargs = {}
- if accepts_offset:
- extra_set_kwargs["offset"] = 1
-
- self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
- # Some schedulers like PNDM have timesteps as arrays
- # It's more optimized to move all timesteps to correct device beforehand
- self.scheduler.timesteps.to(self.device)
-
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
-
- # Preprocess image
- image = preprocess(image, width, height)
- latents = self.prepare_latents(
- image, latent_timestep, batch_size, num_images_per_prompt, text_embeddings.dtype, self.device, generator
- )
-
- if clip_guidance_scale > 0:
- if clip_prompt is not None:
- clip_text_input = self.tokenizer(
- clip_prompt,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- ).input_ids.to(self.device)
- else:
- clip_text_input = text_input.input_ids.to(self.device)
- text_embeddings_clip = self.clip_model.get_text_features(clip_text_input)
- text_embeddings_clip = text_embeddings_clip / text_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
- # duplicate text embeddings clip for each generation per prompt
- text_embeddings_clip = text_embeddings_clip.repeat_interleave(num_images_per_prompt, dim=0)
-
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
- # corresponds to doing no classifier free guidance.
- do_classifier_free_guidance = guidance_scale > 1.0
- # get unconditional embeddings for classifier free guidance
- if do_classifier_free_guidance:
- max_length = text_input.input_ids.shape[-1]
- uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
- uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
- # duplicate unconditional embeddings for each generation per prompt
- uncond_embeddings = uncond_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
-
- # For classifier free guidance, we need to do two forward passes.
- # Here we concatenate the unconditional and text embeddings into a single batch
- # to avoid doing two forward passes
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
-
- # get the initial random noise unless the user supplied it
-
- # Unlike in other pipelines, latents need to be generated in the target device
- # for 1-to-1 results reproducibility with the CompVis implementation.
- # However this currently doesn't work in `mps`.
- latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
- latents_dtype = text_embeddings.dtype
- if latents is None:
- if self.device.type == "mps":
- # randn does not work reproducibly on mps
- latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
- self.device
- )
- else:
- latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
- else:
- if latents.shape != latents_shape:
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
- latents = latents.to(self.device)
-
- # scale the initial noise by the standard deviation required by the scheduler
- latents = latents * self.scheduler.init_noise_sigma
-
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
- # and should be between [0, 1]
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
- extra_step_kwargs = {}
- if accepts_eta:
- extra_step_kwargs["eta"] = eta
-
- # check if the scheduler accepts generator
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
- if accepts_generator:
- extra_step_kwargs["generator"] = generator
-
- with self.progress_bar(total=num_inference_steps):
- for i, t in enumerate(timesteps):
- # expand the latents if we are doing classifier free guidance
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
- # predict the noise residual
- noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
-
- # perform classifier free guidance
- if do_classifier_free_guidance:
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
- # perform clip guidance
- if clip_guidance_scale > 0:
- text_embeddings_for_guidance = (
- text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
- )
- noise_pred, latents = self.cond_fn(
- latents,
- t,
- i,
- text_embeddings_for_guidance,
- noise_pred,
- text_embeddings_clip,
- clip_guidance_scale,
- num_cutouts,
- use_cutouts,
- )
-
- # compute the previous noisy sample x_t -> x_t-1
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
-
- # scale and decode the image latents with vae
- latents = 1 / self.vae.config.scaling_factor * latents
- image = self.vae.decode(latents).sample
-
- image = (image / 2 + 0.5).clamp(0, 1)
- image = image.cpu().permute(0, 2, 3, 1).numpy()
-
- if output_type == "pil":
- image = self.numpy_to_pil(image)
-
- if not return_dict:
- return (image, None)
-
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
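
Editor's note: as a reading aid for get_timesteps above, here is the strength-to-timestep arithmetic in isolation. Plain numbers, no scheduler; the concrete values are just an example.

```
# How `strength` selects the img2img denoising window in get_timesteps():
num_inference_steps = 50
strength = 0.75

init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 37
t_start = max(num_inference_steps - init_timestep, 0)                          # 13

# The pipeline then iterates scheduler.timesteps[t_start:], i.e. the last 37
# of 50 steps: higher strength means more noise is added to the init image
# and more denoising steps actually run.
print(init_timestep, t_start)  # 37 13
```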
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_original_t2i_adapter.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_original_t2i_adapter.py
deleted file mode 100644
index 01a1fecf4e4b4a458cd1d866786cc7c975ed8ad2..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_original_t2i_adapter.py
+++ /dev/null
@@ -1,250 +0,0 @@
-# coding=utf-8
-# Copyright 2023 The HuggingFace Inc. team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Conversion script for the T2I-Adapter checkpoints.
-"""
-
-import argparse
-
-import torch
-
-from diffusers import T2IAdapter
-
-
-def convert_adapter(src_state, in_channels):
- original_body_length = max([int(x.split(".")[1]) for x in src_state.keys() if "body." in x]) + 1
-
- assert original_body_length == 8
-
- # (0, 1) -> channels 1
- assert src_state["body.0.block1.weight"].shape == (320, 320, 3, 3)
-
- # (2, 3) -> channels 2
- assert src_state["body.2.in_conv.weight"].shape == (640, 320, 1, 1)
-
- # (4, 5) -> channels 3
- assert src_state["body.4.in_conv.weight"].shape == (1280, 640, 1, 1)
-
- # (6, 7) -> channels 4
- assert src_state["body.6.block1.weight"].shape == (1280, 1280, 3, 3)
-
- res_state = {
- "adapter.conv_in.weight": src_state.pop("conv_in.weight"),
- "adapter.conv_in.bias": src_state.pop("conv_in.bias"),
- # 0.resnets.0
- "adapter.body.0.resnets.0.block1.weight": src_state.pop("body.0.block1.weight"),
- "adapter.body.0.resnets.0.block1.bias": src_state.pop("body.0.block1.bias"),
- "adapter.body.0.resnets.0.block2.weight": src_state.pop("body.0.block2.weight"),
- "adapter.body.0.resnets.0.block2.bias": src_state.pop("body.0.block2.bias"),
- # 0.resnets.1
- "adapter.body.0.resnets.1.block1.weight": src_state.pop("body.1.block1.weight"),
- "adapter.body.0.resnets.1.block1.bias": src_state.pop("body.1.block1.bias"),
- "adapter.body.0.resnets.1.block2.weight": src_state.pop("body.1.block2.weight"),
- "adapter.body.0.resnets.1.block2.bias": src_state.pop("body.1.block2.bias"),
- # 1
- "adapter.body.1.in_conv.weight": src_state.pop("body.2.in_conv.weight"),
- "adapter.body.1.in_conv.bias": src_state.pop("body.2.in_conv.bias"),
- # 1.resnets.0
- "adapter.body.1.resnets.0.block1.weight": src_state.pop("body.2.block1.weight"),
- "adapter.body.1.resnets.0.block1.bias": src_state.pop("body.2.block1.bias"),
- "adapter.body.1.resnets.0.block2.weight": src_state.pop("body.2.block2.weight"),
- "adapter.body.1.resnets.0.block2.bias": src_state.pop("body.2.block2.bias"),
- # 1.resnets.1
- "adapter.body.1.resnets.1.block1.weight": src_state.pop("body.3.block1.weight"),
- "adapter.body.1.resnets.1.block1.bias": src_state.pop("body.3.block1.bias"),
- "adapter.body.1.resnets.1.block2.weight": src_state.pop("body.3.block2.weight"),
- "adapter.body.1.resnets.1.block2.bias": src_state.pop("body.3.block2.bias"),
- # 2
- "adapter.body.2.in_conv.weight": src_state.pop("body.4.in_conv.weight"),
- "adapter.body.2.in_conv.bias": src_state.pop("body.4.in_conv.bias"),
- # 2.resnets.0
- "adapter.body.2.resnets.0.block1.weight": src_state.pop("body.4.block1.weight"),
- "adapter.body.2.resnets.0.block1.bias": src_state.pop("body.4.block1.bias"),
- "adapter.body.2.resnets.0.block2.weight": src_state.pop("body.4.block2.weight"),
- "adapter.body.2.resnets.0.block2.bias": src_state.pop("body.4.block2.bias"),
- # 2.resnets.1
- "adapter.body.2.resnets.1.block1.weight": src_state.pop("body.5.block1.weight"),
- "adapter.body.2.resnets.1.block1.bias": src_state.pop("body.5.block1.bias"),
- "adapter.body.2.resnets.1.block2.weight": src_state.pop("body.5.block2.weight"),
- "adapter.body.2.resnets.1.block2.bias": src_state.pop("body.5.block2.bias"),
- # 3.resnets.0
- "adapter.body.3.resnets.0.block1.weight": src_state.pop("body.6.block1.weight"),
- "adapter.body.3.resnets.0.block1.bias": src_state.pop("body.6.block1.bias"),
- "adapter.body.3.resnets.0.block2.weight": src_state.pop("body.6.block2.weight"),
- "adapter.body.3.resnets.0.block2.bias": src_state.pop("body.6.block2.bias"),
- # 3.resnets.1
- "adapter.body.3.resnets.1.block1.weight": src_state.pop("body.7.block1.weight"),
- "adapter.body.3.resnets.1.block1.bias": src_state.pop("body.7.block1.bias"),
- "adapter.body.3.resnets.1.block2.weight": src_state.pop("body.7.block2.weight"),
- "adapter.body.3.resnets.1.block2.bias": src_state.pop("body.7.block2.bias"),
- }
-
- assert len(src_state) == 0
-
- adapter = T2IAdapter(in_channels=in_channels, adapter_type="full_adapter")
-
- adapter.load_state_dict(res_state)
-
- return adapter
-
-
-def convert_light_adapter(src_state):
- original_body_length = max([int(x.split(".")[1]) for x in src_state.keys() if "body." in x]) + 1
-
- assert original_body_length == 4
-
- res_state = {
- # body.0.in_conv
- "adapter.body.0.in_conv.weight": src_state.pop("body.0.in_conv.weight"),
- "adapter.body.0.in_conv.bias": src_state.pop("body.0.in_conv.bias"),
- # body.0.resnets.0
- "adapter.body.0.resnets.0.block1.weight": src_state.pop("body.0.body.0.block1.weight"),
- "adapter.body.0.resnets.0.block1.bias": src_state.pop("body.0.body.0.block1.bias"),
- "adapter.body.0.resnets.0.block2.weight": src_state.pop("body.0.body.0.block2.weight"),
- "adapter.body.0.resnets.0.block2.bias": src_state.pop("body.0.body.0.block2.bias"),
- # body.0.resnets.1
- "adapter.body.0.resnets.1.block1.weight": src_state.pop("body.0.body.1.block1.weight"),
- "adapter.body.0.resnets.1.block1.bias": src_state.pop("body.0.body.1.block1.bias"),
- "adapter.body.0.resnets.1.block2.weight": src_state.pop("body.0.body.1.block2.weight"),
- "adapter.body.0.resnets.1.block2.bias": src_state.pop("body.0.body.1.block2.bias"),
- # body.0.resnets.2
- "adapter.body.0.resnets.2.block1.weight": src_state.pop("body.0.body.2.block1.weight"),
- "adapter.body.0.resnets.2.block1.bias": src_state.pop("body.0.body.2.block1.bias"),
- "adapter.body.0.resnets.2.block2.weight": src_state.pop("body.0.body.2.block2.weight"),
- "adapter.body.0.resnets.2.block2.bias": src_state.pop("body.0.body.2.block2.bias"),
- # body.0.resnets.3
- "adapter.body.0.resnets.3.block1.weight": src_state.pop("body.0.body.3.block1.weight"),
- "adapter.body.0.resnets.3.block1.bias": src_state.pop("body.0.body.3.block1.bias"),
- "adapter.body.0.resnets.3.block2.weight": src_state.pop("body.0.body.3.block2.weight"),
- "adapter.body.0.resnets.3.block2.bias": src_state.pop("body.0.body.3.block2.bias"),
- # body.0.out_conv
- "adapter.body.0.out_conv.weight": src_state.pop("body.0.out_conv.weight"),
- "adapter.body.0.out_conv.bias": src_state.pop("body.0.out_conv.bias"),
- # body.1.in_conv
- "adapter.body.1.in_conv.weight": src_state.pop("body.1.in_conv.weight"),
- "adapter.body.1.in_conv.bias": src_state.pop("body.1.in_conv.bias"),
- # body.1.resnets.0
- "adapter.body.1.resnets.0.block1.weight": src_state.pop("body.1.body.0.block1.weight"),
- "adapter.body.1.resnets.0.block1.bias": src_state.pop("body.1.body.0.block1.bias"),
- "adapter.body.1.resnets.0.block2.weight": src_state.pop("body.1.body.0.block2.weight"),
- "adapter.body.1.resnets.0.block2.bias": src_state.pop("body.1.body.0.block2.bias"),
- # body.1.resnets.1
- "adapter.body.1.resnets.1.block1.weight": src_state.pop("body.1.body.1.block1.weight"),
- "adapter.body.1.resnets.1.block1.bias": src_state.pop("body.1.body.1.block1.bias"),
- "adapter.body.1.resnets.1.block2.weight": src_state.pop("body.1.body.1.block2.weight"),
- "adapter.body.1.resnets.1.block2.bias": src_state.pop("body.1.body.1.block2.bias"),
- # body.1.body.2
- "adapter.body.1.resnets.2.block1.weight": src_state.pop("body.1.body.2.block1.weight"),
- "adapter.body.1.resnets.2.block1.bias": src_state.pop("body.1.body.2.block1.bias"),
- "adapter.body.1.resnets.2.block2.weight": src_state.pop("body.1.body.2.block2.weight"),
- "adapter.body.1.resnets.2.block2.bias": src_state.pop("body.1.body.2.block2.bias"),
- # body.1.body.3
- "adapter.body.1.resnets.3.block1.weight": src_state.pop("body.1.body.3.block1.weight"),
- "adapter.body.1.resnets.3.block1.bias": src_state.pop("body.1.body.3.block1.bias"),
- "adapter.body.1.resnets.3.block2.weight": src_state.pop("body.1.body.3.block2.weight"),
- "adapter.body.1.resnets.3.block2.bias": src_state.pop("body.1.body.3.block2.bias"),
- # body.1.out_conv
- "adapter.body.1.out_conv.weight": src_state.pop("body.1.out_conv.weight"),
- "adapter.body.1.out_conv.bias": src_state.pop("body.1.out_conv.bias"),
- # body.2.in_conv
- "adapter.body.2.in_conv.weight": src_state.pop("body.2.in_conv.weight"),
- "adapter.body.2.in_conv.bias": src_state.pop("body.2.in_conv.bias"),
- # body.2.body.0
- "adapter.body.2.resnets.0.block1.weight": src_state.pop("body.2.body.0.block1.weight"),
- "adapter.body.2.resnets.0.block1.bias": src_state.pop("body.2.body.0.block1.bias"),
- "adapter.body.2.resnets.0.block2.weight": src_state.pop("body.2.body.0.block2.weight"),
- "adapter.body.2.resnets.0.block2.bias": src_state.pop("body.2.body.0.block2.bias"),
- # body.2.body.1
- "adapter.body.2.resnets.1.block1.weight": src_state.pop("body.2.body.1.block1.weight"),
- "adapter.body.2.resnets.1.block1.bias": src_state.pop("body.2.body.1.block1.bias"),
- "adapter.body.2.resnets.1.block2.weight": src_state.pop("body.2.body.1.block2.weight"),
- "adapter.body.2.resnets.1.block2.bias": src_state.pop("body.2.body.1.block2.bias"),
- # body.2.body.2
- "adapter.body.2.resnets.2.block1.weight": src_state.pop("body.2.body.2.block1.weight"),
- "adapter.body.2.resnets.2.block1.bias": src_state.pop("body.2.body.2.block1.bias"),
- "adapter.body.2.resnets.2.block2.weight": src_state.pop("body.2.body.2.block2.weight"),
- "adapter.body.2.resnets.2.block2.bias": src_state.pop("body.2.body.2.block2.bias"),
- # body.2.body.3
- "adapter.body.2.resnets.3.block1.weight": src_state.pop("body.2.body.3.block1.weight"),
- "adapter.body.2.resnets.3.block1.bias": src_state.pop("body.2.body.3.block1.bias"),
- "adapter.body.2.resnets.3.block2.weight": src_state.pop("body.2.body.3.block2.weight"),
- "adapter.body.2.resnets.3.block2.bias": src_state.pop("body.2.body.3.block2.bias"),
- # body.2.out_conv
- "adapter.body.2.out_conv.weight": src_state.pop("body.2.out_conv.weight"),
- "adapter.body.2.out_conv.bias": src_state.pop("body.2.out_conv.bias"),
- # body.3.in_conv
- "adapter.body.3.in_conv.weight": src_state.pop("body.3.in_conv.weight"),
- "adapter.body.3.in_conv.bias": src_state.pop("body.3.in_conv.bias"),
- # body.3.body.0
- "adapter.body.3.resnets.0.block1.weight": src_state.pop("body.3.body.0.block1.weight"),
- "adapter.body.3.resnets.0.block1.bias": src_state.pop("body.3.body.0.block1.bias"),
- "adapter.body.3.resnets.0.block2.weight": src_state.pop("body.3.body.0.block2.weight"),
- "adapter.body.3.resnets.0.block2.bias": src_state.pop("body.3.body.0.block2.bias"),
- # body.3.body.1
- "adapter.body.3.resnets.1.block1.weight": src_state.pop("body.3.body.1.block1.weight"),
- "adapter.body.3.resnets.1.block1.bias": src_state.pop("body.3.body.1.block1.bias"),
- "adapter.body.3.resnets.1.block2.weight": src_state.pop("body.3.body.1.block2.weight"),
- "adapter.body.3.resnets.1.block2.bias": src_state.pop("body.3.body.1.block2.bias"),
- # body.3.body.2
- "adapter.body.3.resnets.2.block1.weight": src_state.pop("body.3.body.2.block1.weight"),
- "adapter.body.3.resnets.2.block1.bias": src_state.pop("body.3.body.2.block1.bias"),
- "adapter.body.3.resnets.2.block2.weight": src_state.pop("body.3.body.2.block2.weight"),
- "adapter.body.3.resnets.2.block2.bias": src_state.pop("body.3.body.2.block2.bias"),
- # body.3.body.3
- "adapter.body.3.resnets.3.block1.weight": src_state.pop("body.3.body.3.block1.weight"),
- "adapter.body.3.resnets.3.block1.bias": src_state.pop("body.3.body.3.block1.bias"),
- "adapter.body.3.resnets.3.block2.weight": src_state.pop("body.3.body.3.block2.weight"),
- "adapter.body.3.resnets.3.block2.bias": src_state.pop("body.3.body.3.block2.bias"),
- # body.3.out_conv
- "adapter.body.3.out_conv.weight": src_state.pop("body.3.out_conv.weight"),
- "adapter.body.3.out_conv.bias": src_state.pop("body.3.out_conv.bias"),
- }
-
- assert len(src_state) == 0
-
- adapter = T2IAdapter(in_channels=3, channels=[320, 640, 1280], num_res_blocks=4, adapter_type="light_adapter")
-
- adapter.load_state_dict(res_state)
-
- return adapter
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
-
- parser.add_argument(
- "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
- )
- parser.add_argument(
- "--output_path", default=None, type=str, required=True, help="Path to the store the result checkpoint."
- )
- parser.add_argument(
- "--is_adapter_light",
- action="store_true",
- help="Whether the checkpoint comes from the Adapter-Light architecture, e.g. a color adapter.",
- )
- parser.add_argument("--in_channels", required=False, type=int, help="Input channels for non-light adapter")
-
- args = parser.parse_args()
- src_state = torch.load(args.checkpoint_path)
-
- if args.is_adapter_light:
- adapter = convert_light_adapter(src_state)
- else:
- if args.in_channels is None:
- raise ValueError("set `--in_channels=`")
- adapter = convert_adapter(src_state, args.in_channels)
-
- adapter.save_pretrained(args.output_path)
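
Editor's note: the explicit key mapping in convert_adapter follows a regular pattern (original body blocks 2i and 2i+1 become resnets 0 and 1 of stage i, with stages 1 and 2 adding a 1x1 in_conv). Below is a sketch of the same remapping done programmatically; the function name is mine, and it is only meant to mirror the full-adapter dict above.

```
def remap_full_adapter_keys(src_state):
    # Programmatic version of the literal dict in convert_adapter().
    res_state = {
        "adapter.conv_in.weight": src_state.pop("conv_in.weight"),
        "adapter.conv_in.bias": src_state.pop("conv_in.bias"),
    }
    for old_idx in range(8):
        stage, resnet = divmod(old_idx, 2)
        for p in ("weight", "bias"):
            in_conv_key = f"body.{old_idx}.in_conv.{p}"
            if resnet == 0 and in_conv_key in src_state:
                # Only stages 1 and 2 carry an in_conv for the channel change.
                res_state[f"adapter.body.{stage}.in_conv.{p}"] = src_state.pop(in_conv_key)
            for block in ("block1", "block2"):
                res_state[f"adapter.body.{stage}.resnets.{resnet}.{block}.{p}"] = \
                    src_state.pop(f"body.{old_idx}.{block}.{p}")
    assert not src_state, "unexpected leftover keys"
    return res_state
```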
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/dual_transformer_2d.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/dual_transformer_2d.py
deleted file mode 100644
index 3db7e73ca6afc5fa7c67c1902d79e67c1aa728bc..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/dual_transformer_2d.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import Optional
-
-from torch import nn
-
-from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
-
-
-class DualTransformer2DModel(nn.Module):
- """
- Dual transformer wrapper that combines two `Transformer2DModel`s for mixed inference.
-
- Parameters:
- num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
- attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
- in_channels (`int`, *optional*):
- Pass if the input is continuous. The number of channels in the input and output.
- num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
- dropout (`float`, *optional*, defaults to 0.1): The dropout probability to use.
- cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.
- sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.
- Note that this is fixed at training time as it is used for learning a number of position embeddings. See
- `ImagePositionalEmbeddings`.
- num_vector_embeds (`int`, *optional*):
- Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.
- Includes the class for the masked latent pixel.
- activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
- num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`.
- The number of diffusion steps used during training. Note that this is fixed at training time as it is used
- to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for
- up to but not more than steps than `num_embeds_ada_norm`.
- attention_bias (`bool`, *optional*):
- Configure if the TransformerBlocks' attention should contain a bias parameter.
- """
-
- def __init__(
- self,
- num_attention_heads: int = 16,
- attention_head_dim: int = 88,
- in_channels: Optional[int] = None,
- num_layers: int = 1,
- dropout: float = 0.0,
- norm_num_groups: int = 32,
- cross_attention_dim: Optional[int] = None,
- attention_bias: bool = False,
- sample_size: Optional[int] = None,
- num_vector_embeds: Optional[int] = None,
- activation_fn: str = "geglu",
- num_embeds_ada_norm: Optional[int] = None,
- ):
- super().__init__()
- self.transformers = nn.ModuleList(
- [
- Transformer2DModel(
- num_attention_heads=num_attention_heads,
- attention_head_dim=attention_head_dim,
- in_channels=in_channels,
- num_layers=num_layers,
- dropout=dropout,
- norm_num_groups=norm_num_groups,
- cross_attention_dim=cross_attention_dim,
- attention_bias=attention_bias,
- sample_size=sample_size,
- num_vector_embeds=num_vector_embeds,
- activation_fn=activation_fn,
- num_embeds_ada_norm=num_embeds_ada_norm,
- )
- for _ in range(2)
- ]
- )
-
- # Variables that can be set by a pipeline:
-
- # The ratio of transformer1 to transformer2's output states to be combined during inference
- self.mix_ratio = 0.5
-
- # The shape of `encoder_hidden_states` is expected to be
- # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
- self.condition_lengths = [77, 257]
-
- # Which transformer to use to encode which condition.
- # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
- self.transformer_index_for_condition = [1, 0]
-
- def forward(
- self,
- hidden_states,
- encoder_hidden_states,
- timestep=None,
- attention_mask=None,
- cross_attention_kwargs=None,
- return_dict: bool = True,
- ):
- """
- Args:
- hidden_states ( When discrete, `torch.LongTensor` of shape `(batch size, num latent pixels)`.
- When continuous, `torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input
- hidden_states
- encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):
- Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
- self-attention.
- timestep ( `torch.long`, *optional*):
- Optional timestep to be applied as an embedding in AdaLayerNorm's. Used to indicate denoising step.
- attention_mask (`torch.FloatTensor`, *optional*):
- Optional attention mask to be applied in Attention
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.
-
- Returns:
- [`~models.transformer_2d.Transformer2DModelOutput`] or `tuple`:
- [`~models.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a `tuple`. When
- returning a tuple, the first element is the sample tensor.
- """
- input_states = hidden_states
-
- encoded_states = []
- tokens_start = 0
- # attention_mask is not used yet
- for i in range(2):
- # for each of the two transformers, pass the corresponding condition tokens
- condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
- transformer_index = self.transformer_index_for_condition[i]
- encoded_state = self.transformers[transformer_index](
- input_states,
- encoder_hidden_states=condition_state,
- timestep=timestep,
- cross_attention_kwargs=cross_attention_kwargs,
- return_dict=False,
- )[0]
- encoded_states.append(encoded_state - input_states)
- tokens_start += self.condition_lengths[i]
-
- output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
- output_states = output_states + input_states
-
- if not return_dict:
- return (output_states,)
-
- return Transformer2DModelOutput(sample=output_states)
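
Editor's note: a standalone sketch of the condition-splitting and residual-mixing logic in forward(), with the two Transformer2DModels replaced by trivial placeholders so the bookkeeping can be run without weights. Shapes and the placeholder functions are illustrative only.

```
import torch

mix_ratio = 0.5
condition_lengths = [77, 257]
transformer_index_for_condition = [1, 0]

hidden_states = torch.randn(2, 16, 320)                           # (batch, tokens, features)
encoder_hidden_states = torch.randn(2, sum(condition_lengths), 768)

# Placeholders standing in for Transformer2DModel(...)[0]:
fake_transformers = [
    lambda states, cond: states + 0.01 * cond.mean(),
    lambda states, cond: states - 0.01 * cond.mean(),
]

encoded_states, tokens_start = [], 0
for i in range(2):
    condition_state = encoder_hidden_states[:, tokens_start:tokens_start + condition_lengths[i]]
    out = fake_transformers[transformer_index_for_condition[i]](hidden_states, condition_state)
    encoded_states.append(out - hidden_states)                     # keep only the residual
    tokens_start += condition_lengths[i]

output_states = encoded_states[0] * mix_ratio + encoded_states[1] * (1 - mix_ratio) + hidden_states
print(output_states.shape)  # torch.Size([2, 16, 320])
```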
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_512x1024_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_512x1024_80k_cityscapes.py
deleted file mode 100644
index d2feeef7e982550481365f8187cb1a50f0fafcc9..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_512x1024_80k_cityscapes.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = [
- '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
- '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
-]
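
Editor's note: a rough sketch of how such a composed config is consumed, assuming an mmcv release where `Config` lives at `mmcv.Config` (newer stacks use `mmengine.Config` instead). The printed fields are expectations, not verified output.

```
from mmcv import Config

cfg = Config.fromfile("configs/encnet/encnet_r50-d8_512x1024_80k_cityscapes.py")
# The four _base_ files above are merged into one dict-like config at load time.
print(cfg.model.type)   # should come from the encnet base model file
print(cfg.runner)       # should come from the 80k schedule file
```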
diff --git a/spaces/Anonumous/RuImageCaptioning/README.md b/spaces/Anonumous/RuImageCaptioning/README.md
deleted file mode 100644
index 313354e38369e9ea72d746d8c641ff15d788ec3a..0000000000000000000000000000000000000000
--- a/spaces/Anonumous/RuImageCaptioning/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: RuImageCaptioning
-emoji: 👁
-colorFrom: yellow
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/utils/change_place.py b/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/utils/change_place.py
deleted file mode 100644
index 121ff17830addba408f304f21ef091b2f8d51e19..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/utils/change_place.py
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/python
-#****************************************************************#
-# ScriptName: change_place.py
-# Author: Anonymous_123
-# Create Date: 2022-08-26 14:13
-# Modify Author: Anonymous_123
-# Modify Date: 2022-08-26 14:13
-# Function:
-#***************************************************************#
-
-import os
-import torch
-import torch.nn as nn
-from torchvision.transforms import functional as TF
-import cv2
-from PIL import Image
-import numpy as np
-import random
-# random.seed(0)
-import pdb
-import imutils
-from tqdm import tqdm
-
-def change_place(img, mask, bbox, invert_mask):
- '''
- img: N,C,H,W
- '''
- if invert_mask:
- mask = 1-mask
-
- device = img.device
- x,y,new_x,new_y,w,h = bbox
-
- img_ori = img.clone()
- mask_ori = mask.clone()
- img_ori = img_ori.to(device)
- mask_ori = mask_ori.to(device)
-
- img[:,:, new_y:new_y+h, new_x:new_x+w] = img_ori[:,:, y:y+h, x:x+w]
- mask_new = torch.zeros(mask.shape).to(device)
- mask_new[:,:, new_y:new_y+h, new_x:new_x+w] = mask_ori[:,:, y:y+h, x:x+w]
- mask_ = mask_new > 0.5
- img = img*mask_ + (~mask_)*img_ori
-
- if invert_mask:
- mask_new = 1 - mask_new
-
- return img, mask_new
-
-def find_bbox(mask):
- mask_copy = mask.copy()
-
- contours, _ = cv2.findContours(mask[:,:,0],cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
- bbox = []
- max_area = 0
- for cnt in contours:
- x, y, w, h = cv2.boundingRect(cnt)
- cv2.rectangle(mask_copy, (x, y), (x + w, y + h), (0, 255, 0), 2)
- if max_area < w*h:
- max_area = w*h
- bbox = [x,y,w,h]
-
- if bbox == []:
- return None
- else:
- H,W,C = mask.shape
- x,y,w,h = bbox
- new_x = random.randint(0, W-w)
- new_y = random.randint(0, H-h)
- return [x, y, new_x, new_y, w,h]
-
-
-if __name__ == '__main__':
- mask_path = 'n01440764/ILSVRC2012_val_00000293.png'
-
- ori_img_path_root = 'ImageNet-S/ImageNetS919/validation/'
- outpainting_root = 'TFill/results/imagenet_2/test_latest/img_ref_out/'
- padding_root = 'ImageNet-S/ImageNetS919/validation-size-0.05-padding-4901/'
- mask_root = 'ImageNet-S/ImageNetS919/validation-segmentation-label-mask/'
-
-
- imgs = os.listdir(outpainting_root)
-
- shape = (256,256)
- for cls in tqdm(os.listdir(mask_root)):
- for img_name in os.listdir(os.path.join(mask_root, cls)):
- if not img_name.split('.')[0]+'_0.png' in imgs:
- continue
- img_path = os.path.join(ori_img_path_root, cls, img_name.split('.')[0]+'.JPEG')
- img_path_init = os.path.join(outpainting_root, img_name.split('.')[0]+'_0.png')
- img_path_2 = os.path.join(padding_root, cls, img_name.split('.')[0]+'.JPEG')
- mask_path = os.path.join(mask_root, cls, img_name)
- if os.path.exists(img_path) and os.path.exists(img_path_init) and os.path.exists(img_path_2) and os.path.exists(mask_path):
- img = Image.open(img_path_2).convert('RGB')
- img = img.resize(shape, Image.LANCZOS)
- img = TF.to_tensor(img).unsqueeze(0).mul(2).sub(1)
-
- mask = Image.open(mask_path).convert('RGB')
- mask = mask.resize(shape, Image.NEAREST)
- bbox = find_bbox(np.array(mask))
-
- mask = ((np.array(mask) > 0.5) * 255).astype(np.uint8)
-
- mask = TF.to_tensor(Image.fromarray(mask))
- mask = mask[0, ...].unsqueeze(0).unsqueeze(0)
-
- if bbox is not None:
-                    img, mask = change_place(img, mask, bbox, invert_mask=False)
-
- img_init = Image.open(img_path_init).convert('RGB')
- img_init = img_init.resize(shape, Image.LANCZOS)
- img_init = TF.to_tensor(img_init).unsqueeze(0).mul(2).sub(1)
- img_new = img_init*(1-mask) + img*mask
-
- img_new = np.transpose(((img_new+1)/2*255)[0].numpy(), (1,2,0))[:,:,::-1]
- img_init = cv2.imread(img_path)
- img_init = cv2.resize(img_init, shape)
- # cv2.imwrite('tmp/'+img_name, cv2.hconcat([img_init, img_new.astype('uint8')]))
- cv2.imwrite('tmp/'+img_name, img_new.astype('uint8'))
-
-
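To make the flow of the two helpers above concrete, here is a small sketch that runs `find_bbox` and `change_place` on synthetic data instead of the hard-coded ImageNet-S paths in `__main__`; it assumes the deleted file is importable as a module named `change_place`:

```python
# Sketch: locate the largest object in a mask, then paste the object (and its
# mask) at a random new location inside the image.
import numpy as np
import torch
from change_place import change_place, find_bbox  # assumed module name

H = W = 256
mask_np = np.zeros((H, W, 3), dtype=np.uint8)
mask_np[64:128, 64:128] = 255                      # a single square "object"

bbox = find_bbox(mask_np)                          # [x, y, new_x, new_y, w, h] or None
img = torch.rand(1, 3, H, W) * 2 - 1               # image in [-1, 1], shape N,C,H,W
mask = torch.from_numpy(mask_np[:, :, :1] / 255.0).permute(2, 0, 1).unsqueeze(0).float()

if bbox is not None:
    moved_img, moved_mask = change_place(img, mask, bbox, invert_mask=False)
    print(moved_img.shape, moved_mask.shape)
```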
diff --git a/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/results/celeba/test_latest/index.html b/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/results/celeba/test_latest/index.html
deleted file mode 100644
index 955c0989bb6f9902364f7e6d9419c8d58984834e..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/results/celeba/test_latest/index.html
+++ /dev/null
@@ -1,563 +0,0 @@
-
-
-
- Experiment = celeba, Phase = test, Epoch = latest
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/point_sample.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/point_sample.py
deleted file mode 100644
index 267f4b3c56630acd85f9bdc630b7be09abab0aba..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/point_sample.py
+++ /dev/null
@@ -1,336 +0,0 @@
-# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend # noqa
-
-from os import path as osp
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torch.nn.modules.utils import _pair
-from torch.onnx.operators import shape_as_tensor
-
-
-def bilinear_grid_sample(im, grid, align_corners=False):
- """Given an input and a flow-field grid, computes the output using input
- values and pixel locations from grid. Supported only bilinear interpolation
- method to sample the input pixels.
-
- Args:
- im (torch.Tensor): Input feature map, shape (N, C, H, W)
- grid (torch.Tensor): Point coordinates, shape (N, Hg, Wg, 2)
- align_corners {bool}: If set to True, the extrema (-1 and 1) are
- considered as referring to the center points of the input’s
- corner pixels. If set to False, they are instead considered as
- referring to the corner points of the input’s corner pixels,
- making the sampling more resolution agnostic.
- Returns:
- torch.Tensor: A tensor with sampled points, shape (N, C, Hg, Wg)
- """
- n, c, h, w = im.shape
- gn, gh, gw, _ = grid.shape
- assert n == gn
-
- x = grid[:, :, :, 0]
- y = grid[:, :, :, 1]
-
- if align_corners:
- x = ((x + 1) / 2) * (w - 1)
- y = ((y + 1) / 2) * (h - 1)
- else:
- x = ((x + 1) * w - 1) / 2
- y = ((y + 1) * h - 1) / 2
-
- x = x.view(n, -1)
- y = y.view(n, -1)
-
- x0 = torch.floor(x).long()
- y0 = torch.floor(y).long()
- x1 = x0 + 1
- y1 = y0 + 1
-
- wa = ((x1 - x) * (y1 - y)).unsqueeze(1)
- wb = ((x1 - x) * (y - y0)).unsqueeze(1)
- wc = ((x - x0) * (y1 - y)).unsqueeze(1)
- wd = ((x - x0) * (y - y0)).unsqueeze(1)
-
- # Apply default for grid_sample function zero padding
- im_padded = F.pad(im, pad=[1, 1, 1, 1], mode='constant', value=0)
- padded_h = h + 2
- padded_w = w + 2
- # save points positions after padding
- x0, x1, y0, y1 = x0 + 1, x1 + 1, y0 + 1, y1 + 1
-
- # Clip coordinates to padded image size
- x0 = torch.where(x0 < 0, torch.tensor(0), x0)
- x0 = torch.where(x0 > padded_w - 1, torch.tensor(padded_w - 1), x0)
- x1 = torch.where(x1 < 0, torch.tensor(0), x1)
- x1 = torch.where(x1 > padded_w - 1, torch.tensor(padded_w - 1), x1)
- y0 = torch.where(y0 < 0, torch.tensor(0), y0)
- y0 = torch.where(y0 > padded_h - 1, torch.tensor(padded_h - 1), y0)
- y1 = torch.where(y1 < 0, torch.tensor(0), y1)
- y1 = torch.where(y1 > padded_h - 1, torch.tensor(padded_h - 1), y1)
-
- im_padded = im_padded.view(n, c, -1)
-
- x0_y0 = (x0 + y0 * padded_w).unsqueeze(1).expand(-1, c, -1)
- x0_y1 = (x0 + y1 * padded_w).unsqueeze(1).expand(-1, c, -1)
- x1_y0 = (x1 + y0 * padded_w).unsqueeze(1).expand(-1, c, -1)
- x1_y1 = (x1 + y1 * padded_w).unsqueeze(1).expand(-1, c, -1)
-
- Ia = torch.gather(im_padded, 2, x0_y0)
- Ib = torch.gather(im_padded, 2, x0_y1)
- Ic = torch.gather(im_padded, 2, x1_y0)
- Id = torch.gather(im_padded, 2, x1_y1)
-
- return (Ia * wa + Ib * wb + Ic * wc + Id * wd).reshape(n, c, gh, gw)
-
-
-def is_in_onnx_export_without_custom_ops():
- from annotator.uniformer.mmcv.ops import get_onnxruntime_op_path
- ort_custom_op_path = get_onnxruntime_op_path()
- return torch.onnx.is_in_onnx_export(
- ) and not osp.exists(ort_custom_op_path)
-
-
-def normalize(grid):
- """Normalize input grid from [-1, 1] to [0, 1]
- Args:
- grid (Tensor): The grid to be normalize, range [-1, 1].
- Returns:
- Tensor: Normalized grid, range [0, 1].
- """
-
- return (grid + 1.0) / 2.0
-
-
-def denormalize(grid):
- """Denormalize input grid from range [0, 1] to [-1, 1]
- Args:
- grid (Tensor): The grid to be denormalize, range [0, 1].
- Returns:
- Tensor: Denormalized grid, range [-1, 1].
- """
-
- return grid * 2.0 - 1.0
-
-
-def generate_grid(num_grid, size, device):
- """Generate regular square grid of points in [0, 1] x [0, 1] coordinate
- space.
-
- Args:
- num_grid (int): The number of grids to sample, one for each region.
- size (tuple(int, int)): The side size of the regular grid.
- device (torch.device): Desired device of returned tensor.
-
- Returns:
- (torch.Tensor): A tensor of shape (num_grid, size[0]*size[1], 2) that
- contains coordinates for the regular grids.
- """
-
- affine_trans = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]], device=device)
- grid = F.affine_grid(
- affine_trans, torch.Size((1, 1, *size)), align_corners=False)
- grid = normalize(grid)
- return grid.view(1, -1, 2).expand(num_grid, -1, -1)
-
-
-def rel_roi_point_to_abs_img_point(rois, rel_roi_points):
- """Convert roi based relative point coordinates to image based absolute
- point coordinates.
-
- Args:
- rois (Tensor): RoIs or BBoxes, shape (N, 4) or (N, 5)
- rel_roi_points (Tensor): Point coordinates inside RoI, relative to
- RoI, location, range (0, 1), shape (N, P, 2)
- Returns:
- Tensor: Image based absolute point coordinates, shape (N, P, 2)
- """
-
- with torch.no_grad():
- assert rel_roi_points.size(0) == rois.size(0)
- assert rois.dim() == 2
- assert rel_roi_points.dim() == 3
- assert rel_roi_points.size(2) == 2
- # remove batch idx
- if rois.size(1) == 5:
- rois = rois[:, 1:]
- abs_img_points = rel_roi_points.clone()
- # To avoid an error during exporting to onnx use independent
- # variables instead inplace computation
- xs = abs_img_points[:, :, 0] * (rois[:, None, 2] - rois[:, None, 0])
- ys = abs_img_points[:, :, 1] * (rois[:, None, 3] - rois[:, None, 1])
- xs += rois[:, None, 0]
- ys += rois[:, None, 1]
- abs_img_points = torch.stack([xs, ys], dim=2)
- return abs_img_points
-
-
-def get_shape_from_feature_map(x):
- """Get spatial resolution of input feature map considering exporting to
- onnx mode.
-
- Args:
- x (torch.Tensor): Input tensor, shape (N, C, H, W)
- Returns:
- torch.Tensor: Spatial resolution (width, height), shape (1, 1, 2)
- """
- if torch.onnx.is_in_onnx_export():
- img_shape = shape_as_tensor(x)[2:].flip(0).view(1, 1, 2).to(
- x.device).float()
- else:
- img_shape = torch.tensor(x.shape[2:]).flip(0).view(1, 1, 2).to(
- x.device).float()
- return img_shape
-
-
-def abs_img_point_to_rel_img_point(abs_img_points, img, spatial_scale=1.):
- """Convert image based absolute point coordinates to image based relative
- coordinates for sampling.
-
- Args:
- abs_img_points (Tensor): Image based absolute point coordinates,
- shape (N, P, 2)
- img (tuple/Tensor): (height, width) of image or feature map.
- spatial_scale (float): Scale points by this factor. Default: 1.
-
- Returns:
- Tensor: Image based relative point coordinates for sampling,
- shape (N, P, 2)
- """
-
- assert (isinstance(img, tuple) and len(img) == 2) or \
- (isinstance(img, torch.Tensor) and len(img.shape) == 4)
-
- if isinstance(img, tuple):
- h, w = img
- scale = torch.tensor([w, h],
- dtype=torch.float,
- device=abs_img_points.device)
- scale = scale.view(1, 1, 2)
- else:
- scale = get_shape_from_feature_map(img)
-
- return abs_img_points / scale * spatial_scale
-
-
-def rel_roi_point_to_rel_img_point(rois,
- rel_roi_points,
- img,
- spatial_scale=1.):
- """Convert roi based relative point coordinates to image based absolute
- point coordinates.
-
- Args:
- rois (Tensor): RoIs or BBoxes, shape (N, 4) or (N, 5)
- rel_roi_points (Tensor): Point coordinates inside RoI, relative to
- RoI, location, range (0, 1), shape (N, P, 2)
- img (tuple/Tensor): (height, width) of image or feature map.
- spatial_scale (float): Scale points by this factor. Default: 1.
-
- Returns:
- Tensor: Image based relative point coordinates for sampling,
- shape (N, P, 2)
- """
-
- abs_img_point = rel_roi_point_to_abs_img_point(rois, rel_roi_points)
- rel_img_point = abs_img_point_to_rel_img_point(abs_img_point, img,
- spatial_scale)
-
- return rel_img_point
-
-
-def point_sample(input, points, align_corners=False, **kwargs):
- """A wrapper around :func:`grid_sample` to support 3D point_coords tensors
- Unlike :func:`torch.nn.functional.grid_sample` it assumes point_coords to
- lie inside ``[0, 1] x [0, 1]`` square.
-
- Args:
- input (Tensor): Feature map, shape (N, C, H, W).
- points (Tensor): Image based absolute point coordinates (normalized),
- range [0, 1] x [0, 1], shape (N, P, 2) or (N, Hgrid, Wgrid, 2).
- align_corners (bool): Whether align_corners. Default: False
-
- Returns:
- Tensor: Features of `point` on `input`, shape (N, C, P) or
- (N, C, Hgrid, Wgrid).
- """
-
- add_dim = False
- if points.dim() == 3:
- add_dim = True
- points = points.unsqueeze(2)
- if is_in_onnx_export_without_custom_ops():
- # If custom ops for onnx runtime not compiled use python
- # implementation of grid_sample function to make onnx graph
- # with supported nodes
- output = bilinear_grid_sample(
- input, denormalize(points), align_corners=align_corners)
- else:
- output = F.grid_sample(
- input, denormalize(points), align_corners=align_corners, **kwargs)
- if add_dim:
- output = output.squeeze(3)
- return output
-
-
-class SimpleRoIAlign(nn.Module):
-
- def __init__(self, output_size, spatial_scale, aligned=True):
- """Simple RoI align in PointRend, faster than standard RoIAlign.
-
- Args:
- output_size (tuple[int]): h, w
- spatial_scale (float): scale the input boxes by this number
- aligned (bool): if False, use the legacy implementation in
- MMDetection, align_corners=True will be used in F.grid_sample.
- If True, align the results more perfectly.
- """
-
- super(SimpleRoIAlign, self).__init__()
- self.output_size = _pair(output_size)
- self.spatial_scale = float(spatial_scale)
- # to be consistent with other RoI ops
- self.use_torchvision = False
- self.aligned = aligned
-
- def forward(self, features, rois):
- num_imgs = features.size(0)
- num_rois = rois.size(0)
- rel_roi_points = generate_grid(
- num_rois, self.output_size, device=rois.device)
-
- if torch.onnx.is_in_onnx_export():
- rel_img_points = rel_roi_point_to_rel_img_point(
- rois, rel_roi_points, features, self.spatial_scale)
- rel_img_points = rel_img_points.reshape(num_imgs, -1,
- *rel_img_points.shape[1:])
- point_feats = point_sample(
- features, rel_img_points, align_corners=not self.aligned)
- point_feats = point_feats.transpose(1, 2)
- else:
- point_feats = []
- for batch_ind in range(num_imgs):
- # unravel batch dim
- feat = features[batch_ind].unsqueeze(0)
- inds = (rois[:, 0].long() == batch_ind)
- if inds.any():
- rel_img_points = rel_roi_point_to_rel_img_point(
- rois[inds], rel_roi_points[inds], feat,
- self.spatial_scale).unsqueeze(0)
- point_feat = point_sample(
- feat, rel_img_points, align_corners=not self.aligned)
- point_feat = point_feat.squeeze(0).transpose(0, 1)
- point_feats.append(point_feat)
-
- point_feats = torch.cat(point_feats, dim=0)
-
- channels = features.size(1)
- roi_feats = point_feats.reshape(num_rois, channels, *self.output_size)
-
- return roi_feats
-
- def __repr__(self):
- format_str = self.__class__.__name__
- format_str += '(output_size={}, spatial_scale={}'.format(
- self.output_size, self.spatial_scale)
- return format_str
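As a quick consistency check on the helpers above, `bilinear_grid_sample` is meant to track `F.grid_sample` (bilinear, zero padding), and `point_sample` accepts per-point coordinates in `[0, 1] x [0, 1]`. A sketch, assuming the deleted file is importable as a module named `point_sample`:

```python
# Sketch: compare the pure-PyTorch sampler with F.grid_sample, then sample
# per-point features from a feature map.
import torch
import torch.nn.functional as F
from point_sample import bilinear_grid_sample, point_sample  # assumed module name

feats = torch.rand(2, 8, 16, 16)              # N, C, H, W feature map
grid = torch.rand(2, 4, 4, 2) * 2 - 1         # sampling grid in [-1, 1]

ref = F.grid_sample(feats, grid, align_corners=False)
ours = bilinear_grid_sample(feats, grid, align_corners=False)
print(torch.allclose(ref, ours, atol=1e-5))   # expected to print True

points = torch.rand(2, 10, 2)                 # N, P, 2 points in [0, 1] x [0, 1]
print(point_sample(feats, points).shape)      # -> torch.Size([2, 8, 10])
```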
diff --git a/spaces/Ariharasudhan/YoloV5/utils/segment/plots.py b/spaces/Ariharasudhan/YoloV5/utils/segment/plots.py
deleted file mode 100644
index 9b90900b3772fe23dbd57deb64221f98e563b069..0000000000000000000000000000000000000000
--- a/spaces/Ariharasudhan/YoloV5/utils/segment/plots.py
+++ /dev/null
@@ -1,143 +0,0 @@
-import contextlib
-import math
-from pathlib import Path
-
-import cv2
-import matplotlib.pyplot as plt
-import numpy as np
-import pandas as pd
-import torch
-
-from .. import threaded
-from ..general import xywh2xyxy
-from ..plots import Annotator, colors
-
-
-@threaded
-def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg', names=None):
- # Plot image grid with labels
- if isinstance(images, torch.Tensor):
- images = images.cpu().float().numpy()
- if isinstance(targets, torch.Tensor):
- targets = targets.cpu().numpy()
- if isinstance(masks, torch.Tensor):
- masks = masks.cpu().numpy().astype(int)
-
- max_size = 1920 # max image size
- max_subplots = 16 # max image subplots, i.e. 4x4
- bs, _, h, w = images.shape # batch size, _, height, width
- bs = min(bs, max_subplots) # limit plot images
- ns = np.ceil(bs ** 0.5) # number of subplots (square)
- if np.max(images[0]) <= 1:
- images *= 255 # de-normalise (optional)
-
- # Build Image
- mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init
- for i, im in enumerate(images):
- if i == max_subplots: # if last batch has fewer images than we expect
- break
- x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
- im = im.transpose(1, 2, 0)
- mosaic[y:y + h, x:x + w, :] = im
-
- # Resize (optional)
- scale = max_size / ns / max(h, w)
- if scale < 1:
- h = math.ceil(scale * h)
- w = math.ceil(scale * w)
- mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))
-
- # Annotate
- fs = int((h + w) * ns * 0.01) # font size
- annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names)
- for i in range(i + 1):
- x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin
- annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders
- if paths:
- annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames
- if len(targets) > 0:
- idx = targets[:, 0] == i
- ti = targets[idx] # image targets
-
- boxes = xywh2xyxy(ti[:, 2:6]).T
- classes = ti[:, 1].astype('int')
- labels = ti.shape[1] == 6 # labels if no conf column
- conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred)
-
- if boxes.shape[1]:
- if boxes.max() <= 1.01: # if normalized with tolerance 0.01
- boxes[[0, 2]] *= w # scale to pixels
- boxes[[1, 3]] *= h
- elif scale < 1: # absolute coords need scale if image scales
- boxes *= scale
- boxes[[0, 2]] += x
- boxes[[1, 3]] += y
- for j, box in enumerate(boxes.T.tolist()):
- cls = classes[j]
- color = colors(cls)
- cls = names[cls] if names else cls
- if labels or conf[j] > 0.25: # 0.25 conf thresh
- label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'
- annotator.box_label(box, label, color=color)
-
- # Plot masks
- if len(masks):
-                if masks.max() > 1.0:  # masks are stored in overlap (index) format
- image_masks = masks[[i]] # (1, 640, 640)
- nl = len(ti)
- index = np.arange(nl).reshape(nl, 1, 1) + 1
- image_masks = np.repeat(image_masks, nl, axis=0)
- image_masks = np.where(image_masks == index, 1.0, 0.0)
- else:
- image_masks = masks[idx]
-
- im = np.asarray(annotator.im).copy()
- for j, box in enumerate(boxes.T.tolist()):
- if labels or conf[j] > 0.25: # 0.25 conf thresh
- color = colors(classes[j])
- mh, mw = image_masks[j].shape
- if mh != h or mw != w:
- mask = image_masks[j].astype(np.uint8)
- mask = cv2.resize(mask, (w, h))
- mask = mask.astype(bool)
- else:
- mask = image_masks[j].astype(bool)
- with contextlib.suppress(Exception):
- im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6
- annotator.fromarray(im)
- annotator.im.save(fname) # save
-
-
-def plot_results_with_masks(file="path/to/results.csv", dir="", best=True):
- # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv')
- save_dir = Path(file).parent if file else Path(dir)
- fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True)
- ax = ax.ravel()
- files = list(save_dir.glob("results*.csv"))
- assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot."
- for f in files:
- try:
- data = pd.read_csv(f)
- index = np.argmax(0.9 * data.values[:, 8] + 0.1 * data.values[:, 7] + 0.9 * data.values[:, 12] +
- 0.1 * data.values[:, 11])
- s = [x.strip() for x in data.columns]
- x = data.values[:, 0]
- for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]):
- y = data.values[:, j]
- # y[y == 0] = np.nan # don't show zero values
- ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=2)
- if best:
- # best
- ax[i].scatter(index, y[index], color="r", label=f"best:{index}", marker="*", linewidth=3)
- ax[i].set_title(s[j] + f"\n{round(y[index], 5)}")
- else:
- # last
- ax[i].scatter(x[-1], y[-1], color="r", label="last", marker="*", linewidth=3)
- ax[i].set_title(s[j] + f"\n{round(y[-1], 5)}")
- # if j in [8, 9, 10]: # share train and val loss y axes
- # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
- except Exception as e:
- print(f"Warning: Plotting error for {f}: {e}")
- ax[1].legend()
- fig.savefig(save_dir / "results.png", dpi=200)
- plt.close()
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/manifest.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/manifest.py
deleted file mode 100644
index ca0fe442d9ca499466df9438df16eca405c5f102..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/manifest.py
+++ /dev/null
@@ -1,393 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2012-2013 Python Software Foundation.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-"""
-Class representing the list of files in a distribution.
-
-Equivalent to distutils.filelist, but fixes some problems.
-"""
-import fnmatch
-import logging
-import os
-import re
-import sys
-
-from . import DistlibException
-from .compat import fsdecode
-from .util import convert_path
-
-
-__all__ = ['Manifest']
-
-logger = logging.getLogger(__name__)
-
-# a \ followed by some spaces + EOL
-_COLLAPSE_PATTERN = re.compile('\\\\w*\n', re.M)
-_COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S)
-
-#
-# Due to the different results returned by fnmatch.translate, we need
-# to do slightly different processing for Python 2.7 and 3.2 ... this needed
-# to be brought in for Python 3.6 onwards.
-#
-_PYTHON_VERSION = sys.version_info[:2]
-
-class Manifest(object):
- """A list of files built by on exploring the filesystem and filtered by
- applying various patterns to what we find there.
- """
-
- def __init__(self, base=None):
- """
- Initialise an instance.
-
- :param base: The base directory to explore under.
- """
- self.base = os.path.abspath(os.path.normpath(base or os.getcwd()))
- self.prefix = self.base + os.sep
- self.allfiles = None
- self.files = set()
-
- #
- # Public API
- #
-
- def findall(self):
- """Find all files under the base and set ``allfiles`` to the absolute
- pathnames of files found.
- """
- from stat import S_ISREG, S_ISDIR, S_ISLNK
-
- self.allfiles = allfiles = []
- root = self.base
- stack = [root]
- pop = stack.pop
- push = stack.append
-
- while stack:
- root = pop()
- names = os.listdir(root)
-
- for name in names:
- fullname = os.path.join(root, name)
-
- # Avoid excess stat calls -- just one will do, thank you!
- stat = os.stat(fullname)
- mode = stat.st_mode
- if S_ISREG(mode):
- allfiles.append(fsdecode(fullname))
- elif S_ISDIR(mode) and not S_ISLNK(mode):
- push(fullname)
-
- def add(self, item):
- """
- Add a file to the manifest.
-
- :param item: The pathname to add. This can be relative to the base.
- """
- if not item.startswith(self.prefix):
- item = os.path.join(self.base, item)
- self.files.add(os.path.normpath(item))
-
- def add_many(self, items):
- """
- Add a list of files to the manifest.
-
- :param items: The pathnames to add. These can be relative to the base.
- """
- for item in items:
- self.add(item)
-
- def sorted(self, wantdirs=False):
- """
- Return sorted files in directory order
- """
-
- def add_dir(dirs, d):
- dirs.add(d)
- logger.debug('add_dir added %s', d)
- if d != self.base:
- parent, _ = os.path.split(d)
- assert parent not in ('', '/')
- add_dir(dirs, parent)
-
- result = set(self.files) # make a copy!
- if wantdirs:
- dirs = set()
- for f in result:
- add_dir(dirs, os.path.dirname(f))
- result |= dirs
- return [os.path.join(*path_tuple) for path_tuple in
- sorted(os.path.split(path) for path in result)]
-
- def clear(self):
- """Clear all collected files."""
- self.files = set()
- self.allfiles = []
-
- def process_directive(self, directive):
- """
- Process a directive which either adds some files from ``allfiles`` to
- ``files``, or removes some files from ``files``.
-
- :param directive: The directive to process. This should be in a format
- compatible with distutils ``MANIFEST.in`` files:
-
- http://docs.python.org/distutils/sourcedist.html#commands
- """
- # Parse the line: split it up, make sure the right number of words
- # is there, and return the relevant words. 'action' is always
- # defined: it's the first word of the line. Which of the other
- # three are defined depends on the action; it'll be either
- # patterns, (dir and patterns), or (dirpattern).
- action, patterns, thedir, dirpattern = self._parse_directive(directive)
-
- # OK, now we know that the action is valid and we have the
- # right number of words on the line for that action -- so we
- # can proceed with minimal error-checking.
- if action == 'include':
- for pattern in patterns:
- if not self._include_pattern(pattern, anchor=True):
- logger.warning('no files found matching %r', pattern)
-
- elif action == 'exclude':
- for pattern in patterns:
- found = self._exclude_pattern(pattern, anchor=True)
- #if not found:
- # logger.warning('no previously-included files '
- # 'found matching %r', pattern)
-
- elif action == 'global-include':
- for pattern in patterns:
- if not self._include_pattern(pattern, anchor=False):
- logger.warning('no files found matching %r '
- 'anywhere in distribution', pattern)
-
- elif action == 'global-exclude':
- for pattern in patterns:
- found = self._exclude_pattern(pattern, anchor=False)
- #if not found:
- # logger.warning('no previously-included files '
- # 'matching %r found anywhere in '
- # 'distribution', pattern)
-
- elif action == 'recursive-include':
- for pattern in patterns:
- if not self._include_pattern(pattern, prefix=thedir):
- logger.warning('no files found matching %r '
- 'under directory %r', pattern, thedir)
-
- elif action == 'recursive-exclude':
- for pattern in patterns:
- found = self._exclude_pattern(pattern, prefix=thedir)
- #if not found:
- # logger.warning('no previously-included files '
- # 'matching %r found under directory %r',
- # pattern, thedir)
-
- elif action == 'graft':
- if not self._include_pattern(None, prefix=dirpattern):
- logger.warning('no directories found matching %r',
- dirpattern)
-
- elif action == 'prune':
- if not self._exclude_pattern(None, prefix=dirpattern):
- logger.warning('no previously-included directories found '
- 'matching %r', dirpattern)
- else: # pragma: no cover
- # This should never happen, as it should be caught in
- # _parse_template_line
- raise DistlibException(
- 'invalid action %r' % action)
-
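Since the directive grammar handled by `process_directive` mirrors distutils `MANIFEST.in` files, the class can be driven directly; a short sketch against distlib's public API, using a hypothetical project directory:

```python
# Sketch: select files with MANIFEST.in-style directives via distlib's Manifest.
from distlib.manifest import Manifest

manifest = Manifest(base='/path/to/project')   # hypothetical project root
manifest.findall()                             # populate allfiles from the filesystem
manifest.process_directive('include README.md')
manifest.process_directive('recursive-include src *.py')
manifest.process_directive('global-exclude *.py[co]')
manifest.process_directive('prune build')
print(manifest.sorted(wantdirs=False))         # selected files in directory order
```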
- #
- # Private API
- #
-
- def _parse_directive(self, directive):
- """
- Validate a directive.
- :param directive: The directive to validate.
- :return: A tuple of action, patterns, thedir, dir_patterns
- """
- words = directive.split()
- if len(words) == 1 and words[0] not in ('include', 'exclude',
- 'global-include',
- 'global-exclude',
- 'recursive-include',
- 'recursive-exclude',
- 'graft', 'prune'):
- # no action given, let's use the default 'include'
- words.insert(0, 'include')
-
- action = words[0]
- patterns = thedir = dir_pattern = None
-
- if action in ('include', 'exclude',
- 'global-include', 'global-exclude'):
- if len(words) < 2:
- raise DistlibException(
-                    '%r expects <pattern1> <pattern2> ...' % action)
-
- patterns = [convert_path(word) for word in words[1:]]
-
- elif action in ('recursive-include', 'recursive-exclude'):
- if len(words) < 3:
- raise DistlibException(
-                    '%r expects <dir> <pattern1> <pattern2> ...' % action)
-
- thedir = convert_path(words[1])
- patterns = [convert_path(word) for word in words[2:]]
-
- elif action in ('graft', 'prune'):
- if len(words) != 2:
- raise DistlibException(
-                    '%r expects a single <dir_pattern>' % action)
-
- dir_pattern = convert_path(words[1])
-
- else:
- raise DistlibException('unknown action %r' % action)
-
- return action, patterns, thedir, dir_pattern
-
- def _include_pattern(self, pattern, anchor=True, prefix=None,
- is_regex=False):
- """Select strings (presumably filenames) from 'self.files' that
- match 'pattern', a Unix-style wildcard (glob) pattern.
-
- Patterns are not quite the same as implemented by the 'fnmatch'
- module: '*' and '?' match non-special characters, where "special"
- is platform-dependent: slash on Unix; colon, slash, and backslash on
- DOS/Windows; and colon on Mac OS.
-
- If 'anchor' is true (the default), then the pattern match is more
- stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
- 'anchor' is false, both of these will match.
-
- If 'prefix' is supplied, then only filenames starting with 'prefix'
- (itself a pattern) and ending with 'pattern', with anything in between
- them, will match. 'anchor' is ignored in this case.
-
- If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
- 'pattern' is assumed to be either a string containing a regex or a
- regex object -- no translation is done, the regex is just compiled
- and used as-is.
-
- Selected strings will be added to self.files.
-
- Return True if files are found.
- """
- # XXX docstring lying about what the special chars are?
- found = False
- pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
-
- # delayed loading of allfiles list
- if self.allfiles is None:
- self.findall()
-
- for name in self.allfiles:
- if pattern_re.search(name):
- self.files.add(name)
- found = True
- return found
-
- def _exclude_pattern(self, pattern, anchor=True, prefix=None,
- is_regex=False):
- """Remove strings (presumably filenames) from 'files' that match
- 'pattern'.
-
- Other parameters are the same as for 'include_pattern()', above.
- The list 'self.files' is modified in place. Return True if files are
- found.
-
- This API is public to allow e.g. exclusion of SCM subdirs, e.g. when
- packaging source distributions
- """
- found = False
- pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
- for f in list(self.files):
- if pattern_re.search(f):
- self.files.remove(f)
- found = True
- return found
-
- def _translate_pattern(self, pattern, anchor=True, prefix=None,
- is_regex=False):
- """Translate a shell-like wildcard pattern to a compiled regular
- expression.
-
- Return the compiled regex. If 'is_regex' true,
- then 'pattern' is directly compiled to a regex (if it's a string)
- or just returned as-is (assumes it's a regex object).
- """
- if is_regex:
- if isinstance(pattern, str):
- return re.compile(pattern)
- else:
- return pattern
-
- if _PYTHON_VERSION > (3, 2):
- # ditch start and end characters
- start, _, end = self._glob_to_re('_').partition('_')
-
- if pattern:
- pattern_re = self._glob_to_re(pattern)
- if _PYTHON_VERSION > (3, 2):
- assert pattern_re.startswith(start) and pattern_re.endswith(end)
- else:
- pattern_re = ''
-
- base = re.escape(os.path.join(self.base, ''))
- if prefix is not None:
- # ditch end of pattern character
- if _PYTHON_VERSION <= (3, 2):
- empty_pattern = self._glob_to_re('')
- prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)]
- else:
- prefix_re = self._glob_to_re(prefix)
- assert prefix_re.startswith(start) and prefix_re.endswith(end)
- prefix_re = prefix_re[len(start): len(prefix_re) - len(end)]
- sep = os.sep
- if os.sep == '\\':
- sep = r'\\'
- if _PYTHON_VERSION <= (3, 2):
- pattern_re = '^' + base + sep.join((prefix_re,
- '.*' + pattern_re))
- else:
- pattern_re = pattern_re[len(start): len(pattern_re) - len(end)]
- pattern_re = r'%s%s%s%s.*%s%s' % (start, base, prefix_re, sep,
- pattern_re, end)
- else: # no prefix -- respect anchor flag
- if anchor:
- if _PYTHON_VERSION <= (3, 2):
- pattern_re = '^' + base + pattern_re
- else:
- pattern_re = r'%s%s%s' % (start, base, pattern_re[len(start):])
-
- return re.compile(pattern_re)
-
- def _glob_to_re(self, pattern):
- """Translate a shell-like glob pattern to a regular expression.
-
- Return a string containing the regex. Differs from
- 'fnmatch.translate()' in that '*' does not match "special characters"
- (which are platform-specific).
- """
- pattern_re = fnmatch.translate(pattern)
-
- # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
- # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
- # and by extension they shouldn't match such "special characters" under
- # any OS. So change all non-escaped dots in the RE to match any
- # character except the special characters (currently: just os.sep).
- sep = os.sep
- if os.sep == '\\':
- # we're using a regex to manipulate a regex, so we need
- # to escape the backslash twice
- sep = r'\\\\'
- escaped = r'\1[^%s]' % sep
-        pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
-        return pattern_re
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/layout.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/layout.py
deleted file mode 100644
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/layout.py
+++ /dev/null
-    def __init__(self, layout: "Layout", style: StyleType = "") -> None:
- self.layout = layout
- self.style = style
-
- def __rich_console__(
- self, console: Console, options: ConsoleOptions
- ) -> RenderResult:
- width = options.max_width
- height = options.height or options.size.height
- layout = self.layout
- title = (
- f"{layout.name!r} ({width} x {height})"
- if layout.name
- else f"({width} x {height})"
- )
- yield Panel(
- Align.center(Pretty(layout), vertical="middle"),
- style=self.style,
- title=self.highlighter(title),
- border_style="blue",
- height=height,
- )
-
-
-class Splitter(ABC):
- """Base class for a splitter."""
-
- name: str = ""
-
- @abstractmethod
- def get_tree_icon(self) -> str:
- """Get the icon (emoji) used in layout.tree"""
-
- @abstractmethod
- def divide(
- self, children: Sequence["Layout"], region: Region
- ) -> Iterable[Tuple["Layout", Region]]:
- """Divide a region amongst several child layouts.
-
- Args:
- children (Sequence(Layout)): A number of child layouts.
- region (Region): A rectangular region to divide.
- """
-
-
-class RowSplitter(Splitter):
- """Split a layout region in to rows."""
-
- name = "row"
-
- def get_tree_icon(self) -> str:
- return "[layout.tree.row]⬌"
-
- def divide(
- self, children: Sequence["Layout"], region: Region
- ) -> Iterable[Tuple["Layout", Region]]:
- x, y, width, height = region
- render_widths = ratio_resolve(width, children)
- offset = 0
- _Region = Region
- for child, child_width in zip(children, render_widths):
- yield child, _Region(x + offset, y, child_width, height)
- offset += child_width
-
-
-class ColumnSplitter(Splitter):
- """Split a layout region in to columns."""
-
- name = "column"
-
- def get_tree_icon(self) -> str:
- return "[layout.tree.column]⬍"
-
- def divide(
- self, children: Sequence["Layout"], region: Region
- ) -> Iterable[Tuple["Layout", Region]]:
- x, y, width, height = region
- render_heights = ratio_resolve(height, children)
- offset = 0
- _Region = Region
- for child, child_height in zip(children, render_heights):
- yield child, _Region(x, y + offset, width, child_height)
- offset += child_height
-
-
-@rich_repr
-class Layout:
- """A renderable to divide a fixed height in to rows or columns.
-
- Args:
- renderable (RenderableType, optional): Renderable content, or None for placeholder. Defaults to None.
- name (str, optional): Optional identifier for Layout. Defaults to None.
- size (int, optional): Optional fixed size of layout. Defaults to None.
- minimum_size (int, optional): Minimum size of layout. Defaults to 1.
- ratio (int, optional): Optional ratio for flexible layout. Defaults to 1.
- visible (bool, optional): Visibility of layout. Defaults to True.
- """
-
- splitters = {"row": RowSplitter, "column": ColumnSplitter}
-
- def __init__(
- self,
- renderable: Optional[RenderableType] = None,
- *,
- name: Optional[str] = None,
- size: Optional[int] = None,
- minimum_size: int = 1,
- ratio: int = 1,
- visible: bool = True,
- ) -> None:
- self._renderable = renderable or _Placeholder(self)
- self.size = size
- self.minimum_size = minimum_size
- self.ratio = ratio
- self.name = name
- self.visible = visible
- self.splitter: Splitter = self.splitters["column"]()
- self._children: List[Layout] = []
- self._render_map: RenderMap = {}
- self._lock = RLock()
-
- def __rich_repr__(self) -> Result:
- yield "name", self.name, None
- yield "size", self.size, None
- yield "minimum_size", self.minimum_size, 1
- yield "ratio", self.ratio, 1
-
- @property
- def renderable(self) -> RenderableType:
- """Layout renderable."""
- return self if self._children else self._renderable
-
- @property
- def children(self) -> List["Layout"]:
- """Gets (visible) layout children."""
- return [child for child in self._children if child.visible]
-
- @property
- def map(self) -> RenderMap:
- """Get a map of the last render."""
- return self._render_map
-
- def get(self, name: str) -> Optional["Layout"]:
- """Get a named layout, or None if it doesn't exist.
-
- Args:
- name (str): Name of layout.
-
- Returns:
- Optional[Layout]: Layout instance or None if no layout was found.
- """
- if self.name == name:
- return self
- else:
- for child in self._children:
- named_layout = child.get(name)
- if named_layout is not None:
- return named_layout
- return None
-
- def __getitem__(self, name: str) -> "Layout":
- layout = self.get(name)
- if layout is None:
- raise KeyError(f"No layout with name {name!r}")
- return layout
-
- @property
- def tree(self) -> "Tree":
- """Get a tree renderable to show layout structure."""
- from pip._vendor.rich.styled import Styled
- from pip._vendor.rich.table import Table
- from pip._vendor.rich.tree import Tree
-
- def summary(layout: "Layout") -> Table:
-
- icon = layout.splitter.get_tree_icon()
-
- table = Table.grid(padding=(0, 1, 0, 0))
-
- text: RenderableType = (
- Pretty(layout) if layout.visible else Styled(Pretty(layout), "dim")
- )
- table.add_row(icon, text)
- _summary = table
- return _summary
-
- layout = self
- tree = Tree(
- summary(layout),
- guide_style=f"layout.tree.{layout.splitter.name}",
- highlight=True,
- )
-
- def recurse(tree: "Tree", layout: "Layout") -> None:
- for child in layout._children:
- recurse(
- tree.add(
- summary(child),
- guide_style=f"layout.tree.{child.splitter.name}",
- ),
- child,
- )
-
- recurse(tree, self)
- return tree
-
- def split(
- self,
- *layouts: Union["Layout", RenderableType],
- splitter: Union[Splitter, str] = "column",
- ) -> None:
- """Split the layout in to multiple sub-layouts.
-
- Args:
- *layouts (Layout): Positional arguments should be (sub) Layout instances.
- splitter (Union[Splitter, str]): Splitter instance or name of splitter.
- """
- _layouts = [
- layout if isinstance(layout, Layout) else Layout(layout)
- for layout in layouts
- ]
- try:
- self.splitter = (
- splitter
- if isinstance(splitter, Splitter)
- else self.splitters[splitter]()
- )
- except KeyError:
- raise NoSplitter(f"No splitter called {splitter!r}")
- self._children[:] = _layouts
-
- def add_split(self, *layouts: Union["Layout", RenderableType]) -> None:
- """Add a new layout(s) to existing split.
-
- Args:
- *layouts (Union[Layout, RenderableType]): Positional arguments should be renderables or (sub) Layout instances.
-
- """
- _layouts = (
- layout if isinstance(layout, Layout) else Layout(layout)
- for layout in layouts
- )
- self._children.extend(_layouts)
-
- def split_row(self, *layouts: Union["Layout", RenderableType]) -> None:
- """Split the layout in to a row (layouts side by side).
-
- Args:
- *layouts (Layout): Positional arguments should be (sub) Layout instances.
- """
- self.split(*layouts, splitter="row")
-
- def split_column(self, *layouts: Union["Layout", RenderableType]) -> None:
- """Split the layout in to a column (layouts stacked on top of each other).
-
- Args:
- *layouts (Layout): Positional arguments should be (sub) Layout instances.
- """
- self.split(*layouts, splitter="column")
-
- def unsplit(self) -> None:
- """Reset splits to initial state."""
- del self._children[:]
-
- def update(self, renderable: RenderableType) -> None:
- """Update renderable.
-
- Args:
- renderable (RenderableType): New renderable object.
- """
- with self._lock:
- self._renderable = renderable
-
- def refresh_screen(self, console: "Console", layout_name: str) -> None:
- """Refresh a sub-layout.
-
- Args:
- console (Console): Console instance where Layout is to be rendered.
- layout_name (str): Name of layout.
- """
- with self._lock:
- layout = self[layout_name]
- region, _lines = self._render_map[layout]
- (x, y, width, height) = region
- lines = console.render_lines(
- layout, console.options.update_dimensions(width, height)
- )
- self._render_map[layout] = LayoutRender(region, lines)
- console.update_screen_lines(lines, x, y)
-
- def _make_region_map(self, width: int, height: int) -> RegionMap:
- """Create a dict that maps layout on to Region."""
- stack: List[Tuple[Layout, Region]] = [(self, Region(0, 0, width, height))]
- push = stack.append
- pop = stack.pop
- layout_regions: List[Tuple[Layout, Region]] = []
- append_layout_region = layout_regions.append
- while stack:
- append_layout_region(pop())
- layout, region = layout_regions[-1]
- children = layout.children
- if children:
- for child_and_region in layout.splitter.divide(children, region):
- push(child_and_region)
-
- region_map = {
- layout: region
- for layout, region in sorted(layout_regions, key=itemgetter(1))
- }
- return region_map
-
- def render(self, console: Console, options: ConsoleOptions) -> RenderMap:
- """Render the sub_layouts.
-
- Args:
- console (Console): Console instance.
- options (ConsoleOptions): Console options.
-
- Returns:
- RenderMap: A dict that maps Layout on to a tuple of Region, lines
- """
- render_width = options.max_width
- render_height = options.height or console.height
- region_map = self._make_region_map(render_width, render_height)
- layout_regions = [
- (layout, region)
- for layout, region in region_map.items()
- if not layout.children
- ]
- render_map: Dict["Layout", "LayoutRender"] = {}
- render_lines = console.render_lines
- update_dimensions = options.update_dimensions
-
- for layout, region in layout_regions:
- lines = render_lines(
- layout.renderable, update_dimensions(region.width, region.height)
- )
- render_map[layout] = LayoutRender(region, lines)
- return render_map
-
- def __rich_console__(
- self, console: Console, options: ConsoleOptions
- ) -> RenderResult:
- with self._lock:
- width = options.max_width or console.width
- height = options.height or console.height
- render_map = self.render(console, options.update_dimensions(width, height))
- self._render_map = render_map
- layout_lines: List[List[Segment]] = [[] for _ in range(height)]
- _islice = islice
- for (region, lines) in render_map.values():
- _x, y, _layout_width, layout_height = region
- for row, line in zip(
- _islice(layout_lines, y, y + layout_height), lines
- ):
- row.extend(line)
-
- new_line = Segment.line()
- for layout_row in layout_lines:
- yield from layout_row
- yield new_line
-
-
-if __name__ == "__main__":
- from pip._vendor.rich.console import Console
-
- console = Console()
- layout = Layout()
-
- layout.split_column(
- Layout(name="header", size=3),
- Layout(ratio=1, name="main"),
- Layout(size=10, name="footer"),
- )
-
- layout["main"].split_row(Layout(name="side"), Layout(name="body", ratio=2))
-
- layout["body"].split_row(Layout(name="content", ratio=2), Layout(name="s2"))
-
- layout["s2"].split_column(
- Layout(name="top"), Layout(name="middle"), Layout(name="bottom")
- )
-
- layout["side"].split_column(Layout(layout.tree, name="left1"), Layout(name="left2"))
-
- layout["content"].update("foo")
-
- console.print(layout)
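Beyond the `__main__` demo above, the `Splitter` ABC defined in this module can be subclassed to control how a region is divided. A hypothetical example using the standalone `rich` package (rather than pip's vendored copy), giving the first child a fixed three-row strip and the rest to the second child; it assumes exactly two children:

```python
# Sketch: a custom Splitter that divides a region into a fixed strip plus remainder.
from rich.console import Console
from rich.layout import Layout, Splitter
from rich.region import Region

class StripSplitter(Splitter):
    name = "strip"

    def get_tree_icon(self) -> str:
        return "[layout.tree.row]strip"

    def divide(self, children, region):
        x, y, width, height = region
        yield children[0], Region(x, y, width, 3)               # fixed 3-row strip
        yield children[1], Region(x, y + 3, width, height - 3)  # everything else

layout = Layout()
layout.split(Layout(name="top"), Layout(name="rest"), splitter=StripSplitter())
Console().print(layout)
```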
diff --git a/spaces/Benson/text-generation/Examples/918kiss Descarga Apk Kaya 2021.md b/spaces/Benson/text-generation/Examples/918kiss Descarga Apk Kaya 2021.md
deleted file mode 100644
index 38930fcbf99c074442ed601b46424bcbafb5fc59..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/918kiss Descarga Apk Kaya 2021.md
+++ /dev/null
@@ -1,73 +0,0 @@
-
-918kiss Kaya APK Download 2021: How to Play and Win Online Casino Games
-
-If you are looking for a fun and exciting way to enjoy online casino games on your mobile device, you should try 918kiss Kaya APK. This is one of the most popular and trusted online casino platforms in Malaysia, offering a variety of games such as slots, table games, live casino, fishing games, and more. In this article, we will show you how to download and install 918kiss Kaya APK on your Android or iOS device, how to register and log in to your account, and how to play and win online casino games on this platform.
-
-What is 918kiss Kaya APK?
-
-918kiss Kaya APK is an online casino application that gives you access to hundreds of games from different providers, such as 918Kiss, Mega888, Pussy888, XE88, Joker123, and more. You can play these games anytime and anywhere you want, as long as you have a stable Internet connection and a compatible device.
-Some of the features that make 918kiss Kaya APK stand out from other online casino platforms are:
-
-High-quality graphics and sound effects that create an immersive gaming experience.
-A smooth, user-friendly interface that makes it easy to navigate and play.
-A secure, encrypted system that protects your personal and financial information.
-Fair and random game results that ensure an even chance of winning.
-Generous bonuses and promotions that reward your loyalty and activity.
-24/7 customer service that provides fast, professional assistance.
-
-Benefits of 918kiss Kaya APK
-
-Some of the benefits you can enjoy when playing on 918kiss Kaya APK are:
-
-You can play a wide range of games across different genres and themes, such as classic slots, video slots, progressive slots, blackjack, roulette, baccarat, poker, sic bo, dragon tiger, fish hunter, and more.
-You can win big prizes and rewards, especially if you play the progressive slots or the live casino games.
-You can have fun and relax while improving your skills and strategies.
-
-How to download and install 918kiss Kaya APK?
-
-Downloading and installing 918kiss Kaya APK is very quick and easy. Just follow these simple steps:
-
-For Android devices
-
-Go to the official 918kiss Kaya APK website and click the download button for Android devices.
-Allow downloads from unknown sources in your device settings.
-Open the downloaded file and install the app.
-Launch the app and enjoy playing.
-
-For iOS devices
-
-Go to the official 918kiss Kaya APK website and click the download button for iOS devices.
-Trust the developer in your device settings.
-Open the downloaded file and install the app.
-Launch the app and enjoy playing.
-
-How to register and log in to 918kiss Kaya APK?
-
-To play on 918kiss Kaya APK, you need to register an account and log in with your username and password. Here is how you can do that:
-
-Register with an official agent
-
-The best way to register an account on 918kiss Kaya APK is to contact an official agent. You can find them on the official website, on social media platforms, or in online forums. They will guide you through the registration process and provide you with a username and password. You will also need to make a deposit to activate your account.
-
-Log in with your username and password
-
-Once you have your username and password, you can log in to your account on 918kiss Kaya APK. Simply enter your credentials on the login page and click the login button. You will then be able to access all the games and features of the platform.
-
-How to play and win online casino games on 918kiss Kaya APK?
-
-Playing and winning online casino games on 918kiss Kaya APK is not only fun but also rewarding. Here are some tips that can help improve your chances of winning:
-
-Choose your favorite game
-
-The first thing you need to do is choose a game that suits your taste and skill level. You can browse through the different categories and genres of games on 918kiss Kaya APK and try them for free or for real money. You can also check the reviews and ratings from other players to see which games are popular and profitable.
-
-Learn the rules and strategies
-
-The next thing you need to do is learn the rules and strategies of the game you have chosen. You can read the instructions and tips on the game screen or on the official 918kiss Kaya APK website. You can also watch videos or tutorials online that explain how to play and win the game. The more you know about the game, the better you can play it.
-
-Manage your bankroll and bet wisely
-
-The last thing you need to do is manage your bankroll and bet wisely. You should set a budget for your gaming session and stick to it. You should also avoid chasing your losses or betting more than you can afford. Make use of the bonuses and promotions that 918kiss Kaya APK offers to grow your bankroll and increase your chances of winning.
-
-Conclusion
-
-In conclusion, 918kiss Kaya APK is one of the best online casino platforms in Malaysia, offering a variety of games, features, benefits, and bonuses. You can download and install it on your Android or iOS device, register and log in to your account, and play and win online casino games on this platform. If you are looking for a fun and exciting way to enjoy online casino games on your mobile device, you should try 918kiss Kaya APK today!
-
-Frequently asked questions
-
-Here are some frequently asked questions about 918kiss Kaya APK:
-
-Is 918kiss Kaya APK safe and legal?
-
-Yes, 918kiss Kaya APK is safe and legal. It uses a secure, encrypted system that protects your personal and financial information. It also complies with the laws and regulations of the online gambling industry in Malaysia, and it is licensed and regulated by the relevant authorities.
-
-What are the minimum requirements to play on 918kiss Kaya APK?
-
-The minimum requirements to play on 918kiss Kaya APK are a compatible Android or iOS device, a stable Internet connection, and a registered account. You also need to be at least 18 years old to play on this platform.
-
-How can I withdraw my winnings from 918kiss Kaya APK?
-
-You can withdraw your winnings from 918kiss Kaya APK by contacting your official agent. They will process your withdrawal request and transfer your money to your bank account within 24 hours. The minimum withdrawal amount is RM50 and the maximum withdrawal amount is RM50,000 per day.
-
-Can I play on 918kiss Kaya APK with other players?
-
-Yes, you can play on 918kiss Kaya APK with other players. You can join the live casino games and interact with the live dealers and other players. You can also chat with other players in online forums or on social media platforms, make new friends, and share your gaming experiences with them.
-
-Can I get help if I have problems or questions about 918kiss Kaya APK?
-
-Yes, you can get help if you have any problems or questions about 918kiss Kaya APK. You can contact the 24/7 customer service team by phone, email, WhatsApp, WeChat, Telegram, or live chat. They will provide you with fast, professional assistance.
-
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Api-ms-win-core-localization-l1-2-0.dll Download.md b/spaces/Benson/text-generation/Examples/Api-ms-win-core-localization-l1-2-0.dll Download.md
deleted file mode 100644
index 6188b86283cb2a1c08c3acae14de33445623fd2c..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Api-ms-win-core-localization-l1-2-0.dll Download.md
+++ /dev/null
@@ -1,77 +0,0 @@
-
-
¿Qué es api-ms-win-core-localization-l1-2-0.dll y por qué lo necesita?
-
Si usted es un usuario de Windows, es posible que haya encontrado un mensaje de error diciendo que api-ms-win-core-localization-l1-2-0.dll falta o no se encuentra. Esto puede ser frustrante e impedirle ejecutar ciertos programas o aplicaciones. ¿Pero qué es este misterioso archivo y por qué es tan importante?
-
Api-ms-win-core-localization-l1-2-0.dll es un archivo de biblioteca de enlaces dinámicos (DLL) que contiene funciones y recursos relacionados con la localización, como la visualización de texto en el idioma correcto para una región o región en particular. El archivo DLL es parte del sistema operativo Microsoft Windows y es utilizado por muchos programas diferentes.
Cuando un programa necesita usar una función o recurso del archivo DLL, lo llama y lo carga en memoria. De esta manera, varios programas pueden compartir el mismo archivo DLL y ahorrar espacio y recursos. Sin embargo, si el archivo DLL falta o está dañado, el programa no puede acceder a él y mostrará un mensaje de error.
-
¿Qué causa los errores api-ms-win-core-localization-l1-2-0.dll?
-
Hay varias razones posibles por las que pueden ocurrir errores api-ms-win-core-localization-l1-2-0.dll en su PC con Windows. Algunas de ellas son:
-
-
Su software antivirus puede haber eliminado o puesto en cuarentena el archivo DLL como un falso positivo.
-
Es posible que haya eliminado accidentalmente o movido el archivo DLL a otra ubicación.
-
Es posible que haya instalado o desinstalado un programa que modificó o reemplazó el archivo DLL.
-
Es posible que tenga una versión defectuosa o desactualizada de Windows que haya dañado o sobrescrito el archivo DLL.
-
Es posible que tenga una infección de virus o malware que haya dañado o secuestrado el archivo DLL.
-
-
-
¿Cómo corregir los errores api-ms-win -core-localization-l1-2-0.dll?
-
Método 1: Comprobador de archivos del sistema de ejecución
-
System File Checker (SFC) es una herramienta integrada de Windows que puede escanear y reparar archivos del sistema dañados o faltantes, incluidos archivos DLL. Para ejecutar SFC, siga estos pasos:
-
-
Pulse la tecla de Windows + R para abrir el cuadro de diálogo Ejecutar.
-
Escriba cmd y presione Ctrl + Shift + Enter para ejecutar el símbolo del sistema como administrador.
-
Escriba sfc /scannow y presione Enter para iniciar el escaneo.
-
Espere a que se complete la exploración. Puede llevar algún tiempo, así que sea paciente.
-
Si SFC encuentra y corrige cualquier error, reinicie su PC y compruebe si el error DLL está resuelto.
-
-
-
Método 2: Descargar e instalar el archivo DLL desde una fuente de confianza
-
Si SFC no corrige el error DLL, puede intentar descargar e instalar el archivo DLL desde una fuente de confianza. Sin embargo, tenga cuidado al descargar archivos DLL desde Internet, ya que algunos sitios web pueden contener archivos maliciosos o desactualizados que pueden dañar su PC. Solo descargue archivos DLL de sitios web verificados, como or . Para descargar e instalar el archivo DLL, siga estos pasos:
-
-
Ir a o y buscar api-ms-win-core-localización-l1-2-0.dll.
-
Seleccione la versión apropiada del archivo DLL para su sistema Windows (32 bits o 64 bits).
-
Haga clic en Descargar y guarde el archivo ZIP en su PC.
-
Extraiga el archivo ZIP y copie el archivo DLL a la carpeta donde está instalado el programa que lo requiere. Por ejemplo, si obtiene el error al intentar ejecutar Skype, copie el archivo DLL en C: Archivos de programa Skype.
-
Si eso no funciona, copie el archivo DLL a la carpeta del sistema de Windows. Para Windows de 32 bits, cópielo a C: Windows System32. Para Windows de 64 bits, cópielo en C: Windows SysWOW64.
-
Reinicie su PC y compruebe si el error DLL está resuelto.
-
-
-
Method 3: Reinstall the program that requires the DLL

If neither of the methods above works, you can try reinstalling the program that triggers the error message. This can restore missing or corrupted files associated with the program, including the DLL. To reinstall the program, follow these steps:

1. Press the Windows key + R to open the Run dialog.
2. Type appwiz.cpl and press Enter to open Programs and Features.
3. Find and select the program that requires the DLL, such as Skype, WordPress, or a game.
4. Click Uninstall and follow the prompts to remove the program from your PC.
5. Restart your PC, then download and install the latest version of the program from its official website or source.
6. Check whether the DLL error is resolved.
How to prevent api-ms-win-core-localization-l1-2-0.dll errors in the future

To avoid api-ms-win-core-localization-l1-2-0.dll errors in the future, take a few preventive steps to keep your PC in good shape:

- Keep Windows up to date with the latest security patches and bug fixes. This closes vulnerabilities and issues that could affect system files.
- Run antivirus software regularly and scan your PC for viruses or malware. This removes malicious programs that could damage or hijack your DLLs.
- Back up your important files regularly and create a system restore point. This lets you recover your data and restore your system after a failure.
- Clean up your disk space and registry regularly. This removes junk files and invalid entries that can clutter or corrupt your system.
Conclusion

In this article you learned what api-ms-win-core-localization-l1-2-0.dll is and why you need it, what causes api-ms-win-core-localization-l1-2-0.dll errors, and how to fix them. You also picked up a few tips on preventing these errors in the future. We hope you found this article helpful and informative. If you did, please share it with your friends and leave a comment below. If you have any questions or suggestions, feel free to contact us. Thanks for reading!

Frequently asked questions

Here are some frequently asked questions about api-ms-win-core-localization-l1-2-0.dll:

What is a DLL file?
A DLL file is a dynamic link library that contains functions and resources that can be shared by several programs. DLLs are part of the Windows operating system and help save disk space and memory.

How do I know which DLL is missing?
When a DLL is missing, you usually see an error message naming the DLL and the program that requires it, for example: "api-ms-win-core-localization-l1-2-0.dll is missing from your computer. Try reinstalling the program to fix this problem."

Can I delete unused DLL files?
No. Do not delete any DLL unless you are certain no program or system function needs it. Deleting DLLs can cause errors or instability on your PC.

Where can I find more information about DLL files?
You can find more information on dedicated DLL reference websites. These sites provide details about each DLL, such as its description, size, version, and location.

How can I contact you for more help?
You can contact us through our website or by email. We will be happy to help with any issue or question related to api-ms-win-core-localization-l1-2-0.dll or other DLL files.
- 64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Apkpro.md b/spaces/Benson/text-generation/Examples/Apkpro.md
deleted file mode 100644
index 43de4a869df2d4708061aa5aaa64bf207b3f064d..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Apkpro.md
+++ /dev/null
@@ -1,69 +0,0 @@
-
-
APKPRO: What is it and how do you use it?

If you are a fan of mobile games and want to enjoy the best, newest, and most popular games and apps on your Android device, you may have heard of APKPRO. But what exactly is it, and how can you use it to improve your gaming experience? In this article we answer these questions and more: what APKPRO is, why you might use it, how to download and install it, and how to use it to download and play games and apps. Let's get started!

Introduction

What is APKPRO?

APKPRO is a website that offers free downloads of games and apps for Android devices. It covers a wide range of categories, such as action, adventure, arcade, puzzle, racing, simulation, sports, strategy, and more. You can find both new and old games and apps on APKPRO, as well as modified versions with unlimited money, coins, gems, lives, or other features. APKPRO also updates its content regularly, so there is always something new and exciting to play.

There are many reasons why you might want to use APKPRO to download games and apps for your Android device. Here are some of them:

- You can access games and apps that are not available in your region or on the Google Play Store.
- You can enjoy modified versions of games and apps with extra features or benefits.
- You can save money by downloading games and apps for free instead of paying for them.
- You can have fun exploring different genres and categories of games and apps.
- You can discover new games and apps you may not have heard of before.
How to download and install APKPRO

Step 1: Enable unknown sources

1. Open your device settings.
2. Tap Security or Privacy.
3. Find the option called Unknown sources, or Allow installation from unknown sources.
4. Turn it on or tick the box next to it.

Step 2: Download APKPRO from the official website

Now that you have enabled unknown sources, you can download APKPRO from its official website. To do this, follow these steps:

1. Scroll down until you see the download button.
2. Tap the download button and wait for the file to finish downloading.

Step 3: Install APKPRO on your device

Once the file has downloaded, you can install APKPRO on your device. To do this, follow these steps:

1. Open your file manager or your downloads folder.
2. Find the file named apkpro.apk or something similar.
3. Tap the file and follow the on-screen instructions.
4. Wait for the installation to finish.

How to use APKPRO to download and play games and apps

Now that APKPRO is installed on your device, you can use it to download and play games and apps. To do this, follow these steps:

Step 1: Browse the categories or search for the game or app you want

Open APKPRO and you will see a home screen with different categories of games and apps, such as Popular, Trending, and Featured. Swipe left or right to see more categories, or tap the menu icon in the top-left corner to see the full list. You can also use the search bar in the top-right corner to type the name of the game or app you are looking for.

Step 2: Tap the download button and wait for the file to download

Step 3: Open the file and install the game or app on your device

After the file has downloaded, open it and install the game or app on your device. To do this, follow these steps:

1. Swipe down from the top of the screen and tap the notification that says APKPRO downloaded.
2. Tap the file and follow the on-screen instructions.
3. Wait for the installation to finish.
4. Tap Open or Launch to start playing or using the app.
Conclusion

Summary of the main points

In this article we looked at what APKPRO is, why you might use it, how to download and install it, and how to use it to download and play games and apps. We saw that APKPRO is a website that offers free downloads of games and apps for Android devices, including modified versions with extra features or benefits, and that it is easy to use and offers a wide range of categories and genres to choose from.

Call to action

If you are a fan of mobile gaming and want to enjoy the best, newest, and most popular games and apps on your Android device, give APKPRO a try. Just remember to enable unknown sources in your settings before downloading and installing APKPRO, and always be careful about what you download from unknown sources. Happy gaming!

Frequently asked questions

What is APKPRO? APKPRO is a website that offers free downloads of games and apps for Android devices.

Is APKPRO safe? APKPRO is generally safe, but you should always be careful about what you download from unknown sources. Make sure you have a good antivirus app on your device and scan every file before installing it.

How do I update APKPRO? You can update APKPRO by visiting its official website and downloading the latest version of the app. You can also check for updates inside the app by tapping the menu icon in the top-left corner and selecting Check for updates.

How do I uninstall APKPRO? You can uninstall APKPRO by going to your device settings, tapping Apps or Applications, finding APKPRO in the list, tapping it, and selecting Uninstall.
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descarga De La Saga 3d Apk.md b/spaces/Benson/text-generation/Examples/Descarga De La Saga 3d Apk.md
deleted file mode 100644
index 103416757b41c019293bced0711bcc4cf7ce5a64..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descarga De La Saga 3d Apk.md
+++ /dev/null
@@ -1,74 +0,0 @@
-
-
How to download the AetherSX2 APK for PC

If you are a fan of PlayStation 2 games, you may have heard of AetherSX2, an emulator that lets you play PS2 games on your Android device. But what if you want to enjoy those games on a bigger screen, with better graphics and controls? In this article we show you how to download the AetherSX2 APK for PC and run it using different methods.

An APK file is an Android application package that contains all the files and data needed to install and run an app on an Android device. You can also run APK files on a PC using tools that emulate the Android environment or convert the APK into a compatible format.

There are two main ways to run APK files on a PC: using an Android emulator or using a browser extension. Let's look at each method in detail.
Method 1: Using an Android emulator

An Android emulator is software that creates a virtual Android device on your PC, on which you can install and run any Android app or game. Many Android emulators are available for free, such as BlueStacks, Nox, and LDPlayer. Here are the steps to run the AetherSX2 APK on PC with an Android emulator:

1. Download and install an Android emulator of your choice from its official website.
2. Launch the emulator and sign in with your Google account.
3. Download the AetherSX2 APK file from a reliable source (we discuss this later).
4. Drag and drop the APK file onto the emulator window, or click the Install APK button in the emulator menu.
5. Wait for the installation to finish, then launch the AetherSX2 app from the emulator's home screen.
6. Enjoy playing PS2 games on your PC.

Pros and cons of using an emulator

| Pros | Cons |
| --- | --- |
| You can access the Google Play Store and other Android features. | You need a powerful PC to run the emulator smoothly. |
| You can customize the emulator's settings, resolution, controls, and so on. | You need enough storage space to install the emulator and the apps. |
| You can play several games at once using multiple emulator instances. | You may run into compatibility issues with some apps or games. |
Method 2: Using a browser extension

A browser extension is software that adds extra features or functionality to your web browser. Some browser extensions can help you run APK files on a PC by converting them into a web app that opens in a new tab. One of the best-known extensions for this purpose is ARC Welder, which works with Google Chrome. Here are the steps to run the AetherSX2 APK on PC with ARC Welder:

1. Download and install Google Chrome from its official website if you do not already have it.
2. Download and install ARC Welder from the Chrome Web Store.
3. Download the AetherSX2 APK file from a reliable source (we discuss this later).
4. Launch Google Chrome and click the ARC Welder icon in the top-right corner.
5. Choose a directory where you want to store the converted APK files.
6. Click Add your APK and select the AetherSX2 APK file on your computer.
7. Configure the settings, such as orientation and form factor, to your preference.
8. Click Test to launch the AetherSX2 app in a new tab.
9. Enjoy playing PS2 games on your PC.

Pros and cons of using a browser extension

Using a browser extension has advantages and disadvantages you should weigh before choosing this method. Here are some of them:

| Pros | Cons |
| --- | --- |
| You can easily switch between different APK files without uninstalling or reinstalling them. | You may not be able to access all the features or functions of the app or game. |
| You can keep the converted APK files for offline use. | You may compromise your security or privacy by letting the extension access your data. |
How to download the AetherSX2 APK from a reliable source

Now that you know how to run the AetherSX2 APK on PC, you may be wondering where to download it. Many websites offer APK files for free, but not all of them are safe or trustworthy. Some may contain viruses, malware, or unwanted ads that can harm your device or data. You should therefore always download APK files from reputable sources with positive reviews and ratings from users and experts. Here are some tips for finding and downloading the AetherSX2 APK from a reliable source:

- Search for "AetherSX2 APK" on Google or another search engine and look for websites with high rank and authority, such as Uptodown, APKCombo, or APKPure.
- Check the website's domain name and make sure it matches the site's name. Avoid sites with suspicious or misleading domain names, such as .ru, .cn, or .tk.
- Read the description and details of the AetherSX2 APK file and make sure it is compatible with your device and with your emulator or browser extension. Look for information such as the version, size, developer, and update date.
- Read the user reviews and comments on the website and see what other people say about the AetherSX2 APK file. Look for positive reviews and ratings, as well as any complaints or issues reported by users.
- Download the AetherSX2 APK file from the website by clicking the download button or link. Avoid clicking any pop-ups or ads that may appear on the site.
Before installing and running the AetherSX2 APK file on your PC, you should also check it for viruses or malware that malicious actors may have hidden in or attached to it. This protects your PC from potential damage or infection. Here are some ways to check the APK file (a small hashing sketch follows this list):

- Use an online tool such as VirusTotal or Malwarebytes to scan the APK file for threats. These tools analyze the file with multiple antivirus engines and give you a report on its safety.
- Use the antivirus software on your PC to scan the APK file before installing it. Make sure your antivirus software is up to date and has real-time protection enabled.
- Use common sense and avoid installing any APK file that looks suspicious or has a low reputation or rating.
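If you would rather look the file up on VirusTotal by hash instead of uploading it, the sketch below (an editor's addition; the file name is only an example) computes the SHA-256 digest you can paste into the VirusTotal search box.

```python
import hashlib

def sha256_of(path, chunk_size=1024 * 1024):
    """Return the SHA-256 hex digest of a file, read in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

print(sha256_of("aethersx2.apk"))  # paste this digest into the VirusTotal search box
```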
Conclusion

In conclusion, downloading the AetherSX2 APK for PC is not difficult if you follow the steps and tips in this article. You can choose between an Android emulator and a browser extension to run the APK file on your PC, depending on your preference and convenience. You can also find and download the APK file from a reliable source and check it for viruses or malware before installing it. By doing so, you can enjoy playing PS2 games on your PC with AetherSX2, an emulator that offers strong performance, compatibility, and features.

Frequently asked questions

Here are some frequently asked questions about downloading the AetherSX2 APK for PC:

What is AetherSX2? AetherSX2 is an emulator that lets you play PlayStation 2 games on your Android device. It supports a wide range of PS2 games and offers features such as fast loading, high resolution, save states, and cheats.

Is AetherSX2 legal? AetherSX2 is legal as long as you own the original PS2 games and use them as ROMs or ISOs for the emulator. Downloading or distributing pirated or copyrighted PS2 games, however, is illegal and can have legal consequences.

What are the system requirements for AetherSX2? AetherSX2 requires an Android device with at least 4 GB of RAM, a 64-bit CPU, and Android 5.0 or higher. For PC, you need a Windows or Mac computer with at least 4 GB of RAM, a 64-bit CPU, and the Google Chrome browser.

Where can I get more information or support for AetherSX2? You can visit the official AetherSX2 website at https://aethersx2.com/ or join its Discord server at https://discord.gg/aethersx2. You can also follow its social media accounts on Facebook, Twitter, Instagram, and so on.
- 64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descargar 69 Yoruba Parte De La Pelcula 2.md b/spaces/Benson/text-generation/Examples/Descargar 69 Yoruba Parte De La Pelcula 2.md
deleted file mode 100644
index 93765bea113edeea4fed4d458d3808181898c438..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar 69 Yoruba Parte De La Pelcula 2.md
+++ /dev/null
@@ -1,93 +0,0 @@
-
-
Download 69 Yoruba Movie Part 2: A guide for Nollywood lovers

If you are a Nollywood fan, especially of the Yoruba genre, you may have heard of the movie 69, a bold and controversial film that has caused a big stir in the industry. The movie, released in 2021, is a sequel to the original 69 that came out in 2019. In this article we tell you everything you need to know about 69 Yoruba Movie Part 2: what it is about, who the actors are, how it was received, and, most importantly, how you can download it for free. So sit back and enjoy this guide for Nollywood lovers.

69 Yoruba Movie Part 2 is a Nigerian film produced by Shola Subair, a young actress and filmmaker. It stars veteran actor Ibrahim Chatta in the male lead, alongside other stars such as Tope Adebayo, Peter Ijagbemi, Akin Olaiya, and more. The film is directed by Tope Adebayo, who is also the son of the legendary actor Adebayo Salami.

The plot of the movie

The film tells the story of a young and beautiful lady named Lola (Shola Subair), who is loyal and committed to her boyfriend Lugard (Ibrahim Chatta), a notorious drug lord. Her life takes a dramatic turn when she meets Gbade (Peter Ijagbemi), a gentle and handsome man who offers her true love and happiness. Lola is torn between staying with Lugard, who provides luxury and security, and leaving him for Gbade, who gives her respect and romance. What will she choose, and what will the consequences of her choice be?

The cast and crew of the movie

Here are some of the main cast and crew members of 69 Yoruba Movie Part 2:

- Ibrahim Chatta: He plays Lugard, Lola's boyfriend. He is one of the most popular and versatile actors in Nollywood and has starred in films such as Sango, Omo Ghetto, Omo Ekun, Alani Pamolekun, and many more.
- Peter Ijagbemi: He plays Gbade, Lola's lover. He is a rising star in the industry and has appeared in films such as Sixty Nine (the original), Tango With Me, Living Funeral, and more.
- Tope Adebayo: He directed the film. He is also an actor and filmmaker and the son of Adebayo Salami, a veteran actor and producer. He has directed films such as Sixty Nine (the original), Omo Iya Osun, and more.
- Akin Olaiya: He plays Lugard's boss. He is an experienced actor and comedian who has appeared in films such as Jenifa, Omo Ghetto, Alakada, and more.
The reception and reviews of the movie

69 Yoruba Movie Part 2 was released on YouTube on February 14, 2021, as a Valentine's Day special. Since then the film has gathered over 1.5 million views and thousands of likes and comments. It has received mixed reviews from critics and viewers. Some praised the film for its bold and daring theme, its gripping story, its excellent acting, and its production quality. Others criticized it for its explicit and vulgar scenes, poor editing, unrealistic plot twists, and moral implications. Here are some of the YouTube comments:

- "This is one of the best Yoruba movies I have ever seen. The story is very realistic and relatable. The actors did a great job. Kudos to the producer and director."
- "This movie is trash. It promotes immorality and adultery. It is not suitable for children or decent people. It should be banned."
- "This movie is a waste of time and data. It is full of nonsense and rubbish. It has no message or value. It is just a cheap way to make money."
Why should you watch 69 Yoruba Movie Part 2?

If you are still wondering whether to watch 69 Yoruba Movie Part 2, here are a few reasons to give it a try:

It is a rare example of an 18+ rated Yoruba movie

Most Yoruba movies are family-friendly and suitable for general audiences; they usually avoid themes or scenes considered taboo or offensive in Yoruba culture. 69 Yoruba Movie Part 2 breaks that norm and explores the dark and sensual side of human relationships. It contains scenes that are graphic, erotic, violent, and shocking, and it is rated 18+ for mature audiences only.

It is a thrilling and sensual story of love and betrayal

The movie is not only about sex and violence; it is also about love and betrayal. It portrays the complex and emotional journey of Lola, who has to choose between two men who offer her different things, and it shows the consequences of her choice and how it affects her life and the lives of those around her. The film keeps you on the edge of your seat as the drama unfolds.

It showcases the talent and diversity of the Yoruba film industry

The movie also showcases the talent and diversity of the Yoruba film industry. It features some of the best actors and actresses in Nollywood, who deliver outstanding performances, and it demonstrates the creativity and innovation of a producer and director who dared to do something different from the usual Yoruba movies. It also reflects the rich culture and language of the Yoruba people, one of the largest ethnic groups in Nigeria.
How do you download 69 Yoruba Movie Part 2 for free?

The best websites to download Yoruba movies for free

Many websites offer free downloads of Yoruba movies, but not all of them are reliable or safe. Some may contain viruses or malware that can harm your device or steal your data; others may offer low-quality or incomplete downloads that ruin your viewing experience.

To avoid these problems, we recommend these three websites, which are trusted and well tested by many Nollywood fans:

Netnaija

Netnaija is one of the most popular and reliable websites for downloading Yoruba movies for free. It has a large, regularly updated collection of Yoruba movies across genres and categories, a user-friendly interface, and fast download speeds. To download 69 Yoruba Movie Part 2 from Netnaija, follow these steps:

1. Go to Netnaija and search for 69 Yoruba Movie Part 2 in the search box.
2. Select the movie from the search results and click on it.
3. Scroll to the bottom of the page and click the green button that says "Download".
4. Choose a download link from the list and click it.
5. Wait for the download to start and finish.

9jarocks

9jarocks is another website that offers free Yoruba movie downloads. It has a huge, diverse library of Yoruba movies in different formats and qualities, a simple and easy-to-use interface, and high download speeds. To download 69 Yoruba Movie Part 2 from 9jarocks, follow these steps:

1. Go to 9jarocks and search for 69 Yoruba Movie Part 2 in the search box.
2. Select the movie from the search results and click on it.
3. Select a download option from the list and click it.
4. Wait for the download to start and finish.

YouTube

YouTube is not only a website for watching videos online but also a way to keep videos for offline viewing. You can find many Yoruba movies on YouTube, including 69 Yoruba Movie Part 2. However, you cannot download videos directly from YouTube unless you have a YouTube Premium subscription; otherwise you will need a third-party downloader. We show you how to do that in the next section.
The best downloader for grabbing Yoruba movies from online streaming sites

If you want to download Yoruba movies from online streaming sites such as YouTube, you need a downloader that can capture and convert videos from those sites. Many downloaders are available online, but not all of them are safe or effective: some may contain viruses or malware that can harm your device or steal your data, and some produce low-quality or incomplete downloads that ruin your viewing experience.

To avoid these problems, we recommend this downloader, which is trusted and well tested by many Nollywood fans:

WonderFox Free HD Video Converter Factory

WonderFox Free HD Video Converter Factory is a powerful and versatile downloader that can grab videos from more than 300 online streaming sites, including YouTube, Vimeo, Dailymotion, Facebook, Instagram, and Twitter. It can also convert videos to more than 500 formats and devices, including MP4, AVI, MKV, MOV, iPhone, Android, and TV, and it can edit videos by trimming, cropping, rotating, adding subtitles, and so on. It is free, safe, fast, and easy to use. To use it to download 69 Yoruba Movie Part 2, follow these steps:

1. Launch the program and click "Downloader" on the main interface.
2. Click "+ New Download" in the top-left corner.
3. Go to YouTube and search for 69 Yoruba Movie Part 2, then copy the video URL.
4. Paste the URL into the downloader and click "Analyze". Wait for the analysis to finish.
5. Select your preferred resolution and format from the list and click "OK". You can also queue several videos to download at once.
6. Click "Download All" in the bottom-right corner, choose a destination folder for your downloads, and click "OK". Wait for the download to start and finish.
Conclusion

In conclusion, 69 Yoruba Movie Part 2 is a movie you should not miss if you are a Nollywood lover, especially of the Yoruba genre. It will keep you entertained, thrilled, and intrigued from start to finish, it will challenge you to think about the choices and consequences of love and betrayal, and it showcases the talent and diversity of the Yoruba film industry.

If you want to watch 69 Yoruba Movie Part 2, you can download it for free from the websites or with the downloader recommended in this article, or you can watch it online on YouTube or other streaming platforms. However you choose to watch it, we hope you enjoy it and share your thoughts with us in the comments section below.

Thanks for reading, and happy watching!

Frequently asked questions

Here are some of the most frequently asked questions about 69 Yoruba Movie Part 2:

Q: Is 69 Yoruba Movie Part 2 a true story?
A: No, 69 Yoruba Movie Part 2 is not a true story. It is a fictional story created by the film's producer and director.

Q: Where can I watch 69 Yoruba Movie Part 2 online?
A: You can watch 69 Yoruba Movie Part 2 online on YouTube or other streaming platforms. However, you may need to pay a subscription fee or watch ads to access some of these platforms.

Q: Who is the producer of 69 Yoruba Movie Part 2?
A: The producer of 69 Yoruba Movie Part 2 is Shola Subair, who also plays the female lead in the movie.

Q: What is the rating of 69 Yoruba Movie Part 2?
A: The rating of 69 Yoruba Movie Part 2 is 18+. It contains scenes that are graphic, erotic, violent, and shocking, and it is not suitable for children or sensitive viewers.

Q: How long is 69 Yoruba Movie Part 2?
A: The running time of 69 Yoruba Movie Part 2 is about one hour and thirty minutes.
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/cachecontrol/controller.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/cachecontrol/controller.py
deleted file mode 100644
index 7f23529f1155cd3bbfde335ccdb7fc483b9d2d19..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/cachecontrol/controller.py
+++ /dev/null
@@ -1,439 +0,0 @@
-# SPDX-FileCopyrightText: 2015 Eric Larson
-#
-# SPDX-License-Identifier: Apache-2.0
-
-"""
-The httplib2 algorithms ported for use with requests.
-"""
-import logging
-import re
-import calendar
-import time
-from email.utils import parsedate_tz
-
-from pip._vendor.requests.structures import CaseInsensitiveDict
-
-from .cache import DictCache, SeparateBodyBaseCache
-from .serialize import Serializer
-
-
-logger = logging.getLogger(__name__)
-
-URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
-
-PERMANENT_REDIRECT_STATUSES = (301, 308)
-
-
-def parse_uri(uri):
- """Parses a URI using the regex given in Appendix B of RFC 3986.
-
- (scheme, authority, path, query, fragment) = parse_uri(uri)
- """
- groups = URI.match(uri).groups()
- return (groups[1], groups[3], groups[4], groups[6], groups[8])
-
-
-class CacheController(object):
-    """An interface to see if a request should be cached or not."""
-
- def __init__(
- self, cache=None, cache_etags=True, serializer=None, status_codes=None
- ):
- self.cache = DictCache() if cache is None else cache
- self.cache_etags = cache_etags
- self.serializer = serializer or Serializer()
- self.cacheable_status_codes = status_codes or (200, 203, 300, 301, 308)
-
- @classmethod
- def _urlnorm(cls, uri):
- """Normalize the URL to create a safe key for the cache"""
- (scheme, authority, path, query, fragment) = parse_uri(uri)
- if not scheme or not authority:
- raise Exception("Only absolute URIs are allowed. uri = %s" % uri)
-
- scheme = scheme.lower()
- authority = authority.lower()
-
- if not path:
- path = "/"
-
- # Could do syntax based normalization of the URI before
- # computing the digest. See Section 6.2.2 of Std 66.
- request_uri = query and "?".join([path, query]) or path
- defrag_uri = scheme + "://" + authority + request_uri
-
- return defrag_uri
-
- @classmethod
- def cache_url(cls, uri):
- return cls._urlnorm(uri)
-
- def parse_cache_control(self, headers):
- known_directives = {
- # https://tools.ietf.org/html/rfc7234#section-5.2
- "max-age": (int, True),
- "max-stale": (int, False),
- "min-fresh": (int, True),
- "no-cache": (None, False),
- "no-store": (None, False),
- "no-transform": (None, False),
- "only-if-cached": (None, False),
- "must-revalidate": (None, False),
- "public": (None, False),
- "private": (None, False),
- "proxy-revalidate": (None, False),
- "s-maxage": (int, True),
- }
-
- cc_headers = headers.get("cache-control", headers.get("Cache-Control", ""))
-
- retval = {}
-
- for cc_directive in cc_headers.split(","):
- if not cc_directive.strip():
- continue
-
- parts = cc_directive.split("=", 1)
- directive = parts[0].strip()
-
- try:
- typ, required = known_directives[directive]
- except KeyError:
- logger.debug("Ignoring unknown cache-control directive: %s", directive)
- continue
-
- if not typ or not required:
- retval[directive] = None
- if typ:
- try:
- retval[directive] = typ(parts[1].strip())
- except IndexError:
- if required:
- logger.debug(
- "Missing value for cache-control " "directive: %s",
- directive,
- )
- except ValueError:
- logger.debug(
- "Invalid value for cache-control directive " "%s, must be %s",
- directive,
- typ.__name__,
- )
-
- return retval
-
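    # Worked example (editor's note, not part of the original file): given a
    # header mapping such as {"cache-control": "max-age=3600, no-cache"}, the
    # loop above returns {"max-age": 3600, "no-cache": None}. Valueless
    # directives are stored as None, and unknown directives are logged and
    # skipped rather than raising.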
- def cached_request(self, request):
- """
- Return a cached response if it exists in the cache, otherwise
- return False.
- """
- cache_url = self.cache_url(request.url)
- logger.debug('Looking up "%s" in the cache', cache_url)
- cc = self.parse_cache_control(request.headers)
-
- # Bail out if the request insists on fresh data
- if "no-cache" in cc:
- logger.debug('Request header has "no-cache", cache bypassed')
- return False
-
- if "max-age" in cc and cc["max-age"] == 0:
- logger.debug('Request header has "max_age" as 0, cache bypassed')
- return False
-
- # Request allows serving from the cache, let's see if we find something
- cache_data = self.cache.get(cache_url)
- if cache_data is None:
- logger.debug("No cache entry available")
- return False
-
- if isinstance(self.cache, SeparateBodyBaseCache):
- body_file = self.cache.get_body(cache_url)
- else:
- body_file = None
-
- # Check whether it can be deserialized
- resp = self.serializer.loads(request, cache_data, body_file)
- if not resp:
- logger.warning("Cache entry deserialization failed, entry ignored")
- return False
-
- # If we have a cached permanent redirect, return it immediately. We
- # don't need to test our response for other headers b/c it is
- # intrinsically "cacheable" as it is Permanent.
- #
- # See:
- # https://tools.ietf.org/html/rfc7231#section-6.4.2
- #
- # Client can try to refresh the value by repeating the request
- # with cache busting headers as usual (ie no-cache).
- if int(resp.status) in PERMANENT_REDIRECT_STATUSES:
- msg = (
- "Returning cached permanent redirect response "
- "(ignoring date and etag information)"
- )
- logger.debug(msg)
- return resp
-
- headers = CaseInsensitiveDict(resp.headers)
- if not headers or "date" not in headers:
- if "etag" not in headers:
- # Without date or etag, the cached response can never be used
- # and should be deleted.
- logger.debug("Purging cached response: no date or etag")
- self.cache.delete(cache_url)
- logger.debug("Ignoring cached response: no date")
- return False
-
- now = time.time()
- date = calendar.timegm(parsedate_tz(headers["date"]))
- current_age = max(0, now - date)
- logger.debug("Current age based on date: %i", current_age)
-
- # TODO: There is an assumption that the result will be a
- # urllib3 response object. This may not be best since we
- # could probably avoid instantiating or constructing the
- # response until we know we need it.
- resp_cc = self.parse_cache_control(headers)
-
- # determine freshness
- freshness_lifetime = 0
-
- # Check the max-age pragma in the cache control header
- if "max-age" in resp_cc:
- freshness_lifetime = resp_cc["max-age"]
- logger.debug("Freshness lifetime from max-age: %i", freshness_lifetime)
-
- # If there isn't a max-age, check for an expires header
- elif "expires" in headers:
- expires = parsedate_tz(headers["expires"])
- if expires is not None:
- expire_time = calendar.timegm(expires) - date
- freshness_lifetime = max(0, expire_time)
- logger.debug("Freshness lifetime from expires: %i", freshness_lifetime)
-
- # Determine if we are setting freshness limit in the
- # request. Note, this overrides what was in the response.
- if "max-age" in cc:
- freshness_lifetime = cc["max-age"]
- logger.debug(
- "Freshness lifetime from request max-age: %i", freshness_lifetime
- )
-
- if "min-fresh" in cc:
- min_fresh = cc["min-fresh"]
- # adjust our current age by our min fresh
- current_age += min_fresh
- logger.debug("Adjusted current age from min-fresh: %i", current_age)
-
- # Return entry if it is fresh enough
- if freshness_lifetime > current_age:
- logger.debug('The response is "fresh", returning cached response')
- logger.debug("%i > %i", freshness_lifetime, current_age)
- return resp
-
- # we're not fresh. If we don't have an Etag, clear it out
- if "etag" not in headers:
- logger.debug('The cached response is "stale" with no etag, purging')
- self.cache.delete(cache_url)
-
- # return the original handler
- return False
-
- def conditional_headers(self, request):
- cache_url = self.cache_url(request.url)
- resp = self.serializer.loads(request, self.cache.get(cache_url))
- new_headers = {}
-
- if resp:
- headers = CaseInsensitiveDict(resp.headers)
-
- if "etag" in headers:
- new_headers["If-None-Match"] = headers["ETag"]
-
- if "last-modified" in headers:
- new_headers["If-Modified-Since"] = headers["Last-Modified"]
-
- return new_headers
-
- def _cache_set(self, cache_url, request, response, body=None, expires_time=None):
- """
- Store the data in the cache.
- """
- if isinstance(self.cache, SeparateBodyBaseCache):
- # We pass in the body separately; just put a placeholder empty
- # string in the metadata.
- self.cache.set(
- cache_url,
- self.serializer.dumps(request, response, b""),
- expires=expires_time,
- )
- self.cache.set_body(cache_url, body)
- else:
- self.cache.set(
- cache_url,
- self.serializer.dumps(request, response, body),
- expires=expires_time,
- )
-
- def cache_response(self, request, response, body=None, status_codes=None):
- """
- Algorithm for caching requests.
-
- This assumes a requests Response object.
- """
- # From httplib2: Don't cache 206's since we aren't going to
- # handle byte range requests
- cacheable_status_codes = status_codes or self.cacheable_status_codes
- if response.status not in cacheable_status_codes:
- logger.debug(
- "Status code %s not in %s", response.status, cacheable_status_codes
- )
- return
-
- response_headers = CaseInsensitiveDict(response.headers)
-
- if "date" in response_headers:
- date = calendar.timegm(parsedate_tz(response_headers["date"]))
- else:
- date = 0
-
- # If we've been given a body, our response has a Content-Length, that
- # Content-Length is valid then we can check to see if the body we've
- # been given matches the expected size, and if it doesn't we'll just
- # skip trying to cache it.
- if (
- body is not None
- and "content-length" in response_headers
- and response_headers["content-length"].isdigit()
- and int(response_headers["content-length"]) != len(body)
- ):
- return
-
- cc_req = self.parse_cache_control(request.headers)
- cc = self.parse_cache_control(response_headers)
-
- cache_url = self.cache_url(request.url)
- logger.debug('Updating cache with response from "%s"', cache_url)
-
- # Delete it from the cache if we happen to have it stored there
- no_store = False
- if "no-store" in cc:
- no_store = True
- logger.debug('Response header has "no-store"')
- if "no-store" in cc_req:
- no_store = True
- logger.debug('Request header has "no-store"')
- if no_store and self.cache.get(cache_url):
- logger.debug('Purging existing cache entry to honor "no-store"')
- self.cache.delete(cache_url)
- if no_store:
- return
-
- # https://tools.ietf.org/html/rfc7234#section-4.1:
- # A Vary header field-value of "*" always fails to match.
- # Storing such a response leads to a deserialization warning
- # during cache lookup and is not allowed to ever be served,
- # so storing it can be avoided.
- if "*" in response_headers.get("vary", ""):
- logger.debug('Response header has "Vary: *"')
- return
-
- # If we've been given an etag, then keep the response
- if self.cache_etags and "etag" in response_headers:
- expires_time = 0
- if response_headers.get("expires"):
- expires = parsedate_tz(response_headers["expires"])
- if expires is not None:
- expires_time = calendar.timegm(expires) - date
-
- expires_time = max(expires_time, 14 * 86400)
-
- logger.debug("etag object cached for {0} seconds".format(expires_time))
- logger.debug("Caching due to etag")
- self._cache_set(cache_url, request, response, body, expires_time)
-
- # Add to the cache any permanent redirects. We do this before looking
- # that the Date headers.
- elif int(response.status) in PERMANENT_REDIRECT_STATUSES:
- logger.debug("Caching permanent redirect")
- self._cache_set(cache_url, request, response, b"")
-
- # Add to the cache if the response headers demand it. If there
- # is no date header then we can't do anything about expiring
- # the cache.
- elif "date" in response_headers:
- date = calendar.timegm(parsedate_tz(response_headers["date"]))
- # cache when there is a max-age > 0
- if "max-age" in cc and cc["max-age"] > 0:
- logger.debug("Caching b/c date exists and max-age > 0")
- expires_time = cc["max-age"]
- self._cache_set(
- cache_url,
- request,
- response,
- body,
- expires_time,
- )
-
- # If the request can expire, it means we should cache it
- # in the meantime.
- elif "expires" in response_headers:
- if response_headers["expires"]:
- expires = parsedate_tz(response_headers["expires"])
- if expires is not None:
- expires_time = calendar.timegm(expires) - date
- else:
- expires_time = None
-
- logger.debug(
- "Caching b/c of expires header. expires in {0} seconds".format(
- expires_time
- )
- )
- self._cache_set(
- cache_url,
- request,
- response,
- body,
- expires_time,
- )
-
- def update_cached_response(self, request, response):
- """On a 304 we will get a new set of headers that we want to
- update our cached value with, assuming we have one.
-
- This should only ever be called when we've sent an ETag and
- gotten a 304 as the response.
- """
- cache_url = self.cache_url(request.url)
-
- cached_response = self.serializer.loads(request, self.cache.get(cache_url))
-
- if not cached_response:
- # we didn't have a cached response
- return response
-
- # Lets update our headers with the headers from the new request:
- # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
- #
- # The server isn't supposed to send headers that would make
- # the cached body invalid. But... just in case, we'll be sure
- # to strip out ones we know that might be problmatic due to
-        # to strip out ones we know might be problematic due to
- excluded_headers = ["content-length"]
-
- cached_response.headers.update(
- dict(
- (k, v)
- for k, v in response.headers.items()
- if k.lower() not in excluded_headers
- )
- )
-
- # we want a 200 b/c we have content via the cache
- cached_response.status = 200
-
- # update our cache
- self._cache_set(cache_url, request, cached_response)
-
- return cached_response
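Editor's note: this controller is normally driven indirectly, through the adapter that the CacheControl wrapper installs on a requests session. A rough usage sketch (written against the standalone cachecontrol package rather than pip's vendored copy; the cache directory name is just an example):

```python
import requests
from cachecontrol import CacheControl
from cachecontrol.caches.file_cache import FileCache

# Wrap a normal requests session; CacheController decides what gets stored and served.
session = CacheControl(requests.Session(), cache=FileCache(".web_cache"))

response = session.get("https://pypi.org/simple/")
print(response.status_code, "from_cache:", getattr(response, "from_cache", False))
```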
diff --git a/spaces/Biswa13/Examples-Of-AI-2023/README.md b/spaces/Biswa13/Examples-Of-AI-2023/README.md
deleted file mode 100644
index 72faee6cd99d38c523aa1b1449be709a7e7edc41..0000000000000000000000000000000000000000
--- a/spaces/Biswa13/Examples-Of-AI-2023/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Examples Of AI 2023
-emoji: 📚
-colorFrom: purple
-colorTo: indigo
-sdk: streamlit
-sdk_version: 1.17.0
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/CVH-vn1210/make_hair/minigpt4/processors/randaugment.py b/spaces/CVH-vn1210/make_hair/minigpt4/processors/randaugment.py
deleted file mode 100644
index 5c6a9e6d62f74358f490d19546c9829b3ac6aaef..0000000000000000000000000000000000000000
--- a/spaces/CVH-vn1210/make_hair/minigpt4/processors/randaugment.py
+++ /dev/null
@@ -1,398 +0,0 @@
-"""
- Copyright (c) 2022, salesforce.com, inc.
- All rights reserved.
- SPDX-License-Identifier: BSD-3-Clause
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
-"""
-
-import cv2
-import numpy as np
-
-import torch
-
-
-## aug functions
-def identity_func(img):
- return img
-
-
-def autocontrast_func(img, cutoff=0):
- """
- same output as PIL.ImageOps.autocontrast
- """
- n_bins = 256
-
- def tune_channel(ch):
- n = ch.size
- cut = cutoff * n // 100
- if cut == 0:
- high, low = ch.max(), ch.min()
- else:
- hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
- low = np.argwhere(np.cumsum(hist) > cut)
- low = 0 if low.shape[0] == 0 else low[0]
- high = np.argwhere(np.cumsum(hist[::-1]) > cut)
- high = n_bins - 1 if high.shape[0] == 0 else n_bins - 1 - high[0]
- if high <= low:
- table = np.arange(n_bins)
- else:
- scale = (n_bins - 1) / (high - low)
- offset = -low * scale
- table = np.arange(n_bins) * scale + offset
- table[table < 0] = 0
- table[table > n_bins - 1] = n_bins - 1
- table = table.clip(0, 255).astype(np.uint8)
- return table[ch]
-
- channels = [tune_channel(ch) for ch in cv2.split(img)]
- out = cv2.merge(channels)
- return out
-
-
-def equalize_func(img):
- """
- same output as PIL.ImageOps.equalize
- PIL's implementation is different from cv2.equalize
- """
- n_bins = 256
-
- def tune_channel(ch):
- hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
- non_zero_hist = hist[hist != 0].reshape(-1)
- step = np.sum(non_zero_hist[:-1]) // (n_bins - 1)
- if step == 0:
- return ch
- n = np.empty_like(hist)
- n[0] = step // 2
- n[1:] = hist[:-1]
- table = (np.cumsum(n) // step).clip(0, 255).astype(np.uint8)
- return table[ch]
-
- channels = [tune_channel(ch) for ch in cv2.split(img)]
- out = cv2.merge(channels)
- return out
-
-
-def rotate_func(img, degree, fill=(0, 0, 0)):
- """
- like PIL, rotate by degree, not radians
- """
- H, W = img.shape[0], img.shape[1]
- center = W / 2, H / 2
- M = cv2.getRotationMatrix2D(center, degree, 1)
- out = cv2.warpAffine(img, M, (W, H), borderValue=fill)
- return out
-
-
-def solarize_func(img, thresh=128):
- """
-    same output as PIL.ImageOps.solarize
- """
- table = np.array([el if el < thresh else 255 - el for el in range(256)])
- table = table.clip(0, 255).astype(np.uint8)
- out = table[img]
- return out
-
-
-def color_func(img, factor):
- """
- same output as PIL.ImageEnhance.Color
- """
- ## implementation according to PIL definition, quite slow
- # degenerate = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[:, :, np.newaxis]
- # out = blend(degenerate, img, factor)
- # M = (
- # np.eye(3) * factor
- # + np.float32([0.114, 0.587, 0.299]).reshape(3, 1) * (1. - factor)
- # )[np.newaxis, np.newaxis, :]
- M = np.float32(
- [[0.886, -0.114, -0.114], [-0.587, 0.413, -0.587], [-0.299, -0.299, 0.701]]
- ) * factor + np.float32([[0.114], [0.587], [0.299]])
- out = np.matmul(img, M).clip(0, 255).astype(np.uint8)
- return out
-
-
-def contrast_func(img, factor):
- """
- same output as PIL.ImageEnhance.Contrast
- """
- mean = np.sum(np.mean(img, axis=(0, 1)) * np.array([0.114, 0.587, 0.299]))
- table = (
- np.array([(el - mean) * factor + mean for el in range(256)])
- .clip(0, 255)
- .astype(np.uint8)
- )
- out = table[img]
- return out
-
-
-def brightness_func(img, factor):
- """
-    same output as PIL.ImageEnhance.Brightness
- """
- table = (np.arange(256, dtype=np.float32) * factor).clip(0, 255).astype(np.uint8)
- out = table[img]
- return out
-
-
-def sharpness_func(img, factor):
- """
-    The differences between this result and PIL are only on the 4 boundaries; the
-    center areas are the same.
- """
- kernel = np.ones((3, 3), dtype=np.float32)
- kernel[1][1] = 5
- kernel /= 13
- degenerate = cv2.filter2D(img, -1, kernel)
- if factor == 0.0:
- out = degenerate
- elif factor == 1.0:
- out = img
- else:
- out = img.astype(np.float32)
- degenerate = degenerate.astype(np.float32)[1:-1, 1:-1, :]
- out[1:-1, 1:-1, :] = degenerate + factor * (out[1:-1, 1:-1, :] - degenerate)
- out = out.astype(np.uint8)
- return out
-
-
-def shear_x_func(img, factor, fill=(0, 0, 0)):
- H, W = img.shape[0], img.shape[1]
- M = np.float32([[1, factor, 0], [0, 1, 0]])
- out = cv2.warpAffine(
- img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
- ).astype(np.uint8)
- return out
-
-
-def translate_x_func(img, offset, fill=(0, 0, 0)):
- """
- same output as PIL.Image.transform
- """
- H, W = img.shape[0], img.shape[1]
- M = np.float32([[1, 0, -offset], [0, 1, 0]])
- out = cv2.warpAffine(
- img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
- ).astype(np.uint8)
- return out
-
-
-def translate_y_func(img, offset, fill=(0, 0, 0)):
- """
- same output as PIL.Image.transform
- """
- H, W = img.shape[0], img.shape[1]
- M = np.float32([[1, 0, 0], [0, 1, -offset]])
- out = cv2.warpAffine(
- img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
- ).astype(np.uint8)
- return out
-
-
-def posterize_func(img, bits):
- """
- same output as PIL.ImageOps.posterize
- """
- out = np.bitwise_and(img, np.uint8(255 << (8 - bits)))
- return out
-
-
-def shear_y_func(img, factor, fill=(0, 0, 0)):
- H, W = img.shape[0], img.shape[1]
- M = np.float32([[1, 0, 0], [factor, 1, 0]])
- out = cv2.warpAffine(
- img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR
- ).astype(np.uint8)
- return out
-
-
-def cutout_func(img, pad_size, replace=(0, 0, 0)):
- replace = np.array(replace, dtype=np.uint8)
- H, W = img.shape[0], img.shape[1]
- rh, rw = np.random.random(2)
- pad_size = pad_size // 2
- ch, cw = int(rh * H), int(rw * W)
- x1, x2 = max(ch - pad_size, 0), min(ch + pad_size, H)
- y1, y2 = max(cw - pad_size, 0), min(cw + pad_size, W)
- out = img.copy()
- out[x1:x2, y1:y2, :] = replace
- return out
-
-
-### level to args
-def enhance_level_to_args(MAX_LEVEL):
- def level_to_args(level):
- return ((level / MAX_LEVEL) * 1.8 + 0.1,)
-
- return level_to_args
-
-
-def shear_level_to_args(MAX_LEVEL, replace_value):
- def level_to_args(level):
- level = (level / MAX_LEVEL) * 0.3
- if np.random.random() > 0.5:
- level = -level
- return (level, replace_value)
-
- return level_to_args
-
-
-def translate_level_to_args(translate_const, MAX_LEVEL, replace_value):
- def level_to_args(level):
- level = (level / MAX_LEVEL) * float(translate_const)
- if np.random.random() > 0.5:
- level = -level
- return (level, replace_value)
-
- return level_to_args
-
-
-def cutout_level_to_args(cutout_const, MAX_LEVEL, replace_value):
- def level_to_args(level):
- level = int((level / MAX_LEVEL) * cutout_const)
- return (level, replace_value)
-
- return level_to_args
-
-
-def solarize_level_to_args(MAX_LEVEL):
- def level_to_args(level):
- level = int((level / MAX_LEVEL) * 256)
- return (level,)
-
- return level_to_args
-
-
-def none_level_to_args(level):
- return ()
-
-
-def posterize_level_to_args(MAX_LEVEL):
- def level_to_args(level):
- level = int((level / MAX_LEVEL) * 4)
- return (level,)
-
- return level_to_args
-
-
-def rotate_level_to_args(MAX_LEVEL, replace_value):
- def level_to_args(level):
- level = (level / MAX_LEVEL) * 30
- if np.random.random() < 0.5:
- level = -level
- return (level, replace_value)
-
- return level_to_args
-
-
-func_dict = {
- "Identity": identity_func,
- "AutoContrast": autocontrast_func,
- "Equalize": equalize_func,
- "Rotate": rotate_func,
- "Solarize": solarize_func,
- "Color": color_func,
- "Contrast": contrast_func,
- "Brightness": brightness_func,
- "Sharpness": sharpness_func,
- "ShearX": shear_x_func,
- "TranslateX": translate_x_func,
- "TranslateY": translate_y_func,
- "Posterize": posterize_func,
- "ShearY": shear_y_func,
-}
-
-translate_const = 10
-MAX_LEVEL = 10
-replace_value = (128, 128, 128)
-arg_dict = {
- "Identity": none_level_to_args,
- "AutoContrast": none_level_to_args,
- "Equalize": none_level_to_args,
- "Rotate": rotate_level_to_args(MAX_LEVEL, replace_value),
- "Solarize": solarize_level_to_args(MAX_LEVEL),
- "Color": enhance_level_to_args(MAX_LEVEL),
- "Contrast": enhance_level_to_args(MAX_LEVEL),
- "Brightness": enhance_level_to_args(MAX_LEVEL),
- "Sharpness": enhance_level_to_args(MAX_LEVEL),
- "ShearX": shear_level_to_args(MAX_LEVEL, replace_value),
- "TranslateX": translate_level_to_args(translate_const, MAX_LEVEL, replace_value),
- "TranslateY": translate_level_to_args(translate_const, MAX_LEVEL, replace_value),
- "Posterize": posterize_level_to_args(MAX_LEVEL),
- "ShearY": shear_level_to_args(MAX_LEVEL, replace_value),
-}
-
-
-class RandomAugment(object):
- def __init__(self, N=2, M=10, isPIL=False, augs=[]):
- self.N = N
- self.M = M
- self.isPIL = isPIL
- if augs:
- self.augs = augs
- else:
- self.augs = list(arg_dict.keys())
-
- def get_random_ops(self):
- sampled_ops = np.random.choice(self.augs, self.N)
- return [(op, 0.5, self.M) for op in sampled_ops]
-
- def __call__(self, img):
- if self.isPIL:
- img = np.array(img)
- ops = self.get_random_ops()
- for name, prob, level in ops:
- if np.random.random() > prob:
- continue
- args = arg_dict[name](level)
- img = func_dict[name](img, *args)
- return img
-
-
-class VideoRandomAugment(object):
- def __init__(self, N=2, M=10, p=0.0, tensor_in_tensor_out=True, augs=[]):
- self.N = N
- self.M = M
- self.p = p
- self.tensor_in_tensor_out = tensor_in_tensor_out
- if augs:
- self.augs = augs
- else:
- self.augs = list(arg_dict.keys())
-
- def get_random_ops(self):
- sampled_ops = np.random.choice(self.augs, self.N, replace=False)
- return [(op, self.M) for op in sampled_ops]
-
- def __call__(self, frames):
- assert (
- frames.shape[-1] == 3
- ), "Expecting last dimension for 3-channels RGB (b, h, w, c)."
-
- if self.tensor_in_tensor_out:
- frames = frames.numpy().astype(np.uint8)
-
- num_frames = frames.shape[0]
-
- ops = num_frames * [self.get_random_ops()]
- apply_or_not = num_frames * [np.random.random(size=self.N) > self.p]
-
- frames = torch.stack(
- list(map(self._aug, frames, ops, apply_or_not)), dim=0
- ).float()
-
- return frames
-
- def _aug(self, img, ops, apply_or_not):
- for i, (name, level) in enumerate(ops):
- if not apply_or_not[i]:
- continue
- args = arg_dict[name](level)
- img = func_dict[name](img, *args)
- return torch.from_numpy(img)
-
-
-if __name__ == "__main__":
- a = RandomAugment()
-    img = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)  # use a uint8 image: several ops index 256-entry lookup tables by pixel value
- a(img)
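Editor's note: a slightly fuller usage sketch than the __main__ block above, showing how RandomAugment is typically applied to a PIL image. The augmentation names are a subset of the keys in arg_dict, and the import path simply mirrors this file's location in the repository.

```python
import numpy as np
from PIL import Image

from minigpt4.processors.randaugment import RandomAugment  # path taken from this file's location

aug = RandomAugment(
    N=2, M=7, isPIL=True,
    augs=["Identity", "AutoContrast", "Brightness", "Sharpness", "Equalize"],
)

img = Image.fromarray(np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8))
out = aug(img)  # returns a numpy uint8 array with the same shape
print(out.shape, out.dtype)
```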
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/structures/masks.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/structures/masks.py
deleted file mode 100644
index b3732a5f4619b91183b5bf4f307256eea7b8fb3d..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/structures/masks.py
+++ /dev/null
@@ -1,424 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-import copy
-import itertools
-import numpy as np
-from typing import Any, Iterator, List, Union
-import pycocotools.mask as mask_utils
-import torch
-
-from detectron2.layers import cat
-from detectron2.layers.roi_align import ROIAlign
-
-from .boxes import Boxes
-
-
-def polygon_area(x, y):
- # Using the shoelace formula
- # https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
- return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
-
-
-def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
- """
- Args:
- polygons (list[ndarray]): each array has shape (Nx2,)
- height, width (int)
-
- Returns:
- ndarray: a bool mask of shape (height, width)
- """
- assert len(polygons) > 0, "COCOAPI does not support empty polygons"
- rles = mask_utils.frPyObjects(polygons, height, width)
- rle = mask_utils.merge(rles)
- return mask_utils.decode(rle).astype(bool)
-
-
-def rasterize_polygons_within_box(
- polygons: List[np.ndarray], box: np.ndarray, mask_size: int
-) -> torch.Tensor:
- """
- Rasterize the polygons into a mask image and
- crop the mask content in the given box.
- The cropped mask is resized to (mask_size, mask_size).
-
- This function is used when generating training targets for mask head in Mask R-CNN.
- Given original ground-truth masks for an image, new ground-truth mask
- training targets in the size of `mask_size x mask_size`
- must be provided for each predicted box. This function will be called to
- produce such targets.
-
- Args:
- polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
- box: 4-element numpy array
- mask_size (int):
-
- Returns:
- Tensor: BoolTensor of shape (mask_size, mask_size)
- """
- # 1. Shift the polygons w.r.t the boxes
- w, h = box[2] - box[0], box[3] - box[1]
-
- polygons = copy.deepcopy(polygons)
- for p in polygons:
- p[0::2] = p[0::2] - box[0]
- p[1::2] = p[1::2] - box[1]
-
- # 2. Rescale the polygons to the new box size
- ratio_h = mask_size / max(h, 0.1)
- ratio_w = mask_size / max(w, 0.1)
-
- if ratio_h == ratio_w:
- for p in polygons:
- p *= ratio_h
- else:
- for p in polygons:
- p[0::2] *= ratio_w
- p[1::2] *= ratio_h
-
- # 3. Rasterize the polygons with coco api
- mask = polygons_to_bitmask(polygons, mask_size, mask_size)
- mask = torch.from_numpy(mask)
- return mask
-
-
-class BitMasks:
- """
- This class stores the segmentation masks for all objects in one image, in
- the form of bitmaps.
-
- Attributes:
- tensor: bool Tensor of N,H,W, representing N instances in the image.
- """
-
- def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
- """
- Args:
- tensor: bool Tensor of N,H,W, representing N instances in the image.
- """
- device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
- tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
- assert tensor.dim() == 3, tensor.size()
- self.image_size = tensor.shape[1:]
- self.tensor = tensor
-
- def to(self, device: str) -> "BitMasks":
- return BitMasks(self.tensor.to(device))
-
- @property
- def device(self) -> torch.device:
- return self.tensor.device
-
- def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
- """
- Returns:
- BitMasks: Create a new :class:`BitMasks` by indexing.
-
- The following usage are allowed:
-
- 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
- 2. `new_masks = masks[2:10]`: return a slice of masks.
- 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
- with `length = len(masks)`. Nonzero elements in the vector will be selected.
-
- Note that the returned object might share storage with this object,
- subject to Pytorch's indexing semantics.
- """
- if isinstance(item, int):
- return BitMasks(self.tensor[item].unsqueeze(0))
- m = self.tensor[item]
- assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
- item, m.shape
- )
- return BitMasks(m)
-
- def __iter__(self) -> torch.Tensor:
- yield from self.tensor
-
- def __repr__(self) -> str:
- s = self.__class__.__name__ + "("
- s += "num_instances={})".format(len(self.tensor))
- return s
-
- def __len__(self) -> int:
- return self.tensor.shape[0]
-
- def nonempty(self) -> torch.Tensor:
- """
- Find masks that are non-empty.
-
- Returns:
- Tensor: a BoolTensor which represents
- whether each mask is empty (False) or non-empty (True).
- """
- return self.tensor.flatten(1).any(dim=1)
-
- @staticmethod
- def from_polygon_masks(
- polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
- ) -> "BitMasks":
- """
- Args:
- polygon_masks (list[list[ndarray]] or PolygonMasks)
- height, width (int)
- """
- if isinstance(polygon_masks, PolygonMasks):
- polygon_masks = polygon_masks.polygons
- masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
- return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
-
- def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
- """
- Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
- This can be used to prepare training targets for Mask R-CNN.
- It has less reconstruction error compared to rasterization with polygons.
- However, we observe no difference in accuracy,
- though BitMasks requires more memory to store all the masks.
-
- Args:
- boxes (Tensor): Nx4 tensor storing the boxes for each mask
- mask_size (int): the size of the rasterized mask.
-
- Returns:
- Tensor:
- A bool tensor of shape (N, mask_size, mask_size), where
- N is the number of predicted boxes for this image.
- """
- assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
- device = self.tensor.device
-
- batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
- rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
-
- bit_masks = self.tensor.to(dtype=torch.float32)
- rois = rois.to(device=device)
- output = (
- ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
- .forward(bit_masks[:, None, :, :], rois)
- .squeeze(1)
- )
- output = output >= 0.5
- return output
-
- def get_bounding_boxes(self) -> None:
- # not needed now
- raise NotImplementedError
-
- @staticmethod
- def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
- """
- Concatenates a list of BitMasks into a single BitMasks
-
- Arguments:
- bitmasks_list (list[BitMasks])
-
- Returns:
- BitMasks: the concatenated BitMasks
- """
- assert isinstance(bitmasks_list, (list, tuple))
- assert len(bitmasks_list) > 0
- assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)
-
- cat_bitmasks = type(bitmasks_list[0])(cat([bm.tensor for bm in bitmasks_list], dim=0))
- return cat_bitmasks
-
-
-class PolygonMasks:
- """
- This class stores the segmentation masks for all objects in one image, in the form of polygons.
-
- Attributes:
- polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.
- """
-
- def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
- """
- Arguments:
- polygons (list[list[np.ndarray]]): The first
- level of the list correspond to individual instances,
- the second level to all the polygons that compose the
- instance, and the third level to the polygon coordinates.
- The third level array should have the format of
- [x0, y0, x1, y1, ..., xn, yn] (n >= 3).
- """
- assert isinstance(polygons, list), (
- "Cannot create PolygonMasks: Expect a list of list of polygons per image. "
- "Got '{}' instead.".format(type(polygons))
- )
-
- def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
- # Use float64 for higher precision, because why not?
- # Always put polygons on CPU (self.to is a no-op) since they
- # are supposed to be small tensors.
- # May need to change this assumption if GPU placement becomes useful
- if isinstance(t, torch.Tensor):
- t = t.cpu().numpy()
- return np.asarray(t).astype("float64")
-
- def process_polygons(
- polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]
- ) -> List[np.ndarray]:
- assert isinstance(polygons_per_instance, list), (
- "Cannot create polygons: Expect a list of polygons per instance. "
- "Got '{}' instead.".format(type(polygons_per_instance))
- )
- # transform the polygon to a tensor
- polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
- for polygon in polygons_per_instance:
- assert len(polygon) % 2 == 0 and len(polygon) >= 6
- return polygons_per_instance
-
- self.polygons: List[List[np.ndarray]] = [
- process_polygons(polygons_per_instance) for polygons_per_instance in polygons
- ]
-
- def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks":
- return self
-
- @property
- def device(self) -> torch.device:
- return torch.device("cpu")
-
- def get_bounding_boxes(self) -> Boxes:
- """
- Returns:
- Boxes: tight bounding boxes around polygon masks.
- """
- boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)
- for idx, polygons_per_instance in enumerate(self.polygons):
- minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32)
- maxxy = torch.zeros(2, dtype=torch.float32)
- for polygon in polygons_per_instance:
- coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)
- minxy = torch.min(minxy, torch.min(coords, dim=0).values)
- maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)
- boxes[idx, :2] = minxy
- boxes[idx, 2:] = maxxy
- return Boxes(boxes)
-
- def nonempty(self) -> torch.Tensor:
- """
- Find masks that are non-empty.
-
- Returns:
- Tensor:
- a BoolTensor which represents whether each mask is empty (False) or not (True).
- """
- keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]
- return torch.from_numpy(np.asarray(keep, dtype=bool))
-
- def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks":
- """
- Support indexing over the instances and return a `PolygonMasks` object.
- `item` can be:
-
- 1. An integer. It will return an object with only one instance.
- 2. A slice. It will return an object with the selected instances.
- 3. A list[int]. It will return an object with the selected instances,
- corresponding to the indices in the list.
- 4. A vector mask of type BoolTensor, whose length is num_instances.
- It will return an object with the instances whose mask is nonzero.
- """
- if isinstance(item, int):
- selected_polygons = [self.polygons[item]]
- elif isinstance(item, slice):
- selected_polygons = self.polygons[item]
- elif isinstance(item, list):
- selected_polygons = [self.polygons[i] for i in item]
- elif isinstance(item, torch.Tensor):
- # Polygons is a list, so we have to move the indices back to CPU.
- if item.dtype == torch.bool:
- assert item.dim() == 1, item.shape
- item = item.nonzero().squeeze(1).cpu().numpy().tolist()
- elif item.dtype in [torch.int32, torch.int64]:
- item = item.cpu().numpy().tolist()
- else:
- raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype))
- selected_polygons = [self.polygons[i] for i in item]
- return PolygonMasks(selected_polygons)
-
- def __iter__(self) -> Iterator[List[np.ndarray]]:
- """
- Yields:
- list[ndarray]: the polygons for one instance.
- Each Tensor is a float64 vector representing a polygon.
- """
- return iter(self.polygons)
-
- def __repr__(self) -> str:
- s = self.__class__.__name__ + "("
- s += "num_instances={})".format(len(self.polygons))
- return s
-
- def __len__(self) -> int:
- return len(self.polygons)
-
- def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
- """
- Crop each mask by the given box, and resize results to (mask_size, mask_size).
- This can be used to prepare training targets for Mask R-CNN.
-
- Args:
- boxes (Tensor): Nx4 tensor storing the boxes for each mask
- mask_size (int): the size of the rasterized mask.
-
- Returns:
- Tensor: A bool tensor of shape (N, mask_size, mask_size), where
- N is the number of predicted boxes for this image.
- """
- assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
-
- device = boxes.device
- # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise
- # (several small tensors for representing a single instance mask)
- boxes = boxes.to(torch.device("cpu"))
-
- results = [
- rasterize_polygons_within_box(poly, box.numpy(), mask_size)
- for poly, box in zip(self.polygons, boxes)
- ]
- """
- poly: list[list[float]], the polygons for one instance
- box: a tensor of shape (4,)
- """
- if len(results) == 0:
- return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)
- return torch.stack(results, dim=0).to(device=device)
-
- def area(self):
- """
- Computes area of the mask.
- Only works with Polygons, using the shoelace formula:
- https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
-
- Returns:
- Tensor: a vector, area for each instance
- """
-
- area = []
- for polygons_per_instance in self.polygons:
- area_per_instance = 0
- for p in polygons_per_instance:
- area_per_instance += polygon_area(p[0::2], p[1::2])
- area.append(area_per_instance)
-
- return torch.tensor(area)
-
- @staticmethod
- def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks":
- """
- Concatenates a list of PolygonMasks into a single PolygonMasks
-
- Arguments:
- polymasks_list (list[PolygonMasks])
-
- Returns:
- PolygonMasks: the concatenated PolygonMasks
- """
- assert isinstance(polymasks_list, (list, tuple))
- assert len(polymasks_list) > 0
- assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)
-
- cat_polymasks = type(polymasks_list[0])(
- list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list))
- )
- return cat_polymasks
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/README.md b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/README.md
deleted file mode 100644
index 30a41f008776a8755ec4dc19f4be07c514cd3794..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/README.md
+++ /dev/null
@@ -1,31 +0,0 @@
-
-Here are a few projects that are built on detectron2.
-They are examples of how to use detectron2 as a library, to make your projects more
-maintainable.
-
-## Projects by Facebook
-
-Note that these are research projects, and therefore may not have the same level
-of support or stability of detectron2.
-
-+ [DensePose: Dense Human Pose Estimation In The Wild](DensePose)
-+ [Scale-Aware Trident Networks for Object Detection](TridentNet)
-+ [TensorMask: A Foundation for Dense Object Segmentation](TensorMask)
-+ [Mesh R-CNN](https://github.com/facebookresearch/meshrcnn)
-+ [PointRend: Image Segmentation as Rendering](PointRend)
-+ [Momentum Contrast for Unsupervised Visual Representation Learning](https://github.com/facebookresearch/moco/tree/master/detection)
-
-
-## External Projects
-
-External projects in the community that use detectron2:
-
-
-
-+ [VoVNet backbones](https://github.com/youngwanLEE/vovnet-detectron2).
-+ [AdelaiDet](https://github.com/aim-uofa/adet), a detection toolbox from the University of Adelaide.
-+ [CenterMask : Real-Time Anchor-Free Instance Segmentation](https://github.com/youngwanLEE/centermask2)
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/ops/layer_norm.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/ops/layer_norm.py
deleted file mode 100644
index 2e20c7475f9fc71e69cf0e2a44cac0508ae7fa8f..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/ops/layer_norm.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# --------------------------------------------------------
-# OpenVQA
-# Written by Yuhao Cui https://github.com/cuiyuhao1996
-# --------------------------------------------------------
-
-import torch.nn as nn
-import torch
-
-class LayerNorm(nn.Module):
- def __init__(self, size, eps=1e-6):
- super(LayerNorm, self).__init__()
- self.eps = eps
-
- self.a_2 = nn.Parameter(torch.ones(size))
- self.b_2 = nn.Parameter(torch.zeros(size))
-
- def forward(self, x):
- mean = x.mean(-1, keepdim=True)
- std = x.std(-1, keepdim=True)
-
- return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
diff --git a/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/permutation_iterator_base.h b/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/permutation_iterator_base.h
deleted file mode 100644
index 2610cfdfaffdeb50ad085f90d4ff9b85920ede4f..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/permutation_iterator_base.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/iterator/iterator_adaptor.h>
-#include <thrust/iterator/iterator_traits.h>
-#include <thrust/iterator/detail/minimum_system.h>
-
-namespace thrust
-{
-
-template <typename ElementIterator, typename IndexIterator> class permutation_iterator;
-
-
-namespace detail
-{
-
-template <typename ElementIterator, typename IndexIterator>
- struct permutation_iterator_base
-{
- typedef typename thrust::iterator_system<ElementIterator>::type System1;
- typedef typename thrust::iterator_system<IndexIterator>::type System2;
-
- typedef thrust::iterator_adaptor<
- permutation_iterator<ElementIterator, IndexIterator>,
- IndexIterator,
- typename thrust::iterator_value<ElementIterator>::type,
- typename detail::minimum_system<System1, System2>::type,
- thrust::use_default,
- typename thrust::iterator_reference<ElementIterator>::type
- > type;
-}; // end permutation_iterator_base
-
-} // end detail
-
-} // end thrust
-
diff --git a/spaces/CVPR/transfiner/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py b/spaces/CVPR/transfiner/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py
deleted file mode 100644
index df7a2aedf480ed8dc4aa3645e37420e9b893fae4..0000000000000000000000000000000000000000
--- a/spaces/CVPR/transfiner/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import detectron2.data.transforms as T
-from detectron2.config.lazy import LazyCall as L
-from detectron2.layers.batch_norm import NaiveSyncBatchNorm
-from detectron2.solver import WarmupParamScheduler
-from fvcore.common.param_scheduler import MultiStepParamScheduler
-
-from ..common.data.coco import dataloader
-from ..common.models.mask_rcnn_fpn import model
-from ..common.optim import SGD as optimizer
-from ..common.train import train
-
-# train from scratch
-train.init_checkpoint = ""
-train.amp.enabled = True
-train.ddp.fp16_compression = True
-model.backbone.bottom_up.freeze_at = 0
-
-# SyncBN
-# fmt: off
-model.backbone.bottom_up.stem.norm = \
- model.backbone.bottom_up.stages.norm = \
- model.backbone.norm = "SyncBN"
-
-# Using NaiveSyncBatchNorm because heads may have empty input. That is not supported by
-# torch.nn.SyncBatchNorm. We can remove this after
-# https://github.com/pytorch/pytorch/issues/36530 is fixed.
-model.roi_heads.box_head.conv_norm = \
- model.roi_heads.mask_head.conv_norm = lambda c: NaiveSyncBatchNorm(c,
- stats_mode="N")
-# fmt: on
-
-# 2conv in RPN:
-# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/modeling/architecture/heads.py#L95-L97 # noqa: E501, B950
-model.proposal_generator.head.conv_dims = [-1, -1]
-
-# 4conv1fc box head
-model.roi_heads.box_head.conv_dims = [256, 256, 256, 256]
-model.roi_heads.box_head.fc_dims = [1024]
-
-# resize_and_crop_image in:
-# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/utils/input_utils.py#L127 # noqa: E501, B950
-image_size = 1024
-dataloader.train.mapper.augmentations = [
- L(T.ResizeScale)(
- min_scale=0.1, max_scale=2.0, target_height=image_size, target_width=image_size
- ),
- L(T.FixedSizeCrop)(crop_size=(image_size, image_size)),
- L(T.RandomFlip)(horizontal=True),
-]
-
-# recompute boxes due to cropping
-dataloader.train.mapper.recompute_boxes = True
-
-# larger batch-size.
-dataloader.train.total_batch_size = 64
-
-# Equivalent to 100 epochs.
-# 100 ep = 184375 iters * 64 images/iter / 118000 images/ep
-train.max_iter = 184375
-
-lr_multiplier = L(WarmupParamScheduler)(
- scheduler=L(MultiStepParamScheduler)(
- values=[1.0, 0.1, 0.01],
- milestones=[163889, 177546],
- num_updates=train.max_iter,
- ),
- warmup_length=500 / train.max_iter,
- warmup_factor=0.067,
-)
-
-optimizer.lr = 0.1
-optimizer.weight_decay = 4e-5
diff --git a/spaces/ChandraMohanNayal/AutoGPT/tests/unit/test_browse_scrape_links.py b/spaces/ChandraMohanNayal/AutoGPT/tests/unit/test_browse_scrape_links.py
deleted file mode 100644
index 0a3340e7397a997da96b8ab9828954230e1a3c20..0000000000000000000000000000000000000000
--- a/spaces/ChandraMohanNayal/AutoGPT/tests/unit/test_browse_scrape_links.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# Generated by CodiumAI
-
-# Dependencies:
-# pip install pytest-mock
-import pytest
-
-from autogpt.commands.web_requests import scrape_links
-
-"""
-Code Analysis
-
-Objective:
-The objective of the 'scrape_links' function is to scrape hyperlinks from a
-given URL and return them in a formatted way.
-
-Inputs:
-- url: a string representing the URL to be scraped.
-
-Flow:
-1. Send a GET request to the given URL using the requests library and the user agent header from the config file.
-2. Check if the response contains an HTTP error. If it does, return "error".
-3. Parse the HTML content of the response using the BeautifulSoup library.
-4. Remove any script and style tags from the parsed HTML.
-5. Extract all hyperlinks from the parsed HTML using the 'extract_hyperlinks' function.
-6. Format the extracted hyperlinks using the 'format_hyperlinks' function.
-7. Return the formatted hyperlinks.
-
-Outputs:
-- A list of formatted hyperlinks.
-
-Additional aspects:
-- The function uses the 'requests' and 'BeautifulSoup' libraries to send HTTP
-requests and parse HTML content, respectively.
-- The 'extract_hyperlinks' function is called to extract hyperlinks from the parsed HTML.
-- The 'format_hyperlinks' function is called to format the extracted hyperlinks.
-- The function checks for HTTP errors and returns "error" if any are found.
-"""
-
-
-class TestScrapeLinks:
- # Tests that the function returns a list of formatted hyperlinks when
- # provided with a valid url that returns a webpage with hyperlinks.
- def test_valid_url_with_hyperlinks(self):
- url = "https://www.google.com"
- result = scrape_links(url)
- assert len(result) > 0
- assert isinstance(result, list)
- assert isinstance(result[0], str)
-
- # Tests that the function returns correctly formatted hyperlinks when given a valid url.
- def test_valid_url(self, mocker):
- # Mock the requests.get() function to return a response with sample HTML containing hyperlinks
- mock_response = mocker.Mock()
- mock_response.status_code = 200
- mock_response.text = (
- "Google"
- )
- mocker.patch("requests.Session.get", return_value=mock_response)
-
- # Call the function with a valid URL
- result = scrape_links("https://www.example.com")
-
- # Assert that the function returns correctly formatted hyperlinks
- assert result == ["Google (https://www.google.com)"]
-
- # Tests that the function returns "error" when given an invalid url.
- def test_invalid_url(self, mocker):
- # Mock the requests.get() function to return an HTTP error response
- mock_response = mocker.Mock()
- mock_response.status_code = 404
- mocker.patch("requests.Session.get", return_value=mock_response)
-
- # Call the function with an invalid URL
- result = scrape_links("https://www.invalidurl.com")
-
- # Assert that the function returns "error"
- assert "Error:" in result
-
- # Tests that the function returns an empty list when the html contains no hyperlinks.
- def test_no_hyperlinks(self, mocker):
- # Mock the requests.get() function to return a response with sample HTML containing no hyperlinks
- mock_response = mocker.Mock()
- mock_response.status_code = 200
- mock_response.text = "<html><body><p>No hyperlinks here</p></body></html>"
- mocker.patch("requests.Session.get", return_value=mock_response)
-
- # Call the function with a URL containing no hyperlinks
- result = scrape_links("https://www.example.com")
-
- # Assert that the function returns an empty list
- assert result == []
-
- # Tests that scrape_links() correctly extracts and formats hyperlinks from
- # a sample HTML containing a few hyperlinks.
- def test_scrape_links_with_few_hyperlinks(self, mocker):
- # Mock the requests.get() function to return a response with a sample HTML containing hyperlinks
- mock_response = mocker.Mock()
- mock_response.status_code = 200
- mock_response.text = """
-
-
-
- )
-}
diff --git a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/README.md b/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/README.md
deleted file mode 100644
index 3804ddb93ff8dc141f3180c7d6af8e105b949c24..0000000000000000000000000000000000000000
--- a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/README.md
+++ /dev/null
@@ -1,135 +0,0 @@
----
-title: Anything V3.0
-emoji: 🏃
-colorFrom: gray
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.10.1
-app_file: app.py
-pinned: false
-duplicated_from: yangheng/Super-Resolution-Anime-Diffusion
----
-
-# If you have a GPU, try the [Stable Diffusion WebUI](https://github.com/yangheng95/stable-diffusion-webui)
-
-
-# [Online Web Demo](https://huggingface.co/spaces/yangheng/Super-Resolution-Anime-Diffusion)
-
-This is a demo forked from https://huggingface.co/Linaqruf/anything-v3.0.
-
-## Super Resolution Anime Diffusion
-At this moment, many diffusion models can only generate <1024 width and length pictures.
-I integrated the Super Resolution with [Anything diffusion model](https://huggingface.co/Linaqruf/anything-v3.0) to produce high resolution pictures.
-Thanks to the open-source project: https://github.com/yu45020/Waifu2x
-
-
-## Modifications
-1. Disable the safety checker to save time and memory. You still need to abide by the original rules of the model.
-2. Add the Super Resolution function to the model (a minimal sketch follows below).
-3. Add a batch generation function to the model (see inference.py).
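-
-Putting modifications 1 and 2 together, a minimal sketch of the intended pipeline looks like this (the prompt is only an example, and the plain Lanczos x4 resize below is just a placeholder for the bundled Waifu2x super-resolution step):
-
-```python
-from diffusers import StableDiffusionPipeline
-from PIL import Image
-import torch
-
-# Modification 1: skip the safety checker.
-pipe = StableDiffusionPipeline.from_pretrained(
-    "Linaqruf/anything-v3.0", torch_dtype=torch.float16, safety_checker=None
-).to("cuda")
-
-low_res = pipe("1girl, white hair, golden eyes, flower meadow").images[0]
-
-# Modification 2: super-resolve the result (the real repo runs Waifu2x here).
-high_res = low_res.resize((low_res.width * 4, low_res.height * 4), Image.LANCZOS)
-high_res.save("high_res.png")
-```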
-
-## Install
-1. Install [Anaconda](https://www.anaconda.com/products/distribution) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html)
-2. create a conda environment:
-```bash
-conda create -n diffusion python=3.9
-conda activate diffusion
-```
-3. install requirements:
-```bash
-conda install pytorch pytorch-cuda=11.7 -c pytorch -c nvidia
-pip install -r requirements.txt
-```
-4. Run web demo:
-```
-python app.py
-```
-5. Or run batch anime generation:
-```
-python inference.py
-```
-See the source code for details; you can set the scale factor to magnify the output images.
-
-## Random Examples (512*768) x4 scale factor
-
-
-# Origin README
----
-language:
-- en
-license: creativeml-openrail-m
-tags:
-- stable-diffusion
-- stable-diffusion-diffusers
-- text-to-image
-- diffusers
-inference: true
----
-
-# Anything V3
-
-Welcome to Anything V3 - a latent diffusion model for weebs. This model is intended to produce high-quality, highly detailed anime-style images with just a few prompts. Like other anime-style Stable Diffusion models, it also supports danbooru tags to generate images.
-
-e.g. **_1girl, white hair, golden eyes, beautiful eyes, detail, flower meadow, cumulonimbus clouds, lighting, detailed sky, garden_**
-
-## Gradio
-
-We support a [Gradio](https://github.com/gradio-app/gradio) Web UI to run Anything-V3.0:
-
-[Open in Spaces](https://huggingface.co/spaces/akhaliq/anything-v3.0)
-
-
-
-## 🧨 Diffusers
-
-This model can be used just like any other Stable Diffusion model. For more information,
-please have a look at the [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion).
-
-You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or [FLAX/JAX]().
-
-```python
-from diffusers import StableDiffusionPipeline
-import torch
-
-model_id = "Linaqruf/anything-v3.0"
-pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
-pipe = pipe.to("cuda")
-
-prompt = "pikachu"
-image = pipe(prompt).images[0]
-
-image.save("./pikachu.png")
-```
-
-## Examples
-
-Below are some examples of images generated using this model:
-
-**Anime Girl:**
-
-```
-1girl, brown hair, green eyes, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden
-Steps: 50, Sampler: DDIM, CFG scale: 12
-```
-**Anime Boy:**
-
-```
-1boy, medium hair, blonde hair, blue eyes, bishounen, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden
-Steps: 50, Sampler: DDIM, CFG scale: 12
-```
-**Scenery:**
-
-```
-scenery, shibuya tokyo, post-apocalypse, ruins, rust, sky, skyscraper, abandoned, blue sky, broken window, building, cloud, crane machine, outdoors, overgrown, pillar, sunset
-Steps: 50, Sampler: DDIM, CFG scale: 12
-```
-
-## License
-
-This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
-The CreativeML OpenRAIL License specifies:
-
-1. You can't use the model to deliberately produce or share illegal or harmful outputs or content
-2. The authors claim no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license
-3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)
-[Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
diff --git a/spaces/Gradio-Blocks/pubmed-abstract-retriever/README.md b/spaces/Gradio-Blocks/pubmed-abstract-retriever/README.md
deleted file mode 100644
index 05a7522491204f2401025f3a863d6d0e0436ff53..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/pubmed-abstract-retriever/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: PubMed Abstract Retriever
-emoji: 📚🔎📄
-colorFrom: red
-colorTo: green
-sdk: gradio
-app_file: app.py
-pinned: false
----
-
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py
deleted file mode 100644
index 1afeeef1212db831dd1f097d30b0354e459daa97..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py
+++ /dev/null
@@ -1,13 +0,0 @@
-_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'
-model = dict(
- pretrained='open-mmlab://resnext101_32x4d',
- backbone=dict(
- type='ResNeXt',
- depth=101,
- groups=32,
- base_width=4,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- frozen_stages=1,
- norm_cfg=dict(type='BN', requires_grad=True),
- style='pytorch'))
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/da_head.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/da_head.py
deleted file mode 100644
index 8ee0e08c3d69ee4392aa550072a043548c377571..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/da_head.py
+++ /dev/null
@@ -1,178 +0,0 @@
-import torch
-import torch.nn.functional as F
-from mmcv.cnn import ConvModule, Scale
-from torch import nn
-
-from mmseg.core import add_prefix
-from ..builder import HEADS
-from ..utils import SelfAttentionBlock as _SelfAttentionBlock
-from .decode_head import BaseDecodeHead
-
-
-class PAM(_SelfAttentionBlock):
- """Position Attention Module (PAM)
-
- Args:
- in_channels (int): Input channels of key/query feature.
- channels (int): Output channels of key/query transform.
- """
-
- def __init__(self, in_channels, channels):
- super(PAM, self).__init__(
- key_in_channels=in_channels,
- query_in_channels=in_channels,
- channels=channels,
- out_channels=in_channels,
- share_key_query=False,
- query_downsample=None,
- key_downsample=None,
- key_query_num_convs=1,
- key_query_norm=False,
- value_out_num_convs=1,
- value_out_norm=False,
- matmul_norm=False,
- with_out=False,
- conv_cfg=None,
- norm_cfg=None,
- act_cfg=None)
-
- self.gamma = Scale(0)
-
- def forward(self, x):
- """Forward function."""
- out = super(PAM, self).forward(x, x)
-
- out = self.gamma(out) + x
- return out
-
-
-class CAM(nn.Module):
- """Channel Attention Module (CAM)"""
-
- def __init__(self):
- super(CAM, self).__init__()
- self.gamma = Scale(0)
-
- def forward(self, x):
- """Forward function."""
- batch_size, channels, height, width = x.size()
- proj_query = x.view(batch_size, channels, -1)
- proj_key = x.view(batch_size, channels, -1).permute(0, 2, 1)
- energy = torch.bmm(proj_query, proj_key)
- energy_new = torch.max(
- energy, -1, keepdim=True)[0].expand_as(energy) - energy
- attention = F.softmax(energy_new, dim=-1)
- proj_value = x.view(batch_size, channels, -1)
-
- out = torch.bmm(attention, proj_value)
- out = out.view(batch_size, channels, height, width)
-
- out = self.gamma(out) + x
- return out
-
-
-@HEADS.register_module()
-class DAHead(BaseDecodeHead):
- """Dual Attention Network for Scene Segmentation.
-
- This head is the implementation of `DANet
- `_.
-
- Args:
- pam_channels (int): The channels of Position Attention Module(PAM).
- """
-
- def __init__(self, pam_channels, **kwargs):
- super(DAHead, self).__init__(**kwargs)
- self.pam_channels = pam_channels
- self.pam_in_conv = ConvModule(
- self.in_channels,
- self.channels,
- 3,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
- self.pam = PAM(self.channels, pam_channels)
- self.pam_out_conv = ConvModule(
- self.channels,
- self.channels,
- 3,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
- self.pam_conv_seg = nn.Conv2d(
- self.channels, self.num_classes, kernel_size=1)
-
- self.cam_in_conv = ConvModule(
- self.in_channels,
- self.channels,
- 3,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
- self.cam = CAM()
- self.cam_out_conv = ConvModule(
- self.channels,
- self.channels,
- 3,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
- self.cam_conv_seg = nn.Conv2d(
- self.channels, self.num_classes, kernel_size=1)
-
- def pam_cls_seg(self, feat):
- """PAM feature classification."""
- if self.dropout is not None:
- feat = self.dropout(feat)
- output = self.pam_conv_seg(feat)
- return output
-
- def cam_cls_seg(self, feat):
- """CAM feature classification."""
- if self.dropout is not None:
- feat = self.dropout(feat)
- output = self.cam_conv_seg(feat)
- return output
-
- def forward(self, inputs):
- """Forward function."""
- x = self._transform_inputs(inputs)
- pam_feat = self.pam_in_conv(x)
- pam_feat = self.pam(pam_feat)
- pam_feat = self.pam_out_conv(pam_feat)
- pam_out = self.pam_cls_seg(pam_feat)
-
- cam_feat = self.cam_in_conv(x)
- cam_feat = self.cam(cam_feat)
- cam_feat = self.cam_out_conv(cam_feat)
- cam_out = self.cam_cls_seg(cam_feat)
-
- feat_sum = pam_feat + cam_feat
- pam_cam_out = self.cls_seg(feat_sum)
-
- return pam_cam_out, pam_out, cam_out
-
- def forward_test(self, inputs, img_metas, test_cfg):
- """Forward function for testing, only ``pam_cam`` is used."""
- return self.forward(inputs)[0]
-
- def losses(self, seg_logit, seg_label):
- """Compute ``pam_cam``, ``pam``, ``cam`` loss."""
- pam_cam_seg_logit, pam_seg_logit, cam_seg_logit = seg_logit
- loss = dict()
- loss.update(
- add_prefix(
- super(DAHead, self).losses(pam_cam_seg_logit, seg_label),
- 'pam_cam'))
- loss.update(
- add_prefix(
- super(DAHead, self).losses(pam_seg_logit, seg_label), 'pam'))
- loss.update(
- add_prefix(
- super(DAHead, self).losses(cam_seg_logit, seg_label), 'cam'))
- return loss
diff --git a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/CODE_OF_CONDUCT.md b/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/CODE_OF_CONDUCT.md
deleted file mode 100644
index 83f431e8feeb7e80d571f39c9f6c1b96857b5f85..0000000000000000000000000000000000000000
--- a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,80 +0,0 @@
-# Code of Conduct
-
-## Our Pledge
-
-In the interest of fostering an open and welcoming environment, we as
-contributors and maintainers pledge to make participation in our project and
-our community a harassment-free experience for everyone, regardless of age, body
-size, disability, ethnicity, sex characteristics, gender identity and expression,
-level of experience, education, socio-economic status, nationality, personal
-appearance, race, religion, or sexual identity and orientation.
-
-## Our Standards
-
-Examples of behavior that contributes to creating a positive environment
-include:
-
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery and unwelcome sexual attention or
-advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic
-address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a
-professional setting
-
-## Our Responsibilities
-
-Project maintainers are responsible for clarifying the standards of acceptable
-behavior and are expected to take appropriate and fair corrective action in
-response to any instances of unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove, edit, or
-reject comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct, or to ban temporarily or
-permanently any contributor for other behaviors that they deem inappropriate,
-threatening, offensive, or harmful.
-
-## Scope
-
-This Code of Conduct applies within all project spaces, and it also applies when
-an individual is representing the project or its community in public spaces.
-Examples of representing a project or community include using an official
-project e-mail address, posting via an official social media account, or acting
-as an appointed representative at an online or offline event. Representation of
-a project may be further defined and clarified by project maintainers.
-
-This Code of Conduct also applies outside the project spaces when there is a
-reasonable belief that an individual's behavior may have a negative impact on
-the project or its community.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported by contacting the project team at . All
-complaints will be reviewed and investigated and will result in a response that
-is deemed necessary and appropriate to the circumstances. The project team is
-obligated to maintain confidentiality with regard to the reporter of an incident.
-Further details of specific enforcement policies may be posted separately.
-
-Project maintainers who do not follow or enforce the Code of Conduct in good
-faith may face temporary or permanent repercussions as determined by other
-members of the project's leadership.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
-available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
-
-[homepage]: https://www.contributor-covenant.org
-
-For answers to common questions about this code of conduct, see
-https://www.contributor-covenant.org/faq
diff --git a/spaces/HaHaBill/LandShapes-Antarctica/models/stylegan2/stylegan2-pytorch/projector.py b/spaces/HaHaBill/LandShapes-Antarctica/models/stylegan2/stylegan2-pytorch/projector.py
deleted file mode 100644
index d63ad3573696cc22640cbeddc197d8cb15c52977..0000000000000000000000000000000000000000
--- a/spaces/HaHaBill/LandShapes-Antarctica/models/stylegan2/stylegan2-pytorch/projector.py
+++ /dev/null
@@ -1,203 +0,0 @@
-import argparse
-import math
-import os
-
-import torch
-from torch import optim
-from torch.nn import functional as F
-from torchvision import transforms
-from PIL import Image
-from tqdm import tqdm
-
-import lpips
-from model import Generator
-
-
-def noise_regularize(noises):
- loss = 0
-
- for noise in noises:
- size = noise.shape[2]
-
- while True:
- loss = (
- loss
- + (noise * torch.roll(noise, shifts=1, dims=3)).mean().pow(2)
- + (noise * torch.roll(noise, shifts=1, dims=2)).mean().pow(2)
- )
-
- if size <= 8:
- break
-
- noise = noise.reshape([1, 1, size // 2, 2, size // 2, 2])
- noise = noise.mean([3, 5])
- size //= 2
-
- return loss
-
-
-def noise_normalize_(noises):
- for noise in noises:
- mean = noise.mean()
- std = noise.std()
-
- noise.data.add_(-mean).div_(std)
-
-
-def get_lr(t, initial_lr, rampdown=0.25, rampup=0.05):
- lr_ramp = min(1, (1 - t) / rampdown)
- lr_ramp = 0.5 - 0.5 * math.cos(lr_ramp * math.pi)
- lr_ramp = lr_ramp * min(1, t / rampup)
-
- return initial_lr * lr_ramp
-
-
-def latent_noise(latent, strength):
- noise = torch.randn_like(latent) * strength
-
- return latent + noise
-
-
-def make_image(tensor):
- return (
- tensor.detach()
- .clamp_(min=-1, max=1)
- .add(1)
- .div_(2)
- .mul(255)
- .type(torch.uint8)
- .permute(0, 2, 3, 1)
- .to('cpu')
- .numpy()
- )
-
-
-if __name__ == '__main__':
- device = 'cuda'
-
- parser = argparse.ArgumentParser()
- parser.add_argument('--ckpt', type=str, required=True)
- parser.add_argument('--size', type=int, default=256)
- parser.add_argument('--lr_rampup', type=float, default=0.05)
- parser.add_argument('--lr_rampdown', type=float, default=0.25)
- parser.add_argument('--lr', type=float, default=0.1)
- parser.add_argument('--noise', type=float, default=0.05)
- parser.add_argument('--noise_ramp', type=float, default=0.75)
- parser.add_argument('--step', type=int, default=1000)
- parser.add_argument('--noise_regularize', type=float, default=1e5)
- parser.add_argument('--mse', type=float, default=0)
- parser.add_argument('--w_plus', action='store_true')
- parser.add_argument('files', metavar='FILES', nargs='+')
-
- args = parser.parse_args()
-
- n_mean_latent = 10000
-
- resize = min(args.size, 256)
-
- transform = transforms.Compose(
- [
- transforms.Resize(resize),
- transforms.CenterCrop(resize),
- transforms.ToTensor(),
- transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
- ]
- )
-
- imgs = []
-
- for imgfile in args.files:
- img = transform(Image.open(imgfile).convert('RGB'))
- imgs.append(img)
-
- imgs = torch.stack(imgs, 0).to(device)
-
- g_ema = Generator(args.size, 512, 8)
- g_ema.load_state_dict(torch.load(args.ckpt)['g_ema'], strict=False)
- g_ema.eval()
- g_ema = g_ema.to(device)
-
- with torch.no_grad():
- noise_sample = torch.randn(n_mean_latent, 512, device=device)
- latent_out = g_ema.style(noise_sample)
-
- latent_mean = latent_out.mean(0)
- latent_std = ((latent_out - latent_mean).pow(2).sum() / n_mean_latent) ** 0.5
-
- percept = lpips.PerceptualLoss(
- model='net-lin', net='vgg', use_gpu=device.startswith('cuda')
- )
-
- noises = g_ema.make_noise()
-
- latent_in = latent_mean.detach().clone().unsqueeze(0).repeat(2, 1)
-
- if args.w_plus:
- latent_in = latent_in.unsqueeze(1).repeat(1, g_ema.n_latent, 1)
-
- latent_in.requires_grad = True
-
- for noise in noises:
- noise.requires_grad = True
-
- optimizer = optim.Adam([latent_in] + noises, lr=args.lr)
-
- pbar = tqdm(range(args.step))
- latent_path = []
-
- for i in pbar:
- t = i / args.step
- lr = get_lr(t, args.lr)
- optimizer.param_groups[0]['lr'] = lr
- noise_strength = latent_std * args.noise * max(0, 1 - t / args.noise_ramp) ** 2
- latent_n = latent_noise(latent_in, noise_strength.item())
-
- img_gen, _ = g_ema([latent_n], input_is_latent=True, noise=noises)
-
- batch, channel, height, width = img_gen.shape
-
- if height > 256:
- factor = height // 256
-
- img_gen = img_gen.reshape(
- batch, channel, height // factor, factor, width // factor, factor
- )
- img_gen = img_gen.mean([3, 5])
-
- p_loss = percept(img_gen, imgs).sum()
- n_loss = noise_regularize(noises)
- mse_loss = F.mse_loss(img_gen, imgs)
-
- loss = p_loss + args.noise_regularize * n_loss + args.mse * mse_loss
-
- optimizer.zero_grad()
- loss.backward()
- optimizer.step()
-
- noise_normalize_(noises)
-
- if (i + 1) % 100 == 0:
- latent_path.append(latent_in.detach().clone())
-
- pbar.set_description(
- (
- f'perceptual: {p_loss.item():.4f}; noise regularize: {n_loss.item():.4f};'
- f' mse: {mse_loss.item():.4f}; lr: {lr:.4f}'
- )
- )
-
- result_file = {'noises': noises}
-
- img_gen, _ = g_ema([latent_path[-1]], input_is_latent=True, noise=noises)
-
- filename = os.path.splitext(os.path.basename(args.files[0]))[0] + '.pt'
-
- img_ar = make_image(img_gen)
-
- for i, input_name in enumerate(args.files):
- result_file[input_name] = {'img': img_gen[i], 'latent': latent_in[i]}
- img_name = os.path.splitext(os.path.basename(input_name))[0] + '-project.png'
- pil_img = Image.fromarray(img_ar[i])
- pil_img.save(img_name)
-
- torch.save(result_file, filename)
diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/index.3517cbba.css b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/index.3517cbba.css
deleted file mode 100644
index 8a986feb0fd82c371bc0dfbbbced8f43167c38aa..0000000000000000000000000000000000000000
--- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/index.3517cbba.css
+++ /dev/null
@@ -1 +0,0 @@
-span.svelte-1cgkd5k{text-shadow:0 0 8px rgba(0,0,0,.5)}progress.svelte-1cgkd5k::-webkit-progress-bar{border-radius:2px;background-color:#fff3;overflow:hidden}progress.svelte-1cgkd5k::-webkit-progress-value{background-color:#ffffffe6}.mirror.svelte-1cgkd5k{transform:scaleX(-1)}.wrap.svelte-1cgkd5k{width:calc(100% - .75rem)}
diff --git a/spaces/Hoodady/3DFuse/ldm/modules/midas/midas/base_model.py b/spaces/Hoodady/3DFuse/ldm/modules/midas/midas/base_model.py
deleted file mode 100644
index 5cf430239b47ec5ec07531263f26f5c24a2311cd..0000000000000000000000000000000000000000
--- a/spaces/Hoodady/3DFuse/ldm/modules/midas/midas/base_model.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import torch
-
-
-class BaseModel(torch.nn.Module):
- def load(self, path):
- """Load model from file.
-
- Args:
- path (str): file path
- """
- parameters = torch.load(path, map_location=torch.device('cpu'))
-
- if "optimizer" in parameters:
- parameters = parameters["model"]
-
- self.load_state_dict(parameters)
diff --git a/spaces/HuggingFaceH4/open_llm_leaderboard/src/load_from_hub.py b/spaces/HuggingFaceH4/open_llm_leaderboard/src/load_from_hub.py
deleted file mode 100644
index e512e529eed70156cc91d76b02296a8d55f86224..0000000000000000000000000000000000000000
--- a/spaces/HuggingFaceH4/open_llm_leaderboard/src/load_from_hub.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import json
-import os
-from collections import defaultdict
-
-import pandas as pd
-
-from src.assets.hardcoded_evals import baseline, gpt4_values, gpt35_values
-from src.get_model_info.apply_metadata_to_df import apply_metadata
-from src.plots.read_results import get_eval_results_dicts, make_clickable_model
-from src.get_model_info.utils import AutoEvalColumn, EvalQueueColumn, has_no_nan_values
-
-IS_PUBLIC = bool(os.environ.get("IS_PUBLIC", True))
-
-
-def get_all_requested_models(requested_models_dir: str) -> tuple[set[str], dict[str, list[str]]]:
- depth = 1
- file_names = []
- users_to_submission_dates = defaultdict(list)
-
- for root, _, files in os.walk(requested_models_dir):
- current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
- if current_depth == depth:
- for file in files:
- if not file.endswith(".json"):
- continue
- with open(os.path.join(root, file), "r") as f:
- info = json.load(f)
- file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
-
- # Select organisation
- if info["model"].count("/") == 0 or "submitted_time" not in info:
- continue
- organisation, _ = info["model"].split("/")
- users_to_submission_dates[organisation].append(info["submitted_time"])
-
- return set(file_names), users_to_submission_dates
-
-
-def get_leaderboard_df(results_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame:
- all_data = get_eval_results_dicts(results_path)
-
- if not IS_PUBLIC:
- all_data.append(gpt4_values)
- all_data.append(gpt35_values)
-
- all_data.append(baseline)
- apply_metadata(all_data) # Populate model type based on known hardcoded values in `metadata.py`
-
- df = pd.DataFrame.from_records(all_data)
- df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False)
- df = df[cols].round(decimals=2)
-
- # filter out if any of the benchmarks have not been produced
- df = df[has_no_nan_values(df, benchmark_cols)]
- return df
-
-
-def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
- entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")]
- all_evals = []
-
- for entry in entries:
- if ".json" in entry:
- file_path = os.path.join(save_path, entry)
- with open(file_path) as fp:
- data = json.load(fp)
-
- data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
- data[EvalQueueColumn.revision.name] = data.get("revision", "main")
-
- all_evals.append(data)
- elif ".md" not in entry:
- # this is a folder
- sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if not e.startswith(".")]
- for sub_entry in sub_entries:
- file_path = os.path.join(save_path, entry, sub_entry)
- with open(file_path) as fp:
- data = json.load(fp)
-
- data[EvalQueueColumn.model.name] = make_clickable_model(data["model"])
- data[EvalQueueColumn.revision.name] = data.get("revision", "main")
- all_evals.append(data)
-
- pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
- running_list = [e for e in all_evals if e["status"] == "RUNNING"]
- finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
- df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
- df_running = pd.DataFrame.from_records(running_list, columns=cols)
- df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
- return df_finished[cols], df_running[cols], df_pending[cols]
-
diff --git a/spaces/HuseynG/ECS7022P-WGAN-GP/app.py b/spaces/HuseynG/ECS7022P-WGAN-GP/app.py
deleted file mode 100644
index 8f024b78f1a7a4800ddbb4975d6792b7dd666627..0000000000000000000000000000000000000000
--- a/spaces/HuseynG/ECS7022P-WGAN-GP/app.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import gradio as gr
-import torch
-from utils import load_model, generate_random_img, schedule_function
-import time
-import random
-import threading
-from gradio_client import Client
-
-def generate_image():
- with torch.no_grad():
- model = load_model('generator', 'generator_model_epoch_94.pth')
- generated_image = generate_random_img(model)
- return generated_image
-
-iface = gr.Interface(
- fn=generate_image,
- inputs=[],
- outputs=gr.outputs.Image(type='numpy'),
- allow_screenshot=True,
- title='Random Landscape Image Generator By Huseyn Gorbani',
- description='This app generates random images using a DCGAN-inspired WGAN-GP model. Special thanks to Aladdin Persson and Emilien Dupont for their insightful repos on GitHub. Aladdin Persson (repo: https://github.com/aladdinpersson/Machine-Learning-Collection/tree/master/ML/Pytorch/GANs/4.%20WGAN-GP) Emilien Dupont (repo: https://github.com/EmilienDupont/wgan-gp/blob/master/training.py)',
- css='img_styles.css',
-)
-
-if __name__ == '__main__':
- scheduler_thread = threading.Thread(target=schedule_function) # avoiding sleep, again this project is for academic purposes only
- # scheduler_thread.start()
- iface.launch()
\ No newline at end of file
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/data/offset_tokens_dataset.py b/spaces/ICML2022/OFA/fairseq/fairseq/data/offset_tokens_dataset.py
deleted file mode 100644
index 6fabbdcdaa1a8f70d8d8c07db4cd53754503c194..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/data/offset_tokens_dataset.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from . import BaseWrapperDataset
-
-
-class OffsetTokensDataset(BaseWrapperDataset):
- def __init__(self, dataset, offset):
- super().__init__(dataset)
- self.offset = offset
-
- def __getitem__(self, idx):
- return self.dataset[idx] + self.offset
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/models/nat/nat_crf_transformer.py b/spaces/ICML2022/OFA/fairseq/fairseq/models/nat/nat_crf_transformer.py
deleted file mode 100644
index d4b3cd931ceb077eb30db73df1d5d6cd714a86c2..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/models/nat/nat_crf_transformer.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-
-from fairseq.models import register_model, register_model_architecture
-from fairseq.models.nat import NATransformerModel, base_architecture
-from fairseq.modules import DynamicCRF
-
-
-@register_model("nacrf_transformer")
-class NACRFTransformerModel(NATransformerModel):
- def __init__(self, args, encoder, decoder):
- super().__init__(args, encoder, decoder)
- self.crf_layer = DynamicCRF(
- num_embedding=len(self.tgt_dict),
- low_rank=args.crf_lowrank_approx,
- beam_size=args.crf_beam_approx,
- )
-
- @property
- def allow_ensemble(self):
- return False
-
- @staticmethod
- def add_args(parser):
- NATransformerModel.add_args(parser)
- parser.add_argument(
- "--crf-lowrank-approx",
- type=int,
- help="the dimension of low-rank approximation of transition",
- )
- parser.add_argument(
- "--crf-beam-approx",
- type=int,
- help="the beam size for apporixmating the normalizing factor",
- )
- parser.add_argument(
- "--word-ins-loss-factor",
- type=float,
- help="weights on NAT loss used to co-training with CRF loss.",
- )
-
- def forward(
- self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
- ):
- # encoding
- encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
-
- # length prediction
- length_out = self.decoder.forward_length(
- normalize=False, encoder_out=encoder_out
- )
- length_tgt = self.decoder.forward_length_prediction(
- length_out, encoder_out, tgt_tokens
- )
-
- # decoding
- word_ins_out = self.decoder(
- normalize=False,
- prev_output_tokens=prev_output_tokens,
- encoder_out=encoder_out,
- )
- word_ins_tgt, word_ins_mask = tgt_tokens, tgt_tokens.ne(self.pad)
-
- # compute the log-likelihood of CRF
- crf_nll = -self.crf_layer(word_ins_out, word_ins_tgt, word_ins_mask)
- crf_nll = (crf_nll / word_ins_mask.type_as(crf_nll).sum(-1)).mean()
-
- return {
- "word_ins": {
- "out": word_ins_out,
- "tgt": word_ins_tgt,
- "mask": word_ins_mask,
- "ls": self.args.label_smoothing,
- "nll_loss": True,
- "factor": self.args.word_ins_loss_factor,
- },
- "word_crf": {"loss": crf_nll},
- "length": {
- "out": length_out,
- "tgt": length_tgt,
- "factor": self.decoder.length_loss_factor,
- },
- }
-
- def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs):
- output_tokens = decoder_out.output_tokens
- output_scores = decoder_out.output_scores
- history = decoder_out.history
-
- # execute the decoder and get emission scores
- output_masks = output_tokens.ne(self.pad)
- word_ins_out = self.decoder(
- normalize=False, prev_output_tokens=output_tokens, encoder_out=encoder_out
- )
-
- # run viterbi decoding through CRF
- _scores, _tokens = self.crf_layer.forward_decoder(word_ins_out, output_masks)
- output_tokens.masked_scatter_(output_masks, _tokens[output_masks])
- output_scores.masked_scatter_(output_masks, _scores[output_masks])
- if history is not None:
- history.append(output_tokens.clone())
-
- return decoder_out._replace(
- output_tokens=output_tokens,
- output_scores=output_scores,
- attn=None,
- history=history,
- )
-
-
-@register_model_architecture("nacrf_transformer", "nacrf_transformer")
-def nacrf_base_architecture(args):
- args.crf_lowrank_approx = getattr(args, "crf_lowrank_approx", 32)
- args.crf_beam_approx = getattr(args, "crf_beam_approx", 64)
- args.word_ins_loss_factor = getattr(args, "word_ins_loss_factor", 0.5)
- args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
- args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
- base_architecture(args)
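The two CRF hyper-parameters registered above control a low-rank factorization of the |V| x |V| transition matrix and the beam used to approximate its normalizing factor. The sketch below is not fairseq's DynamicCRF; it is a minimal, self-contained illustration (tensor names and sizes are made up) of how a low-rank transition lets you score a tag sequence without materializing the full matrix.

import torch

def low_rank_transition_scores(prev_ids, next_ids, E1, E2):
    # the full transition matrix would be E1 @ E2.T with shape (vocab, vocab);
    # gathering rows first keeps memory at O(vocab * rank)
    return (E1[prev_ids] * E2[next_ids]).sum(-1)

vocab, rank, length = 100, 32, 6           # rank plays the role of --crf-lowrank-approx
E1 = torch.randn(vocab, rank)
E2 = torch.randn(vocab, rank)
emissions = torch.randn(length, vocab)     # per-position emission scores from the decoder
tags = torch.randint(0, vocab, (length,))  # one candidate output sequence

score = emissions[torch.arange(length), tags].sum() \
      + low_rank_transition_scores(tags[:-1], tags[1:], E1, E2).sum()
print(score)  # unnormalized log-score; the CRF divides by an (approximated) partition function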
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/pq/__init__.py b/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/pq/__init__.py
deleted file mode 100644
index c142a802e05ec7ecfa5dba7d9a98c26a60ac75d2..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/pq/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from .utils import SizeTracker, get_param, attrsetter, quantize_model_ # NOQA
diff --git a/spaces/Ibtehaj10/cheating-detection-FYP/pages/signup.py b/spaces/Ibtehaj10/cheating-detection-FYP/pages/signup.py
deleted file mode 100644
index d7753e687fd683bd2ec45cd58b9a72013dc23689..0000000000000000000000000000000000000000
--- a/spaces/Ibtehaj10/cheating-detection-FYP/pages/signup.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import pickle
-from pathlib import Path
-import streamlit as st
-import os
-import pandas as pd
-import csv
-data = ['Id','Password']
-
-# with open('LoginStatus.csv', 'w') as file:
-# writer = csv.writer(file)
-# writer.writerow(data)
-db = {}
-
-l1 = []
-l2 = []
-ids = st.text_input("Email Address")
-password = st.text_input("Password",type="password",key="password")
-# l1.append(ids)
-# l2.append(password)
-
-# l1.append(ids)
-# l2.append(password)
-key1 = "Id"
-db.setdefault(key1, [])
-db[key1].append(ids)
-
-key2 = "password"
-db.setdefault(key2, [])
-db[key2].append(password)
-
-# print(db)
-# db['Id'] = l1
-# db['Password'] = l2
-# for i in db:
-df = pd.DataFrame(db)
-# st.write(db)
-# df
-if st.button('Register'):
- df.to_csv('LoginStatus.csv', mode='a', header=False, index=False)
- st.success('User Successfully Registered!')
-
-
-
-# import streamlit as st
-# def check_password():
-# """Returns `True` if the user had a correct password."""
-
-# def password_entered():
-# """Checks whether a password entered by the user is correct."""
-# if (
-# st.session_state["username"] in st.secrets["passwords"]
-# and st.session_state["password"]
-# == st.secrets["passwords"][st.session_state["username"]]
-# ):
-# st.session_state["password_correct"] = True
-# del st.session_state["password"] # don't store username + password
-# del st.session_state["username"]
-# else:
-# st.session_state["password_correct"] = False
-
-# if "password_correct" not in st.session_state:
-# # First run, show inputs for username + password.
-# st.text_input("Username", on_change=password_entered, key="username")
-# st.text_input(
-# "Password", type="password", on_change=password_entered, key="password"
-# )
-# return False
-# elif not st.session_state["password_correct"]:
-# # Password not correct, show input + error.
-# st.text_input("Username", on_change=password_entered, key="username")
-# st.text_input(
-# "Password", type="password", on_change=password_entered, key="password"
-# )
-# st.error("😕 User not known or password incorrect")
-# return False
-# else:
-# # Password correct.
-# return True
-
-# if check_password():
-# st.write("Here goes your normal Streamlit app...")
-# st.button("Click me")
\ No newline at end of file
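For context, the registration above appends Id/Password pairs to LoginStatus.csv without a header row; a matching login check would read that file back and compare the submitted pair. A minimal sketch (the column names are assumptions, since the CSV is written headerless):

import pandas as pd

def credentials_match(user_id, password, path="LoginStatus.csv"):
    # rows were appended with header=False, so supply the column names explicitly
    try:
        df = pd.read_csv(path, header=None, names=["Id", "Password"], dtype=str)
    except FileNotFoundError:
        return False
    return bool(((df["Id"] == user_id) & (df["Password"] == password)).any())

Storing raw passwords in a CSV like this is only workable for a throwaway demo.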
diff --git a/spaces/ItsJayQz/Marvel_WhatIf_Diffusion/app.py b/spaces/ItsJayQz/Marvel_WhatIf_Diffusion/app.py
deleted file mode 100644
index eac6841a968aba1cbc4cbb5d05136afd271eb7d5..0000000000000000000000000000000000000000
--- a/spaces/ItsJayQz/Marvel_WhatIf_Diffusion/app.py
+++ /dev/null
@@ -1,137 +0,0 @@
-from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
-import gradio as gr
-import torch
-from PIL import Image
-
-model_id = 'ItsJayQz/Marvel_WhatIf_Diffusion'
-prefix = 'whatif style'
-
-scheduler = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
-
-pipe = StableDiffusionPipeline.from_pretrained(
- model_id,
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
- scheduler=scheduler)
-
-pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(
- model_id,
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
- scheduler=scheduler)
-
-if torch.cuda.is_available():
- pipe = pipe.to("cuda")
- pipe_i2i = pipe_i2i.to("cuda")
-
-def error_str(error, title="Error"):
- return f"""#### {title}
- {error}""" if error else ""
-
-def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=False):
-
- generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
- prompt = f"{prefix} {prompt}" if auto_prefix else prompt
-
- try:
- if img is not None:
- return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None
- else:
- return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None
- except Exception as e:
- return None, error_str(e)
-
-def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator):
-
- result = pipe(
- prompt,
- negative_prompt = neg_prompt,
- num_inference_steps = int(steps),
- guidance_scale = guidance,
- width = width,
- height = height,
- generator = generator)
-
- return result.images[0]
-
-def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
-
- ratio = min(height / img.height, width / img.width)
- img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
- result = pipe_i2i(
- prompt,
- negative_prompt = neg_prompt,
- init_image = img,
- num_inference_steps = int(steps),
- strength = strength,
- guidance_scale = guidance,
- width = width,
- height = height,
- generator = generator)
-
- return result.images[0]
-
-css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
-"""
-with gr.Blocks(css=css) as demo:
- gr.HTML(
- f"""
-
-
-
- Marvel Whatif Diffusion
-
-
- Demo for Marvel Whatif Diffusion Stable Diffusion model.
- {"Add the following tokens to your prompts for the model to work properly: prefix" if prefix else ""}
-
- Running on {"GPU 🔥" if torch.cuda.is_available() else f"CPU 🥶. For faster inference it is recommended to upgrade to GPU in Settings"} after duplicating the space
- """)
-
-demo.queue(concurrency_count=1)
-demo.launch()
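Outside the Gradio UI, the same text-to-image path can be exercised directly with diffusers; a minimal sketch mirroring txt_to_img() above (the prompt, step count, and guidance value are arbitrary examples):

import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

model_id = "ItsJayQz/Marvel_WhatIf_Diffusion"
scheduler = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    scheduler=scheduler,
)
if torch.cuda.is_available():
    pipe = pipe.to("cuda")

# "whatif style" is the trigger prefix the app prepends when auto_prefix is enabled
image = pipe(
    "whatif style portrait of a scientist",
    negative_prompt="",
    num_inference_steps=25,
    guidance_scale=7.5,
    width=512,
    height=512,
).images[0]
image.save("out.png")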
diff --git a/spaces/Jamel887/Rvc-tio887/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py b/spaces/Jamel887/Rvc-tio887/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py
deleted file mode 100644
index b2c592527a5966e6f8e79e8c52dc5b414246dcc6..0000000000000000000000000000000000000000
--- a/spaces/Jamel887/Rvc-tio887/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py
+++ /dev/null
@@ -1,97 +0,0 @@
-from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
-import parselmouth
-import numpy as np
-
-
-class PMF0Predictor(F0Predictor):
- def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
- self.hop_length = hop_length
- self.f0_min = f0_min
- self.f0_max = f0_max
- self.sampling_rate = sampling_rate
-
- def interpolate_f0(self, f0):
- """
- Interpolate the F0 contour (fill zero/unvoiced frames by interpolation).
- """
-
- data = np.reshape(f0, (f0.size, 1))
-
- vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
- vuv_vector[data > 0.0] = 1.0
- vuv_vector[data <= 0.0] = 0.0
-
- ip_data = data
-
- frame_number = data.size
- last_value = 0.0
- for i in range(frame_number):
- if data[i] <= 0.0:
- j = i + 1
- for j in range(i + 1, frame_number):
- if data[j] > 0.0:
- break
- if j < frame_number - 1:
- if last_value > 0.0:
- step = (data[j] - data[i - 1]) / float(j - i)
- for k in range(i, j):
- ip_data[k] = data[i - 1] + step * (k - i + 1)
- else:
- for k in range(i, j):
- ip_data[k] = data[j]
- else:
- for k in range(i, frame_number):
- ip_data[k] = last_value
- else:
- ip_data[i] = data[i]  # this copy may be unnecessary
- last_value = data[i]
-
- return ip_data[:, 0], vuv_vector[:, 0]
-
- def compute_f0(self, wav, p_len=None):
- x = wav
- if p_len is None:
- p_len = x.shape[0] // self.hop_length
- else:
- assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
- time_step = self.hop_length / self.sampling_rate * 1000
- f0 = (
- parselmouth.Sound(x, self.sampling_rate)
- .to_pitch_ac(
- time_step=time_step / 1000,
- voicing_threshold=0.6,
- pitch_floor=self.f0_min,
- pitch_ceiling=self.f0_max,
- )
- .selected_array["frequency"]
- )
-
- pad_size = (p_len - len(f0) + 1) // 2
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
- f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
- f0, uv = self.interpolate_f0(f0)
- return f0
-
- def compute_f0_uv(self, wav, p_len=None):
- x = wav
- if p_len is None:
- p_len = x.shape[0] // self.hop_length
- else:
- assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
- time_step = self.hop_length / self.sampling_rate * 1000
- f0 = (
- parselmouth.Sound(x, self.sampling_rate)
- .to_pitch_ac(
- time_step=time_step / 1000,
- voicing_threshold=0.6,
- pitch_floor=self.f0_min,
- pitch_ceiling=self.f0_max,
- )
- .selected_array["frequency"]
- )
-
- pad_size = (p_len - len(f0) + 1) // 2
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
- f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
- f0, uv = self.interpolate_f0(f0)
- return f0, uv
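A minimal usage sketch of the predictor above (the audio file name is a placeholder, and the import path assumes this file's location in the repository):

import librosa
from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor

wav, sr = librosa.load("example.wav", sr=44100, mono=True)   # match the default sampling_rate
predictor = PMF0Predictor(hop_length=512, sampling_rate=sr)
f0, uv = predictor.compute_f0_uv(wav)   # one value per hop; uv is the voiced/unvoiced mask
print(f0.shape, uv.shape)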
diff --git a/spaces/Jeffsun/LSP-LearningandStrivePartner-Demo/modules/prompt_parser.py b/spaces/Jeffsun/LSP-LearningandStrivePartner-Demo/modules/prompt_parser.py
deleted file mode 100644
index 42cbbb3038612a44571765905e8526553f462663..0000000000000000000000000000000000000000
--- a/spaces/Jeffsun/LSP-LearningandStrivePartner-Demo/modules/prompt_parser.py
+++ /dev/null
@@ -1,391 +0,0 @@
-
-import re
-import math
-import numpy as np
-import torch
-
-# Code from https://github.com/AUTOMATIC1111/stable-diffusion-webui/commit/8e2aeee4a127b295bfc880800e4a312e0f049b85, modified.
-
-class PromptChunk:
- """
- This object contains token ids, weight (multipliers:1.4) and textual inversion embedding info for a chunk of prompt.
- If a prompt is short, it is represented by one PromptChunk, otherwise, multiple are necessary.
- Each PromptChunk contains exactly 77 tokens, including the start and end tokens,
- so just 75 tokens come from the prompt.
- """
-
- def __init__(self):
- self.tokens = []
- self.multipliers = []
- self.fixes = []
-
-
-class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
- """A pytorch module that is a wrapper for FrozenCLIPEmbedder module. it enhances FrozenCLIPEmbedder, making it possible to
- have unlimited prompt length and assign weights to tokens in prompt.
- """
-
- def __init__(self, text_encoder, enable_emphasis=True):
- super().__init__()
-
- self.device = lambda: text_encoder.device
- self.enable_emphasis = enable_emphasis
- """Original FrozenCLIPEmbedder module; can also be FrozenOpenCLIPEmbedder or xlmr.BertSeriesModelWithTransformation,
- depending on model."""
-
- self.chunk_length = 75
-
- def empty_chunk(self):
- """creates an empty PromptChunk and returns it"""
-
- chunk = PromptChunk()
- chunk.tokens = [self.id_start] + [self.id_end] * (self.chunk_length + 1)
- chunk.multipliers = [1.0] * (self.chunk_length + 2)
- return chunk
-
- def get_target_prompt_token_count(self, token_count):
- """returns the maximum number of tokens a prompt of a known length can have before it requires one more PromptChunk to be represented"""
-
- return math.ceil(max(token_count, 1) / self.chunk_length) * self.chunk_length
-
- def tokenize_line(self, line):
- """
- this transforms a single prompt into a list of PromptChunk objects - as many as needed to
- represent the prompt.
- Returns the list and the total number of tokens in the prompt.
- """
-
- if self.enable_emphasis:
- parsed = parse_prompt_attention(line)
- else:
- parsed = [[line, 1.0]]
-
- tokenized = self.tokenize([text for text, _ in parsed])
-
- chunks = []
- chunk = PromptChunk()
- token_count = 0
- last_comma = -1
-
- def next_chunk(is_last=False):
- """puts current chunk into the list of results and produces the next one - empty;
- if is_last is true, padding tokens at the end won't add to token_count"""
- nonlocal token_count
- nonlocal last_comma
- nonlocal chunk
-
- if is_last:
- token_count += len(chunk.tokens)
- else:
- token_count += self.chunk_length
-
- to_add = self.chunk_length - len(chunk.tokens)
- if to_add > 0:
- chunk.tokens += [self.id_end] * to_add
- chunk.multipliers += [1.0] * to_add
-
- chunk.tokens = [self.id_start] + chunk.tokens + [self.id_end]
- chunk.multipliers = [1.0] + chunk.multipliers + [1.0]
-
- last_comma = -1
- chunks.append(chunk)
- chunk = PromptChunk()
-
- comma_padding_backtrack = 20 # default value in https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/6cff4401824299a983c8e13424018efc347b4a2b/modules/shared.py#L410
- for tokens, (text, weight) in zip(tokenized, parsed):
- if text == "BREAK" and weight == -1:
- next_chunk()
- continue
-
- position = 0
- while position < len(tokens):
- token = tokens[position]
-
- if token == self.comma_token:
- last_comma = len(chunk.tokens)
-
- # this is when we are at the end of allotted 75 tokens for the current chunk, and the current token is not a comma. opts.comma_padding_backtrack
- # is a setting that specifies that if there is a comma nearby, the text after the comma should be moved out of this chunk and into the next.
- elif (
- comma_padding_backtrack != 0
- and len(chunk.tokens) == self.chunk_length
- and last_comma != -1
- and len(chunk.tokens) - last_comma <= comma_padding_backtrack
- ):
- break_location = last_comma + 1
-
- reloc_tokens = chunk.tokens[break_location:]
- reloc_mults = chunk.multipliers[break_location:]
-
- chunk.tokens = chunk.tokens[:break_location]
- chunk.multipliers = chunk.multipliers[:break_location]
-
- next_chunk()
- chunk.tokens = reloc_tokens
- chunk.multipliers = reloc_mults
-
- if len(chunk.tokens) == self.chunk_length:
- next_chunk()
-
- chunk.tokens.append(token)
- chunk.multipliers.append(weight)
- position += 1
-
- if len(chunk.tokens) > 0 or len(chunks) == 0:
- next_chunk(is_last=True)
-
- return chunks, token_count
-
- def process_texts(self, texts):
- """
- Accepts a list of texts and calls tokenize_line() on each, with cache. Returns the list of results and maximum
- length, in tokens, of all texts.
- """
-
- token_count = 0
-
- cache = {}
- batch_chunks = []
- for line in texts:
- if line in cache:
- chunks = cache[line]
- else:
- chunks, current_token_count = self.tokenize_line(line)
- token_count = max(current_token_count, token_count)
-
- cache[line] = chunks
-
- batch_chunks.append(chunks)
-
- return batch_chunks, token_count
-
- def forward(self, texts):
- """
- Accepts an array of texts; Passes texts through transformers network to create a tensor with numerical representation of those texts.
- Returns a tensor with shape of (B, T, C), where B is length of the array; T is length, in tokens, of texts (including padding) - T will
- be a multiple of 77; and C is dimensionality of each token - for SD1 it's 768, and for SD2 it's 1024.
- An example shape returned by this function can be: (2, 77, 768).
- Webui usually sends just one text at a time through this function - the only time when texts is an array with more than one element
- is when you do prompt editing: "a picture of a [cat:dog:0.4] eating ice cream"
- """
-
- batch_chunks, token_count = self.process_texts(texts)
- chunk_count = max([len(x) for x in batch_chunks])
-
- zs = []
- ts = []
- for i in range(chunk_count):
- batch_chunk = [
- chunks[i] if i < len(chunks) else self.empty_chunk()
- for chunks in batch_chunks
- ]
-
- tokens = [x.tokens for x in batch_chunk]
- multipliers = [x.multipliers for x in batch_chunk]
- # self.embeddings.fixes = [x.fixes for x in batch_chunk]
-
- # for fixes in self.embeddings.fixes:
- # for position, embedding in fixes:
- # used_embeddings[embedding.name] = embedding
-
- z = self.process_tokens(tokens, multipliers)
- zs.append(z)
- ts.append(tokens)
-
- return np.hstack(ts), torch.hstack(zs)
-
- def process_tokens(self, remade_batch_tokens, batch_multipliers):
- """
- Sends a single prompt chunk to be encoded by the transformer network.
- remade_batch_tokens is a batch of tokens - a list, where every element is a list of tokens; usually
- there are exactly 77 tokens in the list. batch_multipliers is the same but for multipliers instead of tokens.
- Multipliers are used to give more or less weight to the outputs of transformers network. Each multiplier
- corresponds to one token.
- """
- tokens = torch.asarray(remade_batch_tokens).to(self.device())
-
- # this is for SD2: SD1 uses the same token for padding and end of text, while SD2 uses different ones.
- if self.id_end != self.id_pad:
- for batch_pos in range(len(remade_batch_tokens)):
- index = remade_batch_tokens[batch_pos].index(self.id_end)
- tokens[batch_pos, index + 1 : tokens.shape[1]] = self.id_pad
-
- z = self.encode_with_transformers(tokens)
-
- # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
- batch_multipliers = torch.asarray(batch_multipliers).to(self.device())
- original_mean = z.mean()
- z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
- new_mean = z.mean()
- z = z * (original_mean / new_mean)
-
- return z
-
-
-class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase):
- def __init__(self, tokenizer, text_encoder):
- super().__init__(text_encoder)
- self.tokenizer = tokenizer
- self.text_encoder = text_encoder
-
- vocab = self.tokenizer.get_vocab()
-
- self.comma_token = vocab.get(",", None)
-
- self.token_mults = {}
- tokens_with_parens = [
- (k, v)
- for k, v in vocab.items()
- if "(" in k or ")" in k or "[" in k or "]" in k
- ]
- for text, ident in tokens_with_parens:
- mult = 1.0
- for c in text:
- if c == "[":
- mult /= 1.1
- if c == "]":
- mult *= 1.1
- if c == "(":
- mult *= 1.1
- if c == ")":
- mult /= 1.1
-
- if mult != 1.0:
- self.token_mults[ident] = mult
-
- self.id_start = self.tokenizer.bos_token_id
- self.id_end = self.tokenizer.eos_token_id
- self.id_pad = self.id_end
-
- def tokenize(self, texts):
- tokenized = self.tokenizer(
- texts, truncation=False, add_special_tokens=False
- )["input_ids"]
-
- return tokenized
-
- def encode_with_transformers(self, tokens):
- CLIP_stop_at_last_layers = 1
- tokens = tokens.to(self.text_encoder.device)
- outputs = self.text_encoder(tokens, output_hidden_states=True)
-
- if CLIP_stop_at_last_layers > 1:
- z = outputs.hidden_states[-CLIP_stop_at_last_layers]
- z = self.text_encoder.text_model.final_layer_norm(z)
- else:
- z = outputs.last_hidden_state
-
- return z
-
-
-re_attention = re.compile(
- r"""
-\\\(|
-\\\)|
-\\\[|
-\\]|
-\\\\|
-\\|
-\(|
-\[|
-:([+-]?[.\d]+)\)|
-\)|
-]|
-[^\\()\[\]:]+|
-:
-""",
- re.X,
-)
-
-re_break = re.compile(r"\s*\bBREAK\b\s*", re.S)
-
-
-def parse_prompt_attention(text):
- """
- Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
- Accepted tokens are:
- (abc) - increases attention to abc by a multiplier of 1.1
- (abc:3.12) - increases attention to abc by a multiplier of 3.12
- [abc] - decreases attention to abc by a multiplier of 1.1
- \( - literal character '('
- \[ - literal character '['
- \) - literal character ')'
- \] - literal character ']'
- \\ - literal character '\'
- anything else - just text
-
- >>> parse_prompt_attention('normal text')
- [['normal text', 1.0]]
- >>> parse_prompt_attention('an (important) word')
- [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
- >>> parse_prompt_attention('(unbalanced')
- [['unbalanced', 1.1]]
- >>> parse_prompt_attention('\(literal\]')
- [['(literal]', 1.0]]
- >>> parse_prompt_attention('(unnecessary)(parens)')
- [['unnecessaryparens', 1.1]]
- >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
- [['a ', 1.0],
- ['house', 1.5730000000000004],
- [' ', 1.1],
- ['on', 1.0],
- [' a ', 1.1],
- ['hill', 0.55],
- [', sun, ', 1.1],
- ['sky', 1.4641000000000006],
- ['.', 1.1]]
- """
-
- res = []
- round_brackets = []
- square_brackets = []
-
- round_bracket_multiplier = 1.1
- square_bracket_multiplier = 1 / 1.1
-
- def multiply_range(start_position, multiplier):
- for p in range(start_position, len(res)):
- res[p][1] *= multiplier
-
- for m in re_attention.finditer(text):
- text = m.group(0)
- weight = m.group(1)
-
- if text.startswith("\\"):
- res.append([text[1:], 1.0])
- elif text == "(":
- round_brackets.append(len(res))
- elif text == "[":
- square_brackets.append(len(res))
- elif weight is not None and len(round_brackets) > 0:
- multiply_range(round_brackets.pop(), float(weight))
- elif text == ")" and len(round_brackets) > 0:
- multiply_range(round_brackets.pop(), round_bracket_multiplier)
- elif text == "]" and len(square_brackets) > 0:
- multiply_range(square_brackets.pop(), square_bracket_multiplier)
- else:
- parts = re.split(re_break, text)
- for i, part in enumerate(parts):
- if i > 0:
- res.append(["BREAK", -1])
- res.append([part, 1.0])
-
- for pos in round_brackets:
- multiply_range(pos, round_bracket_multiplier)
-
- for pos in square_brackets:
- multiply_range(pos, square_bracket_multiplier)
-
- if len(res) == 0:
- res = [["", 1.0]]
-
- # merge runs of identical weights
- i = 0
- while i + 1 < len(res):
- if res[i][1] == res[i + 1][1]:
- res[i][0] += res[i + 1][0]
- res.pop(i + 1)
- else:
- i += 1
-
- return res
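For reference, the weights in the last doctest above are just products of the bracket multipliers defined in this function (1.1 per round bracket, 1/1.1 per square bracket) and any explicit ':x' value; the unbalanced '(' contributes one extra 1.1 to everything after it. A quick check:

print(1.3 * 1.1 * 1.1)   # ~1.573  -> 'house' (explicit 1.3, one closed paren, one unbalanced paren)
print(0.5 * 1.1)         # ~0.55   -> 'hill'  (explicit 0.5 inside the unbalanced paren)
print(1.1 ** 3 * 1.1)    # ~1.4641 -> 'sky'   (three nested parens plus the unbalanced one)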
diff --git a/spaces/Jikiwi/sovits-models/inference/infer_tool_grad.py b/spaces/Jikiwi/sovits-models/inference/infer_tool_grad.py
deleted file mode 100644
index b75af49c08e2e724839828bc419792ed580809bb..0000000000000000000000000000000000000000
--- a/spaces/Jikiwi/sovits-models/inference/infer_tool_grad.py
+++ /dev/null
@@ -1,160 +0,0 @@
-import hashlib
-import json
-import logging
-import os
-import time
-from pathlib import Path
-import io
-import librosa
-import maad
-import numpy as np
-from inference import slicer
-import parselmouth
-import soundfile
-import torch
-import torchaudio
-
-from hubert import hubert_model
-import utils
-from models import SynthesizerTrn
-logging.getLogger('numba').setLevel(logging.WARNING)
-logging.getLogger('matplotlib').setLevel(logging.WARNING)
-
-def resize2d_f0(x, target_len):
- source = np.array(x)
- source[source < 0.001] = np.nan
- target = np.interp(np.arange(0, len(source) * target_len, len(source)) / target_len, np.arange(0, len(source)),
- source)
- res = np.nan_to_num(target)
- return res
-
-def get_f0(x, p_len,f0_up_key=0):
-
- time_step = 160 / 16000 * 1000
- f0_min = 50
- f0_max = 1100
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
-
- f0 = parselmouth.Sound(x, 16000).to_pitch_ac(
- time_step=time_step / 1000, voicing_threshold=0.6,
- pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency']
-
- pad_size=(p_len - len(f0) + 1) // 2
- if(pad_size>0 or p_len - len(f0) - pad_size>0):
- f0 = np.pad(f0,[[pad_size,p_len - len(f0) - pad_size]], mode='constant')
-
- f0 *= pow(2, f0_up_key / 12)
- f0_mel = 1127 * np.log(1 + f0 / 700)
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
- f0_mel[f0_mel <= 1] = 1
- f0_mel[f0_mel > 255] = 255
- f0_coarse = np.rint(f0_mel).astype(int)  # np.int was removed from NumPy; plain int works
- return f0_coarse, f0
-
-def clean_pitch(input_pitch):
- num_nan = np.sum(input_pitch == 1)
- if num_nan / len(input_pitch) > 0.9:
- input_pitch[input_pitch != 1] = 1
- return input_pitch
-
-
-def plt_pitch(input_pitch):
- input_pitch = input_pitch.astype(float)
- input_pitch[input_pitch == 1] = np.nan
- return input_pitch
-
-
-def f0_to_pitch(ff):
- f0_pitch = 69 + 12 * np.log2(ff / 440)
- return f0_pitch
-
-
-def fill_a_to_b(a, b):
- if len(a) < len(b):
- for _ in range(0, len(b) - len(a)):
- a.append(a[0])
-
-
-def mkdir(paths: list):
- for path in paths:
- if not os.path.exists(path):
- os.mkdir(path)
-
-
-class VitsSvc(object):
- def __init__(self):
- self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- self.SVCVITS = None
- self.hps = None
- self.speakers = None
- self.hubert_soft = utils.get_hubert_model()
-
- def set_device(self, device):
- self.device = torch.device(device)
- self.hubert_soft.to(self.device)
- if self.SVCVITS != None:
- self.SVCVITS.to(self.device)
-
- def loadCheckpoint(self, path):
- self.hps = utils.get_hparams_from_file(f"checkpoints/{path}/config.json")
- self.SVCVITS = SynthesizerTrn(
- self.hps.data.filter_length // 2 + 1,
- self.hps.train.segment_size // self.hps.data.hop_length,
- **self.hps.model)
- _ = utils.load_checkpoint(f"checkpoints/{path}/model.pth", self.SVCVITS, None)
- _ = self.SVCVITS.eval().to(self.device)
- self.speakers = self.hps.spk
-
- def get_units(self, source, sr):
- source = source.unsqueeze(0).to(self.device)
- with torch.inference_mode():
- units = self.hubert_soft.units(source)
- return units
-
-
- def get_unit_pitch(self, in_path, tran):
- source, sr = torchaudio.load(in_path)
- source = torchaudio.functional.resample(source, sr, 16000)
- if len(source.shape) == 2 and source.shape[1] >= 2:
- source = torch.mean(source, dim=0).unsqueeze(0)
- soft = self.get_units(source, sr).squeeze(0).cpu().numpy()
- f0_coarse, f0 = get_f0(source.cpu().numpy()[0], soft.shape[0]*2, tran)
- return soft, f0
-
- def infer(self, speaker_id, tran, raw_path):
- speaker_id = self.speakers[speaker_id]
- sid = torch.LongTensor([int(speaker_id)]).to(self.device).unsqueeze(0)
- soft, pitch = self.get_unit_pitch(raw_path, tran)
- f0 = torch.FloatTensor(clean_pitch(pitch)).unsqueeze(0).to(self.device)
- stn_tst = torch.FloatTensor(soft)
- with torch.no_grad():
- x_tst = stn_tst.unsqueeze(0).to(self.device)
- x_tst = torch.repeat_interleave(x_tst, repeats=2, dim=1).transpose(1, 2)
- audio = self.SVCVITS.infer(x_tst, f0=f0, g=sid)[0,0].data.float()
- return audio, audio.shape[-1]
-
- def inference(self,srcaudio,chara,tran,slice_db):
- sampling_rate, audio = srcaudio
- audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
- if len(audio.shape) > 1:
- audio = librosa.to_mono(audio.transpose(1, 0))
- if sampling_rate != 16000:
- audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
- soundfile.write("tmpwav.wav", audio, 16000, format="wav")
- chunks = slicer.cut("tmpwav.wav", db_thresh=slice_db)
- audio_data, audio_sr = slicer.chunks2audio("tmpwav.wav", chunks)
- audio = []
- for (slice_tag, data) in audio_data:
- length = int(np.ceil(len(data) / audio_sr * self.hps.data.sampling_rate))
- raw_path = io.BytesIO()
- soundfile.write(raw_path, data, audio_sr, format="wav")
- raw_path.seek(0)
- if slice_tag:
- _audio = np.zeros(length)
- else:
- out_audio, out_sr = self.infer(chara, tran, raw_path)
- _audio = out_audio.cpu().numpy()
- audio.extend(list(_audio))
- audio = (np.array(audio) * 32768.0).astype('int16')
- return (self.hps.data.sampling_rate,audio)
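The f0_to_pitch() helper above is the standard frequency-to-MIDI-note conversion (69 is A4 at 440 Hz, and each octave adds 12); a quick sanity check:

import numpy as np

def f0_to_pitch(ff):
    return 69 + 12 * np.log2(ff / 440)

print(f0_to_pitch(440.0))   # 69.0
print(f0_to_pitch(880.0))   # 81.0, one octave higher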
diff --git a/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Guineapig/con_guineapig_SVM.py b/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Guineapig/con_guineapig_SVM.py
deleted file mode 100644
index 980c4b1404ba312ae0a447fd02eaecb42bc9f780..0000000000000000000000000000000000000000
--- a/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Guineapig/con_guineapig_SVM.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import cv2
-import numpy as np
-from PIL import Image
-import pickle
-import tensorflow as tf
-import io
-
-class gpSVM:
- def __init__(self,url) -> None:
- self.image = url
-
- def predict_image(self):
- # Load the model
- load_extractor = tf.keras.models.load_model("././Model/Guineapig/resnetxSVM/resnet_EXTRACTOR.h5")
-
- modelpath = "././Model/Guineapig/resnetxSVM/dataSaved.pkl"
-
- with open(modelpath, 'rb') as file:
- saved_data = pickle.load(file)
- animal_breed = saved_data['class_name']
- model = saved_data['svm_model']
-
- im = Image.open(self.image)
- img = im.convert("RGB")
- img= np.asarray(img)
- image_resized= cv2.resize(img, (224,224))
- features = load_extractor.predict(np.expand_dims(image_resized, axis=0))
-
- reshaped_features = features.reshape(features.shape[0],-1)
- predicted_class = model.predict(reshaped_features)
- pred_prob = model.predict_proba(reshaped_features)
- prediction_probability = pred_prob[0][predicted_class[0]]
- predicted_class
-
- output_class= animal_breed[predicted_class[0]]
-
- return [output_class, prediction_probability]
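predict_image() above expects the pickle to hold a probability-enabled SVM fitted on flattened ResNet features under the keys 'svm_model' and 'class_name'. A rough sketch of how such a file could be produced (paths, arrays, and class names are placeholders, not the project's actual training code):

import pickle
import numpy as np
import tensorflow as tf
from sklearn.svm import SVC

extractor = tf.keras.models.load_model("resnet_EXTRACTOR.h5")   # placeholder path
images = np.random.rand(20, 224, 224, 3)                        # placeholder 224x224 RGB batch
labels = np.random.randint(0, 2, size=20)                       # placeholder integer labels

features = extractor.predict(images).reshape(len(images), -1)   # flatten, as in predict_image()
svm = SVC(probability=True).fit(features, labels)               # predict_proba needs probability=True

with open("dataSaved.pkl", "wb") as f:
    pickle.dump({"svm_model": svm, "class_name": ["breed_a", "breed_b"]}, f)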
diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg2mel/train/optim.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg2mel/train/optim.py
deleted file mode 100644
index 62533b95864019df1eca855287cc0bcdb53745d4..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg2mel/train/optim.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import torch
-import numpy as np
-
-
-class Optimizer():
- def __init__(self, parameters, optimizer, lr, eps, lr_scheduler,
- **kwargs):
-
- # Setup torch optimizer
- self.opt_type = optimizer
- self.init_lr = lr
- self.sch_type = lr_scheduler
- opt = getattr(torch.optim, optimizer)
- if lr_scheduler == 'warmup':
- warmup_step = 4000.0
- init_lr = lr
- self.lr_scheduler = lambda step: init_lr * warmup_step ** 0.5 * \
- np.minimum((step+1)*warmup_step**-1.5, (step+1)**-0.5)
- self.opt = opt(parameters, lr=1.0)
- else:
- self.lr_scheduler = None
- self.opt = opt(parameters, lr=lr, eps=eps) # ToDo: 1e-8 better?
-
- def get_opt_state_dict(self):
- return self.opt.state_dict()
-
- def load_opt_state_dict(self, state_dict):
- self.opt.load_state_dict(state_dict)
-
- def pre_step(self, step):
- if self.lr_scheduler is not None:
- cur_lr = self.lr_scheduler(step)
- for param_group in self.opt.param_groups:
- param_group['lr'] = cur_lr
- else:
- cur_lr = self.init_lr
- self.opt.zero_grad()
- return cur_lr
-
- def step(self):
- self.opt.step()
-
- def create_msg(self):
- return ['Optim.Info.| Algo. = {}\t| Lr = {}\t (schedule = {})'
- .format(self.opt_type, self.init_lr, self.sch_type)]
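The 'warmup' branch above is the inverse-square-root (Noam) schedule: the rate ramps up for the first 4000 steps, peaks near the configured lr, then decays like 1/sqrt(step). A quick check of the values it produces (the lr value is an arbitrary example):

import numpy as np

init_lr, warmup_step = 1e-3, 4000.0
lr = lambda step: init_lr * warmup_step ** 0.5 * np.minimum(
    (step + 1) * warmup_step ** -1.5, (step + 1) ** -0.5)

for step in (0, 1999, 3999, 15999):
    print(step, lr(step))   # rises until ~step 4000 (where it equals init_lr), then decays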
diff --git a/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/transliterate/script_unifier.py b/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/transliterate/script_unifier.py
deleted file mode 100644
index 20f39339ffb3178ea17785aba09eb620d108f330..0000000000000000000000000000000000000000
--- a/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/transliterate/script_unifier.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#
-# Copyright (c) 2013-present, Anoop Kunchukuttan
-# All rights reserved.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-#
-
-#Program for normalization of text written in Unicode. This is mainly geared towards Indic scripts
-#
-# @author Anoop Kunchukuttan
-#
-
-import sys
-from indicnlp.normalize import indic_normalize
-from indicnlp.transliterate import unicode_transliterate
-from indicnlp import loader
-
-class AggressiveScriptUnifier():
-
- def __init__(self,common_lang='hi',nasals_mode='to_nasal_consonants'):
- self.common_lang=common_lang
- self.nasals_mode=nasals_mode
- self.do_normalize_chandras=True
- self.do_normalize_vowel_ending=True
- self.remove_nuktas=True
- self.normalizer_map={}
- self._init_normalizers()
-
- def _init_normalizers(self):
- normalizer_factory=indic_normalize.IndicNormalizerFactory()
-
- ## for languages with common parameters
- for lang in ['hi','mr','sa','kK','ne','sd','bn','gu','ta','te','kn']:
- self.normalizer_map[lang]=normalizer_factory.get_normalizer(lang, nasals_mode=self.nasals_mode,
- do_normalize_chandras=self.do_normalize_chandras, remove_nuktas=self.remove_nuktas,
- do_normalize_vowel_ending=self.do_normalize_vowel_ending)
-
- ## for languages with language specific parameters
- self.normalizer_map['pa']=normalizer_factory.get_normalizer('pa', nasals_mode=self.nasals_mode,
- do_normalize_chandras=self.do_normalize_chandras, remove_nuktas=self.remove_nuktas,
- do_normalize_vowel_ending=self.do_normalize_vowel_ending,
- do_canonicalize_addak=True, do_canonicalize_tippi=True,
- do_replace_vowel_bases=True)
- self.normalizer_map['or']=normalizer_factory.get_normalizer('or', nasals_mode=self.nasals_mode,
- do_normalize_chandras=self.do_normalize_chandras, remove_nuktas=self.remove_nuktas,
- do_normalize_vowel_ending=self.do_normalize_vowel_ending,
- do_remap_wa=True)
- self.normalizer_map['as']=normalizer_factory.get_normalizer('as', nasals_mode=self.nasals_mode,
- do_normalize_chandras=self.do_normalize_chandras, remove_nuktas=self.remove_nuktas,
- do_normalize_vowel_ending=self.do_normalize_vowel_ending,
- do_remap_assamese_chars=True)
- self.normalizer_map['ml']=normalizer_factory.get_normalizer('ml', nasals_mode=self.nasals_mode,
- do_normalize_chandras=self.do_normalize_chandras, remove_nuktas=self.remove_nuktas,
- do_normalize_vowel_ending=self.do_normalize_vowel_ending,
- do_canonicalize_chillus=True, do_correct_geminated_T=True)
-
- def transform(self,text,lang):
- text=self.normalizer_map[lang].normalize(text)
- text=unicode_transliterate.UnicodeIndicTransliterator.transliterate(text, lang, self.common_lang)
- return text
-
-class BasicScriptUnifier():
-
- def __init__(self,common_lang='hi',nasals_mode='do_nothing'):
- self.common_lang=common_lang
- self.nasals_mode=nasals_mode
- self.normalizer_map={}
- self._init_normalizers()
-
- def _init_normalizers(self):
- normalizer_factory=indic_normalize.IndicNormalizerFactory()
-
- for lang in ['hi','mr','sa','kK','ne','sd','bn','gu','ta','te','kn','pa','or','as','ml']:
- self.normalizer_map[lang]=normalizer_factory.get_normalizer(lang, nasals_mode=self.nasals_mode)
-
- def transform(self,text,lang):
-
- if lang in self.normalizer_map:
- text=self.normalizer_map[lang].normalize(text)
-
- text=unicode_transliterate.UnicodeIndicTransliterator.transliterate(text, lang, self.common_lang)
- return text
-
-class NaiveScriptUnifier():
-
- def __init__(self,common_lang='hi'):
- self.common_lang=common_lang
-
- def transform(self,text,lang):
-
- text=unicode_transliterate.UnicodeIndicTransliterator.transliterate(text, lang, self.common_lang)
- return text
-
-if __name__ == '__main__':
-
- loader.load()
-
- if len(sys.argv)<=4:
- print("Usage: python script_unifier <aggressive|moderate|basic|naive> <infile> <outfile> <language>")
- sys.exit(1)
-
- if sys.argv[1]=='aggressive':
-
- language=sys.argv[4]
-
- unifier=AggressiveScriptUnifier(nasals_mode='to_nasal_consonants')
-
- with open(sys.argv[2],'r',encoding='utf-8') as ifile:
- with open(sys.argv[3],'w',encoding='utf-8') as ofile:
- for i, line in enumerate(ifile.readlines()):
-
- line=line.strip()
- transliterated_line=unifier.transform(line,language)
- ofile.write(transliterated_line+'\n')
-
- elif sys.argv[1]=='moderate':
-
- language=sys.argv[4]
-
- unifier=AggressiveScriptUnifier(nasals_mode='do_nothing')
-
- with open(sys.argv[2],'r',encoding='utf-8') as ifile:
- with open(sys.argv[3],'w',encoding='utf-8') as ofile:
- for i, line in enumerate(ifile.readlines()):
-
- line=line.strip()
- transliterated_line=unifier.transform(line,language)
- ofile.write(transliterated_line+'\n')
-
- elif sys.argv[1]=='basic':
-
- language=sys.argv[4]
-
- unifier=BasicScriptUnifier()
-
- with open(sys.argv[2],'r',encoding='utf-8') as ifile:
- with open(sys.argv[3],'w',encoding='utf-8') as ofile:
- for i, line in enumerate(ifile.readlines()):
-
- line=line.strip()
- transliterated_line=unifier.transform(line,language)
- ofile.write(transliterated_line+'\n')
-
- elif sys.argv[1]=='naive':
-
- language=sys.argv[4]
-
- unifier=NaiveScriptUnifier()
-
- with open(sys.argv[2],'r',encoding='utf-8') as ifile:
- with open(sys.argv[3],'w',encoding='utf-8') as ofile:
- for i, line in enumerate(ifile.readlines()):
-
- line=line.strip()
- transliterated_line=unifier.transform(line,language)
- ofile.write(transliterated_line+'\n')
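A minimal programmatic usage sketch of the unifiers above (this assumes the indicnlp package and its resources are installed and configured; the Tamil sample string is arbitrary):

from indicnlp import loader
from indicnlp.transliterate.script_unifier import AggressiveScriptUnifier

loader.load()                          # must run before any normalizer is constructed
unifier = AggressiveScriptUnifier()    # maps every supported script to Devanagari ('hi') by default
print(unifier.transform("தமிழ்", "ta"))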
diff --git a/spaces/KyanChen/RSPrompter/mmdet/engine/hooks/set_epoch_info_hook.py b/spaces/KyanChen/RSPrompter/mmdet/engine/hooks/set_epoch_info_hook.py
deleted file mode 100644
index 183f3167445dc0818e4fa37bdd2049d3876ed031..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/engine/hooks/set_epoch_info_hook.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from mmengine.hooks import Hook
-from mmengine.model.wrappers import is_model_wrapper
-
-from mmdet.registry import HOOKS
-
-
-@HOOKS.register_module()
-class SetEpochInfoHook(Hook):
- """Set runner's epoch information to the model."""
-
- def before_train_epoch(self, runner):
- epoch = runner.epoch
- model = runner.model
- if is_model_wrapper(model):
- model = model.module
- model.set_epoch(epoch)
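The hook assumes the wrapped detector exposes a set_epoch() method; a minimal illustration of that contract on the model side (the class below is a stub, not an mmdet model):

class EpochAwareDetector:
    """Any model used with SetEpochInfoHook only needs to accept the epoch index."""

    def __init__(self):
        self.epoch = 0

    def set_epoch(self, epoch):
        # curriculum schedules or loss weights can key off self.epoch during training
        self.epoch = epoch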
diff --git a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/infer_pack/models_onnx.py b/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/infer_pack/models_onnx.py
deleted file mode 100644
index 2b06555fafd8e64826844ecf4ee9e15b94fcec6a..0000000000000000000000000000000000000000
--- a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/infer_pack/models_onnx.py
+++ /dev/null
@@ -1,824 +0,0 @@
-import math
-import logging
-
-logger = logging.getLogger(__name__)
-
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import Conv1d, Conv2d, ConvTranspose1d
-from torch.nn import functional as F
-from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
-
-from lib.infer.infer_libs.infer_pack import attentions, commons, modules
-from lib.infer.infer_libs.infer_pack.commons import get_padding, init_weights
-
-
-class TextEncoder256(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(256, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0 == True:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, phone, pitch, lengths):
- if pitch == None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return m, logs, x_mask
-
-
-class TextEncoder768(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(768, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0 == True:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, phone, pitch, lengths):
- if pitch == None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(
- self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0,
- ):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(
- modules.ResidualCouplingLayer(
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- mean_only=True,
- )
- )
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
- def remove_weight_norm(self):
- for i in range(self.n_flows):
- self.flows[i * 2].remove_weight_norm()
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
- def remove_weight_norm(self):
- self.enc.remove_weight_norm()
-
-
-class Generator(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=0,
- ):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class SineGen(torch.nn.Module):
- """Definition of sine generator
- SineGen(samp_rate, harmonic_num = 0,
- sine_amp = 0.1, noise_std = 0.003,
- voiced_threshold = 0,
- flag_for_pulse=False)
- samp_rate: sampling rate in Hz
- harmonic_num: number of harmonic overtones (default 0)
- sine_amp: amplitude of sine waveform (default 0.1)
- noise_std: std of Gaussian noise (default 0.003)
- voiced_threshold: F0 threshold for U/V classification (default 0)
- flag_for_pulse: this SineGen is used inside PulseGen (default False)
- Note: when flag_for_pulse is True, the first time step of a voiced
- segment is always sin(np.pi) or cos(0)
- """
-
- def __init__(
- self,
- samp_rate,
- harmonic_num=0,
- sine_amp=0.1,
- noise_std=0.003,
- voiced_threshold=0,
- flag_for_pulse=False,
- ):
- super(SineGen, self).__init__()
- self.sine_amp = sine_amp
- self.noise_std = noise_std
- self.harmonic_num = harmonic_num
- self.dim = self.harmonic_num + 1
- self.sampling_rate = samp_rate
- self.voiced_threshold = voiced_threshold
-
- def _f02uv(self, f0):
- # generate uv signal
- uv = torch.ones_like(f0)
- uv = uv * (f0 > self.voiced_threshold)
- return uv
-
- def forward(self, f0, upp):
- """sine_tensor, uv = forward(f0)
- input F0: tensor(batchsize=1, length, dim=1)
- f0 for unvoiced steps should be 0
- output sine_tensor: tensor(batchsize=1, length, dim)
- output uv: tensor(batchsize=1, length, 1)
- """
- with torch.no_grad():
- f0 = f0[:, None].transpose(1, 2)
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
- # fundamental component
- f0_buf[:, :, 0] = f0[:, :, 0]
- for idx in np.arange(self.harmonic_num):
- f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
- idx + 2
- ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
- rad_values = (f0_buf / self.sampling_rate) % 1  # %1 here means the n_har products cannot be optimized in post-processing
- rand_ini = torch.rand(
- f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
- )
- rand_ini[:, 0] = 0
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
- tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  # applying %1 here would mean the following cumsum could no longer be optimized
- tmp_over_one *= upp
- tmp_over_one = F.interpolate(
- tmp_over_one.transpose(2, 1),
- scale_factor=upp,
- mode="linear",
- align_corners=True,
- ).transpose(2, 1)
- rad_values = F.interpolate(
- rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(
- 2, 1
- ) #######
- tmp_over_one %= 1
- tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
- cumsum_shift = torch.zeros_like(rad_values)
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
- sine_waves = torch.sin(
- torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
- )
- sine_waves = sine_waves * self.sine_amp
- uv = self._f02uv(f0)
- uv = F.interpolate(
- uv.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(2, 1)
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
- noise = noise_amp * torch.randn_like(sine_waves)
- sine_waves = sine_waves * uv + noise
- return sine_waves, uv, noise
-
-
-class SourceModuleHnNSF(torch.nn.Module):
- """SourceModule for hn-nsf
- SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
- add_noise_std=0.003, voiced_threshod=0)
- sampling_rate: sampling rate in Hz
- harmonic_num: number of harmonics above F0 (default: 0)
- sine_amp: amplitude of sine source signal (default: 0.1)
- add_noise_std: std of additive Gaussian noise (default: 0.003)
- note that amplitude of noise in unvoiced is decided
- by sine_amp
- voiced_threshold: threshold to set U/V given F0 (default: 0)
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
- F0_sampled (batchsize, length, 1)
- Sine_source (batchsize, length, 1)
- noise_source (batchsize, length 1)
- uv (batchsize, length, 1)
- """
-
- def __init__(
- self,
- sampling_rate,
- harmonic_num=0,
- sine_amp=0.1,
- add_noise_std=0.003,
- voiced_threshod=0,
- is_half=True,
- ):
- super(SourceModuleHnNSF, self).__init__()
-
- self.sine_amp = sine_amp
- self.noise_std = add_noise_std
- self.is_half = is_half
- # to produce sine waveforms
- self.l_sin_gen = SineGen(
- sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
- )
-
- # to merge source harmonics into a single excitation
- self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
- self.l_tanh = torch.nn.Tanh()
-
- def forward(self, x, upp=None):
- sine_wavs, uv, _ = self.l_sin_gen(x, upp)
- if self.is_half:
- sine_wavs = sine_wavs.half()
- sine_merge = self.l_tanh(self.l_linear(sine_wavs))
- return sine_merge, None, None # noise, uv
-
-
-class GeneratorNSF(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels,
- sr,
- is_half=False,
- ):
- super(GeneratorNSF, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
-
- self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
- self.m_source = SourceModuleHnNSF(
- sampling_rate=sr, harmonic_num=0, is_half=is_half
- )
- self.noise_convs = nn.ModuleList()
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- c_cur = upsample_initial_channel // (2 ** (i + 1))
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
- if i + 1 < len(upsample_rates):
- stride_f0 = np.prod(upsample_rates[i + 1 :])
- self.noise_convs.append(
- Conv1d(
- 1,
- c_cur,
- kernel_size=stride_f0 * 2,
- stride=stride_f0,
- padding=stride_f0 // 2,
- )
- )
- else:
- self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- self.upp = np.prod(upsample_rates)
-
- def forward(self, x, f0, g=None):
- har_source, noi_source, uv = self.m_source(f0, self.upp)
- har_source = har_source.transpose(1, 2)
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- x_source = self.noise_convs[i](har_source)
- x = x + x_source
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-sr2sr = {
- "32k": 32000,
- "40k": 40000,
- "48k": 48000,
-}
-
-
-class SynthesizerTrnMsNSFsidM(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr,
- version,
- **kwargs
- ):
- super().__init__()
- if isinstance(sr, str):
- sr = sr2sr[sr]
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- if version == "v1":
- self.enc_p = TextEncoder256(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- else:
- self.enc_p = TextEncoder768(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- self.dec = GeneratorNSF(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- sr=sr,
- is_half=kwargs["is_half"],
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- self.speaker_map = None
- # gin_channels and spk_embed_dim are ints; string concatenation would raise a TypeError
- logger.debug(
- "gin_channels: %s, self.spk_embed_dim: %s",
- gin_channels,
- self.spk_embed_dim,
- )
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def construct_spkmixmap(self, n_speaker):
- self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels))
- for i in range(n_speaker):
- self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]]))
- self.speaker_map = self.speaker_map.unsqueeze(0)
-
- def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None):
- if self.speaker_map is not None: # [N, S] * [S, B, 1, H]
- g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1]
- g = g * self.speaker_map # [N, S, B, 1, H]
- g = torch.sum(g, dim=1) # [N, 1, B, 1, H]
- g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N]
- else:
- g = g.unsqueeze(0)
- g = self.emb_g(g).transpose(1, 2)
-
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
- return o
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2, 3, 5, 7, 11, 17]
- # periods = [3, 5, 7, 11, 17, 23, 37]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
- ]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = [] #
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- # for j in range(len(fmap_r)):
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class MultiPeriodDiscriminatorV2(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminatorV2, self).__init__()
- # periods = [2, 3, 5, 7, 11, 17]
- periods = [2, 3, 5, 7, 11, 17, 23, 37]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
- ]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = [] #
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- # for j in range(len(fmap_r)):
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList(
- [
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ]
- )
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList(
- [
- norm_f(
- Conv2d(
- 1,
- 32,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 32,
- 128,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 128,
- 512,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 512,
- 1024,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 1024,
- 1024,
- (kernel_size, 1),
- 1,
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- ]
- )
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
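For reference, a minimal smoke test for the multi-period discriminator deleted above. This is only a sketch: it assumes the classes and their module-level imports (Conv1d, Conv2d, weight_norm, spectral_norm, get_padding, modules.LRELU_SLOPE) resolve, and the waveform length is arbitrary.

```python
import torch

# Dummy real and generated waveforms of shape (batch, 1, samples).
mpd = MultiPeriodDiscriminator(use_spectral_norm=False)
y = torch.randn(2, 1, 8192)
y_hat = torch.randn(2, 1, 8192)

y_d_rs, y_d_gs, fmap_rs, fmap_gs = mpd(y, y_hat)
# One DiscriminatorS plus one DiscriminatorP per period in [2, 3, 5, 7, 11, 17].
print(len(y_d_rs))  # 7
```

Each returned list has one entry per sub-discriminator, which is how the GAN losses in this family of models are usually accumulated.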
diff --git a/spaces/Marshalls/testmtd/models/ddc_model.py b/spaces/Marshalls/testmtd/models/ddc_model.py
deleted file mode 100644
index abe5992b40d42ffc0a1b21cbe8fd2237593ab373..0000000000000000000000000000000000000000
--- a/spaces/Marshalls/testmtd/models/ddc_model.py
+++ /dev/null
@@ -1,212 +0,0 @@
-from .base_model import BaseModel
-import torch.nn.functional as F
-from torch import nn
-import torch
-#from models import constants
-import numpy as np
-import os
-
-class DDCModel(nn.Module):
-#class DDCModel(BaseModel):
-
- def __init__(self, opt):
- super().__init__()
- #super().__init__(opt)
- self.opt = opt
- self.loss_names = ['ce', 'humaneness_reg', 'total']
- self.metric_names = ['accuracy']
- self.module_names = [''] # changed from 'model_names'
- self.schedulers = []
- self.net = DDCNet(opt)
- self.optimizers = [torch.optim.Adam([
- {'params': [param for name, param in self.net.named_parameters() if name[-4:] == 'bias'],
- 'lr': 2 * opt.learning_rate}, # bias parameters change quicker - no weight decay is applied
- {'params': [param for name, param in self.net.named_parameters() if name[-4:] != 'bias'],
- 'lr': opt.learning_rate, 'weight_decay': opt.weight_decay} # filter parameters have weight decay
- ])]
- self.loss_ce = None
- self.humaneness_reg = None
- self.save_dir=opt.checkpoints_dir+"/block_placement_ddc2"
- self.device="cpu"
-
- def name(self):
- return "DDCNet"
-
- def load_networks(self, epoch):
- for name in self.module_names:
- if isinstance(name, str):
- load_filename = '%s_net_%s.pth' % (epoch, name)
- load_path = os.path.join(self.save_dir, load_filename)
- net = getattr(self, 'net' + name)
- if isinstance(net, torch.nn.DataParallel):
- net = net.module
- print('loading the model from %s' % load_path)
- # if you are using PyTorch newer than 0.4 (e.g., built from
- # GitHub source), you can remove str() on self.device
- state_dict = torch.load(load_path, map_location=str(self.device))
- if hasattr(state_dict, '_metadata'):
- del state_dict._metadata
-
- # patch InstanceNorm checkpoints prior to 0.4
- #for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
- # self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
- # if not self.opt.gpu_ids:
- # state_dict = {key[6:]: value for key, value in
- # state_dict.items()} # remove data_parallel's "module."
- net.load_state_dict(state_dict)
-
- @staticmethod
- def modify_commandline_options(parser, is_train):
- # parser.add_argument('--num_classes', type=int, default=20)
- # parser.add_argument('--output_channels', type=int, default=(4*3))
- # parser.add_argument('--kernel_size', type=int, default=2)
- # parser.add_argument('--bias', action='store_false')
- parser.add_argument('--entropy_loss_coeff', type=float, default=0.0)
- parser.add_argument('--humaneness_reg_coeff', type=float, default=0.0)
- parser.add_argument('--hidden_dim', type=int, default=512)
- parser.add_argument('--num_classes', type=int, default=2)
- parser.add_argument('--dropout', type=float, default=0.0)
- return parser
-
- def set_input(self, data):
- # move multiple samples of the same song to the second dimension and the reshape to batch dimension
- input_ = data['input']
- target_ = data['target']
- input_shape = input_.shape
- target_shape = target_.shape
- # 0 batch dimension, 1 window dimension, 2 context time dimension, 3 frequency dimension, 4 mel_window_size dimension, 5 time dimension
- self.input = input_.reshape((input_shape[0]*input_shape[1], input_shape[2], input_shape[3], input_shape[4], input_shape[5])).to(self.device)
- self.input = self.input.permute(0,4,1,2,3) # batch/window x time x temporal_context x frequency_features x mel_window_sizes
- #we collapse all the dimensions of target_ because that is the same way the output of the network is being processed for the cross entropy calculation (see self.forward)
- # here, 0 is the batch dimension, 1 is the window index, 2 is the time dimension, 3 is the output channel dimension
- self.target = target_.reshape((target_shape[0]*target_shape[1]*target_shape[2]*target_shape[3])).to(self.device)
-
- def forward(self):
- self.output = self.net.forward(self.input)
- x = self.output
- [n, l , classes] = x.size()
- x = x.view(n * l, classes)
-
- # print(x)
- self.loss_ce = F.cross_entropy(x, self.target)
- if self.opt.entropy_loss_coeff > 0:
- S = F.softmax(x, dim=1) * F.log_softmax(x, dim=1)
- S = -1.0 * S.mean()
- self.loss_ce += self.opt.entropy_loss_coeff * S
- self.metric_accuracy = (torch.argmax(x,1) == self.target).sum().float()/len(self.target)
-
- #TODO: implement humaneness_reg maybe
- # problem is we don't have past notes available in input, so need to do that differently
- # just use output I guess :P
- # step_size = self.opt.step_size
- # humaneness_delta = constants.HUMAN_DELTA
- # window_size = int(humaneness_delta/step_size)
- #
- # receptive_field = self.net.module.receptive_field
- # notes = (torch.argmax(input[:,-5:,receptive_field//2-(window_size):receptive_field//2],1)==4).float()
- # distance_factor = torch.tensor(np.exp(-2*np.arange(window_size,0,-1)/window_size)).float().cuda()
- # if self.opt.entropy_loss_coeff > 0:
- # weights = torch.tensordot(notes,distance_factor,dims=1)
- # humaneness_reg = F.cross_entropy(x,torch.zeros(weights.shape).long().cuda(), reduction='none')
- # humaneness_reg = torch.dot(humaneness_reg, weights)
- # self.loss_humaneness_reg = humaneness_reg
- # # self.loss_humaneness_reg = 0
- # self.loss_total = self.loss_ce + self.opt.humaneness_reg_coeff * self.loss_humaneness_reg
- # else:
- # self.loss_humaneness_reg = 0
- # self.loss_total = self.loss_ce
- self.loss_humaneness_reg = 0
- self.loss_total = self.loss_ce
-
- def backward(self):
- self.optimizers[0].zero_grad()
- self.loss_total.backward()
- self.optimizers[0].step()
-
- def optimize_parameters(self):
- self.set_requires_grad(self.net, requires_grad=True)
- self.forward()
- self.backward()
- for scheduler in self.schedulers:
- # step for schedulers that update after each iteration
- try:
- scheduler.batch_step()
- except AttributeError:
- pass
-
- def prepare_input(self,y):
- # dimensions of y are: features x window_sizes x time
- receptive_field = 1
- input_length = y.shape[-1]
- y = np.concatenate((np.zeros((y.shape[0],y.shape[1],receptive_field+self.opt.time_shifts//2)),y),2)
- # we also pad at the end to allow generation to be of the same length of song, by padding an amount corresponding to time_shifts
- y = np.concatenate((y,np.zeros((y.shape[0],y.shape[1],self.opt.time_shifts//2))),2)
- input_windowss = []
- time_shifts = self.opt.time_shifts - 1
- # loop that gets the input features for each of the windows, shifted by `ii`, and saves them in `input_windowss`
- for ii in range(-time_shifts//2, time_shifts//2+1):
- input_windows = [y[:,:,self.opt.time_shifts//2+ii:self.opt.time_shifts//2+ii+input_length]]
- input_windows = torch.tensor(input_windows)
- input_windows = (input_windows - input_windows.mean())/torch.abs(input_windows).max()
- # input_windows = (input_windows.permute(3,0,1,2) - input_windows.mean(-1)).permute(1,2,3,0)
- input_windowss.append(input_windows.float())
- input = torch.stack(input_windowss,dim=1).float()
- input_shape = input.shape
- input = input.to(self.device)
- input = input.permute(0,4,1,2,3) # batch/window x time x temporal_context x frequency_features x mel_window_sizes
- return input
-
- def generate(self,y):
- input = self.prepare_input(y)
- if self.opt.cuda:
- with torch.no_grad():
- self.net.module.eval()
- return F.softmax(self.net.module.forward(input)[0],2)
- else:
- with torch.no_grad():
- self.net.eval()
- return F.softmax(self.net.forward(input)[0],2)
-
- def generate_features(self,y):
- input = self.prepare_input(y)
- if self.opt.cuda:
- with torch.no_grad():
- self.net.module.eval()
- logits, h = self.net.module.forward(input)
- return h, F.softmax(logits,2)
- else:
- with torch.no_grad():
- self.net.eval()
- logits, h = self.net.forward(input)
- return h, F.softmax(logits,2)
-
-
-class DDCNet(nn.Module):
- def __init__(self,opt):
- super(DDCNet, self).__init__()
- self.conv1 = nn.Conv2d(3, 20, (7,3)) #assumes CHW format
- # self.pool = nn.MaxPool1d(3, 3)
- self.pool = nn.MaxPool2d((1,3), (1,3))
- self.conv2 = nn.Conv2d(20, 20, 3)
- # self.fc1 = nn.Linear(20 * 9, 256)
- # self.fc2 = nn.Linear(256, 128)
- self.lstm = nn.LSTM(input_size=20*7*8, hidden_size=opt.hidden_dim, num_layers=2, batch_first=True) # Define the LSTM
- self.hidden_to_state = nn.Linear(opt.hidden_dim,
- opt.num_classes)
-
- def forward(self, x):
- # batch/window x time x temporal_context x frequency_features x mel_window_sizes
- # print(x.shape)
- [N,L,deltaT,dim,winsizes] = x.shape
- x = x.reshape(N*L,deltaT,dim,winsizes)
- x = x.permute(0,3,1,2)
- x = self.pool(F.relu(self.conv1(x)))
- x = self.pool(F.relu(self.conv2(x)))
- # print(x.shape)
- x = x.reshape(N,L,20*7*8) # batch x time x CNN_features
- # x = F.relu(self.fc1(x))
- # x = F.relu(self.fc2(x))
- lstm_out, _ = self.lstm(x)
- logits = self.hidden_to_state(lstm_out)
- # print(logits.shape)
- return logits, lstm_out
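A quick shape check for the DDCNet deleted above. The temporal-context and mel-feature sizes used here (15 and 80) are assumptions inferred from the hard-coded `20*7*8` reshape in `forward`, and the minimal `opt` namespace is likewise a stand-in for the real options object.

```python
from types import SimpleNamespace

import torch

# Only hidden_dim and num_classes are read by DDCNet's constructor.
opt = SimpleNamespace(hidden_dim=512, num_classes=2)
net = DDCNet(opt)

# batch x time x temporal_context x mel_features x mel_window_sizes (assumed dims)
x = torch.randn(2, 100, 15, 80, 3)
logits, lstm_out = net(x)
print(logits.shape)    # torch.Size([2, 100, 2])
print(lstm_out.shape)  # torch.Size([2, 100, 512])
```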
diff --git a/spaces/MetaWabbit/Auto-GPT/run_continuous.sh b/spaces/MetaWabbit/Auto-GPT/run_continuous.sh
deleted file mode 100644
index 1f4436c88503172c0578b15a8447ed8268502578..0000000000000000000000000000000000000000
--- a/spaces/MetaWabbit/Auto-GPT/run_continuous.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-
- ./run.sh --continuous "$@"
diff --git a/spaces/MirageML/sjc/guided_diffusion/nn.py b/spaces/MirageML/sjc/guided_diffusion/nn.py
deleted file mode 100644
index a4cd59c2324b003626b8cf4c7581effd334908d3..0000000000000000000000000000000000000000
--- a/spaces/MirageML/sjc/guided_diffusion/nn.py
+++ /dev/null
@@ -1,170 +0,0 @@
-"""
-Various utilities for neural networks.
-"""
-
-import math
-
-import torch as th
-import torch.nn as nn
-
-
-# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
-class SiLU(nn.Module):
- def forward(self, x):
- return x * th.sigmoid(x)
-
-
-class GroupNorm32(nn.GroupNorm):
- def forward(self, x):
- return super().forward(x.float()).type(x.dtype)
-
-
-def conv_nd(dims, *args, **kwargs):
- """
- Create a 1D, 2D, or 3D convolution module.
- """
- if dims == 1:
- return nn.Conv1d(*args, **kwargs)
- elif dims == 2:
- return nn.Conv2d(*args, **kwargs)
- elif dims == 3:
- return nn.Conv3d(*args, **kwargs)
- raise ValueError(f"unsupported dimensions: {dims}")
-
-
-def linear(*args, **kwargs):
- """
- Create a linear module.
- """
- return nn.Linear(*args, **kwargs)
-
-
-def avg_pool_nd(dims, *args, **kwargs):
- """
- Create a 1D, 2D, or 3D average pooling module.
- """
- if dims == 1:
- return nn.AvgPool1d(*args, **kwargs)
- elif dims == 2:
- return nn.AvgPool2d(*args, **kwargs)
- elif dims == 3:
- return nn.AvgPool3d(*args, **kwargs)
- raise ValueError(f"unsupported dimensions: {dims}")
-
-
-def update_ema(target_params, source_params, rate=0.99):
- """
- Update target parameters to be closer to those of source parameters using
- an exponential moving average.
-
- :param target_params: the target parameter sequence.
- :param source_params: the source parameter sequence.
- :param rate: the EMA rate (closer to 1 means slower).
- """
- for targ, src in zip(target_params, source_params):
- targ.detach().mul_(rate).add_(src, alpha=1 - rate)
-
-
-def zero_module(module):
- """
- Zero out the parameters of a module and return it.
- """
- for p in module.parameters():
- p.detach().zero_()
- return module
-
-
-def scale_module(module, scale):
- """
- Scale the parameters of a module and return it.
- """
- for p in module.parameters():
- p.detach().mul_(scale)
- return module
-
-
-def mean_flat(tensor):
- """
- Take the mean over all non-batch dimensions.
- """
- return tensor.mean(dim=list(range(1, len(tensor.shape))))
-
-
-def normalization(channels):
- """
- Make a standard normalization layer.
-
- :param channels: number of input channels.
- :return: an nn.Module for normalization.
- """
- return GroupNorm32(32, channels)
-
-
-def timestep_embedding(timesteps, dim, max_period=10000):
- """
- Create sinusoidal timestep embeddings.
-
- :param timesteps: a 1-D Tensor of N indices, one per batch element.
- These may be fractional.
- :param dim: the dimension of the output.
- :param max_period: controls the minimum frequency of the embeddings.
- :return: an [N x dim] Tensor of positional embeddings.
- """
- half = dim // 2
- freqs = th.exp(
- -math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half
- ).to(device=timesteps.device)
- args = timesteps[:, None].float() * freqs[None]
- embedding = th.cat([th.cos(args), th.sin(args)], dim=-1)
- if dim % 2:
- embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)
- return embedding
-
-
-def checkpoint(func, inputs, params, flag):
- """
- Evaluate a function without caching intermediate activations, allowing for
- reduced memory at the expense of extra compute in the backward pass.
-
- :param func: the function to evaluate.
- :param inputs: the argument sequence to pass to `func`.
- :param params: a sequence of parameters `func` depends on but does not
- explicitly take as arguments.
- :param flag: if False, disable gradient checkpointing.
- """
- if flag:
- args = tuple(inputs) + tuple(params)
- return CheckpointFunction.apply(func, len(inputs), *args)
- else:
- return func(*inputs)
-
-
-class CheckpointFunction(th.autograd.Function):
- @staticmethod
- def forward(ctx, run_function, length, *args):
- ctx.run_function = run_function
- ctx.input_tensors = list(args[:length])
- ctx.input_params = list(args[length:])
- with th.no_grad():
- output_tensors = ctx.run_function(*ctx.input_tensors)
- return output_tensors
-
- @staticmethod
- def backward(ctx, *output_grads):
- ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
- with th.enable_grad():
- # Fixes a bug where the first op in run_function modifies the
- # Tensor storage in place, which is not allowed for detach()'d
- # Tensors.
- shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
- output_tensors = ctx.run_function(*shallow_copies)
- input_grads = th.autograd.grad(
- output_tensors,
- ctx.input_tensors + ctx.input_params,
- output_grads,
- allow_unused=True,
- )
- del ctx.input_tensors
- del ctx.input_params
- del output_tensors
- return (None, None) + input_grads
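A small sketch of how two of the helpers removed above are typically called; the timestep values, embedding dimension, and parameter shapes are arbitrary.

```python
import torch as th

# Sinusoidal embeddings for a batch of three (possibly fractional) timesteps.
timesteps = th.tensor([0.0, 10.0, 999.5])
emb = timestep_embedding(timesteps, dim=128)  # concatenated cos/sin halves
print(emb.shape)  # torch.Size([3, 128])

# EMA update: pull target parameters toward source parameters in place.
target = [th.zeros(4), th.zeros(2, 2)]
source = [th.ones(4), th.ones(2, 2)]
update_ema(target, source, rate=0.99)
print(target[0])  # tensor([0.0100, 0.0100, 0.0100, 0.0100])
```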
diff --git a/spaces/MirageML/sjc/sd1/ldm/modules/image_degradation/bsrgan_light.py b/spaces/MirageML/sjc/sd1/ldm/modules/image_degradation/bsrgan_light.py
deleted file mode 100644
index 9e1f823996bf559e9b015ea9aa2b3cd38dd13af1..0000000000000000000000000000000000000000
--- a/spaces/MirageML/sjc/sd1/ldm/modules/image_degradation/bsrgan_light.py
+++ /dev/null
@@ -1,650 +0,0 @@
-# -*- coding: utf-8 -*-
-import numpy as np
-import cv2
-import torch
-
-from functools import partial
-import random
-from scipy import ndimage
-import scipy
-import scipy.stats as ss
-from scipy.interpolate import interp2d
-from scipy.linalg import orth
-import albumentations
-
-import ldm.modules.image_degradation.utils_image as util
-
-"""
-# --------------------------------------------
-# Super-Resolution
-# --------------------------------------------
-#
-# Kai Zhang (cskaizhang@gmail.com)
-# https://github.com/cszn
-# From 2019/03--2021/08
-# --------------------------------------------
-"""
-
-
-def modcrop_np(img, sf):
- '''
- Args:
- img: numpy image, WxH or WxHxC
- sf: scale factor
- Return:
- cropped image
- '''
- w, h = img.shape[:2]
- im = np.copy(img)
- return im[:w - w % sf, :h - h % sf, ...]
-
-
-"""
-# --------------------------------------------
-# anisotropic Gaussian kernels
-# --------------------------------------------
-"""
-
-
-def analytic_kernel(k):
- """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
- k_size = k.shape[0]
- # Calculate the big kernels size
- big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
- # Loop over the small kernel to fill the big one
- for r in range(k_size):
- for c in range(k_size):
- big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
- # Crop the edges of the big kernel to ignore very small values and increase run time of SR
- crop = k_size // 2
- cropped_big_k = big_k[crop:-crop, crop:-crop]
- # Normalize to 1
- return cropped_big_k / cropped_big_k.sum()
-
-
-def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
- """ generate an anisotropic Gaussian kernel
- Args:
- ksize : e.g., 15, kernel size
- theta : [0, pi], rotation angle range
- l1 : [0.1,50], scaling of eigenvalues
- l2 : [0.1,l1], scaling of eigenvalues
- If l1 = l2, will get an isotropic Gaussian kernel.
- Returns:
- k : kernel
- """
-
- v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
- V = np.array([[v[0], v[1]], [v[1], -v[0]]])
- D = np.array([[l1, 0], [0, l2]])
- Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
- k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
-
- return k
-
-
-def gm_blur_kernel(mean, cov, size=15):
- center = size / 2.0 + 0.5
- k = np.zeros([size, size])
- for y in range(size):
- for x in range(size):
- cy = y - center + 1
- cx = x - center + 1
- k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
-
- k = k / np.sum(k)
- return k
-
-
-def shift_pixel(x, sf, upper_left=True):
- """shift pixel for super-resolution with different scale factors
- Args:
- x: WxHxC or WxH
- sf: scale factor
- upper_left: shift direction
- """
- h, w = x.shape[:2]
- shift = (sf - 1) * 0.5
- xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
- if upper_left:
- x1 = xv + shift
- y1 = yv + shift
- else:
- x1 = xv - shift
- y1 = yv - shift
-
- x1 = np.clip(x1, 0, w - 1)
- y1 = np.clip(y1, 0, h - 1)
-
- if x.ndim == 2:
- x = interp2d(xv, yv, x)(x1, y1)
- if x.ndim == 3:
- for i in range(x.shape[-1]):
- x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
-
- return x
-
-
-def blur(x, k):
- '''
- x: image, NxcxHxW
- k: kernel, Nx1xhxw
- '''
- n, c = x.shape[:2]
- p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
- x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
- k = k.repeat(1, c, 1, 1)
- k = k.view(-1, 1, k.shape[2], k.shape[3])
- x = x.view(1, -1, x.shape[2], x.shape[3])
- x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
- x = x.view(n, c, x.shape[2], x.shape[3])
-
- return x
-
-
-def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
- """"
- # modified version of https://github.com/assafshocher/BlindSR_dataset_generator
- # Kai Zhang
- # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var
- # max_var = 2.5 * sf
- """
- # Set random eigen-vals (lambdas) and angle (theta) for COV matrix
- lambda_1 = min_var + np.random.rand() * (max_var - min_var)
- lambda_2 = min_var + np.random.rand() * (max_var - min_var)
- theta = np.random.rand() * np.pi # random theta
- noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
-
- # Set COV matrix using Lambdas and Theta
- LAMBDA = np.diag([lambda_1, lambda_2])
- Q = np.array([[np.cos(theta), -np.sin(theta)],
- [np.sin(theta), np.cos(theta)]])
- SIGMA = Q @ LAMBDA @ Q.T
- INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
-
- # Set expectation position (shifting kernel for aligned image)
- MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2)
- MU = MU[None, None, :, None]
-
- # Create meshgrid for Gaussian
- [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
- Z = np.stack([X, Y], 2)[:, :, :, None]
-
- # Calcualte Gaussian for every pixel of the kernel
- ZZ = Z - MU
- ZZ_t = ZZ.transpose(0, 1, 3, 2)
- raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
-
- # shift the kernel so it will be centered
- # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
-
- # Normalize the kernel and return
- # kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
- kernel = raw_kernel / np.sum(raw_kernel)
- return kernel
-
-
-def fspecial_gaussian(hsize, sigma):
- hsize = [hsize, hsize]
- siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
- std = sigma
- [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
- arg = -(x * x + y * y) / (2 * std * std)
- h = np.exp(arg)
- h[h < np.finfo(float).eps * h.max()] = 0
- sumh = h.sum()
- if sumh != 0:
- h = h / sumh
- return h
-
-
-def fspecial_laplacian(alpha):
- alpha = max([0, min([alpha, 1])])
- h1 = alpha / (alpha + 1)
- h2 = (1 - alpha) / (alpha + 1)
- h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
- h = np.array(h)
- return h
-
-
-def fspecial(filter_type, *args, **kwargs):
- '''
- python code from:
- https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
- '''
- if filter_type == 'gaussian':
- return fspecial_gaussian(*args, **kwargs)
- if filter_type == 'laplacian':
- return fspecial_laplacian(*args, **kwargs)
-
-
-"""
-# --------------------------------------------
-# degradation models
-# --------------------------------------------
-"""
-
-
-def bicubic_degradation(x, sf=3):
- '''
- Args:
- x: HxWxC image, [0, 1]
- sf: down-scale factor
- Return:
- bicubicly downsampled LR image
- '''
- x = util.imresize_np(x, scale=1 / sf)
- return x
-
-
-def srmd_degradation(x, k, sf=3):
- ''' blur + bicubic downsampling
- Args:
- x: HxWxC image, [0, 1]
- k: hxw, double
- sf: down-scale factor
- Return:
- downsampled LR image
- Reference:
- @inproceedings{zhang2018learning,
- title={Learning a single convolutional super-resolution network for multiple degradations},
- author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
- booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
- pages={3262--3271},
- year={2018}
- }
- '''
- x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror'
- x = bicubic_degradation(x, sf=sf)
- return x
-
-
-def dpsr_degradation(x, k, sf=3):
- ''' bicubic downsampling + blur
- Args:
- x: HxWxC image, [0, 1]
- k: hxw, double
- sf: down-scale factor
- Return:
- downsampled LR image
- Reference:
- @inproceedings{zhang2019deep,
- title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
- author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
- booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
- pages={1671--1681},
- year={2019}
- }
- '''
- x = bicubic_degradation(x, sf=sf)
- x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
- return x
-
-
-def classical_degradation(x, k, sf=3):
- ''' blur + downsampling
- Args:
- x: HxWxC image, [0, 1]/[0, 255]
- k: hxw, double
- sf: down-scale factor
- Return:
- downsampled LR image
- '''
- x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
- # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
- st = 0
- return x[st::sf, st::sf, ...]
-
-
-def add_sharpening(img, weight=0.5, radius=50, threshold=10):
- """USM sharpening. borrowed from real-ESRGAN
- Input image: I; Blurry image: B.
- 1. K = I + weight * (I - B)
- 2. Mask = 1 if abs(I - B) > threshold, else: 0
- 3. Blur mask:
- 4. Out = Mask * K + (1 - Mask) * I
- Args:
- img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
- weight (float): Sharp weight. Default: 1.
- radius (float): Kernel size of Gaussian blur. Default: 50.
- threshold (int):
- """
- if radius % 2 == 0:
- radius += 1
- blur = cv2.GaussianBlur(img, (radius, radius), 0)
- residual = img - blur
- mask = np.abs(residual) * 255 > threshold
- mask = mask.astype('float32')
- soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
-
- K = img + weight * residual
- K = np.clip(K, 0, 1)
- return soft_mask * K + (1 - soft_mask) * img
-
-
-def add_blur(img, sf=4):
- wd2 = 4.0 + sf
- wd = 2.0 + 0.2 * sf
-
- wd2 = wd2/4
- wd = wd/4
-
- if random.random() < 0.5:
- l1 = wd2 * random.random()
- l2 = wd2 * random.random()
- k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
- else:
- k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random())
- img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
-
- return img
-
-
-def add_resize(img, sf=4):
- rnum = np.random.rand()
- if rnum > 0.8: # up
- sf1 = random.uniform(1, 2)
- elif rnum < 0.7: # down
- sf1 = random.uniform(0.5 / sf, 1)
- else:
- sf1 = 1.0
- img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
- img = np.clip(img, 0.0, 1.0)
-
- return img
-
-
-# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
-# noise_level = random.randint(noise_level1, noise_level2)
-# rnum = np.random.rand()
-# if rnum > 0.6: # add color Gaussian noise
-# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
-# elif rnum < 0.4: # add grayscale Gaussian noise
-# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
-# else: # add noise
-# L = noise_level2 / 255.
-# D = np.diag(np.random.rand(3))
-# U = orth(np.random.rand(3, 3))
-# conv = np.dot(np.dot(np.transpose(U), D), U)
-# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
-# img = np.clip(img, 0.0, 1.0)
-# return img
-
-def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
- noise_level = random.randint(noise_level1, noise_level2)
- rnum = np.random.rand()
- if rnum > 0.6: # add color Gaussian noise
- img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
- elif rnum < 0.4: # add grayscale Gaussian noise
- img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
- else: # add noise
- L = noise_level2 / 255.
- D = np.diag(np.random.rand(3))
- U = orth(np.random.rand(3, 3))
- conv = np.dot(np.dot(np.transpose(U), D), U)
- img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
- img = np.clip(img, 0.0, 1.0)
- return img
-
-
-def add_speckle_noise(img, noise_level1=2, noise_level2=25):
- noise_level = random.randint(noise_level1, noise_level2)
- img = np.clip(img, 0.0, 1.0)
- rnum = random.random()
- if rnum > 0.6:
- img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
- elif rnum < 0.4:
- img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
- else:
- L = noise_level2 / 255.
- D = np.diag(np.random.rand(3))
- U = orth(np.random.rand(3, 3))
- conv = np.dot(np.dot(np.transpose(U), D), U)
- img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
- img = np.clip(img, 0.0, 1.0)
- return img
-
-
-def add_Poisson_noise(img):
- img = np.clip((img * 255.0).round(), 0, 255) / 255.
- vals = 10 ** (2 * random.random() + 2.0) # [2, 4]
- if random.random() < 0.5:
- img = np.random.poisson(img * vals).astype(np.float32) / vals
- else:
- img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
- img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
- noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
- img += noise_gray[:, :, np.newaxis]
- img = np.clip(img, 0.0, 1.0)
- return img
-
-
-def add_JPEG_noise(img):
- quality_factor = random.randint(80, 95)
- img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
- result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
- img = cv2.imdecode(encimg, 1)
- img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
- return img
-
-
-def random_crop(lq, hq, sf=4, lq_patchsize=64):
- h, w = lq.shape[:2]
- rnd_h = random.randint(0, h - lq_patchsize)
- rnd_w = random.randint(0, w - lq_patchsize)
- lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
-
- rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
- hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
- return lq, hq
-
-
-def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
- """
- This is the degradation model of BSRGAN from the paper
- "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
- ----------
- img: HXWXC, [0, 1], its size should be larger than (lq_patchsizexsf)x(lq_patchsizexsf)
- sf: scale factor
- isp_model: camera ISP model
- Returns
- -------
- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
- hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
- """
- isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
- sf_ori = sf
-
- h1, w1 = img.shape[:2]
- img = img.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop
- h, w = img.shape[:2]
-
- if h < lq_patchsize * sf or w < lq_patchsize * sf:
- raise ValueError(f'img size ({h1}X{w1}) is too small!')
-
- hq = img.copy()
-
- if sf == 4 and random.random() < scale2_prob: # downsample1
- if np.random.rand() < 0.5:
- img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
- interpolation=random.choice([1, 2, 3]))
- else:
- img = util.imresize_np(img, 1 / 2, True)
- img = np.clip(img, 0.0, 1.0)
- sf = 2
-
- shuffle_order = random.sample(range(7), 7)
- idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
- if idx1 > idx2: # keep downsample3 last
- shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
-
- for i in shuffle_order:
-
- if i == 0:
- img = add_blur(img, sf=sf)
-
- elif i == 1:
- img = add_blur(img, sf=sf)
-
- elif i == 2:
- a, b = img.shape[1], img.shape[0]
- # downsample2
- if random.random() < 0.75:
- sf1 = random.uniform(1, 2 * sf)
- img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
- interpolation=random.choice([1, 2, 3]))
- else:
- k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
- k_shifted = shift_pixel(k, sf)
- k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
- img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
- img = img[0::sf, 0::sf, ...] # nearest downsampling
- img = np.clip(img, 0.0, 1.0)
-
- elif i == 3:
- # downsample3
- img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
- img = np.clip(img, 0.0, 1.0)
-
- elif i == 4:
- # add Gaussian noise
- img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8)
-
- elif i == 5:
- # add JPEG noise
- if random.random() < jpeg_prob:
- img = add_JPEG_noise(img)
-
- elif i == 6:
- # add processed camera sensor noise
- if random.random() < isp_prob and isp_model is not None:
- with torch.no_grad():
- img, hq = isp_model.forward(img.copy(), hq)
-
- # add final JPEG compression noise
- img = add_JPEG_noise(img)
-
- # random crop
- img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
-
- return img, hq
-
-
-# todo no isp_model?
-def degradation_bsrgan_variant(image, sf=4, isp_model=None):
- """
- This is the degradation model of BSRGAN from the paper
- "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
- ----------
- image: HxWxC image, expected as uint8 in [0, 255] (converted internally via util.uint2single)
- sf: scale factor
- isp_model: camera ISP model
- Returns
- -------
- example: dict with key "image" holding the degraded low-quality uint8 image, range [0, 255]
- """
- image = util.uint2single(image)
- isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
- sf_ori = sf
-
- h1, w1 = image.shape[:2]
- image = image.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop
- h, w = image.shape[:2]
-
- hq = image.copy()
-
- if sf == 4 and random.random() < scale2_prob: # downsample1
- if np.random.rand() < 0.5:
- image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
- interpolation=random.choice([1, 2, 3]))
- else:
- image = util.imresize_np(image, 1 / 2, True)
- image = np.clip(image, 0.0, 1.0)
- sf = 2
-
- shuffle_order = random.sample(range(7), 7)
- idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
- if idx1 > idx2: # keep downsample3 last
- shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
-
- for i in shuffle_order:
-
- if i == 0:
- image = add_blur(image, sf=sf)
-
- # elif i == 1:
- # image = add_blur(image, sf=sf)
-
- if i == 0:
- pass
-
- elif i == 2:
- a, b = image.shape[1], image.shape[0]
- # downsample2
- if random.random() < 0.8:
- sf1 = random.uniform(1, 2 * sf)
- image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
- interpolation=random.choice([1, 2, 3]))
- else:
- k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
- k_shifted = shift_pixel(k, sf)
- k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
- image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
- image = image[0::sf, 0::sf, ...] # nearest downsampling
-
- image = np.clip(image, 0.0, 1.0)
-
- elif i == 3:
- # downsample3
- image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
- image = np.clip(image, 0.0, 1.0)
-
- elif i == 4:
- # add Gaussian noise
- image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2)
-
- elif i == 5:
- # add JPEG noise
- if random.random() < jpeg_prob:
- image = add_JPEG_noise(image)
- #
- # elif i == 6:
- # # add processed camera sensor noise
- # if random.random() < isp_prob and isp_model is not None:
- # with torch.no_grad():
- # img, hq = isp_model.forward(img.copy(), hq)
-
- # add final JPEG compression noise
- image = add_JPEG_noise(image)
- image = util.single2uint(image)
- example = {"image": image}
- return example
-
-
-
-
-if __name__ == '__main__':
- print("hey")
- img = util.imread_uint('utils/test.png', 3)
- img = img[:448, :448]
- h = img.shape[0] // 4
- print("resizing to", h)
- sf = 4
- deg_fn = partial(degradation_bsrgan_variant, sf=sf)
- for i in range(20):
- print(i)
- img_hq = img
- img_lq = deg_fn(img)["image"]
- img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
- print(img_lq)
- img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
- print(img_lq.shape)
- print("bicubic", img_lq_bicubic.shape)
- print(img_hq.shape)
- lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
- interpolation=0)
- lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic),
- (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
- interpolation=0)
- img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
- util.imsave(img_concat, str(i) + '.png')
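A hedged usage sketch for the variant degradation pipeline deleted above. The input array is random data standing in for a uint8 RGB image, and the module-level imports (`util`, `cv2`, `scipy`, `albumentations`) are assumed to resolve as in the original file.

```python
import numpy as np

# Stand-in for a uint8 RGB image (H x W x C); real data would come from util.imread_uint.
hq = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)

out = degradation_bsrgan_variant(hq, sf=4)  # random blur / resize / noise / JPEG chain
lq = out["image"]                           # degraded uint8 image
print(lq.shape, lq.dtype)
```

Because the blur, resize, and noise steps are sampled at random, the exact output size and degradation strength vary from call to call.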
diff --git a/spaces/Miuzarte/SUI-svc-4.0/inference/slicer.py b/spaces/Miuzarte/SUI-svc-4.0/inference/slicer.py
deleted file mode 100644
index b05840bcf6bdced0b6e2adbecb1a1dd5b3dee462..0000000000000000000000000000000000000000
--- a/spaces/Miuzarte/SUI-svc-4.0/inference/slicer.py
+++ /dev/null
@@ -1,142 +0,0 @@
-import librosa
-import torch
-import torchaudio
-
-
-class Slicer:
- def __init__(self,
- sr: int,
- threshold: float = -40.,
- min_length: int = 5000,
- min_interval: int = 300,
- hop_size: int = 20,
- max_sil_kept: int = 5000):
- if not min_length >= min_interval >= hop_size:
- raise ValueError('The following condition must be satisfied: min_length >= min_interval >= hop_size')
- if not max_sil_kept >= hop_size:
- raise ValueError('The following condition must be satisfied: max_sil_kept >= hop_size')
- min_interval = sr * min_interval / 1000
- self.threshold = 10 ** (threshold / 20.)
- self.hop_size = round(sr * hop_size / 1000)
- self.win_size = min(round(min_interval), 4 * self.hop_size)
- self.min_length = round(sr * min_length / 1000 / self.hop_size)
- self.min_interval = round(min_interval / self.hop_size)
- self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)
-
- def _apply_slice(self, waveform, begin, end):
- if len(waveform.shape) > 1:
- return waveform[:, begin * self.hop_size: min(waveform.shape[1], end * self.hop_size)]
- else:
- return waveform[begin * self.hop_size: min(waveform.shape[0], end * self.hop_size)]
-
- # @timeit
- def slice(self, waveform):
- if len(waveform.shape) > 1:
- samples = librosa.to_mono(waveform)
- else:
- samples = waveform
- if samples.shape[0] <= self.min_length:
- return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
- rms_list = librosa.feature.rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0)
- sil_tags = []
- silence_start = None
- clip_start = 0
- for i, rms in enumerate(rms_list):
- # Keep looping while frame is silent.
- if rms < self.threshold:
- # Record start of silent frames.
- if silence_start is None:
- silence_start = i
- continue
- # Keep looping while frame is not silent and silence start has not been recorded.
- if silence_start is None:
- continue
- # Clear recorded silence start if interval is not enough or clip is too short
- is_leading_silence = silence_start == 0 and i > self.max_sil_kept
- need_slice_middle = i - silence_start >= self.min_interval and i - clip_start >= self.min_length
- if not is_leading_silence and not need_slice_middle:
- silence_start = None
- continue
- # Need slicing. Record the range of silent frames to be removed.
- if i - silence_start <= self.max_sil_kept:
- pos = rms_list[silence_start: i + 1].argmin() + silence_start
- if silence_start == 0:
- sil_tags.append((0, pos))
- else:
- sil_tags.append((pos, pos))
- clip_start = pos
- elif i - silence_start <= self.max_sil_kept * 2:
- pos = rms_list[i - self.max_sil_kept: silence_start + self.max_sil_kept + 1].argmin()
- pos += i - self.max_sil_kept
- pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
- pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
- if silence_start == 0:
- sil_tags.append((0, pos_r))
- clip_start = pos_r
- else:
- sil_tags.append((min(pos_l, pos), max(pos_r, pos)))
- clip_start = max(pos_r, pos)
- else:
- pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start
- pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept
- if silence_start == 0:
- sil_tags.append((0, pos_r))
- else:
- sil_tags.append((pos_l, pos_r))
- clip_start = pos_r
- silence_start = None
- # Deal with trailing silence.
- total_frames = rms_list.shape[0]
- if silence_start is not None and total_frames - silence_start >= self.min_interval:
- silence_end = min(total_frames, silence_start + self.max_sil_kept)
- pos = rms_list[silence_start: silence_end + 1].argmin() + silence_start
- sil_tags.append((pos, total_frames + 1))
- # Apply and return slices.
- if len(sil_tags) == 0:
- return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}}
- else:
- chunks = []
- # The first silent segment does not start at the beginning, so prepend the leading voiced segment
- if sil_tags[0][0]:
- chunks.append(
- {"slice": False, "split_time": f"0,{min(waveform.shape[0], sil_tags[0][0] * self.hop_size)}"})
- for i in range(0, len(sil_tags)):
- # Mark voiced segments (skipping the first one)
- if i:
- chunks.append({"slice": False,
- "split_time": f"{sil_tags[i - 1][1] * self.hop_size},{min(waveform.shape[0], sil_tags[i][0] * self.hop_size)}"})
- # Mark every silent segment
- chunks.append({"slice": True,
- "split_time": f"{sil_tags[i][0] * self.hop_size},{min(waveform.shape[0], sil_tags[i][1] * self.hop_size)}"})
- # The last silent segment does not reach the end of the audio, so append the trailing segment
- if sil_tags[-1][1] * self.hop_size < len(waveform):
- chunks.append({"slice": False, "split_time": f"{sil_tags[-1][1] * self.hop_size},{len(waveform)}"})
- chunk_dict = {}
- for i in range(len(chunks)):
- chunk_dict[str(i)] = chunks[i]
- return chunk_dict
-
-
-def cut(audio_path, db_thresh=-30, min_len=5000):
- audio, sr = librosa.load(audio_path, sr=None)
- slicer = Slicer(
- sr=sr,
- threshold=db_thresh,
- min_length=min_len
- )
- chunks = slicer.slice(audio)
- return chunks
-
-
-def chunks2audio(audio_path, chunks):
- chunks = dict(chunks)
- audio, sr = torchaudio.load(audio_path)
- if len(audio.shape) == 2 and audio.shape[1] >= 2:
- audio = torch.mean(audio, dim=0).unsqueeze(0)
- audio = audio.cpu().numpy()[0]
- result = []
- for k, v in chunks.items():
- tag = v["split_time"].split(",")
- if tag[0] != tag[1]:
- result.append((v["slice"], audio[int(tag[0]):int(tag[1])]))
- return result, sr
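A short sketch of how the `cut` and `chunks2audio` helpers deleted above are meant to be chained; `"vocals.wav"` is a placeholder path, not a file from the Space.

```python
# Split an audio file on silence, then iterate over the resulting segments.
chunks = cut("vocals.wav", db_thresh=-30, min_len=5000)      # placeholder path
segments, sr = chunks2audio("vocals.wav", chunks)

for is_silence, samples in segments:
    if not is_silence:
        print(f"voiced segment: {len(samples) / sr:.2f} s")
```

The `slice` flag in each chunk marks silence, so the voiced pieces are the ones with the flag set to False.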
diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/decoders/aster_decoder.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/decoders/aster_decoder.py
deleted file mode 100644
index 83e249b08c00acc06a7a31a5b5e44ba70ff3b712..0000000000000000000000000000000000000000
--- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/decoders/aster_decoder.py
+++ /dev/null
@@ -1,181 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from typing import Dict, Optional, Sequence, Tuple, Union
-
-import torch
-import torch.nn as nn
-
-from mmocr.models.common.dictionary import Dictionary
-from mmocr.registry import MODELS
-from mmocr.structures import TextRecogDataSample
-from .base import BaseDecoder
-
-
-@MODELS.register_module()
-class ASTERDecoder(BaseDecoder):
- """Implement attention decoder.
-
- Args:
- in_channels (int): Number of input channels.
- emb_dims (int): Dims of char embedding. Defaults to 512.
- attn_dims (int): Dims of attention. Both hidden states and features
- will be projected to this dims. Defaults to 512.
- hidden_size (int): Dims of hidden state for GRU. Defaults to 512.
- dictionary (dict or :obj:`Dictionary`): The config for `Dictionary` or
- the instance of `Dictionary`. Defaults to None.
- max_seq_len (int): Maximum output sequence length :math:`T`. Defaults
- to 25.
- module_loss (dict, optional): Config to build loss. Defaults to None.
- postprocessor (dict, optional): Config to build postprocessor.
- Defaults to None.
- init_cfg (dict or list[dict], optional): Initialization configs.
- Defaults to None.
- """
-
- def __init__(self,
- in_channels: int,
- emb_dims: int = 512,
- attn_dims: int = 512,
- hidden_size: int = 512,
- dictionary: Union[Dictionary, Dict] = None,
- max_seq_len: int = 25,
- module_loss: Dict = None,
- postprocessor: Dict = None,
- init_cfg=dict(type='Xavier', layer='Conv2d')):
- super().__init__(
- init_cfg=init_cfg,
- dictionary=dictionary,
- module_loss=module_loss,
- postprocessor=postprocessor,
- max_seq_len=max_seq_len)
-
- self.start_idx = self.dictionary.start_idx
- self.num_classes = self.dictionary.num_classes
- self.in_channels = in_channels
- self.embedding_dim = emb_dims
- self.att_dims = attn_dims
- self.hidden_size = hidden_size
-
- # Projection layers
- self.proj_feat = nn.Linear(in_channels, attn_dims)
- self.proj_hidden = nn.Linear(hidden_size, attn_dims)
- self.proj_sum = nn.Linear(attn_dims, 1)
-
- # Decoder input embedding
- self.embedding = nn.Embedding(self.num_classes, self.att_dims)
-
- # GRU
- self.gru = nn.GRU(
- input_size=self.in_channels + self.embedding_dim,
- hidden_size=self.hidden_size,
- batch_first=True)
-
- # Prediction layer
- self.fc = nn.Linear(hidden_size, self.dictionary.num_classes)
- self.softmax = nn.Softmax(dim=-1)
-
- def _attention(self, feat: torch.Tensor, prev_hidden: torch.Tensor,
- prev_char: torch.Tensor
- ) -> Tuple[torch.Tensor, torch.Tensor]:
- """Implement the attention mechanism.
-
- Args:
- feat (Tensor): Feature map from encoder of shape :math:`(N, T, C)`.
- prev_hidden (Tensor): Previous hidden state from GRU of shape
- :math:`(1, N, self.hidden_size)`.
- prev_char (Tensor): Previous predicted character of shape
- :math:`(N, )`.
-
- Returns:
- tuple(Tensor, Tensor):
- - output (Tensor): Predicted character of current time step of
- shape :math:`(N, 1)`.
- - state (Tensor): Hidden state from GRU of current time step of
- shape :math:`(N, self.hidden_size)`.
- """
- # Calculate the attention weights
- B, T, _ = feat.size()
- feat_proj = self.proj_feat(feat) # [N, T, attn_dims]
- hidden_proj = self.proj_hidden(prev_hidden) # [1, N, attn_dims]
- hidden_proj = hidden_proj.squeeze(0).unsqueeze(1) # [N, 1, attn_dims]
- hidden_proj = hidden_proj.expand(B, T,
- self.att_dims) # [N, T, attn_dims]
-
- sum_tanh = torch.tanh(feat_proj + hidden_proj) # [N, T, attn_dims]
- sum_proj = self.proj_sum(sum_tanh).squeeze(-1) # [N, T]
- attn_weights = torch.softmax(sum_proj, dim=1) # [N, T]
-
- # GRU forward
- context = torch.bmm(attn_weights.unsqueeze(1), feat).squeeze(1)
- char_embed = self.embedding(prev_char.long()) # [N, emb_dims]
- output, state = self.gru(
- torch.cat([char_embed, context], 1).unsqueeze(1), prev_hidden)
- output = output.squeeze(1)
- output = self.fc(output)
- return output, state
-
- def forward_train(
- self,
- feat: torch.Tensor = None,
- out_enc: Optional[torch.Tensor] = None,
- data_samples: Optional[Sequence[TextRecogDataSample]] = None
- ) -> torch.Tensor:
- """
- Args:
- feat (Tensor): Feature from backbone. Unused in this decoder.
- out_enc (torch.Tensor, optional): Encoder output. Defaults to None.
- data_samples (list[TextRecogDataSample], optional): Batch of
- TextRecogDataSample, containing gt_text information. Defaults
- to None.
-
- Returns:
- Tensor: The raw logit tensor. Shape :math:`(N, T, C)` where
- :math:`C` is ``num_classes``.
- """
- B = out_enc.shape[0]
- state = torch.zeros(1, B, self.hidden_size).to(out_enc.device)
- padded_targets = [
- data_sample.gt_text.padded_indexes for data_sample in data_samples
- ]
- padded_targets = torch.stack(padded_targets, dim=0).to(out_enc.device)
- outputs = []
- for i in range(self.max_seq_len):
- prev_char = padded_targets[:, i].to(out_enc.device)
- output, state = self._attention(out_enc, state, prev_char)
- outputs.append(output)
- outputs = torch.cat([_.unsqueeze(1) for _ in outputs], 1)
- return outputs
-
- def forward_test(
- self,
- feat: Optional[torch.Tensor] = None,
- out_enc: Optional[torch.Tensor] = None,
- data_samples: Optional[Sequence[TextRecogDataSample]] = None
- ) -> torch.Tensor:
- """
- Args:
- feat (Tensor): Feature from backbone. Unused in this decoder.
- out_enc (torch.Tensor, optional): Encoder output. Defaults to None.
- data_samples (list[TextRecogDataSample], optional): Batch of
- TextRecogDataSample, containing gt_text information. Defaults
- to None. Unused in this decoder.
-
- Returns:
- Tensor: The raw logit tensor. Shape :math:`(N, T, C)` where
- :math:`C` is ``num_classes``.
- """
- B = out_enc.shape[0]
- predicted = []
- state = torch.zeros(1, B, self.hidden_size).to(out_enc.device)
- outputs = []
- for i in range(self.max_seq_len):
- if i == 0:
- prev_char = torch.zeros(B).fill_(self.start_idx).to(
- out_enc.device)
- else:
- prev_char = predicted
-
- output, state = self._attention(out_enc, state, prev_char)
- outputs.append(output)
- _, predicted = output.max(-1)
- outputs = torch.cat([_.unsqueeze(1) for _ in outputs], 1)
- return self.softmax(outputs)
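In equation form, the `_attention` step deleted above is standard additive (Bahdanau-style) attention followed by a GRU update. This is a transcription of the code, with $f_i$ the encoder features, $h_{t-1}$ the previous hidden state, and $y_{t-1}$ the previous character:

```latex
e_{t,i} = w^{\top}\tanh\!\big(W_f f_i + W_h h_{t-1}\big), \qquad
\alpha_{t,i} = \operatorname{softmax}_i\,(e_{t,i}), \qquad
c_t = \sum_i \alpha_{t,i} f_i,
```
```latex
(o_t, h_t) = \mathrm{GRU}\big([\,\mathrm{Emb}(y_{t-1});\, c_t\,],\, h_{t-1}\big), \qquad
\mathrm{logits}_t = W_o\, o_t .
```

Here $W_f$, $W_h$, and $w$ correspond to `proj_feat`, `proj_hidden`, and `proj_sum`, and $W_o$ to the final `fc` layer.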
diff --git a/spaces/NATSpeech/PortaSpeech/inference/tts/gradio/infer.py b/spaces/NATSpeech/PortaSpeech/inference/tts/gradio/infer.py
deleted file mode 100644
index faae06f1b6fb17167e698d07518b7b52d821d6c4..0000000000000000000000000000000000000000
--- a/spaces/NATSpeech/PortaSpeech/inference/tts/gradio/infer.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import importlib
-import re
-
-import gradio as gr
-import yaml
-from gradio.inputs import Textbox
-
-from inference.tts.base_tts_infer import BaseTTSInfer
-from utils.commons.hparams import set_hparams
-from utils.commons.hparams import hparams as hp
-import numpy as np
-
-from utils.text.text_encoder import PUNCS
-
-
-class GradioInfer:
- def __init__(self, exp_name, inference_cls, title, description, article, example_inputs):
- self.exp_name = exp_name
- self.title = title
- self.description = description
- self.article = article
- self.example_inputs = example_inputs
- pkg = ".".join(inference_cls.split(".")[:-1])
- cls_name = inference_cls.split(".")[-1]
- self.inference_cls = getattr(importlib.import_module(pkg), cls_name)
-
- def greet(self, text):
- sents = re.split(rf'([{PUNCS}])', text.replace('\n', ','))
- if sents[-1] not in list(PUNCS):
- sents = sents + ['.']
- audio_outs = []
- s = ""
- for i in range(0, len(sents), 2):
- if len(sents[i]) > 0:
- s += sents[i] + sents[i + 1]
- if len(s) >= 400 or (i >= len(sents) - 2 and len(s) > 0):
- audio_out = self.infer_ins.infer_once({
- 'text': s
- })
- audio_out = audio_out * 32767
- audio_out = audio_out.astype(np.int16)
- audio_outs.append(audio_out)
- audio_outs.append(np.zeros(int(hp['audio_sample_rate'] * 0.3)).astype(np.int16))
- s = ""
- audio_outs = np.concatenate(audio_outs)
- return hp['audio_sample_rate'], audio_outs
-
- def run(self):
- set_hparams(exp_name=self.exp_name)
- infer_cls = self.inference_cls
- self.infer_ins: BaseTTSInfer = infer_cls(hp)
- example_inputs = self.example_inputs
- iface = gr.Interface(fn=self.greet,
- inputs=Textbox(
- lines=10, placeholder=None, default=example_inputs[0], label="input text"),
- outputs="audio",
- allow_flagging="never",
- title=self.title,
- description=self.description,
- article=self.article,
- examples=example_inputs,
- enable_queue=True)
- iface.launch(cache_examples=True)
-
-
-if __name__ == '__main__':
- gradio_config = yaml.safe_load(open('inference/tts/gradio/gradio_settings.yaml'))
- g = GradioInfer(**gradio_config)
- g.run()
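The `GradioInfer` wrapper deleted above is driven entirely by `gradio_settings.yaml`. The sketch below shows the keys its constructor expects, written as the equivalent Python dict; every value is a placeholder, not the Space's actual settings.

```python
# Equivalent of gradio_settings.yaml as a Python dict; all values are placeholders.
gradio_config = {
    "exp_name": "my_tts_exp",                         # assumed experiment name
    "inference_cls": "inference.tts.my_infer.MyTTSInfer",  # placeholder dotted path to a BaseTTSInfer subclass
    "title": "PortaSpeech demo",
    "description": "Type text and synthesize speech.",
    "article": "",
    "example_inputs": ["The quick brown fox jumps over the lazy dog."],
}

g = GradioInfer(**gradio_config)
g.run()
```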
diff --git a/spaces/NN520/AI/src/components/providers.tsx b/spaces/NN520/AI/src/components/providers.tsx
deleted file mode 100644
index 892226412d80fe0b05211911b9e245cd22876460..0000000000000000000000000000000000000000
--- a/spaces/NN520/AI/src/components/providers.tsx
+++ /dev/null
@@ -1,15 +0,0 @@
-'use client'
-
-import * as React from 'react'
-import { ThemeProvider as NextThemesProvider } from 'next-themes'
-import { ThemeProviderProps } from 'next-themes/dist/types'
-
-import { TooltipProvider } from '@/components/ui/tooltip'
-
-export function Providers({ children, ...props }: ThemeProviderProps) {
- return (
- <NextThemesProvider {...props}>
- <TooltipProvider>{children}</TooltipProvider>
- </NextThemesProvider>
- )
-}
diff --git a/spaces/NemesisAlm/GeolocationCountryClassification/README.md b/spaces/NemesisAlm/GeolocationCountryClassification/README.md
deleted file mode 100644
index 8c02424b89dd28db83102a4a060b308ebec626f7..0000000000000000000000000000000000000000
--- a/spaces/NemesisAlm/GeolocationCountryClassification/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-title: GeolocationCountryClassification
-app_file: app.py
-sdk: gradio
-sdk_version: 3.41.2
----
diff --git a/spaces/Niansuh/DALL-E/README.md b/spaces/Niansuh/DALL-E/README.md
deleted file mode 100644
index fbbdcfaa627363412e2da593ab0546bfb9ae874c..0000000000000000000000000000000000000000
--- a/spaces/Niansuh/DALL-E/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-title: DALL·E
-emoji: 🥑
-colorFrom: yellow
-colorTo: green
-sdk: static
-pinned: false
-license: apache-2.0
----
\ No newline at end of file
diff --git a/spaces/NicolasVana/image-captioning/app.py b/spaces/NicolasVana/image-captioning/app.py
deleted file mode 100644
index efa955ad4b2499e449c2ca3be733a5a708d524a1..0000000000000000000000000000000000000000
--- a/spaces/NicolasVana/image-captioning/app.py
+++ /dev/null
@@ -1,92 +0,0 @@
-import streamlit as st
-import requests
-import io
-
-
-# Designing the interface
-st.title("Medical Image Captioning")
-
-st.sidebar.markdown(
- """
- This project features three different medical image captioning models.
- Two of them use the InceptionV3 architecture to do feature extraction and then generate the captions using an LSTM model.
- The difference between these two is that the first one uses InceptionV3 trained on ImageNet data and outputs 2048 features.
- The second one is based on a retrained version of InceptionV3 that uses the CUI data from the ROCO dataset to extract 745 features from the images.
- The final model is a transformer, based on...
- """
-)
-
-with st.spinner('Loading objects ...'):
- from model import *
-
-random_image_id = get_random_image_id()
-
-st.sidebar.title("Select a sample image")
-sample_image_id = st.sidebar.selectbox(
- "Please choose a sample image",
- sample_image_ids
-)
-
-st.sidebar.title("Select a model Type")
-model_type = st.sidebar.selectbox(
- "Please choose a model",
- ['Pretrained Inception', 'Retrained Inception', 'Transformer']
-)
-
-inception, lstm = fetch_model(model_type)
-word2Index, index2Word, variable_params = fetch_auxiliary_files(model_type)
-max_len = variable_params['max_caption_len']
-
-if st.sidebar.button("Random ROCO (test) images"):
- random_image_id = get_random_image_id()
- sample_image_id = "None"
-
-bytes_data = None
-with st.sidebar.form("file-uploader-form", clear_on_submit=True):
- uploaded_file = st.file_uploader("Choose a file")
- submitted = st.form_submit_button("Upload")
- if submitted and uploaded_file is not None:
- bytes_data = io.BytesIO(uploaded_file.getvalue())
-
-if (bytes_data is None) and submitted:
-
- st.write("No file is selected to upload")
-
-else:
-
- image_id = random_image_id
- if sample_image_id != "None":
- assert type(sample_image_id) == int
- image_id = sample_image_id
-
- sample_name = f"ROCO_{str(image_id).zfill(5)}.jpg"
- sample_path = os.path.join(sample_dir, sample_name)
-
- if bytes_data is not None:
- image = Image.open(bytes_data)
- elif os.path.isfile(sample_path):
- image = Image.open(sample_path)
-
- width, height = 299, 299
- resized = image.resize(size=(width, height))
-
- if bytes_data is None:
- st.markdown(f"ROCO_{str(image_id).zfill(5)}.jpg")
- show = st.image(resized)
- show.image(resized, '\n\nSelected Image')
-
- # For newline
- st.sidebar.write('\n')
-
- with st.spinner('Generating image caption ...'):
- st.header(f'Predicted caption:\n\n')
-
- preprocessed_img = preprocess_image_inception(resized)
- features = extract_features(inception, preprocessed_img)
- caption = generate_caption(lstm, features, max_len, word2Index, index2Word)
- st.subheader(caption)
-
- st.sidebar.header("Model predicts: ")
- st.sidebar.write(f"{caption}")
-
- image.close()
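The Streamlit app deleted above ultimately reduces to a three-step call chain: feature extraction with an Inception backbone, then LSTM decoding into a caption. A sketch of that chain outside Streamlit, assuming the helpers come from the Space's `model.py` (imported there via `from model import *`) and using a placeholder image path:

```python
from PIL import Image

# Helper names taken from the app above; their definitions live in the Space's model.py.
from model import (fetch_model, fetch_auxiliary_files,
                   preprocess_image_inception, extract_features, generate_caption)

inception, lstm = fetch_model("Pretrained Inception")
word2Index, index2Word, params = fetch_auxiliary_files("Pretrained Inception")

image = Image.open("ROCO_00001.jpg").resize((299, 299))  # placeholder sample image
features = extract_features(inception, preprocess_image_inception(image))
caption = generate_caption(lstm, features, params["max_caption_len"], word2Index, index2Word)
print(caption)
```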
diff --git a/spaces/OFA-Sys/FAST-CPU-small-stable-diffusion-v0/pipeline_openvino_stable_diffusion.py b/spaces/OFA-Sys/FAST-CPU-small-stable-diffusion-v0/pipeline_openvino_stable_diffusion.py
deleted file mode 100644
index 2c934742760f46b0bbeb4676a3cdc66178eac8d1..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/FAST-CPU-small-stable-diffusion-v0/pipeline_openvino_stable_diffusion.py
+++ /dev/null
@@ -1,405 +0,0 @@
-# Copyright 2022 The OFA-Sys Team.
-# This source code is licensed under the Apache 2.0 license
-# found in the LICENSE file in the root directory.
-# Copyright 2022 The HuggingFace Inc. team.
-# All rights reserved.
-# This source code is licensed under the Apache 2.0 license
-# found in the LICENSE file in the root directory.
-
-import inspect
-from typing import Callable, List, Optional, Union
-
-import numpy as np
-import torch
-import os
-
-from transformers import CLIPFeatureExtractor, CLIPTokenizer
-
-from diffusers.configuration_utils import FrozenDict
-from diffusers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
-from diffusers.utils import deprecate, logging
-from diffusers import OnnxRuntimeModel
-
-from diffusers import OnnxStableDiffusionPipeline, DiffusionPipeline
-from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
-from openvino.runtime import Core
-ORT_TO_NP_TYPE = {
- "tensor(bool)": np.bool_,
- "tensor(int8)": np.int8,
- "tensor(uint8)": np.uint8,
- "tensor(int16)": np.int16,
- "tensor(uint16)": np.uint16,
- "tensor(int32)": np.int32,
- "tensor(uint32)": np.uint32,
- "tensor(int64)": np.int64,
- "tensor(uint64)": np.uint64,
- "tensor(float16)": np.float16,
- "tensor(float)": np.float32,
- "tensor(double)": np.float64,
-}
-
-logger = logging.get_logger(__name__)
-
-
-class OpenVINOStableDiffusionPipeline(DiffusionPipeline):
- vae_encoder: OnnxRuntimeModel
- vae_decoder: OnnxRuntimeModel
- text_encoder: OnnxRuntimeModel
- tokenizer: CLIPTokenizer
- unet: OnnxRuntimeModel
- scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler]
- safety_checker: OnnxRuntimeModel
- feature_extractor: CLIPFeatureExtractor
-
- _optional_components = ["safety_checker", "feature_extractor"]
-
- def __init__(
- self,
- vae_encoder: OnnxRuntimeModel,
- vae_decoder: OnnxRuntimeModel,
- text_encoder: OnnxRuntimeModel,
- tokenizer: CLIPTokenizer,
- unet: OnnxRuntimeModel,
- scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
- safety_checker: OnnxRuntimeModel,
- feature_extractor: CLIPFeatureExtractor,
- requires_safety_checker: bool = True,
- ):
- super().__init__()
-
- if hasattr(scheduler.config,
- "steps_offset") and scheduler.config.steps_offset != 1:
- deprecation_message = (
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
- "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
- " file")
- deprecate("steps_offset!=1",
- "1.0.0",
- deprecation_message,
- standard_warn=False)
- new_config = dict(scheduler.config)
- new_config["steps_offset"] = 1
- scheduler._internal_dict = FrozenDict(new_config)
-
- if hasattr(scheduler.config,
- "clip_sample") and scheduler.config.clip_sample is True:
- deprecation_message = (
- f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
- " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
- " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
- " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
- " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
- )
- deprecate("clip_sample not set",
- "1.0.0",
- deprecation_message,
- standard_warn=False)
- new_config = dict(scheduler.config)
- new_config["clip_sample"] = False
- scheduler._internal_dict = FrozenDict(new_config)
-
- if safety_checker is None and requires_safety_checker:
- logger.warning(
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
- )
-
- if safety_checker is not None and feature_extractor is None:
- raise ValueError(
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
- )
-
- self.register_modules(
- vae_encoder=vae_encoder,
- vae_decoder=vae_decoder,
- text_encoder=text_encoder,
- tokenizer=tokenizer,
- unet=unet,
- scheduler=scheduler,
- safety_checker=safety_checker,
- feature_extractor=feature_extractor,
- )
- self.convert_to_openvino()
- self.register_to_config(
- requires_safety_checker=requires_safety_checker)
-
- @classmethod
- def from_onnx_pipeline(cls, onnx_pipe: OnnxStableDiffusionPipeline):
- r"""
-        Create an OpenVINOStableDiffusionPipeline from an ONNX Stable Diffusion pipeline.
- Parameters:
- onnx_pipe (OnnxStableDiffusionPipeline)
- """
- return cls(onnx_pipe.vae_encoder, onnx_pipe.vae_decoder,
- onnx_pipe.text_encoder, onnx_pipe.tokenizer, onnx_pipe.unet,
- onnx_pipe.scheduler, onnx_pipe.safety_checker,
- onnx_pipe.feature_extractor, True)
-
- def convert_to_openvino(self):
- ie = Core()
-
- # VAE decoder
- vae_decoder_onnx = ie.read_model(
- model=os.path.join(self.vae_decoder.model_save_dir, "model.onnx"))
- vae_decoder = ie.compile_model(model=vae_decoder_onnx,
- device_name="CPU")
-
- # Text encoder
- text_encoder_onnx = ie.read_model(
- model=os.path.join(self.text_encoder.model_save_dir, "model.onnx"))
- text_encoder = ie.compile_model(model=text_encoder_onnx,
- device_name="CPU")
-
- # Unet
- unet_onnx = ie.read_model(
- model=os.path.join(self.unet.model_save_dir, "model.onnx"))
- unet = ie.compile_model(model=unet_onnx, device_name="CPU")
-
- self.register_modules(vae_decoder=vae_decoder,
- text_encoder=text_encoder,
- unet=unet)
-
- def _encode_prompt(self, prompt, num_images_per_prompt,
- do_classifier_free_guidance, negative_prompt):
- r"""
- Encodes the prompt into text encoder hidden states.
- Args:
- prompt (`str` or `List[str]`):
- prompt to be encoded
- num_images_per_prompt (`int`):
- number of images that should be generated per prompt
- do_classifier_free_guidance (`bool`):
- whether to use classifier free guidance or not
- negative_prompt (`str` or `List[str]`):
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
- if `guidance_scale` is less than `1`).
- """
- batch_size = len(prompt) if isinstance(prompt, list) else 1
-
- # get prompt text embeddings
- text_inputs = self.tokenizer(
- prompt,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- truncation=True,
- return_tensors="np",
- )
- text_input_ids = text_inputs.input_ids
- untruncated_ids = self.tokenizer(prompt,
- padding="max_length",
- return_tensors="np").input_ids
-
- if not np.array_equal(text_input_ids, untruncated_ids):
- removed_text = self.tokenizer.batch_decode(
- untruncated_ids[:, self.tokenizer.model_max_length - 1:-1])
- logger.warning(
- "The following part of your input was truncated because CLIP can only handle sequences up to"
- f" {self.tokenizer.model_max_length} tokens: {removed_text}")
-
- prompt_embeds = self.text_encoder(
- {"input_ids":
- text_input_ids.astype(np.int32)})[self.text_encoder.outputs[0]]
- prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0)
-
- # get unconditional embeddings for classifier free guidance
- if do_classifier_free_guidance:
- uncond_tokens: List[str]
- if negative_prompt is None:
- uncond_tokens = [""] * batch_size
- elif type(prompt) is not type(negative_prompt):
- raise TypeError(
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
- f" {type(prompt)}.")
- elif isinstance(negative_prompt, str):
- uncond_tokens = [negative_prompt] * batch_size
- elif batch_size != len(negative_prompt):
- raise ValueError(
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
- " the batch size of `prompt`.")
- else:
- uncond_tokens = negative_prompt
-
- max_length = text_input_ids.shape[-1]
- uncond_input = self.tokenizer(
- uncond_tokens,
- padding="max_length",
- max_length=max_length,
- truncation=True,
- return_tensors="np",
- )
- negative_prompt_embeds = self.text_encoder({
- "input_ids":
- uncond_input.input_ids.astype(np.int32)
- })[self.text_encoder.outputs[0]]
- negative_prompt_embeds = np.repeat(negative_prompt_embeds,
- num_images_per_prompt,
- axis=0)
-
- # For classifier free guidance, we need to do two forward passes.
- # Here we concatenate the unconditional and text embeddings into a single batch
- # to avoid doing two forward passes
- prompt_embeds = np.concatenate(
- [negative_prompt_embeds, prompt_embeds])
-
- return prompt_embeds
-
- def __call__(
- self,
- prompt: Union[str, List[str]],
- height: Optional[int] = 512,
- width: Optional[int] = 512,
- num_inference_steps: Optional[int] = 50,
- guidance_scale: Optional[float] = 7.5,
- negative_prompt: Optional[Union[str, List[str]]] = None,
- num_images_per_prompt: Optional[int] = 1,
- eta: Optional[float] = 0.0,
- generator: Optional[np.random.RandomState] = None,
- latents: Optional[np.ndarray] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
- callback_steps: Optional[int] = 1,
- ):
- if isinstance(prompt, str):
- batch_size = 1
- elif isinstance(prompt, list):
- batch_size = len(prompt)
- else:
- raise ValueError(
- f"`prompt` has to be of type `str` or `list` but is {type(prompt)}"
- )
-
- if height % 8 != 0 or width % 8 != 0:
- raise ValueError(
- f"`height` and `width` have to be divisible by 8 but are {height} and {width}."
- )
-
- if (callback_steps is None) or (callback_steps is not None and
- (not isinstance(callback_steps, int)
- or callback_steps <= 0)):
- raise ValueError(
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
- f" {type(callback_steps)}.")
-
- if generator is None:
- generator = np.random
-
-        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
- # corresponds to doing no classifier free guidance.
- do_classifier_free_guidance = guidance_scale > 1.0
-
- prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt,
- do_classifier_free_guidance,
- negative_prompt)
-
- # get the initial random noise unless the user supplied it
- latents_dtype = prompt_embeds.dtype
- latents_shape = (batch_size * num_images_per_prompt, 4, height // 8,
- width // 8)
- if latents is None:
- latents = generator.randn(*latents_shape).astype(latents_dtype)
- elif latents.shape != latents_shape:
- raise ValueError(
- f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}"
- )
-
- # set timesteps
- self.scheduler.set_timesteps(num_inference_steps)
-
- latents = latents * np.float64(self.scheduler.init_noise_sigma)
-
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
- # and should be between [0, 1]
- accepts_eta = "eta" in set(
- inspect.signature(self.scheduler.step).parameters.keys())
- extra_step_kwargs = {}
- if accepts_eta:
- extra_step_kwargs["eta"] = eta
-
- # timestep_dtype = next(
- # (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)"
- # )
- timestep_dtype = 'tensor(int64)'
- timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype]
-
- for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
- # expand the latents if we are doing classifier free guidance
- latent_model_input = np.concatenate(
- [latents] * 2) if do_classifier_free_guidance else latents
- latent_model_input = self.scheduler.scale_model_input(
- torch.from_numpy(latent_model_input), t)
- latent_model_input = latent_model_input.cpu().numpy()
-
- # predict the noise residual
- timestep = np.array([t], dtype=timestep_dtype)
- unet_input = {
- "sample": latent_model_input,
- "timestep": timestep,
- "encoder_hidden_states": prompt_embeds
- }
- noise_pred = self.unet(unet_input)[self.unet.outputs[0]]
- # noise_pred = noise_pred[0]
-
- # perform guidance
- if do_classifier_free_guidance:
- noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2)
- noise_pred = noise_pred_uncond + guidance_scale * (
- noise_pred_text - noise_pred_uncond)
-
- # compute the previous noisy sample x_t -> x_t-1
- scheduler_output = self.scheduler.step(
- torch.from_numpy(noise_pred), t, torch.from_numpy(latents),
- **extra_step_kwargs)
- latents = scheduler_output.prev_sample.numpy()
-
- # call the callback, if provided
- if callback is not None and i % callback_steps == 0:
- callback(i, t, latents)
-
- latents = 1 / 0.18215 * latents
- image = self.vae_decoder({"latent_sample":
- latents})[self.vae_decoder.outputs[0]]
-
- image = np.clip(image / 2 + 0.5, 0, 1)
- image = image.transpose((0, 2, 3, 1))
-
- if self.safety_checker is not None:
- safety_checker_input = self.feature_extractor(
- self.numpy_to_pil(image),
- return_tensors="np").pixel_values.astype(image.dtype)
-
- image, has_nsfw_concepts = self.safety_checker(
- clip_input=safety_checker_input, images=image)
-
-            # The safety checker raises an error for batch size > 1, so run it image by image
- images, has_nsfw_concept = [], []
- for i in range(image.shape[0]):
- image_i, has_nsfw_concept_i = self.safety_checker(
- clip_input=safety_checker_input[i:i + 1],
- images=image[i:i + 1])
- images.append(image_i)
- has_nsfw_concept.append(has_nsfw_concept_i[0])
- image = np.concatenate(images)
- else:
- has_nsfw_concept = None
-
- if output_type == "pil":
- image = self.numpy_to_pil(image)
-
- if not return_dict:
- return (image, has_nsfw_concept)
-
- return StableDiffusionPipelineOutput(
- images=image, nsfw_content_detected=has_nsfw_concept)
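For reference, a hedged usage sketch of the pipeline above: the ONNX export path and the prompt are placeholders, and the class is assumed to be importable from the module shown.

from diffusers import OnnxStableDiffusionPipeline
from pipeline_openvino_stable_diffusion import OpenVINOStableDiffusionPipeline

# Load a CPU ONNX export of Stable Diffusion, then re-wrap it so that the text
# encoder, UNet and VAE decoder are compiled and executed through OpenVINO.
onnx_pipe = OnnxStableDiffusionPipeline.from_pretrained(
    "path/to/onnx-stable-diffusion",      # placeholder local export
    provider="CPUExecutionProvider",
)
ov_pipe = OpenVINOStableDiffusionPipeline.from_onnx_pipeline(onnx_pipe)

# Same __call__ signature as defined above; returns a StableDiffusionPipelineOutput.
result = ov_pipe("a watercolor painting of a lighthouse",
                 num_inference_steps=25, guidance_scale=7.5)
result.images[0].save("lighthouse.png")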
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/multilingual/sampled_multi_epoch_dataset.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/multilingual/sampled_multi_epoch_dataset.py
deleted file mode 100644
index 17387b2f85c0ee76db1a003091331b46de8d8def..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/multilingual/sampled_multi_epoch_dataset.py
+++ /dev/null
@@ -1,199 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import hashlib
-import logging
-import math
-
-import numpy as np
-from fairseq.data import SampledMultiDataset
-
-from .sampled_multi_dataset import CollateFormat, default_virtual_size_func
-
-
-logger = logging.getLogger(__name__)
-
-
-class SampledMultiEpochDataset(SampledMultiDataset):
- """Samples from multiple sub-datasets according to sampling ratios
- using virtual epoch sizes to speed up dataloading.
- Args:
- datasets (
- List[~torch.utils.data.Dataset]
- or OrderedDict[str, ~torch.utils.data.Dataset]
- ): datasets
- sampling_ratios (List[float]): list of probability of each dataset to be sampled
-            (default: None, which corresponds to concatenating all datasets together).
- seed (int): RNG seed to use (default: 2).
- epoch (int): starting epoch number (default: 1).
- eval_key (str, optional): a key used at evaluation time that causes
- this instance to pass-through batches from *datasets[eval_key]*.
- collate_format (CollateFormat): collater output format, either CollateFormat.ordered_dict or
- CollateFormat.single (default: CollateFormat.single) where CollateFormat.single configures
- the collater to output batches of data mixed from all sub-datasets,
- and CollateFormat.ordered_dict configures the collater to output a dictionary of batches indexed by keys
- of sub-datasets.
- Note that not all sub-datasets will present in a single batch in both formats.
- virtual_size (int, or callable): the expected virtual size of the dataset (default: default_virtual_size_func).
- split (str): the split of the data, e.g. 'train', 'valid' or 'test'.
- virtual_epoch_size (int): virtual epoch size, the dataset will go through the data by
-            this virtual epoch size one by one to speed up data loading, e.g. indexing and filtering
-            can be performed whenever a virtual epoch is loaded without waiting for the whole dataset to be loaded.
-        shared_collater (bool): whether or not all sub-datasets share the same collater.
- shard_epoch (int): the real epoch number for shard selection.
- shuffle (bool): whether or not to shuffle data (default: True).
- """
-
- def __init__(
- self,
- datasets,
- sampling_ratios=None,
- seed=2,
- epoch=1,
- eval_key=None,
- collate_format=CollateFormat.single,
- virtual_size=default_virtual_size_func,
- split="",
- virtual_epoch_size=None,
- shared_collater=False,
- shard_epoch=1,
- shuffle=True,
- ):
- self.virtual_epoch_size = virtual_epoch_size
- self._current_epoch_start_index = None
- self._random_global_indices = None
- self.shard_epoch = shard_epoch if shard_epoch is not None else 1
- self.load_next_shard = None
- self._epoch_sizes = None
- super().__init__(
- datasets=datasets,
- sampling_ratios=sampling_ratios,
- seed=seed,
- epoch=epoch,
- eval_key=eval_key,
- collate_format=collate_format,
- virtual_size=virtual_size,
- split=split,
- shared_collater=shared_collater,
- shuffle=shuffle,
- )
-
- def _setup(self, epoch):
- self.virtual_epoch_size = (
- self.virtual_epoch_size
- if self.virtual_epoch_size is not None
- else self.virtual_size
- )
- if self.virtual_epoch_size > self.virtual_size:
- logger.warning(
- f"virtual epoch size {self.virtual_epoch_size} "
- f"is greater than virtual dataset size {self.virtual_size}"
- )
- self.virtual_epoch_size = self.virtual_size
- self.num_virtual_epochs = math.ceil(self.virtual_size / self.virtual_epoch_size)
- self._current_epoch_start_index = self._get_epoch_start_index(epoch)
- logger.info(
- f"virtual epoch size {self.virtual_epoch_size}; virtual dataset size {self.virtual_size}"
- )
-
- def _map_epoch_index_to_global(self, index):
- index = self._current_epoch_start_index + index
- # add randomness
- return self._random_global_indices[index]
-
- @property
- def sizes(self):
- if self._epoch_sizes is not None:
- return self._epoch_sizes
- _sizes = super().sizes
- indices = self._random_global_indices[
- self._current_epoch_start_index : self._current_epoch_start_index
- + len(self)
- ]
- self._epoch_sizes = _sizes[indices]
- # del super()._sizes to save memory
- del self._sizes
- self._sizes = None
- return self._epoch_sizes
-
- def _get_dataset_and_index(self, index):
- i = self._map_epoch_index_to_global(index)
- return super()._get_dataset_and_index(i)
-
- def __len__(self):
- return (
- self.virtual_epoch_size
- if self._current_epoch_start_index + self.virtual_epoch_size
- < self.virtual_size
- else self.virtual_size - self._current_epoch_start_index
- )
-
- def set_epoch(self, epoch):
- if self._current_epoch_start_index is None:
-            # initializing epoch indices of a virtual dataset
- self._setup(epoch)
- self._next_virtual_epoch(epoch)
- else:
-            # working on already initialized epoch indices
- if epoch == self._cur_epoch:
- # re-enter so return
- return
- self._next_virtual_epoch(epoch)
-
- def _get_epoch_start_index(self, epoch):
- assert epoch >= 1 # fairseq is using 1-based epoch everywhere
- return ((epoch - 1) % self.num_virtual_epochs) * self.virtual_epoch_size
-
- def _next_global_indices(self, epoch):
- rng = np.random.RandomState(
- [
- int(
- hashlib.sha1(
- str(self.__class__.__name__).encode("utf-8")
- ).hexdigest(),
- 16,
- )
- % (2 ** 32),
- self.seed % (2 ** 32), # global seed
- epoch, # epoch index,
- ]
- )
- del self._random_global_indices
- self._random_global_indices = rng.choice(
- self.virtual_size, self.virtual_size, replace=False
- )
- if self.load_next_shard is None:
- self.load_next_shard = False
- else:
- # increase shard epoch for next loading
- self.shard_epoch += 1
- self.load_next_shard = True
- logger.info(
- "to load next epoch/shard in next load_dataset: "
- f"epoch={epoch}/shard_epoch={self.shard_epoch}"
- )
-
- def _next_virtual_epoch(self, epoch):
- index = self._get_epoch_start_index(epoch)
- if index == 0 or self._random_global_indices is None:
- # need to start from the beginning,
- # so call super().set_epoch(epoch) to establish the global virtual indices
- logger.info(
- "establishing a new set of global virtual indices for "
- f"epoch={epoch}/shard_epoch={self.shard_epoch}"
- )
- super().set_epoch(epoch)
- self._next_global_indices(epoch)
- else:
- self._cur_epoch = epoch
-
- # reset cache sizes and ordered_indices for the epoch after moving to a new epoch
- self._clean_if_not_none(
- [
- self._epoch_sizes,
- ]
- )
- self._epoch_sizes = None
- self._current_epoch_start_index = index
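The virtual-epoch bookkeeping above reduces to two small formulas: a 1-based epoch `e` starts at `((e - 1) % num_virtual_epochs) * virtual_epoch_size`, and its length is clipped at the end of the virtual dataset. A minimal standalone sketch of that arithmetic, mirroring `_get_epoch_start_index` and `__len__` (no fairseq dependency):

import math

def epoch_start_index(epoch, virtual_size, virtual_epoch_size):
    # Start offset of a 1-based virtual epoch.
    num_virtual_epochs = math.ceil(virtual_size / virtual_epoch_size)
    assert epoch >= 1  # fairseq uses 1-based epochs
    return ((epoch - 1) % num_virtual_epochs) * virtual_epoch_size

def epoch_length(epoch, virtual_size, virtual_epoch_size):
    # Number of samples served in this virtual epoch.
    start = epoch_start_index(epoch, virtual_size, virtual_epoch_size)
    return min(virtual_epoch_size, virtual_size - start)

# A virtual dataset of 10 samples served in virtual epochs of 4 yields epoch
# lengths 4, 4, 2 and then wraps around to the start.
assert [epoch_length(e, 10, 4) for e in (1, 2, 3, 4)] == [4, 4, 2, 4]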
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/scripts/compare_namespaces.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/scripts/compare_namespaces.py
deleted file mode 100644
index bc24db624f8db36f546c263ba3a806dae6d466bf..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/scripts/compare_namespaces.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python
-"""Helper script to compare two argparse.Namespace objects."""
-
-from argparse import Namespace # noqa
-
-
-def main():
-
- ns1 = eval(input("Namespace 1: "))
- ns2 = eval(input("Namespace 2: "))
-
- def keys(ns):
- ks = set()
- for k in dir(ns):
- if not k.startswith("_"):
- ks.add(k)
- return ks
-
- k1 = keys(ns1)
- k2 = keys(ns2)
-
- def print_keys(ks, ns1, ns2=None):
- for k in ks:
- if ns2 is None:
- print("{}\t{}".format(k, getattr(ns1, k, None)))
- else:
- print(
- "{}\t{}\t{}".format(k, getattr(ns1, k, None), getattr(ns2, k, None))
- )
-
- print("Keys unique to namespace 1:")
- print_keys(k1 - k2, ns1)
- print()
-
- print("Keys unique to namespace 2:")
- print_keys(k2 - k1, ns2)
- print()
-
- print("Overlapping keys with different values:")
- ks = [k for k in k1 & k2 if getattr(ns1, k, "None") != getattr(ns2, k, "None")]
- print_keys(ks, ns1, ns2)
- print()
-
-
-if __name__ == "__main__":
- main()
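The script above `eval`s two pasted `Namespace(...)` reprs read from stdin, so it should only be fed trusted input. A non-interactive sketch of the same comparison logic, with made-up values:

from argparse import Namespace

ns1 = Namespace(lr=0.001, arch="transformer", dropout=0.1)
ns2 = Namespace(lr=0.0005, arch="transformer", fp16=True)

k1, k2 = set(vars(ns1)), set(vars(ns2))

print("unique to namespace 1:", k1 - k2)    # {'dropout'}
print("unique to namespace 2:", k2 - k1)    # {'fp16'}
print("overlapping keys with different values:",
      {k for k in k1 & k2 if getattr(ns1, k) != getattr(ns2, k)})  # {'lr'}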
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/noisychannel/__init__.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/noisychannel/__init__.py
deleted file mode 100644
index 89f1aef4f6328d25425e0bcabb42dfffd2ed35f0..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/noisychannel/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from .rerank_options import * # noqa
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_recognition/tasks/__init__.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_recognition/tasks/__init__.py
deleted file mode 100644
index 7ac3b8dc69639c92cc129294356e9012745e3fb2..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_recognition/tasks/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import importlib
-import os
-
-
-for file in sorted(os.listdir(os.path.dirname(__file__))):
- if file.endswith(".py") and not file.startswith("_"):
- task_name = file[: file.find(".py")]
- importlib.import_module("examples.speech_recognition.tasks." + task_name)
diff --git a/spaces/Omnibus/Video-Diffusion-WebUI/video_diffusion/stable_diffusion_video/stable_video_text2video.py b/spaces/Omnibus/Video-Diffusion-WebUI/video_diffusion/stable_diffusion_video/stable_video_text2video.py
deleted file mode 100644
index 8235f010644f025c202574a38fa703f9399f7851..0000000000000000000000000000000000000000
--- a/spaces/Omnibus/Video-Diffusion-WebUI/video_diffusion/stable_diffusion_video/stable_video_text2video.py
+++ /dev/null
@@ -1,158 +0,0 @@
-import gradio as gr
-import numpy as np
-import torch
-
-from video_diffusion.stable_diffusion_video.stable_diffusion_pipeline import StableDiffusionWalkPipeline
-from video_diffusion.utils.model_list import stable_model_list
-
-
-class StableDiffusionText2VideoGenerator:
- def __init__(self):
- self.pipe = None
-
- def load_model(
- self,
- model_path,
- ):
- if self.pipe is None:
- self.pipe = StableDiffusionWalkPipeline.from_pretrained(
- model_path,
- torch_dtype=torch.float16,
- revision="fp16",
- )
-
- self.pipe.to("cuda")
- self.pipe.enable_xformers_memory_efficient_attention()
- self.pipe.enable_attention_slicing()
-
- return self.pipe
-
- def generate_video(
- self,
- model_path: str,
- first_prompts: str,
- second_prompts: str,
- negative_prompt: str,
- num_interpolation_steps: int,
- guidance_scale: int,
- num_inference_step: int,
- height: int,
- width: int,
- upsample: bool,
-        fps: int,
- ):
- first_seed = np.random.randint(0, 100000)
- second_seed = np.random.randint(0, 100000)
- seeds = [first_seed, second_seed]
- prompts = [first_prompts, second_prompts]
- pipe = self.load_model(model_path=model_path)
-
- output_video = pipe.walk(
- prompts=prompts,
- num_interpolation_steps=int(num_interpolation_steps),
- height=height,
- width=width,
- guidance_scale=guidance_scale,
- num_inference_steps=num_inference_step,
- negative_prompt=negative_prompt,
- seeds=seeds,
- upsample=upsample,
- fps=fps,
- )
-
- return output_video
-
- def app():
- with gr.Blocks():
- with gr.Row():
- with gr.Column():
- stable_text2video_first_prompt = gr.Textbox(
- lines=1,
- placeholder="First Prompt",
- show_label=False,
- )
- stable_text2video_second_prompt = gr.Textbox(
- lines=1,
- placeholder="Second Prompt",
- show_label=False,
- )
- stable_text2video_negative_prompt = gr.Textbox(
- lines=1,
- placeholder="Negative Prompt ",
- show_label=False,
- )
- with gr.Row():
- with gr.Column():
- stable_text2video_model_path = gr.Dropdown(
- choices=stable_model_list,
- label="Stable Model List",
- value=stable_model_list[0],
- )
- stable_text2video_guidance_scale = gr.Slider(
- minimum=0,
- maximum=15,
- step=1,
- value=8.5,
- label="Guidance Scale",
- )
- stable_text2video_num_inference_steps = gr.Slider(
- minimum=1,
- maximum=100,
- step=1,
- value=30,
- label="Number of Inference Steps",
- )
- stable_text2video_fps = gr.Slider(
- minimum=1,
- maximum=60,
- step=1,
- value=10,
- label="Fps",
- )
- with gr.Row():
- with gr.Column():
- stable_text2video_num_interpolation_steps = gr.Number(
- value=10,
- label="Number of Interpolation Steps",
- )
- stable_text2video_height = gr.Slider(
- minimum=1,
- maximum=1000,
- step=1,
- value=512,
- label="Height",
- )
- stable_text2video_width = gr.Slider(
- minimum=1,
- maximum=1000,
- step=1,
- value=512,
- label="Width",
- )
- stable_text2video_upsample = gr.Checkbox(
- label="Upsample",
- default=False,
- )
-
- text2video_generate = gr.Button(value="Generator")
-
- with gr.Column():
- text2video_output = gr.Video(label="Output")
-
- text2video_generate.click(
- fn=StableDiffusionText2VideoGenerator().generate_video,
- inputs=[
- stable_text2video_model_path,
- stable_text2video_first_prompt,
- stable_text2video_second_prompt,
- stable_text2video_negative_prompt,
- stable_text2video_num_interpolation_steps,
- stable_text2video_guidance_scale,
- stable_text2video_num_inference_steps,
- stable_text2video_height,
- stable_text2video_width,
- stable_text2video_upsample,
- stable_text2video_fps,
- ],
- outputs=text2video_output,
- )
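Outside the Gradio UI, `generate_video` can also be called directly. A hedged sketch (requires a CUDA GPU; the prompts are placeholders, and the class is assumed importable from the module above):

from video_diffusion.utils.model_list import stable_model_list

generator = StableDiffusionText2VideoGenerator()
video_path = generator.generate_video(
    model_path=stable_model_list[0],
    first_prompts="a calm sea at sunrise",
    second_prompts="a stormy sea at night",
    negative_prompt="blurry, low quality",
    num_interpolation_steps=10,
    guidance_scale=8.5,
    num_inference_step=30,
    height=512,
    width=512,
    upsample=False,
    fps=10,
)
# The walk() pipeline is expected to return the rendered video's file path.
print(video_path)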
diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/setup.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/setup.py
deleted file mode 100644
index 50a5e23e9ba6fab32775ebd16fb2746c7bf6660c..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/setup.py
+++ /dev/null
@@ -1,206 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import glob
-import os
-import shutil
-from os import path
-from setuptools import find_packages, setup
-from typing import List
-import torch
-from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension
-
-torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
-assert torch_ver >= [1, 8], "Requires PyTorch >= 1.8"
-
-
-def get_version():
- init_py_path = path.join(path.abspath(path.dirname(__file__)), "detectron2", "__init__.py")
- init_py = open(init_py_path, "r").readlines()
- version_line = [l.strip() for l in init_py if l.startswith("__version__")][0]
- version = version_line.split("=")[-1].strip().strip("'\"")
-
- # The following is used to build release packages.
- # Users should never use it.
- suffix = os.getenv("D2_VERSION_SUFFIX", "")
- version = version + suffix
- if os.getenv("BUILD_NIGHTLY", "0") == "1":
- from datetime import datetime
-
- date_str = datetime.today().strftime("%y%m%d")
- version = version + ".dev" + date_str
-
- new_init_py = [l for l in init_py if not l.startswith("__version__")]
- new_init_py.append('__version__ = "{}"\n'.format(version))
- with open(init_py_path, "w") as f:
- f.write("".join(new_init_py))
- return version
-
-
-def get_extensions():
- this_dir = path.dirname(path.abspath(__file__))
- extensions_dir = path.join(this_dir, "detectron2", "layers", "csrc")
-
- main_source = path.join(extensions_dir, "vision.cpp")
- sources = glob.glob(path.join(extensions_dir, "**", "*.cpp"))
-
- from torch.utils.cpp_extension import ROCM_HOME
-
- is_rocm_pytorch = (
- True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False
- )
- if is_rocm_pytorch:
- assert torch_ver >= [1, 8], "ROCM support requires PyTorch >= 1.8!"
-
- # common code between cuda and rocm platforms, for hipify version [1,0,0] and later.
- source_cuda = glob.glob(path.join(extensions_dir, "**", "*.cu")) + glob.glob(
- path.join(extensions_dir, "*.cu")
- )
- sources = [main_source] + sources
-
- extension = CppExtension
-
- extra_compile_args = {"cxx": []}
- define_macros = []
-
- if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) or os.getenv(
- "FORCE_CUDA", "0"
- ) == "1":
- extension = CUDAExtension
- sources += source_cuda
-
- if not is_rocm_pytorch:
- define_macros += [("WITH_CUDA", None)]
- extra_compile_args["nvcc"] = [
- "-O3",
- "-DCUDA_HAS_FP16=1",
- "-D__CUDA_NO_HALF_OPERATORS__",
- "-D__CUDA_NO_HALF_CONVERSIONS__",
- "-D__CUDA_NO_HALF2_OPERATORS__",
- ]
- else:
- define_macros += [("WITH_HIP", None)]
- extra_compile_args["nvcc"] = []
-
- if torch_ver < [1, 7]:
- # supported by https://github.com/pytorch/pytorch/pull/43931
- CC = os.environ.get("CC", None)
- if CC is not None:
- extra_compile_args["nvcc"].append("-ccbin={}".format(CC))
-
- include_dirs = [extensions_dir]
-
- ext_modules = [
- extension(
- "detectron2._C",
- sources,
- include_dirs=include_dirs,
- define_macros=define_macros,
- extra_compile_args=extra_compile_args,
- )
- ]
-
- return ext_modules
-
-
-def get_model_zoo_configs() -> List[str]:
- """
- Return a list of configs to include in package for model zoo. Copy over these configs inside
- detectron2/model_zoo.
- """
-
- # Use absolute paths while symlinking.
- source_configs_dir = path.join(path.dirname(path.realpath(__file__)), "configs")
- destination = path.join(
- path.dirname(path.realpath(__file__)), "detectron2", "model_zoo", "configs"
- )
- # Symlink the config directory inside package to have a cleaner pip install.
-
- # Remove stale symlink/directory from a previous build.
- if path.exists(source_configs_dir):
- if path.islink(destination):
- os.unlink(destination)
- elif path.isdir(destination):
- shutil.rmtree(destination)
-
- if not path.exists(destination):
- try:
- os.symlink(source_configs_dir, destination)
- except OSError:
- # Fall back to copying if symlink fails: ex. on Windows.
- shutil.copytree(source_configs_dir, destination)
-
- config_paths = glob.glob("configs/**/*.yaml", recursive=True) + glob.glob(
- "configs/**/*.py", recursive=True
- )
- return config_paths
-
-
-# For projects that are relative small and provide features that are very close
-# to detectron2's core functionalities, we install them under detectron2.projects
-PROJECTS = {
-
-}
-
-setup(
- name="detectron2",
- version=get_version(),
- author="FAIR",
- url="https://github.com/facebookresearch/detectron2",
- description="Detectron2 is FAIR's next-generation research "
- "platform for object detection and segmentation.",
- packages=find_packages(exclude=("configs", "tests*")) + list(PROJECTS.keys()),
- package_dir=PROJECTS,
- package_data={"detectron2.model_zoo": get_model_zoo_configs()},
- python_requires=">=3.6",
- install_requires=[
- # These dependencies are not pure-python.
- # In general, avoid adding more dependencies like them because they are not
- # guaranteed to be installable by `pip install` on all platforms.
- # To tell if a package is pure-python, go to https://pypi.org/project/{name}/#files
- "Pillow>=7.1", # or use pillow-simd for better performance
- "matplotlib", # TODO move it to optional after we add opencv visualization
- "pycocotools>=2.0.2", # corresponds to https://github.com/ppwwyyxx/cocoapi
- # Do not add opencv here. Just like pytorch, user should install
-        # opencv themselves, preferably by the OS's package manager, or by
- # choosing the proper pypi package name at https://github.com/skvark/opencv-python
- # The following are pure-python dependencies that should be easily installable
- "termcolor>=1.1",
- "yacs>=0.1.8",
- "tabulate",
- "cloudpickle",
- "tqdm>4.29.0",
- "tensorboard",
- # Lock version of fvcore/iopath because they may have breaking changes
- # NOTE: when updating fvcore/iopath version, make sure fvcore depends
- # on compatible version of iopath.
- "fvcore>=0.1.5,<0.1.6", # required like this to make it pip installable
- "iopath>=0.1.7,<0.1.10",
- "future", # used by caffe2
- "pydot", # used to save caffe2 SVGs
- "dataclasses; python_version<'3.7'",
- "omegaconf>=2.1",
- "hydra-core>=1.1",
- "black==21.4b2",
- # If a new dependency is required at import time (in addition to runtime), it
- # probably needs to exist in docs/requirements.txt, or as a mock in docs/conf.py
- ],
- extras_require={
- # optional dependencies, required by some features
- "all": [
- "shapely",
- "pygments>=2.2",
- "psutil",
- "panopticapi @ https://github.com/cocodataset/panopticapi/archive/master.zip",
- ],
- # dev dependencies. Install them by `pip install 'detectron2[dev]'`
- "dev": [
- "flake8==3.8.1",
- "isort==4.3.21",
- "flake8-bugbear",
- "flake8-comprehensions",
- ],
- },
- ext_modules=get_extensions(),
- cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
-)
diff --git a/spaces/PSLD/PSLD/stable-diffusion/ldm/modules/x_transformer.py b/spaces/PSLD/PSLD/stable-diffusion/ldm/modules/x_transformer.py
deleted file mode 100644
index 5fc15bf9cfe0111a910e7de33d04ffdec3877576..0000000000000000000000000000000000000000
--- a/spaces/PSLD/PSLD/stable-diffusion/ldm/modules/x_transformer.py
+++ /dev/null
@@ -1,641 +0,0 @@
-"""shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers"""
-import torch
-from torch import nn, einsum
-import torch.nn.functional as F
-from functools import partial
-from inspect import isfunction
-from collections import namedtuple
-from einops import rearrange, repeat, reduce
-
-# constants
-
-DEFAULT_DIM_HEAD = 64
-
-Intermediates = namedtuple('Intermediates', [
- 'pre_softmax_attn',
- 'post_softmax_attn'
-])
-
-LayerIntermediates = namedtuple('Intermediates', [
- 'hiddens',
- 'attn_intermediates'
-])
-
-
-class AbsolutePositionalEmbedding(nn.Module):
- def __init__(self, dim, max_seq_len):
- super().__init__()
- self.emb = nn.Embedding(max_seq_len, dim)
- self.init_()
-
- def init_(self):
- nn.init.normal_(self.emb.weight, std=0.02)
-
- def forward(self, x):
- n = torch.arange(x.shape[1], device=x.device)
- return self.emb(n)[None, :, :]
-
-
-class FixedPositionalEmbedding(nn.Module):
- def __init__(self, dim):
- super().__init__()
- inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
- self.register_buffer('inv_freq', inv_freq)
-
- def forward(self, x, seq_dim=1, offset=0):
- t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset
- sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq)
- emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1)
- return emb[None, :, :]
-
-
-# helpers
-
-def exists(val):
- return val is not None
-
-
-def default(val, d):
- if exists(val):
- return val
- return d() if isfunction(d) else d
-
-
-def always(val):
- def inner(*args, **kwargs):
- return val
- return inner
-
-
-def not_equals(val):
- def inner(x):
- return x != val
- return inner
-
-
-def equals(val):
- def inner(x):
- return x == val
- return inner
-
-
-def max_neg_value(tensor):
- return -torch.finfo(tensor.dtype).max
-
-
-# keyword argument helpers
-
-def pick_and_pop(keys, d):
- values = list(map(lambda key: d.pop(key), keys))
- return dict(zip(keys, values))
-
-
-def group_dict_by_key(cond, d):
- return_val = [dict(), dict()]
- for key in d.keys():
- match = bool(cond(key))
- ind = int(not match)
- return_val[ind][key] = d[key]
- return (*return_val,)
-
-
-def string_begins_with(prefix, str):
- return str.startswith(prefix)
-
-
-def group_by_key_prefix(prefix, d):
- return group_dict_by_key(partial(string_begins_with, prefix), d)
-
-
-def groupby_prefix_and_trim(prefix, d):
- kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
- kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
- return kwargs_without_prefix, kwargs
-
-
-# classes
-class Scale(nn.Module):
- def __init__(self, value, fn):
- super().__init__()
- self.value = value
- self.fn = fn
-
- def forward(self, x, **kwargs):
- x, *rest = self.fn(x, **kwargs)
- return (x * self.value, *rest)
-
-
-class Rezero(nn.Module):
- def __init__(self, fn):
- super().__init__()
- self.fn = fn
- self.g = nn.Parameter(torch.zeros(1))
-
- def forward(self, x, **kwargs):
- x, *rest = self.fn(x, **kwargs)
- return (x * self.g, *rest)
-
-
-class ScaleNorm(nn.Module):
- def __init__(self, dim, eps=1e-5):
- super().__init__()
- self.scale = dim ** -0.5
- self.eps = eps
- self.g = nn.Parameter(torch.ones(1))
-
- def forward(self, x):
- norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
- return x / norm.clamp(min=self.eps) * self.g
-
-
-class RMSNorm(nn.Module):
- def __init__(self, dim, eps=1e-8):
- super().__init__()
- self.scale = dim ** -0.5
- self.eps = eps
- self.g = nn.Parameter(torch.ones(dim))
-
- def forward(self, x):
- norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
- return x / norm.clamp(min=self.eps) * self.g
-
-
-class Residual(nn.Module):
- def forward(self, x, residual):
- return x + residual
-
-
-class GRUGating(nn.Module):
- def __init__(self, dim):
- super().__init__()
- self.gru = nn.GRUCell(dim, dim)
-
- def forward(self, x, residual):
- gated_output = self.gru(
- rearrange(x, 'b n d -> (b n) d'),
- rearrange(residual, 'b n d -> (b n) d')
- )
-
- return gated_output.reshape_as(x)
-
-
-# feedforward
-
-class GEGLU(nn.Module):
- def __init__(self, dim_in, dim_out):
- super().__init__()
- self.proj = nn.Linear(dim_in, dim_out * 2)
-
- def forward(self, x):
- x, gate = self.proj(x).chunk(2, dim=-1)
- return x * F.gelu(gate)
-
-
-class FeedForward(nn.Module):
- def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
- super().__init__()
- inner_dim = int(dim * mult)
- dim_out = default(dim_out, dim)
- project_in = nn.Sequential(
- nn.Linear(dim, inner_dim),
- nn.GELU()
- ) if not glu else GEGLU(dim, inner_dim)
-
- self.net = nn.Sequential(
- project_in,
- nn.Dropout(dropout),
- nn.Linear(inner_dim, dim_out)
- )
-
- def forward(self, x):
- return self.net(x)
-
-
-# attention.
-class Attention(nn.Module):
- def __init__(
- self,
- dim,
- dim_head=DEFAULT_DIM_HEAD,
- heads=8,
- causal=False,
- mask=None,
- talking_heads=False,
- sparse_topk=None,
- use_entmax15=False,
- num_mem_kv=0,
- dropout=0.,
- on_attn=False
- ):
- super().__init__()
- if use_entmax15:
- raise NotImplementedError("Check out entmax activation instead of softmax activation!")
- self.scale = dim_head ** -0.5
- self.heads = heads
- self.causal = causal
- self.mask = mask
-
- inner_dim = dim_head * heads
-
- self.to_q = nn.Linear(dim, inner_dim, bias=False)
- self.to_k = nn.Linear(dim, inner_dim, bias=False)
- self.to_v = nn.Linear(dim, inner_dim, bias=False)
- self.dropout = nn.Dropout(dropout)
-
- # talking heads
- self.talking_heads = talking_heads
- if talking_heads:
- self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads))
- self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads))
-
- # explicit topk sparse attention
- self.sparse_topk = sparse_topk
-
- # entmax
- #self.attn_fn = entmax15 if use_entmax15 else F.softmax
- self.attn_fn = F.softmax
-
- # add memory key / values
- self.num_mem_kv = num_mem_kv
- if num_mem_kv > 0:
- self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
- self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
-
- # attention on attention
- self.attn_on_attn = on_attn
- self.to_out = nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim)
-
- def forward(
- self,
- x,
- context=None,
- mask=None,
- context_mask=None,
- rel_pos=None,
- sinusoidal_emb=None,
- prev_attn=None,
- mem=None
- ):
- b, n, _, h, talking_heads, device = *x.shape, self.heads, self.talking_heads, x.device
- kv_input = default(context, x)
-
- q_input = x
- k_input = kv_input
- v_input = kv_input
-
- if exists(mem):
- k_input = torch.cat((mem, k_input), dim=-2)
- v_input = torch.cat((mem, v_input), dim=-2)
-
- if exists(sinusoidal_emb):
- # in shortformer, the query would start at a position offset depending on the past cached memory
- offset = k_input.shape[-2] - q_input.shape[-2]
- q_input = q_input + sinusoidal_emb(q_input, offset=offset)
- k_input = k_input + sinusoidal_emb(k_input)
-
- q = self.to_q(q_input)
- k = self.to_k(k_input)
- v = self.to_v(v_input)
-
- q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))
-
- input_mask = None
- if any(map(exists, (mask, context_mask))):
- q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool())
- k_mask = q_mask if not exists(context) else context_mask
- k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool())
- q_mask = rearrange(q_mask, 'b i -> b () i ()')
- k_mask = rearrange(k_mask, 'b j -> b () () j')
- input_mask = q_mask * k_mask
-
- if self.num_mem_kv > 0:
- mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v))
- k = torch.cat((mem_k, k), dim=-2)
- v = torch.cat((mem_v, v), dim=-2)
- if exists(input_mask):
- input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True)
-
- dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
- mask_value = max_neg_value(dots)
-
- if exists(prev_attn):
- dots = dots + prev_attn
-
- pre_softmax_attn = dots
-
- if talking_heads:
- dots = einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous()
-
- if exists(rel_pos):
- dots = rel_pos(dots)
-
- if exists(input_mask):
- dots.masked_fill_(~input_mask, mask_value)
- del input_mask
-
- if self.causal:
- i, j = dots.shape[-2:]
- r = torch.arange(i, device=device)
- mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j')
- mask = F.pad(mask, (j - i, 0), value=False)
- dots.masked_fill_(mask, mask_value)
- del mask
-
- if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]:
- top, _ = dots.topk(self.sparse_topk, dim=-1)
- vk = top[..., -1].unsqueeze(-1).expand_as(dots)
- mask = dots < vk
- dots.masked_fill_(mask, mask_value)
- del mask
-
- attn = self.attn_fn(dots, dim=-1)
- post_softmax_attn = attn
-
- attn = self.dropout(attn)
-
- if talking_heads:
- attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous()
-
- out = einsum('b h i j, b h j d -> b h i d', attn, v)
- out = rearrange(out, 'b h n d -> b n (h d)')
-
- intermediates = Intermediates(
- pre_softmax_attn=pre_softmax_attn,
- post_softmax_attn=post_softmax_attn
- )
-
- return self.to_out(out), intermediates
-
-
-class AttentionLayers(nn.Module):
- def __init__(
- self,
- dim,
- depth,
- heads=8,
- causal=False,
- cross_attend=False,
- only_cross=False,
- use_scalenorm=False,
- use_rmsnorm=False,
- use_rezero=False,
- rel_pos_num_buckets=32,
- rel_pos_max_distance=128,
- position_infused_attn=False,
- custom_layers=None,
- sandwich_coef=None,
- par_ratio=None,
- residual_attn=False,
- cross_residual_attn=False,
- macaron=False,
- pre_norm=True,
- gate_residual=False,
- **kwargs
- ):
- super().__init__()
- ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
- attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs)
-
- dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
-
- self.dim = dim
- self.depth = depth
- self.layers = nn.ModuleList([])
-
- self.has_pos_emb = position_infused_attn
- self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None
- self.rotary_pos_emb = always(None)
-
- assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
- self.rel_pos = None
-
- self.pre_norm = pre_norm
-
- self.residual_attn = residual_attn
- self.cross_residual_attn = cross_residual_attn
-
- norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm
- norm_class = RMSNorm if use_rmsnorm else norm_class
- norm_fn = partial(norm_class, dim)
-
- norm_fn = nn.Identity if use_rezero else norm_fn
- branch_fn = Rezero if use_rezero else None
-
- if cross_attend and not only_cross:
- default_block = ('a', 'c', 'f')
- elif cross_attend and only_cross:
- default_block = ('c', 'f')
- else:
- default_block = ('a', 'f')
-
- if macaron:
- default_block = ('f',) + default_block
-
- if exists(custom_layers):
- layer_types = custom_layers
- elif exists(par_ratio):
- par_depth = depth * len(default_block)
- assert 1 < par_ratio <= par_depth, 'par ratio out of range'
- default_block = tuple(filter(not_equals('f'), default_block))
- par_attn = par_depth // par_ratio
- depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
- par_width = (depth_cut + depth_cut // par_attn) // par_attn
- assert len(default_block) <= par_width, 'default block is too large for par_ratio'
- par_block = default_block + ('f',) * (par_width - len(default_block))
- par_head = par_block * par_attn
- layer_types = par_head + ('f',) * (par_depth - len(par_head))
- elif exists(sandwich_coef):
- assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth'
- layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
- else:
- layer_types = default_block * depth
-
- self.layer_types = layer_types
- self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
-
- for layer_type in self.layer_types:
- if layer_type == 'a':
- layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs)
- elif layer_type == 'c':
- layer = Attention(dim, heads=heads, **attn_kwargs)
- elif layer_type == 'f':
- layer = FeedForward(dim, **ff_kwargs)
- layer = layer if not macaron else Scale(0.5, layer)
- else:
- raise Exception(f'invalid layer type {layer_type}')
-
- if isinstance(layer, Attention) and exists(branch_fn):
- layer = branch_fn(layer)
-
- if gate_residual:
- residual_fn = GRUGating(dim)
- else:
- residual_fn = Residual()
-
- self.layers.append(nn.ModuleList([
- norm_fn(),
- layer,
- residual_fn
- ]))
-
- def forward(
- self,
- x,
- context=None,
- mask=None,
- context_mask=None,
- mems=None,
- return_hiddens=False
- ):
- hiddens = []
- intermediates = []
- prev_attn = None
- prev_cross_attn = None
-
- mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
-
- for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)):
- is_last = ind == (len(self.layers) - 1)
-
- if layer_type == 'a':
- hiddens.append(x)
- layer_mem = mems.pop(0)
-
- residual = x
-
- if self.pre_norm:
- x = norm(x)
-
- if layer_type == 'a':
- out, inter = block(x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos,
- prev_attn=prev_attn, mem=layer_mem)
- elif layer_type == 'c':
- out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn)
- elif layer_type == 'f':
- out = block(x)
-
- x = residual_fn(out, residual)
-
- if layer_type in ('a', 'c'):
- intermediates.append(inter)
-
- if layer_type == 'a' and self.residual_attn:
- prev_attn = inter.pre_softmax_attn
- elif layer_type == 'c' and self.cross_residual_attn:
- prev_cross_attn = inter.pre_softmax_attn
-
- if not self.pre_norm and not is_last:
- x = norm(x)
-
- if return_hiddens:
- intermediates = LayerIntermediates(
- hiddens=hiddens,
- attn_intermediates=intermediates
- )
-
- return x, intermediates
-
- return x
-
-
-class Encoder(AttentionLayers):
- def __init__(self, **kwargs):
- assert 'causal' not in kwargs, 'cannot set causality on encoder'
- super().__init__(causal=False, **kwargs)
-
-
-
-class TransformerWrapper(nn.Module):
- def __init__(
- self,
- *,
- num_tokens,
- max_seq_len,
- attn_layers,
- emb_dim=None,
- max_mem_len=0.,
- emb_dropout=0.,
- num_memory_tokens=None,
- tie_embedding=False,
- use_pos_emb=True
- ):
- super().__init__()
- assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
-
- dim = attn_layers.dim
- emb_dim = default(emb_dim, dim)
-
- self.max_seq_len = max_seq_len
- self.max_mem_len = max_mem_len
- self.num_tokens = num_tokens
-
- self.token_emb = nn.Embedding(num_tokens, emb_dim)
- self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if (
- use_pos_emb and not attn_layers.has_pos_emb) else always(0)
- self.emb_dropout = nn.Dropout(emb_dropout)
-
- self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
- self.attn_layers = attn_layers
- self.norm = nn.LayerNorm(dim)
-
- self.init_()
-
- self.to_logits = nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t()
-
- # memory tokens (like [cls]) from Memory Transformers paper
- num_memory_tokens = default(num_memory_tokens, 0)
- self.num_memory_tokens = num_memory_tokens
- if num_memory_tokens > 0:
- self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
-
- # let funnel encoder know number of memory tokens, if specified
- if hasattr(attn_layers, 'num_memory_tokens'):
- attn_layers.num_memory_tokens = num_memory_tokens
-
- def init_(self):
- nn.init.normal_(self.token_emb.weight, std=0.02)
-
- def forward(
- self,
- x,
- return_embeddings=False,
- mask=None,
- return_mems=False,
- return_attn=False,
- mems=None,
- **kwargs
- ):
- b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens
- x = self.token_emb(x)
- x += self.pos_emb(x)
- x = self.emb_dropout(x)
-
- x = self.project_emb(x)
-
- if num_mem > 0:
- mem = repeat(self.memory_tokens, 'n d -> b n d', b=b)
- x = torch.cat((mem, x), dim=1)
-
- # auto-handle masking after appending memory tokens
- if exists(mask):
- mask = F.pad(mask, (num_mem, 0), value=True)
-
- x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs)
- x = self.norm(x)
-
- mem, x = x[:, :num_mem], x[:, num_mem:]
-
- out = self.to_logits(x) if not return_embeddings else x
-
- if return_mems:
- hiddens = intermediates.hiddens
- new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens
- new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
- return out, new_mems
-
- if return_attn:
- attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
- return out, attn_maps
-
- return out
-
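A minimal smoke test for the wrapper above (hyperparameters are arbitrary; the import path follows the file location shown in this diff):

import torch
from ldm.modules.x_transformer import Encoder, TransformerWrapper

model = TransformerWrapper(
    num_tokens=1000,                      # vocabulary size
    max_seq_len=128,
    attn_layers=Encoder(dim=256, depth=2, heads=4),
)

tokens = torch.randint(0, 1000, (2, 64))               # (batch, seq_len)
logits = model(tokens)                                 # (2, 64, 1000) token logits
hidden = model(tokens, return_embeddings=True)         # (2, 64, 256) final hidden states
print(logits.shape, hidden.shape)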
diff --git a/spaces/PSLD/PSLD/stable-diffusion/scripts/download_first_stages.sh b/spaces/PSLD/PSLD/stable-diffusion/scripts/download_first_stages.sh
deleted file mode 100644
index a8d79e99ccdff0a8d8762f23f3c0642401f32f6c..0000000000000000000000000000000000000000
--- a/spaces/PSLD/PSLD/stable-diffusion/scripts/download_first_stages.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/bash
-wget -O models/first_stage_models/kl-f4/model.zip https://ommer-lab.com/files/latent-diffusion/kl-f4.zip
-wget -O models/first_stage_models/kl-f8/model.zip https://ommer-lab.com/files/latent-diffusion/kl-f8.zip
-wget -O models/first_stage_models/kl-f16/model.zip https://ommer-lab.com/files/latent-diffusion/kl-f16.zip
-wget -O models/first_stage_models/kl-f32/model.zip https://ommer-lab.com/files/latent-diffusion/kl-f32.zip
-wget -O models/first_stage_models/vq-f4/model.zip https://ommer-lab.com/files/latent-diffusion/vq-f4.zip
-wget -O models/first_stage_models/vq-f4-noattn/model.zip https://ommer-lab.com/files/latent-diffusion/vq-f4-noattn.zip
-wget -O models/first_stage_models/vq-f8/model.zip https://ommer-lab.com/files/latent-diffusion/vq-f8.zip
-wget -O models/first_stage_models/vq-f8-n256/model.zip https://ommer-lab.com/files/latent-diffusion/vq-f8-n256.zip
-wget -O models/first_stage_models/vq-f16/model.zip https://ommer-lab.com/files/latent-diffusion/vq-f16.zip
-
-
-
-cd models/first_stage_models/kl-f4
-unzip -o model.zip
-
-cd ../kl-f8
-unzip -o model.zip
-
-cd ../kl-f16
-unzip -o model.zip
-
-cd ../kl-f32
-unzip -o model.zip
-
-cd ../vq-f4
-unzip -o model.zip
-
-cd ../vq-f4-noattn
-unzip -o model.zip
-
-cd ../vq-f8
-unzip -o model.zip
-
-cd ../vq-f8-n256
-unzip -o model.zip
-
-cd ../vq-f16
-unzip -o model.zip
-
-cd ../..
\ No newline at end of file
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/poll.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/poll.go
deleted file mode 100644
index 930137f8af294233bbbe8239f58ddb211d1db2b7..0000000000000000000000000000000000000000
Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/poll.go and /dev/null differ
diff --git a/spaces/PeepDaSlan9/AutoGPT/tests/unit/test_browse_scrape_links.py b/spaces/PeepDaSlan9/AutoGPT/tests/unit/test_browse_scrape_links.py
deleted file mode 100644
index 0a3340e7397a997da96b8ab9828954230e1a3c20..0000000000000000000000000000000000000000
--- a/spaces/PeepDaSlan9/AutoGPT/tests/unit/test_browse_scrape_links.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# Generated by CodiumAI
-
-# Dependencies:
-# pip install pytest-mock
-import pytest
-
-from autogpt.commands.web_requests import scrape_links
-
-"""
-Code Analysis
-
-Objective:
-The objective of the 'scrape_links' function is to scrape hyperlinks from a
-given URL and return them in a formatted way.
-
-Inputs:
-- url: a string representing the URL to be scraped.
-
-Flow:
-1. Send a GET request to the given URL using the requests library and the user agent header from the config file.
-2. Check if the response contains an HTTP error. If it does, return "error".
-3. Parse the HTML content of the response using the BeautifulSoup library.
-4. Remove any script and style tags from the parsed HTML.
-5. Extract all hyperlinks from the parsed HTML using the 'extract_hyperlinks' function.
-6. Format the extracted hyperlinks using the 'format_hyperlinks' function.
-7. Return the formatted hyperlinks.
-
-Outputs:
-- A list of formatted hyperlinks.
-
-Additional aspects:
-- The function uses the 'requests' and 'BeautifulSoup' libraries to send HTTP
-requests and parse HTML content, respectively.
-- The 'extract_hyperlinks' function is called to extract hyperlinks from the parsed HTML.
-- The 'format_hyperlinks' function is called to format the extracted hyperlinks.
-- The function checks for HTTP errors and returns "error" if any are found.
-"""
-
-
-class TestScrapeLinks:
- # Tests that the function returns a list of formatted hyperlinks when
- # provided with a valid url that returns a webpage with hyperlinks.
- def test_valid_url_with_hyperlinks(self):
- url = "https://www.google.com"
- result = scrape_links(url)
- assert len(result) > 0
- assert isinstance(result, list)
- assert isinstance(result[0], str)
-
- # Tests that the function returns correctly formatted hyperlinks when given a valid url.
- def test_valid_url(self, mocker):
- # Mock the requests.get() function to return a response with sample HTML containing hyperlinks
- mock_response = mocker.Mock()
- mock_response.status_code = 200
-        mock_response.text = (
-            '<a href="https://www.google.com">Google</a>'
-        )
- mocker.patch("requests.Session.get", return_value=mock_response)
-
- # Call the function with a valid URL
- result = scrape_links("https://www.example.com")
-
- # Assert that the function returns correctly formatted hyperlinks
- assert result == ["Google (https://www.google.com)"]
-
- # Tests that the function returns "error" when given an invalid url.
- def test_invalid_url(self, mocker):
- # Mock the requests.get() function to return an HTTP error response
- mock_response = mocker.Mock()
- mock_response.status_code = 404
- mocker.patch("requests.Session.get", return_value=mock_response)
-
- # Call the function with an invalid URL
- result = scrape_links("https://www.invalidurl.com")
-
- # Assert that the function returns "error"
- assert "Error:" in result
-
- # Tests that the function returns an empty list when the html contains no hyperlinks.
- def test_no_hyperlinks(self, mocker):
- # Mock the requests.get() function to return a response with sample HTML containing no hyperlinks
- mock_response = mocker.Mock()
- mock_response.status_code = 200
-        mock_response.text = "<html><body><p>No hyperlinks here</p></body></html>"
- mocker.patch("requests.Session.get", return_value=mock_response)
-
- # Call the function with a URL containing no hyperlinks
- result = scrape_links("https://www.example.com")
-
- # Assert that the function returns an empty list
- assert result == []
-
- # Tests that scrape_links() correctly extracts and formats hyperlinks from
- # a sample HTML containing a few hyperlinks.
- def test_scrape_links_with_few_hyperlinks(self, mocker):
- # Mock the requests.get() function to return a response with a sample HTML containing hyperlinks
- mock_response = mocker.Mock()
- mock_response.status_code = 200
- mock_response.text = """
-
-
-
-'''
-
-# Create the Gradio interface
-inp = [gradio.inputs.Textbox(label="Enter your API key (a string starting with sk)",
-                             default="",
-                             type='password'),
-       gradio.inputs.Textbox(lines=5,
-                             label="Enter specific review requirements and output format (otherwise a default format is used)",
- default="""* Overall Review
-Please briefly summarize the main points and contributions of this paper.
-xxx
-* Paper Strength
-Please provide a list of the strengths of this paper, including but not limited to: innovative and practical methodology, insightful empirical findings or in-depth theoretical analysis,
-well-structured review of relevant literature, and any other factors that may make the paper valuable to readers. (Maximum length: 2,000 characters)
-(1) xxx
-(2) xxx
-(3) xxx
-* Paper Weakness
-Please provide a numbered list of your main concerns regarding this paper (so authors could respond to the concerns individually).
-These may include, but are not limited to: inadequate implementation details for reproducing the study, limited evaluation and ablation studies for the proposed method,
-correctness of the theoretical analysis or experimental results, lack of comparisons or discussions with widely-known baselines in the field, lack of clarity in exposition,
-or any other factors that may impede the reader's understanding or benefit from the paper. Please kindly refrain from providing a general assessment of the paper's novelty without providing detailed explanations. (Maximum length: 2,000 characters)
-(1) xxx
-(2) xxx
-(3) xxx
-* Questions To Authors And Suggestions For Rebuttal
-Please provide a numbered list of specific and clear questions that pertain to the details of the proposed method, evaluation setting, or additional results that would aid in supporting the authors' claims.
-The questions should be formulated in a manner that, after the authors have answered them during the rebuttal, it would enable a more thorough assessment of the paper's quality. (Maximum length: 2,000 characters)
-*Overall score (1-10)
-The paper is scored on a scale of 1-10, with 10 being the full mark, and 6 stands for borderline accept. Then give the reason for your rating.
-xxx"""
- ),
-       gradio.inputs.File(label="Upload the paper PDF (please wait until the PDF has finished uploading before clicking Submit!)", type="bytes"),
-       gradio.inputs.Radio(choices=["English", "Chinese", "French", "German", "Japanese"],
-                           default="English",
-                           label="Select the output language"),
-]
-
-chat_reviewer_gui = gradio.Interface(fn=main,
- inputs=inp,
-                              outputs = [gradio.Textbox(lines=25, label="Analysis result"), gradio.Textbox(lines=2, label="Resource usage")],
- title=title,
- description=description)
-
-# Start server
-chat_reviewer_gui.launch(quiet=True, show_api=False)
\ No newline at end of file
diff --git a/spaces/Shreeradha/GradioChatBotAI/README.md b/spaces/Shreeradha/GradioChatBotAI/README.md
deleted file mode 100644
index 8360f792c493b559d35bada6056338e5e88516eb..0000000000000000000000000000000000000000
--- a/spaces/Shreeradha/GradioChatBotAI/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: GradioChatBotAI
-emoji: 🏆
-colorFrom: red
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Shularp/marian_translation_test_th_ar_en/README.md b/spaces/Shularp/marian_translation_test_th_ar_en/README.md
deleted file mode 100644
index d5a6a1e631caf3e9e147999c4f756256b40098ac..0000000000000000000000000000000000000000
--- a/spaces/Shularp/marian_translation_test_th_ar_en/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Marian Translation Test Th Ar En
-emoji: 📉
-colorFrom: green
-colorTo: gray
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Sing11104/bingo-11104/README.md b/spaces/Sing11104/bingo-11104/README.md
deleted file mode 100644
index 5d6936218874c647b5d22e13ad4be7edb8936f92..0000000000000000000000000000000000000000
--- a/spaces/Sing11104/bingo-11104/README.md
+++ /dev/null
@@ -1,28 +0,0 @@
----
-title: bingo
-emoji: 😊
-colorFrom: red
-colorTo: red
-sdk: docker
-license: mit
-duplicated_from: hf4all/bingo
----
-
-
-
-# Bingo
-
-Bingo, a New Bing that lets you breathe easy.
-
-A faithful recreation of the main features of the New Bing web UI, usable inside mainland China, compatible with most Microsoft Bing AI features, and easy to self-host.
-
-
-
-[](https://hub.docker.com/repository/docker/weaigc/bingo/)
-[](https://hub.docker.com/repository/docker/weaigc/bingo/)
-[](https://github.com/weaigc/bingo/blob/main/license)
-
-For issues and feedback, please go to https://github.com/weaigc/bingo/issues
-
-
-
diff --git a/spaces/StealYourGhost/Joeythemonster-anything-midjourney-v-4-1/README.md b/spaces/StealYourGhost/Joeythemonster-anything-midjourney-v-4-1/README.md
deleted file mode 100644
index 3d299291769219de0ea751e6bc335d3d65ba6703..0000000000000000000000000000000000000000
--- a/spaces/StealYourGhost/Joeythemonster-anything-midjourney-v-4-1/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Joeythemonster Anything Midjourney V 4 1
-emoji: 🚀
-colorFrom: green
-colorTo: red
-sdk: gradio
-sdk_version: 3.19.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/SujanMidatani/resume_details_extractor/app.py b/spaces/SujanMidatani/resume_details_extractor/app.py
deleted file mode 100644
index 771365b2d68daa3cc4314f81e2a433aa83f22fc2..0000000000000000000000000000000000000000
--- a/spaces/SujanMidatani/resume_details_extractor/app.py
+++ /dev/null
@@ -1,127 +0,0 @@
-from PyPDF2 import PdfReader
-import gradio as gr
-from langchain.chat_models import ChatOpenAI
-from kor import create_extraction_chain, Object, Text
-from langchain.prompts.prompt import PromptTemplate
-# from langchain.chains import LLMChain
-# from langchain.llms import OpenAI
-
-from dotenv import load_dotenv
-
-load_dotenv()
-def gen_text(pdf_file):
- with open(pdf_file.name, "rb") as f:
- reader = PdfReader(f)
- num_pages = len(reader.pages)
- text = ""
- for page in reader.pages:
- text += page.extract_text()
- print(text)
-    constraints = context_extracter(text)
-
- return constraints
-
-
-
-# def generate_questions(resume,role='',experience=''):
-# _PROMPT_TEMPLATE = """
-# this is the resume of user:
-# {resume_details}
-
-# here is the role he want to join in :
-# {role}
-
-# Based on the following experience:
-# {experience}
-
-# What are your interview questions for the given user resume and role he want to join in with that experience?
-# generate no of questions = {questions}!
-# """
-# PROMPT = PromptTemplate(input_variables=["resume_details", "role", "experience",'questions'], template=_PROMPT_TEMPLATE)
-
-# llm1 = OpenAI(model_name="text-davinci-003", temperature=0)
-# chain = LLMChain(llm=llm1, prompt=PROMPT)
-# prompt = chain.predict_and_parse(resume_details= gen_text(resume),
-# role= role,
-# experience= experience,
-# questions=10)
-# return prompt.split('\n')
-def context_extracter(text):
-
- llm = ChatOpenAI(
- model_name="gpt-3.5-turbo-16k",
- temperature=0,
- max_tokens=2000,
- frequency_penalty=0,
- presence_penalty=0,
- top_p=1.0,
- )
- schema = Object(
- id="interviewer",
- description=(
-            "The interviewer examines the resume text and should produce a set of attributes that represent the candidate based on their resume"
-
- ),
- attributes=[
- Text(
- id="summary_or_objective",
- description="A brief overview of the candidate's professional background, skills, and career goals",
- examples=[],
- many=True,
- ),
- Text(
- id="work_experience",
- description="Details of previous employment positions, including job titles, company names, employment dates, and a description of responsibilities and achievements for each role ",
- examples=[],
- many=True,
- ),
- Text(
- id="education",
- description="Information about the candidate's educational qualifications, including degrees, certificates, and the names of institutions attended",
- examples=[],
- many=True,
- ),
- Text(
- id="skills",
- description="A section highlighting the candidate's relevant skills, such as technical skills, languages spoken, software proficiency, or specific tools used",
- examples=[],
- many=True,
- ),
- Text(
- id="achievements_or_awards",
- description="Any notable achievements, awards, or recognition received by the candidate during their education or career.",
- examples=[],
- many=True,
- ),
- Text(
- id="certifications_or_licenses",
- description="Information about any professional certifications or licenses held by the candidate that are relevant to the desired position",
- examples=[],
- many=True,
- ),
- Text(
- id="projects",
- description="Details of significant projects the candidate has worked on, including a brief description, their role, and any notable outcomes",
- examples=[],
- many=True,
- ),
- Text(
- id="publications_or_presentations",
- description=" If applicable, a list of publications or presentations the candidate has authored or delivered, including the titles, dates, and locations",
- examples=[],
- many=True,
- ),
- ],
- many=True,
- )
- # chain = LLMChain(llm=llm1, prompt=PROMPT)
- chain = create_extraction_chain(llm, schema, encoder_or_encoder_class='json')
- return chain.run(text=text)['data']
-
-k=gr.Interface(
- fn=gen_text,
- inputs=['file'],
- outputs=['json']
-)
-k.launch()
-
\ No newline at end of file
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/consoleapp.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/consoleapp.py
deleted file mode 100644
index c2bbe1888f543cb33c4c88796594bab1da297e62..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/consoleapp.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""
-Shim to maintain backwards compatibility with old IPython.consoleapp imports.
-"""
-# Copyright (c) IPython Development Team.
-# Distributed under the terms of the Modified BSD License.
-
-from warnings import warn
-
-warn("The `IPython.consoleapp` package has been deprecated since IPython 4.0."
- "You should import from jupyter_client.consoleapp instead.", stacklevel=2)
-
-from jupyter_client.consoleapp import *
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/telemetry/posthog.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/telemetry/posthog.py
deleted file mode 100644
index a20e20dd25767b2247d7d8aaa67603c212571c52..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/telemetry/posthog.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import posthog
-import logging
-import sys
-from chromadb.config import System
-from chromadb.telemetry import Telemetry, TelemetryEvent
-from overrides import override
-
-logger = logging.getLogger(__name__)
-
-
-class Posthog(Telemetry):
- def __init__(self, system: System):
- if not system.settings.anonymized_telemetry or "pytest" in sys.modules:
- posthog.disabled = True
- else:
- logger.info(
- "Anonymized telemetry enabled. See https://docs.trychroma.com/telemetry for more information."
- )
-
- posthog.project_api_key = "phc_YeUxaojbKk5KPi8hNlx1bBKHzuZ4FDtl67kH1blv8Bh"
- posthog_logger = logging.getLogger("posthog")
- # Silence posthog's logging
- posthog_logger.disabled = True
- super().__init__(system)
-
- @override
- def capture(self, event: TelemetryEvent) -> None:
- try:
- posthog.capture(
- self.user_id,
- event.name,
- {**(event.properties), "chroma_context": self.context},
- )
- except Exception as e:
- logger.error(f"Failed to send telemetry event {event.name}: {e}")
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/driver/compression.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/driver/compression.py
deleted file mode 100644
index db69ae3f040d424c9ee4811372b52b0a310a7575..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/driver/compression.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import zlib
-from abc import abstractmethod
-from typing import Union
-
-import lz4
-import lz4.frame
-import zstandard
-
-try:
- import brotli
-except ImportError:
- brotli = None
-
-
-available_compression = ['lz4', 'zstd']
-
-if brotli:
- available_compression.append('br')
-available_compression.extend(['gzip', 'deflate'])
-
-comp_map = {}
-
-
-class Compressor:
- def __init_subclass__(cls, tag: str, thread_safe: bool = True):
- comp_map[tag] = cls() if thread_safe else cls
-
- @abstractmethod
- def compress_block(self, block) -> Union[bytes, bytearray]:
- return block
-
- def flush(self):
- pass
-
-
-class GzipCompressor(Compressor, tag='gzip', thread_safe=False):
- def __init__(self, level: int = 6, wbits: int = 31):
- self.zlib_obj = zlib.compressobj(level=level, wbits=wbits)
-
- def compress_block(self, block):
- return self.zlib_obj.compress(block)
-
- def flush(self):
- return self.zlib_obj.flush()
-
-
-class Lz4Compressor(Compressor, tag='lz4', thread_safe=False):
- def __init__(self):
- self.comp = lz4.frame.LZ4FrameCompressor()
-
- def compress_block(self, block):
- output = self.comp.begin(len(block))
- output += self.comp.compress(block)
- return output + self.comp.flush()
-
-
-class ZstdCompressor(Compressor, tag='zstd'):
- def compress_block(self, block):
- return zstandard.compress(block)
-
-
-class BrotliCompressor(Compressor, tag='br'):
- def compress_block(self, block):
- return brotli.compress(block)
-
-
-null_compressor = Compressor()
-
-
-def get_compressor(compression: str) -> Compressor:
- if not compression:
- return null_compressor
- comp = comp_map[compression]
- try:
- return comp()
- except TypeError:
- return comp
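As a usage note for the module above: `get_compressor` looks a compressor up by tag and instantiates it when the class is not thread safe, and gzip-style compressors need a final `flush()` to close their stream. A small hedged example, assuming `clickhouse_connect` (plus `lz4` and `zstandard`) is installed:

```python
# Usage sketch for the compression helpers above.
from clickhouse_connect.driver.compression import get_compressor

payload = b"example block of data" * 100

for tag in ("lz4", "zstd", "gzip"):
    comp = get_compressor(tag)
    body = comp.compress_block(payload)
    tail = comp.flush() or b""  # gzip needs flush() to finalize its stream
    print(tag, len(body) + len(tail))
```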
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/computation/tensorflow_backend.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/computation/tensorflow_backend.py
deleted file mode 100644
index fc963cdb48b3617406aec40ba58e3e301e358679..0000000000000000000000000000000000000000
--- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/computation/tensorflow_backend.py
+++ /dev/null
@@ -1,287 +0,0 @@
-import typing
-from typing import TYPE_CHECKING, Callable, List, Optional, Tuple
-
-import numpy as np
-
-from docarray.computation import AbstractComputationalBackend
-from docarray.computation.abstract_numpy_based_backend import AbstractNumpyBasedBackend
-from docarray.typing import TensorFlowTensor
-from docarray.utils._internal.misc import import_library
-
-if TYPE_CHECKING:
- import tensorflow as tf # type: ignore
- import tensorflow._api.v2.experimental.numpy as tnp # type: ignore
-else:
- tf = import_library('tensorflow', raise_error=True)
- tnp = tf._api.v2.experimental.numpy
-
-
-def _unsqueeze_if_single_axis(*matrices: tf.Tensor) -> List[tf.Tensor]:
- """
- Unsqueezes tensors that only have one axis, at dim 0.
- This ensures that all outputs can be treated as matrices, not vectors.
-
- :param matrices: Matrices to be unsqueezed
- :return: List of the input matrices,
- where single axis matrices are unsqueezed at dim 0.
- """
- unsqueezed = []
- for m in matrices:
- if len(m.shape) == 1:
- unsqueezed.append(tf.expand_dims(m, axis=0))
- else:
- unsqueezed.append(m)
- return unsqueezed
-
-
-def _unsqueeze_if_scalar(t: tf.Tensor) -> tf.Tensor:
- """
- Unsqueezes tensor of a scalar, from shape () to shape (1,).
-
- :param t: tensor to unsqueeze.
- :return: unsqueezed tf.Tensor
- """
- if len(t.shape) == 0: # avoid scalar output
- t = tf.expand_dims(t, 0)
- return t
-
-
-def norm_left(t: tf.Tensor) -> TensorFlowTensor:
- return TensorFlowTensor(tensor=t)
-
-
-def norm_right(t: TensorFlowTensor) -> tf.Tensor:
- return t.tensor
-
-
-class TensorFlowCompBackend(AbstractNumpyBasedBackend[TensorFlowTensor]):
- """
- Computational backend for TensorFlow.
- """
-
- _module = tnp
- _cast_output: Callable = norm_left
- _get_tensor: Callable = norm_right
-
- @classmethod
- def to_numpy(cls, array: 'TensorFlowTensor') -> 'np.ndarray':
- return cls._get_tensor(array).numpy()
-
- @classmethod
- def none_value(cls) -> typing.Any:
- """Provide a compatible value that represents None in numpy."""
- return tf.constant(float('nan'))
-
- @classmethod
- def to_device(cls, tensor: 'TensorFlowTensor', device: str) -> 'TensorFlowTensor':
- """Move the tensor to the specified device."""
- if cls.device(tensor) == device:
- return tensor
- else:
- with tf.device(device):
- return cls._cast_output(tf.identity(cls._get_tensor(tensor)))
-
- @classmethod
- def device(cls, tensor: 'TensorFlowTensor') -> Optional[str]:
- """Return device on which the tensor is allocated."""
- return cls._get_tensor(tensor).device
-
- @classmethod
- def detach(cls, tensor: 'TensorFlowTensor') -> 'TensorFlowTensor':
- """
- Returns the tensor detached from its current graph.
-
- :param tensor: tensor to be detached
- :return: a detached tensor with the same data.
- """
- return cls._cast_output(tf.stop_gradient(cls._get_tensor(tensor)))
-
- @classmethod
- def dtype(cls, tensor: 'TensorFlowTensor') -> tf.dtypes:
- """Get the data type of the tensor."""
- d_type = cls._get_tensor(tensor).dtype
- return d_type.name
-
- @classmethod
- def minmax_normalize(
- cls,
- tensor: 'TensorFlowTensor',
- t_range: Tuple = (0.0, 1.0),
- x_range: Optional[Tuple] = None,
- eps: float = 1e-7,
- ) -> 'TensorFlowTensor':
- a, b = t_range
-
- t = tf.cast(cls._get_tensor(tensor), tf.float32)
- min_d = x_range[0] if x_range else tnp.min(t, axis=-1, keepdims=True)
- max_d = x_range[1] if x_range else tnp.max(t, axis=-1, keepdims=True)
-
-        i = (b - a) * (t - min_d) / (max_d - min_d + tf.constant(eps)) + a
-
- normalized = tnp.clip(i, *((a, b) if a < b else (b, a)))
- return cls._cast_output(tf.cast(normalized, tensor.tensor.dtype))
-
- class Retrieval(AbstractComputationalBackend.Retrieval[TensorFlowTensor]):
- """
- Abstract class for retrieval and ranking functionalities
- """
-
- @staticmethod
- def top_k(
- values: 'TensorFlowTensor',
- k: int,
- descending: bool = False,
- device: Optional[str] = None,
- ) -> Tuple['TensorFlowTensor', 'TensorFlowTensor']:
- """
- Retrieves the top k smallest values in `values`,
- and returns them alongside their indices in the input `values`.
- Can also be used to retrieve the top k largest values,
- by setting the `descending` flag.
-
- :param values: TensorFlowTensor of values to rank.
- Should be of shape (n_queries, n_values_per_query).
- Inputs of shape (n_values_per_query,) will be expanded
- to (1, n_values_per_query).
- :param k: number of values to retrieve
- :param descending: retrieve largest values instead of smallest values
- :param device: the computational device to use.
- :return: Tuple of TensorFlowTensors containing the retrieved values, and
- their indices. Both are of shape (n_queries, k)
- """
- comp_be = TensorFlowCompBackend
- if device is not None:
- values = comp_be.to_device(values, device)
-
- tf_values: tf.Tensor = comp_be._get_tensor(values)
- if len(tf_values.shape) <= 1:
- tf_values = tf.expand_dims(tf_values, axis=0)
-
- len_tf_values = (
- tf_values.shape[-1] if len(tf_values.shape) > 1 else len(tf_values)
- )
- k = min(k, len_tf_values)
-
- if not descending:
- tf_values = -tf_values
-
- result = tf.math.top_k(input=tf_values, k=k, sorted=True)
- res_values = result.values
- res_indices = result.indices
-
- if not descending:
- res_values = -result.values
-
- return comp_be._cast_output(res_values), comp_be._cast_output(res_indices)
-
- class Metrics(AbstractComputationalBackend.Metrics[TensorFlowTensor]):
- """
- Abstract base class for metrics (distances and similarities).
- """
-
- @staticmethod
- def cosine_sim(
- x_mat: 'TensorFlowTensor',
- y_mat: 'TensorFlowTensor',
- eps: float = 1e-7,
- device: Optional[str] = None,
- ) -> 'TensorFlowTensor':
- """Pairwise cosine similarities between all vectors in x_mat and y_mat.
-
- :param x_mat: tensor of shape (n_vectors, n_dim), where n_vectors is the
- number of vectors and n_dim is the number of dimensions of each example.
- :param y_mat: tensor of shape (n_vectors, n_dim), where n_vectors is the
- number of vectors and n_dim is the number of dimensions of each example.
-            :param eps: a small jitter to avoid divide by zero
- :param device: the device to use for computations.
- If not provided, the devices of x_mat and y_mat are used.
- :return: Tensor of shape (n_vectors, n_vectors) containing all pairwise
- cosine distances.
- The index [i_x, i_y] contains the cosine distance between
- x_mat[i_x] and y_mat[i_y].
- """
- comp_be = TensorFlowCompBackend
- x_mat_tf: tf.Tensor = comp_be._get_tensor(x_mat)
- y_mat_tf: tf.Tensor = comp_be._get_tensor(y_mat)
-
- with tf.device(device):
- x_mat_tf = tf.identity(x_mat_tf)
- y_mat_tf = tf.identity(y_mat_tf)
-
- x_mat_tf, y_mat_tf = _unsqueeze_if_single_axis(x_mat_tf, y_mat_tf)
-
- a_n = tf.linalg.normalize(x_mat_tf, axis=1)[1]
- b_n = tf.linalg.normalize(y_mat_tf, axis=1)[1]
- a_norm = x_mat_tf / tf.clip_by_value(
- a_n, clip_value_min=eps, clip_value_max=tf.float32.max
- )
- b_norm = y_mat_tf / tf.clip_by_value(
- b_n, clip_value_min=eps, clip_value_max=tf.float32.max
- )
- sims = tf.squeeze(tf.linalg.matmul(a_norm, tf.transpose(b_norm)))
- sims = _unsqueeze_if_scalar(sims)
-
- return comp_be._cast_output(sims)
-
- @staticmethod
- def euclidean_dist(
- x_mat: 'TensorFlowTensor',
- y_mat: 'TensorFlowTensor',
- device: Optional[str] = None,
- ) -> 'TensorFlowTensor':
-            """Pairwise Euclidean distances between all vectors in x_mat and y_mat.
-
- :param x_mat: tensor of shape (n_vectors, n_dim), where n_vectors is the
- number of vectors and n_dim is the number of dimensions of each example.
- :param y_mat: tensor of shape (n_vectors, n_dim), where n_vectors is the
- number of vectors and n_dim is the number of dimensions of each example.
-            :param device: the device to use for computations.
-                If not provided, the devices of x_mat and y_mat are used.
-            :return: Tensor of shape (n_vectors, n_vectors) containing all pairwise
-                Euclidean distances.
-                The index [i_x, i_y] contains the Euclidean distance between
-                x_mat[i_x] and y_mat[i_y].
- """
- comp_be = TensorFlowCompBackend
- x_mat_tf: tf.Tensor = comp_be._get_tensor(x_mat)
- y_mat_tf: tf.Tensor = comp_be._get_tensor(y_mat)
-
- with tf.device(device):
- x_mat_tf = tf.identity(x_mat_tf)
- y_mat_tf = tf.identity(y_mat_tf)
-
- x_mat_tf, y_mat_tf = _unsqueeze_if_single_axis(x_mat_tf, y_mat_tf)
-
- dists = tf.squeeze(tf.norm(tf.subtract(x_mat_tf, y_mat_tf), axis=-1))
- dists = _unsqueeze_if_scalar(dists)
-
- return comp_be._cast_output(dists)
-
- @staticmethod
- def sqeuclidean_dist(
- x_mat: 'TensorFlowTensor',
- y_mat: 'TensorFlowTensor',
- device: Optional[str] = None,
- ) -> 'TensorFlowTensor':
-            """Pairwise squared Euclidean distances between all vectors
-            in x_mat and y_mat.
-
- :param x_mat: tensor of shape (n_vectors, n_dim), where n_vectors is the
- number of vectors and n_dim is the number of dimensions of each
- example.
- :param y_mat: tensor of shape (n_vectors, n_dim), where n_vectors is the
- number of vectors and n_dim is the number of dimensions of each
- example.
-            :param device: the device to use for computations.
-                If not provided, the devices of x_mat and y_mat are used.
-            :return: Tensor of shape (n_vectors, n_vectors) containing all pairwise
-                squared Euclidean distances.
-                The index [i_x, i_y] contains the squared Euclidean distance between
-                x_mat[i_x] and y_mat[i_y].
- """
- dists = TensorFlowCompBackend.Metrics.euclidean_dist(x_mat, y_mat)
- squared: tf.Tensor = tf.math.square(
- TensorFlowCompBackend._get_tensor(dists)
- )
-
- return TensorFlowCompBackend._cast_output(squared)
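To show how the backend above is typically driven, here is a hedged usage sketch; it assumes a docarray installation with TensorFlow support so that `TensorFlowTensor` and `TensorFlowCompBackend` import as in the deleted module.

```python
# Usage sketch for the TensorFlow computational backend above.
import tensorflow as tf

from docarray.computation.tensorflow_backend import TensorFlowCompBackend
from docarray.typing import TensorFlowTensor

queries = TensorFlowTensor(tensor=tf.random.normal((4, 128)))
index = TensorFlowTensor(tensor=tf.random.normal((8, 128)))

# Pairwise cosine similarities, shape (4, 8).
sims = TensorFlowCompBackend.Metrics.cosine_sim(queries, index)

# Values and indices of the 3 largest similarities per query row, shape (4, 3).
values, indices = TensorFlowCompBackend.Retrieval.top_k(sims, k=3, descending=True)
print(values.tensor.shape, indices.tensor.shape)
```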
diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/core/evaluation/class_names.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/core/evaluation/class_names.py
deleted file mode 100644
index ffae816cf980ce4b03e491cc0c4298cb823797e6..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/core/evaluation/class_names.py
+++ /dev/null
@@ -1,152 +0,0 @@
-import annotator.uniformer.mmcv as mmcv
-
-
-def cityscapes_classes():
- """Cityscapes class names for external use."""
- return [
- 'road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
- 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
- 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
- 'bicycle'
- ]
-
-
-def ade_classes():
- """ADE20K class names for external use."""
- return [
- 'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ',
- 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth',
- 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car',
- 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug',
- 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe',
- 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column',
- 'signboard', 'chest of drawers', 'counter', 'sand', 'sink',
- 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path',
- 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door',
- 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table',
- 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove',
- 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar',
- 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower',
- 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver',
- 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister',
- 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van',
- 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything',
- 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent',
- 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank',
- 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake',
- 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce',
- 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen',
- 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass',
- 'clock', 'flag'
- ]
-
-
-def voc_classes():
- """Pascal VOC class names for external use."""
- return [
- 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
- 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
- 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
- 'tvmonitor'
- ]
-
-
-def cityscapes_palette():
- """Cityscapes palette for external use."""
- return [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
- [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
- [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],
- [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100],
- [0, 0, 230], [119, 11, 32]]
-
-
-def ade_palette():
- """ADE20K palette for external use."""
- return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
- [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
- [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
- [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
- [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
- [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
- [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
- [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
- [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
- [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
- [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
- [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
- [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
- [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
- [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
- [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
- [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
- [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
- [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
- [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
- [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
- [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
- [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
- [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
- [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
- [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
- [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
- [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
- [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
- [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
- [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
- [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
- [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
- [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
- [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
- [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
- [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
- [102, 255, 0], [92, 0, 255]]
-
-
-def voc_palette():
- """Pascal VOC palette for external use."""
- return [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
- [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0],
- [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128],
- [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0],
- [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]]
-
-
-dataset_aliases = {
- 'cityscapes': ['cityscapes'],
- 'ade': ['ade', 'ade20k'],
- 'voc': ['voc', 'pascal_voc', 'voc12', 'voc12aug']
-}
-
-
-def get_classes(dataset):
- """Get class names of a dataset."""
- alias2name = {}
- for name, aliases in dataset_aliases.items():
- for alias in aliases:
- alias2name[alias] = name
-
- if mmcv.is_str(dataset):
- if dataset in alias2name:
- labels = eval(alias2name[dataset] + '_classes()')
- else:
- raise ValueError(f'Unrecognized dataset: {dataset}')
- else:
- raise TypeError(f'dataset must a str, but got {type(dataset)}')
- return labels
-
-
-def get_palette(dataset):
- """Get class palette (RGB) of a dataset."""
- alias2name = {}
- for name, aliases in dataset_aliases.items():
- for alias in aliases:
- alias2name[alias] = name
-
- if mmcv.is_str(dataset):
- if dataset in alias2name:
- labels = eval(alias2name[dataset] + '_palette()')
- else:
- raise ValueError(f'Unrecognized dataset: {dataset}')
- else:
- raise TypeError(f'dataset must a str, but got {type(dataset)}')
- return labels
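A short usage sketch for the helpers above; a dataset can be referred to by any alias registered in `dataset_aliases` (e.g. `'ade'` or `'ade20k'`), and the import path below is simply the path of the deleted file:

```python
# Usage sketch for get_classes / get_palette above.
from annotator.uniformer.mmseg.core.evaluation.class_names import get_classes, get_palette

classes = get_classes('cityscapes')   # 19 Cityscapes class names
palette = get_palette('ade20k')       # 150 RGB triples for ADE20K
print(len(classes), classes[:3])
print(len(palette), palette[0])
```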
diff --git a/spaces/TH5314/newbing/src/lib/hooks/use-enter-submit.tsx b/spaces/TH5314/newbing/src/lib/hooks/use-enter-submit.tsx
deleted file mode 100644
index d66b2d3253baff164235d4ca791aae6d84721835..0000000000000000000000000000000000000000
--- a/spaces/TH5314/newbing/src/lib/hooks/use-enter-submit.tsx
+++ /dev/null
@@ -1,23 +0,0 @@
-import { useRef, type RefObject } from 'react'
-
-export function useEnterSubmit(): {
-  formRef: RefObject<HTMLFormElement>
-  onKeyDown: (event: React.KeyboardEvent<HTMLTextAreaElement>) => void
-} {
-  const formRef = useRef<HTMLFormElement>(null)
-
-  const handleKeyDown = (
-    event: React.KeyboardEvent<HTMLTextAreaElement>
-  ): void => {
- if (
- event.key === 'Enter' &&
- !event.shiftKey &&
- !event.nativeEvent.isComposing
- ) {
- formRef.current?.requestSubmit()
- event.preventDefault()
- }
- }
-
- return { formRef, onKeyDown: handleKeyDown }
-}
diff --git a/spaces/TensoraCO/code-explainer/README.md b/spaces/TensoraCO/code-explainer/README.md
deleted file mode 100644
index ffbbc7167bdd32eb776999693d5e322e88e1a9d9..0000000000000000000000000000000000000000
--- a/spaces/TensoraCO/code-explainer/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Code Explainer
-emoji: 🪞
-colorFrom: red
-colorTo: purple
-sdk: gradio
-sdk_version: 3.0.24
-app_file: app.py
-pinned: false
-license: apache-2.0
-duplicated_from: os1187/code-explainer
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Tetel/secondbing/EdgeGPT/locale.py b/spaces/Tetel/secondbing/EdgeGPT/locale.py
deleted file mode 100644
index c57dc26e143e4331af2d456fefe16c71eca10e2c..0000000000000000000000000000000000000000
--- a/spaces/Tetel/secondbing/EdgeGPT/locale.py
+++ /dev/null
@@ -1,85 +0,0 @@
-from enum import Enum
-
-try:
- from typing import Union, Literal
-except ImportError:
-    from typing import Union
-    from typing_extensions import Literal
-from typing import Optional
-
-
-class LocationHint(Enum):
- USA = {
- "locale": "en-US",
- "LocationHint": [
- {
- "country": "United States",
- "state": "California",
- "city": "Los Angeles",
- "timezoneoffset": 8,
- "countryConfidence": 8,
- "Center": {
- "Latitude": 34.0536909,
- "Longitude": -118.242766,
- },
- "RegionType": 2,
- "SourceType": 1,
- },
- ],
- }
- CHINA = {
- "locale": "zh-CN",
- "LocationHint": [
- {
- "country": "China",
- "state": "",
- "city": "Beijing",
- "timezoneoffset": 8,
- "countryConfidence": 8,
- "Center": {
- "Latitude": 39.9042,
- "Longitude": 116.4074,
- },
- "RegionType": 2,
- "SourceType": 1,
- },
- ],
- }
- EU = {
- "locale": "en-IE",
- "LocationHint": [
- {
- "country": "Norway",
- "state": "",
- "city": "Oslo",
- "timezoneoffset": 1,
- "countryConfidence": 8,
- "Center": {
- "Latitude": 59.9139,
- "Longitude": 10.7522,
- },
- "RegionType": 2,
- "SourceType": 1,
- },
- ],
- }
- UK = {
- "locale": "en-GB",
- "LocationHint": [
- {
- "country": "United Kingdom",
- "state": "",
- "city": "London",
- "timezoneoffset": 0,
- "countryConfidence": 8,
- "Center": {
- "Latitude": 51.5074,
- "Longitude": -0.1278,
- },
- "RegionType": 2,
- "SourceType": 1,
- },
- ],
- }
-
-
-LOCATION_HINT_TYPES = Optional[Union[LocationHint, Literal["USA", "CHINA", "EU", "UK"]]]
diff --git a/spaces/TheBritishLibrary/British-Library-books-genre-classifier-v2/app.py b/spaces/TheBritishLibrary/British-Library-books-genre-classifier-v2/app.py
deleted file mode 100644
index 3d795b1c5f63e0495bb91408e361f5599396bbb6..0000000000000000000000000000000000000000
--- a/spaces/TheBritishLibrary/British-Library-books-genre-classifier-v2/app.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import gradio as gr
-from transformers import pipeline
-from transformers import AutoTokenizer, AutoModelForSequenceClassification
-
-
-sample_text = [
- [
- "Poems on various subjects. Whereto is prefixed a short essay on the structure of English verse"
- ],
- [
- "Journal of a Residence in China and the neighbouring countries from 1830 to 1833. With an introductory essay by the Hon. and Rev. Baptist Wriothesley Noel. [With a map.]"
- ],
- ["The Adventures of Oliver Twist. [With plates.]"],
- ["['The Adventures of Sherlock Holmes', 'Single Works']"],
- [
- "['Coal, Iron, and Oil; or, the Practical American miner. A plain and popular work on our mines and mineral resources ... With numerous maps and engravings, etc']"
- ],
- [
- "Summer Travelling in Iceland; being the narrative of two journeys across the island ... With a chapter on Askja by E. Delmar Morgan ... Containing also a literal translation of three sagas. Maps, etc'"
- ],
- [
- "History of the Monument. With a brief account of the Great Fire of London, which it commemorates. By Charles Welch. (With illustrations and a map of Old London.)",
- ],
- ["The history and antiquities of Newbury and its environs [By E. W. Gray.]"],
- ["""A Christmas Carol"""],
-]
-
-description = """
-V2 of a British Library Books genre detection model. The interpretation interface helps show what words the model is using to make its predictions. Words highlighted in red contributed to the model being more confident about a prediction. The intensity of colour corresponds to the importance of that part of the input. The words that decrease the label confidence are highlighted in blue."""
-
-article = """
-
-# British Library Books genre detection demo
-
-This demo allows you to play with a 'genre' detection model which has been trained to predict, from the title of a book, whether it is 'fiction' or 'non-fiction'.
-
-
-The [model](https://huggingface.co/TheBritishLibrary/bl-books-genre) was trained on training data drawn from [digitised books](https://www.bl.uk/collection-guides/digitised-printed-books) at the British Library. These Books are mainly from the 19th Century.
-The demo also shows you which parts of the input the model is using most to make its prediction. The examples include titles from the BL books collection. You may notice that the model makes mistakes on short titles in particular; this can partly be explained by the title format in the original data. For example, the novel *'Vanity Fair'* by William Makepeace Thackeray
-is found in the training data as:
-
-```
-Vanity Fair. A novel without a hero ... With all the original illustrations by the author, etc
-```
-
-You can see that the model gets a bit of help with the genre here 😉. Since the model was trained for a very particular dataset and task it might not work well on titles that don't match this original corpus.
-
-## Background
-
-This model was developed as part of the [Living with Machines](https://livingwithmachines.ac.uk/) project. The process of training the model and working with the data is documented in a [tutorial](github.com/living-with-machines/genre-classification).
-
-## Model description
-
-This model is intended to predict, from the title of a book, whether it is 'fiction' or 'non-fiction'. This model was trained on data created from the [Digitised printed books (18th-19th Century)](https://www.bl.uk/collection-guides/digitised-printed-books) book collection.
-This dataset is dominated by English language books though it includes books in several other languages in much smaller numbers. You can find more information about the model [here](https://huggingface.co/BritishLibraryLabs/bl-books-genre)
-
-## Training data
-
-The model is trained on a particular collection of books digitised by the British Library. As a result, the model may do less well on titles that look different to this data. In particular, the training data, was mostly English, and mostly from the 19th Century. The model is likely to do less well with non-English languages and book titles which fall outside of the 19th Century. Since the data was derived from books catalogued by the British Library it is also possible the model will perform less well for books held by other institutions if, for example, they catalogue book titles in different ways, or have different biases in the types of books they hold. Some of the data was generated using weak supervision. You can learn more about how this was done [here](https://living-with-machines.github.io/genre-classification/04_snorkel.html)
-
-### Credits
-
->This work was partly supported by [Living with Machines](https://livingwithmachines.ac.uk/). This project, funded by the UK Research and Innovation (UKRI) Strategic Priority Fund, is a multidisciplinary collaboration delivered by the Arts and Humanities Research Council (AHRC), with The Alan Turing Institute, the British Library and the Universities of Cambridge, East Anglia, Exeter, and Queen Mary University of London.
-"""
-
-
-tokenizer = AutoTokenizer.from_pretrained("TheBritishLibrary/bl-books-genre")
-
-model = AutoModelForSequenceClassification.from_pretrained(
- "TheBritishLibrary/bl-books-genre"
-)
-classifier = pipeline("text-classification", model=model, tokenizer=tokenizer, top_k=10)
-
-
-def predict(text):
- predictions = classifier(text)
- return {pred["label"]: float(pred["score"]) for pred in predictions}
-
-
-gr.Interface(
- predict,
- inputs=gr.Textbox(label="Book title"),
- outputs="label",
- interpretation="shap",
- num_shap=10.0,
- theme="huggingface",
- examples=sample_text,
- description=description,
- article=article,
-).launch(enable_queue=True)
diff --git a/spaces/WUXIAOMO/stabilityai-stable-diffusion-2-1-test-space/app.py b/spaces/WUXIAOMO/stabilityai-stable-diffusion-2-1-test-space/app.py
deleted file mode 100644
index 0160420876923d89f2ab5fccb9f4d13725e29972..0000000000000000000000000000000000000000
--- a/spaces/WUXIAOMO/stabilityai-stable-diffusion-2-1-test-space/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/stabilityai/stable-diffusion-2-1").launch()
\ No newline at end of file
diff --git a/spaces/Y-T-G/Blur-Anything/tracker/util/range_transform.py b/spaces/Y-T-G/Blur-Anything/tracker/util/range_transform.py
deleted file mode 100644
index d5f678212d8fe0754c9250144ad1985ad32d0643..0000000000000000000000000000000000000000
--- a/spaces/Y-T-G/Blur-Anything/tracker/util/range_transform.py
+++ /dev/null
@@ -1,12 +0,0 @@
-import torchvision.transforms as transforms
-
-im_mean = (124, 116, 104)
-
-im_normalization = transforms.Normalize(
- mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
-)
-
-inv_im_trans = transforms.Normalize(
- mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225],
- std=[1 / 0.229, 1 / 0.224, 1 / 0.225],
-)
diff --git a/spaces/YUANAI/DiffspeechResearch/tasks/vocoder/hifigan.py b/spaces/YUANAI/DiffspeechResearch/tasks/vocoder/hifigan.py
deleted file mode 100644
index a07370ab84f2d5ba6b20cc37db9773c1c2879b73..0000000000000000000000000000000000000000
--- a/spaces/YUANAI/DiffspeechResearch/tasks/vocoder/hifigan.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import torch.nn.functional as F
-from torch import nn
-
-from modules.vocoder.hifigan.hifigan import HifiGanGenerator, MultiPeriodDiscriminator, MultiScaleDiscriminator, \
- generator_loss, feature_loss, discriminator_loss
-from modules.vocoder.hifigan.mel_utils import mel_spectrogram
-from modules.vocoder.hifigan.stft_loss import MultiResolutionSTFTLoss
-from tasks.vocoder.vocoder_base import VocoderBaseTask
-from utils.commons.hparams import hparams
-from utils.nn.model_utils import print_arch
-
-
-class HifiGanTask(VocoderBaseTask):
- def build_model(self):
- self.model_gen = HifiGanGenerator(hparams)
- self.model_disc = nn.ModuleDict()
- self.model_disc['mpd'] = MultiPeriodDiscriminator()
- self.model_disc['msd'] = MultiScaleDiscriminator()
- self.stft_loss = MultiResolutionSTFTLoss()
- print_arch(self.model_gen)
- if hparams['load_ckpt'] != '':
- self.load_ckpt(hparams['load_ckpt'], 'model_gen', 'model_gen', force=True, strict=True)
- self.load_ckpt(hparams['load_ckpt'], 'model_disc', 'model_disc', force=True, strict=True)
- return self.model_gen
-
- def _training_step(self, sample, batch_idx, optimizer_idx):
- mel = sample['mels']
- y = sample['wavs']
- f0 = sample['f0']
- loss_output = {}
- if optimizer_idx == 0:
- #######################
- # Generator #
- #######################
- y_ = self.model_gen(mel, f0)
- y_mel = mel_spectrogram(y.squeeze(1), hparams).transpose(1, 2)
- y_hat_mel = mel_spectrogram(y_.squeeze(1), hparams).transpose(1, 2)
- loss_output['mel'] = F.l1_loss(y_hat_mel, y_mel) * hparams['lambda_mel']
- _, y_p_hat_g, fmap_f_r, fmap_f_g = self.model_disc['mpd'](y, y_, mel)
- _, y_s_hat_g, fmap_s_r, fmap_s_g = self.model_disc['msd'](y, y_, mel)
- loss_output['a_p'] = generator_loss(y_p_hat_g) * hparams['lambda_adv']
- loss_output['a_s'] = generator_loss(y_s_hat_g) * hparams['lambda_adv']
- if hparams['use_fm_loss']:
- loss_output['fm_f'] = feature_loss(fmap_f_r, fmap_f_g)
- loss_output['fm_s'] = feature_loss(fmap_s_r, fmap_s_g)
- if hparams['use_ms_stft']:
- loss_output['sc'], loss_output['mag'] = self.stft_loss(y.squeeze(1), y_.squeeze(1))
- self.y_ = y_.detach()
- self.y_mel = y_mel.detach()
- self.y_hat_mel = y_hat_mel.detach()
- else:
- #######################
- # Discriminator #
- #######################
- y_ = self.y_
- # MPD
- y_p_hat_r, y_p_hat_g, _, _ = self.model_disc['mpd'](y, y_.detach(), mel)
- loss_output['r_p'], loss_output['f_p'] = discriminator_loss(y_p_hat_r, y_p_hat_g)
- # MSD
- y_s_hat_r, y_s_hat_g, _, _ = self.model_disc['msd'](y, y_.detach(), mel)
- loss_output['r_s'], loss_output['f_s'] = discriminator_loss(y_s_hat_r, y_s_hat_g)
- total_loss = sum(loss_output.values())
- return total_loss, loss_output
diff --git a/spaces/Yiqin/ChatVID/model/fastchat/data/inspect.py b/spaces/Yiqin/ChatVID/model/fastchat/data/inspect.py
deleted file mode 100644
index 7f63eb0bc3c29efa185306cef247b9cbd6afdfe2..0000000000000000000000000000000000000000
--- a/spaces/Yiqin/ChatVID/model/fastchat/data/inspect.py
+++ /dev/null
@@ -1,23 +0,0 @@
-"""
-Usage:
-python3 -m fastchat.data.inspect --in sharegpt_20230322_clean_lang_split.json
-"""
-import argparse
-import json
-
-import tqdm
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--in-file", type=str, required=True)
- parser.add_argument("--begin", type=int)
- args = parser.parse_args()
-
- content = json.load(open(args.in_file, "r"))
- for sample in tqdm.tqdm(content[args.begin :]):
- print(f"id: {sample['id']}")
- for conv in sample["conversations"]:
- print(conv["from"] + ": ")
- print(conv["value"])
- input()
diff --git a/spaces/Yuliang/ICON/lib/pymaf/models/maf_extractor.py b/spaces/Yuliang/ICON/lib/pymaf/models/maf_extractor.py
deleted file mode 100644
index b5ca2279b5ca470b5abc8b3c477951ffcac323a8..0000000000000000000000000000000000000000
--- a/spaces/Yuliang/ICON/lib/pymaf/models/maf_extractor.py
+++ /dev/null
@@ -1,137 +0,0 @@
-# This script is borrowed and extended from https://github.com/shunsukesaito/PIFu/blob/master/lib/model/SurfaceClassifier.py
-
-from packaging import version
-import torch
-import scipy
-import numpy as np
-import torch.nn as nn
-import torch.nn.functional as F
-
-from lib.common.config import cfg
-from lib.pymaf.utils.geometry import projection
-from lib.pymaf.core.path_config import MESH_DOWNSAMPLEING
-
-import logging
-
-logger = logging.getLogger(__name__)
-
-
-class MAF_Extractor(nn.Module):
-    ''' Mesh-aligned Feature Extractor
-
-    As discussed in the paper, we extract mesh-aligned features based on the 2D projection of the mesh vertices.
-    The features extracted from spatial feature maps will go through an MLP for dimension reduction.
-    '''
-
- def __init__(self, device=torch.device('cuda')):
- super().__init__()
-
- self.device = device
- self.filters = []
- self.num_views = 1
- filter_channels = cfg.MODEL.PyMAF.MLP_DIM
- self.last_op = nn.ReLU(True)
-
- for l in range(0, len(filter_channels) - 1):
- if 0 != l:
- self.filters.append(
- nn.Conv1d(filter_channels[l] + filter_channels[0],
- filter_channels[l + 1], 1))
- else:
- self.filters.append(
- nn.Conv1d(filter_channels[l], filter_channels[l + 1], 1))
-
- self.add_module("conv%d" % l, self.filters[l])
-
- self.im_feat = None
- self.cam = None
-
- # downsample SMPL mesh and assign part labels
- # from https://github.com/nkolot/GraphCMR/blob/master/data/mesh_downsampling.npz
- smpl_mesh_graph = np.load(MESH_DOWNSAMPLEING,
- allow_pickle=True,
- encoding='latin1')
-
- A = smpl_mesh_graph['A']
- U = smpl_mesh_graph['U']
- D = smpl_mesh_graph['D'] # shape: (2,)
-
- # downsampling
- ptD = []
- for i in range(len(D)):
- d = scipy.sparse.coo_matrix(D[i])
- i = torch.LongTensor(np.array([d.row, d.col]))
- v = torch.FloatTensor(d.data)
- ptD.append(torch.sparse.FloatTensor(i, v, d.shape))
-
- # downsampling mapping from 6890 points to 431 points
- # ptD[0].to_dense() - Size: [1723, 6890]
-        # ptD[1].to_dense() - Size: [431, 1723]
- Dmap = torch.matmul(ptD[1].to_dense(),
- ptD[0].to_dense()) # 6890 -> 431
- self.register_buffer('Dmap', Dmap)
-
- def reduce_dim(self, feature):
- '''
- Dimension reduction by multi-layer perceptrons
- :param feature: list of [B, C_s, N] point-wise features before dimension reduction
-        :return: [B, C_p x N] concatenation of point-wise features after dimension reduction
- '''
- y = feature
- tmpy = feature
- for i, f in enumerate(self.filters):
- y = self._modules['conv' +
- str(i)](y if i == 0 else torch.cat([y, tmpy], 1))
- if i != len(self.filters) - 1:
- y = F.leaky_relu(y)
- if self.num_views > 1 and i == len(self.filters) // 2:
- y = y.view(-1, self.num_views, y.shape[1],
- y.shape[2]).mean(dim=1)
- tmpy = feature.view(-1, self.num_views, feature.shape[1],
- feature.shape[2]).mean(dim=1)
-
- y = self.last_op(y)
-
- y = y.view(y.shape[0], -1)
- return y
-
- def sampling(self, points, im_feat=None, z_feat=None):
- '''
- Given 2D points, sample the point-wise features for each point,
- the dimension of point-wise features will be reduced from C_s to C_p by MLP.
- Image features should be pre-computed before this call.
- :param points: [B, N, 2] image coordinates of points
- :im_feat: [B, C_s, H_s, W_s] spatial feature maps
-        :return: [B, C_p x N] concatenation of point-wise features after dimension reduction
- '''
- if im_feat is None:
- im_feat = self.im_feat
-
- batch_size = im_feat.shape[0]
-
- if version.parse(torch.__version__) >= version.parse('1.3.0'):
- # Default grid_sample behavior has changed to align_corners=False since 1.3.0.
- point_feat = torch.nn.functional.grid_sample(
- im_feat, points.unsqueeze(2), align_corners=True)[..., 0]
- else:
- point_feat = torch.nn.functional.grid_sample(
- im_feat, points.unsqueeze(2))[..., 0]
-
- mesh_align_feat = self.reduce_dim(point_feat)
- return mesh_align_feat
-
- def forward(self, p, s_feat=None, cam=None, **kwargs):
- ''' Returns mesh-aligned features for the 3D mesh points.
-
- Args:
- p (tensor): [B, N_m, 3] mesh vertices
- s_feat (tensor): [B, C_s, H_s, W_s] spatial feature maps
- cam (tensor): [B, 3] camera
- Return:
- mesh_align_feat (tensor): [B, C_p x N_m] mesh-aligned features
- '''
- if cam is None:
- cam = self.cam
- p_proj_2d = projection(p, cam, retain_z=False)
- mesh_align_feat = self.sampling(p_proj_2d, s_feat)
- return mesh_align_feat
diff --git a/spaces/abdvl/datahub_qa_bot/docs/what/relationship.md b/spaces/abdvl/datahub_qa_bot/docs/what/relationship.md
deleted file mode 100644
index 1908bbd6ce75f0a76e65e92c84a8fa375d16b21c..0000000000000000000000000000000000000000
--- a/spaces/abdvl/datahub_qa_bot/docs/what/relationship.md
+++ /dev/null
@@ -1,106 +0,0 @@
-# What is a relationship?
-
-A relationship is a named association between exactly two [entities](entity.md), a source and a destination.
-
-
-
-From the above graph, a `Group` entity can be linked to a `User` entity via a `HasMember` relationship.
-Note that the name of the relationship reflects the direction, i.e. pointing from `Group` to `User`.
-This is because the actual metadata aspect holding this information is associated with `Group`, rather than `User`.
-Had the direction been reversed, the relationship would have been named `IsMemberOf` instead.
-See [Direction of Relationships](#direction-of-relationships) for more discussions on relationship directionality.
-A specific instance of a relationship, e.g. `urn:li:corpGroup:group1` has a member `urn:li:corpuser:user1`,
-corresponds to an edge in the metadata graph.
-
-Similar to an entity, a relationship can also be associated with optional attributes that are derived from the metadata.
-For example, from the `Membership` metadata aspect shown below, we’re able to derive the `HasMember` relationship that links a specific `Group` to a specific `User`. We can also include additional attributes on the relationship, e.g. importance, which corresponds to the position of the specific member in the original membership array. This allows complex graph queries that traverse only relationships matching certain criteria, e.g. "return only the top-5 most important members of this group."
-Similar to the entity attributes, relationship attributes should only be added based on the expected query patterns to reduce the indexing cost.
-
-```
-namespace: com.linkedin.group
-
-import com.linkedin.common.AuditStamp
-import com.linkedin.common.CorpuserUrn
-
-/**
- * The membership metadata for a group
- */
-record Membership {
-
- /** Audit stamp for the last change */
- modified: AuditStamp
-
- /** Admin of the group */
- admin: CorpuserUrn
-
- /** Members of the group, ordered in descending importance */
- members: array[CorpuserUrn]
-}
-```
-
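To make the derivation described above concrete, here is a small illustrative sketch, in Python, of turning a `Membership` aspect into `HasMember` edges carrying an `importance` attribute. The dict layout, URNs, and function are hypothetical examples for explanation only, not DataHub APIs.

```python
# Hypothetical illustration only: derive HasMember edges from a Membership aspect.
membership_aspect = {
    "urn": "urn:li:corpGroup:group1",  # the Group that owns the aspect
    "members": ["urn:li:corpuser:user1", "urn:li:corpuser:user2"],
}

def derive_has_member(aspect):
    """Emit one HasMember edge per member; array position becomes importance."""
    for importance, member_urn in enumerate(aspect["members"]):
        yield {
            "source": aspect["urn"],    # points from the Group...
            "destination": member_urn,  # ...to the User, matching the HasMember direction
            "importance": importance,   # 0 = most important member
        }

edges = list(derive_has_member(membership_aspect))
```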
-Relationships are meant to be "entity-neutral". In other words, one would expect to use the same `OwnedBy` relationship to link a `Dataset` to a `User` and to link a `Dashboard` to a `User`. As Pegasus doesn’t allow typing a field using multiple URNs (because they’re all essentially strings), we resort to using generic URN type for the source and destination.
-We also introduce a `@pairings` [annotation](https://linkedin.github.io/rest.li/pdl_migration#shorthand-for-custom-properties) to limit the allowed source and destination URN types.
-
-While it’s possible to model relationships in rest.li as [association resources](https://linkedin.github.io/rest.li/modeling/modeling#association), which often get stored as mapping tables, it is far more common to model them as "foreign key" fields in a metadata aspect. For instance, the `Ownership` aspect is likely to contain an array of owner’s corpuser URNs.
-
-Below is an example of how a relationship is modeled in PDL. Note that:
-1. As the `source` and `destination` are of generic URN type, we’re able to factor them out to a common `BaseRelationship` model.
-2. Each model is expected to have a `@pairings` annotation that is an array of all allowed source-destination URN pairs.
-3. Unlike entity attributes, there’s no requirement on making all relationship attributes optional since relationships do not support partial updates.
-
-```
-namespace com.linkedin.metadata.relationship
-
-import com.linkedin.common.Urn
-
-/**
- * Common fields that apply to all relationships
- */
-record BaseRelationship {
-
- /**
- * Urn for the source of the relationship
- */
- source: Urn
-
- /**
- * Urn for the destination of the relationship
- */
- destination: Urn
-}
-```
-
-```
-namespace com.linkedin.metadata.relationship
-
-/**
- * Data model for a has-member relationship
- */
-@pairings = [ {
- "destination" : "com.linkedin.common.urn.CorpGroupUrn",
- "source" : "com.linkedin.common.urn.CorpUserUrn"
-} ]
-record HasMembership includes BaseRelationship
-{
- /**
- * The importance of the membership
- */
- importance: int
-}
-```
-
-## Direction of Relationships
-
-As relationships are modeled as directed edges between nodes, it’s natural to ask which way they should point,
-or whether there should be edges going both ways. The answer is, "it doesn’t really matter." It’s more an aesthetic choice than a technical one.
-
-For one, the actual direction doesn’t really impact the execution of graph queries. Most graph DBs are fully capable of traversing edges in reverse direction efficiently.
-
-That being said, generally there’s a more "natural way" to specify the direction of a relationship, which closely relates to how the metadata is stored. For example, the membership information for an LDAP group is generally stored as a list in the group’s metadata. As a result, it’s more natural to model a `HasMember` relationship that points from a group to a member, instead of an `IsMemberOf` relationship pointing from a member to a group.
-
-Since all relationships are explicitly declared, it’s fairly easy for a user to discover what relationships are available and their directionality by inspecting
-the [relationships directory](../../metadata-models/src/main/pegasus/com/linkedin/metadata/relationship). It’s also possible to provide a UI for the catalog of entities and relationships for analysts who are interested in building complex graph queries to gain insights into the metadata.
-
-## High Cardinality Relationships
-
-See [this doc](../advanced/high-cardinality.md) for suggestions on how to best model relationships with high cardinality.
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/visualization/color.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/visualization/color.py
deleted file mode 100644
index 9041e0e6b7581c3356795d6a3c5e84667c88f025..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/visualization/color.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from enum import Enum
-
-import numpy as np
-
-from annotator.uniformer.mmcv.utils import is_str
-
-
-class Color(Enum):
- """An enum that defines common colors.
-
- Contains red, green, blue, cyan, yellow, magenta, white and black.
- """
- red = (0, 0, 255)
- green = (0, 255, 0)
- blue = (255, 0, 0)
- cyan = (255, 255, 0)
- yellow = (0, 255, 255)
- magenta = (255, 0, 255)
- white = (255, 255, 255)
- black = (0, 0, 0)
-
-
-def color_val(color):
- """Convert various input to color tuples.
-
- Args:
- color (:obj:`Color`/str/tuple/int/ndarray): Color inputs
-
- Returns:
- tuple[int]: A tuple of 3 integers indicating BGR channels.
- """
- if is_str(color):
- return Color[color].value
- elif isinstance(color, Color):
- return color.value
- elif isinstance(color, tuple):
- assert len(color) == 3
- for channel in color:
- assert 0 <= channel <= 255
- return color
- elif isinstance(color, int):
- assert 0 <= color <= 255
- return color, color, color
- elif isinstance(color, np.ndarray):
- assert color.ndim == 1 and color.size == 3
- assert np.all((color >= 0) & (color <= 255))
- color = color.astype(np.uint8)
- return tuple(color)
- else:
- raise TypeError(f'Invalid type for color: {type(color)}')
diff --git a/spaces/abidlabs/docquery/app.py b/spaces/abidlabs/docquery/app.py
deleted file mode 100644
index 4289ceac77757d87913e92789cba4ca85035b1af..0000000000000000000000000000000000000000
--- a/spaces/abidlabs/docquery/app.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import gradio as gr
-from transformers import pipeline
-
-nlp = pipeline(
- "document-question-answering",
- model="impira/layoutlm-document-qa",
-)
-
-def predict(img, text):
- prediction = nlp(
- img,
- text
- )
- return prediction[0]['answer']
-
-gr.Interface(predict, [gr.Image(type="filepath"), gr.Textbox(label="Question")], gr.Textbox()).launch()
\ No newline at end of file
diff --git a/spaces/adityapathakk/crop-health/README.md b/spaces/adityapathakk/crop-health/README.md
deleted file mode 100644
index e61d25795ed80ffd9616d60dcc4a9040fcea1096..0000000000000000000000000000000000000000
--- a/spaces/adityapathakk/crop-health/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Crop Health
-emoji: 🏢
-colorFrom: purple
-colorTo: pink
-sdk: gradio
-sdk_version: 2.8.12
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/akhaliq/Real-ESRGAN/realesrgan/__init__.py b/spaces/akhaliq/Real-ESRGAN/realesrgan/__init__.py
deleted file mode 100644
index bfea78f284116dee22510d4aa91f9e44afb7d472..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/Real-ESRGAN/realesrgan/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# flake8: noqa
-from .archs import *
-from .data import *
-from .models import *
-from .utils import *
-#from .version import *
diff --git a/spaces/akhaliq/SummerTime/model/third_party/HMNet/Models/Networks/MeetingNet_Transformer.py b/spaces/akhaliq/SummerTime/model/third_party/HMNet/Models/Networks/MeetingNet_Transformer.py
deleted file mode 100644
index f4e3e33c18b65e84a7b360aa1c5267051a586916..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/SummerTime/model/third_party/HMNet/Models/Networks/MeetingNet_Transformer.py
+++ /dev/null
@@ -1,1528 +0,0 @@
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT license.
-
-import copy
-import math
-import numpy as np
-import random
-import time
-import torch
-from torch.autograd import Variable
-from torch.distributions import Categorical
-import torch.nn as nn
-import torch.nn.init as init
-import torch.nn.functional as F
-from torch.nn.parameter import Parameter
-from model.third_party.HMNet.Models.Networks.Layers import dropout, set_seq_dropout
-from model.third_party.HMNet.Models.Networks.Transformer import (
- EncoderBlock,
- LayerNorm,
- Embedder,
- Splitter,
- Attention,
- MLP,
-)
-from model.third_party.HMNet.ThirdParty.Huggingface.Transformers.src.transformers import (
- tokenization_transfo_xl,
-)
-from model.third_party.HMNet.ThirdParty.Huggingface.Transformers.src.transformers.modeling_encoder_decoder import (
- calc_banned_ngram_tokens,
- calc_banned_bad_words_ids,
- top_k_top_p_filtering,
- BeamHypotheses,
-)
-import sys
-import os
-import logging
-
-# Module-level logger, used below when falling back to eos_token_id for a missing pad_token_id.
-logger = logging.getLogger(__name__)
-
-# These two dicts are adapted from SpaCy 2.3.1, since HMNet's embedding for POS and ENT is fixed
-POS = {
- "": 0,
- "$": 1,
- "''": 2,
- ",": 3,
- "-LRB-": 4,
- "-RRB-": 5,
- ".": 6,
- ":": 7,
- "ADD": 8,
- "AFX": 9,
- "CC": 10,
- "CD": 11,
- "DT": 12,
- "EX": 13,
- "FW": 14,
- "HYPH": 15,
- "IN": 16,
- "JJ": 17,
- "JJR": 18,
- "JJS": 19,
- "LS": 20,
- "MD": 21,
- "NFP": 22,
- "NN": 23,
- "NNP": 24,
- "NNPS": 25,
- "NNS": 26,
- "PDT": 27,
- "POS": 28,
- "PRP": 29,
- "PRP$": 30,
- "RB": 31,
- "RBR": 32,
- "RBS": 33,
- "RP": 34,
- "SYM": 35,
- "TO": 36,
- "UH": 37,
- "VB": 38,
- "VBD": 39,
- "VBG": 40,
- "VBN": 41,
- "VBP": 42,
- "VBZ": 43,
- "WDT": 44,
- "WP": 45,
- "WP$": 46,
- "WRB": 47,
- "XX": 48,
- "_SP": 49,
- "``": 50,
-}
-ENT = {
- "": 0,
- "B-ORG": 1,
- "B-DATE": 2,
- "B-PERSON": 3,
- "B-GPE": 4,
- "B-MONEY": 5,
- "B-CARDINAL": 6,
- "B-NORP": 7,
- "B-PERCENT": 8,
- "B-WORK_OF_ART": 9,
- "B-LOC": 10,
- "B-TIME": 11,
- "B-QUANTITY": 12,
- "B-FAC": 13,
- "B-EVENT": 14,
- "B-ORDINAL": 15,
- "B-PRODUCT": 16,
- "B-LAW": 17,
- "B-LANGUAGE": 18,
- "I-ORG": 19,
- "I-DATE": 20,
- "I-PERSON": 21,
- "I-GPE": 22,
- "I-MONEY": 23,
- "I-CARDINAL": 24,
- "I-NORP": 25,
- "I-PERCENT": 26,
- "I-WORK_OF_ART": 27,
- "I-LOC": 28,
- "I-TIME": 29,
- "I-QUANTITY": 30,
- "I-FAC": 31,
- "I-EVENT": 32,
- "I-ORDINAL": 33,
- "I-PRODUCT": 34,
- "I-LAW": 35,
- "I-LANGUAGE": 36,
- "L-ORG": 37,
- "L-DATE": 38,
- "L-PERSON": 39,
- "L-GPE": 40,
- "L-MONEY": 41,
- "L-CARDINAL": 42,
- "L-NORP": 43,
- "L-PERCENT": 44,
- "L-WORK_OF_ART": 45,
- "L-LOC": 46,
- "L-TIME": 47,
- "L-QUANTITY": 48,
- "L-FAC": 49,
- "L-EVENT": 50,
- "L-ORDINAL": 51,
- "L-PRODUCT": 52,
- "L-LAW": 53,
- "L-LANGUAGE": 54,
- "U-ORG": 55,
- "U-DATE": 56,
- "U-PERSON": 57,
- "U-GPE": 58,
- "U-MONEY": 59,
- "U-CARDINAL": 60,
- "U-NORP": 61,
- "U-PERCENT": 62,
- "U-WORK_OF_ART": 63,
- "U-LOC": 64,
- "U-TIME": 65,
- "U-QUANTITY": 66,
- "U-FAC": 67,
- "U-EVENT": 68,
- "U-ORDINAL": 69,
- "U-PRODUCT": 70,
- "U-LAW": 71,
- "U-LANGUAGE": 72,
- "O": 73,
-}
-
-
-class MeetingNet_Transformer(nn.Module):
- def __init__(self, opt):
- super(MeetingNet_Transformer, self).__init__()
-
- self.opt = opt
- self.use_cuda = self.opt["cuda"] == True
- self.config = {}
-
- # load tokenizer
- self.tokenizer_class = getattr(tokenization_transfo_xl, opt["PRE_TOKENIZER"])
- self.pretrained_tokenizer_path = os.path.join(
- opt["datadir"], opt["PRE_TOKENIZER_PATH"]
- )
- if not os.path.isdir(self.pretrained_tokenizer_path):
- """
- This if-else statement makes sure the pre-trained tokenizer exists
- If it does not exist, it assumes the input string is the HuggingFace tokenizer name,
- and downloads it from their website.
- """
- self.pretrained_tokenizer_path = opt["PRE_TOKENIZER_PATH"]
- else:
- print("Loading Tokenizer from {}...".format(self.pretrained_tokenizer_path))
-
- # here is a simple workaround to make sure all special tokens are not None
- self.tokenizer = self.tokenizer_class.from_pretrained(
- self.pretrained_tokenizer_path
- )
- special_tokens_tuple_list = [
- ("eos_token", 128),
- ("unk_token", 129),
- ("pad_token", 130),
- ("bos_token", 131),
- ]
-
- for special_token_name, special_token_id_offset in special_tokens_tuple_list:
- if getattr(self.tokenizer, special_token_name) == None:
- setattr(
- self.tokenizer,
- special_token_name,
- self.tokenizer.convert_ids_to_tokens(
- len(self.tokenizer) - special_token_id_offset
- ),
- )
- self.config[special_token_name] = self.tokenizer.convert_ids_to_tokens(
- len(self.tokenizer) - special_token_id_offset
- )
- self.config[special_token_name + "_id"] = (
- len(self.tokenizer) - special_token_id_offset
- )
-
- self.vocab_size = self.tokenizer.vocab_size
- opt["vocab_size"] = self.vocab_size
- self.role_size = int(opt["ROLE_SIZE"])
- vocab_dim = int(opt["VOCAB_DIM"])
- role_dim = int(opt["ROLE_DIM"])
- opt["transformer_embed_dim"] = vocab_dim
- embed = nn.Embedding(
- self.vocab_size, vocab_dim, padding_idx=self.tokenizer.pad_token_id
- )
- nn.init.normal_(embed.weight, std=0.02)
- embedder = Embedder(opt, embed)
- role_embed = nn.Embedding(self.role_size, role_dim, padding_idx=0)
-
- self.encoder = Encoder(
- opt, self.vocab_size, vocab_dim, role_dim, embedder, role_embed
- )
- self.decoder = Decoder(
- opt,
- vocab_dim,
- self.vocab_size,
- embedder,
- self.encoder.token_transformer_dim,
- self.encoder.sent_transformer_dim,
- )
-
- if "PYLEARN_MODEL" in self.opt:
- self.from_pretrained(os.path.join(opt["datadir"], opt["PYLEARN_MODEL"]))
-
- def save_pretrained(self, save_dir):
- network_state = dict([(k, v) for k, v in self.state_dict().items()])
- params = {
- "state_dict": {"network": network_state},
- "config": self.opt,
- }
- torch.save(params, os.path.join(save_dir, "model.pt"))
-
- def from_pretrained(self, load_dir):
- checkpoint = torch.load(
- os.path.join(load_dir, "model.pt"),
- map_location=torch.device("cuda", self.opt["local_rank"])
- if self.use_cuda
- else "cpu",
- )
- state_dict = checkpoint["state_dict"]
-
- self.load_state_dict(state_dict["network"])
-
- return self
-
- def get_training_parameters(self):
- return [p for p in self.parameters() if p.requires_grad]
-
- def forward(self, batch, beam_search=False, max_sent_len=None):
- if beam_search:
- # return self.beam_search(batch, max_sent_len)
- return self.generate(batch, max_sent_len)
-
- outputs = self._forward(**batch)
- vocab_logprob = outputs[0]
-
- # assume all encoder-decoder model input has BOS and EOS
- # otherwise the loss will be ill-defined
- return vocab_logprob
-
- """
-    Input:
-        encoder_input_ids = 1 * num_turns * x_len (word_ids)
-        encoder_input_roles = 1 * num_turns (role_ids)
-        encoder_input_pos = 1 * num_turns * x_len (pos_ids)
-        encoder_input_ent = 1 * num_turns * x_len (ent_ids)
-        decoder_input_ids = 1 * y_len (word_ids)
-    Output:
-        vocab_logprob = 1 x y_len x vocab_size
- """
-
- def _forward(self, **kwargs):
-
- encoder_input_ids = kwargs.pop("encoder_input_ids")
- encoder_input_roles = kwargs.pop("encoder_input_roles")
- encoder_input_pos = kwargs.pop("encoder_input_pos")
- encoder_input_ent = kwargs.pop("encoder_input_ent")
- decoder_input_ids = kwargs.pop("decoder_input_ids")
-
- token_encoder_outputs, sent_encoder_outputs = self.encoder(
- encoder_input_ids, encoder_input_roles, encoder_input_pos, encoder_input_ent
- )
- vocab_logprob = self.decoder(
- token_encoder_outputs, sent_encoder_outputs, decoder_input_ids
- )
- return vocab_logprob, (token_encoder_outputs, sent_encoder_outputs)
-
- def generate(self, batch, max_sent_len):
- self.eval()
- self.beam_width = int(self.opt["BEAM_WIDTH"])
-
- input_ids = batch["encoder_input_ids"]
- input_roles = batch["encoder_input_roles"]
- input_pos = batch["encoder_input_pos"]
- input_ent = batch["encoder_input_ent"]
-
- batch_size = input_ids.shape[0]
-
- num_return_sequences = self.opt.get("NUM_RETURN_SEQUENCES", 1)
- outputs = self._generate(
- input_ids=input_ids,
- input_roles=input_roles,
- input_pos=input_pos,
- input_ent=input_ent,
- min_length=self.opt.get("MIN_GEN_LENGTH", None),
- max_length=max_sent_len,
- num_beams=self.beam_width,
- bad_words_ids=None,
- bos_token_id=self.tokenizer.bos_token_id,
- decoder_start_token_id=self.tokenizer.bos_token_id,
- eos_token_id=self.tokenizer.eos_token_id,
- pad_token_id=self.tokenizer.pad_token_id,
- do_sample=self.opt.get("DO_SAMPLE", False),
- top_k=self.opt.get("TOP_K", 50),
- top_p=self.opt.get("TOP_P", 1),
- repetition_penalty=self.opt.get("REPETITION_PENALTY", 1.0),
- length_penalty=self.opt.get("LENGTH_PENALTY", 1.0),
- no_repeat_ngram_size=self.opt.get("NO_REPEAT_NGRAM_SIZE", 3),
- num_return_sequences=num_return_sequences,
- )
-
- sents = []
- outputs = outputs.view(outputs.shape[0], num_return_sequences, -1)
-
- for idx in range(batch_size):
- # TODO: use real inference scores
- candidates = [
- (self.tokenizer.convert_ids_to_tokens(outputs[idx, i, :]), 0.0)
- for i in range(num_return_sequences)
- ]
- sents.append(candidates)
-
- return sents
-
- def prepare_inputs_for_generation(self, input_ids, past, attention_mask, **kwargs):
- assert past is not None, "past has to be defined for encoder_outputs"
-
- # first step
- if type(past) is tuple:
- encoder_outputs = past
- else:
- encoder_outputs = (past,)
-
- return {
- "decoder_input_ids": input_ids,
- "token_encoder_outputs": encoder_outputs[0],
- "sent_encoder_outputs": encoder_outputs[1],
- }
-
- def prepare_scores_for_generation(self, scores, **kwargs):
- return scores
-
- def enforce_repetition_penalty_(
- self, lprobs, batch_size, num_beams, prev_output_tokens, repetition_penalty
- ):
- """repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)."""
- for i in range(batch_size * num_beams):
- for previous_token in set(prev_output_tokens[i].tolist()):
- # if score < 0 then repetition penalty has to multiplied to reduce the previous token probability
- if lprobs[i, previous_token] < 0:
- lprobs[i, previous_token] *= repetition_penalty
- else:
- lprobs[i, previous_token] /= repetition_penalty
-
- @torch.no_grad()
- def _generate(
- self,
- input_ids=None,
- input_roles=None,
- input_pos=None,
- input_ent=None,
- max_length=None,
- min_length=None,
- do_sample=None,
- early_stopping=False,
- num_beams=None,
- temperature=1.0,
- top_k=None,
- top_p=None,
- repetition_penalty=None,
- bad_words_ids=None,
- bos_token_id=None,
- pad_token_id=None,
- eos_token_id=None,
- length_penalty=None,
- no_repeat_ngram_size=None,
- num_return_sequences=None,
- attention_mask=None,
- decoder_start_token_id=None,
- ):
- r"""Generates sequences for models with a LM head. The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.
-
- Adapted in part from `Facebook's XLM beam search code`_.
-
- .. _`Facebook's XLM beam search code`:
- https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529
-
-
- Parameters:
-
- input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)`
- The sequence used as a prompt for the generation. If `None` the method initializes
- it as an empty `torch.LongTensor` of shape `(1,)`.
-
- max_length: (`optional`) int
- The max length of the sequence to be generated. Between `min_length` and infinity. Default to 20.
-
- min_length: (`optional`) int
- The min length of the sequence to be generated. Between 0 and infinity. Default to 0.
-
- do_sample: (`optional`) bool
- If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.
-
- early_stopping: (`optional`) bool
- if set to `True` beam search is stopped when at least `num_beams` sentences finished per batch. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.
-
- num_beams: (`optional`) int
- Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1.
-
- temperature: (`optional`) float
- The value used to module the next token probabilities. Must be strictly positive. Default to 1.0.
-
- top_k: (`optional`) int
- The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.
-
- top_p: (`optional`) float
- The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.
-
- repetition_penalty: (`optional`) float
- The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0.
-
- pad_token_id: (`optional`) int
-                Padding token. Defaults to the model-specific pad_token_id, or None if it does not exist.
-
- bos_token_id: (`optional`) int
- BOS token. Defaults to `bos_token_id` as defined in the models config.
-
- eos_token_id: (`optional`) int
- EOS token. Defaults to `eos_token_id` as defined in the models config.
-
- length_penalty: (`optional`) float
- Exponential penalty to the length. Default to 1.
-
- no_repeat_ngram_size: (`optional`) int
- If set to int > 0, all ngrams of size `no_repeat_ngram_size` can only occur once.
- bad_words_ids: (`optional`) list of lists of int
- `bad_words_ids` contains tokens that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use `tokenizer.encode(bad_word, add_prefix_space=True)`.
-
- num_return_sequences: (`optional`) int
- The number of independently computed returned sequences for each element in the batch. Default to 1.
-
- attention_mask (`optional`) obj: `torch.LongTensor` of same shape as `input_ids`
- Mask to avoid performing attention on padding token indices.
- Mask values selected in ``[0, 1]``:
- ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
- Defaults to `None`.
-
- `What are attention masks? <../glossary.html#attention-mask>`__
-
- decoder_start_token_id=None: (`optional`) int
- If an encoder-decoder model starts decoding with a different token than BOS.
- Defaults to `None` and is changed to `BOS` later.
-
- Return:
-
- output: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`
- sequence_length is either equal to max_length or shorter if all batches finished early due to the `eos_token_id`
-
- Examples::
-
- tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
- model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
- outputs = model.generate(max_length=40) # do greedy decoding
- print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
-
- tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer
- model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.
- input_context = 'The dog'
- input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
- outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'
- for i in range(3): # 3 output sequences were generated
- print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
-
- tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
- model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
- input_context = 'The dog'
- input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
-            outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3)  # generate 3 independent sequences by sampling
- for i in range(3): # 3 output sequences were generated
- print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
-
- tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer
- model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache.
- input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl
- input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
- outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences
- print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
-
- tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer
- model = AutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from S3 and cache.
-            input_context = 'My cute dog'
- bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]
- input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
- outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated
- """
-
- max_length = max_length if max_length is not None else self.config.max_length
- min_length = min_length if min_length is not None else self.config.min_length
- do_sample = do_sample if do_sample is not None else self.config.do_sample
- early_stopping = (
- early_stopping if early_stopping is not None else self.config.early_stopping
- )
- num_beams = num_beams if num_beams is not None else self.config.num_beams
- temperature = (
- temperature if temperature is not None else self.config.temperature
- )
- top_k = top_k if top_k is not None else self.config.top_k
- top_p = top_p if top_p is not None else self.config.top_p
- repetition_penalty = (
- repetition_penalty
- if repetition_penalty is not None
- else self.config.repetition_penalty
- )
- bos_token_id = (
- bos_token_id if bos_token_id is not None else self.config.bos_token_id
- )
- pad_token_id = (
- pad_token_id if pad_token_id is not None else self.config.pad_token_id
- )
- eos_token_id = (
- eos_token_id if eos_token_id is not None else self.config.eos_token_id
- )
- length_penalty = (
- length_penalty if length_penalty is not None else self.config.length_penalty
- )
- no_repeat_ngram_size = (
- no_repeat_ngram_size
- if no_repeat_ngram_size is not None
- else self.config.no_repeat_ngram_size
- )
- bad_words_ids = bad_words_ids
- num_return_sequences = (
- num_return_sequences
- if num_return_sequences is not None
- else self.config.num_return_sequences
- )
- decoder_start_token_id = (
- decoder_start_token_id
- if decoder_start_token_id is not None
- else self.config.decoder_start_token_id
- )
-
- if input_ids is not None:
-            batch_size = input_ids.shape[0]  # overridden by the input batch_size
- else:
- batch_size = 1
-
- assert (
- isinstance(max_length, int) and max_length > 0
- ), "`max_length` should be a strictly positive integer."
- assert (
- isinstance(min_length, int) and min_length >= 0
- ), "`min_length` should be a positive integer."
- assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
- assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean."
- assert (
- isinstance(num_beams, int) and num_beams > 0
- ), "`num_beams` should be a strictly positive integer."
- assert temperature > 0, "`temperature` should be strictly positive."
- assert (
- isinstance(top_k, int) and top_k >= 0
- ), "`top_k` should be a positive integer."
- assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
- assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
- assert input_ids is not None or (
- isinstance(bos_token_id, int) and bos_token_id >= 0
- ), "If input_ids is not defined, `bos_token_id` should be a positive integer."
- assert pad_token_id is None or (
- isinstance(pad_token_id, int) and (pad_token_id >= 0)
- ), "`pad_token_id` should be a positive integer."
- assert (eos_token_id is None) or (
- isinstance(eos_token_id, int) and (eos_token_id >= 0)
- ), "`eos_token_id` should be a positive integer."
- assert length_penalty > 0, "`length_penalty` should be strictly positive."
- assert (
- isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0
- ), "`no_repeat_ngram_size` should be a positive integer."
- assert (
- isinstance(num_return_sequences, int) and num_return_sequences > 0
- ), "`num_return_sequences` should be a strictly positive integer."
- assert (
- bad_words_ids is None
- or isinstance(bad_words_ids, list)
- and isinstance(bad_words_ids[0], list)
- ), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated"
-
- if input_ids is None:
- assert isinstance(bos_token_id, int) and bos_token_id >= 0, (
- "you should either supply a context to complete as `input_ids` input "
- "or a `bos_token_id` (integer >= 0) as a first token to start the generation."
- )
- input_ids = torch.full(
- (batch_size, 1),
- bos_token_id,
- dtype=torch.long,
- device=next(self.parameters()).device,
- )
- else:
- assert (
- input_ids.dim() == 3
-            ), "Input prompt should be of shape (batch_size, num_turns, sequence_length)."
-
- # not allow to duplicate outputs when greedy decoding
- if do_sample is False:
- if num_beams == 1:
- # no_beam_search greedy generation conditions
- assert (
- num_return_sequences == 1
- ), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1"
-
- else:
- # beam_search greedy generation conditions
- assert (
- num_beams >= num_return_sequences
- ), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences"
-
- # create attention mask if necessary
- # TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140
- if (
- (attention_mask is None)
- and (pad_token_id is not None)
- and (pad_token_id in input_ids)
- ):
- attention_mask = input_ids.ne(pad_token_id).long()
- elif attention_mask is None:
- attention_mask = input_ids.new_ones(input_ids.shape)
-
- # set pad_token_id to eos_token_id if not set. Important that this is done after
- # attention_mask is created
- if pad_token_id is None and eos_token_id is not None:
- logger.warning(
- "Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format(
- eos_token_id
- )
- )
- pad_token_id = eos_token_id
-
- # current position and vocab size
- vocab_size = self.vocab_size
-
- # set effective batch size and effective batch multiplier according to do_sample
- if do_sample:
- effective_batch_size = batch_size * num_return_sequences
- effective_batch_mult = num_return_sequences
- else:
- effective_batch_size = batch_size
- effective_batch_mult = 1
-
- if decoder_start_token_id is None:
- decoder_start_token_id = bos_token_id
-
- assert (
- decoder_start_token_id is not None
- ), "decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation"
-
- encoder_outputs = self.encoder(input_ids, input_roles, input_pos, input_ent)
-
- # # Expand input ids if num_beams > 1 or num_return_sequences > 1
- # if num_return_sequences > 1 or num_beams > 1:
- # input_sent_len = input_ids.shape[2]
- # input_word_len = input_ids.shape[3]
- # input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, input_sent_len, input_word_len)
- # attention_mask = attention_mask.unsqueeze(1).expand(
- # batch_size, effective_batch_mult * num_beams, input_sent_len, input_word_len
- # )
-
- # input_ids = input_ids.contiguous().view(
- # effective_batch_size * num_beams, input_sent_len, input_word_len
- # ) # shape: (batch_size * num_return_sequences * num_beams, input_sent_len, input_word_len)
- # attention_mask = attention_mask.contiguous().view(
- # effective_batch_size * num_beams, input_sent_len, input_word_len
- # ) # shape: (batch_size * num_return_sequences * num_beams, input_sent_len, input_word_len)
-
- # create empty decoder_input_ids
- input_ids = torch.full(
- (effective_batch_size * num_beams, 1),
- decoder_start_token_id,
- dtype=torch.long,
- device=next(self.parameters()).device,
- )
- cur_len = 1
-
- assert (
- batch_size == encoder_outputs[0].shape[0]
- ), f"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} "
-
- # expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)
- expanded_batch_idxs = (
- torch.arange(batch_size)
- .view(-1, 1)
- .repeat(1, num_beams * effective_batch_mult)
- .view(-1)
- .to(input_ids.device)
- )
- # expand encoder_outputs
- encoder_outputs = (
- encoder_outputs[0].index_select(0, expanded_batch_idxs),
- encoder_outputs[1].index_select(0, expanded_batch_idxs),
- )
-
- if num_beams > 1:
- output = self._generate_beam_search(
- input_ids,
- cur_len=cur_len,
- max_length=max_length,
- min_length=min_length,
- do_sample=do_sample,
- early_stopping=early_stopping,
- temperature=temperature,
- top_k=top_k,
- top_p=top_p,
- repetition_penalty=repetition_penalty,
- no_repeat_ngram_size=no_repeat_ngram_size,
- bad_words_ids=bad_words_ids,
- bos_token_id=bos_token_id,
- pad_token_id=pad_token_id,
- decoder_start_token_id=decoder_start_token_id,
- eos_token_id=eos_token_id,
- batch_size=effective_batch_size,
- num_return_sequences=num_return_sequences,
- length_penalty=length_penalty,
- num_beams=num_beams,
- vocab_size=vocab_size,
- encoder_outputs=encoder_outputs,
- attention_mask=attention_mask,
- )
- else:
- output = self._generate_no_beam_search(
- input_ids,
- cur_len=cur_len,
- max_length=max_length,
- min_length=min_length,
- do_sample=do_sample,
- temperature=temperature,
- top_k=top_k,
- top_p=top_p,
- repetition_penalty=repetition_penalty,
- no_repeat_ngram_size=no_repeat_ngram_size,
- bad_words_ids=bad_words_ids,
- bos_token_id=bos_token_id,
- pad_token_id=pad_token_id,
- decoder_start_token_id=decoder_start_token_id,
- eos_token_id=eos_token_id,
- batch_size=effective_batch_size,
- encoder_outputs=encoder_outputs,
- attention_mask=attention_mask,
- )
-
- return output
-
- def _generate_no_beam_search(
- self,
- input_ids,
- cur_len,
- max_length,
- min_length,
- do_sample,
- temperature,
- top_k,
- top_p,
- repetition_penalty,
- no_repeat_ngram_size,
- bad_words_ids,
- bos_token_id,
- pad_token_id,
- eos_token_id,
- decoder_start_token_id,
- batch_size,
- encoder_outputs,
- attention_mask,
- ):
- """Generate sequences for each example without beam search (num_beams == 1).
-        All returned sequences are generated independently.
- """
- # length of generated sentences / unfinished sentences
- unfinished_sents = input_ids.new(batch_size).fill_(1)
- sent_lengths = input_ids.new(batch_size).fill_(max_length)
-
- past = encoder_outputs # defined for encoder-decoder models, None for decoder-only models
-
- while cur_len < max_length:
- model_inputs = self.prepare_inputs_for_generation(
- input_ids, past=past, attention_mask=attention_mask
- )
-
- outputs = self.decoder(**model_inputs)
- next_token_logits = outputs[:, -1, :]
-
- # repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)
- if repetition_penalty != 1.0:
- self.enforce_repetition_penalty_(
- next_token_logits, batch_size, 1, input_ids, repetition_penalty
- )
-
- if no_repeat_ngram_size > 0:
- # calculate a list of banned tokens to prevent repetitively generating the same ngrams
- # from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
- banned_tokens = calc_banned_ngram_tokens(
- input_ids, batch_size, no_repeat_ngram_size, cur_len
- )
- for batch_idx in range(batch_size):
- next_token_logits[batch_idx, banned_tokens[batch_idx]] = -float(
- "inf"
- )
-
- if bad_words_ids is not None:
- # calculate a list of banned tokens according to bad words
- banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids)
-
- for batch_idx in range(batch_size):
- next_token_logits[batch_idx, banned_tokens[batch_idx]] = -float(
- "inf"
- )
-
- # set eos token prob to zero if min_length is not reached
- if eos_token_id is not None and cur_len < min_length:
- next_token_logits[:, eos_token_id] = -float("inf")
-
- if do_sample:
- # Temperature (higher temperature => more likely to sample low probability tokens)
- if temperature != 1.0:
- next_token_logits = next_token_logits / temperature
- # Top-p/top-k filtering
- next_token_logits = top_k_top_p_filtering(
- next_token_logits, top_k=top_k, top_p=top_p
- )
- # Sample
- probs = F.softmax(next_token_logits, dim=-1)
- next_token = torch.multinomial(probs, num_samples=1).squeeze(1)
- else:
- # Greedy decoding
- next_token = torch.argmax(next_token_logits, dim=-1)
-
- # update generations and finished sentences
- if eos_token_id is not None:
- # pad finished sentences if eos_token_id exist
- tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (
- 1 - unfinished_sents
- )
- else:
- tokens_to_add = next_token
-
- input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
-
- if eos_token_id is not None:
- eos_in_sents = tokens_to_add == eos_token_id
- # if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length
- is_sents_unfinished_and_token_to_add_is_eos = unfinished_sents.mul(
- eos_in_sents.long()
- ).bool()
- sent_lengths.masked_fill_(
- is_sents_unfinished_and_token_to_add_is_eos, cur_len + 1
- )
- # unfinished_sents is set to zero if eos in sentence
- unfinished_sents.mul_((~eos_in_sents).long())
-
-            # stop when there is an EOS token in each sentence, or if we exceed the maximum length
- if unfinished_sents.max() == 0:
- break
-
- cur_len = cur_len + 1
-
- # if there are different sentences lengths in the batch, some batches have to be padded
- if sent_lengths.min().item() != sent_lengths.max().item():
- assert (
- pad_token_id is not None
- ), "`Pad_token_id` has to be defined if batches have different lengths"
- # finished sents are filled with pad_token
- decoded = input_ids.new(batch_size, sent_lengths.max().item()).fill_(
- pad_token_id
- )
- else:
- decoded = input_ids
-
- for hypo_idx, hypo in enumerate(input_ids):
- decoded[hypo_idx, : sent_lengths[hypo_idx]] = hypo[: sent_lengths[hypo_idx]]
-
- return decoded
-
- def _generate_beam_search(
- self,
- input_ids,
- cur_len,
- max_length,
- min_length,
- do_sample,
- early_stopping,
- temperature,
- top_k,
- top_p,
- repetition_penalty,
- no_repeat_ngram_size,
- bad_words_ids,
- bos_token_id,
- pad_token_id,
- eos_token_id,
- decoder_start_token_id,
- batch_size,
- num_return_sequences,
- length_penalty,
- num_beams,
- vocab_size,
- encoder_outputs,
- attention_mask,
- ):
- """Generate sequences for each example with beam search."""
-
- # generated hypotheses
- generated_hyps = [
- BeamHypotheses(
- num_beams, max_length, length_penalty, early_stopping=early_stopping
- )
- for _ in range(batch_size)
- ]
-
- # scores for each sentence in the beam
- beam_scores = torch.zeros(
- (batch_size, num_beams), dtype=torch.float, device=input_ids.device
- )
-
- # for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times
- if do_sample is False:
- beam_scores[:, 1:] = -1e9
- beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)
-
- # cache compute states
- past = encoder_outputs # defined for encoder-decoder models, None for decoder-only models
-
- # done sentences
- done = [False for _ in range(batch_size)]
-
- while cur_len < max_length:
- model_inputs = self.prepare_inputs_for_generation(
- input_ids, past=past, attention_mask=attention_mask
- )
- outputs = self.decoder(
- **model_inputs
- ) # (batch_size * num_beams, cur_len, vocab_size)
- next_token_logits = outputs[
- :, -1, :
- ] # (batch_size * num_beams, vocab_size)
-
- # repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
- if repetition_penalty != 1.0:
- self.enforce_repetition_penalty_(
- next_token_logits,
- batch_size,
- num_beams,
- input_ids,
- repetition_penalty,
- )
-
- if temperature != 1.0:
- next_token_logits = next_token_logits / temperature
-
- scores = F.log_softmax(
- next_token_logits, dim=-1
- ) # (batch_size * num_beams, vocab_size)
- if do_sample is False:
- # TODO (PVP) still a bit hacky here - there might be a better solution
- scores = self.prepare_scores_for_generation(
- scores, cur_len=cur_len, max_length=max_length
- )
-
- # set eos token prob to zero if min_length is not reached
- if eos_token_id is not None and cur_len < min_length:
- scores[:, eos_token_id] = -float("inf")
-
- if no_repeat_ngram_size > 0:
- # calculate a list of banned tokens to prevent repetitively generating the same ngrams
- num_batch_hypotheses = batch_size * num_beams
- # from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
- banned_batch_tokens = calc_banned_ngram_tokens(
- input_ids, num_batch_hypotheses, no_repeat_ngram_size, cur_len
- )
- for i, banned_tokens in enumerate(banned_batch_tokens):
- scores[i, banned_tokens] = -float("inf")
-
- if bad_words_ids is not None:
- # calculate a list of banned tokens according to bad words
- banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids)
-
- for i, banned_tokens in enumerate(banned_tokens):
- scores[i, banned_tokens] = -float("inf")
-
- assert scores.shape == (
- batch_size * num_beams,
- vocab_size,
- ), "Shapes of scores: {} != {}".format(
- scores.shape, (batch_size * num_beams, vocab_size)
- )
-
- if do_sample:
- _scores = scores + beam_scores[:, None].expand_as(
- scores
- ) # (batch_size * num_beams, vocab_size)
- # Top-p/top-k filtering
- _scores = top_k_top_p_filtering(
- _scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2
- ) # (batch_size * num_beams, vocab_size)
- # re-organize to group the beam together to sample from all beam_idxs
- _scores = _scores.contiguous().view(
- batch_size, num_beams * vocab_size
- ) # (batch_size, num_beams * vocab_size)
-
- # Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)
- probs = F.softmax(_scores, dim=-1)
- next_tokens = torch.multinomial(
- probs, num_samples=2 * num_beams
- ) # (batch_size, num_beams * 2)
- # Compute next scores
- next_scores = torch.gather(
- _scores, -1, next_tokens
- ) # (batch_size, num_beams * 2)
- # sort the sampled vector to make sure that the first num_beams samples are the best
- next_scores, next_scores_indices = torch.sort(
- next_scores, descending=True, dim=1
- )
- next_tokens = torch.gather(
- next_tokens, -1, next_scores_indices
- ) # (batch_size, num_beams * 2)
-
- else:
- next_scores = scores + beam_scores[:, None].expand_as(
- scores
- ) # (batch_size * num_beams, vocab_size)
-
-                # re-organize to group the beams together (we are keeping the top hypotheses across beams)
- next_scores = next_scores.view(
- batch_size, num_beams * vocab_size
- ) # (batch_size, num_beams * vocab_size)
-
- next_scores, next_tokens = torch.topk(
- next_scores, 2 * num_beams, dim=1, largest=True, sorted=True
- )
-
- assert (
- next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)
- )
-
- # next batch beam content
- next_batch_beam = []
-
- # for each sentence
- for batch_idx in range(batch_size):
-
- # if we are done with this sentence
- if done[batch_idx]:
- assert (
- len(generated_hyps[batch_idx]) >= num_beams
- ), "Batch can only be done if at least {} beams have been generated".format(
- num_beams
- )
- assert (
- eos_token_id is not None and pad_token_id is not None
- ), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined"
- next_batch_beam.extend(
- [(0, pad_token_id, 0)] * num_beams
- ) # pad the batch
- continue
-
- # next sentence beam content
- next_sent_beam = []
-
- # next tokens for this sentence
- for beam_token_rank, (beam_token_id, beam_token_score) in enumerate(
- zip(next_tokens[batch_idx], next_scores[batch_idx])
- ):
- # get beam and token IDs
- beam_id = beam_token_id // vocab_size
- token_id = beam_token_id % vocab_size
-
- effective_beam_id = batch_idx * num_beams + beam_id
- # add to generated hypotheses if end of sentence or last iteration
- if (eos_token_id is not None) and (token_id.item() == eos_token_id):
- # if beam_token does not belong to top num_beams tokens, it should not be added
- is_beam_token_worse_than_top_num_beams = (
- beam_token_rank >= num_beams
- )
- if is_beam_token_worse_than_top_num_beams:
- continue
- generated_hyps[batch_idx].add(
- input_ids[effective_beam_id].clone(),
- beam_token_score.item(),
- )
- else:
- # add next predicted token if it is not eos_token
- next_sent_beam.append(
- (beam_token_score, token_id, effective_beam_id)
- )
-
- # the beam for next step is full
- if len(next_sent_beam) == num_beams:
- break
-
-                # Check if we're done so that we can save a pad step if all(done)
- done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(
- next_scores[batch_idx].max().item(), cur_len=cur_len
- )
-
- # update next beam content
- assert len(next_sent_beam) == num_beams, "Beam should always be full"
- next_batch_beam.extend(next_sent_beam)
- assert len(next_batch_beam) == num_beams * (batch_idx + 1)
-
- # stop when we are done with each sentence
- if all(done):
- break
-
- # sanity check / prepare next batch
- assert len(next_batch_beam) == batch_size * num_beams
- beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
- beam_tokens = input_ids.new([x[1] for x in next_batch_beam])
- beam_idx = input_ids.new([x[2] for x in next_batch_beam])
-
- # re-order batch
- input_ids = input_ids[beam_idx, :]
- input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1)
- # re-order internal states
- if past is not None:
- past = self._reorder_cache(past, beam_idx)
-
- # update current length
- cur_len = cur_len + 1
-
-        # finalize all open beam hypotheses and add them to the generated hypotheses
- for batch_idx in range(batch_size):
- if done[batch_idx]:
- continue
-
- # test that beam scores match previously calculated scores if not eos and batch_idx not done
- if eos_token_id is not None and all(
-                (token_id % vocab_size).item() != eos_token_id
- for token_id in next_tokens[batch_idx]
- ):
- assert torch.all(
- next_scores[batch_idx, :num_beams]
- == beam_scores.view(batch_size, num_beams)[batch_idx]
- ), "If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}".format(
- next_scores[:, :num_beams][batch_idx],
- beam_scores.view(batch_size, num_beams)[batch_idx],
- )
-
- # need to add best num_beams hypotheses to generated hyps
- for beam_id in range(num_beams):
- effective_beam_id = batch_idx * num_beams + beam_id
- final_score = beam_scores[effective_beam_id].item()
- final_tokens = input_ids[effective_beam_id]
- generated_hyps[batch_idx].add(final_tokens, final_score)
-
- # depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch
- output_batch_size = (
- batch_size if do_sample else batch_size * num_return_sequences
- )
- output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences
-
- # select the best hypotheses
- sent_lengths = input_ids.new(output_batch_size)
- best = []
-
- # retrieve best hypotheses
- for i, hypotheses in enumerate(generated_hyps):
- sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0])
- for j in range(output_num_return_sequences_per_batch):
- effective_batch_idx = output_num_return_sequences_per_batch * i + j
- best_hyp = sorted_hyps.pop()[1]
- sent_lengths[effective_batch_idx] = len(best_hyp)
- best.append(best_hyp)
-
- # shorter batches are filled with pad_token
- if sent_lengths.min().item() != sent_lengths.max().item():
- assert pad_token_id is not None, "`Pad_token_id` has to be defined"
- sent_max_len = min(sent_lengths.max().item() + 1, max_length)
- decoded = input_ids.new(output_batch_size, sent_max_len).fill_(pad_token_id)
-
- # fill with hypothesis and eos_token_id if necessary
- for i, hypo in enumerate(best):
- decoded[i, : sent_lengths[i]] = hypo
- if sent_lengths[i] < max_length:
- decoded[i, sent_lengths[i]] = eos_token_id
- else:
- # none of the hypotheses have an eos_token
-            assert all(len(hypo) == max_length for hypo in best)
- decoded = (
- torch.stack(best).type(torch.long).to(next(self.parameters()).device)
- )
-
- return decoded
-
- # force one of token_ids to be generated by setting prob of all other tokens to 0.
- def _force_token_ids_generation(self, scores, token_ids):
- if isinstance(token_ids, int):
- token_ids = [token_ids]
- all_but_token_ids_mask = torch.tensor(
- [x for x in range(self.vocab_size) if x not in token_ids],
- dtype=torch.long,
- device=next(self.parameters()).device,
- )
- assert (
- len(scores.shape) == 2
- ), "scores should be of rank 2 with shape: [batch_size, vocab_size]"
- scores[:, all_but_token_ids_mask] = -float("inf")
-
- @staticmethod
- def _reorder_cache(past, beam_idx):
- reordered_past = []
- for layer_past in past:
- # get the correct batch idx from layer past batch dim
- # batch dim of `past` and `mems` is at 2nd position
- reordered_layer_past = [
- layer_past[i, :].unsqueeze(0).clone().detach() for i in beam_idx
- ]
- reordered_layer_past = torch.cat(reordered_layer_past, dim=0)
- # check that shape matches
- assert reordered_layer_past.shape == layer_past.shape
- reordered_past.append(reordered_layer_past)
- past = tuple(reordered_past)
- return past
-
-
-"""
- Transformer encoder
-"""
-
-
-class MeetingTransformerEncoder(nn.Module):
- """
- Input:
- transformer_embed_dim: transformer dimension
- """
-
- def __init__(self, opt, transformer_embed_dim):
- super(MeetingTransformerEncoder, self).__init__()
- vocab = int(opt["vocab_size"])
- n_layer = int(opt["TRANSFORMER_LAYER"])
- opt["transformer_embed_dim"] = transformer_embed_dim
- block = EncoderBlock(opt)
- self.blocks = nn.ModuleList([copy.deepcopy(block) for _ in range(n_layer)])
-
- """
- Input:
- x: batch x len x n_state
- Output:
- h: batch x len x n_state
- """
-
- def forward(self, x):
- h = x
- for block in self.blocks:
- h = block(h, None)
- return h
-
-
-"""
-    One decoder block of the transformer
-"""
-
-
-class MeetingDecoderBlock(nn.Module):
- def __init__(self, opt, n_state):
- super(MeetingDecoderBlock, self).__init__()
- self.opt = opt
- self.decoder_splitter = Splitter(n_state)
- self.attn = Attention(n_state, opt)
- self.token_attn = Attention(n_state, opt)
- self.sent_attn = Attention(n_state, opt)
- self.ln_1 = LayerNorm(n_state)
- self.ln_2 = LayerNorm(n_state)
- opt["transformer_embed_dim"] = n_state
- self.mlp = MLP(4 * n_state, opt)
- self.ln_3 = LayerNorm(n_state)
- self.ln_4 = LayerNorm(n_state)
-
- """
- Input:
- y: batch x len x n_state (decoder part)
- token_enc_key: batch x encoder_len x n_state
- token_enc_value: batch x encoder_len x n_state
- sent_enc_key: batch x encoder_len x n_state
- sent_enc_value: batch x encoder_len x n_state
- Output:
- h: batch x len x n_state
- """
-
- def forward(self, y, token_enc_key, token_enc_value, sent_enc_key, sent_enc_value):
- query, key, value = self.decoder_splitter(y)
- # batch x len x n_state
-
- # self-attention
- a = self.attn(query, key, value, None, one_dir_visible=True)
- # batch x len x n_state
-
- n = self.ln_1(y + a) # residual
-
- if "NO_HIERARCHY" in self.opt:
- q = y
- r = n
- else:
- # src-tgt attention on sentences
- q = self.sent_attn(n, sent_enc_key, sent_enc_value, None)
- r = self.ln_3(n + q) # residual
- # batch x len x n_state
-
- # src-tgt attention on tokens
- o = self.token_attn(r, token_enc_key, token_enc_value, None)
- p = self.ln_2(r + o) # residual
- # batch x len x n_state
-
- m = self.mlp(p)
- h = self.ln_4(p + m)
- return h
-
-
-"""
- Transformer decoder
-"""
-
-
-class MeetingTransformerDecoder(nn.Module):
- """
- Input:
- embed_size: decoder transformer dimension
- token_dim: dimension of transformer from token encoder side
- sent_dim: dimension of transformer from sent encoder side
- """
-
- def __init__(self, opt, embedder, embed_size, token_dim, sent_dim):
- super(MeetingTransformerDecoder, self).__init__()
- self.fp16 = "FP16" in opt
- vocab_size = int(opt["vocab_size"])
- n_layer = int(opt["TRANSFORMER_LAYER"])
- self.encoder_splitter = Splitter(embed_size)
- block = MeetingDecoderBlock(opt, embed_size)
- self.token_linear = nn.Linear(token_dim, embed_size)
- self.sent_linear = nn.Linear(sent_dim, embed_size)
- self.blocks = nn.ModuleList([copy.deepcopy(block) for _ in range(n_layer)])
- self.linear = nn.Linear(embed_size, vocab_size, bias=False)
- self.linear.weight = embedder.embed.weight # share weight
-
- """
- Input:
- token_encoder_outputs: 1 x (encoder_len - sent_num) x token_transformer_dim
- sent_encoder_outputs: 1 x sent_num x sent_transformer_dim
- y: batch x len x n_state
- Output:
- prob: batch x len x vocab_size (probabilities after softmax)
- """
-
- def forward(self, token_encoder_inputs, sent_encoder_inputs, decoder_input_ids):
- _, token_enc_key, token_enc_value = self.encoder_splitter(
- self.token_linear(token_encoder_inputs)
- )
- # token_enc_key: batch x encoder_len x n_state
- # token_enc_value: batch x encoder_len x n_state
-
- _, sent_enc_key, sent_enc_value = self.encoder_splitter(
- self.sent_linear(sent_encoder_inputs)
- )
- # sent_enc_key: batch x encoder_len x n_state
- # sent_enc_value: batch x encoder_len x n_state
-
- h = decoder_input_ids
- for block in self.blocks:
- h = block(h, token_enc_key, token_enc_value, sent_enc_key, sent_enc_value)
- prob = F.softmax(self.linear(h), dim=-1)
- return prob
-
-
-class Encoder(nn.Module):
- """
- vocab_size: size of input vocabulary
- embed_size: word embedding dimension of dictionary
- role_dim: role embedding dimension
- embed: the nn.Embedding for vocab
- role_embed: the nn.Embedding for role
- """
-
- def __init__(self, opt, vocab_size, embed_size, role_dim, embedder, role_embed):
- super(Encoder, self).__init__()
- self.opt = opt
- self.vocab_size = vocab_size
-
- set_seq_dropout("VARIATIONAL_DROPOUT" in self.opt)
-
- self.embed_size = embed_size
- self.embedder = embedder
- self.role_embed = role_embed
-
- self.token_transformer_dim = embed_size
- if "USE_POSENT" in opt:
- print("Use POS and ENT")
- pos_dim = opt["POS_DIM"]
- ent_dim = opt["ENT_DIM"]
- self.pos_embed = nn.Embedding(len(POS), pos_dim)
- self.ent_embed = nn.Embedding(len(ENT), ent_dim)
- self.token_transformer_dim += pos_dim + ent_dim
-
- self.sent_transformer_dim = self.token_transformer_dim
- if "USE_ROLE" in opt:
- print("USE_ROLE")
- role_dim = opt["ROLE_DIM"]
- self.sent_transformer_dim += role_dim
-
- self.token_encoder = MeetingTransformerEncoder(opt, self.token_transformer_dim)
- self.sent_encoder = MeetingTransformerEncoder(opt, self.sent_transformer_dim)
-
- """
- x = bz * sent_num * x_len (word_ids)
- x_role = bz * sent_num (role_ids)
- x_pos = bz * sent_num * x_len (pos_ids)
- x_ent = bz * sent_num * x_len (ent_ids)
- outputs:
- token_encoder_outputs: bz x x_len_total x token_transformer_dim
- sent_encoder_outputs: bz x sent_num x sent_transformer_dim
- """
-
- def forward(self, x, x_role, x_pos, x_ent):
- batch_size = x.size(0)
- sent_num = x.size(1)
- x_len = x.size(2)
-
- # x contains word id >= vocab_size
- vocab_x = x.clone()
- vocab_x[vocab_x >= self.vocab_size] = 1 # UNK
- embedded = self.embedder(vocab_x.view(batch_size, -1))
- # embedded = 1 x sent_num * x_len x embed_size
- embedded = embedded.view(batch_size, sent_num, x_len, -1)
- # embedded = 1 x sent_num x x_len x embed_size
-
- if "USE_ROLE" in self.opt:
- role_embed = self.role_embed(x_role) # 1 x sent_num x role_dim
-
- if "USE_POSENT" in self.opt:
- embedded = torch.cat(
- [embedded, self.pos_embed(x_pos), self.ent_embed(x_ent)], dim=3
- )
- # 1 x sent_num x x_len x (embed_size + pos_dim + ent_dim )
-
- feat_dim = embedded.size(3)
-
- token_transformer_output = self.token_encoder(
- embedded.view(-1, x_len, feat_dim)
- )
- token_transformer_dim = token_transformer_output.size(2)
- token_transformer_output = token_transformer_output.view(
- batch_size, sent_num, x_len, token_transformer_dim
- )
- # 1 x sent_num x x_len x token_transformer_dim
-
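-        # Take the representation of the first token in each turn as that turn's summary vector;
-        # this (optionally concatenated with the role embedding) is what the sentence-level
-        # encoder consumes, giving the model its hierarchical token -> turn structure.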
- sent_encoder_inputs = token_transformer_output[
- :, :, 0, :
- ] # 1 x sent_num x token_transformer_dim
- if "USE_ROLE" in self.opt:
- sent_encoder_inputs = torch.cat([sent_encoder_inputs, role_embed], dim=2)
- sent_encoder_outputs = self.sent_encoder(
- sent_encoder_inputs
- ) # 1 x sent_num x sent_transformer_dim
-
- token_transformer_output = token_transformer_output.view(
- batch_size, -1, token_transformer_dim
- )
-
- return token_transformer_output, sent_encoder_outputs
-
-
-class Decoder(nn.Module):
- def __init__(
- self,
- opt,
- embed_size,
- vocab_size,
- embedder,
- token_transformer_dim,
- sent_transformer_dim,
- ):
- super(Decoder, self).__init__()
- self.opt = opt
- self.embed_size = embed_size
- self.vocab_size = vocab_size
- self.embedder = embedder
- self.sent_decoder = MeetingTransformerDecoder(
- opt, embedder, embed_size, token_transformer_dim, sent_transformer_dim
- )
-
- def forward(self, token_encoder_outputs, sent_encoder_outputs, decoder_input_ids):
- vocab_y = decoder_input_ids.clone()
- vocab_y[vocab_y >= self.vocab_size] = 1 # UNK
- embedded = self.embedder(vocab_y)
-
- vocab_prob = self.sent_decoder(
- token_encoder_outputs, sent_encoder_outputs, embedded
- )
- # vocab_prob: batch x y_len x vocab_size
-
- vocab_logprob = torch.log(vocab_prob + 1e-15)
- return vocab_logprob
diff --git a/spaces/akhaliq/VQMIVC/train.py b/spaces/akhaliq/VQMIVC/train.py
deleted file mode 100644
index 5fab7eca4ccd2ea6e45352ee049d24cd68a50679..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/VQMIVC/train.py
+++ /dev/null
@@ -1,411 +0,0 @@
-import hydra
-from hydra import utils
-from itertools import chain
-from pathlib import Path
-import numpy as np
-
-import torch
-import torch.optim as optim
-from torch.utils.data import DataLoader
-
-
-from dataset import CPCDataset_sameSeq as CPCDataset
-from scheduler import WarmupScheduler
-from model_encoder import Encoder, CPCLoss_sameSeq, Encoder_lf0
-from model_decoder import Decoder_ac
-from model_encoder import SpeakerEncoder as Encoder_spk
-from mi_estimators import CLUBSample_group, CLUBSample_reshape
-
-import apex.amp as amp
-import os
-import time
-
-torch.manual_seed(137)
-np.random.seed(137)
-
-def save_checkpoint(encoder, encoder_lf0, cpc, encoder_spk, \
- cs_mi_net, ps_mi_net, cp_mi_net, decoder, \
- optimizer, optimizer_cs_mi_net, optimizer_ps_mi_net, optimizer_cp_mi_net, scheduler, amp, epoch, checkpoint_dir, cfg):
- if cfg.use_amp:
- amp_state_dict = amp.state_dict()
- else:
- amp_state_dict = None
- checkpoint_state = {
- "encoder": encoder.state_dict(),
- "encoder_lf0": encoder_lf0.state_dict(),
- "cpc": cpc.state_dict(),
- "encoder_spk": encoder_spk.state_dict(),
- "ps_mi_net": ps_mi_net.state_dict(),
- "cp_mi_net": cp_mi_net.state_dict(),
- "cs_mi_net": cs_mi_net.state_dict(),
- "decoder": decoder.state_dict(),
- "optimizer": optimizer.state_dict(),
- "optimizer_cs_mi_net": optimizer_cs_mi_net.state_dict(),
- "optimizer_ps_mi_net": optimizer_ps_mi_net.state_dict(),
- "optimizer_cp_mi_net": optimizer_cp_mi_net.state_dict(),
- "scheduler": scheduler.state_dict(),
- "amp": amp_state_dict,
- "epoch": epoch
- }
- checkpoint_dir.mkdir(exist_ok=True, parents=True)
- checkpoint_path = checkpoint_dir / "model.ckpt-{}.pt".format(epoch)
- torch.save(checkpoint_state, checkpoint_path)
- print("Saved checkpoint: {}".format(checkpoint_path.stem))
-
-
-
-def mi_first_forward(mels, lf0, encoder, encoder_lf0, encoder_spk, cs_mi_net, optimizer_cs_mi_net,
- ps_mi_net, optimizer_ps_mi_net, cp_mi_net, optimizer_cp_mi_net, cfg):
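-    # First MI step: update only the CLUB-style MI estimators (cs_mi_net, ps_mi_net, cp_mi_net)
-    # by maximizing the log-likelihood of their variational approximations, with the content,
-    # pitch and speaker representations detached so the main encoders are not affected.
-    # The main model is then updated in mi_second_forward, where the estimated MI terms are
-    # minimized together with the reconstruction, CPC and VQ losses.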
- optimizer_cs_mi_net.zero_grad()
- optimizer_ps_mi_net.zero_grad()
- optimizer_cp_mi_net.zero_grad()
- z, _, _, _, _ = encoder(mels)
- z = z.detach()
- lf0_embs = encoder_lf0(lf0).detach()
- spk_embs = encoder_spk(mels).detach()
- if cfg.use_CSMI:
- lld_cs_loss = -cs_mi_net.loglikeli(spk_embs, z)
- if cfg.use_amp:
- with amp.scale_loss(lld_cs_loss, optimizer_cs_mi_net) as sl:
- sl.backward()
- else:
- lld_cs_loss.backward()
- optimizer_cs_mi_net.step()
- else:
- lld_cs_loss = torch.tensor(0.)
-
- if cfg.use_CPMI:
- lld_cp_loss = -cp_mi_net.loglikeli(lf0_embs.unsqueeze(1).reshape(lf0_embs.shape[0],-1,2,lf0_embs.shape[-1]).mean(2), z)
- if cfg.use_amp:
- with amp.scale_loss(lld_cp_loss, optimizer_cp_mi_net) as slll:
- slll.backward()
- else:
- lld_cp_loss.backward()
- torch.nn.utils.clip_grad_norm_(cp_mi_net.parameters(), 1)
- optimizer_cp_mi_net.step()
- else:
- lld_cp_loss = torch.tensor(0.)
-
- if cfg.use_PSMI:
- lld_ps_loss = -ps_mi_net.loglikeli(spk_embs, lf0_embs)
- if cfg.use_amp:
- with amp.scale_loss(lld_ps_loss, optimizer_ps_mi_net) as sll:
- sll.backward()
- else:
- lld_ps_loss.backward()
- optimizer_ps_mi_net.step()
- else:
- lld_ps_loss = torch.tensor(0.)
-
- return optimizer_cs_mi_net, lld_cs_loss, optimizer_ps_mi_net, lld_ps_loss, optimizer_cp_mi_net, lld_cp_loss
-
-
-def mi_second_forward(mels, lf0, encoder, encoder_lf0, cpc, encoder_spk, cs_mi_net, ps_mi_net, cp_mi_net, decoder, cfg, optimizer, scheduler):
- optimizer.zero_grad()
- z, c, _, vq_loss, perplexity = encoder(mels)
- cpc_loss, accuracy = cpc(z, c)
- spk_embs = encoder_spk(mels)
- lf0_embs = encoder_lf0(lf0)
- recon_loss, pred_mels = decoder(z, lf0_embs, spk_embs, mels.transpose(1,2))
-
- loss = recon_loss + cpc_loss + vq_loss
-
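-    # Second step: update the main model; the CLUB MI estimates (scaled by cfg.mi_weight)
-    # are added as penalties so the learned representations share less mutual information.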
- if cfg.use_CSMI:
- mi_cs_loss = cfg.mi_weight*cs_mi_net.mi_est(spk_embs, z)
- else:
- mi_cs_loss = torch.tensor(0.).to(loss.device)
-
- if cfg.use_CPMI:
- mi_cp_loss = cfg.mi_weight*cp_mi_net.mi_est(lf0_embs.unsqueeze(1).reshape(lf0_embs.shape[0],-1,2,lf0_embs.shape[-1]).mean(2), z)
- else:
- mi_cp_loss = torch.tensor(0.).to(loss.device)
-
- if cfg.use_PSMI:
- mi_ps_loss = cfg.mi_weight*ps_mi_net.mi_est(spk_embs, lf0_embs)
- else:
- mi_ps_loss = torch.tensor(0.).to(loss.device)
-
- loss = loss + mi_cs_loss + mi_ps_loss + mi_cp_loss
-
- if cfg.use_amp:
- with amp.scale_loss(loss, optimizer) as scaled_loss:
- scaled_loss.backward()
- else:
- loss.backward()
-
- optimizer.step()
- return optimizer, recon_loss, vq_loss, cpc_loss, accuracy, perplexity, mi_cs_loss, mi_ps_loss, mi_cp_loss
-
-
-def calculate_eval_loss(mels, lf0, \
- encoder, encoder_lf0, cpc, \
- encoder_spk, cs_mi_net, ps_mi_net, \
- cp_mi_net, decoder, cfg):
- with torch.no_grad():
- z, c, z_beforeVQ, vq_loss, perplexity = encoder(mels)
- lf0_embs = encoder_lf0(lf0)
- spk_embs = encoder_spk(mels)
-
- if cfg.use_CSMI:
- lld_cs_loss = -cs_mi_net.loglikeli(spk_embs, z)
- mi_cs_loss = cfg.mi_weight*cs_mi_net.mi_est(spk_embs, z)
- else:
- lld_cs_loss = torch.tensor(0.)
- mi_cs_loss = torch.tensor(0.)
-
- # z, c, z_beforeVQ, vq_loss, perplexity = encoder(mels)
- cpc_loss, accuracy = cpc(z, c)
- recon_loss, pred_mels = decoder(z, lf0_embs, spk_embs, mels.transpose(1,2))
-
- if cfg.use_CPMI:
- mi_cp_loss = cfg.mi_weight*cp_mi_net.mi_est(lf0_embs.unsqueeze(1).reshape(lf0_embs.shape[0],-1,2,lf0_embs.shape[-1]).mean(2), z)
- lld_cp_loss = -cp_mi_net.loglikeli(lf0_embs.unsqueeze(1).reshape(lf0_embs.shape[0],-1,2,lf0_embs.shape[-1]).mean(2), z)
- else:
- mi_cp_loss = torch.tensor(0.)
- lld_cp_loss = torch.tensor(0.)
-
- if cfg.use_PSMI:
- mi_ps_loss = cfg.mi_weight*ps_mi_net.mi_est(spk_embs, lf0_embs)
- lld_ps_loss = -ps_mi_net.loglikeli(spk_embs, lf0_embs)
- else:
- mi_ps_loss = torch.tensor(0.)
- lld_ps_loss = torch.tensor(0.)
-
- return recon_loss, vq_loss, cpc_loss, accuracy, perplexity, mi_cs_loss, lld_cs_loss, mi_ps_loss, lld_ps_loss, mi_cp_loss, lld_cp_loss
-
-
-def to_eval(all_models):
- for m in all_models:
- m.eval()
-
-
-def to_train(all_models):
- for m in all_models:
- m.train()
-
-
-def eval_model(epoch, checkpoint_dir, device, valid_dataloader, encoder, encoder_lf0, cpc, encoder_spk, cs_mi_net, ps_mi_net, cp_mi_net, decoder, cfg):
- stime = time.time()
- average_cpc_loss = average_vq_loss = average_perplexity = average_recon_loss = 0
- average_accuracies = np.zeros(cfg.training.n_prediction_steps)
- average_lld_cs_loss = average_mi_cs_loss = average_lld_ps_loss = average_mi_ps_loss = average_lld_cp_loss = average_mi_cp_loss = 0
- all_models = [encoder, encoder_lf0, cpc, encoder_spk, cs_mi_net, ps_mi_net, cp_mi_net, decoder]
- to_eval(all_models)
- for i, (mels, lf0, speakers) in enumerate(valid_dataloader, 1):
- lf0 = lf0.to(device)
- mels = mels.to(device) # (bs, 80, 128)
- recon_loss, vq_loss, cpc_loss, accuracy, perplexity, mi_cs_loss, lld_cs_loss, mi_ps_loss, lld_ps_loss, mi_cp_loss, lld_cp_loss = \
- calculate_eval_loss(mels, lf0, \
- encoder, encoder_lf0, cpc, \
- encoder_spk, cs_mi_net, ps_mi_net, \
- cp_mi_net, decoder, cfg)
-
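-        # running mean over batches: avg_i = avg_{i-1} + (x_i - avg_{i-1}) / i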
- average_recon_loss += (recon_loss.item() - average_recon_loss) / i
- average_cpc_loss += (cpc_loss.item() - average_cpc_loss) / i
- average_vq_loss += (vq_loss.item() - average_vq_loss) / i
- average_perplexity += (perplexity.item() - average_perplexity) / i
- average_accuracies += (np.array(accuracy) - average_accuracies) / i
- average_lld_cs_loss += (lld_cs_loss.item() - average_lld_cs_loss) / i
- average_mi_cs_loss += (mi_cs_loss.item() - average_mi_cs_loss) / i
- average_lld_ps_loss += (lld_ps_loss.item() - average_lld_ps_loss) / i
- average_mi_ps_loss += (mi_ps_loss.item() - average_mi_ps_loss) / i
- average_lld_cp_loss += (lld_cp_loss.item() - average_lld_cp_loss) / i
- average_mi_cp_loss += (mi_cp_loss.item() - average_mi_cp_loss) / i
-
-
- ctime = time.time()
- print("Eval | epoch:{}, recon loss:{:.3f}, cpc loss:{:.3f}, vq loss:{:.3f}, perpexlity:{:.3f}, lld cs loss:{:.3f}, mi cs loss:{:.3E}, lld ps loss:{:.3f}, mi ps loss:{:.3f}, lld cp loss:{:.3f}, mi cp loss:{:.3f}, used time:{:.3f}s"
- .format(epoch, average_recon_loss, average_cpc_loss, average_vq_loss, average_perplexity, average_lld_cs_loss, average_mi_cs_loss, average_lld_ps_loss, average_mi_ps_loss, average_lld_cp_loss, average_mi_cp_loss, ctime-stime))
- print(100 * average_accuracies)
- results_txt = open(f'{str(checkpoint_dir)}/results.txt', 'a')
- results_txt.write("Eval | epoch:{}, recon loss:{:.3f}, cpc loss:{:.3f}, vq loss:{:.3f}, perpexlity:{:.3f}, lld cs loss:{:.3f}, mi cs loss:{:.3E}, lld ps loss:{:.3f}, mi ps loss:{:.3f}, lld cp loss:{:.3f}, mi cp loss:{:.3f}"
- .format(epoch, average_recon_loss, average_cpc_loss, average_vq_loss, average_perplexity, average_lld_cs_loss, average_mi_cs_loss, average_lld_ps_loss, average_mi_ps_loss, average_lld_cp_loss, average_mi_cp_loss)+'\n')
- results_txt.write(' '.join([str(cpc_acc) for cpc_acc in average_accuracies])+'\n')
- results_txt.close()
-
- to_train(all_models)
-
-
-@hydra.main(config_path="config/train.yaml")
-def train_model(cfg):
- cfg.checkpoint_dir = f'{cfg.checkpoint_dir}/useCSMI{cfg.use_CSMI}_useCPMI{cfg.use_CPMI}_usePSMI{cfg.use_PSMI}_useAmp{cfg.use_amp}'
- if cfg.encoder_lf0_type == 'no_emb': # default
- dim_lf0 = 1
- else:
- dim_lf0 = 64
-
- checkpoint_dir = Path(utils.to_absolute_path(cfg.checkpoint_dir))
- checkpoint_dir.mkdir(exist_ok=True, parents=True)
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
- # define model
- encoder = Encoder(**cfg.model.encoder)
- encoder_lf0 = Encoder_lf0(cfg.encoder_lf0_type)
- cpc = CPCLoss_sameSeq(**cfg.model.cpc)
- encoder_spk = Encoder_spk()
- cs_mi_net = CLUBSample_group(256, cfg.model.encoder.z_dim, 512)
- ps_mi_net = CLUBSample_group(256, dim_lf0, 512)
- cp_mi_net = CLUBSample_reshape(dim_lf0, cfg.model.encoder.z_dim, 512)
- decoder = Decoder_ac(dim_neck=cfg.model.encoder.z_dim, dim_lf0=dim_lf0, use_l1_loss=True)
-
- encoder.to(device)
- cpc.to(device)
- encoder_lf0.to(device)
- encoder_spk.to(device)
- cs_mi_net.to(device)
- ps_mi_net.to(device)
- cp_mi_net.to(device)
- decoder.to(device)
-
- optimizer = optim.Adam(
- chain(encoder.parameters(), encoder_lf0.parameters(), cpc.parameters(), encoder_spk.parameters(), decoder.parameters()),
- lr=cfg.training.scheduler.initial_lr)
- optimizer_cs_mi_net = optim.Adam(cs_mi_net.parameters(), lr=cfg.mi_lr)
- optimizer_ps_mi_net = optim.Adam(ps_mi_net.parameters(), lr=cfg.mi_lr)
- optimizer_cp_mi_net = optim.Adam(cp_mi_net.parameters(), lr=cfg.mi_lr)
- # TODO: use_amp is set default to True to speed up training; no-amp -> more stable training? => need to be verified
- if cfg.use_amp:
- [encoder, encoder_lf0, cpc, encoder_spk, decoder], optimizer = amp.initialize([encoder, encoder_lf0, cpc, encoder_spk, decoder], optimizer, opt_level='O1')
- [cs_mi_net], optimizer_cs_mi_net = amp.initialize([cs_mi_net], optimizer_cs_mi_net, opt_level='O1')
- [ps_mi_net], optimizer_ps_mi_net = amp.initialize([ps_mi_net], optimizer_ps_mi_net, opt_level='O1')
- [cp_mi_net], optimizer_cp_mi_net = amp.initialize([cp_mi_net], optimizer_cp_mi_net, opt_level='O1')
-
- root_path = Path(utils.to_absolute_path("data"))
- dataset = CPCDataset(
- root=root_path,
- n_sample_frames=cfg.training.sample_frames, # 128
- mode='train')
- valid_dataset = CPCDataset(
- root=root_path,
- n_sample_frames=cfg.training.sample_frames, # 128
- mode='valid')
-
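-    # warm up for roughly 2000 optimisation steps, expressed here as a number of epochs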
- warmup_epochs = 2000 // (len(dataset)//cfg.training.batch_size)
- print('warmup_epochs:', warmup_epochs)
- scheduler = WarmupScheduler(
- optimizer,
- warmup_epochs=warmup_epochs,
- initial_lr=cfg.training.scheduler.initial_lr,
- max_lr=cfg.training.scheduler.max_lr,
- milestones=cfg.training.scheduler.milestones,
- gamma=cfg.training.scheduler.gamma)
-
- dataloader = DataLoader(
- dataset,
- batch_size=cfg.training.batch_size, # 256
- shuffle=True,
- num_workers=cfg.training.n_workers,
- pin_memory=True,
- drop_last=False)
- valid_dataloader = DataLoader(
- valid_dataset,
- batch_size=cfg.training.batch_size, # 256
- shuffle=False,
- num_workers=cfg.training.n_workers,
- pin_memory=True,
- drop_last=False)
-
- if cfg.resume:
- print("Resume checkpoint from: {}:".format(cfg.resume))
- resume_path = utils.to_absolute_path(cfg.resume)
- checkpoint = torch.load(resume_path, map_location=lambda storage, loc: storage)
- encoder.load_state_dict(checkpoint["encoder"])
- encoder_lf0.load_state_dict(checkpoint["encoder_lf0"])
- cpc.load_state_dict(checkpoint["cpc"])
- encoder_spk.load_state_dict(checkpoint["encoder_spk"])
- cs_mi_net.load_state_dict(checkpoint["cs_mi_net"])
- ps_mi_net.load_state_dict(checkpoint["ps_mi_net"])
- if cfg.use_CPMI:
- cp_mi_net.load_state_dict(checkpoint["cp_mi_net"])
- decoder.load_state_dict(checkpoint["decoder"])
- optimizer.load_state_dict(checkpoint["optimizer"])
- optimizer_cs_mi_net.load_state_dict(checkpoint["optimizer_cs_mi_net"])
- optimizer_ps_mi_net.load_state_dict(checkpoint["optimizer_ps_mi_net"])
- optimizer_cp_mi_net.load_state_dict(checkpoint["optimizer_cp_mi_net"])
- if cfg.use_amp:
- amp.load_state_dict(checkpoint["amp"])
- scheduler.load_state_dict(checkpoint["scheduler"])
- start_epoch = checkpoint["epoch"]
- else:
- start_epoch = 1
-
- if os.path.exists(f'{str(checkpoint_dir)}/results.txt'):
- wmode = 'a'
- else:
- wmode = 'w'
- results_txt = open(f'{str(checkpoint_dir)}/results.txt', wmode)
- results_txt.write('save training info...\n')
- results_txt.close()
-
- global_step = 0
- stime = time.time()
- for epoch in range(start_epoch, cfg.training.n_epochs + 1):
- average_cpc_loss = average_vq_loss = average_perplexity = average_recon_loss = 0
- average_accuracies = np.zeros(cfg.training.n_prediction_steps)
- average_lld_cs_loss = average_mi_cs_loss = average_lld_ps_loss = average_mi_ps_loss = average_lld_cp_loss = average_mi_cp_loss = 0
-
- for i, (mels, lf0, speakers) in enumerate(dataloader, 1):
- lf0 = lf0.to(device)
- mels = mels.to(device) # (bs, 80, 128)
- if cfg.use_CSMI or cfg.use_CPMI or cfg.use_PSMI:
- for j in range(cfg.mi_iters):
- optimizer_cs_mi_net, lld_cs_loss, optimizer_ps_mi_net, lld_ps_loss, optimizer_cp_mi_net, lld_cp_loss = mi_first_forward(mels, lf0, encoder, encoder_lf0, encoder_spk, cs_mi_net, optimizer_cs_mi_net, \
- ps_mi_net, optimizer_ps_mi_net, cp_mi_net, optimizer_cp_mi_net, cfg)
- else:
- lld_cs_loss = torch.tensor(0.)
- lld_ps_loss = torch.tensor(0.)
- lld_cp_loss = torch.tensor(0.)
-
- optimizer, recon_loss, vq_loss, cpc_loss, accuracy, perplexity, mi_cs_loss, mi_ps_loss, mi_cp_loss = mi_second_forward(mels, lf0, \
- encoder, encoder_lf0, cpc, \
- encoder_spk, cs_mi_net, ps_mi_net, \
- cp_mi_net, decoder, cfg, \
- optimizer, scheduler)
-
- average_recon_loss += (recon_loss.item() - average_recon_loss) / i
- average_cpc_loss += (cpc_loss.item() - average_cpc_loss) / i
- average_vq_loss += (vq_loss.item() - average_vq_loss) / i
- average_perplexity += (perplexity.item() - average_perplexity) / i
- average_accuracies += (np.array(accuracy) - average_accuracies) / i
- average_lld_cs_loss += (lld_cs_loss.item() - average_lld_cs_loss) / i
- average_mi_cs_loss += (mi_cs_loss.item() - average_mi_cs_loss) / i
- average_lld_ps_loss += (lld_ps_loss.item() - average_lld_ps_loss) / i
- average_mi_ps_loss += (mi_ps_loss.item() - average_mi_ps_loss) / i
- average_lld_cp_loss += (lld_cp_loss.item() - average_lld_cp_loss) / i
- average_mi_cp_loss += (mi_cp_loss.item() - average_mi_cp_loss) / i
-
-
- ctime = time.time()
- print("epoch:{}, global step:{}, recon loss:{:.3f}, cpc loss:{:.3f}, vq loss:{:.3f}, perpexlity:{:.3f}, lld cs loss:{:.3f}, mi cs loss:{:.3E}, lld ps loss:{:.3f}, mi ps loss:{:.3f}, lld cp loss:{:.3f}, mi cp loss:{:.3f}, used time:{:.3f}s"
- .format(epoch, global_step, average_recon_loss, average_cpc_loss, average_vq_loss, average_perplexity, average_lld_cs_loss, average_mi_cs_loss, average_lld_ps_loss, average_mi_ps_loss, average_lld_cp_loss, average_mi_cp_loss, ctime-stime))
- print(100 * average_accuracies)
- stime = time.time()
- global_step += 1
- # scheduler.step()
-
- results_txt = open(f'{str(checkpoint_dir)}/results.txt', 'a')
- results_txt.write("epoch:{}, global step:{}, recon loss:{:.3f}, cpc loss:{:.3f}, vq loss:{:.3f}, perpexlity:{:.3f}, lld cs loss:{:.3f}, mi cs loss:{:.3E}, lld ps loss:{:.3f}, mi ps loss:{:.3f}, lld cp loss:{:.3f}, mi cp loss:{:.3f}"
- .format(epoch, global_step, average_recon_loss, average_cpc_loss, average_vq_loss, average_perplexity, average_lld_cs_loss, average_mi_cs_loss, average_lld_ps_loss, average_mi_ps_loss, average_lld_cp_loss, average_mi_cp_loss)+'\n')
- results_txt.write(' '.join([str(cpc_acc) for cpc_acc in average_accuracies])+'\n')
- results_txt.close()
- scheduler.step()
-
-
- if epoch % cfg.training.log_interval == 0 and epoch != start_epoch:
- eval_model(epoch, checkpoint_dir, device, valid_dataloader, encoder, encoder_lf0, cpc, encoder_spk, cs_mi_net, ps_mi_net, cp_mi_net, decoder, cfg)
-
- ctime = time.time()
- print("epoch:{}, global step:{}, recon loss:{:.3f}, cpc loss:{:.3f}, vq loss:{:.3f}, perpexlity:{:.3f}, lld cs loss:{:.3f}, mi cs loss:{:.3E}, lld ps loss:{:.3f}, mi ps loss:{:.3f}, lld cp loss:{:.3f}, mi cp loss:{:.3f}, used time:{:.3f}s"
- .format(epoch, global_step, average_recon_loss, average_cpc_loss, average_vq_loss, average_perplexity, average_lld_cs_loss, average_mi_cs_loss, average_lld_ps_loss, average_mi_ps_loss, average_lld_cp_loss, average_mi_cp_loss, ctime-stime))
- print(100 * average_accuracies)
- stime = time.time()
-
- if epoch % cfg.training.checkpoint_interval == 0 and epoch != start_epoch:
- save_checkpoint(encoder, encoder_lf0, cpc, encoder_spk, \
- cs_mi_net, ps_mi_net, cp_mi_net, decoder, \
- optimizer, optimizer_cs_mi_net, optimizer_ps_mi_net, optimizer_cp_mi_net, scheduler, amp, epoch, checkpoint_dir, cfg)
-
-
-if __name__ == "__main__":
- train_model()
diff --git a/spaces/akhaliq/dreambooth-training/train_dreambooth.py b/spaces/akhaliq/dreambooth-training/train_dreambooth.py
deleted file mode 100644
index c18edc83b6a5850b86ee75c8ef2f36bb91691b95..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/dreambooth-training/train_dreambooth.py
+++ /dev/null
@@ -1,818 +0,0 @@
-import argparse
-import itertools
-import math
-import os
-from pathlib import Path
-from typing import Optional
-import subprocess
-import sys
-
-import torch
-import torch.nn.functional as F
-import torch.utils.checkpoint
-from torch.utils.data import Dataset
-
-from accelerate import Accelerator
-from accelerate.logging import get_logger
-from accelerate.utils import set_seed
-from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
-from diffusers.optimization import get_scheduler
-from huggingface_hub import HfFolder, Repository, whoami
-from PIL import Image
-from torchvision import transforms
-from tqdm.auto import tqdm
-from transformers import CLIPTextModel, CLIPTokenizer
-
-
-logger = get_logger(__name__)
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description="Simple example of a training script.")
- parser.add_argument(
- "--pretrained_model_name_or_path",
- type=str,
- default=None,
- #required=True,
- help="Path to pretrained model or model identifier from huggingface.co/models.",
- )
- parser.add_argument(
- "--tokenizer_name",
- type=str,
- default=None,
- help="Pretrained tokenizer name or path if not the same as model_name",
- )
- parser.add_argument(
- "--instance_data_dir",
- type=str,
- default=None,
- #required=True,
- help="A folder containing the training data of instance images.",
- )
- parser.add_argument(
- "--class_data_dir",
- type=str,
- default=None,
- required=False,
- help="A folder containing the training data of class images.",
- )
- parser.add_argument(
- "--instance_prompt",
- type=str,
- default=None,
- help="The prompt with identifier specifying the instance",
- )
- parser.add_argument(
- "--class_prompt",
- type=str,
- default="",
- help="The prompt to specify images in the same class as provided instance images.",
- )
- parser.add_argument(
- "--with_prior_preservation",
- default=False,
- action="store_true",
- help="Flag to add prior preservation loss.",
- )
- parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
- parser.add_argument(
- "--num_class_images",
- type=int,
- default=100,
- help=(
- "Minimal class images for prior preservation loss. If not have enough images, additional images will be"
- " sampled with class_prompt."
- ),
- )
- parser.add_argument(
- "--output_dir",
- type=str,
- default="",
- help="The output directory where the model predictions and checkpoints will be written.",
- )
- parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
- parser.add_argument(
- "--resolution",
- type=int,
- default=512,
- help=(
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
- " resolution"
- ),
- )
- parser.add_argument(
- "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution"
- )
- parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
- parser.add_argument(
- "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
- )
- parser.add_argument(
- "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
- )
- parser.add_argument("--num_train_epochs", type=int, default=1)
- parser.add_argument(
- "--max_train_steps",
- type=int,
- default=None,
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
- )
- parser.add_argument(
- "--gradient_accumulation_steps",
- type=int,
- default=1,
- help="Number of updates steps to accumulate before performing a backward/update pass.",
- )
- parser.add_argument(
- "--gradient_checkpointing",
- action="store_true",
- help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
- )
- parser.add_argument(
- "--learning_rate",
- type=float,
- default=5e-6,
- help="Initial learning rate (after the potential warmup period) to use.",
- )
- parser.add_argument(
- "--scale_lr",
- action="store_true",
- default=False,
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
- )
- parser.add_argument(
- "--lr_scheduler",
- type=str,
- default="constant",
- help=(
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
- ' "constant", "constant_with_warmup"]'
- ),
- )
- parser.add_argument(
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
- )
- parser.add_argument(
- "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
- )
- parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
- parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
- parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
- parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
- parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
- parser.add_argument(
- "--hub_model_id",
- type=str,
- default=None,
- help="The name of the repository to keep in sync with the local `output_dir`.",
- )
- parser.add_argument(
- "--logging_dir",
- type=str,
- default="logs",
- help=(
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
- ),
- )
- parser.add_argument(
- "--mixed_precision",
- type=str,
- default="no",
- choices=["no", "fp16", "bf16"],
- help=(
- "Whether to use mixed precision. Choose"
- "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
- "and an Nvidia Ampere GPU."
- ),
- )
-
- parser.add_argument(
- "--save_n_steps",
- type=int,
- default=1,
- help=("Save the model every n global_steps"),
- )
-
-
- parser.add_argument(
- "--save_starting_step",
- type=int,
- default=1,
- help=("The step from which it starts saving intermediary checkpoints"),
- )
-
- parser.add_argument(
- "--stop_text_encoder_training",
- type=int,
- default=1000000,
- help=("The step at which the text_encoder is no longer trained"),
- )
-
-
- parser.add_argument(
- "--image_captions_filename",
- action="store_true",
- help="Get captions from filename",
- )
-
-
- parser.add_argument(
- "--dump_only_text_encoder",
- action="store_true",
- default=False,
- help="Dump only text encoder",
- )
-
- parser.add_argument(
- "--train_only_unet",
- action="store_true",
- default=False,
- help="Train only the unet",
- )
-
- parser.add_argument(
- "--Session_dir",
- type=str,
- default="",
- help="Current session directory",
- )
-
-
-
-
- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
-
- args = parser.parse_args()
- env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
- if env_local_rank != -1 and env_local_rank != args.local_rank:
- args.local_rank = env_local_rank
-
- #if args.instance_data_dir is None:
- # raise ValueError("You must specify a train data directory.")
-
- #if args.with_prior_preservation:
- # if args.class_data_dir is None:
- # raise ValueError("You must specify a data directory for class images.")
- # if args.class_prompt is None:
- # raise ValueError("You must specify prompt for class images.")
-
- return args
-
-
-class DreamBoothDataset(Dataset):
- """
- A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
-    It pre-processes the images and tokenizes the prompts.
- """
-
- def __init__(
- self,
- instance_data_root,
- instance_prompt,
- tokenizer,
- args,
- class_data_root=None,
- class_prompt=None,
- size=512,
- center_crop=False,
- ):
- self.size = size
- self.center_crop = center_crop
- self.tokenizer = tokenizer
- self.image_captions_filename = None
-
- self.instance_data_root = Path(instance_data_root)
- if not self.instance_data_root.exists():
- raise ValueError("Instance images root doesn't exists.")
-
- self.instance_images_path = list(Path(instance_data_root).iterdir())
- self.num_instance_images = len(self.instance_images_path)
- self.instance_prompt = instance_prompt
- self._length = self.num_instance_images
-
- if args.image_captions_filename:
- self.image_captions_filename = True
-
- if class_data_root is not None:
- self.class_data_root = Path(class_data_root)
- self.class_data_root.mkdir(parents=True, exist_ok=True)
- self.class_images_path = list(self.class_data_root.iterdir())
- self.num_class_images = len(self.class_images_path)
- self._length = max(self.num_class_images, self.num_instance_images)
- self.class_prompt = class_prompt
- else:
- self.class_data_root = None
-
- self.image_transforms = transforms.Compose(
- [
- transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
- transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
- transforms.ToTensor(),
- transforms.Normalize([0.5], [0.5]),
- ]
- )
-
- def __len__(self):
- return self._length
-
- def __getitem__(self, index):
- example = {}
- path = self.instance_images_path[index % self.num_instance_images]
- instance_image = Image.open(path)
- if not instance_image.mode == "RGB":
- instance_image = instance_image.convert("RGB")
-
- instance_prompt = self.instance_prompt
-
- if self.image_captions_filename:
- filename = Path(path).stem
- pt=''.join([i for i in filename if not i.isdigit()])
- pt=pt.replace("_"," ")
- pt=pt.replace("(","")
- pt=pt.replace(")","")
- instance_prompt = pt
-            sys.stdout.write("\033[0;32m" + instance_prompt + "\033[0m")
- sys.stdout.flush()
-
-
- example["instance_images"] = self.image_transforms(instance_image)
- example["instance_prompt_ids"] = self.tokenizer(
- instance_prompt,
- padding="do_not_pad",
- truncation=True,
- max_length=self.tokenizer.model_max_length,
- ).input_ids
-
- if self.class_data_root:
- class_image = Image.open(self.class_images_path[index % self.num_class_images])
- if not class_image.mode == "RGB":
- class_image = class_image.convert("RGB")
- example["class_images"] = self.image_transforms(class_image)
- example["class_prompt_ids"] = self.tokenizer(
- self.class_prompt,
- padding="do_not_pad",
- truncation=True,
- max_length=self.tokenizer.model_max_length,
- ).input_ids
-
- return example
-
-
-
-class PromptDataset(Dataset):
- "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
-
- def __init__(self, prompt, num_samples):
- self.prompt = prompt
- self.num_samples = num_samples
-
- def __len__(self):
- return self.num_samples
-
- def __getitem__(self, index):
- example = {}
- example["prompt"] = self.prompt
- example["index"] = index
- return example
-
-
-def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
- if token is None:
- token = HfFolder.get_token()
- if organization is None:
- username = whoami(token)["name"]
- return f"{username}/{model_id}"
- else:
- return f"{organization}/{model_id}"
-
-def merge_two_dicts(starting_dict: dict, updater_dict: dict) -> dict:
- """
-    Starts from the base starting_dict and then adds the key/value pairs from updater_dict,
-    replacing the base values with the updater's values on key collisions.
-
-    Note: d = {**d1, **d2} resolves collisions the same way - the value from the later dict, d2, wins.
-
- :param starting_dict:
- :param updater_dict:
- :return:
- """
- new_dict: dict = starting_dict.copy() # start with keys and values of starting_dict
- new_dict.update(updater_dict) # modifies starting_dict with keys and values of updater_dict
- return new_dict
-
-def merge_args(args1: argparse.Namespace, args2: argparse.Namespace) -> argparse.Namespace:
- """
-
- ref: https://stackoverflow.com/questions/56136549/how-can-i-merge-two-argparse-namespaces-in-python-2-x
- :param args1:
- :param args2:
- :return:
- """
- # - the merged args
-    # vars() returns the object's __dict__, i.e. a {field: value} mapping.
- merged_key_values_for_namespace: dict = merge_two_dicts(vars(args1), vars(args2))
- args = argparse.Namespace(**merged_key_values_for_namespace)
- return args
-
-def run_training(args_imported):
- args_default = parse_args()
- args = merge_args(args_default, args_imported)
- print(args)
- logging_dir = Path(args.output_dir, args.logging_dir)
- i=args.save_starting_step
- accelerator = Accelerator(
- gradient_accumulation_steps=args.gradient_accumulation_steps,
- mixed_precision=args.mixed_precision,
- log_with="tensorboard",
- logging_dir=logging_dir,
- )
-
- # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
- # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
- # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
- if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:
- raise ValueError(
- "Gradient accumulation is not supported when training the text encoder in distributed training. "
- "Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
- )
-
- if args.seed is not None:
- set_seed(args.seed)
-
- if args.with_prior_preservation:
- class_images_dir = Path(args.class_data_dir)
- if not class_images_dir.exists():
- class_images_dir.mkdir(parents=True)
- cur_class_images = len(list(class_images_dir.iterdir()))
-
- if cur_class_images < args.num_class_images:
- torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path, torch_dtype=torch_dtype
- )
- pipeline.set_progress_bar_config(disable=True)
-
- num_new_images = args.num_class_images - cur_class_images
- logger.info(f"Number of class images to sample: {num_new_images}.")
-
- sample_dataset = PromptDataset(args.class_prompt, num_new_images)
- sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
-
- sample_dataloader = accelerator.prepare(sample_dataloader)
- pipeline.to(accelerator.device)
-
- for example in tqdm(
- sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
- ):
- with torch.autocast("cuda"):
- images = pipeline(example["prompt"]).images
-
-                # use a separate loop variable so we don't clobber `i`, which tracks the next checkpoint step
-                for img_idx, image in enumerate(images):
-                    image.save(class_images_dir / f"{example['index'][img_idx] + cur_class_images}.jpg")
-
- del pipeline
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
-
- # Handle the repository creation
- if accelerator.is_main_process:
- if args.push_to_hub:
- if args.hub_model_id is None:
- repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
- else:
- repo_name = args.hub_model_id
- repo = Repository(args.output_dir, clone_from=repo_name)
-
- with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
- if "step_*" not in gitignore:
- gitignore.write("step_*\n")
- if "epoch_*" not in gitignore:
- gitignore.write("epoch_*\n")
- elif args.output_dir is not None:
- os.makedirs(args.output_dir, exist_ok=True)
-
- # Load the tokenizer
- if args.tokenizer_name:
- tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
- elif args.pretrained_model_name_or_path:
- tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
-
- # Load models and create wrapper for stable diffusion
- if args.train_only_unet:
- if os.path.exists(str(args.output_dir+"/text_encoder_trained")):
- text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder_trained")
- elif os.path.exists(str(args.output_dir+"/text_encoder")):
- text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder")
- else:
- text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
- else:
- text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
- vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
- unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
-
- vae.requires_grad_(False)
- if not args.train_text_encoder:
- text_encoder.requires_grad_(False)
-
- if args.gradient_checkpointing:
- unet.enable_gradient_checkpointing()
- if args.train_text_encoder:
- text_encoder.gradient_checkpointing_enable()
-
- if args.scale_lr:
- args.learning_rate = (
- args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
- )
-
- # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
- if args.use_8bit_adam:
- try:
- import bitsandbytes as bnb
- except ImportError:
- raise ImportError(
- "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
- )
-
- optimizer_class = bnb.optim.AdamW8bit
- else:
- optimizer_class = torch.optim.AdamW
-
- params_to_optimize = (
- itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters()
- )
- optimizer = optimizer_class(
- params_to_optimize,
- lr=args.learning_rate,
- betas=(args.adam_beta1, args.adam_beta2),
- weight_decay=args.adam_weight_decay,
- eps=args.adam_epsilon,
- )
-
- noise_scheduler = DDPMScheduler(
- beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000
- )
-
- train_dataset = DreamBoothDataset(
- instance_data_root=args.instance_data_dir,
- instance_prompt=args.instance_prompt,
- class_data_root=args.class_data_dir if args.with_prior_preservation else None,
- class_prompt=args.class_prompt,
- tokenizer=tokenizer,
- size=args.resolution,
- center_crop=args.center_crop,
- args=args,
- )
-
- def collate_fn(examples):
- input_ids = [example["instance_prompt_ids"] for example in examples]
- pixel_values = [example["instance_images"] for example in examples]
-
- # Concat class and instance examples for prior preservation.
- # We do this to avoid doing two forward passes.
- if args.with_prior_preservation:
- input_ids += [example["class_prompt_ids"] for example in examples]
- pixel_values += [example["class_images"] for example in examples]
-
- pixel_values = torch.stack(pixel_values)
- pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
-
- input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids
-
- batch = {
- "input_ids": input_ids,
- "pixel_values": pixel_values,
- }
- return batch
-
- train_dataloader = torch.utils.data.DataLoader(
- train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn
- )
-
- # Scheduler and math around the number of training steps.
- overrode_max_train_steps = False
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
- if args.max_train_steps is None:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
- overrode_max_train_steps = True
-
- lr_scheduler = get_scheduler(
- args.lr_scheduler,
- optimizer=optimizer,
- num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
- num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
- )
-
- if args.train_text_encoder:
- unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
- unet, text_encoder, optimizer, train_dataloader, lr_scheduler
- )
- else:
- unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
- unet, optimizer, train_dataloader, lr_scheduler
- )
-
- weight_dtype = torch.float32
- if args.mixed_precision == "fp16":
- weight_dtype = torch.float16
- elif args.mixed_precision == "bf16":
- weight_dtype = torch.bfloat16
-
- # Move text_encode and vae to gpu.
- # For mixed precision training we cast the text_encoder and vae weights to half-precision
- # as these models are only used for inference, keeping weights in full precision is not required.
- vae.to(accelerator.device, dtype=weight_dtype)
- if not args.train_text_encoder:
- text_encoder.to(accelerator.device, dtype=weight_dtype)
-
- # We need to recalculate our total training steps as the size of the training dataloader may have changed.
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
- if overrode_max_train_steps:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
- # Afterwards we recalculate our number of training epochs
- args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
-
- # We need to initialize the trackers we use, and also store our configuration.
- # The trackers initializes automatically on the main process.
- if accelerator.is_main_process:
- accelerator.init_trackers("dreambooth", config=vars(args))
-
- def bar(prg):
- br='|'+'█' * prg + ' ' * (25-prg)+'|'
- return br
-
- # Train!
- total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
-
- logger.info("***** Running training *****")
- logger.info(f" Num examples = {len(train_dataset)}")
- logger.info(f" Num batches each epoch = {len(train_dataloader)}")
- logger.info(f" Num Epochs = {args.num_train_epochs}")
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
- logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
- logger.info(f" Total optimization steps = {args.max_train_steps}")
- # Only show the progress bar once on each machine.
- progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
- global_step = 0
-
- for epoch in range(args.num_train_epochs):
- unet.train()
- if args.train_text_encoder:
- text_encoder.train()
- for step, batch in enumerate(train_dataloader):
- with accelerator.accumulate(unet):
- # Convert images to latent space
- latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
- latents = latents * 0.18215
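-                # 0.18215 is the latent scaling factor used by the Stable Diffusion VAE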
-
- # Sample noise that we'll add to the latents
- noise = torch.randn_like(latents)
- bsz = latents.shape[0]
- # Sample a random timestep for each image
- timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
- timesteps = timesteps.long()
-
- # Add noise to the latents according to the noise magnitude at each timestep
- # (this is the forward diffusion process)
- noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
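-                # standard DDPM forward process: noisy_latents = sqrt(alpha_bar_t) * latents + sqrt(1 - alpha_bar_t) * noise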
-
- # Get the text embedding for conditioning
- encoder_hidden_states = text_encoder(batch["input_ids"])[0]
-
- # Predict the noise residual
- noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
-
- if args.with_prior_preservation:
- # Chunk the noise and noise_pred into two parts and compute the loss on each part separately.
- noise_pred, noise_pred_prior = torch.chunk(noise_pred, 2, dim=0)
- noise, noise_prior = torch.chunk(noise, 2, dim=0)
-
- # Compute instance loss
- loss = F.mse_loss(noise_pred.float(), noise.float(), reduction="none").mean([1, 2, 3]).mean()
-
- # Compute prior loss
- prior_loss = F.mse_loss(noise_pred_prior.float(), noise_prior.float(), reduction="mean")
-
- # Add the prior loss to the instance loss.
- loss = loss + args.prior_loss_weight * prior_loss
- else:
- loss = F.mse_loss(noise_pred.float(), noise.float(), reduction="mean")
-
- accelerator.backward(loss)
- if accelerator.sync_gradients:
- params_to_clip = (
- itertools.chain(unet.parameters(), text_encoder.parameters())
- if args.train_text_encoder
- else unet.parameters()
- )
- accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
- optimizer.step()
- lr_scheduler.step()
- optimizer.zero_grad()
-
- # Checks if the accelerator has performed an optimization step behind the scenes
- if accelerator.sync_gradients:
- progress_bar.update(1)
- global_step += 1
-
- fll=round((global_step*100)/args.max_train_steps)
- fll=round(fll/4)
- pr=bar(fll)
-
- logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
- progress_bar.set_postfix(**logs)
- progress_bar.set_description_str("Progress:"+pr)
- accelerator.log(logs, step=global_step)
-
- if global_step >= args.max_train_steps:
- break
-
- if args.train_text_encoder and global_step == args.stop_text_encoder_training and global_step >= 30:
- if accelerator.is_main_process:
-                    print("\033[0;32m" + " Freezing the text_encoder ..." + "\033[0m")
- frz_dir=args.output_dir + "/text_encoder_frozen"
- if os.path.exists(frz_dir):
- subprocess.call('rm -r '+ frz_dir, shell=True)
- os.mkdir(frz_dir)
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- pipeline.text_encoder.save_pretrained(frz_dir)
-
- if args.save_n_steps >= 200:
- if global_step < args.max_train_steps-100 and global_step+1==i:
- ckpt_name = "_step_" + str(global_step+1)
- save_dir = Path(args.output_dir+ckpt_name)
- save_dir=str(save_dir)
- save_dir=save_dir.replace(" ", "_")
- if not os.path.exists(save_dir):
- os.mkdir(save_dir)
- inst=save_dir[16:]
- inst=inst.replace(" ", "_")
-                    print("\033[1;32mSAVING CHECKPOINT: "+args.Session_dir+"/"+inst+".ckpt")
- # Create the pipeline using the trained modules and save it.
- if accelerator.is_main_process:
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- pipeline.save_pretrained(save_dir)
- frz_dir=args.output_dir + "/text_encoder_frozen"
- if args.train_text_encoder and os.path.exists(frz_dir):
- subprocess.call('rm -r '+save_dir+'/text_encoder/*.*', shell=True)
- subprocess.call('cp -f '+frz_dir +'/*.* '+ save_dir+'/text_encoder', shell=True)
- chkpth=args.Session_dir+"/"+inst+".ckpt"
- subprocess.call('python /content/diffusers/scripts/convert_diffusers_to_original_stable_diffusion.py --model_path ' + save_dir + ' --checkpoint_path ' + chkpth + ' --half', shell=True)
- i=i+args.save_n_steps
-
- accelerator.wait_for_everyone()
-
-    # Create the pipeline using the trained modules and save it.
- if accelerator.is_main_process:
- if args.dump_only_text_encoder:
- txt_dir=args.output_dir + "/text_encoder_trained"
- if not os.path.exists(txt_dir):
- os.mkdir(txt_dir)
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- pipeline.text_encoder.save_pretrained(txt_dir)
-
- elif args.train_only_unet:
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- pipeline.save_pretrained(args.output_dir)
- txt_dir=args.output_dir + "/text_encoder_trained"
- subprocess.call('rm -r '+txt_dir, shell=True)
-
- else:
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- frz_dir=args.output_dir + "/text_encoder_frozen"
- pipeline.save_pretrained(args.output_dir)
- if args.train_text_encoder and os.path.exists(frz_dir):
- subprocess.call('mv -f '+frz_dir +'/*.* '+ args.output_dir+'/text_encoder', shell=True)
- subprocess.call('rm -r '+ frz_dir, shell=True)
-
- if args.push_to_hub:
- repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True)
-
- accelerator.end_training()
-
-if __name__ == "__main__":
- pass
- #main()
diff --git a/spaces/alexrame/rewardedsoups/streamlit_app/data/locomotion/trajectories/19.html b/spaces/alexrame/rewardedsoups/streamlit_app/data/locomotion/trajectories/19.html
deleted file mode 100644
index 1083a2d57d16d3dd5558368160b5dd13fd8f1021..0000000000000000000000000000000000000000
--- a/spaces/alexrame/rewardedsoups/streamlit_app/data/locomotion/trajectories/19.html
+++ /dev/null
@@ -1,48 +0,0 @@
-    brax visualizer
diff --git a/spaces/allknowingroger/Image-Models-Test14/app.py b/spaces/allknowingroger/Image-Models-Test14/app.py
deleted file mode 100644
index c45ef6020c60ab162e782d6d93068eb23dcad25e..0000000000000000000000000000000000000000
--- a/spaces/allknowingroger/Image-Models-Test14/app.py
+++ /dev/null
@@ -1,144 +0,0 @@
-import gradio as gr
-# import os
-# import sys
-# from pathlib import Path
-import time
-
-models =[
- "AaAsr/weight",
- "badmonk/renaxoi",
- "digiplay/K-main2.1",
- "digiplay/realmixUnrealjourney_v1",
- "digiplay/AIGEN_v1.4_diffusers",
- "digiplay/CamelliaMix_NSFW_diffusers_v1.1",
- "digiplay/GhostMixV1.2VAE",
- "digiplay/CamelliaMIx_2.5D_diffusers",
- "digiplay/LemonTea2.5D",
-]
-
-
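-# Load each model as a gradio Interface; if loading fails, fall back to a stub that returns no image.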
-model_functions = {}
-model_idx = 1
-for model_path in models:
- try:
- model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False)
- except Exception as error:
- def the_fn(txt):
- return None
- model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"])
- model_idx+=1
-
-
-def send_it_idx(idx):
- def send_it_fn(prompt):
-        output = (model_functions.get(idx) or model_functions.get(1))(prompt)
- return output
- return send_it_fn
-
-def get_prompts(prompt_text):
- return prompt_text
-
-def clear_it(val):
- if int(val) != 0:
- val = 0
- else:
- val = 0
- pass
- return val
-
-def all_task_end(cnt,t_stamp):
- to = t_stamp + 60
- et = time.time()
- if et > to and t_stamp != 0:
- d = gr.update(value=0)
- tog = gr.update(value=1)
- #print(f'to: {to} et: {et}')
- else:
- if cnt != 0:
- d = gr.update(value=et)
- else:
- d = gr.update(value=0)
- tog = gr.update(value=0)
- #print (f'passing: to: {to} et: {et}')
- pass
- return d, tog
-
-def all_task_start():
- print("\n\n\n\n\n\n\n")
- t = time.gmtime()
- t_stamp = time.time()
- current_time = time.strftime("%H:%M:%S", t)
- return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0)
-
-def clear_fn():
- nn = len(models)
- return tuple([None, *[None for _ in range(nn)]])
-
-
-
-with gr.Blocks(title="SD Models") as my_interface:
- with gr.Column(scale=12):
- # with gr.Row():
-        #     gr.Markdown("""- Primary prompt: what you want to draw (English words, e.g. a cat; separating terms with commas works better; click the Improve button to refine)\n- Real prompt: the refined prompt; once it appears, click the Run button on the right to start""")
- with gr.Row():
- with gr.Row(scale=6):
- primary_prompt=gr.Textbox(label="Prompt", value="")
- # real_prompt=gr.Textbox(label="Real prompt")
- with gr.Row(scale=6):
- # improve_prompts_btn=gr.Button("Improve")
- with gr.Row():
- run=gr.Button("Run",variant="primary")
- clear_btn=gr.Button("Clear")
- with gr.Row():
- sd_outputs = {}
- model_idx = 1
- for model_path in models:
- with gr.Column(scale=3, min_width=320):
- with gr.Box():
- sd_outputs[model_idx] = gr.Image(label=model_path)
- pass
- model_idx += 1
- pass
- pass
-
- with gr.Row(visible=False):
- start_box=gr.Number(interactive=False)
- end_box=gr.Number(interactive=False)
- tog_box=gr.Textbox(value=0,interactive=False)
-
- start_box.change(
- all_task_end,
- [start_box, end_box],
- [start_box, tog_box],
- every=1,
- show_progress=False)
-
- primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box])
- run.click(all_task_start, None, [start_box, end_box, tog_box])
- runs_dict = {}
- model_idx = 1
- for model_path in models:
- runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]])
- model_idx += 1
- pass
- pass
-
- # improve_prompts_btn_clicked=improve_prompts_btn.click(
- # get_prompts,
- # inputs=[primary_prompt],
- # outputs=[primary_prompt],
- # cancels=list(runs_dict.values()))
- clear_btn.click(
- clear_fn,
- None,
- [primary_prompt, *list(sd_outputs.values())],
- cancels=[*list(runs_dict.values())])
- tog_box.change(
- clear_it,
- tog_box,
- tog_box,
- cancels=[*list(runs_dict.values())])
-
-my_interface.queue(concurrency_count=600, status_update_rate=1)
-my_interface.launch(inline=True, show_api=False)
-
\ No newline at end of file
diff --git a/spaces/alphunt/diffdock-alphunt-demo/models/all_atom_score_model.py b/spaces/alphunt/diffdock-alphunt-demo/models/all_atom_score_model.py
deleted file mode 100644
index 6c08aefdb972589303525275a948a8b21da1d346..0000000000000000000000000000000000000000
--- a/spaces/alphunt/diffdock-alphunt-demo/models/all_atom_score_model.py
+++ /dev/null
@@ -1,415 +0,0 @@
-from e3nn import o3
-import torch
-from torch import nn
-from torch.nn import functional as F
-from torch_cluster import radius, radius_graph
-from torch_scatter import scatter_mean
-import numpy as np
-
-from models.score_model import AtomEncoder, TensorProductConvLayer, GaussianSmearing
-from utils import so3, torus
-from datasets.process_mols import lig_feature_dims, rec_residue_feature_dims, rec_atom_feature_dims
-
-
-class TensorProductScoreModel(torch.nn.Module):
- def __init__(self, t_to_sigma, device, timestep_emb_func, in_lig_edge_features=4, sigma_embed_dim=32, sh_lmax=2,
- ns=16, nv=4, num_conv_layers=2, lig_max_radius=5, rec_max_radius=30, cross_max_distance=250,
- center_max_distance=30, distance_embed_dim=32, cross_distance_embed_dim=32, no_torsion=False,
- scale_by_sigma=True, use_second_order_repr=False, batch_norm=True,
- dynamic_max_cross=False, dropout=0.0, lm_embedding_type=False, confidence_mode=False,
- confidence_dropout=0, confidence_no_batchnorm=False, num_confidence_outputs=1):
- super(TensorProductScoreModel, self).__init__()
- self.t_to_sigma = t_to_sigma
- self.in_lig_edge_features = in_lig_edge_features
- self.sigma_embed_dim = sigma_embed_dim
- self.lig_max_radius = lig_max_radius
- self.rec_max_radius = rec_max_radius
- self.cross_max_distance = cross_max_distance
- self.dynamic_max_cross = dynamic_max_cross
- self.center_max_distance = center_max_distance
- self.distance_embed_dim = distance_embed_dim
- self.cross_distance_embed_dim = cross_distance_embed_dim
- self.sh_irreps = o3.Irreps.spherical_harmonics(lmax=sh_lmax)
- self.ns, self.nv = ns, nv
- self.scale_by_sigma = scale_by_sigma
- self.device = device
- self.no_torsion = no_torsion
- self.num_conv_layers = num_conv_layers
- self.timestep_emb_func = timestep_emb_func
- self.confidence_mode = confidence_mode
- self.num_conv_layers = num_conv_layers
-
- # embedding layers
- self.lig_node_embedding = AtomEncoder(emb_dim=ns, feature_dims=lig_feature_dims, sigma_embed_dim=sigma_embed_dim)
- self.lig_edge_embedding = nn.Sequential(nn.Linear(in_lig_edge_features + sigma_embed_dim + distance_embed_dim, ns),nn.ReLU(),nn.Dropout(dropout),nn.Linear(ns, ns))
-
- self.rec_node_embedding = AtomEncoder(emb_dim=ns, feature_dims=rec_residue_feature_dims, sigma_embed_dim=sigma_embed_dim, lm_embedding_type=lm_embedding_type)
- self.rec_edge_embedding = nn.Sequential(nn.Linear(sigma_embed_dim + distance_embed_dim, ns), nn.ReLU(), nn.Dropout(dropout),nn.Linear(ns, ns))
-
- self.atom_node_embedding = AtomEncoder(emb_dim=ns, feature_dims=rec_atom_feature_dims, sigma_embed_dim=sigma_embed_dim)
- self.atom_edge_embedding = nn.Sequential(nn.Linear(sigma_embed_dim + distance_embed_dim, ns), nn.ReLU(), nn.Dropout(dropout),nn.Linear(ns, ns))
-
- self.lr_edge_embedding = nn.Sequential(nn.Linear(sigma_embed_dim + cross_distance_embed_dim, ns), nn.ReLU(), nn.Dropout(dropout),nn.Linear(ns, ns))
- self.ar_edge_embedding = nn.Sequential(nn.Linear(sigma_embed_dim + distance_embed_dim, ns), nn.ReLU(), nn.Dropout(dropout),nn.Linear(ns, ns))
- self.la_edge_embedding = nn.Sequential(nn.Linear(sigma_embed_dim + cross_distance_embed_dim, ns), nn.ReLU(), nn.Dropout(dropout),nn.Linear(ns, ns))
-
- self.lig_distance_expansion = GaussianSmearing(0.0, lig_max_radius, distance_embed_dim)
- self.rec_distance_expansion = GaussianSmearing(0.0, rec_max_radius, distance_embed_dim)
- self.cross_distance_expansion = GaussianSmearing(0.0, cross_max_distance, cross_distance_embed_dim)
-
- if use_second_order_repr:
- irrep_seq = [
- f'{ns}x0e',
- f'{ns}x0e + {nv}x1o + {nv}x2e',
- f'{ns}x0e + {nv}x1o + {nv}x2e + {nv}x1e + {nv}x2o',
- f'{ns}x0e + {nv}x1o + {nv}x2e + {nv}x1e + {nv}x2o + {ns}x0o'
- ]
- else:
- irrep_seq = [
- f'{ns}x0e',
- f'{ns}x0e + {nv}x1o',
- f'{ns}x0e + {nv}x1o + {nv}x1e',
- f'{ns}x0e + {nv}x1o + {nv}x1e + {ns}x0o'
- ]
-
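-        # e3nn irreps notation: '{ns}x0e' = ns even scalar channels, '{nv}x1o' = nv odd l=1 (vector)
-        # channels, and so on; deeper entries add higher-order and opposite-parity features.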
- # convolutional layers
- conv_layers = []
- for i in range(num_conv_layers):
- in_irreps = irrep_seq[min(i, len(irrep_seq) - 1)]
- out_irreps = irrep_seq[min(i + 1, len(irrep_seq) - 1)]
- parameters = {
- 'in_irreps': in_irreps,
- 'sh_irreps': self.sh_irreps,
- 'out_irreps': out_irreps,
- 'n_edge_features': 3 * ns,
- 'residual': False,
- 'batch_norm': batch_norm,
- 'dropout': dropout
- }
-
-            for _ in range(9): # 9 convolutions per layer: 3 intra-graph & 6 inter-graph
- conv_layers.append(TensorProductConvLayer(**parameters))
-
- self.conv_layers = nn.ModuleList(conv_layers)
-
- # confidence and affinity prediction layers
- if self.confidence_mode:
- output_confidence_dim = num_confidence_outputs
-
- self.confidence_predictor = nn.Sequential(
- nn.Linear(2 * self.ns if num_conv_layers >= 3 else self.ns, ns),
- nn.BatchNorm1d(ns) if not confidence_no_batchnorm else nn.Identity(),
- nn.ReLU(),
- nn.Dropout(confidence_dropout),
- nn.Linear(ns, ns),
- nn.BatchNorm1d(ns) if not confidence_no_batchnorm else nn.Identity(),
- nn.ReLU(),
- nn.Dropout(confidence_dropout),
- nn.Linear(ns, output_confidence_dim)
- )
-
- else:
- # convolution for translational and rotational scores
- self.center_distance_expansion = GaussianSmearing(0.0, center_max_distance, distance_embed_dim)
- self.center_edge_embedding = nn.Sequential(
- nn.Linear(distance_embed_dim + sigma_embed_dim, ns),
- nn.ReLU(),
- nn.Dropout(dropout),
- nn.Linear(ns, ns)
- )
-
- self.final_conv = TensorProductConvLayer(
- in_irreps=self.conv_layers[-1].out_irreps,
- sh_irreps=self.sh_irreps,
- out_irreps=f'2x1o + 2x1e',
- n_edge_features=2 * ns,
- residual=False,
- dropout=dropout,
- batch_norm=batch_norm
- )
-
- self.tr_final_layer = nn.Sequential(nn.Linear(1 + sigma_embed_dim, ns), nn.Dropout(dropout), nn.ReLU(), nn.Linear(ns, 1))
- self.rot_final_layer = nn.Sequential(nn.Linear(1 + sigma_embed_dim, ns), nn.Dropout(dropout), nn.ReLU(), nn.Linear(ns, 1))
-
- if not no_torsion:
- # convolution for torsional score
- self.final_edge_embedding = nn.Sequential(
- nn.Linear(distance_embed_dim, ns),
- nn.ReLU(),
- nn.Dropout(dropout),
- nn.Linear(ns, ns)
- )
- self.final_tp_tor = o3.FullTensorProduct(self.sh_irreps, "2e")
- self.tor_bond_conv = TensorProductConvLayer(
- in_irreps=self.conv_layers[-1].out_irreps,
- sh_irreps=self.final_tp_tor.irreps_out,
- out_irreps=f'{ns}x0o + {ns}x0e',
- n_edge_features=3 * ns,
- residual=False,
- dropout=dropout,
- batch_norm=batch_norm
- )
- self.tor_final_layer = nn.Sequential(
-                # tor_bond_conv outputs '{ns}x0o + {ns}x0e', i.e. 2*ns scalar features; odd_parity is
-                # never set in this file, so guard the attribute lookup to avoid an AttributeError.
-                nn.Linear(2 * ns if not getattr(self, 'odd_parity', False) else ns, ns, bias=False),
- nn.Tanh(),
- nn.Dropout(dropout),
- nn.Linear(ns, 1, bias=False)
- )
-
- def forward(self, data):
- if not self.confidence_mode:
- tr_sigma, rot_sigma, tor_sigma = self.t_to_sigma(*[data.complex_t[noise_type] for noise_type in ['tr', 'rot', 'tor']])
- else:
- tr_sigma, rot_sigma, tor_sigma = [data.complex_t[noise_type] for noise_type in ['tr', 'rot', 'tor']]
-
- # build ligand graph
- lig_node_attr, lig_edge_index, lig_edge_attr, lig_edge_sh = self.build_lig_conv_graph(data)
- lig_node_attr = self.lig_node_embedding(lig_node_attr)
- lig_edge_attr = self.lig_edge_embedding(lig_edge_attr)
-
- # build receptor graph
- rec_node_attr, rec_edge_index, rec_edge_attr, rec_edge_sh = self.build_rec_conv_graph(data)
- rec_node_attr = self.rec_node_embedding(rec_node_attr)
- rec_edge_attr = self.rec_edge_embedding(rec_edge_attr)
-
- # build atom graph
- atom_node_attr, atom_edge_index, atom_edge_attr, atom_edge_sh = self.build_atom_conv_graph(data)
- atom_node_attr = self.atom_node_embedding(atom_node_attr)
- atom_edge_attr = self.atom_edge_embedding(atom_edge_attr)
-
- # build cross graph
- cross_cutoff = (tr_sigma * 3 + 20).unsqueeze(1) if self.dynamic_max_cross else self.cross_max_distance
- lr_edge_index, lr_edge_attr, lr_edge_sh, la_edge_index, la_edge_attr, \
- la_edge_sh, ar_edge_index, ar_edge_attr, ar_edge_sh = self.build_cross_conv_graph(data, cross_cutoff)
- lr_edge_attr= self.lr_edge_embedding(lr_edge_attr)
- la_edge_attr = self.la_edge_embedding(la_edge_attr)
- ar_edge_attr = self.ar_edge_embedding(ar_edge_attr)
-
- for l in range(self.num_conv_layers):
- # LIGAND updates
- lig_edge_attr_ = torch.cat([lig_edge_attr, lig_node_attr[lig_edge_index[0], :self.ns], lig_node_attr[lig_edge_index[1], :self.ns]], -1)
- lig_update = self.conv_layers[9*l](lig_node_attr, lig_edge_index, lig_edge_attr_, lig_edge_sh)
-
- lr_edge_attr_ = torch.cat([lr_edge_attr, lig_node_attr[lr_edge_index[0], :self.ns], rec_node_attr[lr_edge_index[1], :self.ns]], -1)
- lr_update = self.conv_layers[9*l+1](rec_node_attr, lr_edge_index, lr_edge_attr_, lr_edge_sh,
- out_nodes=lig_node_attr.shape[0])
-
- la_edge_attr_ = torch.cat([la_edge_attr, lig_node_attr[la_edge_index[0], :self.ns], atom_node_attr[la_edge_index[1], :self.ns]], -1)
- la_update = self.conv_layers[9*l+2](atom_node_attr, la_edge_index, la_edge_attr_, la_edge_sh,
- out_nodes=lig_node_attr.shape[0])
-
- if l != self.num_conv_layers-1: # last layer optimisation
-
- # ATOM UPDATES
- atom_edge_attr_ = torch.cat([atom_edge_attr, atom_node_attr[atom_edge_index[0], :self.ns], atom_node_attr[atom_edge_index[1], :self.ns]], -1)
- atom_update = self.conv_layers[9*l+3](atom_node_attr, atom_edge_index, atom_edge_attr_, atom_edge_sh)
-
- al_edge_attr_ = torch.cat([la_edge_attr, atom_node_attr[la_edge_index[1], :self.ns], lig_node_attr[la_edge_index[0], :self.ns]], -1)
- al_update = self.conv_layers[9*l+4](lig_node_attr, torch.flip(la_edge_index, dims=[0]), al_edge_attr_,
- la_edge_sh, out_nodes=atom_node_attr.shape[0])
-
- ar_edge_attr_ = torch.cat([ar_edge_attr, atom_node_attr[ar_edge_index[0], :self.ns], rec_node_attr[ar_edge_index[1], :self.ns]],-1)
- ar_update = self.conv_layers[9*l+5](rec_node_attr, ar_edge_index, ar_edge_attr_, ar_edge_sh, out_nodes=atom_node_attr.shape[0])
-
- # RECEPTOR updates
- rec_edge_attr_ = torch.cat([rec_edge_attr, rec_node_attr[rec_edge_index[0], :self.ns], rec_node_attr[rec_edge_index[1], :self.ns]], -1)
- rec_update = self.conv_layers[9*l+6](rec_node_attr, rec_edge_index, rec_edge_attr_, rec_edge_sh)
-
- rl_edge_attr_ = torch.cat([lr_edge_attr, rec_node_attr[lr_edge_index[1], :self.ns], lig_node_attr[lr_edge_index[0], :self.ns]], -1)
- rl_update = self.conv_layers[9*l+7](lig_node_attr, torch.flip(lr_edge_index, dims=[0]), rl_edge_attr_,
- lr_edge_sh, out_nodes=rec_node_attr.shape[0])
-
- ra_edge_attr_ = torch.cat([ar_edge_attr, rec_node_attr[ar_edge_index[1], :self.ns], atom_node_attr[ar_edge_index[0], :self.ns]], -1)
- ra_update = self.conv_layers[9*l+8](atom_node_attr, torch.flip(ar_edge_index, dims=[0]), ra_edge_attr_,
- ar_edge_sh, out_nodes=rec_node_attr.shape[0])
-
- # padding original features and update features with residual updates
- lig_node_attr = F.pad(lig_node_attr, (0, lig_update.shape[-1] - lig_node_attr.shape[-1]))
- lig_node_attr = lig_node_attr + lig_update + la_update + lr_update
-
- if l != self.num_conv_layers - 1: # last layer optimisation
- atom_node_attr = F.pad(atom_node_attr, (0, atom_update.shape[-1] - rec_node_attr.shape[-1]))
- atom_node_attr = atom_node_attr + atom_update + al_update + ar_update
- rec_node_attr = F.pad(rec_node_attr, (0, rec_update.shape[-1] - rec_node_attr.shape[-1]))
- rec_node_attr = rec_node_attr + rec_update + ra_update + rl_update
-
- # confidence and affinity prediction
- if self.confidence_mode:
- scalar_lig_attr = torch.cat([lig_node_attr[:,:self.ns],lig_node_attr[:,-self.ns:]], dim=1) if self.num_conv_layers >= 3 else lig_node_attr[:,:self.ns]
- confidence = self.confidence_predictor(scatter_mean(scalar_lig_attr, data['ligand'].batch, dim=0)).squeeze(dim=-1)
- return confidence
-
- # compute translational and rotational score vectors
- center_edge_index, center_edge_attr, center_edge_sh = self.build_center_conv_graph(data)
- center_edge_attr = self.center_edge_embedding(center_edge_attr)
- center_edge_attr = torch.cat([center_edge_attr, lig_node_attr[center_edge_index[0], :self.ns]], -1)
- global_pred = self.final_conv(lig_node_attr, center_edge_index, center_edge_attr, center_edge_sh, out_nodes=data.num_graphs)
-
- tr_pred = global_pred[:, :3] + global_pred[:, 6:9]
- rot_pred = global_pred[:, 3:6] + global_pred[:, 9:]
- data.graph_sigma_emb = self.timestep_emb_func(data.complex_t['tr'])
-
-        # adjust the magnitude of the score vectors
- tr_norm = torch.linalg.vector_norm(tr_pred, dim=1).unsqueeze(1)
- tr_pred = tr_pred / tr_norm * self.tr_final_layer(torch.cat([tr_norm, data.graph_sigma_emb], dim=1))
-
- rot_norm = torch.linalg.vector_norm(rot_pred, dim=1).unsqueeze(1)
- rot_pred = rot_pred / rot_norm * self.rot_final_layer(torch.cat([rot_norm, data.graph_sigma_emb], dim=1))
-
- if self.scale_by_sigma:
- tr_pred = tr_pred / tr_sigma.unsqueeze(1)
- rot_pred = rot_pred * so3.score_norm(rot_sigma.cpu()).unsqueeze(1).to(data['ligand'].x.device)
-
- if self.no_torsion or data['ligand'].edge_mask.sum() == 0: return tr_pred, rot_pred, torch.empty(0,device=self.device)
-
- # torsional components
- tor_bonds, tor_edge_index, tor_edge_attr, tor_edge_sh = self.build_bond_conv_graph(data)
- tor_bond_vec = data['ligand'].pos[tor_bonds[1]] - data['ligand'].pos[tor_bonds[0]]
- tor_bond_attr = lig_node_attr[tor_bonds[0]] + lig_node_attr[tor_bonds[1]]
-
- tor_bonds_sh = o3.spherical_harmonics("2e", tor_bond_vec, normalize=True, normalization='component')
- tor_edge_sh = self.final_tp_tor(tor_edge_sh, tor_bonds_sh[tor_edge_index[0]])
-
- tor_edge_attr = torch.cat([tor_edge_attr, lig_node_attr[tor_edge_index[1], :self.ns],
- tor_bond_attr[tor_edge_index[0], :self.ns]], -1)
- tor_pred = self.tor_bond_conv(lig_node_attr, tor_edge_index, tor_edge_attr, tor_edge_sh,
- out_nodes=data['ligand'].edge_mask.sum(), reduce='mean')
- tor_pred = self.tor_final_layer(tor_pred).squeeze(1)
- edge_sigma = tor_sigma[data['ligand'].batch][data['ligand', 'ligand'].edge_index[0]][data['ligand'].edge_mask]
-
- if self.scale_by_sigma:
- tor_pred = tor_pred * torch.sqrt(torch.tensor(torus.score_norm(edge_sigma.cpu().numpy())).float()
- .to(data['ligand'].x.device))
- return tr_pred, rot_pred, tor_pred
-
- def build_lig_conv_graph(self, data):
- # build the graph between ligand atoms
- data['ligand'].node_sigma_emb = self.timestep_emb_func(data['ligand'].node_t['tr'])
-
- radius_edges = radius_graph(data['ligand'].pos, self.lig_max_radius, data['ligand'].batch)
- edge_index = torch.cat([data['ligand', 'ligand'].edge_index, radius_edges], 1).long()
- edge_attr = torch.cat([
- data['ligand', 'ligand'].edge_attr,
- torch.zeros(radius_edges.shape[-1], self.in_lig_edge_features, device=data['ligand'].x.device)
- ], 0)
-
- edge_sigma_emb = data['ligand'].node_sigma_emb[edge_index[0].long()]
- edge_attr = torch.cat([edge_attr, edge_sigma_emb], 1)
- node_attr = torch.cat([data['ligand'].x, data['ligand'].node_sigma_emb], 1)
-
- src, dst = edge_index
- edge_vec = data['ligand'].pos[dst.long()] - data['ligand'].pos[src.long()]
- edge_length_emb = self.lig_distance_expansion(edge_vec.norm(dim=-1))
-
- edge_attr = torch.cat([edge_attr, edge_length_emb], 1)
- edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component')
-
- return node_attr, edge_index, edge_attr, edge_sh
-
- def build_rec_conv_graph(self, data):
- # build the graph between receptor residues
- data['receptor'].node_sigma_emb = self.timestep_emb_func(data['receptor'].node_t['tr'])
- node_attr = torch.cat([data['receptor'].x, data['receptor'].node_sigma_emb], 1)
-
- # this assumes the edges were already created in preprocessing since protein's structure is fixed
- edge_index = data['receptor', 'receptor'].edge_index
- src, dst = edge_index
- edge_vec = data['receptor'].pos[dst.long()] - data['receptor'].pos[src.long()]
-
- edge_length_emb = self.rec_distance_expansion(edge_vec.norm(dim=-1))
- edge_sigma_emb = data['receptor'].node_sigma_emb[edge_index[0].long()]
- edge_attr = torch.cat([edge_sigma_emb, edge_length_emb], 1)
- edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component')
-
- return node_attr, edge_index, edge_attr, edge_sh
-
- def build_atom_conv_graph(self, data):
- # build the graph between receptor atoms
- data['atom'].node_sigma_emb = self.timestep_emb_func(data['atom'].node_t['tr'])
- node_attr = torch.cat([data['atom'].x, data['atom'].node_sigma_emb], 1)
-
- # this assumes the edges were already created in preprocessing since protein's structure is fixed
- edge_index = data['atom', 'atom'].edge_index
- src, dst = edge_index
- edge_vec = data['atom'].pos[dst.long()] - data['atom'].pos[src.long()]
-
- edge_length_emb = self.lig_distance_expansion(edge_vec.norm(dim=-1))
- edge_sigma_emb = data['atom'].node_sigma_emb[edge_index[0].long()]
- edge_attr = torch.cat([edge_sigma_emb, edge_length_emb], 1)
- edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component')
-
- return node_attr, edge_index, edge_attr, edge_sh
-
- def build_cross_conv_graph(self, data, lr_cross_distance_cutoff):
-        # build the cross edges between ligand atoms, receptor residues and receptor atoms
-
- # LIGAND to RECEPTOR
- if torch.is_tensor(lr_cross_distance_cutoff):
- # different cutoff for every graph
- lr_edge_index = radius(data['receptor'].pos / lr_cross_distance_cutoff[data['receptor'].batch],
- data['ligand'].pos / lr_cross_distance_cutoff[data['ligand'].batch], 1,
- data['receptor'].batch, data['ligand'].batch, max_num_neighbors=10000)
- else:
- lr_edge_index = radius(data['receptor'].pos, data['ligand'].pos, lr_cross_distance_cutoff,
- data['receptor'].batch, data['ligand'].batch, max_num_neighbors=10000)
-
- lr_edge_vec = data['receptor'].pos[lr_edge_index[1].long()] - data['ligand'].pos[lr_edge_index[0].long()]
- lr_edge_length_emb = self.cross_distance_expansion(lr_edge_vec.norm(dim=-1))
- lr_edge_sigma_emb = data['ligand'].node_sigma_emb[lr_edge_index[0].long()]
- lr_edge_attr = torch.cat([lr_edge_sigma_emb, lr_edge_length_emb], 1)
- lr_edge_sh = o3.spherical_harmonics(self.sh_irreps, lr_edge_vec, normalize=True, normalization='component')
-
- cutoff_d = lr_cross_distance_cutoff[data['ligand'].batch[lr_edge_index[0]]].squeeze() \
- if torch.is_tensor(lr_cross_distance_cutoff) else lr_cross_distance_cutoff
-
- # LIGAND to ATOM
- la_edge_index = radius(data['atom'].pos, data['ligand'].pos, self.lig_max_radius,
- data['atom'].batch, data['ligand'].batch, max_num_neighbors=10000)
-
- la_edge_vec = data['atom'].pos[la_edge_index[1].long()] - data['ligand'].pos[la_edge_index[0].long()]
- la_edge_length_emb = self.cross_distance_expansion(la_edge_vec.norm(dim=-1))
- la_edge_sigma_emb = data['ligand'].node_sigma_emb[la_edge_index[0].long()]
- la_edge_attr = torch.cat([la_edge_sigma_emb, la_edge_length_emb], 1)
- la_edge_sh = o3.spherical_harmonics(self.sh_irreps, la_edge_vec, normalize=True, normalization='component')
-
- # ATOM to RECEPTOR
- ar_edge_index = data['atom', 'receptor'].edge_index
- ar_edge_vec = data['receptor'].pos[ar_edge_index[1].long()] - data['atom'].pos[ar_edge_index[0].long()]
- ar_edge_length_emb = self.rec_distance_expansion(ar_edge_vec.norm(dim=-1))
- ar_edge_sigma_emb = data['atom'].node_sigma_emb[ar_edge_index[0].long()]
- ar_edge_attr = torch.cat([ar_edge_sigma_emb, ar_edge_length_emb], 1)
- ar_edge_sh = o3.spherical_harmonics(self.sh_irreps, ar_edge_vec, normalize=True, normalization='component')
-
- return lr_edge_index, lr_edge_attr, lr_edge_sh, la_edge_index, la_edge_attr, \
- la_edge_sh, ar_edge_index, ar_edge_attr, ar_edge_sh
-
- def build_center_conv_graph(self, data):
- # build the filter for the convolution of the center with the ligand atoms
- # for translational and rotational score
- edge_index = torch.cat([data['ligand'].batch.unsqueeze(0), torch.arange(len(data['ligand'].batch)).to(data['ligand'].x.device).unsqueeze(0)], dim=0)
-
- center_pos, count = torch.zeros((data.num_graphs, 3)).to(data['ligand'].x.device), torch.zeros((data.num_graphs, 3)).to(data['ligand'].x.device)
- center_pos.index_add_(0, index=data['ligand'].batch, source=data['ligand'].pos)
- center_pos = center_pos / torch.bincount(data['ligand'].batch).unsqueeze(1)
-
- edge_vec = data['ligand'].pos[edge_index[1]] - center_pos[edge_index[0]]
- edge_attr = self.center_distance_expansion(edge_vec.norm(dim=-1))
- edge_sigma_emb = data['ligand'].node_sigma_emb[edge_index[1].long()]
- edge_attr = torch.cat([edge_attr, edge_sigma_emb], 1)
- edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component')
- return edge_index, edge_attr, edge_sh
-
- def build_bond_conv_graph(self, data):
- # build graph for the pseudotorque layer
- bonds = data['ligand', 'ligand'].edge_index[:, data['ligand'].edge_mask].long()
- bond_pos = (data['ligand'].pos[bonds[0]] + data['ligand'].pos[bonds[1]]) / 2
- bond_batch = data['ligand'].batch[bonds[0]]
- edge_index = radius(data['ligand'].pos, bond_pos, self.lig_max_radius, batch_x=data['ligand'].batch, batch_y=bond_batch)
-
- edge_vec = data['ligand'].pos[edge_index[1]] - bond_pos[edge_index[0]]
- edge_attr = self.lig_distance_expansion(edge_vec.norm(dim=-1))
-
- edge_attr = self.final_edge_embedding(edge_attr)
- edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component')
-
- return bonds, edge_index, edge_attr, edge_sh
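
Side note on the score model above: the equivariant head only fixes the *direction* of the translational and rotational score vectors, while their magnitude is re-predicted by `tr_final_layer` / `rot_final_layer` from the raw vector norm and the sigma embedding (the `# adjust the magnitude of the score vectors` block in `forward`). The sketch below reproduces that pattern for the translational branch with made-up dimensions and random tensors; it is an illustration of the idea, not the model's actual module.

```python
import torch
import torch.nn as nn

# Illustrative sizes only; the real model takes these from its config.
ns, sigma_embed_dim = 16, 32
tr_final_layer = nn.Sequential(
    nn.Linear(1 + sigma_embed_dim, ns), nn.Dropout(0.1), nn.ReLU(), nn.Linear(ns, 1)
)

tr_pred = torch.randn(4, 3)                        # raw per-complex 3-vectors from the conv head
graph_sigma_emb = torch.randn(4, sigma_embed_dim)  # per-graph noise-level (sigma) embedding

# Keep the direction, re-predict the length from (norm, sigma embedding).
tr_norm = torch.linalg.vector_norm(tr_pred, dim=1, keepdim=True)
tr_pred = tr_pred / tr_norm * tr_final_layer(torch.cat([tr_norm, graph_sigma_emb], dim=1))
print(tr_pred.shape)  # torch.Size([4, 3])
```

The same rescaling is applied to the rotational branch, and, when `scale_by_sigma` is set, the result is additionally normalised by a sigma-dependent factor as in the code above.
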
diff --git a/spaces/anaclaudia13ct/insect_detection/data/scripts/get_coco128.sh b/spaces/anaclaudia13ct/insect_detection/data/scripts/get_coco128.sh
deleted file mode 100644
index ee05a867e5644be8cc7549b89cad89d5e84573d0..0000000000000000000000000000000000000000
--- a/spaces/anaclaudia13ct/insect_detection/data/scripts/get_coco128.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-# Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017)
-# Example usage: bash data/scripts/get_coco128.sh
-# parent
-# ├── yolov5
-# └── datasets
-# └── coco128 ← downloads here
-
-# Download/unzip images and labels
-d='../datasets' # unzip directory
-url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
-f='coco128.zip' # or 'coco128-segments.zip', 68 MB
-echo 'Downloading' $url$f ' ...'
-curl -L $url$f -o $f && unzip -q $f -d $d && rm $f &
-
-wait # finish background tasks
diff --git a/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/src/midas/midas_net_custom.py b/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/src/midas/midas_net_custom.py
deleted file mode 100644
index 50e4acb5e53d5fabefe3dde16ab49c33c2b7797c..0000000000000000000000000000000000000000
--- a/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/src/midas/midas_net_custom.py
+++ /dev/null
@@ -1,128 +0,0 @@
-"""MidashNet: Network for monocular depth estimation trained by mixing several datasets.
-This file contains code that is adapted from
-https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
-"""
-import torch
-import torch.nn as nn
-
-from .base_model import BaseModel
-from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder
-
-
-class MidasNet_small(BaseModel):
- """Network for monocular depth estimation.
- """
-
- def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True,
- blocks={'expand': True}):
- """Init.
-
- Args:
- path (str, optional): Path to saved model. Defaults to None.
-            features (int, optional): Number of features. Defaults to 64.
-            backbone (str, optional): Backbone network for encoder. Defaults to efficientnet_lite3
- """
- print("Loading weights: ", path)
-
- super(MidasNet_small, self).__init__()
-
- use_pretrained = False if path else True
-
- self.channels_last = channels_last
- self.blocks = blocks
- self.backbone = backbone
-
- self.groups = 1
-
- features1=features
- features2=features
- features3=features
- features4=features
- self.expand = False
- if "expand" in self.blocks and self.blocks['expand'] == True:
- self.expand = True
- features1=features
- features2=features*2
- features3=features*4
- features4=features*8
-
- self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable)
-
- self.scratch.activation = nn.ReLU(False)
-
- self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
- self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
- self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
- self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners)
-
-
- self.scratch.output_conv = nn.Sequential(
- nn.Conv2d(features, features//2, kernel_size=3, stride=1, padding=1, groups=self.groups),
- Interpolate(scale_factor=2, mode="bilinear"),
- nn.Conv2d(features//2, 32, kernel_size=3, stride=1, padding=1),
- self.scratch.activation,
- nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
- nn.ReLU(True) if non_negative else nn.Identity(),
- nn.Identity(),
- )
-
- if path:
- self.load(path)
-
-
- def forward(self, x):
- """Forward pass.
-
- Args:
- x (tensor): input data (image)
-
- Returns:
- tensor: depth
- """
- if self.channels_last==True:
- print("self.channels_last = ", self.channels_last)
-            x = x.contiguous(memory_format=torch.channels_last)
-
-
- layer_1 = self.pretrained.layer1(x)
- layer_2 = self.pretrained.layer2(layer_1)
- layer_3 = self.pretrained.layer3(layer_2)
- layer_4 = self.pretrained.layer4(layer_3)
-
- layer_1_rn = self.scratch.layer1_rn(layer_1)
- layer_2_rn = self.scratch.layer2_rn(layer_2)
- layer_3_rn = self.scratch.layer3_rn(layer_3)
- layer_4_rn = self.scratch.layer4_rn(layer_4)
-
-
- path_4 = self.scratch.refinenet4(layer_4_rn)
- path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
- path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
- path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
-
- out = self.scratch.output_conv(path_1)
-
- return torch.squeeze(out, dim=1)
-
-
-
-def fuse_model(m):
- prev_previous_type = nn.Identity()
- prev_previous_name = ''
- previous_type = nn.Identity()
- previous_name = ''
- for name, module in m.named_modules():
- if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU:
- # print("FUSED ", prev_previous_name, previous_name, name)
- torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True)
- elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d:
- # print("FUSED ", prev_previous_name, previous_name)
- torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True)
- # elif previous_type == nn.Conv2d and type(module) == nn.ReLU:
- # print("FUSED ", previous_name, name)
- # torch.quantization.fuse_modules(m, [previous_name, name], inplace=True)
-
- prev_previous_type = previous_type
- prev_previous_name = previous_name
- previous_type = type(module)
- previous_name = name
\ No newline at end of file
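
For reference, a minimal usage sketch of `MidasNet_small` as defined above. The import path is hypothetical, and with `path=None` the constructor pulls pretrained backbone weights through `_make_encoder`, so treat this as an illustration rather than the project's official inference script.

```python
import torch
from midas.midas_net_custom import MidasNet_small  # hypothetical import path

# Defaults mirror the constructor above: features=64, EfficientNet-Lite3 backbone,
# expanded decoder channels, non-negative output.
model = MidasNet_small(path=None, features=64, backbone="efficientnet_lite3",
                       non_negative=True, exportable=True, blocks={'expand': True})
model.eval()

with torch.no_grad():
    img = torch.rand(1, 3, 256, 256)  # dummy normalised RGB batch
    depth = model(img)                # inverse-depth map, shape (batch, H, W)

print(depth.shape)
```

Passing a checkpoint path instead of `None` calls `self.load(path)` and skips the pretrained-backbone initialisation, as in the constructor above.
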
diff --git a/spaces/artificialguybr/video-dubbing/Wav2Lip/evaluation/scores_LSE/calculate_scores_real_videos.sh b/spaces/artificialguybr/video-dubbing/Wav2Lip/evaluation/scores_LSE/calculate_scores_real_videos.sh
deleted file mode 100644
index 4a45cd568d10bfeea9fc31255fcdf121d3f4e0e9..0000000000000000000000000000000000000000
--- a/spaces/artificialguybr/video-dubbing/Wav2Lip/evaluation/scores_LSE/calculate_scores_real_videos.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-rm all_scores.txt
-yourfilenames=`ls $1`
-
-for eachfile in $yourfilenames
-do
- python run_pipeline.py --videofile $1/$eachfile --reference wav2lip --data_dir tmp_dir
- python calculate_scores_real_videos.py --videofile $1/$eachfile --reference wav2lip --data_dir tmp_dir >> all_scores.txt
-done
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/multiline_highlight.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/multiline_highlight.py
deleted file mode 100644
index 231ff7372e310992258a98ccd9f534902bf10253..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/multiline_highlight.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""
-Multi-Line Highlight
-====================
-This multi-line chart uses an invisible Voronoi tessellation to handle mouseover to
-identify the nearest point and then highlight the line on which the point falls.
-It is adapted from the Vega-Lite example found at
-https://bl.ocks.org/amitkaps/fe4238e716db53930b2f1a70d3401701
-"""
-# category: interactive charts
-import altair as alt
-from vega_datasets import data
-
-source = data.stocks()
-
-highlight = alt.selection(type='single', on='mouseover',
- fields=['symbol'], nearest=True)
-
-base = alt.Chart(source).encode(
- x='date:T',
- y='price:Q',
- color='symbol:N'
-)
-
-points = base.mark_circle().encode(
- opacity=alt.value(0)
-).add_selection(
- highlight
-).properties(
- width=600
-)
-
-lines = base.mark_line().encode(
- size=alt.condition(~highlight, alt.value(1), alt.value(3))
-)
-
-points + lines
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/data/multi_corpus_dataset.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/data/multi_corpus_dataset.py
deleted file mode 100644
index a3f47c720d0215323ffea7eb5cf5fd7766fbefa6..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/data/multi_corpus_dataset.py
+++ /dev/null
@@ -1,256 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-import time
-from collections import OrderedDict
-from typing import Dict, List, Optional
-
-import numpy as np
-from fairseq.data import data_utils
-
-from . import FairseqDataset
-
-logger = logging.getLogger(__name__)
-
-
-class MultiCorpusDataset(FairseqDataset):
- """
- Stores multiple instances of FairseqDataset together.
- Unless batch_sample=True, requires each instance
-    to be of the same dataset type, as the collate method needs to work on batches with
- samples from each dataset.
-
- Allows specifying a distribution over the datasets to use. Note that unlike
- MultiCorpusSampledDataset, this distribution allows sampling for each item,
-    rather than on a batch level. Note that datasets with sampling probability
- of 0 will be skipped.
-
- Each time ordered_indices() is called, a new sample is generated with
- the specified distribution.
-
- Args:
-        datasets: an OrderedDict of FairseqDataset instances.
- distribution: a List containing the probability of getting an utterance from
- corresponding dataset
-        seed: random seed for sampling the datasets
- sort_indices: if true, will sort the ordered indices by size
- batch_sample: if true, will ensure each batch is from a single dataset
- """
-
- def __init__(
- self,
- datasets: Dict[str, FairseqDataset],
- distribution: List[float],
- seed: int,
- sort_indices: bool = False,
- batch_sample: bool = False,
- distributed_rank: Optional[int] = None,
- ):
- super().__init__()
- assert isinstance(datasets, OrderedDict)
- assert len(datasets) == len(distribution)
- assert sum(distribution) == 1
- self.datasets = datasets
- self.distribution = distribution
- self.seed = seed
- self.sort_indices = sort_indices
- self.batch_sample = batch_sample
- self.distributed_rank = distributed_rank
-
- # Avoid repeated conversions to list later
- self.dataset_list = list(datasets.values())
- self.total_num_instances = 0
-
- first_dataset = self.dataset_list[0]
-
- self.num_instances_per_dataset = []
- self.dataset_offsets = []
- for i, dataset in enumerate(self.dataset_list):
- assert isinstance(dataset, FairseqDataset)
- assert type(dataset) is type(first_dataset)
- self.num_instances_per_dataset.append(
- 0 if self.distribution[i] == 0 else len(dataset)
- )
- self.dataset_offsets.append(self.total_num_instances)
- self.total_num_instances += self.num_instances_per_dataset[i]
-
- def ordered_indices(self):
- start = time.time()
- with data_utils.numpy_seed(self.seed, self.epoch):
- logger.info(
- f"sampling new dataset with seed {self.seed} epoch {self.epoch}"
- )
- sampled_indices = []
- num_selected_instances = 0
-
- # For each dataset i, sample self.distribution[i] * self.total_num_instances
- for i, key in enumerate(self.datasets):
- if self.distribution[i] == 0:
- # skip dataset if sampling probability is 0
- continue
-
- if i < len(self.datasets) - 1:
- num_instances = int(self.distribution[i] * self.total_num_instances)
- high = self.dataset_offsets[i + 1]
- else:
- num_instances = self.total_num_instances - num_selected_instances
- high = self.total_num_instances
-
- logger.info(f"sampling {num_instances} from {key} dataset")
- num_selected_instances += num_instances
-
- # First, add k copies of the dataset where k = num_instances // len(dataset).
- # This ensures an equal distribution of the data points as much as possible.
- # For the remaining entries randomly sample them
- dataset_size = len(self.datasets[key])
- num_copies = num_instances // dataset_size
- dataset_indices = (
- np.random.permutation(high - self.dataset_offsets[i])
- + self.dataset_offsets[i]
- )[: num_instances - num_copies * dataset_size]
- if num_copies > 0:
- sampled_indices += list(
- np.concatenate(
- (
- np.repeat(
- np.arange(self.dataset_offsets[i], high), num_copies
- ),
- dataset_indices,
- )
- )
- )
- else:
- sampled_indices += list(dataset_indices)
-
- assert (
- len(sampled_indices) == self.total_num_instances
- ), f"{len(sampled_indices)} vs {self.total_num_instances}"
-
- np.random.shuffle(sampled_indices)
- if self.sort_indices:
- sampled_indices.sort(key=lambda i: self.num_tokens(i))
-
- logger.info(
- "multi_corpus_dataset ordered_indices took {}s".format(
- time.time() - start
- )
- )
- return np.array(sampled_indices, dtype=np.int64)
-
- def _map_index(self, index: int):
- """
- If dataset A has length N and dataset B has length M
- then index 1 maps to index 1 of dataset A, and index N + 1
- maps to index 1 of B.
- """
- counter = 0
- for num_instances, key in zip(self.num_instances_per_dataset, self.datasets):
- if index < counter + num_instances:
- return index - counter, key
- counter += num_instances
- raise ValueError(
- "Invalid index: {}, max: {}".format(index, self.total_num_instances)
- )
-
- def __len__(self):
- """
- Length of this dataset is the sum of individual datasets
- """
- return self.total_num_instances
-
- def __getitem__(self, index):
- new_index, key = self._map_index(index)
- try:
- item = self.datasets[key][new_index]
- item["full_id"] = index
- return item
- except Exception as e:
- e.args = (f"Error from {key} dataset", *e.args)
- raise
-
- def collater(self, samples):
- """
- If we are doing batch sampling, then pick the right collater to use.
-
- Otherwise we assume all collaters are the same.
- """
- if len(samples) == 0:
- return None
- if "full_id" in samples[0]:
- _, key = self._map_index(samples[0]["full_id"])
- try:
- batch = self.datasets[key].collater(samples)
- except Exception:
- print(f"Collating failed for key {key}", flush=True)
- raise
- return batch
- else:
- # Subclasses may override __getitem__ to not specify full_id
- return list(self.datasets.values())[0].collater(samples)
-
- def num_tokens(self, index: int):
- index, key = self._map_index(index)
- return self.datasets[key].num_tokens(index)
-
- def size(self, index: int):
- index, key = self._map_index(index)
- return self.datasets[key].size(index)
-
- @property
- def can_reuse_epoch_itr_across_epochs(self):
- return False
-
- def set_epoch(self, epoch, **unused):
- super().set_epoch(epoch)
- logger.info(f"setting epoch of multi_corpus_dataset to {epoch}")
- self.epoch = epoch
-
- @property
- def supports_prefetch(self):
- return False
-
- @property
- def supports_fetch_outside_dataloader(self):
- return all(
- self.datasets[key].supports_fetch_outside_dataloader
- for key in self.datasets
- )
-
- def batch_by_size(
- self,
- indices,
- max_tokens=None,
- max_sentences=None,
- required_batch_size_multiple=1,
- ):
- if not self.batch_sample:
- return super().batch_by_size(
- indices, max_tokens, max_sentences, required_batch_size_multiple
- )
-
- dataset_indices = {key: [] for key in self.datasets}
- for i in indices:
- _, key = self._map_index(i)
- dataset_indices[key].append(i)
-
- batches = []
- for key in dataset_indices:
- cur_batches = super().batch_by_size(
- np.array(dataset_indices[key], dtype=np.int64),
- max_tokens,
- max_sentences,
- required_batch_size_multiple,
- )
- logger.info(f"Created {len(cur_batches)} batches for dataset {key}")
- batches += cur_batches
-
- # If this dataset is used in a distributed training setup,
- # then shuffle such that the order is seeded by the distributed rank
- # as well
- if self.distributed_rank is not None:
- with data_utils.numpy_seed(self.seed, self.epoch, self.distributed_rank):
- np.random.shuffle(batches)
- return batches
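
To make the sampling scheme in `ordered_indices()` concrete, here is a self-contained NumPy sketch of the same idea: each corpus gets roughly `distribution[i] * total` indices, built from full copies of the corpus plus a random remainder, with the last corpus absorbing the leftover quota before everything is shuffled. Corpus names and sizes are invented for illustration.

```python
import numpy as np

rng = np.random.default_rng(0)
corpus_sizes = {"news": 5, "web": 20}   # pretend dataset lengths
distribution = [0.5, 0.5]
offsets = np.cumsum([0] + list(corpus_sizes.values()))[:-1]
total = sum(corpus_sizes.values())

sampled = []
for i, (key, size) in enumerate(corpus_sizes.items()):
    # The last corpus takes whatever quota is left, as in ordered_indices().
    quota = int(distribution[i] * total) if i < len(corpus_sizes) - 1 else total - len(sampled)
    copies, remainder = divmod(quota, size)
    idx = np.concatenate([np.tile(np.arange(size), copies),
                          rng.permutation(size)[:remainder]]) + offsets[i]
    sampled.extend(idx.tolist())

rng.shuffle(sampled)
print(len(sampled), "indices, e.g.", sampled[:8])
```

The real class then maps each global index back to a `(local_index, corpus)` pair via `_map_index()` when items are fetched.
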
diff --git a/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Hossein Kalbasi.html b/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Hossein Kalbasi.html
deleted file mode 100644
index 629f96277ec57e3493172ad4ee3fe55249176c59..0000000000000000000000000000000000000000
--- a/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Hossein Kalbasi.html
+++ /dev/null
@@ -1,134 +0,0 @@
-
-
-
- Hossein Kalbasi
-
-
-
-
-
I am currently one of the top Data Science mentors at GreatLearning. I enjoy helping others and being part of the long-term career growth of mentees.
Interview
5-6 years exp in SE and DS
BS in Industrial engineering / manufacturing
MS: AI in manufacturing
worked at several different startups
Content intelligence / recommendation systems
How did you hear about SM?
Saw you on LinkedIn (Reza)
Talked on the phone with him once. Saw a shout-out on LinkedIn for Reza and talked to him about it.
I can connect with and help more people
ISA is interesting
Find people who are interested enough
Mentorship experience?
Helps my friends and peers find jobs
held python workshops
mentoring at great learning - half lecture, half programming
helping ppl throughout their jobs
What are beginners lacking?
Don't know what industry is actually like. Don't know what to learn/expect
Sometimes, overestimate (need to learn everything)
Sometimes, underestimate
Sometimes missing programming skills, or data science skills
And how can you help?
Find out what their passion is. Why do you want this job? How do you see your career?
Be picky with the details.
I am very self-taught and I know how to learn things on my own. Can direct folks in this direction
Use my teaching experience to help them when they are stuck. Don't do it for them. Give them tips and tricks
Then help them with their CV, networking tactics, on LI etc.
-
-
Questions about SM?
History of SM?
What's the day-to-day of a mentor?
-
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/augmentedimaginationhackathon/paperstocode/frontend/src/app/app.module.ts b/spaces/augmentedimaginationhackathon/paperstocode/frontend/src/app/app.module.ts
deleted file mode 100644
index 389ad33f96a6ff2bdbf5b69c314707f11fd3b9f9..0000000000000000000000000000000000000000
--- a/spaces/augmentedimaginationhackathon/paperstocode/frontend/src/app/app.module.ts
+++ /dev/null
@@ -1,20 +0,0 @@
-import { NgModule } from '@angular/core';
-import { BrowserModule } from '@angular/platform-browser';
-
-import { AppRoutingModule } from './app-routing.module';
-import { AppComponent } from './app.component';
-import { UploaderComponent } from './uploader/uploader.component';
-
-@NgModule({
- declarations: [
- AppComponent,
- UploaderComponent
- ],
- imports: [
- BrowserModule,
- AppRoutingModule
- ],
- providers: [],
- bootstrap: [AppComponent]
-})
-export class AppModule { }
diff --git a/spaces/aurora10/GPT4ALL_CHATBOT/README.md b/spaces/aurora10/GPT4ALL_CHATBOT/README.md
deleted file mode 100644
index 3f056cf96093e6102dadfec7ee05eb8d853b8d35..0000000000000000000000000000000000000000
--- a/spaces/aurora10/GPT4ALL_CHATBOT/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: GPT4ALL CHATBOT
-emoji: 📚
-colorFrom: pink
-colorTo: green
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
-duplicated_from: aurora10/GPT4ALL_CHATBOT
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/autosummproject/autosumm/utils/__init__.py b/spaces/autosummproject/autosumm/utils/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/awaawawawa/iurf7irfuyytruyyugb/static/style.css b/spaces/awaawawawa/iurf7irfuyytruyyugb/static/style.css
deleted file mode 100644
index 6a3c98f8fab848caaaf7b844b24ce23c8c5c8dde..0000000000000000000000000000000000000000
--- a/spaces/awaawawawa/iurf7irfuyytruyyugb/static/style.css
+++ /dev/null
@@ -1,79 +0,0 @@
-body {
- --text: hsl(0 0% 15%);
- padding: 2.5rem;
- font-family: sans-serif;
- color: var(--text);
-}
-body.dark-theme {
- --text: hsl(0 0% 90%);
- background-color: hsl(223 39% 7%);
-}
-
-main {
- max-width: 80rem;
- text-align: center;
-}
-
-section {
- display: flex;
- flex-direction: column;
- align-items: center;
-}
-
-a {
- color: var(--text);
-}
-
-select, input, button, .text-gen-output {
- padding: 0.5rem 1rem;
-}
-
-select, img, input {
- margin: 0.5rem auto 1rem;
-}
-
-form {
- width: 25rem;
- margin: 0 auto;
-}
-
-input {
- width: 70%;
-}
-
-button {
- cursor: pointer;
-}
-
-.text-gen-output {
- min-height: 1.2rem;
- margin: 1rem;
- border: 0.5px solid grey;
-}
-
-#dataset button {
- width: 6rem;
- margin: 0.5rem;
-}
-
-#dataset button.hidden {
- visibility: hidden;
-}
-
-table {
- max-width: 40rem;
- text-align: left;
- border-collapse: collapse;
-}
-
-thead {
- font-weight: bold;
-}
-
-td {
- padding: 0.5rem;
-}
-
-td:not(thead td) {
- border: 0.5px solid grey;
-}
diff --git a/spaces/awacke1/runwayml-stable-diffusion-v1-5/app.py b/spaces/awacke1/runwayml-stable-diffusion-v1-5/app.py
deleted file mode 100644
index a82df332731f067826d3e1ef79fabceffb74d07e..0000000000000000000000000000000000000000
--- a/spaces/awacke1/runwayml-stable-diffusion-v1-5/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/runwayml/stable-diffusion-v1-5").launch()
\ No newline at end of file
diff --git a/spaces/badayvedat/LLaVA/docs/LLaVA_from_LLaMA2.md b/spaces/badayvedat/LLaVA/docs/LLaVA_from_LLaMA2.md
deleted file mode 100644
index b4163668a33ff705c28f5b103b727514161e5652..0000000000000000000000000000000000000000
--- a/spaces/badayvedat/LLaVA/docs/LLaVA_from_LLaMA2.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# LLaVA (based on Llama 2 LLM, Preview)
-
-*NOTE: This is a technical preview. We are still running hyperparameter search, and will release the final model soon. If you'd like to contribute to this, please contact us.*
-
-:llama: **-Introduction-** [Llama 2 is an open-source LLM released by Meta AI](https://about.fb.com/news/2023/07/llama-2/) today (July 18, 2023). Compared with its earlier version [Llama 1](https://ai.meta.com/blog/large-language-model-llama-meta-ai/), Llama 2 offers ***stronger language performance***, a ***longer context window***, and, importantly, is ***commercially usable***! While Llama 2 is changing the LLM market landscape in the language space, its multimodal ability remains unknown. We quickly developed the LLaVA variant based on the latest Llama 2 checkpoints and are releasing it to the community for public use.
-
-You need to apply for and download the latest Llama 2 checkpoints to start your own training (apply [here](https://ai.meta.com/resources/models-and-libraries/llama-downloads/)).
-
-
-## Training
-
-Please check out [`pretrain.sh`](https://github.com/haotian-liu/LLaVA/blob/main/scripts/pretrain.sh), [`finetune.sh`](https://github.com/haotian-liu/LLaVA/blob/main/scripts/finetune.sh), and [`finetune_lora.sh`](https://github.com/haotian-liu/LLaVA/blob/main/scripts/finetune_lora.sh).
-
-## LLaVA (based on Llama 2), What is different?
-
-:volcano: How is the new LLaVA based on Llama 2 different from Llama 1? The comparisons of the training process are described:
-- **Pre-training**. The pre-trained base LLM is changed from Llama 1 to Llama 2
-- **Language instruction-tuning**. The previous LLaVA model starts with Vicuna, which is instruction-tuned on ShareGPT data from Llama 1; the new LLaVA model starts with Llama 2 Chat, which is an instruction-tuned checkpoint on dialogue data from Llama 2.
-- **Multimodal instruction-tuning**. The same LLaVA-Lightning process is applied.
-
-
-### Results
-
-- Llama 2 is better at following role-playing instructions; Llama 2 fails to follow translation instructions
-- The quantitative evaluation on [LLaVA-Bench](https://github.com/haotian-liu/LLaVA/blob/main/docs/LLaVA_Bench.md) demonstrates on-par performance between Llama 2 and Llama 1 in LLaVA's multimodal chat ability.
-
-
-
-
diff --git a/spaces/banana-projects/web3d/node_modules/@tweenjs/tween.js/README.md b/spaces/banana-projects/web3d/node_modules/@tweenjs/tween.js/README.md
deleted file mode 100644
index 5e8c8df4fd2c29e39a7343dcb3661b1e174e721c..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/@tweenjs/tween.js/README.md
+++ /dev/null
@@ -1,295 +0,0 @@
-# tween.js
-
-JavaScript tweening engine for easy animations, incorporating Robert Penner's optimised easing equations.
-
-[![NPM Version][npm-image]][npm-url]
-[![NPM Downloads][downloads-image]][downloads-url]
-[![Travis tests][travis-image]][travis-url]
-[![Flattr this][flattr-image]][flattr-url]
-[![CDNJS][cdnjs-image]][cdnjs-url]
-
-```javascript
-var box = document.createElement('div');
-box.style.setProperty('background-color', '#008800');
-box.style.setProperty('width', '100px');
-box.style.setProperty('height', '100px');
-document.body.appendChild(box);
-
-// Setup the animation loop.
-function animate(time) {
- requestAnimationFrame(animate);
- TWEEN.update(time);
-}
-requestAnimationFrame(animate);
-
-var coords = { x: 0, y: 0 }; // Start at (0, 0)
-var tween = new TWEEN.Tween(coords) // Create a new tween that modifies 'coords'.
- .to({ x: 300, y: 200 }, 1000) // Move to (300, 200) in 1 second.
- .easing(TWEEN.Easing.Quadratic.Out) // Use an easing function to make the animation smooth.
- .onUpdate(function() { // Called after tween.js updates 'coords'.
- // Move 'box' to the position described by 'coords' with a CSS translation.
- box.style.setProperty('transform', 'translate(' + coords.x + 'px, ' + coords.y + 'px)');
- })
- .start(); // Start the tween immediately.
-```
-
-[Test it with CodePen](https://codepen.io/mikebolt/pen/zzzvZg)
-
-## Installation
-
-Download the [library](https://raw.githubusercontent.com/tweenjs/tween.js/master/src/Tween.js) and include it in your code:
-
-```html
-
-```
-
-You can also reference a CDN-hosted version in your code, thanks to cdnjs. For example:
-
-```html
-
-```
-
-See [tween.js](https://cdnjs.com/libraries/tween.js/) for more versions.
-
-### More advanced users might want to...
-
-#### Use `npm`
-
-```bash
-npm install @tweenjs/tween.js
-```
-
-Then include the Tween.js module with the standard node.js `require`:
-
-```javascript
-var TWEEN = require('@tweenjs/tween.js');
-```
-
-And you can use Tween.js as in all other examples--for example:
-
-```javascript
-var t = new TWEEN.Tween( /* etc */ );
-t.start();
-```
-
-You will need to use a tool such as `browserify` to convert code using this style into something that can be run in the browser (browsers don't know about `require`).
-
-#### Use `bower`
-
-```bash
-bower install @tweenjs/tweenjs --save
-```
-
-or install a specific tag. These are git tags, and you can run `git tag` in the command line for a list if you have cloned the repository locally, or you can also check out the list on the [tween.js tags page](https://github.com/tweenjs/tween.js/tags). For example, to install `v16.3.0`:
-
-```bash
-bower install @tweenjs/tweenjs#v16.3.0
-```
-
-Then reference the library source:
-
-```html
-
-```
-
-## Features
-
-* Does one thing and one thing only: tween properties
-* Doesn't take care of CSS units (e.g. appending `px`)
-* Doesn't interpolate colours
-* Easing functions are reusable outside of Tween
-* Can also use custom easing functions
-
-## Documentation
-
-* [User guide](./docs/user_guide.md)
-* [Contributor guide](./docs/contributor_guide.md)
-* [Tutorial](http://learningthreejs.com/blog/2011/08/17/tweenjs-for-smooth-animation/) using tween.js with three.js
-* Also: [libtween](https://github.com/jsm174/libtween), a port of tween.js to C by [jsm174](https://github.com/jsm174)
-* Also: [es6-tween](https://github.com/tweenjs/es6-tween), a port of tween.js to ES6/Harmony by [dalisoft](https://github.com/dalisoft)
-* [Understanding tween.js](https://mikebolt.me/article/understanding-tweenjs.html)
-
-## Examples
-
-
-
-## Tests
-
-You need to install `npm` first--this comes with node.js, so install that one first. Then, cd to `tween.js`'s directory and run:
-
-```bash
-npm install
-```
-
-if running the tests for the first time, to install additional dependencies for running tests, and then run
-
-```bash
-npm test
-```
-
-every time you want to run the tests.
-
-If you want to add any feature or change existing features, you *must* run the tests to make sure you didn't break anything else. If you send a pull request (PR) to add something new and it doesn't have tests, or the tests don't pass, the PR won't be accepted. See [contributing](CONTRIBUTING.md) for more information.
-
-## People
-
-Maintainers: [mikebolt](https://github.com/mikebolt), [sole](https://github.com/sole).
-
-[All contributors](http://github.com/tweenjs/tween.js/contributors).
-
-## Projects using tween.js
-
-[](https://aframe.io)
-[](http://www.moma.org/interactives/exhibitions/2012/inventingabstraction/)
-[](http://www.chromeweblab.com/)
-[](http://5013.es/toys/macchina)
-[](http://egraether.com/mine3d/)
-[](http://ro.me)
-[](http://data-arts.appspot.com/globe)
-[](http://www.androidify.com/)
-[](http://thewildernessdowntown.com/)
-[](http://dejavis.org/linechart)
-
-[npm-image]: https://img.shields.io/npm/v/@tweenjs/tween.js.svg
-[npm-url]: https://npmjs.org/package/@tweenjs/tween.js
-[downloads-image]: https://img.shields.io/npm/dm/@tweenjs/tween.js.svg
-[downloads-url]: https://npmjs.org/package/@tweenjs/tween.js
-[travis-image]: https://travis-ci.org/tweenjs/tween.js.svg?branch=master
-[travis-url]: https://travis-ci.org/tweenjs/tween.js
-[flattr-image]: https://api.flattr.com/button/flattr-badge-large.png
-[flattr-url]: https://flattr.com/thing/45014/tween-js
-[cdnjs-image]: https://img.shields.io/cdnjs/v/tween.js.svg
-[cdnjs-url]: https://cdnjs.com/libraries/tween.js
-
diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/TDSLoader.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/TDSLoader.js
deleted file mode 100644
index 3ea517af122737c1b0959f7348b5855b7a0d89a9..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/TDSLoader.js
+++ /dev/null
@@ -1,1127 +0,0 @@
-/*
- * Autodesk 3DS three.js file loader, based on lib3ds.
- *
- * Loads geometry with uv and materials basic properties with texture support.
- *
- * @author @tentone
- * @author @timknip
- * @class TDSLoader
- * @constructor
- */
-
-'use strict';
-
-THREE.TDSLoader = function ( manager ) {
-
- this.manager = ( manager !== undefined ) ? manager : THREE.DefaultLoadingManager;
- this.debug = false;
-
- this.group = null;
- this.position = 0;
-
- this.materials = [];
- this.meshes = [];
-
-};
-
-THREE.TDSLoader.prototype = {
-
- constructor: THREE.TDSLoader,
-
- crossOrigin: 'anonymous',
-
- /**
- * Load 3ds file from url.
- *
- * @method load
-	 * @param {String} url URL for the file.
- * @param {Function} onLoad onLoad callback, receives group Object3D as argument.
- * @param {Function} onProgress onProgress callback.
- * @param {Function} onError onError callback.
- */
- load: function ( url, onLoad, onProgress, onError ) {
-
- var scope = this;
-
- var path = this.path !== undefined ? this.path : THREE.LoaderUtils.extractUrlBase( url );
-
- var loader = new THREE.FileLoader( this.manager );
- loader.setPath( this.path );
- loader.setResponseType( 'arraybuffer' );
-
- loader.load( url, function ( data ) {
-
- onLoad( scope.parse( data, path ) );
-
- }, onProgress, onError );
-
- },
-
- /**
- * Parse arraybuffer data and load 3ds file.
- *
- * @method parse
- * @param {ArrayBuffer} arraybuffer Arraybuffer data to be loaded.
- * @param {String} path Path for external resources.
- * @return {Object3D} Group loaded from 3ds file.
- */
- parse: function ( arraybuffer, path ) {
-
- this.group = new THREE.Group();
- this.position = 0;
- this.materials = [];
- this.meshes = [];
-
- this.readFile( arraybuffer, path );
-
- for ( var i = 0; i < this.meshes.length; i ++ ) {
-
- this.group.add( this.meshes[ i ] );
-
- }
-
- return this.group;
-
- },
-
- /**
- * Decode file content to read 3ds data.
- *
- * @method readFile
- * @param {ArrayBuffer} arraybuffer Arraybuffer data to be loaded.
- */
- readFile: function ( arraybuffer, path ) {
-
- var data = new DataView( arraybuffer );
- var chunk = this.readChunk( data );
-
- if ( chunk.id === MLIBMAGIC || chunk.id === CMAGIC || chunk.id === M3DMAGIC ) {
-
- var next = this.nextChunk( data, chunk );
-
- while ( next !== 0 ) {
-
- if ( next === M3D_VERSION ) {
-
- var version = this.readDWord( data );
- this.debugMessage( '3DS file version: ' + version );
-
- } else if ( next === MDATA ) {
-
- this.resetPosition( data );
- this.readMeshData( data, path );
-
- } else {
-
- this.debugMessage( 'Unknown main chunk: ' + next.toString( 16 ) );
-
- }
-
- next = this.nextChunk( data, chunk );
-
- }
-
- }
-
- this.debugMessage( 'Parsed ' + this.meshes.length + ' meshes' );
-
- },
-
- /**
- * Read mesh data chunk.
- *
- * @method readMeshData
- * @param {Dataview} data Dataview in use.
- */
- readMeshData: function ( data, path ) {
-
- var chunk = this.readChunk( data );
- var next = this.nextChunk( data, chunk );
-
- while ( next !== 0 ) {
-
- if ( next === MESH_VERSION ) {
-
- var version = + this.readDWord( data );
- this.debugMessage( 'Mesh Version: ' + version );
-
- } else if ( next === MASTER_SCALE ) {
-
- var scale = this.readFloat( data );
- this.debugMessage( 'Master scale: ' + scale );
- this.group.scale.set( scale, scale, scale );
-
- } else if ( next === NAMED_OBJECT ) {
-
- this.debugMessage( 'Named Object' );
- this.resetPosition( data );
- this.readNamedObject( data );
-
- } else if ( next === MAT_ENTRY ) {
-
- this.debugMessage( 'Material' );
- this.resetPosition( data );
- this.readMaterialEntry( data, path );
-
- } else {
-
- this.debugMessage( 'Unknown MDATA chunk: ' + next.toString( 16 ) );
-
- }
-
- next = this.nextChunk( data, chunk );
-
- }
-
- },
-
- /**
- * Read named object chunk.
- *
- * @method readNamedObject
- * @param {Dataview} data Dataview in use.
- */
- readNamedObject: function ( data ) {
-
- var chunk = this.readChunk( data );
- var name = this.readString( data, 64 );
- chunk.cur = this.position;
-
- var next = this.nextChunk( data, chunk );
- while ( next !== 0 ) {
-
- if ( next === N_TRI_OBJECT ) {
-
- this.resetPosition( data );
- var mesh = this.readMesh( data );
- mesh.name = name;
- this.meshes.push( mesh );
-
- } else {
-
- this.debugMessage( 'Unknown named object chunk: ' + next.toString( 16 ) );
-
- }
-
- next = this.nextChunk( data, chunk );
-
- }
-
- this.endChunk( chunk );
-
- },
-
- /**
- * Read material data chunk and add it to the material list.
- *
- * @method readMaterialEntry
- * @param {Dataview} data Dataview in use.
- */
- readMaterialEntry: function ( data, path ) {
-
- var chunk = this.readChunk( data );
- var next = this.nextChunk( data, chunk );
- var material = new THREE.MeshPhongMaterial();
-
- while ( next !== 0 ) {
-
- if ( next === MAT_NAME ) {
-
- material.name = this.readString( data, 64 );
- this.debugMessage( ' Name: ' + material.name );
-
- } else if ( next === MAT_WIRE ) {
-
- this.debugMessage( ' Wireframe' );
- material.wireframe = true;
-
- } else if ( next === MAT_WIRE_SIZE ) {
-
- var value = this.readByte( data );
- material.wireframeLinewidth = value;
- this.debugMessage( ' Wireframe Thickness: ' + value );
-
- } else if ( next === MAT_TWO_SIDE ) {
-
- material.side = THREE.DoubleSide;
- this.debugMessage( ' DoubleSided' );
-
- } else if ( next === MAT_ADDITIVE ) {
-
- this.debugMessage( ' Additive Blending' );
- material.blending = THREE.AdditiveBlending;
-
- } else if ( next === MAT_DIFFUSE ) {
-
- this.debugMessage( ' Diffuse Color' );
- material.color = this.readColor( data );
-
- } else if ( next === MAT_SPECULAR ) {
-
- this.debugMessage( ' Specular Color' );
- material.specular = this.readColor( data );
-
- } else if ( next === MAT_AMBIENT ) {
-
- this.debugMessage( ' Ambient color' );
- material.color = this.readColor( data );
-
- } else if ( next === MAT_SHININESS ) {
-
- var shininess = this.readWord( data );
- material.shininess = shininess;
- this.debugMessage( ' Shininess : ' + shininess );
-
- } else if ( next === MAT_TEXMAP ) {
-
- this.debugMessage( ' ColorMap' );
- this.resetPosition( data );
- material.map = this.readMap( data, path );
-
- } else if ( next === MAT_BUMPMAP ) {
-
- this.debugMessage( ' BumpMap' );
- this.resetPosition( data );
- material.bumpMap = this.readMap( data, path );
-
- } else if ( next === MAT_OPACMAP ) {
-
- this.debugMessage( ' OpacityMap' );
- this.resetPosition( data );
- material.alphaMap = this.readMap( data, path );
-
- } else if ( next === MAT_SPECMAP ) {
-
- this.debugMessage( ' SpecularMap' );
- this.resetPosition( data );
- material.specularMap = this.readMap( data, path );
-
- } else {
-
- this.debugMessage( ' Unknown material chunk: ' + next.toString( 16 ) );
-
- }
-
- next = this.nextChunk( data, chunk );
-
- }
-
- this.endChunk( chunk );
-
- this.materials[ material.name ] = material;
-
- },
-
- /**
- * Read mesh data chunk.
- *
- * @method readMesh
- * @param {Dataview} data Dataview in use.
- */
- readMesh: function ( data ) {
-
- var chunk = this.readChunk( data );
- var next = this.nextChunk( data, chunk );
-
- var geometry = new THREE.BufferGeometry();
- var uvs = [];
-
- var material = new THREE.MeshPhongMaterial();
- var mesh = new THREE.Mesh( geometry, material );
- mesh.name = 'mesh';
-
- while ( next !== 0 ) {
-
- if ( next === POINT_ARRAY ) {
-
- var points = this.readWord( data );
-
- this.debugMessage( ' Vertex: ' + points );
-
- //BufferGeometry
-
- var vertices = [];
-
- for ( var i = 0; i < points; i ++ ) {
-
- vertices.push( this.readFloat( data ) );
- vertices.push( this.readFloat( data ) );
- vertices.push( this.readFloat( data ) );
-
- }
-
- geometry.addAttribute( 'position', new THREE.Float32BufferAttribute( vertices, 3 ) );
-
- } else if ( next === FACE_ARRAY ) {
-
- this.resetPosition( data );
- this.readFaceArray( data, mesh );
-
- } else if ( next === TEX_VERTS ) {
-
- var texels = this.readWord( data );
-
- this.debugMessage( ' UV: ' + texels );
-
- //BufferGeometry
-
- var uvs = [];
-
- for ( var i = 0; i < texels; i ++ ) {
-
- uvs.push( this.readFloat( data ) );
- uvs.push( this.readFloat( data ) );
-
- }
-
- geometry.addAttribute( 'uv', new THREE.Float32BufferAttribute( uvs, 2 ) );
-
-
- } else if ( next === MESH_MATRIX ) {
-
-				this.debugMessage( '   Transformation Matrix (TODO)' );
-
- var values = [];
- for ( var i = 0; i < 12; i ++ ) {
-
- values[ i ] = this.readFloat( data );
-
- }
-
- var matrix = new THREE.Matrix4();
-
- //X Line
- matrix.elements[ 0 ] = values[ 0 ];
- matrix.elements[ 1 ] = values[ 6 ];
- matrix.elements[ 2 ] = values[ 3 ];
- matrix.elements[ 3 ] = values[ 9 ];
-
- //Y Line
- matrix.elements[ 4 ] = values[ 2 ];
- matrix.elements[ 5 ] = values[ 8 ];
- matrix.elements[ 6 ] = values[ 5 ];
- matrix.elements[ 7 ] = values[ 11 ];
-
- //Z Line
- matrix.elements[ 8 ] = values[ 1 ];
- matrix.elements[ 9 ] = values[ 7 ];
- matrix.elements[ 10 ] = values[ 4 ];
- matrix.elements[ 11 ] = values[ 10 ];
-
- //W Line
- matrix.elements[ 12 ] = 0;
- matrix.elements[ 13 ] = 0;
- matrix.elements[ 14 ] = 0;
- matrix.elements[ 15 ] = 1;
-
- matrix.transpose();
-
- var inverse = new THREE.Matrix4();
- inverse.getInverse( matrix, true );
- geometry.applyMatrix( inverse );
-
- matrix.decompose( mesh.position, mesh.quaternion, mesh.scale );
-
- } else {
-
- this.debugMessage( ' Unknown mesh chunk: ' + next.toString( 16 ) );
-
- }
-
- next = this.nextChunk( data, chunk );
-
- }
-
- this.endChunk( chunk );
-
- geometry.computeVertexNormals();
-
- return mesh;
-
- },
-
- /**
- * Read face array data chunk.
- *
- * @method readFaceArray
- * @param {Dataview} data Dataview in use.
- * @param {Mesh} mesh Mesh to be filled with the data read.
- */
- readFaceArray: function ( data, mesh ) {
-
- var chunk = this.readChunk( data );
- var faces = this.readWord( data );
-
- this.debugMessage( ' Faces: ' + faces );
-
- var index = [];
-
- for ( var i = 0; i < faces; ++ i ) {
-
- index.push( this.readWord( data ), this.readWord( data ), this.readWord( data ) );
-
- var visibility = this.readWord( data );
-
- }
-
- mesh.geometry.setIndex( index );
-
- //The rest of the FACE_ARRAY chunk is subchunks
-
- while ( this.position < chunk.end ) {
-
- var chunk = this.readChunk( data );
-
- if ( chunk.id === MSH_MAT_GROUP ) {
-
- this.debugMessage( ' Material Group' );
-
- this.resetPosition( data );
-
- var group = this.readMaterialGroup( data );
-
- var material = this.materials[ group.name ];
-
- if ( material !== undefined ) {
-
- mesh.material = material;
-
- if ( material.name === '' ) {
-
- material.name = mesh.name;
-
- }
-
- }
-
- } else {
-
- this.debugMessage( ' Unknown face array chunk: ' + chunk.toString( 16 ) );
-
- }
-
- this.endChunk( chunk );
-
- }
-
- this.endChunk( chunk );
-
- },
-
- /**
- * Read texture map data chunk.
- *
- * @method readMap
- * @param {Dataview} data Dataview in use.
- * @return {Texture} Texture read from this data chunk.
- */
- readMap: function ( data, path ) {
-
- var chunk = this.readChunk( data );
- var next = this.nextChunk( data, chunk );
- var texture = {};
-
- var loader = new THREE.TextureLoader( this.manager );
- loader.setPath( this.resourcePath || path ).setCrossOrigin( this.crossOrigin );
-
- while ( next !== 0 ) {
-
- if ( next === MAT_MAPNAME ) {
-
- var name = this.readString( data, 128 );
- texture = loader.load( name );
-
- this.debugMessage( ' File: ' + path + name );
-
- } else if ( next === MAT_MAP_UOFFSET ) {
-
- texture.offset.x = this.readFloat( data );
- this.debugMessage( ' OffsetX: ' + texture.offset.x );
-
- } else if ( next === MAT_MAP_VOFFSET ) {
-
- texture.offset.y = this.readFloat( data );
- this.debugMessage( ' OffsetY: ' + texture.offset.y );
-
- } else if ( next === MAT_MAP_USCALE ) {
-
- texture.repeat.x = this.readFloat( data );
- this.debugMessage( ' RepeatX: ' + texture.repeat.x );
-
- } else if ( next === MAT_MAP_VSCALE ) {
-
- texture.repeat.y = this.readFloat( data );
- this.debugMessage( ' RepeatY: ' + texture.repeat.y );
-
- } else {
-
- this.debugMessage( ' Unknown map chunk: ' + next.toString( 16 ) );
-
- }
-
- next = this.nextChunk( data, chunk );
-
- }
-
- this.endChunk( chunk );
-
- return texture;
-
- },
-
- /**
- * Read material group data chunk.
- *
- * @method readMaterialGroup
- * @param {Dataview} data Dataview in use.
- * @return {Object} Object with name and index of the object.
- */
- readMaterialGroup: function ( data ) {
-
- var chunk = this.readChunk( data );
- var name = this.readString( data, 64 );
- var numFaces = this.readWord( data );
-
- this.debugMessage( ' Name: ' + name );
- this.debugMessage( ' Faces: ' + numFaces );
-
- var index = [];
- for ( var i = 0; i < numFaces; ++ i ) {
-
- index.push( this.readWord( data ) );
-
- }
-
- return { name: name, index: index };
-
- },
-
- /**
- * Read a color value.
- *
- * @method readColor
- * @param {DataView} data Dataview.
- * @return {Color} Color value read..
- */
- readColor: function ( data ) {
-
- var chunk = this.readChunk( data );
- var color = new THREE.Color();
-
- if ( chunk.id === COLOR_24 || chunk.id === LIN_COLOR_24 ) {
-
- var r = this.readByte( data );
- var g = this.readByte( data );
- var b = this.readByte( data );
-
- color.setRGB( r / 255, g / 255, b / 255 );
-
- this.debugMessage( ' Color: ' + color.r + ', ' + color.g + ', ' + color.b );
-
- } else if ( chunk.id === COLOR_F || chunk.id === LIN_COLOR_F ) {
-
- var r = this.readFloat( data );
- var g = this.readFloat( data );
- var b = this.readFloat( data );
-
- color.setRGB( r, g, b );
-
- this.debugMessage( ' Color: ' + color.r + ', ' + color.g + ', ' + color.b );
-
- } else {
-
- this.debugMessage( ' Unknown color chunk: ' + chunk.toString( 16 ) );
-
- }
-
- this.endChunk( chunk );
- return color;
-
- },
-
- /**
- * Read next chunk of data.
- *
- * @method readChunk
- * @param {DataView} data Dataview.
- * @return {Object} Chunk of data read.
- */
- readChunk: function ( data ) {
-
- var chunk = {};
-
- chunk.cur = this.position;
- chunk.id = this.readWord( data );
- chunk.size = this.readDWord( data );
- chunk.end = chunk.cur + chunk.size;
- chunk.cur += 6;
-
- return chunk;
-
- },
-
- /**
- * Set position to the end of the current chunk of data.
- *
- * @method endChunk
- * @param {Object} chunk Data chunk.
- */
- endChunk: function ( chunk ) {
-
- this.position = chunk.end;
-
- },
-
- /**
- * Move to the next data chunk.
- *
- * @method nextChunk
- * @param {DataView} data Dataview.
-	 * @param {Object} chunk Data chunk.
-	 * @return {Number} ID of the next sub-chunk, or 0 when the chunk has been fully read.
-	 */
- nextChunk: function ( data, chunk ) {
-
- if ( chunk.cur >= chunk.end ) {
-
- return 0;
-
- }
-
- this.position = chunk.cur;
-
- try {
-
- var next = this.readChunk( data );
- chunk.cur += next.size;
- return next.id;
-
- } catch ( e ) {
-
- this.debugMessage( 'Unable to read chunk at ' + this.position );
- return 0;
-
- }
-
- },
-
- /**
-	 * Rewind the dataview position by the size of a chunk header (6 bytes),
-	 * so the current chunk can be read again by a more specific reader.
-	 *
-	 * @method resetPosition
-	 */
- resetPosition: function () {
-
- this.position -= 6;
-
- },
-
- /**
- * Read byte value.
- *
- * @method readByte
- * @param {DataView} data Dataview to read data from.
- * @return {Number} Data read from the dataview.
- */
- readByte: function ( data ) {
-
- var v = data.getUint8( this.position, true );
- this.position += 1;
- return v;
-
- },
-
- /**
- * Read 32 bit float value.
- *
- * @method readFloat
- * @param {DataView} data Dataview to read data from.
- * @return {Number} Data read from the dataview.
- */
- readFloat: function ( data ) {
-
- try {
-
- var v = data.getFloat32( this.position, true );
- this.position += 4;
- return v;
-
- } catch ( e ) {
-
- this.debugMessage( e + ' ' + this.position + ' ' + data.byteLength );
-
- }
-
- },
-
- /**
- * Read 32 bit signed integer value.
- *
- * @method readInt
- * @param {DataView} data Dataview to read data from.
- * @return {Number} Data read from the dataview.
- */
- readInt: function ( data ) {
-
- var v = data.getInt32( this.position, true );
- this.position += 4;
- return v;
-
- },
-
- /**
- * Read 16 bit signed integer value.
- *
- * @method readShort
- * @param {DataView} data Dataview to read data from.
- * @return {Number} Data read from the dataview.
- */
- readShort: function ( data ) {
-
- var v = data.getInt16( this.position, true );
- this.position += 2;
- return v;
-
- },
-
- /**
-	 * Read 32 bit unsigned integer (DWORD) value.
- *
- * @method readDWord
- * @param {DataView} data Dataview to read data from.
- * @return {Number} Data read from the dataview.
- */
- readDWord: function ( data ) {
-
- var v = data.getUint32( this.position, true );
- this.position += 4;
- return v;
-
- },
-
- /**
-	 * Read 16 bit unsigned integer (WORD) value.
- *
- * @method readWord
- * @param {DataView} data Dataview to read data from.
- * @return {Number} Data read from the dataview.
- */
- readWord: function ( data ) {
-
- var v = data.getUint16( this.position, true );
- this.position += 2;
- return v;
-
- },
-
- /**
- * Read string value.
- *
- * @method readString
- * @param {DataView} data Dataview to read data from.
- * @param {Number} maxLength Max size of the string to be read.
- * @return {String} Data read from the dataview.
- */
- readString: function ( data, maxLength ) {
-
- var s = '';
-
- for ( var i = 0; i < maxLength; i ++ ) {
-
- var c = this.readByte( data );
- if ( ! c ) {
-
- break;
-
- }
-
- s += String.fromCharCode( c );
-
- }
-
- return s;
-
- },
-
- /**
- * Set path to adjust the path to the original 3ds file.
- *
- * @method setPath
- * @param {String} path Path to file.
- * @return Self for chaining.
- */
- setPath: function ( path ) {
-
- this.path = path;
-
- return this;
-
- },
-
- /**
- * Set resource path used to determine the path to attached resources like textures.
- *
- * @method setResourcePath
- * @param {String} resourcePath Path to resources.
- * @return Self for chaining.
- */
- setResourcePath: function ( resourcePath ) {
-
- this.resourcePath = resourcePath;
-
- return this;
-
- },
-
- /**
- * Set crossOrigin value to configure CORS settings
- * for the image loading process.
- *
- * @method setCrossOrigin
- * @param {String} crossOrigin crossOrigin string.
- * @return Self for chaining.
- */
- setCrossOrigin: function ( crossOrigin ) {
-
- this.crossOrigin = crossOrigin;
-
- return this;
-
- },
-
- /**
- * Print debug message to the console.
- *
- * Is controlled by a flag to show or hide debug messages.
- *
- * @method debugMessage
- * @param {Object} message Debug message to print to the console.
- */
- debugMessage: function ( message ) {
-
- if ( this.debug ) {
-
- console.log( message );
-
- }
-
- }
-};
-
-var NULL_CHUNK = 0x0000;
-var M3DMAGIC = 0x4D4D;
-var SMAGIC = 0x2D2D;
-var LMAGIC = 0x2D3D;
-var MLIBMAGIC = 0x3DAA;
-var MATMAGIC = 0x3DFF;
-var CMAGIC = 0xC23D;
-var M3D_VERSION = 0x0002;
-var M3D_KFVERSION = 0x0005;
-var COLOR_F = 0x0010;
-var COLOR_24 = 0x0011;
-var LIN_COLOR_24 = 0x0012;
-var LIN_COLOR_F = 0x0013;
-var INT_PERCENTAGE = 0x0030;
-var FLOAT_PERCENTAGE = 0x0031;
-var MDATA = 0x3D3D;
-var MESH_VERSION = 0x3D3E;
-var MASTER_SCALE = 0x0100;
-var LO_SHADOW_BIAS = 0x1400;
-var HI_SHADOW_BIAS = 0x1410;
-var SHADOW_MAP_SIZE = 0x1420;
-var SHADOW_SAMPLES = 0x1430;
-var SHADOW_RANGE = 0x1440;
-var SHADOW_FILTER = 0x1450;
-var RAY_BIAS = 0x1460;
-var O_CONSTS = 0x1500;
-var AMBIENT_LIGHT = 0x2100;
-var BIT_MAP = 0x1100;
-var SOLID_BGND = 0x1200;
-var V_GRADIENT = 0x1300;
-var USE_BIT_MAP = 0x1101;
-var USE_SOLID_BGND = 0x1201;
-var USE_V_GRADIENT = 0x1301;
-var FOG = 0x2200;
-var FOG_BGND = 0x2210;
-var LAYER_FOG = 0x2302;
-var DISTANCE_CUE = 0x2300;
-var DCUE_BGND = 0x2310;
-var USE_FOG = 0x2201;
-var USE_LAYER_FOG = 0x2303;
-var USE_DISTANCE_CUE = 0x2301;
-var MAT_ENTRY = 0xAFFF;
-var MAT_NAME = 0xA000;
-var MAT_AMBIENT = 0xA010;
-var MAT_DIFFUSE = 0xA020;
-var MAT_SPECULAR = 0xA030;
-var MAT_SHININESS = 0xA040;
-var MAT_SHIN2PCT = 0xA041;
-var MAT_TRANSPARENCY = 0xA050;
-var MAT_XPFALL = 0xA052;
-var MAT_USE_XPFALL = 0xA240;
-var MAT_REFBLUR = 0xA053;
-var MAT_SHADING = 0xA100;
-var MAT_USE_REFBLUR = 0xA250;
-var MAT_SELF_ILLUM = 0xA084;
-var MAT_TWO_SIDE = 0xA081;
-var MAT_DECAL = 0xA082;
-var MAT_ADDITIVE = 0xA083;
-var MAT_WIRE = 0xA085;
-var MAT_FACEMAP = 0xA088;
-var MAT_TRANSFALLOFF_IN = 0xA08A;
-var MAT_PHONGSOFT = 0xA08C;
-var MAT_WIREABS = 0xA08E;
-var MAT_WIRE_SIZE = 0xA087;
-var MAT_TEXMAP = 0xA200;
-var MAT_SXP_TEXT_DATA = 0xA320;
-var MAT_TEXMASK = 0xA33E;
-var MAT_SXP_TEXTMASK_DATA = 0xA32A;
-var MAT_TEX2MAP = 0xA33A;
-var MAT_SXP_TEXT2_DATA = 0xA321;
-var MAT_TEX2MASK = 0xA340;
-var MAT_SXP_TEXT2MASK_DATA = 0xA32C;
-var MAT_OPACMAP = 0xA210;
-var MAT_SXP_OPAC_DATA = 0xA322;
-var MAT_OPACMASK = 0xA342;
-var MAT_SXP_OPACMASK_DATA = 0xA32E;
-var MAT_BUMPMAP = 0xA230;
-var MAT_SXP_BUMP_DATA = 0xA324;
-var MAT_BUMPMASK = 0xA344;
-var MAT_SXP_BUMPMASK_DATA = 0xA330;
-var MAT_SPECMAP = 0xA204;
-var MAT_SXP_SPEC_DATA = 0xA325;
-var MAT_SPECMASK = 0xA348;
-var MAT_SXP_SPECMASK_DATA = 0xA332;
-var MAT_SHINMAP = 0xA33C;
-var MAT_SXP_SHIN_DATA = 0xA326;
-var MAT_SHINMASK = 0xA346;
-var MAT_SXP_SHINMASK_DATA = 0xA334;
-var MAT_SELFIMAP = 0xA33D;
-var MAT_SXP_SELFI_DATA = 0xA328;
-var MAT_SELFIMASK = 0xA34A;
-var MAT_SXP_SELFIMASK_DATA = 0xA336;
-var MAT_REFLMAP = 0xA220;
-var MAT_REFLMASK = 0xA34C;
-var MAT_SXP_REFLMASK_DATA = 0xA338;
-var MAT_ACUBIC = 0xA310;
-var MAT_MAPNAME = 0xA300;
-var MAT_MAP_TILING = 0xA351;
-var MAT_MAP_TEXBLUR = 0xA353;
-var MAT_MAP_USCALE = 0xA354;
-var MAT_MAP_VSCALE = 0xA356;
-var MAT_MAP_UOFFSET = 0xA358;
-var MAT_MAP_VOFFSET = 0xA35A;
-var MAT_MAP_ANG = 0xA35C;
-var MAT_MAP_COL1 = 0xA360;
-var MAT_MAP_COL2 = 0xA362;
-var MAT_MAP_RCOL = 0xA364;
-var MAT_MAP_GCOL = 0xA366;
-var MAT_MAP_BCOL = 0xA368;
-var NAMED_OBJECT = 0x4000;
-var N_DIRECT_LIGHT = 0x4600;
-var DL_OFF = 0x4620;
-var DL_OUTER_RANGE = 0x465A;
-var DL_INNER_RANGE = 0x4659;
-var DL_MULTIPLIER = 0x465B;
-var DL_EXCLUDE = 0x4654;
-var DL_ATTENUATE = 0x4625;
-var DL_SPOTLIGHT = 0x4610;
-var DL_SPOT_ROLL = 0x4656;
-var DL_SHADOWED = 0x4630;
-var DL_LOCAL_SHADOW2 = 0x4641;
-var DL_SEE_CONE = 0x4650;
-var DL_SPOT_RECTANGULAR = 0x4651;
-var DL_SPOT_ASPECT = 0x4657;
-var DL_SPOT_PROJECTOR = 0x4653;
-var DL_SPOT_OVERSHOOT = 0x4652;
-var DL_RAY_BIAS = 0x4658;
-var DL_RAYSHAD = 0x4627;
-var N_CAMERA = 0x4700;
-var CAM_SEE_CONE = 0x4710;
-var CAM_RANGES = 0x4720;
-var OBJ_HIDDEN = 0x4010;
-var OBJ_VIS_LOFTER = 0x4011;
-var OBJ_DOESNT_CAST = 0x4012;
-var OBJ_DONT_RECVSHADOW = 0x4017;
-var OBJ_MATTE = 0x4013;
-var OBJ_FAST = 0x4014;
-var OBJ_PROCEDURAL = 0x4015;
-var OBJ_FROZEN = 0x4016;
-var N_TRI_OBJECT = 0x4100;
-var POINT_ARRAY = 0x4110;
-var POINT_FLAG_ARRAY = 0x4111;
-var FACE_ARRAY = 0x4120;
-var MSH_MAT_GROUP = 0x4130;
-var SMOOTH_GROUP = 0x4150;
-var MSH_BOXMAP = 0x4190;
-var TEX_VERTS = 0x4140;
-var MESH_MATRIX = 0x4160;
-var MESH_COLOR = 0x4165;
-var MESH_TEXTURE_INFO = 0x4170;
-var KFDATA = 0xB000;
-var KFHDR = 0xB00A;
-var KFSEG = 0xB008;
-var KFCURTIME = 0xB009;
-var AMBIENT_NODE_TAG = 0xB001;
-var OBJECT_NODE_TAG = 0xB002;
-var CAMERA_NODE_TAG = 0xB003;
-var TARGET_NODE_TAG = 0xB004;
-var LIGHT_NODE_TAG = 0xB005;
-var L_TARGET_NODE_TAG = 0xB006;
-var SPOTLIGHT_NODE_TAG = 0xB007;
-var NODE_ID = 0xB030;
-var NODE_HDR = 0xB010;
-var PIVOT = 0xB013;
-var INSTANCE_NAME = 0xB011;
-var MORPH_SMOOTH = 0xB015;
-var BOUNDBOX = 0xB014;
-var POS_TRACK_TAG = 0xB020;
-var COL_TRACK_TAG = 0xB025;
-var ROT_TRACK_TAG = 0xB021;
-var SCL_TRACK_TAG = 0xB022;
-var MORPH_TRACK_TAG = 0xB026;
-var FOV_TRACK_TAG = 0xB023;
-var ROLL_TRACK_TAG = 0xB024;
-var HOT_TRACK_TAG = 0xB027;
-var FALL_TRACK_TAG = 0xB028;
-var HIDE_TRACK_TAG = 0xB029;
-var POLY_2D = 0x5000;
-var SHAPE_OK = 0x5010;
-var SHAPE_NOT_OK = 0x5011;
-var SHAPE_HOOK = 0x5020;
-var PATH_3D = 0x6000;
-var PATH_MATRIX = 0x6005;
-var SHAPE_2D = 0x6010;
-var M_SCALE = 0x6020;
-var M_TWIST = 0x6030;
-var M_TEETER = 0x6040;
-var M_FIT = 0x6050;
-var M_BEVEL = 0x6060;
-var XZ_CURVE = 0x6070;
-var YZ_CURVE = 0x6080;
-var INTERPCT = 0x6090;
-var DEFORM_LIMIT = 0x60A0;
-var USE_CONTOUR = 0x6100;
-var USE_TWEEN = 0x6110;
-var USE_SCALE = 0x6120;
-var USE_TWIST = 0x6130;
-var USE_TEETER = 0x6140;
-var USE_FIT = 0x6150;
-var USE_BEVEL = 0x6160;
-var DEFAULT_VIEW = 0x3000;
-var VIEW_TOP = 0x3010;
-var VIEW_BOTTOM = 0x3020;
-var VIEW_LEFT = 0x3030;
-var VIEW_RIGHT = 0x3040;
-var VIEW_FRONT = 0x3050;
-var VIEW_BACK = 0x3060;
-var VIEW_USER = 0x3070;
-var VIEW_CAMERA = 0x3080;
-var VIEW_WINDOW = 0x3090;
-var VIEWPORT_LAYOUT_OLD = 0x7000;
-var VIEWPORT_DATA_OLD = 0x7010;
-var VIEWPORT_LAYOUT = 0x7001;
-var VIEWPORT_DATA = 0x7011;
-var VIEWPORT_DATA_3 = 0x7012;
-var VIEWPORT_SIZE = 0x7020;
-var NETWORK_VIEW = 0x7030;
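
As a side note on the loader above (not part of the original file): readChunk, endChunk and nextChunk follow the 3DS chunk layout — a 16-bit chunk id followed by a 32-bit chunk size that includes the 6-byte header itself. The hypothetical Python sketch below illustrates that layout with `struct`; the helper names and the choice of container ids are illustrative only.

```python
import struct

def read_chunk(buf, pos):
    # A 3DS chunk header is 6 bytes: uint16 id + uint32 size, little-endian.
    # The size includes the header, so the payload starts at pos + 6 and the
    # chunk ends at pos + size.
    chunk_id, size = struct.unpack_from("<HI", buf, pos)
    return {"id": chunk_id, "cur": pos + 6, "end": pos + size}

def walk_chunks(buf, start, end, depth=0):
    # Walk sibling chunks between start and end, descending into the two
    # top-level containers (M3DMAGIC 0x4D4D and MDATA 0x3D3D) for illustration.
    pos = start
    while pos + 6 <= end:
        chunk = read_chunk(buf, pos)
        print("  " * depth + hex(chunk["id"]), chunk["end"] - pos, "bytes")
        if chunk["id"] in (0x4D4D, 0x3D3D):
            walk_chunks(buf, chunk["cur"], chunk["end"], depth + 1)
        pos = chunk["end"]  # same effect as endChunk() above

# Usage sketch (assumes a local 'scene.3ds' file exists):
# data = open("scene.3ds", "rb").read()
# walk_chunks(data, 0, len(data))
```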
diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/accessors/ScreenUVNode.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/accessors/ScreenUVNode.js
deleted file mode 100644
index a2aac5f0108f8dce48daff00ad4f2b1c540b5796..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/accessors/ScreenUVNode.js
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * @author sunag / http://www.sunag.com.br/
- */
-
-import { TempNode } from '../core/TempNode.js';
-import { ResolutionNode } from './ResolutionNode.js';
-
-function ScreenUVNode( resolution ) {
-
- TempNode.call( this, 'v2' );
-
- this.resolution = resolution || new ResolutionNode();
-
-}
-
-ScreenUVNode.prototype = Object.create( TempNode.prototype );
-ScreenUVNode.prototype.constructor = ScreenUVNode;
-ScreenUVNode.prototype.nodeType = "ScreenUV";
-
-ScreenUVNode.prototype.generate = function ( builder, output ) {
-
- var result;
-
- if ( builder.isShader( 'fragment' ) ) {
-
- result = '( gl_FragCoord.xy / ' + this.resolution.build( builder, 'v2' ) + ')';
-
- } else {
-
- console.warn( "THREE.ScreenUVNode is not compatible with " + builder.shader + " shader." );
-
- result = 'vec2( 0.0 )';
-
- }
-
- return builder.format( result, this.getType( builder ), output );
-
-};
-
-ScreenUVNode.prototype.copy = function ( source ) {
-
- TempNode.prototype.copy.call( this, source );
-
- this.resolution = source.resolution;
-
-};
-
-ScreenUVNode.prototype.toJSON = function ( meta ) {
-
- var data = this.getJSONNode( meta );
-
- if ( ! data ) {
-
- data = this.createJSONNode( meta );
-
- data.resolution = this.resolution.toJSON( meta ).uuid;
-
- }
-
- return data;
-
-};
-
-export { ScreenUVNode };
-
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/skinnormal_vertex.glsl.js b/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/skinnormal_vertex.glsl.js
deleted file mode 100644
index 2b8e5cc84c7e512f79949d51f354a42be554b5ab..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/skinnormal_vertex.glsl.js
+++ /dev/null
@@ -1,20 +0,0 @@
-export default /* glsl */`
-#ifdef USE_SKINNING
-
- mat4 skinMatrix = mat4( 0.0 );
- skinMatrix += skinWeight.x * boneMatX;
- skinMatrix += skinWeight.y * boneMatY;
- skinMatrix += skinWeight.z * boneMatZ;
- skinMatrix += skinWeight.w * boneMatW;
- skinMatrix = bindMatrixInverse * skinMatrix * bindMatrix;
-
- objectNormal = vec4( skinMatrix * vec4( objectNormal, 0.0 ) ).xyz;
-
- #ifdef USE_TANGENT
-
- objectTangent = vec4( skinMatrix * vec4( objectTangent, 0.0 ) ).xyz;
-
- #endif
-
-#endif
-`;
diff --git a/spaces/bhasker412/IDD-YOLO-Tracking/trackers/strongsort/sort/kalman_filter.py b/spaces/bhasker412/IDD-YOLO-Tracking/trackers/strongsort/sort/kalman_filter.py
deleted file mode 100644
index 87c48d7e332bef5f8feab8abf7936409abbf5d03..0000000000000000000000000000000000000000
--- a/spaces/bhasker412/IDD-YOLO-Tracking/trackers/strongsort/sort/kalman_filter.py
+++ /dev/null
@@ -1,214 +0,0 @@
-# vim: expandtab:ts=4:sw=4
-import numpy as np
-import scipy.linalg
-"""
-Table for the 0.95 quantile of the chi-square distribution with N degrees of
-freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv
-function and used as Mahalanobis gating threshold.
-"""
-chi2inv95 = {
- 1: 3.8415,
- 2: 5.9915,
- 3: 7.8147,
- 4: 9.4877,
- 5: 11.070,
- 6: 12.592,
- 7: 14.067,
- 8: 15.507,
- 9: 16.919}
-
-
-class KalmanFilter(object):
- """
- A simple Kalman filter for tracking bounding boxes in image space.
- The 8-dimensional state space
- x, y, a, h, vx, vy, va, vh
- contains the bounding box center position (x, y), aspect ratio a, height h,
- and their respective velocities.
- Object motion follows a constant velocity model. The bounding box location
- (x, y, a, h) is taken as direct observation of the state space (linear
- observation model).
- """
-
- def __init__(self):
- ndim, dt = 4, 1.
-
- # Create Kalman filter model matrices.
- self._motion_mat = np.eye(2 * ndim, 2 * ndim)
- for i in range(ndim):
- self._motion_mat[i, ndim + i] = dt
-
- self._update_mat = np.eye(ndim, 2 * ndim)
-
- # Motion and observation uncertainty are chosen relative to the current
- # state estimate. These weights control the amount of uncertainty in
- # the model. This is a bit hacky.
- self._std_weight_position = 1. / 20
- self._std_weight_velocity = 1. / 160
-
- def initiate(self, measurement):
- """Create track from unassociated measurement.
- Parameters
- ----------
- measurement : ndarray
- Bounding box coordinates (x, y, a, h) with center position (x, y),
- aspect ratio a, and height h.
- Returns
- -------
- (ndarray, ndarray)
- Returns the mean vector (8 dimensional) and covariance matrix (8x8
- dimensional) of the new track. Unobserved velocities are initialized
- to 0 mean.
- """
- mean_pos = measurement
- mean_vel = np.zeros_like(mean_pos)
- mean = np.r_[mean_pos, mean_vel]
-
- std = [
- 2 * self._std_weight_position * measurement[0], # the center point x
- 2 * self._std_weight_position * measurement[1], # the center point y
- 1 * measurement[2], # the ratio of width/height
- 2 * self._std_weight_position * measurement[3], # the height
- 10 * self._std_weight_velocity * measurement[0],
- 10 * self._std_weight_velocity * measurement[1],
- 0.1 * measurement[2],
- 10 * self._std_weight_velocity * measurement[3]]
- covariance = np.diag(np.square(std))
- return mean, covariance
-
- def predict(self, mean, covariance):
- """Run Kalman filter prediction step.
- Parameters
- ----------
- mean : ndarray
- The 8 dimensional mean vector of the object state at the previous
- time step.
- covariance : ndarray
- The 8x8 dimensional covariance matrix of the object state at the
- previous time step.
- Returns
- -------
- (ndarray, ndarray)
- Returns the mean vector and covariance matrix of the predicted
- state. Unobserved velocities are initialized to 0 mean.
- """
- std_pos = [
- self._std_weight_position * mean[0],
- self._std_weight_position * mean[1],
- 1 * mean[2],
- self._std_weight_position * mean[3]]
- std_vel = [
- self._std_weight_velocity * mean[0],
- self._std_weight_velocity * mean[1],
- 0.1 * mean[2],
- self._std_weight_velocity * mean[3]]
- motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))
-
- mean = np.dot(self._motion_mat, mean)
- covariance = np.linalg.multi_dot((
- self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
-
- return mean, covariance
-
- def project(self, mean, covariance, confidence=.0):
- """Project state distribution to measurement space.
- Parameters
- ----------
- mean : ndarray
- The state's mean vector (8 dimensional array).
- covariance : ndarray
- The state's covariance matrix (8x8 dimensional).
-        confidence: (dyh) Detection confidence of the bounding box.
- Returns
- -------
- (ndarray, ndarray)
- Returns the projected mean and covariance matrix of the given state
- estimate.
- """
- std = [
- self._std_weight_position * mean[3],
- self._std_weight_position * mean[3],
- 1e-1,
- self._std_weight_position * mean[3]]
-
-
- std = [(1 - confidence) * x for x in std]
-
- innovation_cov = np.diag(np.square(std))
-
- mean = np.dot(self._update_mat, mean)
- covariance = np.linalg.multi_dot((
- self._update_mat, covariance, self._update_mat.T))
- return mean, covariance + innovation_cov
-
- def update(self, mean, covariance, measurement, confidence=.0):
- """Run Kalman filter correction step.
- Parameters
- ----------
- mean : ndarray
- The predicted state's mean vector (8 dimensional).
- covariance : ndarray
- The state's covariance matrix (8x8 dimensional).
- measurement : ndarray
- The 4 dimensional measurement vector (x, y, a, h), where (x, y)
- is the center position, a the aspect ratio, and h the height of the
- bounding box.
-        confidence: (dyh) Detection confidence of the bounding box.
- Returns
- -------
- (ndarray, ndarray)
- Returns the measurement-corrected state distribution.
- """
- projected_mean, projected_cov = self.project(mean, covariance, confidence)
-
- chol_factor, lower = scipy.linalg.cho_factor(
- projected_cov, lower=True, check_finite=False)
- kalman_gain = scipy.linalg.cho_solve(
- (chol_factor, lower), np.dot(covariance, self._update_mat.T).T,
- check_finite=False).T
- innovation = measurement - projected_mean
-
- new_mean = mean + np.dot(innovation, kalman_gain.T)
- new_covariance = covariance - np.linalg.multi_dot((
- kalman_gain, projected_cov, kalman_gain.T))
- return new_mean, new_covariance
-
- def gating_distance(self, mean, covariance, measurements,
- only_position=False):
- """Compute gating distance between state distribution and measurements.
- A suitable distance threshold can be obtained from `chi2inv95`. If
- `only_position` is False, the chi-square distribution has 4 degrees of
- freedom, otherwise 2.
- Parameters
- ----------
- mean : ndarray
- Mean vector over the state distribution (8 dimensional).
- covariance : ndarray
- Covariance of the state distribution (8x8 dimensional).
- measurements : ndarray
- An Nx4 dimensional matrix of N measurements, each in
- format (x, y, a, h) where (x, y) is the bounding box center
- position, a the aspect ratio, and h the height.
- only_position : Optional[bool]
- If True, distance computation is done with respect to the bounding
- box center position only.
- Returns
- -------
- ndarray
- Returns an array of length N, where the i-th element contains the
- squared Mahalanobis distance between (mean, covariance) and
- `measurements[i]`.
- """
- mean, covariance = self.project(mean, covariance)
-
- if only_position:
- mean, covariance = mean[:2], covariance[:2, :2]
- measurements = measurements[:, :2]
-
- cholesky_factor = np.linalg.cholesky(covariance)
- d = measurements - mean
- z = scipy.linalg.solve_triangular(
- cholesky_factor, d.T, lower=True, check_finite=False,
- overwrite_b=True)
- squared_maha = np.sum(z * z, axis=0)
- return squared_maha
\ No newline at end of file
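
For context, here is a minimal usage sketch of the filter defined above (not from the repository): the initiate → predict → update cycle and the chi-square gating described in the docstrings. The measurement values are invented, and the import assumes the module above is importable as `kalman_filter`.

```python
import numpy as np
from kalman_filter import KalmanFilter, chi2inv95  # assumes the module above is on the path

kf = KalmanFilter()

# A detection in (center x, center y, aspect ratio, height) format.
mean, covariance = kf.initiate(np.array([320.0, 240.0, 0.5, 80.0]))

# One constant-velocity prediction step, then a correction with a new detection.
mean, covariance = kf.predict(mean, covariance)
mean, covariance = kf.update(mean, covariance,
                             np.array([324.0, 238.0, 0.5, 82.0]),
                             confidence=0.9)

# Mahalanobis gating: reject candidates beyond the 0.95 chi-square quantile.
candidates = np.array([[330.0, 236.0, 0.5, 81.0],
                       [900.0, 700.0, 0.5, 40.0]])
d2 = kf.gating_distance(mean, covariance, candidates)
print(d2 <= chi2inv95[4])  # 4 degrees of freedom for (x, y, a, h); e.g. [ True False ]
```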
diff --git a/spaces/bigjoker/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/parseq_adapter.py b/spaces/bigjoker/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/parseq_adapter.py
deleted file mode 100644
index 314b594ac25792358807bdb602cae7f97387edf4..0000000000000000000000000000000000000000
--- a/spaces/bigjoker/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/parseq_adapter.py
+++ /dev/null
@@ -1,164 +0,0 @@
-import copy
-import json
-import logging
-import operator
-from operator import itemgetter
-
-import numpy as np
-import pandas as pd
-import requests
-
-from .animation_key_frames import DeformAnimKeys
-
-logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
-
-
-class ParseqAnimKeys():
- def __init__(self, parseq_args, anim_args):
-
- # Resolve manifest either directly from supplied value
- # or via supplied URL
- manifestOrUrl = parseq_args.parseq_manifest.strip()
- if (manifestOrUrl.startswith('http')):
- logging.info(f"Loading Parseq manifest from URL: {manifestOrUrl}")
- try:
- body = requests.get(manifestOrUrl).text
- logging.debug(f"Loaded remote manifest: {body}")
- self.parseq_json = json.loads(body)
-
- # Add the parseq manifest without the detailed frame data to parseq_args.
- # This ensures it will be saved in the settings file, so that you can always
- # see exactly what parseq prompts and keyframes were used, even if what the URL
- # points to changes.
- parseq_args.fetched_parseq_manifest_summary = copy.deepcopy(self.parseq_json)
- if parseq_args.fetched_parseq_manifest_summary['rendered_frames']:
- del parseq_args.fetched_parseq_manifest_summary['rendered_frames']
- if parseq_args.fetched_parseq_manifest_summary['rendered_frames_meta']:
- del parseq_args.fetched_parseq_manifest_summary['rendered_frames_meta']
-
- except Exception as e:
- logging.error(f"Unable to load Parseq manifest from URL: {manifestOrUrl}")
- raise e
- else:
- self.parseq_json = json.loads(manifestOrUrl)
-
- self.default_anim_keys = DeformAnimKeys(anim_args)
- self.rendered_frames = self.parseq_json['rendered_frames']
- self.max_frame = self.get_max('frame')
- count_defined_frames = len(self.rendered_frames)
- expected_defined_frames = self.max_frame+1 # frames are 0-indexed
-
- self.required_frames = anim_args.max_frames
-
- if (expected_defined_frames != count_defined_frames):
- logging.warning(f"There may be duplicated or missing frame data in the Parseq input: expected {expected_defined_frames} frames including frame 0 because the highest frame number is {self.max_frame}, but there are {count_defined_frames} frames defined.")
-
- if (anim_args.max_frames > count_defined_frames):
- logging.info(f"Parseq data defines {count_defined_frames} frames, but the requested animation is {anim_args.max_frames} frames. The last Parseq frame definition will be duplicated to match the expected frame count.")
- if (anim_args.max_frames < count_defined_frames):
- logging.info(f"Parseq data defines {count_defined_frames} frames, but the requested animation is {anim_args.max_frames} frames. The last Parseq frame definitions will be ignored.")
- else:
- logging.info(f"Parseq data defines {count_defined_frames} frames.")
-
- # Parseq treats input values as absolute values. So if you want to
- # progressively rotate 180 degrees over 4 frames, you specify: 45, 90, 135, 180.
- # However, many animation parameters are relative to the previous frame if there is enough
-        # loopback strength. So if you want to rotate 180 degrees over 4 frames, the animation engine expects:
-        # 45, 45, 45, 45. Therefore, for such parameters, we use the fact that Parseq supplies delta values.
- optional_delta = '_delta' if parseq_args.parseq_use_deltas else ''
- self.angle_series = self.parseq_to_anim_series('angle' + optional_delta)
- self.zoom_series = self.parseq_to_anim_series('zoom' + optional_delta)
- self.translation_x_series = self.parseq_to_anim_series('translation_x' + optional_delta)
- self.translation_y_series = self.parseq_to_anim_series('translation_y' + optional_delta)
- self.translation_z_series = self.parseq_to_anim_series('translation_z' + optional_delta)
- self.rotation_3d_x_series = self.parseq_to_anim_series('rotation_3d_x' + optional_delta)
- self.rotation_3d_y_series = self.parseq_to_anim_series('rotation_3d_y' + optional_delta)
- self.rotation_3d_z_series = self.parseq_to_anim_series('rotation_3d_z' + optional_delta)
- self.perspective_flip_theta_series = self.parseq_to_anim_series('perspective_flip_theta' + optional_delta)
- self.perspective_flip_phi_series = self.parseq_to_anim_series('perspective_flip_phi' + optional_delta)
- self.perspective_flip_gamma_series = self.parseq_to_anim_series('perspective_flip_gamma' + optional_delta)
-
- # Non-motion animation args
- self.perspective_flip_fv_series = self.parseq_to_anim_series('perspective_flip_fv')
- self.noise_schedule_series = self.parseq_to_anim_series('noise')
- self.strength_schedule_series = self.parseq_to_anim_series('strength')
- self.sampler_schedule_series = self.parseq_to_anim_series('sampler_schedule')
- self.contrast_schedule_series = self.parseq_to_anim_series('contrast')
- self.cfg_scale_schedule_series = self.parseq_to_anim_series('scale')
- self.steps_schedule_series = self.parseq_to_anim_series("steps_schedule")
- self.seed_schedule_series = self.parseq_to_anim_series('seed')
- self.fov_series = self.parseq_to_anim_series('fov')
- self.near_series = self.parseq_to_anim_series('near')
- self.far_series = self.parseq_to_anim_series('far')
- self.prompts = self.parseq_to_anim_series('deforum_prompt') # formatted as "{positive} --neg {negative}"
- self.subseed_series = self.parseq_to_anim_series('subseed')
- self.subseed_strength_series = self.parseq_to_anim_series('subseed_strength')
- self.kernel_schedule_series = self.parseq_to_anim_series('antiblur_kernel')
- self.sigma_schedule_series = self.parseq_to_anim_series('antiblur_sigma')
- self.amount_schedule_series = self.parseq_to_anim_series('antiblur_amount')
- self.threshold_schedule_series = self.parseq_to_anim_series('antiblur_threshold')
-
- # Config:
- # TODO this is currently ignored. User must ensure the output FPS set in parseq
- # matches the one set in Deforum to avoid unexpected results.
- self.config_output_fps = self.parseq_json['options']['output_fps']
-
- def get_max(self, seriesName):
- return max(self.rendered_frames, key=itemgetter(seriesName))[seriesName]
-
- def parseq_to_anim_series(self, seriesName):
-
-        # Check if the value is present in the first frame of the JSON data. If not, assume it's undefined.
- # The Parseq contract is that the first frame (at least) must define values for all fields.
- try:
- if self.rendered_frames[0][seriesName] is not None:
- logging.info(f"Found {seriesName} in first frame of Parseq data. Assuming it's defined.")
- except KeyError:
- return None
-
- key_frame_series = pd.Series([np.nan for a in range(self.required_frames)])
-
- for frame in self.rendered_frames:
- frame_idx = frame['frame']
- if frame_idx < self.required_frames:
- if not np.isnan(key_frame_series[frame_idx]):
- logging.warning(f"Duplicate frame definition {frame_idx} detected for data {seriesName}. Latest wins.")
- key_frame_series[frame_idx] = frame[seriesName]
-
- # If the animation will have more frames than Parseq defines,
- # duplicate final value to match the required frame count.
- while (frame_idx < self.required_frames):
- key_frame_series[frame_idx] = operator.itemgetter(-1)(self.rendered_frames)[seriesName]
- frame_idx += 1
-
- return key_frame_series
-
- # fallback to anim_args if the series is not defined in the Parseq data
- def __getattribute__(inst, name):
- try:
- definedField = super(ParseqAnimKeys, inst).__getattribute__(name)
- except AttributeError:
- # No field with this name has been explicitly extracted from the JSON data.
- # It must be a new parameter. Let's see if it's in the raw JSON.
-
- # parseq doesn't use _series, _schedule or _schedule_series suffixes in the
- # JSON data - remove them.
- strippableSuffixes = ['_series', '_schedule']
- parseqName = name
- while any(parseqName.endswith(suffix) for suffix in strippableSuffixes):
- for suffix in strippableSuffixes:
- if parseqName.endswith(suffix):
- parseqName = parseqName[:-len(suffix)]
-
- # returns None if not defined in Parseq JSON data
- definedField = inst.parseq_to_anim_series(parseqName)
- if (definedField is not None):
- # add the field to the instance so we don't compute it again.
- setattr(inst, name, definedField)
-
- if (definedField is not None):
- return definedField
- else:
- logging.info(f"Data for {name} not defined in Parseq data (looked for: '{parseqName}'). Falling back to standard Deforum values.")
- return getattr(inst.default_anim_keys, name)
-
diff --git a/spaces/bioriAsaeru/text-to-voice/CorelDRAW Graphics Suite X7 V21.3.0.755 2020 Keygen Crack The Ultimate Solution for Graphic Designers and Artists.md b/spaces/bioriAsaeru/text-to-voice/CorelDRAW Graphics Suite X7 V21.3.0.755 2020 Keygen Crack The Ultimate Solution for Graphic Designers and Artists.md
deleted file mode 100644
index 5a0e786876d599ba6590c3b8e7eb1f2d0878abd2..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/CorelDRAW Graphics Suite X7 V21.3.0.755 2020 Keygen Crack The Ultimate Solution for Graphic Designers and Artists.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
If you could not attend the live webinar demonstrating the new version of the well-known graphics suite CorelDRAW, do not miss the video recording of it. It is a briefing of nearly two hours, entirely in Spanish, covering the technical advantages of the new release, along with tips and tricks for the program presented by the Corel Product Manager. Also see: the 2019 edition for Mac and Windows, and the CorelDRAW Graphics Suite 2019 activation code list. It is not just a simple rundown of the new features of CorelDRAW Graphics Suite 2019; the video also shows the new features in action and explains other ideas very clearly, including the new symmetry drawing mode.
-
CorelDRAW Graphics Suite X7 V21.3.0.755 2020 Keygen Crack
\ No newline at end of file
diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/demo/MegEngine/python/models/network_blocks.py b/spaces/chendl/compositional_test/multimodal/YOLOX/demo/MegEngine/python/models/network_blocks.py
deleted file mode 100644
index f0e40d3f2aea5bbd00493311219821a7e5d5e8be..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/multimodal/YOLOX/demo/MegEngine/python/models/network_blocks.py
+++ /dev/null
@@ -1,183 +0,0 @@
-#!/usr/bin/env python3
-# -*- encoding: utf-8 -*-
-# Copyright (c) Megvii Inc. All rights reserved.
-
-import megengine.functional as F
-import megengine.module as M
-
-
-class UpSample(M.Module):
-
- def __init__(self, scale_factor=2, mode="bilinear"):
- super().__init__()
- self.scale_factor = scale_factor
- self.mode = mode
-
- def forward(self, x):
- return F.vision.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)
-
-
-class SiLU(M.Module):
- """export-friendly version of M.SiLU()"""
-
- @staticmethod
- def forward(x):
- return x * F.sigmoid(x)
-
-
-def get_activation(name="silu"):
- if name == "silu":
- module = SiLU()
- elif name == "relu":
- module = M.ReLU()
- elif name == "lrelu":
- module = M.LeakyReLU(0.1)
- else:
- raise AttributeError("Unsupported act type: {}".format(name))
- return module
-
-
-class BaseConv(M.Module):
- """A Conv2d -> Batchnorm -> silu/leaky relu block"""
-
- def __init__(self, in_channels, out_channels, ksize, stride, groups=1, bias=False, act="silu"):
- super().__init__()
- # same padding
- pad = (ksize - 1) // 2
- self.conv = M.Conv2d(
- in_channels,
- out_channels,
- kernel_size=ksize,
- stride=stride,
- padding=pad,
- groups=groups,
- bias=bias,
- )
- self.bn = M.BatchNorm2d(out_channels)
- self.act = get_activation(act)
-
- def forward(self, x):
- return self.act(self.bn(self.conv(x)))
-
- def fuseforward(self, x):
- return self.act(self.conv(x))
-
-
-class DWConv(M.Module):
- """Depthwise Conv + Conv"""
- def __init__(self, in_channels, out_channels, ksize, stride=1, act="silu"):
- super().__init__()
- self.dconv = BaseConv(
- in_channels, in_channels, ksize=ksize,
- stride=stride, groups=in_channels, act=act
- )
- self.pconv = BaseConv(
- in_channels, out_channels, ksize=1,
- stride=1, groups=1, act=act
- )
-
- def forward(self, x):
- x = self.dconv(x)
- return self.pconv(x)
-
-
-class Bottleneck(M.Module):
- # Standard bottleneck
- def __init__(
- self, in_channels, out_channels, shortcut=True,
- expansion=0.5, depthwise=False, act="silu"
- ):
- super().__init__()
- hidden_channels = int(out_channels * expansion)
- Conv = DWConv if depthwise else BaseConv
- self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)
- self.conv2 = Conv(hidden_channels, out_channels, 3, stride=1, act=act)
- self.use_add = shortcut and in_channels == out_channels
-
- def forward(self, x):
- y = self.conv2(self.conv1(x))
- if self.use_add:
- y = y + x
- return y
-
-
-class ResLayer(M.Module):
- "Residual layer with `in_channels` inputs."
- def __init__(self, in_channels: int):
- super().__init__()
- mid_channels = in_channels // 2
- self.layer1 = BaseConv(in_channels, mid_channels, ksize=1, stride=1, act="lrelu")
- self.layer2 = BaseConv(mid_channels, in_channels, ksize=3, stride=1, act="lrelu")
-
- def forward(self, x):
- out = self.layer2(self.layer1(x))
- return x + out
-
-
-class SPPBottleneck(M.Module):
- """Spatial pyramid pooling layer used in YOLOv3-SPP"""
- def __init__(self, in_channels, out_channels, kernel_sizes=(5, 9, 13), activation="silu"):
- super().__init__()
- hidden_channels = in_channels // 2
- self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=activation)
- self.m = [M.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2) for ks in kernel_sizes]
- conv2_channels = hidden_channels * (len(kernel_sizes) + 1)
- self.conv2 = BaseConv(conv2_channels, out_channels, 1, stride=1, act=activation)
-
- def forward(self, x):
- x = self.conv1(x)
- x = F.concat([x] + [m(x) for m in self.m], axis=1)
- x = self.conv2(x)
- return x
-
-
-class CSPLayer(M.Module):
- """C3 in yolov5, CSP Bottleneck with 3 convolutions"""
-
- def __init__(
- self, in_channels, out_channels, n=1,
- shortcut=True, expansion=0.5, depthwise=False, act="silu"
- ):
- """
- Args:
- in_channels (int): input channels.
- out_channels (int): output channels.
- n (int): number of Bottlenecks. Default value: 1.
- """
- # ch_in, ch_out, number, shortcut, groups, expansion
- super().__init__()
- hidden_channels = int(out_channels * expansion) # hidden channels
- self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)
- self.conv2 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)
- self.conv3 = BaseConv(2 * hidden_channels, out_channels, 1, stride=1, act=act)
- module_list = [
- Bottleneck(hidden_channels, hidden_channels, shortcut, 1.0, depthwise, act=act)
- for _ in range(n)
- ]
- self.m = M.Sequential(*module_list)
-
- def forward(self, x):
- x_1 = self.conv1(x)
- x_2 = self.conv2(x)
- x_1 = self.m(x_1)
- x = F.concat((x_1, x_2), axis=1)
- return self.conv3(x)
-
-
-class Focus(M.Module):
- """Focus width and height information into channel space."""
-
- def __init__(self, in_channels, out_channels, ksize=1, stride=1, act="silu"):
- super().__init__()
- self.conv = BaseConv(in_channels * 4, out_channels, ksize, stride, act=act)
-
- def forward(self, x):
- # shape of x (b,c,w,h) -> y(b,4c,w/2,h/2)
- patch_top_left = x[..., ::2, ::2]
- patch_top_right = x[..., ::2, 1::2]
- patch_bot_left = x[..., 1::2, ::2]
- patch_bot_right = x[..., 1::2, 1::2]
- x = F.concat(
- (patch_top_left, patch_bot_left, patch_top_right, patch_bot_right,), axis=1,
- )
- return self.conv(x)
diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/utils/checkpoint.py b/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/utils/checkpoint.py
deleted file mode 100644
index a0c200e41da9ad8b720369a2181c9642724622ca..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/utils/checkpoint.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding:utf-8 -*-
-# Copyright (c) Megvii Inc. All rights reserved.
-import os
-import shutil
-from loguru import logger
-
-import torch
-
-
-def load_ckpt(model, ckpt):
- model_state_dict = model.state_dict()
- load_dict = {}
- for key_model, v in model_state_dict.items():
- if key_model not in ckpt:
- logger.warning(
- "{} is not in the ckpt. Please double check and see if this is desired.".format(
- key_model
- )
- )
- continue
- v_ckpt = ckpt[key_model]
- if v.shape != v_ckpt.shape:
- logger.warning(
- "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
- key_model, v_ckpt.shape, key_model, v.shape
- )
- )
- continue
- load_dict[key_model] = v_ckpt
-
- model.load_state_dict(load_dict, strict=False)
- return model
-
-
-def save_checkpoint(state, is_best, save_dir, model_name=""):
- if not os.path.exists(save_dir):
- os.makedirs(save_dir)
- filename = os.path.join(save_dir, model_name + "_ckpt.pth")
- torch.save(state, filename)
- if is_best:
- best_filename = os.path.join(save_dir, "best_ckpt.pth")
- shutil.copyfile(filename, best_filename)
diff --git a/spaces/chilge/Fushimi/resample.py b/spaces/chilge/Fushimi/resample.py
deleted file mode 100644
index fabae4afbb330cccad1681b7941a63547c93c640..0000000000000000000000000000000000000000
--- a/spaces/chilge/Fushimi/resample.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import os
-import argparse
-import librosa
-import numpy as np
-from multiprocessing import Pool, cpu_count
-from scipy.io import wavfile
-from tqdm import tqdm
-
-
-def process(item):
- spkdir, wav_name, args = item
- # speaker 's5', 'p280', 'p315' are excluded,
- speaker = spkdir.split(os.sep)[-1]
- wav_path = os.path.join(args.in_dir, speaker, wav_name)
- if os.path.exists(wav_path) and '.wav' in wav_path:
- os.makedirs(os.path.join(args.out_dir2, speaker), exist_ok=True)
-        wav, sr = librosa.load(wav_path, sr=None)
- wav, _ = librosa.effects.trim(wav, top_db=20)
- peak = np.abs(wav).max()
- if peak > 1.0:
- wav = 0.98 * wav / peak
- wav2 = librosa.resample(wav, orig_sr=sr, target_sr=args.sr2)
- save_name = wav_name
- save_path2 = os.path.join(args.out_dir2, speaker, save_name)
- wavfile.write(
- save_path2,
- args.sr2,
- (wav2 * np.iinfo(np.int16).max).astype(np.int16)
- )
-
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--sr2", type=int, default=32000, help="sampling rate")
- parser.add_argument("--in_dir", type=str, default="./dataset_raw", help="path to source dir")
- parser.add_argument("--out_dir2", type=str, default="./dataset/32k", help="path to target dir")
- args = parser.parse_args()
-    num_processes = cpu_count() - 2 if cpu_count() > 4 else 1
-    pool = Pool(processes=num_processes)
-
- for speaker in os.listdir(args.in_dir):
- spk_dir = os.path.join(args.in_dir, speaker)
- if os.path.isdir(spk_dir):
- print(spk_dir)
- for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])):
- pass
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ImageMath.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ImageMath.py
deleted file mode 100644
index ac7d36b698c2ec9839d8a771734c9f730f701534..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ImageMath.py
+++ /dev/null
@@ -1,263 +0,0 @@
-#
-# The Python Imaging Library
-# $Id$
-#
-# a simple math add-on for the Python Imaging Library
-#
-# History:
-# 1999-02-15 fl Original PIL Plus release
-# 2005-05-05 fl Simplified and cleaned up for PIL 1.1.6
-# 2005-09-12 fl Fixed int() and float() for Python 2.4.1
-#
-# Copyright (c) 1999-2005 by Secret Labs AB
-# Copyright (c) 2005 by Fredrik Lundh
-#
-# See the README file for information on usage and redistribution.
-#
-
-import builtins
-
-from . import Image, _imagingmath
-
-
-def _isconstant(v):
- return isinstance(v, (int, float))
-
-
-class _Operand:
- """Wraps an image operand, providing standard operators"""
-
- def __init__(self, im):
- self.im = im
-
- def __fixup(self, im1):
- # convert image to suitable mode
- if isinstance(im1, _Operand):
- # argument was an image.
- if im1.im.mode in ("1", "L"):
- return im1.im.convert("I")
- elif im1.im.mode in ("I", "F"):
- return im1.im
- else:
- msg = f"unsupported mode: {im1.im.mode}"
- raise ValueError(msg)
- else:
- # argument was a constant
- if _isconstant(im1) and self.im.mode in ("1", "L", "I"):
- return Image.new("I", self.im.size, im1)
- else:
- return Image.new("F", self.im.size, im1)
-
- def apply(self, op, im1, im2=None, mode=None):
- im1 = self.__fixup(im1)
- if im2 is None:
- # unary operation
- out = Image.new(mode or im1.mode, im1.size, None)
- im1.load()
- try:
- op = getattr(_imagingmath, op + "_" + im1.mode)
- except AttributeError as e:
- msg = f"bad operand type for '{op}'"
- raise TypeError(msg) from e
- _imagingmath.unop(op, out.im.id, im1.im.id)
- else:
- # binary operation
- im2 = self.__fixup(im2)
- if im1.mode != im2.mode:
- # convert both arguments to floating point
- if im1.mode != "F":
- im1 = im1.convert("F")
- if im2.mode != "F":
- im2 = im2.convert("F")
- if im1.size != im2.size:
- # crop both arguments to a common size
- size = (min(im1.size[0], im2.size[0]), min(im1.size[1], im2.size[1]))
- if im1.size != size:
- im1 = im1.crop((0, 0) + size)
- if im2.size != size:
- im2 = im2.crop((0, 0) + size)
- out = Image.new(mode or im1.mode, im1.size, None)
- im1.load()
- im2.load()
- try:
- op = getattr(_imagingmath, op + "_" + im1.mode)
- except AttributeError as e:
- msg = f"bad operand type for '{op}'"
- raise TypeError(msg) from e
- _imagingmath.binop(op, out.im.id, im1.im.id, im2.im.id)
- return _Operand(out)
-
- # unary operators
- def __bool__(self):
- # an image is "true" if it contains at least one non-zero pixel
- return self.im.getbbox() is not None
-
- def __abs__(self):
- return self.apply("abs", self)
-
- def __pos__(self):
- return self
-
- def __neg__(self):
- return self.apply("neg", self)
-
- # binary operators
- def __add__(self, other):
- return self.apply("add", self, other)
-
- def __radd__(self, other):
- return self.apply("add", other, self)
-
- def __sub__(self, other):
- return self.apply("sub", self, other)
-
- def __rsub__(self, other):
- return self.apply("sub", other, self)
-
- def __mul__(self, other):
- return self.apply("mul", self, other)
-
- def __rmul__(self, other):
- return self.apply("mul", other, self)
-
- def __truediv__(self, other):
- return self.apply("div", self, other)
-
- def __rtruediv__(self, other):
- return self.apply("div", other, self)
-
- def __mod__(self, other):
- return self.apply("mod", self, other)
-
- def __rmod__(self, other):
- return self.apply("mod", other, self)
-
- def __pow__(self, other):
- return self.apply("pow", self, other)
-
- def __rpow__(self, other):
- return self.apply("pow", other, self)
-
- # bitwise
- def __invert__(self):
- return self.apply("invert", self)
-
- def __and__(self, other):
- return self.apply("and", self, other)
-
- def __rand__(self, other):
- return self.apply("and", other, self)
-
- def __or__(self, other):
- return self.apply("or", self, other)
-
- def __ror__(self, other):
- return self.apply("or", other, self)
-
- def __xor__(self, other):
- return self.apply("xor", self, other)
-
- def __rxor__(self, other):
- return self.apply("xor", other, self)
-
- def __lshift__(self, other):
- return self.apply("lshift", self, other)
-
- def __rshift__(self, other):
- return self.apply("rshift", self, other)
-
- # logical
- def __eq__(self, other):
- return self.apply("eq", self, other)
-
- def __ne__(self, other):
- return self.apply("ne", self, other)
-
- def __lt__(self, other):
- return self.apply("lt", self, other)
-
- def __le__(self, other):
- return self.apply("le", self, other)
-
- def __gt__(self, other):
- return self.apply("gt", self, other)
-
- def __ge__(self, other):
- return self.apply("ge", self, other)
-
-
-# conversions
-def imagemath_int(self):
- return _Operand(self.im.convert("I"))
-
-
-def imagemath_float(self):
- return _Operand(self.im.convert("F"))
-
-
-# logical
-def imagemath_equal(self, other):
- return self.apply("eq", self, other, mode="I")
-
-
-def imagemath_notequal(self, other):
- return self.apply("ne", self, other, mode="I")
-
-
-def imagemath_min(self, other):
- return self.apply("min", self, other)
-
-
-def imagemath_max(self, other):
- return self.apply("max", self, other)
-
-
-def imagemath_convert(self, mode):
- return _Operand(self.im.convert(mode))
-
-
-ops = {}
-for k, v in list(globals().items()):
- if k[:10] == "imagemath_":
- ops[k[10:]] = v
-
-
-def eval(expression, _dict={}, **kw):
- """
- Evaluates an image expression.
-
- :param expression: A string containing a Python-style expression.
- :param options: Values to add to the evaluation context. You
- can either use a dictionary, or one or more keyword
- arguments.
- :return: The evaluated expression. This is usually an image object, but can
- also be an integer, a floating point value, or a pixel tuple,
- depending on the expression.
- """
-
- # build execution namespace
- args = ops.copy()
- args.update(_dict)
- args.update(kw)
- for k, v in list(args.items()):
- if hasattr(v, "im"):
- args[k] = _Operand(v)
-
- compiled_code = compile(expression, "", "eval")
-
- def scan(code):
- for const in code.co_consts:
- if type(const) == type(compiled_code):
- scan(const)
-
- for name in code.co_names:
- if name not in args and name != "abs":
- msg = f"'{name}' not allowed"
- raise ValueError(msg)
-
- scan(compiled_code)
- out = builtins.eval(expression, {"__builtins": {"abs": abs}}, args)
- try:
- return out.im
- except AttributeError:
- return out
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/altair/vegalite/display.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/altair/vegalite/display.py
deleted file mode 100644
index 91c5f33e093b32cf81accd6fdeeb8a18292c28c0..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/altair/vegalite/display.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from ..utils.display import Displayable, default_renderer_base, json_renderer_base
-from ..utils.display import RendererRegistry, HTMLRenderer
-
-
-__all__ = (
- "Displayable",
- "default_renderer_base",
- "json_renderer_base",
- "RendererRegistry",
- "HTMLRenderer",
-)
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_p_r_e_p.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_p_r_e_p.py
deleted file mode 100644
index b4b92f3e924ba2f20ade9a6cca45ce78284ffe21..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_p_r_e_p.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from fontTools import ttLib
-
-superclass = ttLib.getTableClass("fpgm")
-
-
-class table__p_r_e_p(superclass):
- pass
diff --git a/spaces/cihyFjudo/fairness-paper-search/8th Grade Lesson Plans for Literary Terms How to Teach Plot Character Setting and Theme.md b/spaces/cihyFjudo/fairness-paper-search/8th Grade Lesson Plans for Literary Terms How to Teach Plot Character Setting and Theme.md
deleted file mode 100644
index 90652c56d6994a0a4e09b4b8bd5db56a33061b09..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/8th Grade Lesson Plans for Literary Terms How to Teach Plot Character Setting and Theme.md
+++ /dev/null
@@ -1,25 +0,0 @@
-
-
A scavenger hunt would work great to practice literary genres, not just the big ones like novels, short stories, and poems, but also the more specific ones like historical fiction, science fiction, and so on. This one is especially easy. Just give each kid or group a list of the terms and set them loose to browse the classroom library and bookshelves in search of examples.
Kids working together in groups can gain a good understanding of what certain literary terms really mean by helping each other with definitions and examples. Demonstrating what they have discovered to the whole class shows a further level of understanding.
-
Any good story, and many good informational titles, will include examples of a number of literary terms. One nice follow-up activity after any whole class story is to have students search the story for examples of whichever literary terms are prominent in that story.
-
One group of literary terms that many classes start with in the beginning of the year is story elements. Here is one idea for a student writing project that incorporates the ideas of character, setting, plot, conflict, resolution, and theme. This project, in which students plan and then write a story, could be done as a whole class or in small groups. Here are the steps:
-
-
Are you looking for a literary devices lesson plan? This literary terms lesson covers seven literary terms related to poetry and other literature. This lesson plan on literary terms is appropriate for students in the upper elementary or middle school grades.
-
Verify how well students have mastered the literary terms covered in this lesson plan by assigning the practice questions below. Add these practice questions to a blank document to create a worksheet. You can use this as homework or an in-class activity. Check student work using the answer key below.
-
The literary devices discussed in this lesson provide students with a great introduction to literary devices, though there are many more types of literary devices. Once students have mastered these, consider exploring examples of foreshadowing. From there, move into examples of analogy.
-
Freytag's Pyramid is a tool for mapping plot structure, which allows readers to visualize the key features of stories. Students whose experience with text is limited have internalized the pattern described by Freytag's Pyramid through oral storytelling and television viewing. They need help seeing that the patterns they are familiar with are the same ones writers use to construct a short story, play, or novel. This lesson plan provides a basic introduction to Freytag's Pyramid and to the literary element of plot. After viewing a brief presentation about plot structure, students brainstorm the significant events in a story with which they are all familiar and place those events on Freytag's Pyramid. They work in small groups to map the plot of another story. For homework, they map the plot of a favorite television show. Finally, they apply their knowledge of Freytag's Pyramid to map the plot of a narrative poem.
-
Write literary terms on the board. You will need one for each student. Yes, there are enough literary terms: plot, setting, exposition, mood, theme, tone, character, conflict, 5 types of conflict, point of view, three types of point of view, dramatic, verbal, and situational irony, suspense, foreshadowing, alliteration, synecdoche, personification, metaphor, simile, onomatopoeia, hyperbole, meiosis, rhythm, meter, voice, style.....
Pronounce each term and have students repeat it.
Assign one term to each student.
On a clean sheet of paper, each student will design a poster. The poster will have the term at the top with its definition below. The middle of the paper will have a visual representation--picture or symbol--that represents the term along with a written example from a piece of literature familiar to the class.
Instruct students to take out a sheet of notebook paper and copy the terms. Leave enough room to take notes next to each word.
Everything should be removed from desks except the literary term poster, the notes paper, and a writing instrument.
Every 45 seconds, shout pass. Students will pass their paper to the next designated person. Each student will have 45 seconds to study each literary terms poster. Once the term has made it around the room, stop.
Instruct students to make any corrections on any poster.
Do the activity in part one of this series.
Give a quiz the next day and boast how great you are at teaching to different learning styles and how you deserve a raise and a vacation in the Dominican Republic!
ELA Common Core Standards Covered: Amaze your administrator by teaching literary terms to different learning styles. Here are some ELA Common Core Standards to cement your raise.
-
Are you introducing poetry to your students? One of the best ways to teach poetry is to explore the structure of the poem. These structures are also known as the elements of poetry. The basic elements of poetry include meter, rhyme, scheme, verse, and stanza. In order to dive deeper into poetry, students will first need to understand these structural elements. In this blog post, you will learn strategies for teaching poetry and ideas for your elements of poetry lesson plans.
-
Drama Works! Companion Book of Lesson Plans by Jonas Basom contains the 150 printed lesson plans from Drama Works! Online. The lessons include more than 1200 activities in total, including variations for beginner, intermediate, and advanced levels. Using the 65 indexes in the back, the teacher can quickly look up lessons by 25 drama skills, 12 theatre categories, 11 school subjects, 11 learning styles, and 6 age/grade level groups (preschool to college). The book also includes the glossary of theatre and literary terms with definitions.
-
The companion book allows the user to save time and money by not needing to print from Drama Works! Online. It provides offline access to the lessons without a computer and without needing to log in. The teacher or substitute has instant access to complete lesson plans ready to read and lead. The lessons were designed to align with the National Core Arts Standards, Texas theatre standards (TEKS), and Common Core ELA.
-
W= By the end of the lesson, students will have read and comprehended a story written with complex word choice and sentence structure. Students will understand the impact of literary devices such as personification, symbol, simile, and setting on a story.
-
T=This lesson can be tailored to various reading levels by allowing students able to read the text independently to do so; reading aloud to other students or utilizing an audio version for struggling readers; using an adapted version of the story for readers significantly below grade level.
-
O=This lesson is organized using before, during, and after reading activities to help students approach the text. Prior to this lesson, students would already have used student-friendly definition formulas, defined literary terms and plot elements and discussed their use in poetry, and gained relevant background knowledge on the author's life. Following the reading of this story, students would read one or two more stories with guidance from the teacher before reading and responding independently to a story. This technique scaffolds their ability to read complex texts independently.
-
Teacher and student supports include selection summaries in Spanish, English, Haitian Creole, and Brazilian Portuguese, plus multilingual glossaries of literary and informational terms in 10 languages.
-
I totally agree and know that students spend entirely too much time with level one identification of literary terms. However, students have to learn these terms before they can rigorously apply this knowledge to a text and make meaning.
-
I'm going to politely disagree with this author. As a writer, how can you use literary elements in your writing if you can't identify them? As artists, we all learn the basic discipline-specific language so that we can have technical conversations about the art and its meaning. Without an understanding of that terminology, along with application beyond a multiple-choice test, I can't really expect any deep conversations with students about an author's craft. As a student, I may make a claim that has nothing to do with literary terminology or devices, but I may use those devices as support for my claim. That language is a gateway to deeper conversation about literature, and that access is especially important for students of lower socioeconomic status. I've had the same issue with teaching grammar: if we don't teach terms like subject and predicate, how can we teach parallelism? As for standardized testing, I hope that as English professionals we teach skills beyond what the tests ask of students. Getting a 5 on an AP test has little to do with a true understanding of great writing.
-
If we chose to ignore teaching students about literary devices, what use would critical analysis be in a novel study, or even in the simple task of making sense of a story and its purpose? As I say to my students before every lesson involving critical analysis: we use analysis every day, whether we are driving, playing a sport, or creating a work of art. Without analysis, we would be a jump-first-and-think-afterwards kind of society. The world would be a different place without something so basic. Something to think about.
-
Hi Christina, I have spent the afternoon thinking about how to teach literary terms and looking for related teaching materials. I was relieved to find your webpage and your approach to teaching literary terms as it is action-based rather than theory-based. Great job!
-
-
\ No newline at end of file
diff --git a/spaces/cihyFjudo/fairness-paper-search/Beauty and the Goblin King (Fairy Tale Heat Book 1) PDF A Novel by Lidiya Foxglove.md b/spaces/cihyFjudo/fairness-paper-search/Beauty and the Goblin King (Fairy Tale Heat Book 1) PDF A Novel by Lidiya Foxglove.md
deleted file mode 100644
index 4d4e619b51e0ea9dc2109b73b58e72d60cbb9f43..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Beauty and the Goblin King (Fairy Tale Heat Book 1) PDF A Novel by Lidiya Foxglove.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
"How does a book published as nonfiction sneak onto a list of fiction?" asks judge Stephen Graham Jones. "Easy: Read it all as made up, while also, for the scare, completely and 100 percent (secretly) believing in it, because not believing in this case draws a bull's-eye on your back that can only be seen from the sky." Our judges had a hard time deciding between Communion and Whitley Strieber's equally scary fictional Roswell alien tale Majestic -- so why not read them both?
-
Robert W. Chambers' "King in Yellow" stories "are a foundational classic that doesn't get as much attention as Lovecraft for the simple reason that there are only four of them," says our judge Ruthanna Emrys. "This is the best of the lot and a sterling example of a story where the narrative undermines the narrator's prejudices (and eventually everything else he says). It starts with the main character talking approvingly about a rising fascist movement complete with 'suicide chambers' and forced removal of Jews, but it quickly becomes obvious that the author is not in sympathy." She also points out that Chambers was one of the first authors to imagine a book (or in this case a play) that harms its readers.
-
Beauty and the Goblin King (Fairy Tale Heat Book 1) book pdf
Anne Rivers Siddons was best known for writing posh fiction about posh Southern people when she turned out this perfect haunted house novel. Taking one part economic anxiety from Robert Marasco's Burnt Offerings, one part emotional unease from Shirley Jackson's The Haunting of Hill House, and adding her own observations about Southern yuppies, she updated the haunted house formula to include this beautiful, modern home that wages unrelenting psychic warfare against its owners. Everyone has felt, at some point or another, that their house hates them. Siddons' book explains exactly how much.
-
Trafficking in the kind of American Gothic perfected by Ray Bradbury, John Bellairs' three books set in the fictional Michigan town of New Zebedee are lonely and charming and shot through with a sense of creeping damp and creeping doom. Sort of a Harry Potter for less sporty boys, they star chubby Lewis Barnavelt, who has been banished to live with his Uncle Jonathan after his parents die in a car wreck. The good news: Uncle Jonathan is a wizard. The bad news: Living with him means that Lewis will probably die. Simultaneously comforting and creepy, the New Zebedee books, with their scratchy illustrations by Edward Gorey, scarred children throughout the '70s and '80s.
-
High in the mountains, Zel lives with her mother, who insists they have all they need -- for they have each other. Zel's life is peaceful and protected -- until a chance encounter changes everything. When she meets a beautiful young prince at the market one day, she is profoundly moved by new emotions. But Zel's mother sees the future unfolding -- and she will do the unspeakable to prevent Zel from leaving her... "Will leave readers spellbound." -- Publishers Weekly, starred review
Through the Woods, by Emily Carroll
-
'It came from the woods. Most strange things do.'
Five mysterious, spine-tingling stories follow journeys into (and out of?) the eerie abyss.
These chilling tales spring from the macabre imagination of acclaimed and award-winning comic creator Emily Carroll.
Come take a walk in the woods and see what awaits you there...
The Little Prince, by Antoine de Saint-Exupery, Richard Howard (translator)
-
With an introduction by Helen Simpson. From familiar fairy tales and legends - Red Riding Hood, Bluebeard, Puss in Boots, Beauty and the Beast, vampires and werewolves - Angela Carter has created an absorbing collection of dark, sensual, fantastic stories.
Mistress Masham's Repose, by T. H. White
-
My inner child is still captivated by the Lilliputian world of T.H. White's Mistress Masham's Repose every time I read it. I don't know why the idea of discovering a secret miniature kingdom is so alluring: I think it may have something to do with my love for dollhouses when I was a child. T.H. White was best known for The Once and Future King and The Sword in the Stone, based on the Arthurian legends; he was a master at taking an old story (Gulliver's Travels in the case of Mistress Masham's Repose) and making it truly his own.
-
The Tale of Despereaux is the story of an unlikely hero. It reveals what happens when an overly tiny mouse with big ears dares to be different. He is fascinated by light, music, and a book left open in the library, but his peers do not approve. When Despereaux falls in love with the princess, the other mice condemn him to the dungeon. This usually means death, but Roscuro the rat intends to harm the princess, and Despereaux is determined to stop him. I enjoyed how the motivations of the key characters were subtly revealed, then brought together in clever and surprising ways. Every detail was included for a reason, making for a most satisfying read. Unable to stop, I read this modern fairy tale in one sitting.
-
CHARMING BOOK.
Folk, by Zoe Gilbert
-
-
As soon as I opened this book, I could see that the inspiration from all the things I personally love had been woven into this beautiful visual story. I enjoy how illuminated manuscripts and ancient books inspire the pages with their tiny details. This book adds to its rich storytelling by playing with these elements. So beautifully designed, and such a wonderful integration of text and image. A stunning tale for young and old, with a message that makes my heart happy.
-
"A collection of darkly mischievous stories based on classic fairy tales"--Front flap.ExploreSimilar booksBook lists with this bookWhy do people like this book?TopicsMermaidsGenderFairy talesFeminismGenresComing soon...PreviewBookshop.orgAmazonAnd Her Smile Will Untether the UniverseByGwendolyn Kiste,
-
The River Wife is a simple and subtle fable of love. It tells the story of the river wife - part human, part fish - whose duty is to tend the river, but who instead falls in love with a man. Tender and melancholy, it speaks of desire and love, mothers and daughters, kinship and care, duty and sacrifice, water and wisdom. There is a great sternness and sadness here, coupled with gentleness. A love story, an environmental fable, a retelling of the Orpheus myth, The River Wife is grave, tender and otherworldly.
The Complete Fairy Tales, by George MacDonald, U.C. Knoepflmacher
-
George MacDonald occupied a major position in the intellectual life of his Victorian contemporaries. This volume brings together all eleven of his shorter fairy stories as well as his essay "The Fantastic Imagination". The subjects are those of traditional fantasy: good and wicked fairies, children embarking on elaborate quests, and journeys into unsettling dreamworlds. Within this familiar imaginative landscape, his children's stories were profoundly experimental, questioning the association of childhood with purity and innocence, and the need to separate fairy tale wonder from adult scepticism and disbelief.
Gossip from the Forest: The Tangled Roots of Our Forests and Fairytales, by Sara Maitland
-
This book does that job beautifully. As a piece of narrative nonfiction, it collects fairy tales, personal memoirs, and natural history in a lyrical journey through the forests of England. Maitland centers each chapter on an English woodland and the stories associated with it, be they fairy tales or history. More importantly, she discusses not only how myth shapes culture, but how landscape shapes myth. I reference it time and again not only as an academic, but as an author who creates worlds rich in landscape and folklore.
-
Young Anders is carried away from his bleak life as an unloved foster child in Stockholm, Sweden, to become Mio, son of the King of Farawayland.
Friends With The Monsters, by Albany Walker
-
Christmas is a wonderful time for magical tales that children love. In this one, a poor but good-hearted cobbler is rewarded for his honesty during the night, when clever elves sneak into his shop and make shoes for him to sell. It gives children the chance to imagine invisible helpers, and also the thrill of doing good deeds in secret.
The Elves And The Shoemaker, by Grimm Brothers, Jim LaMarche
-
Here is the classic tale of elfin magic, loved by generations of children and made new by an artist of international acclaim. Jim LaMarche's stunning paintings, reminiscent of his earlier work in The Rainbabies, are the perfect complement to this favorite Grimm fairy tale.
The Rainbabies, by Laura Krauss Melmed, Jim LaMarche (illustrator)
-
-
\ No newline at end of file
diff --git a/spaces/cihyFjudo/fairness-paper-search/Download Apne Apne Phanday 1080p The most funny and social film of 2016.md b/spaces/cihyFjudo/fairness-paper-search/Download Apne Apne Phanday 1080p The most funny and social film of 2016.md
deleted file mode 100644
index 0199b25ab9546254ac37866c9d97dfbd4c528e44..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Download Apne Apne Phanday 1080p The most funny and social film of 2016.md
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
While filling out the Mains form, you should have your graduation mark sheet with you as proof. For Prelims you only need to know your total marks and percentage; no documents are asked for at that stage. So arrange for your mark sheet by the time of Mains.
Sir, thank you very much for replying. Sir, I can do self-study. My family will not let me go out anywhere for coaching, and there isn't that much money either. You said one thing correctly: I get very confused about what to study, what not to study, and how to study. But sir, I want to become an IAS officer. I am using my mobile for guidance, and sir, your guidance is very good for Hindi-medium students. I request you to prepare a topic that helps people reach their aim easily. Please add such a topic to your questions, one that explains how students should study, which books to study from, what to read on mobile and which magazine to read for current affairs, what to read and listen to daily, and also give a list of books with their authors. Thank you so much. I hope you will think about this seriously and help me and other students.
-
Sir, first I want to express my gratitude. You have given us a lot, and I hope you will keep doing so in the future. Can we rely entirely on Sansar Lochan for Mains? One more big effort is needed, sir: you should cover and explain every topic bit by bit according to the Mains syllabus, so that the financial exploitation by coaching institutes is reduced to some extent. Thank you very much, sir; we are all grateful to you.
-
Prashant ji, first of all you need to complete graduation. UPSC does not look at 10th or 12th marks; if you are a graduate, you are eligible for UPSC. Secondly, you said that you are not physically fit. If you have some disorder or problem, you may even get the benefit of it in the exam. So tell us openly what exactly you have!
-
I have already told you about the books. Go to Google and type "ias books in hindi"; you will find the link to my site there. For now, strengthen your general studies and buy the books I have suggested.
-
-
Sir, I want to find out which optional subjects have complete study material available in the market for Hindi medium, so that I can choose the right optional for myself. Please help me, sir.
-
Hello sir, I did 12th in arts in 2012 and will now do a BA by distance learning starting in 2017, with the first year completed in 2018. I have taken Political Science and Public Administration as subjects. Sir, I am a Home Guard and now I want to serve the country at a bigger level by becoming an IPS officer. Sir, my age is now 27, I belong to OBC, and I am also married, with one son who is 5 years old. Sir, can I become an IPS officer, or should I focus on my son so that I can make him an IPS officer?
-
-
\ No newline at end of file
diff --git a/spaces/cihyFjudo/fairness-paper-search/Giveaway PhraseExpress Standard Edition for Windows and Mac FREE - Enter Now!.md b/spaces/cihyFjudo/fairness-paper-search/Giveaway PhraseExpress Standard Edition for Windows and Mac FREE - Enter Now!.md
deleted file mode 100644
index 814246654ab33cc8b071f5d736a31134d66f64e4..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Giveaway PhraseExpress Standard Edition for Windows and Mac FREE - Enter Now!.md
+++ /dev/null
@@ -1,7 +0,0 @@
-
-
As we cannot test every possible combination of hardware and software, we kindly ask for your understanding that we are unable to make a reliable compatibility statement about your individual setup. That is why we offer free trial versions of our software, which let you try the software and test compatibility with your individual system configuration before making a purchase decision. Our trial versions allow you to test almost everything; restart the program for another trial period. If you decide to purchase our software after careful examination in your own environment, no additional installation is required. You will receive a license key, which removes the occasional license reminders and turns your trial installation into a paid edition, and you can continue to use any data created with the demo version. Please note that we do not offer refunds, so please take your time to test everything. Can I transfer a license to another user? Yes, you can deregister your license key from one installation for use with another user name as often as you want. You can also move the license from a Windows installation to a Mac installation (and vice versa).
-
Giveaway: PhraseExpress Standard Edition for Windows and Mac FREE
Yes, PhraseExpress can store both images and tables in phrases. Image files can easily be dragged into the phrase content and resized with the mouse. How do I renew maintenance? Returning customers get a renewal discount, and you can also upgrade to a higher product edition:
Maintenance for single licenses can be extended online.
To renew maintenance for multiple licenses, please send your license keys to receive a custom quote. How many monitors does ShareMouse support? The ShareMouse Standard Edition supports exactly one display per computer. The Professional Edition supports a maximum of four monitors per computer, but probably no more than 26 monitors in total. Unfortunately, we are not able to state the maximum number of supported displays, as we do not have that many monitors in our labs; please test the free demo version for compatibility with your individual setup. Can Macro Recorder record either keyboard or mouse input only? Macro Recorder always captures both the mouse and the keyboard, but you can filter either mouse or keyboard input from the playback, giving you a choice of which part of the recording you need. For example, Macro Recorder can play only the mouse clicks while omitting the mouse movement paths, or play the mouse movements faster or slower on a case-by-case basis.
-
Using PhraseExpress will help you type faster. You can set up keyboard shortcuts for specific sentences or words, and when you use a shortcut, PhraseExpress fills in the phrase automatically. In addition to working in many applications, PhraseExpress can also be used on Windows 11, where users typically rely on it while writing emails or reports. PhraseExpress offers a 30-day free trial of all its services. After the trial period you get a free personal account, but you will not have access to the advanced and commercial features. If you want a paid account, you can choose a Standard, Professional, or Enterprise subscription.
-
-
\ No newline at end of file
diff --git a/spaces/cihyFjudo/fairness-paper-search/Lectura veraniega Cmo fomentar el hbito de la lectura en los nios y adolescentes.md b/spaces/cihyFjudo/fairness-paper-search/Lectura veraniega Cmo fomentar el hbito de la lectura en los nios y adolescentes.md
deleted file mode 100644
index 614d26d31e8a266164f4e15b80a6beb50368ce9b..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Lectura veraniega Cmo fomentar el hbito de la lectura en los nios y adolescentes.md
+++ /dev/null
@@ -1,22 +0,0 @@
-
-
The author of Malena es un nombre de tango, Los aires difíciles, El corazón helado and Los pacientes del doctor García (Tusquets), the latest installment in her series Los episodios de una guerra interminable, favors reading that revisits uncomfortable pages of a not-so-distant past.
-
Vengaré tu muerte (Alfaguara) is the third novel the author has devoted to crime fiction, after El verano del inglés and Naturaleza muerta. The academic champions both groundbreaking and classic reads.
Presenting it that way makes it look like a textbook. And frankly, on vacation, or over a weekend, which is when I read it, I am not in the mood for a textbook. If you ignore that and dive straight into the chapters, even in a more or less random order, you will find that what you hold in your hands are the reflections of a group of thinkers who understand economics deeply, and far less orthodoxly than a textbook can offer. The whole is a pleasure to read because it is full of challenging, out-of-the-ordinary ideas. So what I will do is leave you with a few highlights that caught my attention and suggest reading it out of order and with an open mind. You will not be disappointed.
-
In this book the author describes his perception of the landscape as a cabinet of curiosities, explaining how he collects individual phenomena and elements, relates them to one another and rearranges them. Through its easy-reading pages, a whole cosmos unfolds in the reader's mind, one in which nature's lack of wholeness appears as a gain rather than a loss. Günther Vogt is such a collector, and his cabinet of curiosities is a compendium of landscapes and of his way of understanding them.
-
This summer will be a little unusual, and reading can be a great ally. The following selection of new releases is aimed at adults, young people and children, and there is something for every taste: fiction, educational titles, stories, essays... Pick yours!
-
Besides being a writer, the Austrian author was a passionate reader who published countless reviews in the press of his day and wrote the prologues for many other authors' works. Now the publisher Acantilado gathers various texts in which the writer invites us to reflect on the love and passion he professes for literature and books, and extends an invitation to dialogue so that other readers catch that passion for reading.
-
A host of extraordinary characters will amuse primary-school children, move them and explain life to them. By joining them in their adventures, children can relate to and recognize some of the conflicts they are going to face, see how these paper heroes resolve them, and pick up useful strategies for their own development. Structured as forty-five readings, this book also offers a range of stimulating and reflective teaching activities.
-
Can you feel it, reader? It is the warmth of the summer sun already on your skin. Shorts are out in the street and jackets are being pushed to the back of the wardrobe. And if there is anything we like more than summer nights, holidays, the pool and relaxing, it is precisely what we are going to read! By the pool. Looking out at the sea. In the mountains. At home. On a terrace in our street, or back in the village for a bit of peace (and more reading, of course). And, like everything in summer, what appeals most is short, light books.
-
-
If you prefer, and continuing with light summer reads, you can pick up Megan Maxwell's latest book, ¿Y si lo probamos?. This time the queen of romance introduces Verónica, a 38-year-old woman: independent, hard-working, self-employed and rather stubborn and controlling. After a romantic disappointment, Verónica makes a decision to enjoy sex without commitment. Her rules are: no married men, never mix work and fun, and always men under thirty.
-
We cross paths with many people in our lives, but few leave a mark and fewer still manage to change it. Those are the people who really matter. And they star in La Vecina Rubia's first novel. She herself sums up the plot of this book, which is going to be a must on your summer reading list:
-
Children are the luckiest, since they have many days of summer holidays in which to read. And for them there are plenty of exciting options full of fantasy, adventure and mystery. Here are some light reads so they can take their books to the beach, the pool, the village or anywhere else.
-
Summer arrives and, with it, the truce we grant ourselves each year to recover body and spirit. We have more free time and can return to activities that our suffocating daily routine pushed into the background. One of them is summer reading. But not just any reading.
-
Experts say our children read less and less. They may decode more text than ever (decode in the literal sense, because the electronic messages they send are often genuinely encoded), but they read less for pleasure. They devote more and more time to digital media and less to paper.
-
The online social networks they are "hooked on" have brought them some advantages (and quite a few drawbacks), but have taken reading away from them; so say the authors of the study Menores y Redes Sociales: "free reading -- texts not required by schoolwork -- is clearly more frequent among non-users of social networks." Although the question will need deeper research in the future, it seems clear that the use of social networks affects reading as a hobby.
-
A report published in the United States under the title Generation M found that "reading for pleasure of any kind is more closely related to a student's level than any other medium." In short, the advantage we can give our children by turning them into readers is enormous, because, as José Antonio Millán puts it: "reading is the key to knowledge in the information society."
-
Summer is a good time to "infect" our children with the "reading bug," to pass on to them the "habit" of holding a book in their hands, stroking its spine, pinching the corners of its pages to turn them. Once infected, they will not be able to resist the temptation to read, and that will make them more cultured, more capable, deeper, more reflective. In short: more human.
-
Almost at the end of summer I bring you an ideal read for the season: Color Morado Traidor, from ediciones SM's El Barco de Vapor collection. It stars La Pandilla de la Lupa, whom I didn't know at all, but from now on I will be looking out for their adventures.
Synopsis: Imagine, one ordinary summer, four friends enjoying their holidays. This year is a bit special: Alex, Manu, Carol and Erika are going to spend a few days at Alex's grandparents' house in the village. The truth is, they have no idea of the adventures awaiting them there. They will meet Alex's family: Mario, the teenage brother who spends the whole day elsewhere; Julio, the little brother hiding a small secret; the aunts and uncles, each with something special about them; and Lola, the mysterious Chinese girl with a sporting secret whom everyone talks about.
They have two mysteries to solve: the daily disappearance of one of Carol's porras, with the sulking that follows, and a strange green light accompanied by whispers that has Manu rather frightened. All this while they walk in the hills, bathe in rock pools, shop at the market and play Cluedo. Who would have imagined that spending a few days with the family could be so much fun? Not them, and when their stay comes to an end they know they will rarely have such a good time again.
Review: At first I was a little skeptical about the story I was going to read. I thought I had another Spanish-language Famous Five in front of me. But that is not the case: the characters are treated very warmly, and you can see yourself in some of them. The adventure, or adventures, are, how to put it, simple, without big twists or grandiloquence. They are things that could really happen. Who hasn't placed an order for churros and porras only to find there weren't enough for everyone? The book deals with healthy friendship: a group of children spending the summer together, having fun with simple things and enjoying nature. The text is organized as a diary in which each character describes the day or the situations from their own point of view, which lets us clearly see the four very different personalities of the protagonists.
And the illustrations scattered throughout the book help a lot, if you are short on imagination, to get into the narrative and picture the characters. So it is a highly recommendable book for readers with a little experience, since the type is large although there is a lot of text. Enjoy your reading!
-
-
\ No newline at end of file
diff --git a/spaces/cleanmaster/akagi-sovits3/README.md b/spaces/cleanmaster/akagi-sovits3/README.md
deleted file mode 100644
index 3c969c09a164d54541550ca9a095cf124dd8bb44..0000000000000000000000000000000000000000
--- a/spaces/cleanmaster/akagi-sovits3/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Akagi Sovits3
-emoji: 💻
-colorFrom: blue
-colorTo: pink
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
-license: openrail
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/grUtils.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/grUtils.py
deleted file mode 100644
index 785684b1eb30a76ae598bfe46416d4556fc422a0..0000000000000000000000000000000000000000
--- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/grUtils.py
+++ /dev/null
@@ -1,92 +0,0 @@
-import struct, warnings
-
-try:
- import lz4
-except ImportError:
- lz4 = None
-else:
- import lz4.block
-
-# old scheme for VERSION < 0.9 otherwise use lz4.block
-
-
-def decompress(data):
- (compression,) = struct.unpack(">L", data[4:8])
- scheme = compression >> 27
- size = compression & 0x07FFFFFF
- if scheme == 0:
- pass
- elif scheme == 1 and lz4:
-        res = lz4.block.decompress(struct.pack("<L", size) + data[8:])
-        if len(res) != size:
-            warnings.warn("Table decompression failed.")
-        else:
-            data = res
-    else:
-        warnings.warn("Table is compressed with an unsupported compression scheme")
-    return (data[4:], scheme)
-
-
-def compress(scheme, data):
-    hdr = data[:4] + struct.pack(">L", (scheme << 27) + (len(data) & 0x07FFFFFF))
- if scheme == 0:
- return data
- elif scheme == 1 and lz4:
- res = lz4.block.compress(
- data, mode="high_compression", compression=16, store_size=False
- )
- return hdr + res
- else:
- warnings.warn("Table failed to compress by unsupported compression scheme")
- return data
-
-
-def _entries(attrs, sameval):
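-    # Walk sorted (key, value) pairs and group consecutive keys into runs, yielding
-    # (first_key, run_length, values); with sameval=True a run also requires identical values.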
- ak = 0
- vals = []
- lastv = 0
- for k, v in attrs:
- if len(vals) and (k != ak + 1 or (sameval and v != lastv)):
- yield (ak - len(vals) + 1, len(vals), vals)
- vals = []
- ak = k
- vals.append(v)
- lastv = v
- yield (ak - len(vals) + 1, len(vals), vals)
-
-
-def entries(attributes, sameval=False):
- g = _entries(sorted(attributes.items(), key=lambda x: int(x[0])), sameval)
- return g
-
-
-def bininfo(num, size=1):
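-    # Pack the OpenType-style binary-search header: count, searchRange, entrySelector and
-    # rangeShift, with searchRange/rangeShift scaled by the per-entry size.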
- if num == 0:
- return struct.pack(">4H", 0, 0, 0, 0)
- srange = 1
- select = 0
- while srange <= num:
- srange *= 2
- select += 1
- select -= 1
- srange //= 2
- srange *= size
- shift = num * size - srange
- return struct.pack(">4H", num, srange, select, shift)
-
-
-def num2tag(n):
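-    # Values below 0x200000 stay as decimal strings; larger values become a 4-byte
-    # big-endian tag with NUL padding stripped.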
- if n < 0x200000:
- return str(n)
- else:
- return (
- struct.unpack("4s", struct.pack(">L", n))[0].replace(b"\000", b"").decode()
- )
-
-
-def tag2num(n):
- try:
- return int(n)
- except ValueError:
- n = (n + " ")[:4]
- return struct.unpack(">L", n.encode("ascii"))[0]
diff --git a/spaces/codelion/Grounding_DINO_demo/groundingdino/util/visualizer.py b/spaces/codelion/Grounding_DINO_demo/groundingdino/util/visualizer.py
deleted file mode 100644
index 7a1b7b101e9b73f75f9136bc67f2063c7c1cf1c1..0000000000000000000000000000000000000000
--- a/spaces/codelion/Grounding_DINO_demo/groundingdino/util/visualizer.py
+++ /dev/null
@@ -1,318 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@File : visualizer.py
-@Time : 2022/04/05 11:39:33
-@Author : Shilong Liu
-@Contact : slongliu86@gmail.com
-"""
-
-import datetime
-import os
-
-import cv2
-import matplotlib.pyplot as plt
-import numpy as np
-import torch
-from matplotlib import transforms
-from matplotlib.collections import PatchCollection
-from matplotlib.patches import Polygon
-from pycocotools import mask as maskUtils
-
-
-def renorm(
- img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
-) -> torch.FloatTensor:
- # img: tensor(3,H,W) or tensor(B,3,H,W)
- # return: same as img
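-    # note: the default mean/std are the ImageNet statistics, so this undoes the usual torchvision input normalization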
- assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim()
- if img.dim() == 3:
-        assert img.size(0) == 3, 'img.size(0) should be 3 but "%d". (%s)' % (
- img.size(0),
- str(img.size()),
- )
- img_perm = img.permute(1, 2, 0)
- mean = torch.Tensor(mean)
- std = torch.Tensor(std)
- img_res = img_perm * std + mean
- return img_res.permute(2, 0, 1)
- else: # img.dim() == 4
-        assert img.size(1) == 3, 'img.size(1) should be 3 but "%d". (%s)' % (
- img.size(1),
- str(img.size()),
- )
- img_perm = img.permute(0, 2, 3, 1)
- mean = torch.Tensor(mean)
- std = torch.Tensor(std)
- img_res = img_perm * std + mean
- return img_res.permute(0, 3, 1, 2)
-
-
-class ColorMap:
- def __init__(self, basergb=[255, 255, 0]):
- self.basergb = np.array(basergb)
-
- def __call__(self, attnmap):
- # attnmap: h, w. np.uint8.
- # return: h, w, 4. np.uint8.
- assert attnmap.dtype == np.uint8
- h, w = attnmap.shape
- res = self.basergb.copy()
- res = res[None][None].repeat(h, 0).repeat(w, 1) # h, w, 3
- attn1 = attnmap.copy()[..., None] # h, w, 1
- res = np.concatenate((res, attn1), axis=-1).astype(np.uint8)
- return res
-
-
-def rainbow_text(x, y, ls, lc, **kw):
- """
- Take a list of strings ``ls`` and colors ``lc`` and place them next to each
- other, with text ls[i] being shown in color lc[i].
-
- This example shows how to do both vertical and horizontal text, and will
- pass all keyword arguments to plt.text, so you can set the font size,
- family, etc.
- """
- t = plt.gca().transData
- fig = plt.gcf()
- plt.show()
-
- # horizontal version
- for s, c in zip(ls, lc):
- text = plt.text(x, y, " " + s + " ", color=c, transform=t, **kw)
- text.draw(fig.canvas.get_renderer())
- ex = text.get_window_extent()
- t = transforms.offset_copy(text._transform, x=ex.width, units="dots")
-
- # #vertical version
- # for s,c in zip(ls,lc):
- # text = plt.text(x,y," "+s+" ",color=c, transform=t,
- # rotation=90,va='bottom',ha='center',**kw)
- # text.draw(fig.canvas.get_renderer())
- # ex = text.get_window_extent()
- # t = transforms.offset_copy(text._transform, y=ex.height, units='dots')
-
-
-class COCOVisualizer:
- def __init__(self, coco=None, tokenlizer=None) -> None:
- self.coco = coco
-
- def visualize(self, img, tgt, caption=None, dpi=180, savedir="vis"):
- """
- img: tensor(3, H, W)
- tgt: make sure they are all on cpu.
- must have items: 'image_id', 'boxes', 'size'
- """
- plt.figure(dpi=dpi)
- plt.rcParams["font.size"] = "5"
- ax = plt.gca()
- img = renorm(img).permute(1, 2, 0)
- # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':
- # import ipdb; ipdb.set_trace()
- ax.imshow(img)
-
- self.addtgt(tgt)
-
- if tgt is None:
- image_id = 0
- elif "image_id" not in tgt:
- image_id = 0
- else:
- image_id = tgt["image_id"]
-
- if caption is None:
- savename = "{}/{}-{}.png".format(
- savedir, int(image_id), str(datetime.datetime.now()).replace(" ", "-")
- )
- else:
- savename = "{}/{}-{}-{}.png".format(
- savedir, caption, int(image_id), str(datetime.datetime.now()).replace(" ", "-")
- )
- print("savename: {}".format(savename))
- os.makedirs(os.path.dirname(savename), exist_ok=True)
- plt.savefig(savename)
- plt.close()
-
- def addtgt(self, tgt):
- """ """
- if tgt is None or not "boxes" in tgt:
- ax = plt.gca()
-
- if "caption" in tgt:
- ax.set_title(tgt["caption"], wrap=True)
-
- ax.set_axis_off()
- return
-
- ax = plt.gca()
- H, W = tgt["size"]
- numbox = tgt["boxes"].shape[0]
-
- color = []
- polygons = []
- boxes = []
- for box in tgt["boxes"].cpu():
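-            # boxes are normalized (cx, cy, w, h); scale to pixel units and convert to a top-left (x, y, w, h) box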
- unnormbbox = box * torch.Tensor([W, H, W, H])
- unnormbbox[:2] -= unnormbbox[2:] / 2
- [bbox_x, bbox_y, bbox_w, bbox_h] = unnormbbox.tolist()
- boxes.append([bbox_x, bbox_y, bbox_w, bbox_h])
- poly = [
- [bbox_x, bbox_y],
- [bbox_x, bbox_y + bbox_h],
- [bbox_x + bbox_w, bbox_y + bbox_h],
- [bbox_x + bbox_w, bbox_y],
- ]
- np_poly = np.array(poly).reshape((4, 2))
- polygons.append(Polygon(np_poly))
- c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
- color.append(c)
-
- p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.1)
- ax.add_collection(p)
- p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2)
- ax.add_collection(p)
-
- if "strings_positive" in tgt and len(tgt["strings_positive"]) > 0:
- assert (
- len(tgt["strings_positive"]) == numbox
- ), f"{len(tgt['strings_positive'])} = {numbox}, "
- for idx, strlist in enumerate(tgt["strings_positive"]):
- cate_id = int(tgt["labels"][idx])
- _string = str(cate_id) + ":" + " ".join(strlist)
- bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]
- # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1})
- ax.text(
- bbox_x,
- bbox_y,
- _string,
- color="black",
- bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1},
- )
-
- if "box_label" in tgt:
- assert len(tgt["box_label"]) == numbox, f"{len(tgt['box_label'])} = {numbox}, "
- for idx, bl in enumerate(tgt["box_label"]):
- _string = str(bl)
- bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]
- # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1})
- ax.text(
- bbox_x,
- bbox_y,
- _string,
- color="black",
- bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1},
- )
-
- if "caption" in tgt:
- ax.set_title(tgt["caption"], wrap=True)
- # plt.figure()
- # rainbow_text(0.0,0.0,"all unicorns poop rainbows ! ! !".split(),
- # ['red', 'orange', 'brown', 'green', 'blue', 'purple', 'black'])
-
- if "attn" in tgt:
- # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':
- # import ipdb; ipdb.set_trace()
- if isinstance(tgt["attn"], tuple):
- tgt["attn"] = [tgt["attn"]]
- for item in tgt["attn"]:
- attn_map, basergb = item
- attn_map = (attn_map - attn_map.min()) / (attn_map.max() - attn_map.min() + 1e-3)
- attn_map = (attn_map * 255).astype(np.uint8)
- cm = ColorMap(basergb)
- heatmap = cm(attn_map)
- ax.imshow(heatmap)
- ax.set_axis_off()
-
- def showAnns(self, anns, draw_bbox=False):
- """
- Display the specified annotations.
- :param anns (array of object): annotations to display
- :return: None
- """
- if len(anns) == 0:
- return 0
- if "segmentation" in anns[0] or "keypoints" in anns[0]:
- datasetType = "instances"
- elif "caption" in anns[0]:
- datasetType = "captions"
- else:
- raise Exception("datasetType not supported")
- if datasetType == "instances":
- ax = plt.gca()
- ax.set_autoscale_on(False)
- polygons = []
- color = []
- for ann in anns:
- c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
- if "segmentation" in ann:
- if type(ann["segmentation"]) == list:
- # polygon
- for seg in ann["segmentation"]:
- poly = np.array(seg).reshape((int(len(seg) / 2), 2))
- polygons.append(Polygon(poly))
- color.append(c)
- else:
- # mask
- t = self.imgs[ann["image_id"]]
- if type(ann["segmentation"]["counts"]) == list:
- rle = maskUtils.frPyObjects(
- [ann["segmentation"]], t["height"], t["width"]
- )
- else:
- rle = [ann["segmentation"]]
- m = maskUtils.decode(rle)
- img = np.ones((m.shape[0], m.shape[1], 3))
- if ann["iscrowd"] == 1:
- color_mask = np.array([2.0, 166.0, 101.0]) / 255
- if ann["iscrowd"] == 0:
- color_mask = np.random.random((1, 3)).tolist()[0]
- for i in range(3):
- img[:, :, i] = color_mask[i]
- ax.imshow(np.dstack((img, m * 0.5)))
- if "keypoints" in ann and type(ann["keypoints"]) == list:
- # turn skeleton into zero-based index
- sks = np.array(self.loadCats(ann["category_id"])[0]["skeleton"]) - 1
- kp = np.array(ann["keypoints"])
- x = kp[0::3]
- y = kp[1::3]
- v = kp[2::3]
- for sk in sks:
- if np.all(v[sk] > 0):
- plt.plot(x[sk], y[sk], linewidth=3, color=c)
- plt.plot(
- x[v > 0],
- y[v > 0],
- "o",
- markersize=8,
- markerfacecolor=c,
- markeredgecolor="k",
- markeredgewidth=2,
- )
- plt.plot(
- x[v > 1],
- y[v > 1],
- "o",
- markersize=8,
- markerfacecolor=c,
- markeredgecolor=c,
- markeredgewidth=2,
- )
-
- if draw_bbox:
- [bbox_x, bbox_y, bbox_w, bbox_h] = ann["bbox"]
- poly = [
- [bbox_x, bbox_y],
- [bbox_x, bbox_y + bbox_h],
- [bbox_x + bbox_w, bbox_y + bbox_h],
- [bbox_x + bbox_w, bbox_y],
- ]
- np_poly = np.array(poly).reshape((4, 2))
- polygons.append(Polygon(np_poly))
- color.append(c)
-
- # p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
- # ax.add_collection(p)
- p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2)
- ax.add_collection(p)
- elif datasetType == "captions":
- for ann in anns:
- print(ann["caption"])
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/adxenc.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/adxenc.c
deleted file mode 100644
index 796efdab63bd89f3ae0597e95ea320bff783652f..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/adxenc.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * ADX ADPCM codecs
- * Copyright (c) 2001,2003 BERO
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "avcodec.h"
-#include "adx.h"
-#include "bytestream.h"
-#include "codec_internal.h"
-#include "encode.h"
-#include "put_bits.h"
-
-/**
- * @file
- * SEGA CRI adx codecs.
- *
- * Reference documents:
- * http://ku-www.ss.titech.ac.jp/~yatsushi/adx.html
- * adx2wav & wav2adx http://www.geocities.co.jp/Playtown/2004/
- */
-
-static void adx_encode(ADXContext *c, uint8_t *adx, const int16_t *wav,
- ADXChannelState *prev, int channels)
-{
- PutBitContext pb;
- int scale;
- int i, j;
- int s0, s1, s2, d;
- int max = 0;
- int min = 0;
-
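-    /* First pass: run the prediction filter over the block to find the residual range,
-     * which determines the scale factor stored in the block header. */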
- s1 = prev->s1;
- s2 = prev->s2;
- for (i = 0, j = 0; j < 32; i += channels, j++) {
- s0 = wav[i];
- d = s0 + ((-c->coeff[0] * s1 - c->coeff[1] * s2) >> COEFF_BITS);
- if (max < d)
- max = d;
- if (min > d)
- min = d;
- s2 = s1;
- s1 = s0;
- }
-
- if (max == 0 && min == 0) {
- prev->s1 = s1;
- prev->s2 = s2;
- memset(adx, 0, BLOCK_SIZE);
- return;
- }
-
- if (max / 7 > -min / 8)
- scale = max / 7;
- else
- scale = -min / 8;
-
- if (scale == 0)
- scale = 1;
-
- AV_WB16(adx, scale);
-
- init_put_bits(&pb, adx + 2, 16);
-
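-    /* Second pass: quantize each prediction residual to a signed 4-bit code and
-     * track the reconstructed decoder state. */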
- s1 = prev->s1;
- s2 = prev->s2;
- for (i = 0, j = 0; j < 32; i += channels, j++) {
- d = wav[i] + ((-c->coeff[0] * s1 - c->coeff[1] * s2) >> COEFF_BITS);
-
- d = av_clip_intp2(ROUNDED_DIV(d, scale), 3);
-
- put_sbits(&pb, 4, d);
-
- s0 = d * scale + ((c->coeff[0] * s1 + c->coeff[1] * s2) >> COEFF_BITS);
- s2 = s1;
- s1 = s0;
- }
- prev->s1 = s1;
- prev->s2 = s2;
-
- flush_put_bits(&pb);
-}
-
-#define HEADER_SIZE 36
-
-static int adx_encode_header(AVCodecContext *avctx, uint8_t *buf, int bufsize)
-{
- ADXContext *c = avctx->priv_data;
-
- bytestream_put_be16(&buf, 0x8000); /* header signature */
- bytestream_put_be16(&buf, HEADER_SIZE - 4); /* copyright offset */
- bytestream_put_byte(&buf, 3); /* encoding */
- bytestream_put_byte(&buf, BLOCK_SIZE); /* block size */
- bytestream_put_byte(&buf, 4); /* sample size */
- bytestream_put_byte(&buf, avctx->ch_layout.nb_channels); /* channels */
- bytestream_put_be32(&buf, avctx->sample_rate); /* sample rate */
- bytestream_put_be32(&buf, 0); /* total sample count */
- bytestream_put_be16(&buf, c->cutoff); /* cutoff frequency */
- bytestream_put_byte(&buf, 3); /* version */
- bytestream_put_byte(&buf, 0); /* flags */
- bytestream_put_be32(&buf, 0); /* unknown */
- bytestream_put_be32(&buf, 0); /* loop enabled */
- bytestream_put_be16(&buf, 0); /* padding */
- bytestream_put_buffer(&buf, "(c)CRI", 6); /* copyright signature */
-
- return HEADER_SIZE;
-}
-
-static av_cold int adx_encode_init(AVCodecContext *avctx)
-{
- ADXContext *c = avctx->priv_data;
-
- if (avctx->ch_layout.nb_channels > 2) {
- av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
- return AVERROR(EINVAL);
- }
- avctx->frame_size = BLOCK_SAMPLES;
-
- /* the cutoff can be adjusted, but this seems to work pretty well */
- c->cutoff = 500;
- ff_adx_calculate_coeffs(c->cutoff, avctx->sample_rate, COEFF_BITS, c->coeff);
-
- return 0;
-}
-
-static int adx_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
- const AVFrame *frame, int *got_packet_ptr)
-{
- ADXContext *c = avctx->priv_data;
- const int16_t *samples = frame ? (const int16_t *)frame->data[0] : NULL;
- uint8_t *dst;
- int channels = avctx->ch_layout.nb_channels;
- int ch, out_size, ret;
-
- if (!samples) {
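-        /* NULL frame means we are flushing: emit the 18-byte ADX end-of-stream block (0x8001 marker) exactly once. */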
- if (c->eof)
- return 0;
- if ((ret = ff_get_encode_buffer(avctx, avpkt, 18, 0)) < 0)
- return ret;
- c->eof = 1;
- dst = avpkt->data;
- bytestream_put_be16(&dst, 0x8001);
- bytestream_put_be16(&dst, 0x000E);
- bytestream_put_be64(&dst, 0x0);
- bytestream_put_be32(&dst, 0x0);
- bytestream_put_be16(&dst, 0x0);
- *got_packet_ptr = 1;
- return 0;
- }
-
- out_size = BLOCK_SIZE * channels + !c->header_parsed * HEADER_SIZE;
- if ((ret = ff_get_encode_buffer(avctx, avpkt, out_size, 0)) < 0)
- return ret;
- dst = avpkt->data;
-
- if (!c->header_parsed) {
- int hdrsize;
- if ((hdrsize = adx_encode_header(avctx, dst, avpkt->size)) < 0) {
- av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
- return AVERROR(EINVAL);
- }
- dst += hdrsize;
- c->header_parsed = 1;
- }
-
- for (ch = 0; ch < channels; ch++) {
- adx_encode(c, dst, samples + ch, &c->prev[ch], channels);
- dst += BLOCK_SIZE;
- }
-
- *got_packet_ptr = 1;
- return 0;
-}
-
-const FFCodec ff_adpcm_adx_encoder = {
- .p.name = "adpcm_adx",
- CODEC_LONG_NAME("SEGA CRI ADX ADPCM"),
- .p.type = AVMEDIA_TYPE_AUDIO,
- .p.id = AV_CODEC_ID_ADPCM_ADX,
- .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
- AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE,
- .priv_data_size = sizeof(ADXContext),
- .init = adx_encode_init,
- FF_CODEC_ENCODE_CB(adx_encode_frame),
- .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16,
- AV_SAMPLE_FMT_NONE },
- .caps_internal = FF_CODEC_CAP_EOF_FLUSH,
-};
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/binkaudio.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/binkaudio.c
deleted file mode 100644
index f28ecba7603f4a3247dd5feaa9a914c3ff49ae1e..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/binkaudio.c
+++ /dev/null
@@ -1,397 +0,0 @@
-/*
- * Bink Audio decoder
- * Copyright (c) 2007-2011 Peter Ross (pross@xvid.org)
- * Copyright (c) 2009 Daniel Verkamp (daniel@drv.nu)
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * Bink Audio decoder
- *
- * Technical details here:
- * http://wiki.multimedia.cx/index.php?title=Bink_Audio
- */
-
-#include "config_components.h"
-
-#include "libavutil/channel_layout.h"
-#include "libavutil/intfloat.h"
-#include "libavutil/mem_internal.h"
-#include "libavutil/tx.h"
-
-#define BITSTREAM_READER_LE
-#include "avcodec.h"
-#include "decode.h"
-#include "get_bits.h"
-#include "codec_internal.h"
-#include "internal.h"
-#include "wma_freqs.h"
-
-#define MAX_DCT_CHANNELS 6
-#define MAX_CHANNELS 2
-#define BINK_BLOCK_MAX_SIZE (MAX_CHANNELS << 11)
-
-typedef struct BinkAudioContext {
- GetBitContext gb;
- int version_b; ///< Bink version 'b'
- int first;
- int channels;
- int ch_offset;
- int frame_len; ///< transform size (samples)
- int overlap_len; ///< overlap size (samples)
- int block_size;
- int num_bands;
- float root;
- unsigned int bands[26];
- float previous[MAX_DCT_CHANNELS][BINK_BLOCK_MAX_SIZE / 16]; ///< coeffs from previous audio block
- float quant_table[96];
- AVPacket *pkt;
- AVTXContext *tx;
- av_tx_fn tx_fn;
-} BinkAudioContext;
-
-
-static av_cold int decode_init(AVCodecContext *avctx)
-{
- BinkAudioContext *s = avctx->priv_data;
- int sample_rate = avctx->sample_rate;
- int sample_rate_half;
- int i, ret;
- int frame_len_bits;
- int max_channels = avctx->codec->id == AV_CODEC_ID_BINKAUDIO_RDFT ? MAX_CHANNELS : MAX_DCT_CHANNELS;
- int channels = avctx->ch_layout.nb_channels;
-
- /* determine frame length */
- if (avctx->sample_rate < 22050) {
- frame_len_bits = 9;
- } else if (avctx->sample_rate < 44100) {
- frame_len_bits = 10;
- } else {
- frame_len_bits = 11;
- }
-
- if (channels < 1 || channels > max_channels) {
- av_log(avctx, AV_LOG_ERROR, "invalid number of channels: %d\n", channels);
- return AVERROR_INVALIDDATA;
- }
- av_channel_layout_uninit(&avctx->ch_layout);
- av_channel_layout_default(&avctx->ch_layout, channels);
-
- s->version_b = avctx->extradata_size >= 4 && avctx->extradata[3] == 'b';
-
- if (avctx->codec->id == AV_CODEC_ID_BINKAUDIO_RDFT) {
- // audio is already interleaved for the RDFT format variant
- avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
- if (sample_rate > INT_MAX / channels)
- return AVERROR_INVALIDDATA;
- sample_rate *= channels;
- s->channels = 1;
- if (!s->version_b)
- frame_len_bits += av_log2(channels);
- } else {
- s->channels = channels;
- avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
- }
-
- s->frame_len = 1 << frame_len_bits;
- s->overlap_len = s->frame_len / 16;
- s->block_size = (s->frame_len - s->overlap_len) * FFMIN(MAX_CHANNELS, s->channels);
- sample_rate_half = (sample_rate + 1LL) / 2;
- if (avctx->codec->id == AV_CODEC_ID_BINKAUDIO_RDFT)
- s->root = 2.0 / (sqrt(s->frame_len) * 32768.0);
- else
- s->root = s->frame_len / (sqrt(s->frame_len) * 32768.0);
- for (i = 0; i < 96; i++) {
- /* constant is result of 0.066399999/log10(M_E) */
- s->quant_table[i] = expf(i * 0.15289164787221953823f) * s->root;
- }
-
- /* calculate number of bands */
- for (s->num_bands = 1; s->num_bands < 25; s->num_bands++)
- if (sample_rate_half <= ff_wma_critical_freqs[s->num_bands - 1])
- break;
-
- /* populate bands data */
- s->bands[0] = 2;
- for (i = 1; i < s->num_bands; i++)
- s->bands[i] = (ff_wma_critical_freqs[i - 1] * s->frame_len / sample_rate_half) & ~1;
- s->bands[s->num_bands] = s->frame_len;
-
- s->first = 1;
-
- if (CONFIG_BINKAUDIO_RDFT_DECODER && avctx->codec->id == AV_CODEC_ID_BINKAUDIO_RDFT) {
- float scale = 0.5;
- ret = av_tx_init(&s->tx, &s->tx_fn, AV_TX_FLOAT_RDFT, 1, 1 << frame_len_bits, &scale, 0);
- } else if (CONFIG_BINKAUDIO_DCT_DECODER) {
- float scale = 1.0 / (1 << frame_len_bits);
- ret = av_tx_init(&s->tx, &s->tx_fn, AV_TX_FLOAT_DCT, 1, 1 << (frame_len_bits - 1), &scale, 0);
- } else {
- av_assert0(0);
- }
- if (ret < 0)
- return ret;
-
- s->pkt = avctx->internal->in_pkt;
-
- return 0;
-}
-
-static float get_float(GetBitContext *gb)
-{
- int power = get_bits(gb, 5);
- float f = ldexpf(get_bits(gb, 23), power - 23);
- if (get_bits1(gb))
- f = -f;
- return f;
-}
-
-static const uint8_t rle_length_tab[16] = {
- 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 32, 64
-};
-
-/**
- * Decode Bink Audio block
- * @param[out] out Output buffer (must contain s->block_size elements)
- * @return 0 on success, negative error code on failure
- */
-static int decode_block(BinkAudioContext *s, float **out, int use_dct,
- int channels, int ch_offset)
-{
- int ch, i, j, k;
- float q, quant[25];
- int width, coeff;
- GetBitContext *gb = &s->gb;
- LOCAL_ALIGNED_32(float, coeffs, [4098]);
-
- if (use_dct)
- skip_bits(gb, 2);
-
- for (ch = 0; ch < channels; ch++) {
- if (s->version_b) {
- if (get_bits_left(gb) < 64)
- return AVERROR_INVALIDDATA;
- coeffs[0] = av_int2float(get_bits_long(gb, 32)) * s->root;
- coeffs[1] = av_int2float(get_bits_long(gb, 32)) * s->root;
- } else {
- if (get_bits_left(gb) < 58)
- return AVERROR_INVALIDDATA;
- coeffs[0] = get_float(gb) * s->root;
- coeffs[1] = get_float(gb) * s->root;
- }
-
- if (get_bits_left(gb) < s->num_bands * 8)
- return AVERROR_INVALIDDATA;
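-        /* one 8-bit quantizer index per critical band */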
- for (i = 0; i < s->num_bands; i++) {
- int value = get_bits(gb, 8);
- quant[i] = s->quant_table[FFMIN(value, 95)];
- }
-
- k = 0;
- q = quant[0];
-
- // parse coefficients
- i = 2;
- while (i < s->frame_len) {
- if (s->version_b) {
- j = i + 16;
- } else {
- int v = get_bits1(gb);
- if (v) {
- v = get_bits(gb, 4);
- j = i + rle_length_tab[v] * 8;
- } else {
- j = i + 8;
- }
- }
-
- j = FFMIN(j, s->frame_len);
-
- width = get_bits(gb, 4);
- if (width == 0) {
- memset(coeffs + i, 0, (j - i) * sizeof(*coeffs));
- i = j;
- while (s->bands[k] < i)
- q = quant[k++];
- } else {
- while (i < j) {
- if (s->bands[k] == i)
- q = quant[k++];
- coeff = get_bits(gb, width);
- if (coeff) {
- int v;
- v = get_bits1(gb);
- if (v)
- coeffs[i] = -q * coeff;
- else
- coeffs[i] = q * coeff;
- } else {
- coeffs[i] = 0.0f;
- }
- i++;
- }
- }
- }
-
- if (CONFIG_BINKAUDIO_DCT_DECODER && use_dct) {
- coeffs[0] /= 0.5;
- s->tx_fn(s->tx, out[ch + ch_offset], coeffs, sizeof(float));
- } else if (CONFIG_BINKAUDIO_RDFT_DECODER) {
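-            /* repack the legacy single-buffer RDFT layout (Nyquist stored in coeffs[1]) into the
-             * separate DC/Nyquist bins expected by av_tx, negating imaginary parts to match its
-             * sign convention */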
- for (int i = 2; i < s->frame_len; i += 2)
- coeffs[i + 1] *= -1;
-
- coeffs[s->frame_len + 0] = coeffs[1];
- coeffs[s->frame_len + 1] = coeffs[1] = 0;
- s->tx_fn(s->tx, out[ch + ch_offset], coeffs, sizeof(AVComplexFloat));
- }
- }
-
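-    /* Crossfade the first overlap_len samples with the tail saved from the previous block,
-     * then store this block's tail for the next call. */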
- for (ch = 0; ch < channels; ch++) {
- int j;
- int count = s->overlap_len * channels;
- if (!s->first) {
- j = ch;
- for (i = 0; i < s->overlap_len; i++, j += channels)
- out[ch + ch_offset][i] = (s->previous[ch + ch_offset][i] * (count - j) +
- out[ch + ch_offset][i] * j) / count;
- }
- memcpy(s->previous[ch + ch_offset], &out[ch + ch_offset][s->frame_len - s->overlap_len],
- s->overlap_len * sizeof(*s->previous[ch + ch_offset]));
- }
-
- s->first = 0;
-
- return 0;
-}
-
-static av_cold int decode_end(AVCodecContext *avctx)
-{
- BinkAudioContext * s = avctx->priv_data;
- av_tx_uninit(&s->tx);
- return 0;
-}
-
-static void get_bits_align32(GetBitContext *s)
-{
- int n = (-get_bits_count(s)) & 31;
- if (n) skip_bits(s, n);
-}
-
-static int binkaudio_receive_frame(AVCodecContext *avctx, AVFrame *frame)
-{
- BinkAudioContext *s = avctx->priv_data;
- GetBitContext *gb = &s->gb;
- int new_pkt, ret;
-
-again:
- new_pkt = !s->pkt->data;
- if (!s->pkt->data) {
- ret = ff_decode_get_packet(avctx, s->pkt);
- if (ret < 0) {
- s->ch_offset = 0;
- return ret;
- }
-
- if (s->pkt->size < 4) {
- av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
- ret = AVERROR_INVALIDDATA;
- goto fail;
- }
-
- ret = init_get_bits8(gb, s->pkt->data, s->pkt->size);
- if (ret < 0)
- goto fail;
-
- /* skip reported size */
- skip_bits_long(gb, 32);
- }
-
- /* get output buffer */
- if (s->ch_offset == 0) {
- frame->nb_samples = s->frame_len;
- if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
- return ret;
- if (!new_pkt)
- frame->pts = AV_NOPTS_VALUE;
- }
-
- if (decode_block(s, (float **)frame->extended_data,
- avctx->codec->id == AV_CODEC_ID_BINKAUDIO_DCT,
- FFMIN(MAX_CHANNELS, s->channels - s->ch_offset), s->ch_offset)) {
- av_log(avctx, AV_LOG_ERROR, "Incomplete packet\n");
- s->ch_offset = 0;
- return AVERROR_INVALIDDATA;
- }
- s->ch_offset += MAX_CHANNELS;
- get_bits_align32(gb);
- if (!get_bits_left(gb)) {
- memset(gb, 0, sizeof(*gb));
- av_packet_unref(s->pkt);
- }
- if (s->ch_offset >= s->channels) {
- s->ch_offset = 0;
- } else {
- goto again;
- }
-
- frame->nb_samples = s->block_size / FFMIN(avctx->ch_layout.nb_channels, MAX_CHANNELS);
-
- return 0;
-fail:
- s->ch_offset = 0;
- av_packet_unref(s->pkt);
- return ret;
-}
-
-static void decode_flush(AVCodecContext *avctx)
-{
- BinkAudioContext *const s = avctx->priv_data;
-
- /* s->pkt coincides with avctx->internal->in_pkt
- * and is unreferenced generically when flushing. */
- s->first = 1;
- s->ch_offset = 0;
-}
-
-const FFCodec ff_binkaudio_rdft_decoder = {
- .p.name = "binkaudio_rdft",
- CODEC_LONG_NAME("Bink Audio (RDFT)"),
- .p.type = AVMEDIA_TYPE_AUDIO,
- .p.id = AV_CODEC_ID_BINKAUDIO_RDFT,
- .priv_data_size = sizeof(BinkAudioContext),
- .init = decode_init,
- .flush = decode_flush,
- .close = decode_end,
- FF_CODEC_RECEIVE_FRAME_CB(binkaudio_receive_frame),
- .p.capabilities = AV_CODEC_CAP_DR1,
- .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
-};
-
-const FFCodec ff_binkaudio_dct_decoder = {
- .p.name = "binkaudio_dct",
- CODEC_LONG_NAME("Bink Audio (DCT)"),
- .p.type = AVMEDIA_TYPE_AUDIO,
- .p.id = AV_CODEC_ID_BINKAUDIO_DCT,
- .priv_data_size = sizeof(BinkAudioContext),
- .init = decode_init,
- .flush = decode_flush,
- .close = decode_end,
- FF_CODEC_RECEIVE_FRAME_CB(binkaudio_receive_frame),
- .p.capabilities = AV_CODEC_CAP_DR1,
- .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
-};
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/escape124.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/escape124.c
deleted file mode 100644
index 592de09a9fdbed03b57557e8a908e3161a14ac8f..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/escape124.c
+++ /dev/null
@@ -1,385 +0,0 @@
-/*
- * Escape 124 Video Decoder
- * Copyright (C) 2008 Eli Friedman (eli.friedman@gmail.com)
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#define BITSTREAM_READER_LE
-#include "avcodec.h"
-#include "codec_internal.h"
-#include "decode.h"
-#include "get_bits.h"
-
-typedef union MacroBlock {
- uint16_t pixels[4];
- uint32_t pixels32[2];
-} MacroBlock;
-
-typedef union SuperBlock {
- uint16_t pixels[64];
- uint32_t pixels32[32];
-} SuperBlock;
-
-typedef struct CodeBook {
- unsigned depth;
- unsigned size;
- MacroBlock* blocks;
-} CodeBook;
-
-typedef struct Escape124Context {
- AVFrame *frame;
-
- unsigned num_superblocks;
-
- CodeBook codebooks[3];
-} Escape124Context;
-
-/**
- * Initialize the decoder
- * @param avctx decoder context
- * @return 0 success, negative on error
- */
-static av_cold int escape124_decode_init(AVCodecContext *avctx)
-{
- Escape124Context *s = avctx->priv_data;
-
- avctx->pix_fmt = AV_PIX_FMT_RGB555;
-
- s->num_superblocks = ((unsigned)avctx->width / 8) *
- ((unsigned)avctx->height / 8);
-
- s->frame = av_frame_alloc();
- if (!s->frame)
- return AVERROR(ENOMEM);
-
- return 0;
-}
-
-static av_cold int escape124_decode_close(AVCodecContext *avctx)
-{
- unsigned i;
- Escape124Context *s = avctx->priv_data;
-
- for (i = 0; i < 3; i++)
- av_freep(&s->codebooks[i].blocks);
-
- av_frame_free(&s->frame);
-
- return 0;
-}
-
-static CodeBook unpack_codebook(GetBitContext* gb, unsigned depth,
- unsigned size)
-{
- unsigned i, j;
- CodeBook cb = { 0 };
-
- cb.blocks = av_malloc(size ? size * sizeof(MacroBlock) : 1);
- if (!cb.blocks)
- return cb;
-
- cb.depth = depth;
- cb.size = size;
- for (i = 0; i < size; i++) {
- unsigned mask_bits = get_bits(gb, 4);
- unsigned color[2];
- color[0] = get_bits(gb, 15);
- color[1] = get_bits(gb, 15);
-
- for (j = 0; j < 4; j++)
- cb.blocks[i].pixels[j] = color[(mask_bits>>j) & 1];
- }
- return cb;
-}
-
-static unsigned decode_skip_count(GetBitContext* gb)
-{
- unsigned value;
- // This function reads a maximum of 23 bits,
- // which is within the padding space
- if (get_bits_left(gb) < 1)
- return -1;
- value = get_bits1(gb);
- if (!value)
- return value;
-
- value += get_bits(gb, 3);
- if (value != (1 + ((1 << 3) - 1)))
- return value;
-
- value += get_bits(gb, 7);
- if (value != (1 + ((1 << 3) - 1)) + ((1 << 7) - 1))
- return value;
-
- return value + get_bits(gb, 12);
-}
-
-static MacroBlock decode_macroblock(Escape124Context* s, GetBitContext* gb,
- int* codebook_index, int superblock_index)
-{
- // This function reads a maximum of 22 bits; the callers
- // guard this function appropriately
- unsigned block_index, depth;
- int value = get_bits1(gb);
- if (value) {
- static const int8_t transitions[3][2] = { {2, 1}, {0, 2}, {1, 0} };
- value = get_bits1(gb);
- *codebook_index = transitions[*codebook_index][value];
- }
-
- depth = s->codebooks[*codebook_index].depth;
-
- // depth = 0 means that this shouldn't read any bits;
- // in theory, this is the same as get_bits(gb, 0), but
- // that doesn't actually work.
- block_index = get_bitsz(gb, depth);
-
- if (*codebook_index == 1) {
- block_index += superblock_index << s->codebooks[1].depth;
- }
-
- // This condition can occur with invalid bitstreams and
- // *codebook_index == 2
- if (block_index >= s->codebooks[*codebook_index].size || !s->codebooks[*codebook_index].blocks)
- return (MacroBlock) { { 0 } };
-
- return s->codebooks[*codebook_index].blocks[block_index];
-}
-
-static void insert_mb_into_sb(SuperBlock* sb, MacroBlock mb, unsigned index) {
- // Formula: ((index / 4) * 16 + (index % 4) * 2) / 2
- uint32_t *dst = sb->pixels32 + index + (index & -4);
-
- // This technically violates C99 aliasing rules, but it should be safe.
- dst[0] = mb.pixels32[0];
- dst[4] = mb.pixels32[1];
-}
-
-static void copy_superblock(uint16_t* dest, ptrdiff_t dest_stride,
- uint16_t* src, ptrdiff_t src_stride)
-{
- unsigned y;
- if (src)
- for (y = 0; y < 8; y++)
- memcpy(dest + y * dest_stride, src + y * src_stride,
- sizeof(uint16_t) * 8);
- else
- for (y = 0; y < 8; y++)
- memset(dest + y * dest_stride, 0, sizeof(uint16_t) * 8);
-}
-
-static const uint16_t mask_matrix[] = {0x1, 0x2, 0x10, 0x20,
- 0x4, 0x8, 0x40, 0x80,
- 0x100, 0x200, 0x1000, 0x2000,
- 0x400, 0x800, 0x4000, 0x8000};
-
-static int escape124_decode_frame(AVCodecContext *avctx, AVFrame *frame,
- int *got_frame, AVPacket *avpkt)
-{
- int buf_size = avpkt->size;
- Escape124Context *s = avctx->priv_data;
-
- GetBitContext gb;
- unsigned frame_flags, frame_size;
- unsigned i;
-
- unsigned superblock_index, cb_index = 1,
- superblock_col_index = 0,
- superblocks_per_row = avctx->width / 8, skip = -1;
-
- uint16_t* old_frame_data, *new_frame_data;
- ptrdiff_t old_stride, new_stride;
-
- int ret;
-
- if ((ret = init_get_bits8(&gb, avpkt->data, avpkt->size)) < 0)
- return ret;
-
- // This call also guards the potential depth reads for the
- // codebook unpacking.
- // Check if the amount we will read minimally is available on input.
- // The 64 represent the immediately next 2 frame_* elements read, the 23/4320
- // represent a lower bound of the space needed for skipped superblocks. Non
- // skipped SBs need more space.
- if (get_bits_left(&gb) < 64 + s->num_superblocks * 23LL / 4320)
- return AVERROR_INVALIDDATA;
-
- frame_flags = get_bits_long(&gb, 32);
- frame_size = get_bits_long(&gb, 32);
-
- // Leave last frame unchanged
- // FIXME: Is this necessary? I haven't seen it in any real samples
- if (!(frame_flags & 0x114) || !(frame_flags & 0x7800000)) {
- if (!s->frame->data[0])
- return AVERROR_INVALIDDATA;
-
- av_log(avctx, AV_LOG_DEBUG, "Skipping frame\n");
-
- *got_frame = 1;
- if ((ret = av_frame_ref(frame, s->frame)) < 0)
- return ret;
-
- return frame_size;
- }
-
- for (i = 0; i < 3; i++) {
- if (frame_flags & (1 << (17 + i))) {
- unsigned cb_depth, cb_size;
- if (i == 2) {
- // This codebook can be cut off at places other than
- // powers of 2, leaving some of the entries undefined.
- cb_size = get_bits(&gb, 20);
- if (!cb_size) {
- av_log(avctx, AV_LOG_ERROR, "Invalid codebook size 0.\n");
- return AVERROR_INVALIDDATA;
- }
- cb_depth = av_log2(cb_size - 1) + 1;
- } else {
- cb_depth = get_bits(&gb, 4);
- if (i == 0) {
- // This is the most basic codebook: pow(2,depth) entries
- // for a depth-length key
- cb_size = 1 << cb_depth;
- } else {
- // This codebook varies per superblock
- // FIXME: I don't think this handles integer overflow
- // properly
- cb_size = s->num_superblocks << cb_depth;
- }
- }
- if (s->num_superblocks >= INT_MAX >> cb_depth) {
- av_log(avctx, AV_LOG_ERROR, "Depth or num_superblocks are too large\n");
- return AVERROR_INVALIDDATA;
- }
-
- av_freep(&s->codebooks[i].blocks);
- if (cb_size >= INT_MAX / 34 || get_bits_left(&gb) < (int)cb_size * 34)
- return AVERROR_INVALIDDATA;
-
- if (cb_size >= INT_MAX / sizeof(MacroBlock))
- return AVERROR_INVALIDDATA;
- s->codebooks[i] = unpack_codebook(&gb, cb_depth, cb_size);
- if (!s->codebooks[i].blocks)
- return AVERROR(ENOMEM);
- }
- }
-
- if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
- return ret;
-
- new_frame_data = (uint16_t*)frame->data[0];
- new_stride = frame->linesize[0] / 2;
- old_frame_data = (uint16_t*)s->frame->data[0];
- old_stride = s->frame->linesize[0] / 2;
-
- for (superblock_index = 0; superblock_index < s->num_superblocks;
- superblock_index++) {
- MacroBlock mb;
- SuperBlock sb;
- unsigned multi_mask = 0;
-
- if (skip == -1) {
- // Note that this call will make us skip the rest of the blocks
- // if the frame prematurely ends
- skip = decode_skip_count(&gb);
- }
-
- if (skip) {
- copy_superblock(new_frame_data, new_stride,
- old_frame_data, old_stride);
- } else {
- copy_superblock(sb.pixels, 8,
- old_frame_data, old_stride);
-
- while (get_bits_left(&gb) >= 1 && !get_bits1(&gb)) {
- unsigned mask;
- mb = decode_macroblock(s, &gb, &cb_index, superblock_index);
- mask = get_bits(&gb, 16);
- multi_mask |= mask;
- for (i = 0; i < 16; i++) {
- if (mask & mask_matrix[i]) {
- insert_mb_into_sb(&sb, mb, i);
- }
- }
- }
-
- if (!get_bits1(&gb)) {
- unsigned inv_mask = get_bits(&gb, 4);
- for (i = 0; i < 4; i++) {
- if (inv_mask & (1 << i)) {
- multi_mask ^= 0xF << i*4;
- } else {
- multi_mask ^= get_bits(&gb, 4) << i*4;
- }
- }
-
- for (i = 0; i < 16; i++) {
- if (multi_mask & mask_matrix[i]) {
- mb = decode_macroblock(s, &gb, &cb_index,
- superblock_index);
- insert_mb_into_sb(&sb, mb, i);
- }
- }
- } else if (frame_flags & (1 << 16)) {
- while (get_bits_left(&gb) >= 1 && !get_bits1(&gb)) {
- mb = decode_macroblock(s, &gb, &cb_index, superblock_index);
- insert_mb_into_sb(&sb, mb, get_bits(&gb, 4));
- }
- }
-
- copy_superblock(new_frame_data, new_stride, sb.pixels, 8);
- }
-
- superblock_col_index++;
- new_frame_data += 8;
- if (old_frame_data)
- old_frame_data += 8;
- if (superblock_col_index == superblocks_per_row) {
- new_frame_data += new_stride * 8 - superblocks_per_row * 8;
- if (old_frame_data)
- old_frame_data += old_stride * 8 - superblocks_per_row * 8;
- superblock_col_index = 0;
- }
- skip--;
- }
-
- av_log(avctx, AV_LOG_DEBUG,
- "Escape sizes: %i, %i, %i\n",
- frame_size, buf_size, get_bits_count(&gb) / 8);
-
- av_frame_unref(s->frame);
- if ((ret = av_frame_ref(s->frame, frame)) < 0)
- return ret;
-
- *got_frame = 1;
-
- return frame_size;
-}
-
-
-const FFCodec ff_escape124_decoder = {
- .p.name = "escape124",
- CODEC_LONG_NAME("Escape 124"),
- .p.type = AVMEDIA_TYPE_VIDEO,
- .p.id = AV_CODEC_ID_ESCAPE124,
- .priv_data_size = sizeof(Escape124Context),
- .init = escape124_decode_init,
- .close = escape124_decode_close,
- FF_CODEC_DECODE_CB(escape124_decode_frame),
- .p.capabilities = AV_CODEC_CAP_DR1,
-};
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/fitsdec.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/fitsdec.c
deleted file mode 100644
index b9c51e70c311adc62bce57d6b968081bd30a2fdd..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/fitsdec.c
+++ /dev/null
@@ -1,333 +0,0 @@
-/*
- * FITS image decoder
- * Copyright (c) 2017 Paras Chadha
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * FITS image decoder
- *
- * Specification: https://fits.gsfc.nasa.gov/fits_standard.html Version 3.0
- *
- * Supports all 2D images, along with the BZERO, BSCALE and BLANK keywords.
- * RGBA images are supported as NAXIS3 = 3 or 4, i.e. planes in RGBA order. CTYPE = 'RGB ' should also be present.
- * To interpret the data, values are linearly scaled using min-max scaling, except for RGB images.
- */
-
-#include "avcodec.h"
-#include "codec_internal.h"
-#include "decode.h"
-#include <float.h>
-#include "libavutil/intreadwrite.h"
-#include "libavutil/intfloat.h"
-#include "libavutil/dict.h"
-#include "libavutil/opt.h"
-#include "fits.h"
-
-typedef struct FITSContext {
- const AVClass *class;
- int blank_val;
-} FITSContext;
-
-/**
- * Calculate the data_min and data_max values from the data.
- * This is called if the values are not present in the header.
- * @param ptr8 pointer to the data
- * @param header pointer to the header
- * @param end pointer to end of packet
- * @return 0 if calculated successfully otherwise AVERROR_INVALIDDATA
- */
-static int fill_data_min_max(const uint8_t *ptr8, FITSHeader *header, const uint8_t *end)
-{
- uint8_t t8;
- int16_t t16;
- int32_t t32;
- int64_t t64;
- float tflt;
- double tdbl;
- int i, j;
-
- header->data_min = DBL_MAX;
- header->data_max = -DBL_MAX;
- switch (header->bitpix) {
-#define CASE_N(a, t, rd) \
- case a: \
- for (i = 0; i < header->naxisn[1]; i++) { \
- for (j = 0; j < header->naxisn[0]; j++) { \
- t = rd; \
- if (!header->blank_found || t != header->blank) { \
- if (t > header->data_max) \
- header->data_max = t; \
- if (t < header->data_min) \
- header->data_min = t; \
- } \
- ptr8 += abs(a) >> 3; \
- } \
- } \
- break
-
- CASE_N(-64, tdbl, av_int2double(AV_RB64(ptr8)));
- CASE_N(-32, tflt, av_int2float(AV_RB32(ptr8)));
- CASE_N(8, t8, ptr8[0]);
- CASE_N(16, t16, AV_RB16(ptr8));
- CASE_N(32, t32, AV_RB32(ptr8));
- CASE_N(64, t64, AV_RB64(ptr8));
- default:
- return AVERROR_INVALIDDATA;
- }
- return 0;
-}
-
-/**
- * Read the fits header and store the values in FITSHeader pointed by header
- * @param avctx AVCodec context
- * @param ptr pointer to pointer to the data
- * @param header pointer to the FITSHeader
- * @param end pointer to end of packet
- * @param metadata pointer to pointer to AVDictionary to store metadata
- * @return 0 if calculated successfully otherwise AVERROR_INVALIDDATA
- */
-static int fits_read_header(AVCodecContext *avctx, const uint8_t **ptr, FITSHeader *header,
- const uint8_t *end, AVDictionary **metadata)
-{
- const uint8_t *ptr8 = *ptr;
- int lines_read, bytes_left, i, ret;
- size_t size;
-
- lines_read = 1; // to account for first header line, SIMPLE or XTENSION which is not included in packet...
- avpriv_fits_header_init(header, STATE_BITPIX);
- do {
- if (end - ptr8 < 80)
- return AVERROR_INVALIDDATA;
- ret = avpriv_fits_header_parse_line(avctx, header, ptr8, &metadata);
- ptr8 += 80;
- lines_read++;
- } while (!ret);
- if (ret < 0)
- return ret;
-
- bytes_left = (((lines_read + 35) / 36) * 36 - lines_read) * 80;
- if (end - ptr8 < bytes_left)
- return AVERROR_INVALIDDATA;
- ptr8 += bytes_left;
-
- if (header->rgb && (header->naxis != 3 || (header->naxisn[2] != 3 && header->naxisn[2] != 4))) {
- av_log(avctx, AV_LOG_ERROR, "File contains RGB image but NAXIS = %d and NAXIS3 = %d\n", header->naxis, header->naxisn[2]);
- return AVERROR_INVALIDDATA;
- }
-
- if (!header->rgb && header->naxis != 2) {
- av_log(avctx, AV_LOG_ERROR, "unsupported number of dimensions, NAXIS = %d\n", header->naxis);
- return AVERROR_INVALIDDATA;
- }
-
- if (header->blank_found && (header->bitpix == -32 || header->bitpix == -64)) {
- av_log(avctx, AV_LOG_WARNING, "BLANK keyword found but BITPIX = %d\n. Ignoring BLANK", header->bitpix);
- header->blank_found = 0;
- }
-
- size = abs(header->bitpix) >> 3;
- for (i = 0; i < header->naxis; i++) {
- if (size == 0 || header->naxisn[i] > SIZE_MAX / size) {
- av_log(avctx, AV_LOG_ERROR, "unsupported size of FITS image");
- return AVERROR_INVALIDDATA;
- }
- size *= header->naxisn[i];
- }
-
- if (end - ptr8 < size)
- return AVERROR_INVALIDDATA;
- *ptr = ptr8;
-
- if (!header->rgb && (!header->data_min_found || !header->data_max_found)) {
- ret = fill_data_min_max(ptr8, header, end);
- if (ret < 0) {
- av_log(avctx, AV_LOG_ERROR, "invalid BITPIX, %d\n", header->bitpix);
- return ret;
- }
- } else {
- /*
- * instead of applying bscale and bzero to every element,
- * we can do inverse transformation on data_min and data_max
- */
- header->data_min = (header->data_min - header->bzero) / header->bscale;
- header->data_max = (header->data_max - header->bzero) / header->bscale;
- }
- if (!header->rgb && header->data_min >= header->data_max) {
- if (header->data_min > header->data_max) {
- av_log(avctx, AV_LOG_ERROR, "data min/max (%g %g) is invalid\n", header->data_min, header->data_max);
- return AVERROR_INVALIDDATA;
- }
- av_log(avctx, AV_LOG_WARNING, "data min/max indicates a blank image\n");
- header->data_max ++;
- }
-
- return 0;
-}
-
-static int fits_decode_frame(AVCodecContext *avctx, AVFrame *p,
- int *got_frame, AVPacket *avpkt)
-{
- const uint8_t *ptr8 = avpkt->data, *end;
- uint8_t t8;
- int16_t t16;
- int32_t t32;
- int64_t t64;
- float tflt;
- double tdbl;
- int ret, i, j, k;
- const int map[] = {2, 0, 1, 3}; // mapping from GBRA -> RGBA as RGBA is to be stored in FITS file..
- uint8_t *dst8;
- uint16_t *dst16;
- uint64_t t;
- FITSHeader header;
- FITSContext * fitsctx = avctx->priv_data;
-
- end = ptr8 + avpkt->size;
- p->metadata = NULL;
- ret = fits_read_header(avctx, &ptr8, &header, end, &p->metadata);
- if (ret < 0)
- return ret;
-
- if (header.rgb) {
- if (header.bitpix == 8) {
- if (header.naxisn[2] == 3) {
- avctx->pix_fmt = AV_PIX_FMT_GBRP;
- } else {
- avctx->pix_fmt = AV_PIX_FMT_GBRAP;
- }
- } else if (header.bitpix == 16) {
- if (header.naxisn[2] == 3) {
- avctx->pix_fmt = AV_PIX_FMT_GBRP16;
- } else {
- avctx->pix_fmt = AV_PIX_FMT_GBRAP16;
- }
- } else {
- av_log(avctx, AV_LOG_ERROR, "unsupported BITPIX = %d\n", header.bitpix);
- return AVERROR_INVALIDDATA;
- }
- } else {
- if (header.bitpix == 8) {
- avctx->pix_fmt = AV_PIX_FMT_GRAY8;
- } else {
- avctx->pix_fmt = AV_PIX_FMT_GRAY16;
- }
- }
-
- if ((ret = ff_set_dimensions(avctx, header.naxisn[0], header.naxisn[1])) < 0)
- return ret;
-
- if ((ret = ff_get_buffer(avctx, p, 0)) < 0)
- return ret;
-
- /*
- * FITS stores images with bottom row first. Therefore we have
- * to fill the image from bottom to top.
- */
- if (header.rgb) {
- switch(header.bitpix) {
-#define CASE_RGB(cas, dst, type, dref) \
- case cas: \
- for (k = 0; k < header.naxisn[2]; k++) { \
- for (i = 0; i < avctx->height; i++) { \
- dst = (type *) (p->data[map[k]] + (avctx->height - i - 1) * p->linesize[map[k]]); \
- for (j = 0; j < avctx->width; j++) { \
- t32 = dref(ptr8); \
- if (!header.blank_found || t32 != header.blank) { \
- t = t32 * header.bscale + header.bzero; \
- } else { \
- t = fitsctx->blank_val; \
- } \
- *dst++ = (type) t; \
- ptr8 += cas >> 3; \
- } \
- } \
- } \
- break
-
- CASE_RGB(8, dst8, uint8_t, *);
- CASE_RGB(16, dst16, uint16_t, AV_RB16);
- }
- } else {
- double scale = header.data_max - header.data_min;
-
- if (scale <= 0 || !isfinite(scale)) {
- scale = 1;
- }
- scale = 1/scale;
-
- switch (header.bitpix) {
-#define CASE_GRAY(cas, dst, type, t, rd) \
- case cas: \
- for (i = 0; i < avctx->height; i++) { \
- dst = (type *) (p->data[0] + (avctx->height-i-1)* p->linesize[0]); \
- for (j = 0; j < avctx->width; j++) { \
- t = rd; \
- if (!header.blank_found || t != header.blank) { \
- *dst++ = lrint(((t - header.data_min) * ((1 << (sizeof(type) * 8)) - 1)) * scale); \
- } else { \
- *dst++ = fitsctx->blank_val; \
- } \
- ptr8 += abs(cas) >> 3; \
- } \
- } \
- break
-
- CASE_GRAY(-64, dst16, uint16_t, tdbl, av_int2double(AV_RB64(ptr8)));
- CASE_GRAY(-32, dst16, uint16_t, tflt, av_int2float(AV_RB32(ptr8)));
- CASE_GRAY(8, dst8, uint8_t, t8, ptr8[0]);
- CASE_GRAY(16, dst16, uint16_t, t16, AV_RB16(ptr8));
- CASE_GRAY(32, dst16, uint16_t, t32, AV_RB32(ptr8));
- CASE_GRAY(64, dst16, uint16_t, t64, AV_RB64(ptr8));
- default:
- av_log(avctx, AV_LOG_ERROR, "invalid BITPIX, %d\n", header.bitpix);
- return AVERROR_INVALIDDATA;
- }
- }
-
- p->key_frame = 1;
- p->pict_type = AV_PICTURE_TYPE_I;
-
- *got_frame = 1;
-
- return avpkt->size;
-}
-
-static const AVOption fits_options[] = {
- { "blank_value", "value that is used to replace BLANK pixels in data array", offsetof(FITSContext, blank_val), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 65535, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM},
- { NULL },
-};
-
-static const AVClass fits_decoder_class = {
- .class_name = "FITS decoder",
- .item_name = av_default_item_name,
- .option = fits_options,
- .version = LIBAVUTIL_VERSION_INT,
-};
-
-const FFCodec ff_fits_decoder = {
- .p.name = "fits",
- .p.type = AVMEDIA_TYPE_VIDEO,
- .p.id = AV_CODEC_ID_FITS,
- .p.capabilities = AV_CODEC_CAP_DR1,
- CODEC_LONG_NAME("Flexible Image Transport System"),
- .p.priv_class = &fits_decoder_class,
- .priv_data_size = sizeof(FITSContext),
- FF_CODEC_DECODE_CB(fits_decode_frame),
-};
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/hq_hqa.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/hq_hqa.h
deleted file mode 100644
index 71aa36706c0c87c6323f829987fcc17efd3001e2..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/hq_hqa.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Canopus HQ/HQA decoder
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVCODEC_HQ_HQA_H
-#define AVCODEC_HQ_HQA_H
-
-#include <stdint.h>
-
-#include "libavutil/mem_internal.h"
-
-#include "avcodec.h"
-#include "hq_hqadsp.h"
-#include "vlc.h"
-
-#define NUM_HQ_AC_ENTRIES 746
-#define NUM_HQ_PROFILES 22
-#define NUM_HQ_QUANTS 16
-
-typedef struct HQContext {
- AVCodecContext *avctx;
- HQDSPContext hqhqadsp;
-
- VLC hq_ac_vlc;
- VLC hqa_cbp_vlc;
- DECLARE_ALIGNED(16, int16_t, block)[12][64];
-} HQContext;
-
-typedef struct HQProfile {
- const uint8_t *perm_tab;
- int width, height;
- int num_slices;
- int tab_w, tab_h;
-} HQProfile;
-
-extern const int32_t * const ff_hq_quants[16][2][4];
-extern const HQProfile ff_hq_profile[NUM_HQ_PROFILES];
-
-extern const uint8_t ff_hq_ac_skips[NUM_HQ_AC_ENTRIES];
-extern const int16_t ff_hq_ac_syms [NUM_HQ_AC_ENTRIES];
-
-int ff_hq_init_vlcs(HQContext *c);
-
-#endif /* AVCODEC_HQ_HQA_H */
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/celp_math_mips.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/celp_math_mips.c
deleted file mode 100644
index ce711bd63c3e489a20cd47b56d3181f6380b1de1..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/celp_math_mips.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2012
- * MIPS Technologies, Inc., California.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * Author: Nedeljko Babic (nbabic@mips.com)
- *
- * Math operations optimized for MIPS
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * Reference: libavcodec/celp_math.c
- */
-#include "config.h"
-#include "libavcodec/celp_math.h"
-#include "libavutil/mips/asmdefs.h"
-
-#if HAVE_INLINE_ASM
-#if !HAVE_MIPS32R6 && !HAVE_MIPS64R6
-static float ff_dot_productf_mips(const float* a, const float* b,
- int length)
-{
- float sum;
- const float* a_end = a + length;
-
- __asm__ volatile (
- "mtc1 $zero, %[sum] \n\t"
- "blez %[length], ff_dot_productf_end%= \n\t"
- "ff_dot_productf_madd%=: \n\t"
- "lwc1 $f2, 0(%[a]) \n\t"
- "lwc1 $f1, 0(%[b]) \n\t"
- PTR_ADDIU "%[a], %[a], 4 \n\t"
- PTR_ADDIU "%[b], %[b], 4 \n\t"
- "madd.s %[sum], %[sum], $f1, $f2 \n\t"
- "bne %[a], %[a_end], ff_dot_productf_madd%= \n\t"
- "ff_dot_productf_end%=: \n\t"
-
- : [sum] "=&f" (sum), [a] "+r" (a), [b] "+r" (b)
- : [a_end]"r"(a_end), [length] "r" (length)
- : "$f1", "$f2", "memory"
- );
- return sum;
-}
-#endif /* !HAVE_MIPS32R6 && !HAVE_MIPS64R6 */
-#endif /* HAVE_INLINE_ASM */
-
-void ff_celp_math_init_mips(CELPMContext *c)
-{
-#if HAVE_INLINE_ASM
-#if !HAVE_MIPS32R6 && !HAVE_MIPS64R6
- c->dot_productf = ff_dot_productf_mips;
-#endif
-#endif
-}
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Cars 3 Driven to Win - A World-Class Racing Experience for Xbox 360.md b/spaces/congsaPfin/Manga-OCR/logs/Cars 3 Driven to Win - A World-Class Racing Experience for Xbox 360.md
deleted file mode 100644
index b62a0c137fc4ceb2ef808adb7bbecded5ebee086..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Cars 3 Driven to Win - A World-Class Racing Experience for Xbox 360.md
+++ /dev/null
@@ -1,77 +0,0 @@
-
-
Cars 3 Driven to Win Xbox 360 Download: How to Get the Ultimate Racing Experience
-
Introduction
-
If you are a fan of the Cars movie franchise, you might be interested in playing Cars 3 Driven to Win, a racing video game inspired by the third installment of the series. In this game, you can play with over 20 characters from the film, including Lightning McQueen, Cruz Ramirez, Jackson Storm, and more. You can also race on over 20 tracks across iconic locations from the movie, such as Radiator Springs, Florida International Speedway, and Thunder Hollow. You can also advance your skills in six different game modes, from stunt showcases to battle races. Whether you want to play solo or with your friends and family, Cars 3 Driven to Win is a fun and exciting game that will test your speed, agility, and creativity.
-
But how can you get this game for your Xbox 360 console? In this article, we will show you two easy methods to download Cars 3 Driven to Win for Xbox 360. All you need is an internet connection, an Xbox Live account, and a valid payment method. Follow these simple steps and you will be ready to race in no time.
The first method is to download the game directly from the Microsoft Store on your Xbox 360 console. Here are the steps you need to follow:
-
Step 1: Sign up for Xbox Live
-
To access downloadable games from the Microsoft Store, you need to have an Xbox Live account. If you don't have one already, you can sign up for free on your console or on the Xbox website. You will need an email address and a password to create your account.
-
Step 2: Power up your Xbox 360 and sign into Xbox Live
-
Once you have your account ready, turn on your Xbox 360 console and controller. Press and hold the "Guide" button (the Xbox logo) on your controller to sign into Xbox Live. Make sure you are signed in with the profile that you want to use to download the game.
-
Step 3: Select the games tab and search for Cars 3 Driven to Win
-
On your console's home screen, press the RB button twice to select the games tab. Then select Search Games and press A. Enter "Cars 3 Driven to Win" in the search bar and select it from the list of results. This will open the game's page on the Microsoft Store.
-
Step 4: Select the game and confirm the purchase
-
On the game's page, you will see the price and the rating of the game. You will also see some screenshots and videos of the game, as well as a description and some reviews. If you are sure you want to buy the game, select Buy Game and press A. You will be asked to confirm your payment method and your billing information. If everything is correct, select Confirm Purchase and press A. The game will be added to your download queue.
-
Step 5: Check the download progress and enjoy the game
-
To check the download progress of the game, press the Guide button on your controller and select Active Downloads. You will see the percentage and the time remaining for the download. You can also pause or cancel the download if you want. Once the download is complete, you can launch the game from your games library or from the home screen. Enjoy racing with your favorite Cars characters!
-
cars 3 driven to win xbox 360 iso
-cars 3 driven to win xbox 360 gameplay
-cars 3 driven to win xbox 360 cheats
-cars 3 driven to win xbox 360 review
-cars 3 driven to win xbox 360 trailer
-cars 3 driven to win xbox 360 free download
-cars 3 driven to win xbox 360 digital code
-cars 3 driven to win xbox 360 online multiplayer
-cars 3 driven to win xbox 360 split screen
-cars 3 driven to win xbox 360 walmart
-cars 3 driven to win xbox 360 amazon
-cars 3 driven to win xbox 360 gamestop
-cars 3 driven to win xbox 360 ebay
-cars 3 driven to win xbox 360 best buy
-cars 3 driven to win xbox 360 target
-cars 3 driven to win xbox 360 price
-cars 3 driven to win xbox 360 release date
-cars 3 driven to win xbox 360 achievements
-cars 3 driven to win xbox 360 characters
-cars 3 driven to win xbox 360 unlockables
-cars 3 driven to win xbox 360 controls
-cars 3 driven to win xbox 360 how to play
-cars 3 driven to win xbox 360 walkthrough
-cars 3 driven to win xbox 360 tips and tricks
-cars 3 driven to win xbox 360 all tracks
-cars 3 driven to win xbox 360 all modes
-cars 3 driven to win xbox 360 all races
-cars 3 driven to win xbox 360 all stunts
-cars 3 driven to win xbox 360 all skills
-cars 3 driven to win xbox 360 all trophies
-cars 3 driven to win xbox 360 all collectibles
-cars 3 driven to win xbox 360 all challenges
-cars 3 driven to win xbox 360 all hall of fame entries
-cars 3 driven to win xbox 360 all customization options
-cars 3 driven to win xbox 360 all bonus content
-cars 3 driven to win xbox 360 comparison with other platforms
-cars 3 driven to win xbox one vs xbox one x vs xbox series x vs s vs pc vs ps4 vs ps5 vs switch vs wii u vs ps vita vs android vs ios vs mac os x vs linux vs windows vs chrome os vs web browser vs steam vs epic games store vs origin vs uplay vs gog galaxy vs discord store vs humble bundle store vs itch.io store vs gamejolt store vs kartridge store vs green man gaming store vs fanatical store vs indiegala store vs nuuvem store vs gamersgate store download performance graphics quality sound quality loading times frame rate resolution gameplay features online features offline features cross-play cross-save cross-buy cross-progression cloud gaming cloud saves cloud backup cloud sync mod support controller support keyboard and mouse support touch screen support vr support ar support motion control support voice control support gesture control support eye tracking support facial recognition support haptic feedback support adaptive triggers support ray tracing support hdr support dolby atmos support dolby vision support dts:x support dts-hd master audio support dolby truehd support dolby digital plus support dts express support dts coreless lossless audio codec support auro-3d support mpeg-h audio support mpeg-d surround sound system support mpeg-d usac support mpeg-d mpeg-h part2 l1 sound system support mpeg-d mpeg-h part2 l2 sound system support mpeg-d mpeg-h part2 l4 sound system support mpeg-d mpeg-h part2 l5 sound system support mpeg-d mpeg-h part2 l6 sound system support mpeg-d mpeg-h part2 l7 sound system support mpeg-d mpeg-h part2 l8 sound system support mpeg-d mpeg-h part2 l9 sound system support mpeg-d mpeg-h part2 l10 sound system support mpeg-d mpeg-h part2 l11 sound system support mpeg-d mpeg-h part2 l12 sound system support mpeg-d mpeg-h part2 l13 sound system support mpeg-d mpeg-h part2 l14 sound system support mpeg-d mpeg-h part2 l15 sound system
-
Method 2: From the Xbox website
-
The second method is to download the game from the Xbox website on your computer or mobile device. Here are the steps you need to follow:
-
Step 1: Go to the Xbox website and sign in with your Microsoft account
-
On your browser, go to the Xbox website and click on Sign In at the top right corner. Enter your email address and password that you used to create your Xbox Live account. If you don't have an account yet, you can sign up for free by clicking on Create one.
-
Step 2: Browse or search for Cars 3 Driven to Win and click on it
-
On the Xbox website, you can browse or search for games by genre, rating, price, or popularity. To find Cars 3 Driven to Win, you can either use the search bar at the top right corner or go to Games > Xbox 360 Games > Racing & Flying. Once you find the game, click on it to open its page.
-
Step 3: Click on Buy game and confirm the payment method
-
On the game's page, you will see similar information as on the Microsoft Store, such as the price, the rating, the screenshots, and the description. If you want to buy the game, click on Buy game and choose your payment method. You can use a credit card, a debit card, a PayPal account, or an Xbox gift card. Enter your billing information and confirm your purchase.
-
Step 4: Turn on your Xbox 360 and make sure you're signed into the right profile
-
After buying the game online, you need to turn on your Xbox 360 console and controller. Make sure you are signed into Xbox Live with the same profile that you used to buy the game online. If not, press and hold the Guide button on your controller and select Switch Profile.
-
Step 5: Check your game's download progress and enjoy the game
-
To check if your game is downloading, press and hold the Guide button on your controller and select Active Downloads. You should see Cars 3 Driven to Win in your download queue. You can also pause or cancel the download if you want. Once the download is complete, you can launch the game from your games library or from the home screen. Have fun racing with your favorite Cars characters!
-
Conclusion
-
Summary of the main points
-
In this article, we have shown you two easy methods to download Cars 3 Driven to Win for Xbox 360. You can either download it directly from the Microsoft Store on your console or from the Xbox website on your computer or mobile device. All you need is an internet connection, an Xbox Live account, and a valid payment method.
-
Call to action and final thoughts
-
If you are looking for a fun and exciting racing game that features your favorite characters from the Cars movie franchise, you should definitely try Cars 3 Driven to Win for Xbox 360. It is a game that will challenge your speed, agility, and creativity in six different game modes and over 20 tracks. You can also play solo or with your friends and family in split-screen mode.
-
So what are you waiting for? Download Cars 3 Driven to Win for Xbox 360 today and get ready to race like never before!
- FAQs
- Q: How much does Cars 3 Driven to Win cost for Xbox 360? A: The game costs $19.99 USD for Xbox 360.
- Q: How much storage space does Cars 3 Driven to Win require for Xbox 360? A: The game requires 4.5 GB of storage space for Xbox 360.
- Q: Can I play Cars 3 Driven to Win online with other players? A: No, the game does not support online multiplayer mode. However, you can play with up to four players in split-screen mode on the same console.
- Q: What are the minimum system requirements for Cars 3 Driven to Win for Xbox 360? A: The minimum system requirements for the game are: Xbox 360 console with a hard drive, Xbox Live account, Internet connection, HDTV or monitor with HDMI or component input, and Xbox 360 controller.
- Q: What are the ratings and reviews for Cars 3 Driven to Win for Xbox 360? A: The game has a rating of E (Everyone) by the ESRB and a score of 7.1 out of 10 by IGN. The game has received mostly positive reviews from critics and players, who praised its graphics, gameplay, variety, and replay value.
- Q: Where can I find more information about Cars 3 Driven to Win for Xbox 360? A: You can find more information about the game on the official website, the Microsoft Store page, or the Xbox website page. You can also watch some gameplay videos on YouTube or read some tips and tricks on GameFAQs. 401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Extreme Car Driving Simulator 2015 APK and Unlock All the Cars and Features.md b/spaces/congsaPfin/Manga-OCR/logs/Download Extreme Car Driving Simulator 2015 APK and Unlock All the Cars and Features.md
deleted file mode 100644
index dda9206b165362e4f2323f075f19b35eea56eb07..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Download Extreme Car Driving Simulator 2015 APK and Unlock All the Cars and Features.md
+++ /dev/null
@@ -1,88 +0,0 @@
-
-
Extreme Car Driving Simulator 2015 APK: A Thrilling and Non-Stop Car Driving Game
-
Do you love driving cars and racing games? Do you want to experience the thrill and excitement of driving realistic cars on different maps and terrains? If yes, then you should try Extreme Car Driving Simulator 2015 APK, a car driving game for Android devices that will keep you hooked for hours.
-
What is Extreme Car Driving Simulator 2015 APK?
-
A realistic and immersive car driving game for Android devices
-
Extreme Car Driving Simulator 2015 APK is a car driving game that lets you drive various cars on different maps and terrains. You can choose from sports cars, muscle cars, off-road vehicles, and more. You can also customize your cars with different colors, wheels, spoilers, and stickers.
Features of Extreme Car Driving Simulator 2015 APK
-
HD user interfaces and graphics
-
The game has HD user interfaces and graphics that improve your game experience. You can enjoy the stunning details of the cars, the roads, the buildings, and the environment. You can also adjust the graphics quality according to your device performance.
-
Different maps and terrains to explore
-
The game offers different maps and terrains to explore with your cars. You can drive on city streets, highways, deserts, mountains, snow, and more. You can also encounter traffic, obstacles, ramps, bridges, and other elements that make the game more realistic and fun.
-
extreme car driving simulator 2015 mod apk
-download extreme car driving simulator 2015 for android
-extreme car driving simulator 2015 game online
-extreme car driving simulator 2015 cheats and hacks
-extreme car driving simulator 2015 free full version
-extreme car driving simulator 2015 unlimited money
-extreme car driving simulator 2015 best cars
-extreme car driving simulator 2015 review and rating
-extreme car driving simulator 2015 latest update
-extreme car driving simulator 2015 gameplay and features
-extreme car driving simulator 2015 realistic physics
-extreme car driving simulator 2015 tips and tricks
-extreme car driving simulator 2015 how to install
-extreme car driving simulator 2015 system requirements
-extreme car driving simulator 2015 new maps and modes
-extreme car driving simulator 2015 offline play
-extreme car driving simulator 2015 multiplayer mode
-extreme car driving simulator 2015 custom cars and skins
-extreme car driving simulator 2015 video and screenshots
-extreme car driving simulator 2015 comparison with other games
-extreme car driving simulator 2015 apk download link
-extreme car driving simulator 2015 fun and addictive
-extreme car driving simulator 2015 challenges and missions
-extreme car driving simulator 2015 support and feedback
-extreme car driving simulator 2015 bugs and fixes
-
Real driving experience with HUD, gear, speed, and simulations
-
The game gives you a real driving experience with HUD revs, gear, speed, plus TC, ABS, and ESP simulations. You can feel the physics of the cars as you accelerate, brake, drift, and crash. You can also switch between different camera views to see your car from different angles.
-
Free mode and checkpoint mode to play
-
The game has two modes to play: free mode and checkpoint mode. In free mode, you can drive freely on any map without any time limit or objective. You can explore the map at your own pace and do whatever you want with your car. In checkpoint mode, you have to reach certain checkpoints on the map within a given time limit. You can earn coins by completing checkpoints and use them to unlock new cars or upgrade your existing ones.
-
How to download and install Extreme Car Driving Simulator 2015 APK?
-
Download the APK file from a trusted source
-
To download Extreme Car Driving Simulator 2015 APK, you need to find a trusted source that provides the latest version of the game. You can use [APKPure](^1^) as an example of a reliable source that offers safe and fast downloads of various APK files.
-
Enable unknown sources on your device settings
-
To install Extreme Car Driving Simulator 2015 APK, you need to enable unknown sources on your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.
-
Install the APK file and launch the game
-
After you have downloaded the APK file and enabled unknown sources, you can install the APK file by tapping on it and following the instructions. Once the installation is complete, you can launch the game by tapping on its icon on your home screen or app drawer. You can then enjoy playing Extreme Car Driving Simulator 2015 APK on your device.
-
Why should you play Extreme Car Driving Simulator 2015 APK?
-
It is fun and addictive
-
Extreme Car Driving Simulator 2015 APK is a fun and addictive game that will keep you entertained for hours. You can drive different cars on different maps and terrains, and experience the thrill and excitement of realistic car driving. You can also perform stunts, drifts, jumps, and crashes with your cars, and see how they react to the physics and damage.
-
It is challenging and rewarding
-
Extreme Car Driving Simulator 2015 APK is a challenging and rewarding game that will test your driving skills and reflexes. You can play the checkpoint mode and try to complete the objectives within the time limit. You can also compete with other players online and see who can drive faster and better. You can earn coins by playing the game and use them to unlock new cars or upgrade your existing ones.
-
It is suitable for all ages and preferences
-
Extreme Car Driving Simulator 2015 APK is a game that is suitable for all ages and preferences. Whether you are a casual gamer or a hardcore racer, you can find something to enjoy in this game. You can choose from different cars, maps, terrains, modes, and settings to suit your taste and mood. You can also adjust the difficulty level and the controls to match your skill level and comfort.
-
Conclusion
-
Extreme Car Driving Simulator 2015 APK is a car driving game that you should not miss if you love driving cars and racing games. It is a realistic and immersive game that lets you drive various cars on different maps and terrains. It has HD user interfaces and graphics, real driving experience with HUD, gear, speed, and simulations, free mode and checkpoint mode to play, and online multiplayer mode to compete with other players. It is also easy to download and install, fun and addictive, challenging and rewarding, and suitable for all ages and preferences. So what are you waiting for? Download Extreme Car Driving Simulator 2015 APK now and enjoy the thrill and excitement of non-stop car driving.
-
FAQs
-
Here are some frequently asked questions about Extreme Car Driving Simulator 2015 APK:
-
-
-
Question
-
Answer
-
-
-
Is Extreme Car Driving Simulator 2015 APK free?
-
Yes, Extreme Car Driving Simulator 2015 APK is free to download and play. However, it may contain ads and in-app purchases that require real money.
-
-
-
Is Extreme Car Driving Simulator 2015 APK safe?
-
Yes, Extreme Car Driving Simulator 2015 APK is safe to download and install as long as you use a trusted source like [APKPure]. However, you should always be careful when installing apps from unknown sources and scan them for viruses or malware before opening them.
-
-
-
What are the requirements to play Extreme Car Driving Simulator 2015 APK?
-
To play Extreme Car Driving Simulator 2015 APK, you need an Android device that runs on Android 4.1 or higher. You also need at least 100 MB of free storage space on your device.
-
-
-
How can I contact the developer of Extreme Car Driving Simulator 2015 APK?
-
You can contact the developer of Extreme Car Driving Simulator 2015 APK by visiting their website at [AxesInMotion Racing] or sending them an email at support@axesinmotion.com.
-
-
-
How can I rate and review Extreme Car Driving Simulator 2015 APK?
-
You can rate and review Extreme Car Driving Simulator 2015 APK by visiting its page on the Google Play Store at [Extreme Car Driving Simulator] or by leaving a comment on its page on [APKPure].
-
-
- : https://apkpure.com/extreme-car-driving-simulator/com.aim.racing : http://www.axesinmotion.com/ : https://play.google.com/store/apps/details?id=com.aim.racing&hl=en_US&gl=US 401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Free Call Of Duty Black Ops 3 For Android And Ios Apk Download ((LINK)).md b/spaces/congsaPfin/Manga-OCR/logs/Free Call Of Duty Black Ops 3 For Android And Ios Apk Download ((LINK)).md
deleted file mode 100644
index a8bbf2bce556b177039a1594bb9621ed670ca0c0..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Free Call Of Duty Black Ops 3 For Android And Ios Apk Download ((LINK)).md
+++ /dev/null
@@ -1,42 +0,0 @@
-
-
Free Call of Duty Black Ops 3 for Android and iOS/APK Download
-
Call of Duty is one of the most popular and successful first-person shooter franchises in the world. Millions of fans enjoy the thrilling and immersive gameplay, the stunning graphics, and the epic storylines of the games. One of the most acclaimed titles in the series is Call of Duty Black Ops 3, which was released in 2015 for PC, PlayStation 4, and Xbox One.
-
free call of duty black ops 3 for android and ios apk download
But what if you want to play Call of Duty Black Ops 3 on your mobile device? Is it possible to download it for free on Android and iOS devices? The answer is yes, it is possible, but not without some challenges and limitations. In this article, we will tell you everything you need to know about Call of Duty Black Ops 3 for mobile devices, including what it is, how to download it, what are its pros and cons, and some tips and tricks for playing it. Let's get started!
-
What is Call of Duty Black Ops 3?
-
Call of Duty Black Ops 3 is a first-person shooter game that is set in a dystopian future in the year 2065. The game follows a team of black ops soldiers who use advanced technology and cybernetic enhancements to fight against a rogue AI and a mysterious enemy faction. The game has three main modes: campaign, multiplayer, and zombies. The campaign mode allows you to play solo or co-op with up to four players, while the multiplayer mode offers various competitive modes and maps. The zombies mode lets you team up with other players or play solo to survive waves of undead enemies.
-
Features of Call of Duty Black Ops 3
-
Call of Duty Black Ops 3 has many features that make it an exciting and enjoyable game to play. Some of these features are:
-
-
A rich and immersive story that takes you to different locations around the world.
-
A customizable character that you can equip with different weapons, abilities, outfits, and accessories.
-
A variety of game modes and maps that cater to different playstyles and preferences.
-
A dynamic and fluid gameplay that allows you to run, jump, slide, wall-run, swim, and use vehicles.
-
A stunning graphics engine that delivers realistic visuals and effects.
-
A robust online community that supports cross-play between platforms.
-
-
How to download Call of Duty Black Ops 3 for free on Android and iOS devices?
-
Unfortunately, Call of Duty Black Ops 3 is not officially available for mobile devices. However, there are some unofficial ways to download it for free on Android and iOS devices. Here are two methods that you can try:
-
Method 1: Download from the official website
-
The official website of Call of Duty Mobile offers a free download link for Call of Duty Black Ops 3 for mobile devices. However, this link is not always working or accessible. To download it from the official website, follow these steps:
-
-
Go to [the official website](^1^) of Call of Duty Mobile.
-
Scroll down to the bottom of the page and click on "Download Now".
-
Select your device type (Android or iOS) and click on "Download".
-
Wait shotguns, light machine guns, sniper rifles, pistols, launchers, melee weapons, and special weapons. You can also equip different attachments, optics, camos, and paintjobs to customize your weapons. You can also choose from different perks that give you various advantages in the game, such as faster movement, increased health, reduced recoil, enhanced vision, and more. You should learn the weapons and perks in the game and use them according to your strategy and situation.
-
Practice your aim and movement skills
-
Call of Duty Black Ops 3 is a fast-paced and action-packed game that requires good aim and movement skills. You should practice your aim and movement skills to improve your accuracy, reflexes, and agility in the game. You can use the training mode or the custom games to practice your skills against bots or other players. You can also watch tutorials or tips videos online to learn from other players. You should also adjust the sensitivity and controls of the game to suit your preference and comfort.
-
Conclusion
-
Call of Duty Black Ops 3 is a great game that you can enjoy on your mobile device. However, you should be aware of the challenges and limitations of downloading and playing it on your device. You should also follow some tips and tricks to improve your skills and experience in the game. If you are a fan of Call of Duty or first-person shooter games, you should definitely give Call of Duty Black Ops 3 a try on your mobile device.
-
-
FAQs
-
Here are some frequently asked questions about Call of Duty Black Ops 3 for mobile devices:
-
-
Q: Is Call of Duty Black Ops 3 for mobile devices free? A: Yes, Call of Duty Black Ops 3 for mobile devices is free to download and play. However, you may need to pay for some in-game items or features.
-
Q: Is Call of Duty Black Ops 3 for mobile devices safe? A: Call of Duty Black Ops 3 for mobile devices is safe to download and play as long as you use the official website or a trusted APK website. However, you should be careful of malware or viruses that may harm your device or data.
-
Q: Is Call of Duty Black Ops 3 for mobile devices compatible with my device? A: Call of Duty Black Ops 3 for mobile devices is compatible with most Android and iOS devices that have at least 2 GB of RAM and 4 GB of storage space. However, some devices may not support the game or may have performance issues.
-
Q: Is Call of Duty Black Ops 3 for mobile devices offline? A: No, Call of Duty Black Ops 3 for mobile devices requires an internet connection to download, install, update, and play the game. You also need an internet connection to access the online features and modes of the game.
-
Q: Is Call of Duty Black Ops 3 for mobile devices cross-platform? A: Yes, Call of Duty Black Ops 3 for mobile devices supports cross-play between Android and iOS devices. You can also play with other players who are using PC or console versions of the game.
-
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/PDF Download of Class 9 Hindi Chapter 6 - Nana Saheb ki Putri Devi Maina ko Bhasm kar Diya Gaya NCERT Solutions and Notes.md b/spaces/congsaPfin/Manga-OCR/logs/PDF Download of Class 9 Hindi Chapter 6 - Nana Saheb ki Putri Devi Maina ko Bhasm kar Diya Gaya NCERT Solutions and Notes.md
deleted file mode 100644
index 1a5ada40e9a8754f1c99e08d82dce775e706a25c..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/PDF Download of Class 9 Hindi Chapter 6 - Nana Saheb ki Putri Devi Maina ko Bhasm kar Diya Gaya NCERT Solutions and Notes.md
+++ /dev/null
@@ -1,134 +0,0 @@
-
-
Class 9 Hindi Chapter 6 Question Answer PDF Download
-
If you are looking for a reliable and comprehensive source of question answer for class 9 Hindi chapter 6, then you have come to the right place. In this article, we will provide you with a summary of the chapter, along with some short and long answer questions based on it. We will also provide you with a link to download the PDF file of the question answer section, which will help you revise and practice for your exam. So, let us begin.
-
Introduction
-
Class 9 Hindi chapter 6 is titled "Premchand ke Phate Jute", which means "Premchand's Torn Shoes". It is a story written by Harishankar Parsai, who was a famous satirist and humorist in Hindi literature. He was known for his witty and sarcastic style of writing, which exposed the hypocrisy and corruption of society. In this story, he presents a contrast between the simple and humble personality of Premchand, who was one of the greatest writers in Hindi literature, and the opportunistic and pretentious attitude of modern people. He also criticizes the tendency of people to judge others by their appearance and status, rather than their character and talent.
-
This chapter is important to study because it gives us an insight into the life and works of Premchand, who was also known as "the people's writer". He wrote about the realistic and social issues of his time, such as poverty, oppression, injustice, casteism, communalism, etc. He also used simple and lucid language, which appealed to the masses. He was a pioneer of modern Hindi literature and influenced many writers after him. This chapter also teaches us some valuable lessons, such as being humble, honest, respectful, and sincere in our work and life. It also warns us against being greedy, selfish, dishonest, and superficial in our dealings with others.
-
Summary of the Chapter
-
The story begins with Parsai looking at a photograph of Premchand with his wife. He notices that Premchand is wearing torn shoes in the picture, and one of his toes is visible through a hole in his right shoe. Parsai is amazed by this sight and wonders how such a great writer could wear such shabby shoes. He then imagines how Premchand must have felt when he went out wearing those shoes. He thinks that Premchand must have been embarrassed and uncomfortable by his appearance, but he also must have been proud and confident by his achievements.
-
Parsai then compares Premchand's situation with that of modern people, who are obsessed with their looks and status. He says that nowadays people spend a lot of money on their clothes, shoes, accessories, etc., but they do not care about their work or talent. They also try to impress others by showing off their wealth and power, but they do not respect or appreciate others' work or talent. They also judge others by their appearance and status, rather than their character and ability. Parsai says that these people are like hollow shells, who have no substance or value inside them.
-
Parsai then praises Premchand for his simplicity and honesty. He says that Premchand was not bothered by his torn shoes, because he knew that his work was more important and valuable than his shoes. He says that Premchand's shoes were a symbol of his dignity and integrity, which he never compromised for anything. He also says that Premchand's shoes were a source of inspiration and motivation for him, as they reminded him of his struggle and success. He says that Premchand's shoes were a testament to his greatness and legacy, which no one can deny or forget.
-
Question Answer Section
-
In this section, we will provide you with some short and long answer questions based on the chapter. These questions will help you test your understanding and comprehension of the chapter, as well as improve your writing and analytical skills. You can also refer to these questions for your exam preparation and revision.
-
-
Short Answer Questions
-
Here are some of the short answer questions based on the chapter:
-
-
-
Question
-
Answer
-
-
-
Who is the author of the story "Premchand ke Phate Jute"?
-
The author of the story is Harishankar Parsai, who was a famous satirist and humorist in Hindi literature.
-
-
-
What did Parsai notice in the photograph of Premchand with his wife?
-
Parsai noticed that Premchand was wearing torn shoes in the photograph, and one of his toes was visible through a hole in his right shoe.
-
-
-
How did Parsai imagine Premchand's feelings when he went out wearing those shoes?
-
Parsai imagined that Premchand must have felt embarrassed and uncomfortable by his appearance, but he also must have felt proud and confident by his achievements.
-
-
-
How did Parsai compare Premchand's situation with that of modern people?
-
Parsai compared Premchand's situation with that of modern people, who are obsessed with their looks and status, but do not care about their work or talent. He also said that they judge others by their appearance and status, rather than their character and ability.
-
-
-
What did Parsai say about Premchand's simplicity and honesty?
-
Parsai said that Premchand was not bothered by his torn shoes, because he knew that his work was more important and valuable than his shoes. He also said that Premchand's shoes were a symbol of his dignity and integrity, which he never compromised for anything.
-
-
Long Answer Questions
-
Here are some of the long answer questions based on the chapter:
-
-
-
Question
-
Answer
-
-
-
What is the main theme of the story "Premchand ke Phate Jute"?
-
The main theme of the story is the contrast between the simple and humble personality of Premchand, who was one of the greatest writers in Hindi literature, and the opportunistic and pretentious attitude of modern people. The story also highlights the importance of work and talent over appearance and status, and the value of dignity and integrity over wealth and power. The story also conveys a message of respect and appreciation for Premchand's life and works, which have inspired and influenced many generations of readers and writers.
-
-
-
How does Parsai use satire and humor in the story to expose the hypocrisy and corruption of society?
-
Parsai uses satire and humor in the story to expose the hypocrisy and corruption of society in a witty and sarcastic manner. He uses irony, exaggeration, ridicule, and comparison to show the absurdity and falsity of modern people's behavior and values. He mocks their obsession with their looks and status, their tendency to judge others by their appearance and status, their lack of work ethic and talent, their greed and selfishness, their dishonesty and superficiality, etc. He also contrasts their behavior and values with those of Premchand, who was simple, honest, respectful, sincere, hardworking, talented, etc. He makes us laugh at the foolishness and vanity of modern people, but also makes us think about the moral and social issues that he raises in the story.
-
-
-
What are some of the lessons that we can learn from Premchand's life and works?
-
Some of the lessons that we can learn from Premchand's life and works are: - We should be humble, honest, respectful, and sincere in our work and life, and not compromise our dignity and integrity for anything. - We should value our work and talent over our appearance and status, and not try to impress others by showing off our wealth and power. - We should respect and appreciate others' work and talent, regardless of their appearance and status, and not judge them by their superficial qualities. - We should write about the realistic and social issues of our time, using simple and lucid language that appeals to the masses. - We should be inspired and motivated by our struggle and success, rather than be embarrassed or uncomfortable by our shortcomings or difficulties.
-
-
PDF Download Link
-
If you want to download the PDF file of the question answer section for class 9 Hindi chapter 6, you can click on the link below. The PDF file contains all the short and long answer questions that we have provided in this article, along with their answers. You can also print the PDF file or save it on your device for your convenience. The PDF file will help you revise and practice the chapter in an easy and effective way.
Conclusion
-
In conclusion, we can say that class 9 Hindi chapter 6 is a very interesting and informative story that teaches us about the life and works of Premchand, who was one of the greatest writers in Hindi literature. It also teaches us some valuable lessons, such as being humble, honest, respectful, and sincere in our work and life, valuing our work and talent over our appearance and status, respecting and appreciating others' work and talent, writing about the realistic and social issues of our time, etc. It also warns us against being greedy, selfish, dishonest, and superficial in our dealings with others. We hope that this article has helped you understand and enjoy the chapter better, and has also prepared you for your exam.
-
FAQs
-
Here are some of the frequently asked questions related to this chapter:
-
-
-
Question
-
Answer
-
-
-
Who was Premchand?
-
Premchand was one of the greatest writers in Hindi literature. He was also known as "the people's writer". He wrote about the realistic and social issues of his time, such as poverty, oppression, injustice, casteism, communalism, etc. He also used simple and lucid language, which appealed to the masses. He was a pioneer of modern Hindi literature and influenced many writers after him.
-
-
-
What is the meaning of "Premchand ke Phate Jute"?
-
"Premchand ke Phate Jute" means "Premchand's Torn Shoes". It is the title of a story written by Harishankar Parsai, who was a famous satirist and humorist in Hindi literature. He used this title to contrast the simple and humble personality of Premchand with the opportunistic and pretentious attitude of modern people.
-
-
-
What is satire and humor?
-
Satire and humor are literary devices that use irony, exaggeration, ridicule, and comparison to make fun of or criticize something or someone. They are often used to expose the hypocrisy and corruption of society or to convey a message or moral lesson.
-
-
-
How to write a good answer for a short or long question?
-
To write a good answer for a short or long question, you should follow these steps: - Read the question carefully and understand what it is asking. - Recall the relevant information from the chapter or your notes. - Organize your answer in a logical and coherent manner. - Use proper grammar, spelling, punctuation, and vocabulary. - Write your answer in your own words rather than copying from other sources. - For a short answer question, write one or two paragraphs with a clear and concise answer. - For a long answer question, write three or more paragraphs with a detailed and logical answer.
-
-
-
Where can I find more questions and answers for class 9 Hindi chapters?
-
You can find more questions and answers for class 9 Hindi chapters on various websites and apps that provide study material for students. Some of them are: - NCERT Solutions - Vedantu - Toppr - BYJU'S - etc.
-
-
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/QRIS OVO Merchant The Best Payment Solution for Indonesian SMEs.md b/spaces/congsaPfin/Manga-OCR/logs/QRIS OVO Merchant The Best Payment Solution for Indonesian SMEs.md
deleted file mode 100644
index 78084307888adba2c06afc9610e36ea69c901ed6..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/QRIS OVO Merchant The Best Payment Solution for Indonesian SMEs.md
+++ /dev/null
@@ -1,135 +0,0 @@
-
-
How to Download QRIS OVO Merchant
-
If you are a merchant who wants to accept payments from various digital wallets, such as OVO, Gopay, DANA, LinkAja, and others, you might want to consider using QRIS OVO Merchant. QRIS OVO Merchant is a service that allows you to generate a single QR code that can be scanned by customers using any payment service provider that supports QRIS (Quick Response Code Indonesian Standard). In this article, we will explain what QRIS OVO Merchant is, how to register as one, how to download and print your QR code, and how to accept payments using it.
-
What is QRIS OVO Merchant?
-
QRIS OVO Merchant is a service that enables you to accept payments from various digital wallets using a single QR code. This means that you don't need to have multiple QR codes for different payment service providers, which can save you space and hassle. By using QRIS OVO Merchant, you can also enjoy the following benefits:
Free of charge. You don't need to pay any fees or commissions for accepting payments using QRIS OVO Merchant.
-
Fast settlement process. You can receive your transaction funds within one business day after the transaction date.
-
Special cooperation with Nobu Bank. You can open a bank account with Nobu Bank and enjoy various features and benefits, such as cashback, interest, and insurance.
-
Partnership with GrabFood. You can join GrabFood as a partner and increase your sales and exposure.
-
Wherever you are. You can find and join various merchants who are already using QRIS OVO Merchant across Indonesia.
-
-
The requirements to become QRIS OVO Merchant
-
To become QRIS OVO Merchant, you need to meet the following requirements:
-
-
You must have a valid identity card (KTP) or business license (SIUP).
-
You must have a smartphone that supports Android or iOS operating system.
-
You must have an active email address and phone number.
-
You must have an active bank account with any bank in Indonesia.
-
-
How to register as QRIS OVO Merchant?
-
The registration process for QRIS OVO Merchant depends on whether you are an individual merchant or a corporate merchant. Here are the steps for each type of merchant:
-
For individual merchants
-
-
Download the GrabMerchant app from Google Play Store or App Store.
-
Open the app and tap on "Register".
-
Fill in your personal information, such as name, email, phone number, address, etc.
-
Choose "OVO" as your payment method and tap on "Next".
-
Upload your identity card (KTP) and a photo of yourself holding the KTP.
-
Wait for the verification process to complete. You will receive an SMS notification once your registration is approved.
-
-
For corporate merchants
-
-
Visit [8](https://ovo.id/partnership) and fill in the online form with your business information, such as name, email, phone number, address, etc.
-
Choose "OVO" as your payment method and tap on "Submit".
-
You will receive an email confirmation with a link to complete your registration.
-
Click on the link and upload your business license (SIUP) and other supporting documents.
-
Wait for the verification process to complete. You will receive an SMS notification once your registration is approved.
-
-
How to download and print QRIS OVO Merchant code?
-
After you register as QRIS OVO Merchant, you can download and print your QR code from the GrabMerchant app or the OVO website. Here are the steps for each option:
-
For individual merchants
-
-
Open the GrabMerchant app and tap on "QR Code".
-
Tap on "Download" and choose the size and format of your QR code.
-
Save the QR code image to your device or share it via email or other apps.
-
Print the QR code image using a printer or a printing service.
-
Display the QR code in a visible and accessible location for your customers.
-
-
For corporate merchants
-
-
Visit [9](https://ovo.id/merchant) and log in with your email and password.
-
Click on "QR Code" and choose the size and format of your QR code.
-
Download the QR code image to your device or share it via email or other apps.
-
Print the QR code image using a printer or a printing service.
-
Display the QR code in a visible and accessible location for your customers.
-
-
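-As a purely illustrative aside (this is not part of the official GrabMerchant or OVO tooling), the short Python sketch below shows how any QR payload string can be rendered into a printable image with the open-source qrcode library. The payload used here is a made-up placeholder; a real QRIS payload is only issued through the GrabMerchant app or the OVO merchant dashboard, so in practice you would simply print the image those tools give you.

```python
# Illustrative sketch only: render a QR payload string into a printable image file.
# "EXAMPLE-QRIS-PAYLOAD" is a hypothetical placeholder, not a valid QRIS payload.
import qrcode  # third-party package: pip install qrcode[pil]

payload = "EXAMPLE-QRIS-PAYLOAD"    # replace with the payload issued for your merchant account
img = qrcode.make(payload)          # encode the string as a QR code image
img.save("qris_merchant_code.png")  # save the image for printing or on-screen display
```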
How to accept payments using QRIS OVO Merchant code?
-
To accept payments using QRIS OVO Merchant code, you need to follow these steps:
-
For individual merchants
-
-
Ask your customer to scan your QR code using their preferred payment service provider app that supports QRIS, such as OVO, Gopay, DANA, LinkAja, etc.
-
The customer will see your merchant name and the amount to pay on their app screen. They can also enter a different amount if needed.
-
The customer will confirm the payment by entering their PIN or biometric authentication.
-
You will receive a notification on your GrabMerchant app that the payment is successful. You can also check the transaction history on the app.
-
You can issue a receipt to the customer if they request one. You can print it from the app or send it via email or SMS.
-
-
For corporate merchants
-
-
Ask your customer to scan your QR code using their preferred payment service provider app that supports QRIS, such as OVO, Gopay, DANA, LinkAja, etc.
-
The customer will see your merchant name and the amount to pay on their app screen. They can also enter a different amount if needed.
-
The customer will confirm the payment by entering their PIN or biometric authentication.
-
You will receive a notification on your OVO website dashboard that the payment is successful. You can also check the transaction history on the website.
-
You can issue a receipt to the customer if they request one. You can print it from the website or send it via email or SMS.
-
-
Conclusion
-
In conclusion, QRIS OVO Merchant is a convenient and cost-effective way to accept payments from various digital wallets using a single QR code. You can register as QRIS OVO Merchant easily by following the steps above. You can also download and print your QR code from the GrabMerchant app or the OVO website. To accept payments using QRIS OVO Merchant code, you just need to ask your customer to scan your QR code using their preferred payment service provider app that supports QRIS. You will receive your transaction funds within one business day after the transaction date. By using QRIS OVO Merchant, you can grow your business and reach more customers across Indonesia.
-
FAQs
-
-
What is QRIS?
-QRIS stands for Quick Response Code Indonesian Standard. It is a national standard for QR code payments that enables interoperability among different payment service providers in Indonesia.
-
What are the advantages of using QRIS?
-By using QRIS, you can accept payments from various payment service providers using a single QR code. This saves you the space and hassle of maintaining multiple QR codes for different payment service providers. You can also enjoy lower fees and a faster settlement process compared to other payment methods.
-
How do I know if my payment service provider supports QRIS?
-You can check if your payment service provider supports QRIS by looking for the QRIS logo on their app screen. The logo consists of two concentric squares with four smaller squares inside them. You can also visit [10](https://qris.id) to see the list of payment service providers that support QRIS.
How do I get my transaction funds from QRIS OVO Merchant?
-You will receive your transaction funds within one business day after the transaction date. The funds will be transferred to your bank account that you registered during the registration process. You can check your transaction history and balance on the GrabMerchant app or the OVO website.
-
How do I contact QRIS OVO Merchant customer service?
-If you have any questions or issues regarding QRIS OVO Merchant, you can contact the customer service via the following channels:
-
-
Phone: 1500-696 (for individual merchants) or 1500-286 (for corporate merchants)
-
Email: merchant@ovo.id
-
Live chat: available on the GrabMerchant app or the OVO website
-
-
How do I update my QRIS OVO Merchant information?
-If you need to update your QRIS OVO Merchant information, such as your name, address, phone number, email, bank account, etc., you can do so by following these steps:
-
-
For individual merchants: open the GrabMerchant app and tap on "Profile". Then, tap on "Edit Profile" and make the necessary changes. Tap on "Save" when you are done.
-
For corporate merchants: visit [11](https://ovo.id/merchant) and log in with your email and password. Then, click on "Profile" and make the necessary changes. Click on "Save" when you are done.
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Roblox Studio Launcher Beta APK Everything You Need to Know About the Latest Version.md b/spaces/congsaPfin/Manga-OCR/logs/Roblox Studio Launcher Beta APK Everything You Need to Know About the Latest Version.md
deleted file mode 100644
index 04841776822ddc9e63a6348db175457e2c6a9993..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Roblox Studio Launcher Beta APK Everything You Need to Know About the Latest Version.md
+++ /dev/null
@@ -1,74 +0,0 @@
-
-
How to Download and Install Roblox Studio Launcher Beta APK on Android
-
Roblox is a popular online platform that lets you create and play millions of immersive 3D experiences. If you are a Roblox developer or aspiring to be one, you might be interested in Roblox Studio Launcher Beta APK, a free and immersive creation engine that lets you make anything you can imagine on Roblox. In this article, we will show you what Roblox Studio Launcher Beta APK is, how to download and install it on your Android device, and how to use it to create or manage your experiences.
-
What is Roblox Studio Launcher Beta APK?
-
Roblox Studio Launcher Beta APK is an application file that lets you install and run Roblox Studio on your Android device. Roblox Studio is an all-in-one IDE that lets you create experiences that run on Roblox. It’s free to use and lets you reach millions of users using the Roblox app on console, desktop, and mobile devices.
Roblox Studio Launcher Beta APK has many features that make it a powerful and versatile tool for creating and publishing experiences on Roblox. Some of these features are:
-
-
Powerful 3D Building Tools: You can create almost anything you can imagine by combining parts and meshes into models with built-in materials or your own textures. You can also overlay or integrate user interface components like text labels, buttons, billboards, and inventory screens to provide more ways for users to interact with your experiences. You can also build or generate large scale terrains consisting of water, desert, mountains, and more to give your creations an intimate or large scale feeling. You can also take your experiences to the next level by adjusting lighting and atmosphere, integrating audio, and applying special effects.
-
Integrated Scripting, Debugging, and Testing: Roblox comes with a host of built-in features like matchmaking and physics, but scripting specific features is essential to making your experiences unique. Studio provides full access to the Roblox Engine APIs through a full-featured script editor with modern conveniences like autocompletion and code highlighting. Built-in debugging and profiling lets you catch errors and tune performance so your experiences run at their best on all devices. Finally, you can test directly in Studio through multiple testing and emulation modes.
-
Fast Publishing and Updating: Roblox not only provides the engine and tooling, but gives you access to a large social network of users on a broad array of devices. Just click to publish and your experiences are automatically available to this network with built-in discovery and monetization opportunities. Want to iterate on your experience? Make the changes and update them immediately for all users in an instant.
-
Collaborative and Extensible: Studio has built-in collaboration tools so you and your co-creators can build on your own time or together in real-time. Drag and drop parts to a shared workspace where changes appear to everyone at the same time, chat with your team in real-time, and have all your changes saved and managed in the cloud. You can also extend the feature set of Studio with custom plugins, or build your own plugins to share with the rest of the Roblox community.
-
-
Benefits of Roblox Studio Launcher Beta APK
-
Roblox Studio Launcher Beta APK has many benefits that make it a great option for creating and managing experiences on Roblox. Some of these benefits are:
-
-
website or on other platforms that support it, such as Windows, Mac, Linux, iOS, and Xbox One.
-
-
-
How can I get help or support for using Roblox Studio Launcher Beta APK?
-
If you need help or support for using Roblox Studio Launcher Beta APK, you can visit the official Roblox website or the Roblox Developer Forum, where you can find tutorials, guides, tips, and answers to common questions. You can also contact the Roblox Support team or send feedback through the app.
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Survive and Thrive with Garena Free Fire Lite Download for Mobile.md b/spaces/congsaPfin/Manga-OCR/logs/Survive and Thrive with Garena Free Fire Lite Download for Mobile.md
deleted file mode 100644
index 0aaf05cb3b90285788968b81dbb6eb0f527eacf2..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Survive and Thrive with Garena Free Fire Lite Download for Mobile.md
+++ /dev/null
@@ -1,160 +0,0 @@
-
-
Garena Free Fire Lite: How to Download and Play the Lighter Version of the Popular Battle Royale Game
-
Introduction
-
Garena Free Fire is one of the most popular and successful battle royale games on mobile platforms, with over a billion downloads on Google Play Store alone. However, not everyone has a high-end device that can run the game smoothly and enjoy its full potential. That's why Garena has released a lighter version of the game, called Garena Free Fire Lite, that can run on low-end devices without compromising the core gameplay experience.
Garena Free Fire Lite is a modified version of the original game that has reduced graphics quality, file size, and system requirements. It is designed to run on devices with low RAM, storage, and CPU power, as well as on older versions of Android and iOS operating systems. The game still offers the same thrilling survival shooter gameplay, with 50 players dropping into a large map and fighting for their survival until only one remains.
-
Why should you play Garena Free Fire Lite?
-
If you are a fan of battle royale games but your device cannot handle the original game, then Garena Free Fire Lite is a perfect choice for you. You can enjoy the following benefits by playing this version:
-
-
Smooth and fast gameplay - The game runs at a stable frame rate and does not lag or crash even on low-end devices.
-
Easy download and installation - The game has a small file size of around 300 MB, which means you can download it quickly and save your data usage.
-
Compatible with most devices - The game supports Android devices with OS 4.0.3 or higher, iOS devices with OS 9 or higher, and PC via BlueStacks emulator.
-
Same features as the original game - The game has the same game modes, maps, characters, weapons, items, and events as the original game, so you won't miss out on anything.
-
-
How to download and install Garena Free Fire Lite?
-
For Android devices
-
To download and install Garena Free Fire Lite on your Android device, follow these steps:
-
-
Go to Google Play Store and search for "Garena Free Fire Lite". Alternatively, you can use this link:
-
Tap on "Install" and wait for the download to finish.
-
Once the download is complete, tap on "Open" to launch the game.
-
Allow the game to access your device's storage, microphone, and location.
-
Create or log in to your Garena account or use Facebook or Google to sign in.
-
Enjoy playing Garena Free Fire Lite!
-
-
For iOS devices
-
To download and install Garena Free Fire Lite on your iOS device, follow these steps:
-
-
-
Go to App Store and search for "Garena Free Fire Lite". Alternatively, you can use this link:
-
Tap on "Get" and enter your Apple ID password if prompted.
-
Wait for the download to finish and tap on the app icon to launch the game.
-
Allow the game to access your device's microphone and location.
-
Create or log in to your Garena account or use Facebook or Google to sign in.
-
Enjoy playing Garena Free Fire Lite!
-
-
For PC via BlueStacks
-
To download and install Garena Free Fire Lite on your PC, you need to use an Android emulator such as BlueStacks. Follow these steps:
-
-
Download and install BlueStacks on your PC from this link:
-
Launch BlueStacks and sign in with your Google account.
-
Go to Google Play Store and search for "Garena Free Fire Lite". Alternatively, you can use this link:
-
Install the game and open it from the home screen.
-
Create or log in to your Garena account or use Facebook or Google to sign in.
-
Enjoy playing Garena Free Fire Lite on your PC!
-
-
How to play Garena Free Fire Lite?
-
Game modes and maps
-
Garena Free Fire Lite offers the same game modes and maps as the original game. You can choose from the following modes:
-
-
Classic - The standard battle royale mode where 50 players compete for survival on one of the four maps: Bermuda, Purgatory, Kalahari, or Bermuda Remastered.
-
Clash Squad - A team-based mode where two squads of four players each fight in a series of rounds on a small map. The squad with the most wins at the end of the match wins.
-
Rampage - A special mode that features a new map called Rampage Island and a new gameplay mechanic called Beast Mode. Players can transform into powerful beasts with enhanced abilities by collecting energy crystals.
-
Other modes - There are also other limited-time modes that are available during certain events, such as Zombie Invasion, Big Head, and Bomb Squad.
-
-
Characters and skills
-
Garena Free Fire Lite has the same characters and skills as the original game. You can choose from over 40 characters, each with their own unique skills and personalities. Some of the most popular characters are:
-
-
Alok - A famous DJ who can create a sound wave that heals himself and his allies within a radius.
-
K - A professor and jiu-jitsu expert who can switch between two modes: Jiujitsu Mode, which increases his EP recovery rate, and Psychology Mode, which restores EP for himself and his allies.
-
Kelly - A sprinter who can run faster than other characters.
-
Moco - A hacker who can tag enemies that she shoots, revealing their location to her team for a few seconds.
-
-
You can also customize your character's appearance with various outfits, accessories, and skins.
-
Weapons and items
-
Garena Free Fire Lite has the same weapons and items as the original game. You can find and use various types of weapons, such as assault rifles, sniper rifles, shotguns, SMGs, pistols, melee weapons, and grenades. Some of the most popular weapons are:
-
-
M1887 - A powerful shotgun that can deal massive damage at close range.
-
AUG - An accurate assault rifle that has low recoil and high fire rate.
-
AWM - A deadly sniper rifle that can kill enemies with one shot if aimed at the head.
-
M79 - A grenade launcher that can blast enemies with explosive projectiles.
-
-
You can also equip your weapons with attachments, such as scopes, silencers, magazines, and muzzles, to improve their performance. You can also use items such as medkits, armor vests, helmets, backpacks, and gloo walls to aid your survival.
-
Tips and tricks to win in Garena Free Fire Lite
-
Choose your landing spot wisely
-
The first thing you need to do when you enter a match is to choose where to land on the map. You can use the mini-map to see the flight path of the plane and the safe zone. You should choose a landing spot that suits your play style and strategy. For example:
-
-
If you want to avoid early fights and loot peacefully, you should land in remote areas that are far from the plane's path and the safe zone.
-
If you want to get into action quickly and loot high-quality items, you should land in hotspots that are near the plane's path and the safe zone. However, be prepared to face many enemies there.
-
If you want to have a balanced approach, you should land in medium-risk areas that are not too crowded but not too isolated either. You can find decent loot there and have some fights without being overwhelmed.
-
-
Whatever landing spot you choose, make sure you have a backup plan in case things go wrong. You should also keep an eye on the map and the timer to know when to move to the next safe zone.
-
Loot fast and move smart
-
After landing, you need to loot as fast as possible and equip yourself with weapons, items, and armor. You should prioritize finding a primary weapon, such as an assault rifle or a shotgun, and a secondary weapon, such as a pistol or a melee weapon. You should also look for medkits, armor vests, helmets, backpacks, and gloo walls. These items will help you survive longer and fight better.
-
However, you should not spend too much time looting and camping in one place. You should always be on the move and look for better loot and positions. You should also avoid unnecessary fights and only engage when you have an advantage or a clear opportunity. You should also use cover, such as buildings, trees, rocks, and vehicles, to protect yourself from enemy fire and ambushes.
-
Use the gloo wall and other utilities
-
One of the most useful items in Garena Free Fire Lite is the gloo wall. It is a throwable item that creates a temporary wall that can block bullets and explosions. You can use it to create cover, block doors and windows, trap enemies, or escape from danger. You can also shoot through the gloo wall if you have a scope attached to your weapon.
-
Another useful item is the grenade. It is an explosive device that can deal damage and knock back enemies within a radius. You can use it to flush out enemies from hiding places, break their gloo walls, or finish them off when they are low on health. You can also use other types of grenades, such as flashbangs, smoke grenades, and tear gas grenades, to blind, confuse, or slow down your enemies.
-
You should also use other utilities, such as the scanner, the air strike, the supply drop, and the vehicle. The scanner is a device that can reveal the location of nearby enemies for a few seconds. The air strike is a call that can summon a bombardment on a selected area. The supply drop is a crate that contains high-quality loot that drops from the sky. The vehicle is a mode of transportation that can help you move faster and run over enemies.
-
Communicate and cooperate with your team
-
If you are playing in a squad mode, you need to communicate and cooperate with your team members. You can use the voice chat or the quick chat to talk to your teammates and share information, such as enemy locations, loot locations, strategies, and requests. You can also use the ping system to mark places or items on the map for your teammates to see.
-
You should also cooperate with your team by sticking together, supporting each other, reviving each other, sharing loot, and executing tactics. You should also assign roles to your team members based on their skills and preferences. For example:
-
-
The leader - The one who makes decisions and gives orders to the team.
-
The scout - The one who scouts ahead and gathers information about the enemies and the environment.
-
The sniper - The one who provides long-range support and takes out enemies from afar.
-
The rusher - The one who rushes into close combat and initiates fights.
-
-
By communicating and cooperating with your team, you can increase your chances of winning and have more fun playing Garena Free Fire Lite.
-
Conclusion
-
Garena Free Fire Lite is a lighter version of the original game that can run on low-end devices without compromising the core gameplay experience. It offers the same thrilling survival shooter gameplay, with 50 players competing for survival across various maps and modes, and it has the same features as the original game, with over 40 characters and a wide range of weapons, items, and events. You can download and install the game on your Android, iOS, or PC device quickly and easily, and play it smoothly without lag or crashes. You can also use the tips and tricks above to improve your skills and win more matches. Garena Free Fire Lite is a great choice for anyone who loves battle royale games but has a low-end device. Download it now and join the fun!
-
FAQs
-
What is the difference between Garena Free Fire and Garena Free Fire Lite?
-
The main difference between the two games is the graphics quality, file size, and system requirements. Garena Free Fire Lite has lower graphics quality, smaller file size, and lower system requirements than the original game. However, both games have the same gameplay features and content.
-
Can I play Garena Free Fire Lite with my friends who play Garena Free Fire?
-
Yes, you can play Garena Free Fire Lite with your friends who play Garena Free Fire. Both games share the same server and account system, so you can invite and join your friends from either game.
-
How can I get diamonds in Garena Free Fire Lite?
-
Diamonds are the premium currency in Garena Free Fire Lite that can be used to buy various items, such as characters, outfits, skins, and crates. You can get diamonds by purchasing them with real money or by completing certain tasks and events in the game.
-
How can I update Garena Free Fire Lite?
-
To update Garena Free Fire Lite, you need to go to the app store where you downloaded the game and check for any available updates. You can also enable the auto-update option in your device settings to update the game automatically whenever there is a new version.
-
How can I contact the customer service of Garena Free Fire Lite?
-
If you have any questions, problems, or feedback about Garena Free Fire Lite, you can contact the customer service of the game by following these steps:
-
-
Open the game and tap on the "Settings" icon on the top right corner of the screen.
-
Tap on the "Customer Service" option on the bottom left corner of the screen.
-
Fill in your details and your message and tap on "Submit".
-
Wait for a reply from the customer service team.
-
-
\ No newline at end of file
diff --git a/spaces/contluForse/HuggingGPT/assets/Cheat-hack - Mp-hacks Esp V5.0 For Counter Strike 1.6 BEST.md b/spaces/contluForse/HuggingGPT/assets/Cheat-hack - Mp-hacks Esp V5.0 For Counter Strike 1.6 BEST.md
deleted file mode 100644
index 8821a6cd72e72d576e83a1cd71e583ec2314df59..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/assets/Cheat-hack - Mp-hacks Esp V5.0 For Counter Strike 1.6 BEST.md
+++ /dev/null
@@ -1,14 +0,0 @@
-
Cheat-hack - mp-hacks esp v5.0 for counter strike 1.6
-
-Counter Strike 1.6 cheat: sXe 12 WallHack + Aimbot + Inexinferis. Press F12 to toggle the hack in-game.
-MP-Hacks ESP V5.0 cheat for Counter Strike 1.6.
-In this section you can find and download cheats for CS 1.6 and CS:GO for free, without SMS and without registration.
-
-
-
diff --git a/spaces/contluForse/HuggingGPT/assets/Ei Jig Standard 1530 Pdf 11 NEW!.md b/spaces/contluForse/HuggingGPT/assets/Ei Jig Standard 1530 Pdf 11 NEW!.md
deleted file mode 100644
index b07c079d715a36558bb3a80bb2bdb292b39f1ab8..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/assets/Ei Jig Standard 1530 Pdf 11 NEW!.md
+++ /dev/null
@@ -1,8 +0,0 @@
-
-
-EI/JIG STANDARD 1530, Quality Assurance for the Production, Storage and Distribution of Aviation Fuel at Airports: this standard sets out security and quality assurance requirements for the production, storage and distribution of aviation fuel at airports.
-The standard applies to aviation fuel received into storage or blended from different suppliers, and may also be applied to aviation fuel received, stored and/or blended in a tanker.
-The requirements of this standard do not apply to:
-
-
-
diff --git a/spaces/cooelf/Multimodal-CoT/timm/models/resnet.py b/spaces/cooelf/Multimodal-CoT/timm/models/resnet.py
deleted file mode 100644
index 66baa37a90dbb2f2cdb510bc0b988cd25bd5887a..0000000000000000000000000000000000000000
--- a/spaces/cooelf/Multimodal-CoT/timm/models/resnet.py
+++ /dev/null
@@ -1,1455 +0,0 @@
-"""PyTorch ResNet
-
-This started as a copy of https://github.com/pytorch/vision 'resnet.py' (BSD-3-Clause) with
-additional dropout and dynamic global avg/max pool.
-
-ResNeXt, SE-ResNeXt, SENet, and MXNet Gluon stem/downsample variants, tiered stems added by Ross Wightman
-Copyright 2020 Ross Wightman
-"""
-import math
-from functools import partial
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
-from .helpers import build_model_with_cfg
-from .layers import DropBlock2d, DropPath, AvgPool2dSame, BlurPool2d, create_attn, get_attn, create_classifier
-from .registry import register_model
-
-__all__ = ['ResNet', 'BasicBlock', 'Bottleneck'] # model_registry will add each entrypoint fn to this
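-# Note (illustrative): the model entrypoint functions registered via @register_model later in
-# this file are what make these architectures available through timm's factory, e.g.
-#   model = timm.create_model('resnet50', pretrained=True)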
-
-
-def _cfg(url='', **kwargs):
- return {
- 'url': url,
- 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
- 'crop_pct': 0.875, 'interpolation': 'bilinear',
- 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
- 'first_conv': 'conv1', 'classifier': 'fc',
- **kwargs
- }
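-# _cfg merges per-model overrides into the default pretrained-config dict above, e.g.
-# _cfg(url='...', interpolation='bicubic') keeps every default key but swaps the
-# interpolation; the entries of default_cfgs below are all built this way.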
-
-
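-# default_cfgs maps each model name to its pretrained-weights config: download url, expected
-# input_size / pool_size, the crop_pct and interpolation used at eval time, normalization
-# mean/std, and the names of the first conv and classifier layers (see _cfg above); keys such
-# as test_input_size give an alternative (larger) evaluation resolution.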
-default_cfgs = {
- # ResNet and Wide ResNet
- 'resnet18': _cfg(url='https://download.pytorch.org/models/resnet18-5c106cde.pth'),
- 'resnet18d': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet18d_ra2-48a79e06.pth',
- interpolation='bicubic', first_conv='conv1.0'),
- 'resnet34': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth'),
- 'resnet34d': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34d_ra2-f8dcfcaf.pth',
- interpolation='bicubic', first_conv='conv1.0'),
- 'resnet26': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26-9aa10e23.pth',
- interpolation='bicubic'),
- 'resnet26d': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26d-69e92c46.pth',
- interpolation='bicubic', first_conv='conv1.0'),
- 'resnet26t': _cfg(
- url='',
- interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8)),
- 'resnet50': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50_ram-a26f946b.pth',
- interpolation='bicubic'),
- 'resnet50d': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50d_ra2-464e36ba.pth',
- interpolation='bicubic', first_conv='conv1.0'),
- 'resnet50t': _cfg(
- url='',
- interpolation='bicubic', first_conv='conv1.0'),
- 'resnet101': _cfg(url='', interpolation='bicubic'),
- 'resnet101d': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet101d_ra2-2803ffab.pth',
- interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),
- crop_pct=1.0, test_input_size=(3, 320, 320)),
- 'resnet152': _cfg(url='', interpolation='bicubic'),
- 'resnet152d': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet152d_ra2-5cac0439.pth',
- interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),
- crop_pct=1.0, test_input_size=(3, 320, 320)),
- 'resnet200': _cfg(url='', interpolation='bicubic'),
- 'resnet200d': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet200d_ra2-bdba9bf9.pth',
- interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),
- crop_pct=1.0, test_input_size=(3, 320, 320)),
- 'tv_resnet34': _cfg(url='https://download.pytorch.org/models/resnet34-333f7ec4.pth'),
- 'tv_resnet50': _cfg(url='https://download.pytorch.org/models/resnet50-19c8e357.pth'),
- 'tv_resnet101': _cfg(url='https://download.pytorch.org/models/resnet101-5d3b4d8f.pth'),
- 'tv_resnet152': _cfg(url='https://download.pytorch.org/models/resnet152-b121ed2d.pth'),
- 'wide_resnet50_2': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/wide_resnet50_racm-8234f177.pth',
- interpolation='bicubic'),
- 'wide_resnet101_2': _cfg(url='https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth'),
-
- # ResNeXt
- 'resnext50_32x4d': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50_32x4d_ra-d733960d.pth',
- interpolation='bicubic'),
- 'resnext50d_32x4d': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50d_32x4d-103e99f8.pth',
- interpolation='bicubic',
- first_conv='conv1.0'),
- 'resnext101_32x4d': _cfg(url=''),
- 'resnext101_32x8d': _cfg(url='https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth'),
- 'resnext101_64x4d': _cfg(url=''),
- 'tv_resnext50_32x4d': _cfg(url='https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth'),
-
- # ResNeXt models - Weakly Supervised Pretraining on Instagram Hashtags
- # from https://github.com/facebookresearch/WSL-Images
- # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only.
- 'ig_resnext101_32x8d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth'),
- 'ig_resnext101_32x16d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth'),
- 'ig_resnext101_32x32d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth'),
- 'ig_resnext101_32x48d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth'),
-
- # Semi-Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models
- # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only.
- 'ssl_resnet18': _cfg(
- url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth'),
- 'ssl_resnet50': _cfg(
- url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth'),
- 'ssl_resnext50_32x4d': _cfg(
- url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext50_32x4-ddb3e555.pth'),
- 'ssl_resnext101_32x4d': _cfg(
- url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x4-dc43570a.pth'),
- 'ssl_resnext101_32x8d': _cfg(
- url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x8-2cfe2f8b.pth'),
- 'ssl_resnext101_32x16d': _cfg(
- url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x16-15fffa57.pth'),
-
- # Semi-Weakly Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models
- # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only.
- 'swsl_resnet18': _cfg(
- url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet18-118f1556.pth'),
- 'swsl_resnet50': _cfg(
- url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet50-16a12f1b.pth'),
- 'swsl_resnext50_32x4d': _cfg(
- url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth'),
- 'swsl_resnext101_32x4d': _cfg(
- url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth'),
- 'swsl_resnext101_32x8d': _cfg(
- url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth'),
- 'swsl_resnext101_32x16d': _cfg(
- url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth'),
-
- # Squeeze-Excitation ResNets, to eventually replace the models in senet.py
- 'seresnet18': _cfg(
- url='',
- interpolation='bicubic'),
- 'seresnet34': _cfg(
- url='',
- interpolation='bicubic'),
- 'seresnet50': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet50_ra_224-8efdb4bb.pth',
- interpolation='bicubic'),
- 'seresnet50t': _cfg(
- url='',
- interpolation='bicubic',
- first_conv='conv1.0'),
- 'seresnet101': _cfg(
- url='',
- interpolation='bicubic'),
- 'seresnet152': _cfg(
- url='',
- interpolation='bicubic'),
- 'seresnet152d': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet152d_ra2-04464dd2.pth',
- interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),
- crop_pct=1.0, test_input_size=(3, 320, 320)
- ),
- 'seresnet200d': _cfg(
- url='',
- interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)),
- 'seresnet269d': _cfg(
- url='',
- interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)),
-
-
- # Squeeze-Excitation ResNeXts, to eventually replace the models in senet.py
- 'seresnext26d_32x4d': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26d_32x4d-80fa48a3.pth',
- interpolation='bicubic',
- first_conv='conv1.0'),
- 'seresnext26t_32x4d': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26tn_32x4d-569cb627.pth',
- interpolation='bicubic',
- first_conv='conv1.0'),
- 'seresnext50_32x4d': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext50_32x4d_racm-a304a460.pth',
- interpolation='bicubic'),
- 'seresnext101_32x4d': _cfg(
- url='',
- interpolation='bicubic'),
- 'seresnext101_32x8d': _cfg(
- url='',
- interpolation='bicubic'),
- 'senet154': _cfg(
- url='',
- interpolation='bicubic',
- first_conv='conv1.0'),
-
- # Efficient Channel Attention ResNets
- 'ecaresnet26t': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet26t_ra2-46609757.pth',
- interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),
- crop_pct=0.95, test_input_size=(3, 320, 320)),
- 'ecaresnetlight': _cfg(
- url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNetLight_4f34b35b.pth',
- interpolation='bicubic'),
- 'ecaresnet50d': _cfg(
- url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet50D_833caf58.pth',
- interpolation='bicubic',
- first_conv='conv1.0'),
- 'ecaresnet50d_pruned': _cfg(
- url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45899/outputs/ECAResNet50D_P_9c67f710.pth',
- interpolation='bicubic',
- first_conv='conv1.0'),
- 'ecaresnet50t': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet50t_ra2-f7ac63c4.pth',
- interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),
- crop_pct=0.95, test_input_size=(3, 320, 320)),
- 'ecaresnet101d': _cfg(
- url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet101D_281c5844.pth',
- interpolation='bicubic', first_conv='conv1.0'),
- 'ecaresnet101d_pruned': _cfg(
- url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45610/outputs/ECAResNet101D_P_75a3370e.pth',
- interpolation='bicubic',
- first_conv='conv1.0'),
- 'ecaresnet200d': _cfg(
- url='',
- interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)),
- 'ecaresnet269d': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet269d_320_ra2-7baa55cb.pth',
- interpolation='bicubic', first_conv='conv1.0', input_size=(3, 320, 320), pool_size=(10, 10),
- crop_pct=1.0, test_input_size=(3, 352, 352)),
-
- # Efficient Channel Attention ResNeXts
- 'ecaresnext26t_32x4d': _cfg(
- url='',
- interpolation='bicubic', first_conv='conv1.0'),
- 'ecaresnext50t_32x4d': _cfg(
- url='',
- interpolation='bicubic', first_conv='conv1.0'),
-
- # ResNets with anti-aliasing blur pool
- 'resnetblur18': _cfg(
- interpolation='bicubic'),
- 'resnetblur50': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnetblur50-84f4748f.pth',
- interpolation='bicubic'),
-
- # ResNet-RS models
- 'resnetrs50': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs50_ema-6b53758b.pth',
- input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.91, test_input_size=(3, 224, 224),
- interpolation='bicubic', first_conv='conv1.0'),
- 'resnetrs101': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs101_i192_ema-1509bbf6.pth',
- input_size=(3, 192, 192), pool_size=(6, 6), crop_pct=0.94, test_input_size=(3, 288, 288),
- interpolation='bicubic', first_conv='conv1.0'),
- 'resnetrs152': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs152_i256_ema-a9aff7f9.pth',
- input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320),
- interpolation='bicubic', first_conv='conv1.0'),
- 'resnetrs200': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs200_ema-623d2f59.pth',
- input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320),
- interpolation='bicubic', first_conv='conv1.0'),
- 'resnetrs270': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs270_ema-b40e674c.pth',
- input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 352, 352),
- interpolation='bicubic', first_conv='conv1.0'),
- 'resnetrs350': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs350_i256_ema-5a1aa8f1.pth',
- input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, test_input_size=(3, 384, 384),
- interpolation='bicubic', first_conv='conv1.0'),
- 'resnetrs420': _cfg(
- url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs420_ema-972dee69.pth',
- input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, test_input_size=(3, 416, 416),
- interpolation='bicubic', first_conv='conv1.0'),
-}
-
-
-def get_padding(kernel_size, stride, dilation=1):
- padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
- return padding
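-# Worked examples of the padding formula above (values otherwise left implicit in this file):
-#   get_padding(3, 1) -> 1 and get_padding(3, 1, dilation=2) -> 2 ('same'-style 3x3 convs),
-#   get_padding(7, 2) -> 3, matching the hard-coded padding=3 of the 7x7 stem conv further below.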
-
-
-class BasicBlock(nn.Module):
- expansion = 1
-
- def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64,
- reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d,
- attn_layer=None, aa_layer=None, drop_block=None, drop_path=None):
- super(BasicBlock, self).__init__()
-
- assert cardinality == 1, 'BasicBlock only supports cardinality of 1'
- assert base_width == 64, 'BasicBlock does not support changing base width'
- first_planes = planes // reduce_first
- outplanes = planes * self.expansion
- first_dilation = first_dilation or dilation
- use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation)
-
- self.conv1 = nn.Conv2d(
- inplanes, first_planes, kernel_size=3, stride=1 if use_aa else stride, padding=first_dilation,
- dilation=first_dilation, bias=False)
- self.bn1 = norm_layer(first_planes)
- self.act1 = act_layer(inplace=True)
- self.aa = aa_layer(channels=first_planes, stride=stride) if use_aa else None
-
- self.conv2 = nn.Conv2d(
- first_planes, outplanes, kernel_size=3, padding=dilation, dilation=dilation, bias=False)
- self.bn2 = norm_layer(outplanes)
-
- self.se = create_attn(attn_layer, outplanes)
-
- self.act2 = act_layer(inplace=True)
- self.downsample = downsample
- self.stride = stride
- self.dilation = dilation
- self.drop_block = drop_block
- self.drop_path = drop_path
-
- def zero_init_last_bn(self):
- nn.init.zeros_(self.bn2.weight)
-
- def forward(self, x):
- shortcut = x
-
- x = self.conv1(x)
- x = self.bn1(x)
- if self.drop_block is not None:
- x = self.drop_block(x)
- x = self.act1(x)
- if self.aa is not None:
- x = self.aa(x)
-
- x = self.conv2(x)
- x = self.bn2(x)
- if self.drop_block is not None:
- x = self.drop_block(x)
-
- if self.se is not None:
- x = self.se(x)
-
- if self.drop_path is not None:
- x = self.drop_path(x)
-
- if self.downsample is not None:
- shortcut = self.downsample(shortcut)
- x += shortcut
- x = self.act2(x)
-
- return x
-
-
-class Bottleneck(nn.Module):
- expansion = 4
-
- def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64,
- reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d,
- attn_layer=None, aa_layer=None, drop_block=None, drop_path=None):
- super(Bottleneck, self).__init__()
-
- width = int(math.floor(planes * (base_width / 64)) * cardinality)
- first_planes = width // reduce_first
- outplanes = planes * self.expansion
- first_dilation = first_dilation or dilation
- use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation)
-
- self.conv1 = nn.Conv2d(inplanes, first_planes, kernel_size=1, bias=False)
- self.bn1 = norm_layer(first_planes)
- self.act1 = act_layer(inplace=True)
-
- self.conv2 = nn.Conv2d(
- first_planes, width, kernel_size=3, stride=1 if use_aa else stride,
- padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False)
- self.bn2 = norm_layer(width)
- self.act2 = act_layer(inplace=True)
- self.aa = aa_layer(channels=width, stride=stride) if use_aa else None
-
- self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False)
- self.bn3 = norm_layer(outplanes)
-
- self.se = create_attn(attn_layer, outplanes)
-
- self.act3 = act_layer(inplace=True)
- self.downsample = downsample
- self.stride = stride
- self.dilation = dilation
- self.drop_block = drop_block
- self.drop_path = drop_path
-
- def zero_init_last_bn(self):
- nn.init.zeros_(self.bn3.weight)
-
- def forward(self, x):
- shortcut = x
-
- x = self.conv1(x)
- x = self.bn1(x)
- if self.drop_block is not None:
- x = self.drop_block(x)
- x = self.act1(x)
-
- x = self.conv2(x)
- x = self.bn2(x)
- if self.drop_block is not None:
- x = self.drop_block(x)
- x = self.act2(x)
- if self.aa is not None:
- x = self.aa(x)
-
- x = self.conv3(x)
- x = self.bn3(x)
- if self.drop_block is not None:
- x = self.drop_block(x)
-
- if self.se is not None:
- x = self.se(x)
-
- if self.drop_path is not None:
- x = self.drop_path(x)
-
- if self.downsample is not None:
- shortcut = self.downsample(shortcut)
- x += shortcut
- x = self.act3(x)
-
- return x
-
-
-def downsample_conv(
- in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None):
- norm_layer = norm_layer or nn.BatchNorm2d
- kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size
- first_dilation = (first_dilation or dilation) if kernel_size > 1 else 1
- p = get_padding(kernel_size, stride, first_dilation)
-
- return nn.Sequential(*[
- nn.Conv2d(
- in_channels, out_channels, kernel_size, stride=stride, padding=p, dilation=first_dilation, bias=False),
- norm_layer(out_channels)
- ])
-
-
-def downsample_avg(
- in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None):
- norm_layer = norm_layer or nn.BatchNorm2d
- avg_stride = stride if dilation == 1 else 1
- if stride == 1 and dilation == 1:
- pool = nn.Identity()
- else:
- avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
- pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
-
- return nn.Sequential(*[
- pool,
- nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False),
- norm_layer(out_channels)
- ])
-
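-# Note on the two shortcut flavours above: downsample_conv is the classic strided 1x1
-# projection, while downsample_avg is the 'D' (bag-of-tricks) variant used when avg_down=True,
-# where a 2x2 average pool carries the stride and a stride-1 1x1 conv follows, so the
-# projection no longer skips 3 out of every 4 input positions.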
-
-def drop_blocks(drop_block_rate=0.):
- return [
- None, None,
- DropBlock2d(drop_block_rate, 5, 0.25) if drop_block_rate else None,
- DropBlock2d(drop_block_rate, 3, 1.00) if drop_block_rate else None]
-
-
-def make_blocks(
- block_fn, channels, block_repeats, inplanes, reduce_first=1, output_stride=32,
- down_kernel_size=1, avg_down=False, drop_block_rate=0., drop_path_rate=0., **kwargs):
- stages = []
- feature_info = []
- net_num_blocks = sum(block_repeats)
- net_block_idx = 0
- net_stride = 4
- dilation = prev_dilation = 1
- for stage_idx, (planes, num_blocks, db) in enumerate(zip(channels, block_repeats, drop_blocks(drop_block_rate))):
- stage_name = f'layer{stage_idx + 1}' # never liked this name, but weight compat requires it
- stride = 1 if stage_idx == 0 else 2
- if net_stride >= output_stride:
- dilation *= stride
- stride = 1
- else:
- net_stride *= stride
-
- downsample = None
- if stride != 1 or inplanes != planes * block_fn.expansion:
- down_kwargs = dict(
- in_channels=inplanes, out_channels=planes * block_fn.expansion, kernel_size=down_kernel_size,
- stride=stride, dilation=dilation, first_dilation=prev_dilation, norm_layer=kwargs.get('norm_layer'))
- downsample = downsample_avg(**down_kwargs) if avg_down else downsample_conv(**down_kwargs)
-
- block_kwargs = dict(reduce_first=reduce_first, dilation=dilation, drop_block=db, **kwargs)
- blocks = []
- for block_idx in range(num_blocks):
- downsample = downsample if block_idx == 0 else None
- stride = stride if block_idx == 0 else 1
- block_dpr = drop_path_rate * net_block_idx / (net_num_blocks - 1) # stochastic depth linear decay rule
- blocks.append(block_fn(
- inplanes, planes, stride, downsample, first_dilation=prev_dilation,
- drop_path=DropPath(block_dpr) if block_dpr > 0. else None, **block_kwargs))
- prev_dilation = dilation
- inplanes = planes * block_fn.expansion
- net_block_idx += 1
-
- stages.append((stage_name, nn.Sequential(*blocks)))
- feature_info.append(dict(num_chs=inplanes, reduction=net_stride, module=stage_name))
-
- return stages, feature_info
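-# Worked example of the stochastic depth decay rule above: with drop_path_rate=0.1 and a
-# ResNet-50 layout (3 + 4 + 6 + 3 = 16 blocks), block i counted across the whole network gets a
-# DropPath probability of 0.1 * i / 15, rising linearly from 0.0 (first block) to 0.1 (last).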
-
-
-class ResNet(nn.Module):
- """ResNet / ResNeXt / SE-ResNeXt / SE-Net
-
- This class implements all variants of ResNet, ResNeXt, SE-ResNeXt, and SENet that
- * have > 1 stride in the 3x3 conv layer of bottleneck
- * have conv-bn-act ordering
-
- This ResNet impl supports a number of stem and downsample options based on the v1c, v1d, v1e, and v1s
- variants included in the MXNet Gluon ResNetV1b model. The C and D variants are also discussed in the
- 'Bag of Tricks' paper: https://arxiv.org/pdf/1812.01187. The B variant is equivalent to torchvision default.
-
- ResNet variants (the same modifications can be used in SE/ResNeXt models as well):
- * normal, b - 7x7 stem, stem_width = 64, same as torchvision ResNet, NVIDIA ResNet 'v1.5', Gluon v1b
- * c - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64)
- * d - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64), average pool in downsample
- * e - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128), average pool in downsample
- * s - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128)
- * t - 3 layer deep 3x3 stem, stem width = 32 (24, 48, 64), average pool in downsample
- * tn - 3 layer deep 3x3 stem, stem width = 32 (24, 32, 64), average pool in downsample
-
- ResNeXt
- * normal - 7x7 stem, stem_width = 64, standard cardinality and base widths
- * same c,d, e, s variants as ResNet can be enabled
-
- SE-ResNeXt
- * normal - 7x7 stem, stem_width = 64
- * same c, d, e, s variants as ResNet can be enabled
-
- SENet-154 - 3 layer deep 3x3 stem (same as v1c-v1s), stem_width = 64, cardinality=64,
- reduction by 2 on width of first bottleneck convolution, 3x3 downsample convs after first block
-
- Parameters
- ----------
- block : Block
- Class for the residual block. Options are BasicBlock, Bottleneck.
- layers : list of int
- Numbers of layers in each block
- num_classes : int, default 1000
- Number of classification classes.
- in_chans : int, default 3
- Number of input (color) channels.
- cardinality : int, default 1
- Number of convolution groups for 3x3 conv in Bottleneck.
- base_width : int, default 64
- Factor determining bottleneck channels. `planes * base_width / 64 * cardinality`
- stem_width : int, default 64
- Number of channels in stem convolutions
- stem_type : str, default ''
- The type of stem:
- * '', default - a single 7x7 conv with a width of stem_width
- * 'deep' - three 3x3 convolution layers of widths stem_width, stem_width, stem_width * 2
- * 'deep_tiered' - three 3x3 conv layers of widths stem_width//4 * 3, stem_width, stem_width * 2
- block_reduce_first: int, default 1
- Reduction factor for first convolution output width of residual blocks,
- 1 for all archs except SENets, where it is 2
- down_kernel_size: int, default 1
- Kernel size of residual block downsampling path, 1x1 for most archs, 3x3 for senets
- avg_down : bool, default False
- Whether to use average pooling for projection skip connection between stages/downsample.
- output_stride : int, default 32
- Set the output stride of the network, 32, 16, or 8. Typically used in segmentation.
- act_layer : nn.Module, activation layer
- norm_layer : nn.Module, normalization layer
- aa_layer : nn.Module, anti-aliasing layer
- drop_rate : float, default 0.
- Dropout probability before classifier, for training
- global_pool : str, default 'avg'
- Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax'
- """
-
- def __init__(self, block, layers, num_classes=1000, in_chans=3,
- cardinality=1, base_width=64, stem_width=64, stem_type='', replace_stem_pool=False,
- output_stride=32, block_reduce_first=1, down_kernel_size=1, avg_down=False,
- act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_rate=0.0, drop_path_rate=0.,
- drop_block_rate=0., global_pool='avg', zero_init_last_bn=True, block_args=None):
- block_args = block_args or dict()
- assert output_stride in (8, 16, 32)
- self.num_classes = num_classes
- self.drop_rate = drop_rate
- super(ResNet, self).__init__()
-
- # Stem
- deep_stem = 'deep' in stem_type
- inplanes = stem_width * 2 if deep_stem else 64
- if deep_stem:
- stem_chs = (stem_width, stem_width)
- if 'tiered' in stem_type:
- stem_chs = (3 * (stem_width // 4), stem_width)
- self.conv1 = nn.Sequential(*[
- nn.Conv2d(in_chans, stem_chs[0], 3, stride=2, padding=1, bias=False),
- norm_layer(stem_chs[0]),
- act_layer(inplace=True),
- nn.Conv2d(stem_chs[0], stem_chs[1], 3, stride=1, padding=1, bias=False),
- norm_layer(stem_chs[1]),
- act_layer(inplace=True),
- nn.Conv2d(stem_chs[1], inplanes, 3, stride=1, padding=1, bias=False)])
- else:
- self.conv1 = nn.Conv2d(in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)
- self.bn1 = norm_layer(inplanes)
- self.act1 = act_layer(inplace=True)
- self.feature_info = [dict(num_chs=inplanes, reduction=2, module='act1')]
-
- # Stem Pooling
- if replace_stem_pool:
- self.maxpool = nn.Sequential(*filter(None, [
- nn.Conv2d(inplanes, inplanes, 3, stride=1 if aa_layer else 2, padding=1, bias=False),
- aa_layer(channels=inplanes, stride=2) if aa_layer else None,
- norm_layer(inplanes),
- act_layer(inplace=True)
- ]))
- else:
- if aa_layer is not None:
- self.maxpool = nn.Sequential(*[
- nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
- aa_layer(channels=inplanes, stride=2)])
- else:
- self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
-
- # Feature Blocks
- channels = [64, 128, 256, 512]
- stage_modules, stage_feature_info = make_blocks(
- block, channels, layers, inplanes, cardinality=cardinality, base_width=base_width,
- output_stride=output_stride, reduce_first=block_reduce_first, avg_down=avg_down,
- down_kernel_size=down_kernel_size, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer,
- drop_block_rate=drop_block_rate, drop_path_rate=drop_path_rate, **block_args)
- for stage in stage_modules:
- self.add_module(*stage) # layer1, layer2, etc
- self.feature_info.extend(stage_feature_info)
-
- # Head (Pooling and Classifier)
- self.num_features = 512 * block.expansion
- self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
-
- self.init_weights(zero_init_last_bn=zero_init_last_bn)
-
- def init_weights(self, zero_init_last_bn=True):
- for n, m in self.named_modules():
- if isinstance(m, nn.Conv2d):
- nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
- elif isinstance(m, nn.BatchNorm2d):
- nn.init.ones_(m.weight)
- nn.init.zeros_(m.bias)
- if zero_init_last_bn:
- for m in self.modules():
- if hasattr(m, 'zero_init_last_bn'):
- m.zero_init_last_bn()
-
- def get_classifier(self):
- return self.fc
-
- def reset_classifier(self, num_classes, global_pool='avg'):
- self.num_classes = num_classes
- self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
-
- def forward_features(self, x):
- x = self.conv1(x)
- x = self.bn1(x)
- x = self.act1(x)
- x = self.maxpool(x)
-
- x = self.layer1(x)
- x = self.layer2(x)
- x = self.layer3(x)
- x = self.layer4(x)
- return x
-
- def forward(self, x):
- x = self.forward_features(x)
- x = self.global_pool(x)
- if self.drop_rate:
- x = F.dropout(x, p=float(self.drop_rate), training=self.training)
- x = self.fc(x)
- return x
-
-
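-# The ResNet class above can be instantiated directly; the variant letters in its docstring
-# map onto constructor arguments. An illustrative sketch, mirroring the 'resnet50d' entry
-# registered further below:
-#
-#   model = ResNet(Bottleneck, [3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True)
-#   logits = model(torch.randn(1, 3, 224, 224))  # -> torch.Size([1, 1000])
-#
-# In normal use, models are built via the registered factories below, e.g.
-# timm.create_model('resnet50d', pretrained=True), which routes through _create_resnet and
-# pulls the weight URL from the matching default_cfgs entry.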
-def _create_resnet(variant, pretrained=False, **kwargs):
- return build_model_with_cfg(
- ResNet, variant, pretrained,
- default_cfg=default_cfgs[variant],
- **kwargs)
-
-
-@register_model
-def resnet18(pretrained=False, **kwargs):
- """Constructs a ResNet-18 model.
- """
- model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs)
- return _create_resnet('resnet18', pretrained, **model_args)
-
-
-@register_model
-def resnet18d(pretrained=False, **kwargs):
- """Constructs a ResNet-18-D model.
- """
- model_args = dict(
- block=BasicBlock, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
- return _create_resnet('resnet18d', pretrained, **model_args)
-
-
-@register_model
-def resnet34(pretrained=False, **kwargs):
- """Constructs a ResNet-34 model.
- """
- model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs)
- return _create_resnet('resnet34', pretrained, **model_args)
-
-
-@register_model
-def resnet34d(pretrained=False, **kwargs):
- """Constructs a ResNet-34-D model.
- """
- model_args = dict(
- block=BasicBlock, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
- return _create_resnet('resnet34d', pretrained, **model_args)
-
-
-@register_model
-def resnet26(pretrained=False, **kwargs):
- """Constructs a ResNet-26 model.
- """
- model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], **kwargs)
- return _create_resnet('resnet26', pretrained, **model_args)
-
-
-@register_model
-def resnet26t(pretrained=False, **kwargs):
- """Constructs a ResNet-26-T model.
- """
- model_args = dict(
- block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep_tiered', avg_down=True, **kwargs)
- return _create_resnet('resnet26t', pretrained, **model_args)
-
-
-@register_model
-def resnet26d(pretrained=False, **kwargs):
- """Constructs a ResNet-26-D model.
- """
- model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
- return _create_resnet('resnet26d', pretrained, **model_args)
-
-
-@register_model
-def resnet50(pretrained=False, **kwargs):
- """Constructs a ResNet-50 model.
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
- return _create_resnet('resnet50', pretrained, **model_args)
-
-
-@register_model
-def resnet50d(pretrained=False, **kwargs):
- """Constructs a ResNet-50-D model.
- """
- model_args = dict(
- block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
- return _create_resnet('resnet50d', pretrained, **model_args)
-
-
-@register_model
-def resnet50t(pretrained=False, **kwargs):
- """Constructs a ResNet-50-T model.
- """
- model_args = dict(
- block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True, **kwargs)
- return _create_resnet('resnet50t', pretrained, **model_args)
-
-
-@register_model
-def resnet101(pretrained=False, **kwargs):
- """Constructs a ResNet-101 model.
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs)
- return _create_resnet('resnet101', pretrained, **model_args)
-
-
-@register_model
-def resnet101d(pretrained=False, **kwargs):
- """Constructs a ResNet-101-D model.
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
- return _create_resnet('resnet101d', pretrained, **model_args)
-
-
-@register_model
-def resnet152(pretrained=False, **kwargs):
- """Constructs a ResNet-152 model.
- """
- model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs)
- return _create_resnet('resnet152', pretrained, **model_args)
-
-
-@register_model
-def resnet152d(pretrained=False, **kwargs):
- """Constructs a ResNet-152-D model.
- """
- model_args = dict(
- block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
- return _create_resnet('resnet152d', pretrained, **model_args)
-
-
-@register_model
-def resnet200(pretrained=False, **kwargs):
- """Constructs a ResNet-200 model.
- """
- model_args = dict(block=Bottleneck, layers=[3, 24, 36, 3], **kwargs)
- return _create_resnet('resnet200', pretrained, **model_args)
-
-
-@register_model
-def resnet200d(pretrained=False, **kwargs):
- """Constructs a ResNet-200-D model.
- """
- model_args = dict(
- block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
- return _create_resnet('resnet200d', pretrained, **model_args)
-
-
-@register_model
-def tv_resnet34(pretrained=False, **kwargs):
- """Constructs a ResNet-34 model with original Torchvision weights.
- """
- model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs)
- return _create_resnet('tv_resnet34', pretrained, **model_args)
-
-
-@register_model
-def tv_resnet50(pretrained=False, **kwargs):
- """Constructs a ResNet-50 model with original Torchvision weights.
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
- return _create_resnet('tv_resnet50', pretrained, **model_args)
-
-
-@register_model
-def tv_resnet101(pretrained=False, **kwargs):
- """Constructs a ResNet-101 model w/ Torchvision pretrained weights.
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs)
- return _create_resnet('tv_resnet101', pretrained, **model_args)
-
-
-@register_model
-def tv_resnet152(pretrained=False, **kwargs):
- """Constructs a ResNet-152 model w/ Torchvision pretrained weights.
- """
- model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs)
- return _create_resnet('tv_resnet152', pretrained, **model_args)
-
-
-@register_model
-def wide_resnet50_2(pretrained=False, **kwargs):
- """Constructs a Wide ResNet-50-2 model.
- The model is the same as ResNet except for the bottleneck number of channels
- which is twice larger in every block. The number of channels in outer 1x1
- convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
- channels, and in Wide ResNet-50-2 has 2048-1024-2048.
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], base_width=128, **kwargs)
- return _create_resnet('wide_resnet50_2', pretrained, **model_args)
-
-
-@register_model
-def wide_resnet101_2(pretrained=False, **kwargs):
- """Constructs a Wide ResNet-101-2 model.
- The model is the same as ResNet except for the bottleneck number of channels
- which is twice larger in every block. The number of channels in outer 1x1
- convolutions is the same.
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], base_width=128, **kwargs)
- return _create_resnet('wide_resnet101_2', pretrained, **model_args)
-
-
-@register_model
-def resnext50_32x4d(pretrained=False, **kwargs):
- """Constructs a ResNeXt50-32x4d model.
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs)
- return _create_resnet('resnext50_32x4d', pretrained, **model_args)
-
-
-@register_model
-def resnext50d_32x4d(pretrained=False, **kwargs):
- """Constructs a ResNeXt50d-32x4d model. ResNext50 w/ deep stem & avg pool downsample
- """
- model_args = dict(
- block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4,
- stem_width=32, stem_type='deep', avg_down=True, **kwargs)
- return _create_resnet('resnext50d_32x4d', pretrained, **model_args)
-
-
-@register_model
-def resnext101_32x4d(pretrained=False, **kwargs):
- """Constructs a ResNeXt-101 32x4d model.
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs)
- return _create_resnet('resnext101_32x4d', pretrained, **model_args)
-
-
-@register_model
-def resnext101_32x8d(pretrained=False, **kwargs):
- """Constructs a ResNeXt-101 32x8d model.
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs)
- return _create_resnet('resnext101_32x8d', pretrained, **model_args)
-
-
-@register_model
-def resnext101_64x4d(pretrained=False, **kwargs):
- """Constructs a ResNeXt101-64x4d model.
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4, **kwargs)
- return _create_resnet('resnext101_64x4d', pretrained, **model_args)
-
-
-@register_model
-def tv_resnext50_32x4d(pretrained=False, **kwargs):
- """Constructs a ResNeXt50-32x4d model with original Torchvision weights.
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs)
- return _create_resnet('tv_resnext50_32x4d', pretrained, **model_args)
-
-
-@register_model
-def ig_resnext101_32x8d(pretrained=True, **kwargs):
- """Constructs a ResNeXt-101 32x8 model pre-trained on weakly-supervised data
- and finetuned on ImageNet from Figure 5 in
- `"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
- Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs)
- return _create_resnet('ig_resnext101_32x8d', pretrained, **model_args)
-
-
-@register_model
-def ig_resnext101_32x16d(pretrained=True, **kwargs):
- """Constructs a ResNeXt-101 32x16 model pre-trained on weakly-supervised data
- and finetuned on ImageNet from Figure 5 in
- `"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
- Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs)
- return _create_resnet('ig_resnext101_32x16d', pretrained, **model_args)
-
-
-@register_model
-def ig_resnext101_32x32d(pretrained=True, **kwargs):
- """Constructs a ResNeXt-101 32x32 model pre-trained on weakly-supervised data
- and finetuned on ImageNet from Figure 5 in
- `"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
- Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=32, **kwargs)
- return _create_resnet('ig_resnext101_32x32d', pretrained, **model_args)
-
-
-@register_model
-def ig_resnext101_32x48d(pretrained=True, **kwargs):
- """Constructs a ResNeXt-101 32x48 model pre-trained on weakly-supervised data
- and finetuned on ImageNet from Figure 5 in
- `"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
- Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=48, **kwargs)
- return _create_resnet('ig_resnext101_32x48d', pretrained, **model_args)
-
-
-@register_model
-def ssl_resnet18(pretrained=True, **kwargs):
- """Constructs a semi-supervised ResNet-18 model pre-trained on YFCC100M dataset and finetuned on ImageNet
- `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
- Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
- """
- model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs)
- return _create_resnet('ssl_resnet18', pretrained, **model_args)
-
-
-@register_model
-def ssl_resnet50(pretrained=True, **kwargs):
- """Constructs a semi-supervised ResNet-50 model pre-trained on YFCC100M dataset and finetuned on ImageNet
- `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
- Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
- return _create_resnet('ssl_resnet50', pretrained, **model_args)
-
-
-@register_model
-def ssl_resnext50_32x4d(pretrained=True, **kwargs):
- """Constructs a semi-supervised ResNeXt-50 32x4 model pre-trained on YFCC100M dataset and finetuned on ImageNet
- `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
- Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs)
- return _create_resnet('ssl_resnext50_32x4d', pretrained, **model_args)
-
-
-@register_model
-def ssl_resnext101_32x4d(pretrained=True, **kwargs):
- """Constructs a semi-supervised ResNeXt-101 32x4 model pre-trained on YFCC100M dataset and finetuned on ImageNet
- `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
- Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs)
- return _create_resnet('ssl_resnext101_32x4d', pretrained, **model_args)
-
-
-@register_model
-def ssl_resnext101_32x8d(pretrained=True, **kwargs):
- """Constructs a semi-supervised ResNeXt-101 32x8 model pre-trained on YFCC100M dataset and finetuned on ImageNet
- `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
- Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs)
- return _create_resnet('ssl_resnext101_32x8d', pretrained, **model_args)
-
-
-@register_model
-def ssl_resnext101_32x16d(pretrained=True, **kwargs):
- """Constructs a semi-supervised ResNeXt-101 32x16 model pre-trained on YFCC100M dataset and finetuned on ImageNet
- `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
- Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs)
- return _create_resnet('ssl_resnext101_32x16d', pretrained, **model_args)
-
-
-@register_model
-def swsl_resnet18(pretrained=True, **kwargs):
- """Constructs a semi-weakly supervised Resnet-18 model pre-trained on 1B weakly supervised
- image dataset and finetuned on ImageNet.
- `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
- Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
- """
- model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs)
- return _create_resnet('swsl_resnet18', pretrained, **model_args)
-
-
-@register_model
-def swsl_resnet50(pretrained=True, **kwargs):
- """Constructs a semi-weakly supervised ResNet-50 model pre-trained on 1B weakly supervised
- image dataset and finetuned on ImageNet.
- `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
- Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
- return _create_resnet('swsl_resnet50', pretrained, **model_args)
-
-
-@register_model
-def swsl_resnext50_32x4d(pretrained=True, **kwargs):
- """Constructs a semi-weakly supervised ResNeXt-50 32x4 model pre-trained on 1B weakly supervised
- image dataset and finetuned on ImageNet.
- `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
- Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs)
- return _create_resnet('swsl_resnext50_32x4d', pretrained, **model_args)
-
-
-@register_model
-def swsl_resnext101_32x4d(pretrained=True, **kwargs):
- """Constructs a semi-weakly supervised ResNeXt-101 32x4 model pre-trained on 1B weakly supervised
- image dataset and finetuned on ImageNet.
- `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
- Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs)
- return _create_resnet('swsl_resnext101_32x4d', pretrained, **model_args)
-
-
-@register_model
-def swsl_resnext101_32x8d(pretrained=True, **kwargs):
- """Constructs a semi-weakly supervised ResNeXt-101 32x8 model pre-trained on 1B weakly supervised
- image dataset and finetuned on ImageNet.
- `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
- Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs)
- return _create_resnet('swsl_resnext101_32x8d', pretrained, **model_args)
-
-
-@register_model
-def swsl_resnext101_32x16d(pretrained=True, **kwargs):
- """Constructs a semi-weakly supervised ResNeXt-101 32x16 model pre-trained on 1B weakly supervised
- image dataset and finetuned on ImageNet.
- `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
- Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs)
- return _create_resnet('swsl_resnext101_32x16d', pretrained, **model_args)
-
-
-@register_model
-def ecaresnet26t(pretrained=False, **kwargs):
- """Constructs an ECA-ResNet-26-T model.
- This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels
- in the deep stem and ECA attn.
- """
- model_args = dict(
- block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32,
- stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs)
- return _create_resnet('ecaresnet26t', pretrained, **model_args)
-
-
-@register_model
-def ecaresnet50d(pretrained=False, **kwargs):
- """Constructs a ResNet-50-D model with eca.
- """
- model_args = dict(
- block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True,
- block_args=dict(attn_layer='eca'), **kwargs)
- return _create_resnet('ecaresnet50d', pretrained, **model_args)
-
-
-@register_model
-def resnetrs50(pretrained=False, **kwargs):
- """Constructs a ResNet-RS-50 model.
- Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
- Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
- """
- attn_layer = partial(get_attn('se'), rd_ratio=0.25)
- model_args = dict(
- block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', replace_stem_pool=True,
- avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
- return _create_resnet('resnetrs50', pretrained, **model_args)
-
-
-@register_model
-def resnetrs101(pretrained=False, **kwargs):
- """Constructs a ResNet-RS-101 model.
- Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
- Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
- """
- attn_layer = partial(get_attn('se'), rd_ratio=0.25)
- model_args = dict(
- block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', replace_stem_pool=True,
- avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
- return _create_resnet('resnetrs101', pretrained, **model_args)
-
-
-@register_model
-def resnetrs152(pretrained=False, **kwargs):
- """Constructs a ResNet-RS-152 model.
- Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
- Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
- """
- attn_layer = partial(get_attn('se'), rd_ratio=0.25)
- model_args = dict(
- block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True,
- avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
- return _create_resnet('resnetrs152', pretrained, **model_args)
-
-
-@register_model
-def resnetrs200(pretrained=False, **kwargs):
- """Constructs a ResNet-RS-200 model.
- Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
- Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
- """
- attn_layer = partial(get_attn('se'), rd_ratio=0.25)
- model_args = dict(
- block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True,
- avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
- return _create_resnet('resnetrs200', pretrained, **model_args)
-
-
-@register_model
-def resnetrs270(pretrained=False, **kwargs):
- """Constructs a ResNet-RS-270 model.
- Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
- Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
- """
- attn_layer = partial(get_attn('se'), rd_ratio=0.25)
- model_args = dict(
- block=Bottleneck, layers=[4, 29, 53, 4], stem_width=32, stem_type='deep', replace_stem_pool=True,
- avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
- return _create_resnet('resnetrs270', pretrained, **model_args)
-
-
-
-@register_model
-def resnetrs350(pretrained=False, **kwargs):
- """Constructs a ResNet-RS-350 model.
- Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
- Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
- """
- attn_layer = partial(get_attn('se'), rd_ratio=0.25)
- model_args = dict(
- block=Bottleneck, layers=[4, 36, 72, 4], stem_width=32, stem_type='deep', replace_stem_pool=True,
- avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
- return _create_resnet('resnetrs350', pretrained, **model_args)
-
-
-@register_model
-def resnetrs420(pretrained=False, **kwargs):
- """Constructs a ResNet-RS-420 model
- Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
- Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
- """
- attn_layer = partial(get_attn('se'), rd_ratio=0.25)
- model_args = dict(
- block=Bottleneck, layers=[4, 44, 87, 4], stem_width=32, stem_type='deep', replace_stem_pool=True,
- avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
- return _create_resnet('resnetrs420', pretrained, **model_args)
-
-
-@register_model
-def ecaresnet50d_pruned(pretrained=False, **kwargs):
- """Constructs a ResNet-50-D model pruned with eca.
- The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf
- """
- model_args = dict(
- block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True,
- block_args=dict(attn_layer='eca'), **kwargs)
- return _create_resnet('ecaresnet50d_pruned', pretrained, pruned=True, **model_args)
-
-
-@register_model
-def ecaresnet50t(pretrained=False, **kwargs):
- """Constructs an ECA-ResNet-50-T model.
- Like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem and ECA attn.
- """
- model_args = dict(
- block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32,
- stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs)
- return _create_resnet('ecaresnet50t', pretrained, **model_args)
-
-
-@register_model
-def ecaresnetlight(pretrained=False, **kwargs):
- """Constructs a ResNet-50-D light model with eca.
- """
- model_args = dict(
- block=Bottleneck, layers=[1, 1, 11, 3], stem_width=32, avg_down=True,
- block_args=dict(attn_layer='eca'), **kwargs)
- return _create_resnet('ecaresnetlight', pretrained, **model_args)
-
-
-@register_model
-def ecaresnet101d(pretrained=False, **kwargs):
- """Constructs a ResNet-101-D model with eca.
- """
- model_args = dict(
- block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True,
- block_args=dict(attn_layer='eca'), **kwargs)
- return _create_resnet('ecaresnet101d', pretrained, **model_args)
-
-
-@register_model
-def ecaresnet101d_pruned(pretrained=False, **kwargs):
- """Constructs a ResNet-101-D model pruned with eca.
- The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf
- """
- model_args = dict(
- block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True,
- block_args=dict(attn_layer='eca'), **kwargs)
- return _create_resnet('ecaresnet101d_pruned', pretrained, pruned=True, **model_args)
-
-
-@register_model
-def ecaresnet200d(pretrained=False, **kwargs):
- """Constructs a ResNet-200-D model with ECA.
- """
- model_args = dict(
- block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True,
- block_args=dict(attn_layer='eca'), **kwargs)
- return _create_resnet('ecaresnet200d', pretrained, **model_args)
-
-
-@register_model
-def ecaresnet269d(pretrained=False, **kwargs):
- """Constructs a ResNet-269-D model with ECA.
- """
- model_args = dict(
- block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True,
- block_args=dict(attn_layer='eca'), **kwargs)
- return _create_resnet('ecaresnet269d', pretrained, **model_args)
-
-
-@register_model
-def ecaresnext26t_32x4d(pretrained=False, **kwargs):
- """Constructs an ECA-ResNeXt-26-T model.
- This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels
- in the deep stem. This model replaces the SE module with the ECA module.
- """
- model_args = dict(
- block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32,
- stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs)
- return _create_resnet('ecaresnext26t_32x4d', pretrained, **model_args)
-
-
-@register_model
-def ecaresnext50t_32x4d(pretrained=False, **kwargs):
- """Constructs an ECA-ResNeXt-50-T model.
- This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels
- in the deep stem. This model replaces the SE module with the ECA module.
- """
- model_args = dict(
- block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32,
- stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs)
- return _create_resnet('ecaresnext50t_32x4d', pretrained, **model_args)
-
-
-@register_model
-def resnetblur18(pretrained=False, **kwargs):
- """Constructs a ResNet-18 model with blur anti-aliasing
- """
- model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], aa_layer=BlurPool2d, **kwargs)
- return _create_resnet('resnetblur18', pretrained, **model_args)
-
-
-@register_model
-def resnetblur50(pretrained=False, **kwargs):
- """Constructs a ResNet-50 model with blur anti-aliasing
- """
- model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=BlurPool2d, **kwargs)
- return _create_resnet('resnetblur50', pretrained, **model_args)
-
-
-@register_model
-def seresnet18(pretrained=False, **kwargs):
- model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], block_args=dict(attn_layer='se'), **kwargs)
- return _create_resnet('seresnet18', pretrained, **model_args)
-
-
-@register_model
-def seresnet34(pretrained=False, **kwargs):
- model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'), **kwargs)
- return _create_resnet('seresnet34', pretrained, **model_args)
-
-
-@register_model
-def seresnet50(pretrained=False, **kwargs):
- model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'), **kwargs)
- return _create_resnet('seresnet50', pretrained, **model_args)
-
-
-@register_model
-def seresnet50t(pretrained=False, **kwargs):
- model_args = dict(
- block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True,
- block_args=dict(attn_layer='se'), **kwargs)
- return _create_resnet('seresnet50t', pretrained, **model_args)
-
-
-@register_model
-def seresnet101(pretrained=False, **kwargs):
- model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], block_args=dict(attn_layer='se'), **kwargs)
- return _create_resnet('seresnet101', pretrained, **model_args)
-
-
-@register_model
-def seresnet152(pretrained=False, **kwargs):
- model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], block_args=dict(attn_layer='se'), **kwargs)
- return _create_resnet('seresnet152', pretrained, **model_args)
-
-
-@register_model
-def seresnet152d(pretrained=False, **kwargs):
- model_args = dict(
- block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True,
- block_args=dict(attn_layer='se'), **kwargs)
- return _create_resnet('seresnet152d', pretrained, **model_args)
-
-
-@register_model
-def seresnet200d(pretrained=False, **kwargs):
- """Constructs a ResNet-200-D model with SE attn.
- """
- model_args = dict(
- block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True,
- block_args=dict(attn_layer='se'), **kwargs)
- return _create_resnet('seresnet200d', pretrained, **model_args)
-
-
-@register_model
-def seresnet269d(pretrained=False, **kwargs):
- """Constructs a ResNet-269-D model with SE attn.
- """
- model_args = dict(
- block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True,
- block_args=dict(attn_layer='se'), **kwargs)
- return _create_resnet('seresnet269d', pretrained, **model_args)
-
-
-@register_model
-def seresnext26d_32x4d(pretrained=False, **kwargs):
- """Constructs a SE-ResNeXt-26-D model.
- This is technically a 28 layer ResNet, using the 'D' modifier from Gluon / bag-of-tricks for
- combination of deep stem and avg_pool in downsample.
- """
- model_args = dict(
- block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32,
- stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'), **kwargs)
- return _create_resnet('seresnext26d_32x4d', pretrained, **model_args)
-
-
-@register_model
-def seresnext26t_32x4d(pretrained=False, **kwargs):
- """Constructs a SE-ResNeXt-26-T model.
- This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels
- in the deep stem.
- """
- model_args = dict(
- block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32,
- stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='se'), **kwargs)
- return _create_resnet('seresnext26t_32x4d', pretrained, **model_args)
-
-
-@register_model
-def seresnext26tn_32x4d(pretrained=False, **kwargs):
- """Constructs a SE-ResNeXt-26-T model.
- NOTE I deprecated previous 't' model defs and replaced 't' with 'tn', this was the only tn model of note
- so keeping this def for backwards compat with any uses out there. Old 't' model is lost.
- """
- return seresnext26t_32x4d(pretrained=pretrained, **kwargs)
-
-
-@register_model
-def seresnext50_32x4d(pretrained=False, **kwargs):
- model_args = dict(
- block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4,
- block_args=dict(attn_layer='se'), **kwargs)
- return _create_resnet('seresnext50_32x4d', pretrained, **model_args)
-
-
-@register_model
-def seresnext101_32x4d(pretrained=False, **kwargs):
- model_args = dict(
- block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4,
- block_args=dict(attn_layer='se'), **kwargs)
- return _create_resnet('seresnext101_32x4d', pretrained, **model_args)
-
-
-@register_model
-def seresnext101_32x8d(pretrained=False, **kwargs):
- model_args = dict(
- block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8,
- block_args=dict(attn_layer='se'), **kwargs)
- return _create_resnet('seresnext101_32x8d', pretrained, **model_args)
-
-
-@register_model
-def senet154(pretrained=False, **kwargs):
- model_args = dict(
- block=Bottleneck, layers=[3, 8, 36, 3], cardinality=64, base_width=4, stem_type='deep',
- down_kernel_size=3, block_reduce_first=2, block_args=dict(attn_layer='se'), **kwargs)
- return _create_resnet('senet154', pretrained, **model_args)
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/normalbae/models/baseline.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/normalbae/models/baseline.py
deleted file mode 100644
index 602d0fbdac1acc9ede9bc1f2e10a5df78831ce9d..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/normalbae/models/baseline.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .submodules.submodules import UpSampleBN, norm_normalize
-
-
-# This is the baseline encoder-decoder we used in the ablation study
-class NNET(nn.Module):
- def __init__(self, args=None):
- super(NNET, self).__init__()
- self.encoder = Encoder()
- self.decoder = Decoder(num_classes=4)
-
- def forward(self, x, **kwargs):
- out = self.decoder(self.encoder(x), **kwargs)
-
- # Bilinearly upsample the output to match the input resolution
- up_out = F.interpolate(out, size=[x.size(2), x.size(3)], mode='bilinear', align_corners=False)
-
- # L2-normalize the first three channels / ensure positive value for concentration parameters (kappa)
- up_out = norm_normalize(up_out)
- return up_out
-
- def get_1x_lr_params(self): # lr/10 learning rate
- return self.encoder.parameters()
-
- def get_10x_lr_params(self): # lr learning rate
- modules = [self.decoder]
- for m in modules:
- yield from m.parameters()
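-# Illustrative optimizer wiring for the 1x/10x parameter split above (an assumption about
-# typical usage, not something defined in this file): the pretrained encoder trains at a
-# 10x smaller learning rate than the randomly initialised decoder, e.g.
-#   optimizer = torch.optim.AdamW([
-#       {'params': model.get_1x_lr_params(), 'lr': base_lr / 10},
-#       {'params': model.get_10x_lr_params(), 'lr': base_lr}])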
-
-
-# Encoder
-class Encoder(nn.Module):
- def __init__(self):
- super(Encoder, self).__init__()
-
- basemodel_name = 'tf_efficientnet_b5_ap'
- basemodel = torch.hub.load('rwightman/gen-efficientnet-pytorch', basemodel_name, pretrained=True)
-
- # Remove last layer
- basemodel.global_pool = nn.Identity()
- basemodel.classifier = nn.Identity()
-
- self.original_model = basemodel
-
- def forward(self, x):
- features = [x]
- for k, v in self.original_model._modules.items():
- if (k == 'blocks'):
- for ki, vi in v._modules.items():
- features.append(vi(features[-1]))
- else:
- features.append(v(features[-1]))
- return features
-
-
-# Decoder (no pixel-wise MLP, no uncertainty-guided sampling)
-class Decoder(nn.Module):
- def __init__(self, num_classes=4):
- super(Decoder, self).__init__()
- self.conv2 = nn.Conv2d(2048, 2048, kernel_size=1, stride=1, padding=0)
- self.up1 = UpSampleBN(skip_input=2048 + 176, output_features=1024)
- self.up2 = UpSampleBN(skip_input=1024 + 64, output_features=512)
- self.up3 = UpSampleBN(skip_input=512 + 40, output_features=256)
- self.up4 = UpSampleBN(skip_input=256 + 24, output_features=128)
- self.conv3 = nn.Conv2d(128, num_classes, kernel_size=3, stride=1, padding=1)
-
- def forward(self, features):
- # Skip connections from the EfficientNet-B5 feature list; per the UpSampleBN/conv2
- # definitions above these are expected to carry 24, 40, 64, 176 and 2048 channels.
- x_block0, x_block1, x_block2, x_block3, x_block4 = features[4], features[5], features[6], features[8], features[11]
- x_d0 = self.conv2(x_block4)
- x_d1 = self.up1(x_d0, x_block3)
- x_d2 = self.up2(x_d1, x_block2)
- x_d3 = self.up3(x_d2, x_block1)
- x_d4 = self.up4(x_d3, x_block0)
- out = self.conv3(x_d4)
- return out
-
-
-if __name__ == '__main__':
- model = NNET()
- x = torch.rand(2, 3, 480, 640)
- out = model(x)
- print(out.shape)
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/configs/_base_/models/danet_r50-d8.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/configs/_base_/models/danet_r50-d8.py
deleted file mode 100644
index 2c934939fac48525f22ad86f489a041dd7db7d09..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/configs/_base_/models/danet_r50-d8.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# model settings
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(
- type='EncoderDecoder',
- pretrained='open-mmlab://resnet50_v1c',
- backbone=dict(
- type='ResNetV1c',
- depth=50,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- dilations=(1, 1, 2, 4),
- strides=(1, 2, 1, 1),
- norm_cfg=norm_cfg,
- norm_eval=False,
- style='pytorch',
- contract_dilation=True),
- decode_head=dict(
- type='DAHead',
- in_channels=2048,
- in_index=3,
- channels=512,
- pam_channels=64,
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
- auxiliary_head=dict(
- type='FCNHead',
- in_channels=1024,
- in_index=2,
- channels=256,
- num_convs=1,
- concat_input=False,
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
- # model training and testing settings
- train_cfg=dict(),
- test_cfg=dict(mode='whole'))
diff --git a/spaces/crashedice/signify/signify/gan/models/template_model.py b/spaces/crashedice/signify/signify/gan/models/template_model.py
deleted file mode 100644
index 6d5f15f0a194c8e2b482526873693e3c32d2a4a5..0000000000000000000000000000000000000000
--- a/spaces/crashedice/signify/signify/gan/models/template_model.py
+++ /dev/null
@@ -1,99 +0,0 @@
-"""Model class template
-
-This module provides a template for users to implement custom models.
-You can specify '--model template' to use this model.
-The class name should be consistent with both the filename and its model option.
-The filename should be <model>_model.py
-The class name should be <Model>Model
-It implements a simple image-to-image translation baseline based on regression loss.
-Given input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss:
- min_<netG> ||netG(data_A) - data_B||_1
-You need to implement the following functions:
- <modify_commandline_options>: Add model-specific options and rewrite default values for existing options.
- <__init__>: Initialize this model class.
- <set_input>: Unpack input data and perform data pre-processing.
- <forward>: Run forward pass. This will be called by both <optimize_parameters> and <test>.
- <optimize_parameters>: Update network weights; it will be called in every training iteration.
-"""
-import torch
-from signify.gan.models.base_model import BaseModel
-from signify.gan.models import networks
-
-
-class TemplateModel(BaseModel):
- @staticmethod
- def modify_commandline_options(parser, is_train=True):
- """Add new model-specific options and rewrite default values for existing options.
-
- Parameters:
- parser -- the option parser
- is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options.
-
- Returns:
- the modified parser.
- """
- parser.set_defaults(dataset_mode='aligned') # You can rewrite default values for this model. For example, this model usually uses aligned dataset as its dataset.
- if is_train:
- parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss') # You can define new arguments for this model.
-
- return parser
-
- def __init__(self, opt):
- """Initialize this model class.
-
- Parameters:
- opt -- training/test options
-
- A few things can be done here.
- - (required) call the initialization function of BaseModel
- - define loss function, visualization images, model names, and optimizers
- """
- BaseModel.__init__(self, opt) # call the initialization method of BaseModel
- # specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
- self.loss_names = ['loss_G']
- # specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
- self.visual_names = ['data_A', 'data_B', 'output']
- # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
- # you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
- self.model_names = ['G']
- # define networks; you can use opt.isTrain to specify different behaviors for training and test.
- self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids)
- if self.isTrain: # only defined during training time
- # define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss.
- # We also provide a GANLoss class "networks.GANLoss". self.criterionGAN = networks.GANLoss().to(self.device)
- self.criterionLoss = torch.nn.L1Loss()
- # define and initialize optimizers. You can define one optimizer for each network.
- # If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
- self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
- self.optimizers = [self.optimizer]
-
- # Our program will automatically call <model.setup> to define schedulers, load networks, and print networks
-
- def set_input(self, input):
- """Unpack input data from the dataloader and perform necessary pre-processing steps.
-
- Parameters:
- input: a dictionary that contains the data itself and its metadata information.
- """
- AtoB = self.opt.direction == 'AtoB' # use <direction> to swap data_A and data_B
- self.data_A = input['A' if AtoB else 'B'].to(self.device) # get image data A
- self.data_B = input['B' if AtoB else 'A'].to(self.device) # get image data B
- self.image_paths = input['A_paths' if AtoB else 'B_paths'] # get image paths
-
- def forward(self):
- """Run forward pass. This will be called by both functions and ."""
- self.output = self.netG(self.data_A) # generate output image given the input data_A
-
- def backward(self):
- """Calculate losses, gradients, and update network weights; called in every training iteration"""
- # calculate the intermediate results if necessary; here self.output has been computed during <forward>
- # calculate loss given the input and intermediate results
- self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression
- self.loss_G.backward() # calculate gradients of network G w.r.t. loss_G
-
- def optimize_parameters(self):
- """Update network weights; it will be called in every training iteration."""
- self.forward() # first call forward to calculate intermediate results
- self.optimizer.zero_grad() # clear network G's existing gradients
- self.backward() # calculate gradients for network G
- self.optimizer.step() # update network G's weights
diff --git a/spaces/crimeacs/phase-hunter/app.py b/spaces/crimeacs/phase-hunter/app.py
deleted file mode 100644
index b0b0dacd21d3fe2a513419bc3de761d80d07cfb4..0000000000000000000000000000000000000000
--- a/spaces/crimeacs/phase-hunter/app.py
+++ /dev/null
@@ -1,815 +0,0 @@
-# Gradio app that takes seismic waveform as input and marks 2 phases on the waveform as output.
-
-import gradio as gr
-import numpy as np
-import pandas as pd
-from phasehunter.data_preparation import prepare_waveform
-import torch
-import io
-
-from scipy.stats import gaussian_kde
-from scipy.signal import resample
-from scipy.interpolate import interp1d
-
-from bmi_topography import Topography
-import earthpy.spatial as es
-
-import obspy
-from obspy.clients.fdsn import Client
-from obspy.clients.fdsn.header import FDSNNoDataException, FDSNTimeoutException, FDSNInternalServerException
-from obspy.geodetics.base import locations2degrees
-from obspy.taup import TauPyModel
-from obspy.taup.helper_classes import SlownessModelError
-
-from obspy.clients.fdsn.header import URL_MAPPINGS
-
-import matplotlib.pyplot as plt
-import matplotlib.dates as mdates
-from mpl_toolkits.axes_grid1 import ImageGrid
-
-from glob import glob
-
-from matplotlib import colors, cm
-from scipy.interpolate import griddata
-
-def resample_waveform(waveform, original_freq, target_freq):
- """
- Resample a waveform from original frequency to target frequency using SciPy's resample function.
-
- Args:
- waveform (numpy.ndarray): The input waveform as a 1D array.
- original_freq (float): The original sampling frequency of the waveform.
- target_freq (float): The target sampling frequency of the waveform.
-
- Returns:
- resampled_waveform (numpy.ndarray): The resampled waveform as a 1D array.
- """
- # Calculate the resampling ratio
- resampling_ratio = target_freq / original_freq
- # Calculate the new length of the resampled waveform
- resampled_length = int(waveform.shape[-1] * resampling_ratio)
- # Resample the waveform using SciPy's resample function
- resampled_waveform = resample(waveform, resampled_length, axis=-1)
-
- return resampled_waveform
-
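-# Example: downsample a 200 Hz record to the 100 Hz rate expected by the model:
-#   waveform_100hz = resample_waveform(waveform, original_freq=200, target_freq=100)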
-def sort_channels_to_ZNE(waveform, channels):
- # Input:
- # waveform: a 2D numpy array with shape (3, n), where n is the number of samples
- # channels: a list or tuple of 3 strings representing the channel order, e.g. ('N', 'Z', 'E')
- channels = list(channels)
-
- if len(channels) != 3 or set(channels) != {'Z', 'N', 'E'}:
- raise ValueError("Invalid channel input. It should be a permutation of 'Z', 'N', and 'E'.")
-
- # Find the indices of the Z, N, and E channels
- z_index = channels.index('Z')
- n_index = channels.index('N')
- e_index = channels.index('E')
-
- print(z_index, n_index, e_index)
- # Sort the channels to ZNE
- sorted_waveform = waveform[[z_index, n_index, e_index], :]
-
- return sorted_waveform
-
-def make_prediction(waveform, sampling_rate, order):
- waveform = np.load(waveform)
- print('Loaded', waveform.shape)
-
- if len(waveform.shape) == 1:
- waveform = waveform.reshape(1, waveform.shape[0])
-
- elif waveform.shape[0] == 3:
- waveform = sort_channels_to_ZNE(waveform, order)
-
- if sampling_rate != 100:
- waveform = resample_waveform(waveform, sampling_rate, 100)
- print('Resampled', waveform.shape)
-
-
- orig_waveform = waveform[:, :6000].copy()
- processed_input = prepare_waveform(waveform)
-
- # Make prediction
- with torch.inference_mode():
- output = model(processed_input)
-
- p_phase = output[:, 0]
- s_phase = output[:, 1]
-
- return processed_input, p_phase, s_phase, orig_waveform
-
-
-def mark_phases(waveform, uploaded_file, p_thres, s_thres, sampling_rate, order):
-
- if uploaded_file is not None:
- waveform = uploaded_file.name
-
- processed_input, p_phase, s_phase, orig_waveform = make_prediction(waveform, sampling_rate, order)
-
- # Create a plot of the waveform with the phases marked
- if sum(processed_input[0][2] == 0): #if input is 1C
- fig, ax = plt.subplots(nrows=2, figsize=(10, 2), sharex=True)
-
- ax[0].plot(orig_waveform[0], color='black', lw=1)
- ax[0].set_ylabel('Norm. Ampl.')
-
- else: #if input is 3C
- fig, ax = plt.subplots(nrows=4, figsize=(10, 6), sharex=True)
- ax[0].plot(orig_waveform[0], color='black', lw=1)
- ax[1].plot(orig_waveform[1], color='black', lw=1)
- ax[2].plot(orig_waveform[2], color='black', lw=1)
-
- ax[0].set_ylabel('Z')
- ax[1].set_ylabel('N')
- ax[2].set_ylabel('E')
-
-
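- # Pick positions are predicted as a fraction of the 60 s (6000-sample) input window,
- # so std()*60 expresses the pick uncertainty in seconds before comparing it to the threshold.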
- do_we_have_p = (p_phase.std().item()*60 < p_thres)
- if do_we_have_p:
- p_phase_plot = p_phase*processed_input.shape[-1]
- p_kde = gaussian_kde(p_phase_plot)
- p_dist_space = np.linspace( min(p_phase_plot)-10, max(p_phase_plot)+10, 500 )
- ax[-1].plot( p_dist_space, p_kde(p_dist_space), color='r')
- else:
- ax[-1].text(0.5, 0.75, 'No P phase detected', horizontalalignment='center', verticalalignment='center', transform=ax[-1].transAxes)
-
- do_we_have_s = (s_phase.std().item()*60 < s_thres)
- if do_we_have_s:
- s_phase_plot = s_phase*processed_input.shape[-1]
- s_kde = gaussian_kde(s_phase_plot)
- s_dist_space = np.linspace( min(s_phase_plot)-10, max(s_phase_plot)+10, 500 )
- ax[-1].plot( s_dist_space, s_kde(s_dist_space), color='b')
-
- for a in ax:
- a.axvline(p_phase.mean()*processed_input.shape[-1], color='r', linestyle='--', label='P', alpha=do_we_have_p)
- a.axvline(s_phase.mean()*processed_input.shape[-1], color='b', linestyle='--', label='S', alpha=do_we_have_s)
- else:
- ax[-1].text(0.5, 0.25, 'No S phase detected', horizontalalignment='center', verticalalignment='center', transform=ax[-1].transAxes)
-
- ax[-1].set_xlabel('Time, samples')
- ax[-1].set_ylabel('Uncert., samples')
- ax[-1].legend()
-
- plt.subplots_adjust(hspace=0., wspace=0.)
-
- # Convert the plot to an image and return it
- fig.canvas.draw()
- image = np.array(fig.canvas.renderer.buffer_rgba())
- plt.close(fig)
- return image
-
-def bin_distances(distances, bin_size=10):
- # Bin the distances into groups of `bin_size` kilometers
- binned_distances = {}
- for i, distance in enumerate(distances):
- bin_index = distance // bin_size
- if bin_index not in binned_distances:
- binned_distances[bin_index] = (distance, i)
- elif i < binned_distances[bin_index][1]:
- binned_distances[bin_index] = (distance, i)
-
- # Select the first distance in each bin and its index
- first_distances = []
- for bin_index in binned_distances:
- first_distance, first_distance_index = binned_distances[bin_index]
- first_distances.append(first_distance_index)
-
- return first_distances
-
-def variance_coefficient(residuals):
- # calculate the variance of the residuals
- var = residuals.var()
- # scale the variance to a coefficient between 0 and 1
- coeff = 1 - (var / (residuals.max() - residuals.min()))
- return coeff
-
-def predict_on_section(client_name, timestamp, eq_lat, eq_lon, radius_km, source_depth_km, velocity_model, max_waveforms, conf_thres_P, conf_thres_S):
- distances, t0s, st_lats, st_lons, waveforms, names = [], [], [], [], [], []
-
- taup_model = TauPyModel(model=velocity_model)
- client = Client(client_name)
-
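- # 111.2 km per degree of latitude: convert the search radius from kilometers to degrees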
- window = radius_km / 111.2
- max_waveforms = int(max_waveforms)
-
- assert eq_lat - window > -90 and eq_lat + window < 90, "Latitude out of bounds"
- assert eq_lon - window > -180 and eq_lon + window < 180, "Longitude out of bounds"
-
- starttime = obspy.UTCDateTime(timestamp)
- endtime = starttime + 120
-
- try:
- print('Starting to download inventory')
- inv = client.get_stations(network="*", station="*", location="*", channel="*H*",
- starttime=starttime, endtime=endtime,
- minlatitude=(eq_lat-window), maxlatitude=(eq_lat+window),
- minlongitude=(eq_lon-window), maxlongitude=(eq_lon+window),
- level='station')
- print('Finished downloading inventory')
-
- except (IndexError, FDSNNoDataException, FDSNTimeoutException, FDSNInternalServerException):
- fig, ax = plt.subplots()
- ax.text(0.5,0.5,'Something is wrong with the data provider, try another')
- fig.canvas.draw();
- image = np.array(fig.canvas.renderer.buffer_rgba())
- plt.close(fig)
- return image
-
- waveforms = []
- cached_waveforms = glob("data/cached/*.mseed")
-
- for network in inv:
- if network.code == 'SY':
- continue
- for station in network:
- print(f"Processing {network.code}.{station.code}...")
- distance = locations2degrees(eq_lat, eq_lon, station.latitude, station.longitude)
-
- arrivals = taup_model.get_travel_times(source_depth_in_km=source_depth_km,
- distance_in_degree=distance,
- phase_list=["P", "S"])
-
- if len(arrivals) > 0:
-
- starttime = obspy.UTCDateTime(timestamp) + arrivals[0].time - 15
- endtime = starttime + 60
- try:
- filename=f'{network.code}_{station.code}_{starttime}'
- if f"data/cached/{filename}.mseed" not in cached_waveforms:
- print(f'Downloading waveform for {filename}')
- waveform = client.get_waveforms(network=network.code, station=station.code, location="*", channel="*",
- starttime=starttime, endtime=endtime)
- waveform.write(f"data/cached/{network.code}_{station.code}_{starttime}.mseed", format="MSEED")
- print('Finished downloading and caching waveform')
- else:
- print('Reading cached waveform')
- waveform = obspy.read(f"data/cached/{network.code}_{station.code}_{starttime}.mseed")
-
-
- except (IndexError, FDSNNoDataException, FDSNTimeoutException, FDSNInternalServerException):
- print(f'Skipping {network.code}_{station.code}_{starttime}')
- continue
-
- waveform = waveform.select(channel="H[BH][ZNE]")
- waveform = waveform.merge(fill_value=0)
- waveform = waveform[:3].sort(keys=['channel'], reverse=True)
-
- len_check = [len(x.data) for x in waveform]
- if len(set(len_check)) > 1:
- continue
-
- if len(waveform) == 3:
- try:
- waveform = prepare_waveform(np.stack([x.data for x in waveform]))
-
- distances.append(distance)
- t0s.append(starttime)
- st_lats.append(station.latitude)
- st_lons.append(station.longitude)
- waveforms.append(waveform)
- names.append(f"{network.code}.{station.code}")
-
- print(f"Added {network.code}.{station.code} to the list of waveforms")
-
- except:
- continue
-
-
- # If there are no waveforms, return an empty plot
- if len(waveforms) == 0:
- print('No waveforms found')
- fig, ax = plt.subplots()
- # prints "No waveforms found" on the plot aligned at center and vertically
- ax.text(0.5,0.5,'No waveforms found', horizontalalignment='center', verticalalignment='center', transform=ax.transAxes)
- fig.canvas.draw();
- image = np.array(fig.canvas.renderer.buffer_rgba())
- plt.close(fig)
-
- output_picks = pd.DataFrame()
- output_picks.to_csv('data/picks.csv', index=False)
- output_csv = 'data/picks.csv'
- return image, output_picks, output_csv
-
-
- first_distances = bin_distances(distances, bin_size=10/111.2)
-
- # Edge case when there are way too many waveforms to process
- selection_indexes = np.random.choice(first_distances,
- np.min([len(first_distances), max_waveforms]),
- replace=False)
-
- waveforms = np.array(waveforms)[selection_indexes]
- distances = np.array(distances)[selection_indexes]
- t0s = np.array(t0s)[selection_indexes]
- st_lats = np.array(st_lats)[selection_indexes]
- st_lons = np.array(st_lons)[selection_indexes]
- names = np.array(names)[selection_indexes]
-
- waveforms = [torch.tensor(waveform) for waveform in waveforms]
-
- print('Starting to run predictions')
- with torch.no_grad():
- waveforms_torch = torch.vstack(waveforms)
- output = model(waveforms_torch)
-
- p_phases = output[:, 0]
- s_phases = output[:, 1]
-
- p_phases = p_phases.reshape(len(waveforms),-1)
- s_phases = s_phases.reshape(len(waveforms),-1)
-
- # Max confidence - min variance
- p_max_confidence = p_phases.std(axis=-1).min()
- s_max_confidence = s_phases.std(axis=-1).min()
-
- print(f"Starting plotting {len(waveforms)} waveforms")
- fig, ax = plt.subplots(ncols=3, figsize=(10, 3))
-
- # Plot topography
- print('Fetching topography')
- params = Topography.DEFAULT.copy()
- extra_window = 0.5
- params["south"] = np.min([st_lats.min(), eq_lat])-extra_window
- params["north"] = np.max([st_lats.max(), eq_lat])+extra_window
- params["west"] = np.min([st_lons.min(), eq_lon])-extra_window
- params["east"] = np.max([st_lons.max(), eq_lon])+extra_window
-
- topo_map = Topography(**params)
- topo_map.fetch()
- topo_map.load()
-
- print('Plotting topo')
- hillshade = es.hillshade(topo_map.da[0], altitude=10)
-
- topo_map.da.plot(ax = ax[1], cmap='Greys', add_colorbar=False, add_labels=False)
- topo_map.da.plot(ax = ax[2], cmap='Greys', add_colorbar=False, add_labels=False)
- ax[1].imshow(hillshade, cmap="Greys", alpha=0.5)
-
- output_picks = pd.DataFrame({'station_name' : [],
- 'st_lat' : [], 'st_lon' : [],
- 'starttime' : [],
- 'p_phase, s' : [], 'p_uncertainty, s' : [],
- 's_phase, s' : [], 's_uncertainty, s' : [],
- 'velocity_p, km/s' : [], 'velocity_s, km/s' : []})
-
- for i in range(len(waveforms)):
- print(f"Plotting waveform {i+1}/{len(waveforms)}")
- current_P = p_phases[i]
- current_S = s_phases[i]
-
- x = [t0s[i] + pd.Timedelta(seconds=k/100) for k in np.linspace(0,6000,6000)]
- x = mdates.date2num(x)
-
- # Normalize confidence for the plot
- p_conf = 1/(current_P.std()/p_max_confidence).item()
- s_conf = 1/(current_S.std()/s_max_confidence).item()
-
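- # Offset (s) between the trace start time and the event origin time; added to the
- # predicted picks below so that velocities use travel time from the origin.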
- delta_t = t0s[i].timestamp - obspy.UTCDateTime(timestamp).timestamp
-
- ax[0].plot(x, waveforms[i][0, 0]*10+distances[i]*111.2, color='black', alpha=0.5, lw=1)
-
- if (current_P.std().item()*60 < conf_thres_P) or (current_S.std().item()*60 < conf_thres_S):
- ax[0].scatter(x[int(current_P.mean()*waveforms[i][0].shape[-1])], waveforms[i][0, 0].mean()+distances[i]*111.2, color='r', alpha=p_conf, marker='|')
- ax[0].scatter(x[int(current_S.mean()*waveforms[i][0].shape[-1])], waveforms[i][0, 0].mean()+distances[i]*111.2, color='b', alpha=s_conf, marker='|')
-
- velocity_p = (distances[i]*111.2)/(delta_t+current_P.mean()*60).item()
- velocity_s = (distances[i]*111.2)/(delta_t+current_S.mean()*60).item()
-
- # Generate an array from st_lat to eq_lat and from st_lon to eq_lon
- x = np.linspace(st_lons[i], eq_lon, 50)
- y = np.linspace(st_lats[i], eq_lat, 50)
-
- # Plot the array
- ax[1].scatter(x, y, c=np.zeros_like(x)+velocity_p, alpha=0.1, vmin=0, vmax=8)
- ax[2].scatter(x, y, c=np.zeros_like(x)+velocity_s, alpha=0.1, vmin=0, vmax=8)
-
- else:
- velocity_p = np.nan
- velocity_s = np.nan
-
- ax[0].set_ylabel('Z')
- print(f"Station {st_lats[i]}, {st_lons[i]} has P velocity {velocity_p} and S velocity {velocity_s}")
-
- output_picks = pd.concat([output_picks,
- pd.DataFrame({'station_name': [names[i]],
- 'st_lat' : [st_lats[i]], 'st_lon' : [st_lons[i]],
- 'starttime' : [str(t0s[i])],
- 'p_phase, s' : [(delta_t+current_P.mean()*60).item()], 'p_uncertainty, s' : [current_P.std().item()*60],
- 's_phase, s' : [(delta_t+current_S.mean()*60).item()], 's_uncertainty, s' : [current_S.std().item()*60],
- 'velocity_p, km/s' : [velocity_p], 'velocity_s, km/s' : [velocity_s]})],
- ignore_index=True) # DataFrame.append was removed in pandas 2.0; concat is the supported equivalent
-
-
- # Add legend
- ax[0].scatter(None, None, color='r', marker='|', label='P')
- ax[0].scatter(None, None, color='b', marker='|', label='S')
- ax[0].xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:%S'))
- ax[0].xaxis.set_major_locator(mdates.SecondLocator(interval=20))
- ax[0].legend()
-
- print('Plotting stations')
- for i in range(1,3):
- ax[i].scatter(st_lons, st_lats, color='b', label='Stations')
- ax[i].scatter(eq_lon, eq_lat, color='r', marker='*', label='Earthquake')
- ax[i].set_aspect('equal')
- ax[i].set_xticklabels(ax[i].get_xticks(), rotation = 50)
-
- fig.subplots_adjust(bottom=0.1, top=0.9, left=0.1, right=0.8,
- wspace=0.02, hspace=0.02)
-
- cb_ax = fig.add_axes([0.83, 0.1, 0.02, 0.8])
- cbar = fig.colorbar(ax[2].scatter(None, None, c=velocity_p, alpha=0.5, vmin=0, vmax=8), cax=cb_ax)
-
- cbar.set_label('Velocity (km/s)')
- ax[1].set_title('P Velocity')
- ax[2].set_title('S Velocity')
-
- for a in ax:
- a.tick_params(axis='both', which='major', labelsize=8)
-
- plt.subplots_adjust(hspace=0., wspace=0.5)
- fig.canvas.draw();
- image = np.array(fig.canvas.renderer.buffer_rgba())
- plt.close(fig)
-
- output_csv = f'data/velocity/{eq_lat}_{eq_lon}_{source_depth_km}_{timestamp}_{len(waveforms)}.csv'
- output_picks.to_csv(output_csv, index=False)
-
- return image, output_picks, output_csv
-
-def interpolate_vel_model(velocity_model, initial_velocity, lat_values, lon_values, depth_values, n_lat, n_lon, n_depth):
- # Create a mask for points with the initial velocity
- initial_velocity_mask = (velocity_model == initial_velocity)
-
- # Find the indices of points with non-initial velocities
- non_initial_velocity_indices = np.argwhere(~initial_velocity_mask)
-
- # Extract the coordinates and corresponding velocities of the known points
- known_points = np.column_stack([lat_values[non_initial_velocity_indices[:, 0]],
- lon_values[non_initial_velocity_indices[:, 1]],
- depth_values[non_initial_velocity_indices[:, 2]]])
-
- # Find the maximum depth in the known_points
- max_known_depth = np.max(known_points[:, 2])
-
- known_velocities = velocity_model[~initial_velocity_mask]
-
- # Create a grid of points for the entire volume
- grid_points = np.array(np.meshgrid(lat_values, lon_values, depth_values, indexing='ij')).reshape(3, -1).T
-
- # Create a mask for grid points that are deeper than the maximum known depth
- depth_mask = grid_points[:, 2] <= max_known_depth
-
- # Interpolate the velocities at the grid points
- interpolated_velocities = griddata(known_points, known_velocities, grid_points[depth_mask], method='linear')
-
- # Fill nan values with the nearest known velocities
- interpolated_velocities_filled = griddata(known_points, known_velocities, grid_points[depth_mask], method='nearest')
- interpolated_velocities[np.isnan(interpolated_velocities)] = interpolated_velocities_filled[np.isnan(interpolated_velocities)]
-
- # Initialize an array with the same length as grid_points and fill it with nan values
- interpolated_velocities_with_depth_limit = np.full(grid_points.shape[0], np.nan)
-
- # Update the array with the interpolated velocities for the masked grid points
- interpolated_velocities_with_depth_limit[depth_mask] = interpolated_velocities
-
- # Reshape the interpolated velocities to match the shape of the velocity_model
- interpolated_velocity_model = interpolated_velocities_with_depth_limit.reshape(n_lat, n_lon, n_depth)
-
- return interpolated_velocity_model
-
-
-# Function to find the closest index for a given value in an array
-def find_closest_index(array, value):
- return np.argmin(np.abs(array - value))
-
-# FIX AFTER CONFERENCE
-# def compute_velocity_model(azimuth, elevation, interpolate, n_lat, n_lon, n_depth):
-# filename = list(output_csv.temp_files)[0]
-
-# df = pd.read_csv(filename)
-# filename = filename.split('/')[-1]
-
-# # Current EQ location
-# eq_lat = float(filename.split("_")[0])
-# eq_lon = float(filename.split("_")[1])
-# eq_depth = float(filename.split("_")[2])
-
-# # Define the region of interest (latitude, longitude, and depth ranges)
-# lat_range = (np.min([df.st_lat.min(), eq_lat]), np.max([df.st_lat.max(), eq_lat]))
-# lon_range = (np.min([df.st_lon.min(), eq_lon]), np.max([df.st_lon.max(), eq_lon]))
-# depth_range = (0, 50)
-
-# # Define the number of nodes in each dimension
-# num_points = 100
-
-# taup_model = TauPyModel(model='1066a')
-
-# # Create the grid
-# lat_values = np.linspace(lat_range[0], lat_range[1], n_lat)
-# lon_values = np.linspace(lon_range[0], lon_range[1], n_lon)
-# depth_values = np.linspace(depth_range[0], depth_range[1], n_depth)
-
-# # Initialize the velocity model with constant values
-# initial_velocity = 0 # km/s, this can be P-wave or S-wave velocity
-# velocity_model = np.full((n_lat, n_lon, n_depth), initial_velocity, dtype=float)
-
-# # Loop through the stations and update the velocity model
-# for i in range(len(df)):
-# if ~np.isnan(df['velocity_p, km/s'].iloc[i]):
-
-# ray_path = taup_model.get_ray_paths_geo(source_depth_in_km=eq_depth,
-# source_latitude_in_deg=eq_lat,
-# source_longitude_in_deg=eq_lon,
-# receiver_latitude_in_deg=df.st_lat.iloc[i],
-# receiver_longitude_in_deg=df.st_lon.iloc[i],
-# phase_list=['P', 'S'])
-
-# # THERE IS A PROBLEM WITH THE RAY PATHS. APPARENTLY LAT AND LON DON'T EXIST (HOW DID IT WORK BEFORE?)
-# print(ray_path[0].path)
-
-# # Create the interpolator objects for latitude, longitude, and depth
-# interp_latitude = interp1d(np.linspace(0, ray_path[0].path['lat'].max(), len(ray_path[0].path['lat'])), ray_path[0].path['lat'])
-# interp_longitude = interp1d(np.linspace(0, ray_path[0].path['lon'].max(), len(ray_path[0].path['lon'])), ray_path[0].path['lon'])
-# interp_depth = interp1d(np.linspace(0, ray_path[0].path['depth'].max(), len(ray_path[0].path['depth'])), ray_path[0].path['depth'])
-
-# # Resample the ray path to N points
-# lat_values_interp = interp_latitude(np.linspace(0, ray_path[0].path['lat'].max(), num_points))
-# lon_values_interp = interp_longitude(np.linspace(0, ray_path[0].path['lon'].max(), num_points))
-# depth_values_interp = interp_depth(np.linspace(0, ray_path[0].path['depth'].max(), num_points))
-
-# # Loop through the interpolated coordinates and update the grid cells with the average P-wave velocity
-# for lat, lon, depth in zip(lat_values_interp, lon_values_interp, depth_values_interp):
-# lat_index = find_closest_index(lat_values, lat)
-# lon_index = find_closest_index(lon_values, lon)
-# depth_index = find_closest_index(depth_values, depth)
-
-# if velocity_model[lat_index, lon_index, depth_index] == initial_velocity:
-# velocity_model[lat_index, lon_index, depth_index] = df['velocity_p, km/s'].iloc[i]
-# else:
-# velocity_model[lat_index, lon_index, depth_index] = (velocity_model[lat_index, lon_index, depth_index] +
-# df['velocity_p, km/s'].iloc[i]) / 2
-
-# # Create the figure and axis
-# fig = plt.figure(figsize=(8, 8))
-# ax = fig.add_subplot(111, projection='3d')
-
-# # Set the plot limits
-# ax.set_xlim3d(lat_range[0], lat_range[1])
-# ax.set_ylim3d(lon_range[0], lon_range[1])
-# ax.set_zlim3d(depth_range[1], depth_range[0])
-
-# ax.set_xlabel('Latitude')
-# ax.set_ylabel('Longitude')
-# ax.set_zlabel('Depth (km)')
-# ax.set_title('Velocity Model')
-
-# # Create the meshgrid
-# x, y, z = np.meshgrid(
-# np.linspace(lat_range[0], lat_range[1], velocity_model.shape[0]+1),
-# np.linspace(lon_range[0], lon_range[1], velocity_model.shape[1]+1),
-# np.linspace(depth_range[0], depth_range[1], velocity_model.shape[2]+1),
-# indexing='ij'
-# )
-
-# # Create the color array
-# norm = plt.Normalize(vmin=2, vmax=8)
-# colors_vel = plt.cm.plasma(norm(velocity_model))
-
-# # Plot the voxels
-# if interpolate:
-# interpolated_velocity_model = interpolate_vel_model(velocity_model, initial_velocity, lat_values, lon_values, depth_values, n_lat, n_lon, n_depth)
-# colors_interp = plt.cm.plasma(norm(interpolated_velocity_model))
-# ax.voxels(x, y, z, interpolated_velocity_model > 0, facecolors=colors_interp, alpha=0.5, edgecolor='k')
-
-# ax.voxels(x, y, z, velocity_model > 0, facecolors=colors_vel, alpha=1, edgecolor='black')
-
-# # Set the view angle
-# ax.view_init(elev=elevation, azim=azimuth)
-
-# m = cm.ScalarMappable(cmap=plt.cm.plasma, norm=norm)
-# m.set_array([])
-# plt.colorbar(m)
-
-# # Show the plot
-# fig.canvas.draw();
-# image = np.array(fig.canvas.renderer.buffer_rgba())
-# plt.close(fig)
-
-# return image
-
-model = torch.jit.load("model.pt")
-model.eval()
-
-with gr.Blocks() as demo:
- gr.HTML("""
-
-
PhaseHunter 🏹
-
-
-
-
Detect P and S seismic phases with uncertainty
-
-
Tab 1: Detect seismic phases by selecting a sample waveform or uploading your own waveform in .npy format.
-
Tab 2: Select an earthquake from the global earthquake catalogue and PhaseHunter will analyze seismic stations in the given radius.
-
Waveforms should be sampled at 100 samples/sec and have 3 (Z, N, E) or 1 (Z) channels. PhaseHunter analyzes the first 6000 samples of your file.
-
-
Please contact me at anovosel@stanford.edu with questions and feedback
-
-""")
- with gr.Tab("Try on a single station"):
- with gr.Row():
- # Define the input and output types for Gradio
- inputs = gr.Dropdown(
- ["data/sample/sample_0.npy",
- "data/sample/sample_1.npy",
- "data/sample/sample_2.npy"],
- label="Sample waveform",
- info="Select one of the samples",
- value = "data/sample/sample_0.npy"
- )
- with gr.Column(scale=1):
- P_thres_inputs = gr.Slider(minimum=0.01,
- maximum=1,
- value=0.1,
- label="P uncertainty threshold (s)",
- step=0.01,
- info="Acceptable uncertainty for P picks expressed in std() seconds",
- interactive=True,
- )
-
- S_thres_inputs = gr.Slider(minimum=0.01,
- maximum=1,
- value=0.2,
- label="S uncertainty threshold (s)",
- step=0.01,
- info="Acceptable uncertainty for S picks expressed in std() seconds",
- interactive=True,
- )
- with gr.Column(scale=1):
- upload = gr.File(label="Upload your waveform")
- with gr.Row():
- sampling_rate_inputs = gr.Slider(minimum=10,
- maximum=1000,
- value=100,
- label="Samlping rate, Hz",
- step=10,
- info="Sampling rate of the waveform",
- interactive=True,
- )
- order_input = gr.Text(value='ZNE',
- label='Channel order',
- info='Order of the channels in the waveform file (e.g. ZNE)')
-
- button = gr.Button("Predict phases")
- outputs = gr.Image(label='Waveform with Phases Marked', type='numpy', interactive=False)
-
- button.click(mark_phases, inputs=[inputs, upload,
- P_thres_inputs, S_thres_inputs,
- sampling_rate_inputs, order_input],
- outputs=outputs)
- with gr.Tab("Select earthquake from catalogue"):
-
- gr.HTML("""
-
-
Using PhaseHunter to Analyze Seismic Waveforms
-
Select an earthquake from the global earthquake catalogue (e.g. USGS) and the app will download the waveform from the FDSN client of your choice. The app will use a velocity model of your choice to select appropriate time windows for each station within a specified radius of the earthquake.
-
The app will then analyze the waveforms and mark the detected phases on the waveform. Pick data for each waveform is reported in seconds from the start of the waveform.
-
Velocities are derived from distance and travel time determined by PhaseHunter picks (v = distance/predicted_pick_time). The background of the velocity plot is colored by DEM.
-
- """)
- with gr.Row():
- with gr.Column(scale=2):
- client_inputs = gr.Dropdown(
- choices = list(URL_MAPPINGS.keys()),
- label="FDSN Client",
- info="Select one of the available FDSN clients",
- value = "IRIS",
- interactive=True
- )
-
- velocity_inputs = gr.Dropdown(
- choices = ['1066a', '1066b', 'ak135',
- 'ak135f', 'herrin', 'iasp91',
- 'jb', 'prem', 'pwdk'],
- label="1D velocity model",
- info="Velocity model for station selection",
- value = "1066a",
- interactive=True
- )
-
- with gr.Column(scale=2):
- timestamp_inputs = gr.Textbox(value='2019-07-04T17:33:49-00',
- placeholder='YYYY-MM-DDTHH:MM:SS-TZ',
- label="Timestamp",
- info="Timestamp of the earthquake",
- max_lines=1,
- interactive=True)
-
- source_depth_inputs = gr.Number(value=10,
- label="Source depth (km)",
- info="Depth of the earthquake",
- interactive=True)
-
- with gr.Column(scale=2):
- eq_lat_inputs = gr.Number(value=35.766,
- label="Latitude",
- info="Latitude of the earthquake",
- interactive=True)
-
- eq_lon_inputs = gr.Number(value=-117.605,
- label="Longitude",
- info="Longitude of the earthquake",
- interactive=True)
-
- with gr.Column(scale=2):
- radius_inputs = gr.Slider(minimum=1,
- maximum=200,
- value=50,
- label="Radius (km)",
- step=10,
- info="""Select the radius around the earthquake to download data from.\n
- Note that the larger the radius, the longer the app will take to run.""",
- interactive=True)
-
- max_waveforms_inputs = gr.Slider(minimum=1,
- maximum=100,
- value=10,
- label="Max waveforms per section",
- step=1,
- info="Maximum number of waveforms to show per section\n (to avoid long prediction times)",
- interactive=True,
- )
- with gr.Column(scale=2):
- P_thres_inputs = gr.Slider(minimum=0.01,
- maximum=1,
- value=0.1,
- label="P uncertainty threshold, s",
- step=0.01,
- info="Acceptable uncertainty for P picks expressed in std() seconds",
- interactive=True,
- )
- S_thres_inputs = gr.Slider(minimum=0.01,
- maximum=1,
- value=0.2,
- label="S uncertainty threshold, s",
- step=0.01,
- info="Acceptable uncertainty for S picks expressed in std() seconds",
- interactive=True,
- )
-
- button_phases = gr.Button("Predict phases")
- output_image = gr.Image(label='Waveforms with Phases Marked', type='numpy', interactive=False)
-
- # with gr.Row():
- # with gr.Column(scale=2):
- # azimuth_input = gr.Slider(minimum=-180, maximum=180, value=0, step=5, label="Azimuth", interactive=True)
- # elevation_input = gr.Slider(minimum=-90, maximum=90, value=30, step=5, label="Elevation", interactive=True)
-
- # with gr.Row():
- # interpolate_input = gr.Checkbox(label="Interpolate", info="Interpolate velocity model")
- # n_lat_input = gr.Slider(minimum=5, maximum=100, value=50, step=5, label="N lat", info='Number of Lat grid points', interactive=True)
- # n_lon_input = gr.Slider(minimum=5, maximum=100, value=50, step=5, label="N lon", info='Number of Lon grid points', interactive=True)
- # n_depth_input = gr.Slider(minimum=5, maximum=100, value=50, step=5, label="N depth", info='Number of Depth grid points', interactive=True)
-
- # button = gr.Button("Look at 3D Velocities")
- # outputs_vel_model = gr.Image(label="3D Velocity Model")
-
- # button.click(compute_velocity_model,
- # inputs=[azimuth_input, elevation_input,
- # interpolate_input, n_lat_input,
- # n_lon_input, n_depth_input],
- # outputs=[outputs_vel_model])
-
- with gr.Row():
- output_picks = gr.Dataframe(label='Pick data',
- type='pandas',
- interactive=False)
- output_csv = gr.File(label="Output File", file_types=[".csv"])
-
- button_phases.click(predict_on_section,
- inputs=[client_inputs, timestamp_inputs,
- eq_lat_inputs, eq_lon_inputs,
- radius_inputs, source_depth_inputs,
- velocity_inputs, max_waveforms_inputs,
- P_thres_inputs, S_thres_inputs],
- outputs=[output_image, output_picks, output_csv])
-
-demo.launch()
\ No newline at end of file
diff --git a/spaces/crylake/img2poem/query2labels/lib/models/tresnet/layers/avg_pool.py b/spaces/crylake/img2poem/query2labels/lib/models/tresnet/layers/avg_pool.py
deleted file mode 100644
index a91836f6c2310cdb9e40f8a271dbac0b613971ba..0000000000000000000000000000000000000000
--- a/spaces/crylake/img2poem/query2labels/lib/models/tresnet/layers/avg_pool.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-
-class FastAvgPool2d(nn.Module):
- def __init__(self, flatten=False):
- super(FastAvgPool2d, self).__init__()
- self.flatten = flatten
-
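- # Equivalent to nn.AdaptiveAvgPool2d(1) (optionally flattened to (N, C)): a plain
- # mean over the flattened spatial dimensions, avoiding the pooling-kernel overhead.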
- def forward(self, x):
- if self.flatten:
- in_size = x.size()
- return x.view((in_size[0], in_size[1], -1)).mean(dim=2)
- else:
- return x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0), x.size(1), 1, 1)
-
-
diff --git a/spaces/cvlab/zero123-live/CLIP/hubconf.py b/spaces/cvlab/zero123-live/CLIP/hubconf.py
deleted file mode 100644
index 520b354b62ab4d199d49462e9c65890d924c69e6..0000000000000000000000000000000000000000
--- a/spaces/cvlab/zero123-live/CLIP/hubconf.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from clip.clip import tokenize as _tokenize, load as _load, available_models as _available_models
-import re
-import string
-
-dependencies = ["torch", "torchvision", "ftfy", "regex", "tqdm"]
-
-# For compatibility (cannot include special characters in function name)
-model_functions = { model: re.sub(f'[{string.punctuation}]', '_', model) for model in _available_models()}
-
-def _create_hub_entrypoint(model):
- def entrypoint(**kwargs):
- return _load(model, **kwargs)
-
- entrypoint.__doc__ = f"""Loads the {model} CLIP model
-
- Parameters
- ----------
- device : Union[str, torch.device]
- The device to put the loaded model
-
- jit : bool
- Whether to load the optimized JIT model or more hackable non-JIT model (default).
-
- download_root: str
- path to download the model files; by default, it uses "~/.cache/clip"
-
- Returns
- -------
- model : torch.nn.Module
- The {model} CLIP model
-
- preprocess : Callable[[PIL.Image], torch.Tensor]
- A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
- """
- return entrypoint
-
-def tokenize():
- return _tokenize
-
-_entrypoints = {model_functions[model]: _create_hub_entrypoint(model) for model in _available_models()}
-
-globals().update(_entrypoints)
\ No newline at end of file
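The entrypoints generated above are intended to be consumed through `torch.hub`. A hypothetical usage sketch follows; the `openai/CLIP` repository path and the `ViT_B_32` entrypoint name are assumptions based on the punctuation-to-underscore mapping in `model_functions`, not something stated in this file.

```
import torch

# Assumed hub path; "ViT-B/32" becomes the "ViT_B_32" entrypoint after punctuation
# is replaced with underscores.
model, preprocess = torch.hub.load("openai/CLIP", "ViT_B_32", jit=False)
tokenize = torch.hub.load("openai/CLIP", "tokenize")

with torch.no_grad():
    text_features = model.encode_text(tokenize(["a diagram", "a dog"]))
print(text_features.shape)
```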
diff --git a/spaces/cvlab/zero123-live/uses.md b/spaces/cvlab/zero123-live/uses.md
deleted file mode 100644
index cc186284c21f376f3c77766cfca2f10aec29f392..0000000000000000000000000000000000000000
--- a/spaces/cvlab/zero123-live/uses.md
+++ /dev/null
@@ -1,69 +0,0 @@
-
-# Uses
-_Note: This section is originally taken from the [Stable Diffusion v2 model card](https://huggingface.co/stabilityai/stable-diffusion-2), but applies in the same way to Zero-1-to-3._
-
-## Direct Use
-The model is intended for research purposes only. Possible research areas and tasks include:
-
-- Safe deployment of large-scale models.
-- Probing and understanding the limitations and biases of generative models.
-- Generation of artworks and use in design and other artistic processes.
-- Applications in educational or creative tools.
-- Research on generative models.
-
-Excluded uses are described below.
-
-### Misuse, Malicious Use, and Out-of-Scope Use
-The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes.
-
-#### Out-of-Scope Use
-The model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model.
-
-#### Misuse and Malicious Use
-Using the model to generate content that is cruel to individuals is a misuse of this model. This includes, but is not limited to:
-
-- Generating demeaning, dehumanizing, or otherwise harmful representations of people or their environments, cultures, religions, etc.
-- Intentionally promoting or propagating discriminatory content or harmful stereotypes.
-- Impersonating individuals without their consent.
-- Sexual content without consent of the people who might see it.
-- Mis- and disinformation
-- Representations of egregious violence and gore
-- Sharing of copyrighted or licensed material in violation of its terms of use.
-- Sharing content that is an alteration of copyrighted or licensed material in violation of its terms of use.
-
-## Limitations and Bias
-
-### Limitations
-
-- The model does not achieve perfect photorealism.
-- The model cannot render legible text.
-- Faces and people in general may not be parsed or generated properly.
-- The autoencoding part of the model is lossy.
-- Stable Diffusion was trained on a subset of the large-scale dataset [LAION-5B](https://laion.ai/blog/laion-5b/), which contains adult, violent and sexual content. To partially mitigate this, Stability AI has filtered the dataset using LAION's NSFW detector.
-- Zero-1-to-3 was subsequently finetuned on a subset of the large-scale dataset [Objaverse](https://objaverse.allenai.org/), which might also potentially contain inappropriate content. To partially mitigate this, our demo applies a safety check to every uploaded image.
-
-### Bias
-While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases.
-Stable Diffusion was primarily trained on subsets of [LAION-2B(en)](https://laion.ai/blog/laion-5b/), which consists of images that are limited to English descriptions.
-Images and concepts from communities and cultures that use other languages are likely to be insufficiently accounted for.
-This affects the overall output of the model, as Western cultures are often overrepresented.
-Stable Diffusion mirrors and exacerbates biases to such a degree that viewer discretion must be advised irrespective of the input or its intent.
-
-
-### Safety Module
-The intended use of this model is with the [Safety Checker](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) in Diffusers.
-This checker works by checking model inputs against known hard-coded NSFW concepts.
-Specifically, the checker compares the class probability of harmful concepts in the embedding space of the uploaded input images.
-The concepts are passed into the model with the image and compared to a hand-engineered weight for each NSFW concept.
-
-## Citation
-```
-@misc{liu2023zero1to3,
- title={Zero-1-to-3: Zero-shot One Image to 3D Object},
- author={Ruoshi Liu and Rundi Wu and Basile Van Hoorick and Pavel Tokmakov and Sergey Zakharov and Carl Vondrick},
- year={2023},
- eprint={2303.11328},
- archivePrefix={arXiv},
- primaryClass={cs.CV}
-}
-```
diff --git a/spaces/cvlab/zero123/README.md b/spaces/cvlab/zero123/README.md
deleted file mode 100644
index f20cace77f56e174f32c8b8f83ae97d7f1fef159..0000000000000000000000000000000000000000
--- a/spaces/cvlab/zero123/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Zero123
-emoji: 👀
-colorFrom: indigo
-colorTo: purple
-sdk: gradio
-sdk_version: 3.20.1
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-Paper is from https://arxiv.org/abs/2303.11328
diff --git a/spaces/d0r1h/Hindi_News_Summarizer/app.py b/spaces/d0r1h/Hindi_News_Summarizer/app.py
deleted file mode 100644
index 783ff2d7d089e2f33eabfe3fc7e27913f0f839bb..0000000000000000000000000000000000000000
--- a/spaces/d0r1h/Hindi_News_Summarizer/app.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import gradio as gr
-from summarizer import summarize
-
-description = """
-
-SAR (सार) means "summary" in Hindi. It's a tool to summarize Hindi news with SOTA models.
-
-
-
\ No newline at end of file
diff --git a/spaces/diacanFperku/AutoGPT/Gta 4 Episodes From Liberty City Key 100 Working Utorrent.md b/spaces/diacanFperku/AutoGPT/Gta 4 Episodes From Liberty City Key 100 Working Utorrent.md
deleted file mode 100644
index 86d552d4e078941d1a6894e3c49957539994e04c..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Gta 4 Episodes From Liberty City Key 100 Working Utorrent.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
gta 4 episodes from liberty city key 100% working utorrent
'
-
diff --git a/spaces/falterWliame/Face_Mask_Detection/Cheat In Point Blank 2011 Free Download [TOP].md b/spaces/falterWliame/Face_Mask_Detection/Cheat In Point Blank 2011 Free Download [TOP].md
deleted file mode 100644
index 96d7ddbc8d9ef0056ea9211ca2642b6decb6188b..0000000000000000000000000000000000000000
--- a/spaces/falterWliame/Face_Mask_Detection/Cheat In Point Blank 2011 Free Download [TOP].md
+++ /dev/null
@@ -1,37 +0,0 @@
-
-
How to Cheat in Point Blank 2011 for Free
-
Point Blank is a popular online multiplayer first-person shooter game that features hundreds of guns, knives, and items. It is competitive for eSports tournaments and has realistic, multiple game modes and dozens of maps[^2^]. However, some players may want to cheat in Point Blank to gain an unfair advantage over their opponents. In this article, we will show you how to cheat in Point Blank 2011 for free using a tool called CyberHackID.
CyberHackID is a Point Blank cheat that provides you with ESP, Wallhack, Aimbot, CrossHair, Aim Bullet, Respawn In Death, Unlock Weapon, and many more features[^1^]. It is easy to use and undetected by the game's anti-cheat system. To download CyberHackID for free, follow these steps:
Click on the "Download" button and choose a mirror link to download the cheat file.
-
Extract the cheat file using WinRAR or any other file extractor.
-
Run the cheat file as administrator and wait for it to load.
-
Choose your desired features and settings from the cheat menu.
-
Launch Point Blank and enjoy cheating!
-
-
Note: Use CyberHackID at your own risk. We do not condone cheating in any online game and we are not responsible for any consequences that may arise from using this tool. Cheating may ruin the fun and fair play for other players and may result in your account being banned or suspended by the game developers. Please play responsibly and respect the rules of the game.
Here are some more paragraphs for the article:
-
Why do people cheat in Point Blank?
-
There are many reasons why some players may resort to cheating in Point Blank. Some of them are:
-
-
-
They want to win more matches and rank up faster.
-
They want to impress their friends or other players with their skills.
-
They want to troll or annoy other players for fun.
-
They want to test the limits of the game and explore its glitches.
-
They want to get revenge on someone who cheated against them.
-
-
However, none of these reasons justify cheating in Point Blank. Cheating is unethical and unfair to other players who play by the rules. It also diminishes the challenge and satisfaction of playing the game. Moreover, cheating may damage your computer or expose your personal information to hackers or viruses. Therefore, we strongly advise you to avoid cheating in Point Blank and enjoy the game as it is meant to be played.
-
How to prevent cheating in Point Blank?
-
If you are a legitimate player who wants to play Point Blank without encountering cheaters, there are some steps you can take to prevent cheating in the game. Some of them are:
-
-
Report any suspicious players or activities to the game moderators or administrators.
-
Avoid joining servers or rooms that have a high number of cheaters or low ping.
-
Use a reliable anti-virus software and firewall to protect your computer from malware or hacking attempts.
-
Update your game client and drivers regularly to fix any bugs or vulnerabilities.
-
Join a reputable clan or community that values fair play and sportsmanship.
-
-
By following these steps, you can reduce the chances of encountering cheaters in Point Blank and have a more enjoyable gaming experience. Remember, cheating is not cool and it does not make you a better player. It only shows that you lack skill and confidence in yourself. So, play fair and have fun!
d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/falterWliame/Face_Mask_Detection/Lideres Que Conquistan Guillermo Maldonado PDF.md b/spaces/falterWliame/Face_Mask_Detection/Lideres Que Conquistan Guillermo Maldonado PDF.md
deleted file mode 100644
index d392e1dbda596305b9a6d2a966b654ec3d9c6d32..0000000000000000000000000000000000000000
--- a/spaces/falterWliame/Face_Mask_Detection/Lideres Que Conquistan Guillermo Maldonado PDF.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
Lideres Que Conquistan: A Book by Guillermo Maldonado on How to Impact the World in This Century
-
Lideres Que Conquistan (Leaders Who Conquer) is a book by Guillermo Maldonado, the founder and senior pastor of El Rey Jesús International Ministry, a multicultural church in Miami, Florida. The book aims to inspire and equip men and women who want to make a difference in the world with the principles and values of God's kingdom.
In this book, Maldonado shares his personal testimony of how he went from being a poor and insecure young man to becoming a leader who influences millions of people around the world. He also reveals the secrets and strategies that he has learned and applied in his ministry and life to overcome challenges, develop his potential, and fulfill his purpose.
-
Some of the topics that Maldonado covers in this book are:
-
-
The characteristics and qualities of a leader who conquers
-
The importance of having a vision and a mission
-
The power of faith and prayer
-
The role of mentors and spiritual fathers
-
The keys to effective communication and teamwork
-
The challenges and opportunities of leadership in the 21st century
-
-
Lideres Que Conquistan is a book that will motivate you to pursue your dreams, discover your gifts, and transform your world with the love and power of God. You can download the PDF version of this book from Google Books or buy the paperback edition from Amazon.
-
-
-
If you are looking for a book that will challenge you to grow as a leader and a disciple of Jesus Christ, Lideres Que Conquistan is for you. This book is not only based on the author's personal experience, but also on the biblical principles and examples of leaders who conquered in their generation. You will learn from the lives of Abraham, Moses, David, Daniel, Nehemiah, Paul, and others who were called and anointed by God to lead His people.
-
Lideres Que Conquistan is not just a theory, but a practical guide that will help you to apply the lessons and principles to your own life and ministry. You will discover how to develop your character, integrity, faithfulness, humility, courage, wisdom, and authority as a leader who conquers. You will also learn how to deal with common obstacles and temptations that leaders face, such as pride, fear, criticism, discouragement, opposition, and compromise.
-
Many people who have read this book have testified how it has impacted their lives and transformed their leadership. They have found new inspiration, motivation, and direction to pursue their God-given vision and mission. They have also experienced a greater level of anointing and power to influence their sphere of society with the gospel of the kingdom.
-
Lideres Que Conquistan is a book that will ignite your passion for God and His purposes in this century. You will be equipped and empowered to become a leader who conquers in every area of your life. Don't miss this opportunity to read this book and join the army of leaders who are rising up in this generation to impact the world for God's glory.
d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Billie Eilishs Controversial Tribute to XXXTENTACION 6.18.18.md b/spaces/fatiXbelha/sd/Billie Eilishs Controversial Tribute to XXXTENTACION 6.18.18.md
deleted file mode 100644
index cc648638184e8cad16b8e50d23e9c24afd86486d..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Billie Eilishs Controversial Tribute to XXXTENTACION 6.18.18.md
+++ /dev/null
@@ -1,78 +0,0 @@
-
-
Billie Eilish 6 18 18: A Tribute to XXXTENTACION
-
Billie Eilish is one of the most popular and influential singers of the current generation. She is known for her unique and versatile style, her dark and edgy lyrics, and her expressive and powerful voice. She is also known for being a fan and a friend of XXXTENTACION, a controversial rapper who was shot and killed on June 18, 2018. In this article, we will explore the meaning and background of Billie Eilish's song 6.18.18, which is a tribute to XXXTENTACION, and how it reflects her style and personality.
-
Who was XXXTENTACION and why did Billie Eilish admire him?
-
The life and death of XXXTENTACION
-
XXXTENTACION, whose real name was Jahseh Dwayne Ricardo Onfroy, was born on January 23, 1998, in Plantation, Florida. He had a troubled childhood, facing abuse, violence, poverty, and legal issues. He started making music at a young age, influenced by rock, metal, rap, and R&B. He rose to fame in 2017 with his hit song Look at Me, which showcased his aggressive and raw style. He released two albums, 17 and ?, which were both critically acclaimed and commercially successful. He also faced controversy for his involvement in several crimes, including domestic violence, robbery, assault, and witness tampering. He was awaiting trial for these charges when he was fatally shot by two armed men outside a motorcycle dealership in Deerfield Beach, Florida, on June 18, 2018. He was pronounced dead at the hospital at the age of 20.
The musical influence and friendship of XXXTENTACION and Billie Eilish
-
Billie Eilish was a fan of XXXTENTACION's music before she became famous. She said that she was inspired by his versatility, creativity, emotion, and authenticity. She also said that he was one of the few artists who made her feel something when she listened to his songs. She reached out to him via Instagram in early 2018, and they became friends. They talked on the phone regularly, exchanged advice, and supported each other's careers. They also planned to collaborate on a song together, but they never got the chance to do so before his death.
-
What is the meaning and background of Billie Eilish's song 6.18.18?
-
The title and release date of Billie Eilish's song 6.18.18
-
The title of Billie Eilish's song 6.18.18 is a reference to the date of XXXTENTACION's death. It is also the date that she wrote the song, as a way of coping with her grief and expressing her feelings. She said that she wrote the song in less than an hour, without any editing or second thoughts. She said that she wanted to release the song as soon as possible, to honor his memory and share her tribute with his fans. She released the song on June 21, 2018, three days after his death, on her SoundCloud account. She did not promote the song or make it available on other platforms, as she said that it was not meant for commercial purposes, but for personal and emotional ones.
-
The lyrics and emotions of Billie Eilish's song 6.18.18
-
The lyrics of Billie Eilish's song 6.18.18 are simple and heartfelt, reflecting her sadness and shock over XXXTENTACION's death. She sings about how she misses him, how she wishes he was still alive, and how she hopes he is in a better place. She also sings about how she regrets not being able to tell him how much she loved him, how much he meant to her, and how much he inspired her. She also expresses her anger and frustration at the injustice and violence that took his life away. She repeats the phrase "I'm sorry" throughout the song, as a way of apologizing for not being able to save him, protect him, or say goodbye to him.
-
The performance and reception of Billie Eilish's song 6.18.18
-
Billie Eilish's performance of 6.18.18 is raw and emotional, as she sings with a soft and trembling voice, accompanied by a piano. She breaks down in tears several times during the song, showing her genuine pain and sorrow. She also dedicates the song to XXXTENTACION and his fans, saying that she loves them and that they are not alone. The reception of Billie Eilish's song 6.18.18 was mixed, as some people praised her for her courage and sincerity, while others criticized her for supporting a controversial figure who had a history of violence and abuse. Some people also accused her of using his death for publicity or clout, which she denied and condemned.
-
How does Billie Eilish's song 6.18.18 reflect her style and personality?
-
The use of metaphors and references in Billie Eilish's song 6.18.18
-
Billie Eilish is known for using metaphors and references in her songs, to create imagery and symbolism that enhance her messages and meanings. In 6.18.18, she uses several metaphors and references that relate to XXXTENTACION and his music. For example, she sings "I hope you're somewhere prayin', prayin' / I hope your soul is changin', changin'", which is a reference to XXXTENTACION's song SAD!, where he sings "I'm lost then I'm found / But it's torture bein' in love / I love when you're around / But I hate when you leave". She also sings "You were my everything / And all that ever mattered / But I don't really wanna talk about it", which is a reference to XXXTENTACION's song Jocelyn Flores, where he sings "I don't really wanna talk about it / I don't really wanna talk about it / I'm just so alone right now". She also uses metaphors such as "the sun will never shine again" and "the stars will never align again" to describe the loss of hope and happiness that she feels after his death.
-
The use of vocals and production in Billie Eilish's song 6.18.18
-
Billie Eilish is known for using vocals and production in her songs to create contrast and variety that enhance her moods and tones. In 6.18.18, she uses vocals and production that reflect her sadness and simplicity. She sings with a low-pitched and whispery voice, which conveys her vulnerability and intimacy. She uses minimal instrumentation, mainly a piano, which creates a somber and melancholic atmosphere, along with subtle effects such as reverb and distortion that add a sense of distance and detachment.
-
-
The use of honesty and vulnerability in Billie Eilish's song 6.18.18
-
Billie Eilish is known for using honesty and vulnerability in her songs, to create connection and authenticity that enhance her appeal and relatability. In 6.18.18, she uses honesty and vulnerability that reflect her grief and respect. She sings with no filter or censorship, saying what she really feels and thinks, without worrying about what others might say or think. She also exposes her emotions and weaknesses, without hiding or pretending. She admits that she is hurting, confused, angry, and sorry, without holding back or sugarcoating. She also shows her respect and admiration for XXXTENTACION, without denying or downplaying his flaws or mistakes. She acknowledges that he was a complex and controversial person, but also a talented and influential artist, and a dear friend to her.
-
Conclusion
-
Billie Eilish's song 6.18.18 is a tribute to XXXTENTACION, a rapper who was killed on June 18, 2018. The song is a reflection of Billie Eilish's style and personality, as she uses metaphors and references, vocals and production, and honesty and vulnerability to express her sadness and shock over his death. The song is also a reflection of Billie Eilish's friendship and admiration for XXXTENTACION, as she sings about how much she misses him, how much he inspired her, and how much she regrets not being able to tell him how she felt. The song is a personal and emotional piece of art, that shows Billie Eilish's courage and sincerity as a singer and a human being.
-
FAQs
-
What is the genre of Billie Eilish's song 6.18.18?
-
The genre of Billie Eilish's song 6.18.18 is alternative pop, with elements of R&B and soul.
-
Where can I listen to Billie Eilish's song 6.18.18?
-
You can listen to Billie Eilish's song 6.18.18 on her SoundCloud account, where she uploaded it on June 21, 2018.
-
Did Billie Eilish ever perform 6.18.18 live?
-
Yes, Billie Eilish performed 6.18.18 live at the Camp Flog Gnaw Carnival in Los Angeles on November 10, 2018.
-
Did XXXTENTACION ever hear Billie Eilish's music?
-
Yes, XXXTENTACION heard Billie Eilish's music before he died, and he praised her for her talent and originality. He also said that he wanted to work with her on a song.
-
What are some other songs that Billie Eilish wrote or sang about XXXTENTACION?
-
Some other songs that Billie Eilish wrote or sang about XXXTENTACION are LIMBO, a collaboration with rapper Ski Mask the Slump God and producer Danny Wolf; bitches broken hearts, which she dedicated to him on his birthday in 2019; and listen before i go, which she said was inspired by him.
-
-
\ No newline at end of file
diff --git a/spaces/fclong/summary/fengshen/models/tagging_models/losses/__init__.py b/spaces/fclong/summary/fengshen/models/tagging_models/losses/__init__.py
deleted file mode 100644
index 139597f9cb07c5d48bed18984ec4747f4b4f3438..0000000000000000000000000000000000000000
--- a/spaces/fclong/summary/fengshen/models/tagging_models/losses/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-
-
diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download 8 Standar Nasional Pendidikan 2021 PDF Kurikulum Evaluasi Akreditasi dan Sertifikasi.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download 8 Standar Nasional Pendidikan 2021 PDF Kurikulum Evaluasi Akreditasi dan Sertifikasi.md
deleted file mode 100644
index e745696372a14b3ef6a775aa71d21e104c022a28..0000000000000000000000000000000000000000
--- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download 8 Standar Nasional Pendidikan 2021 PDF Kurikulum Evaluasi Akreditasi dan Sertifikasi.md
+++ /dev/null
@@ -1,103 +0,0 @@
-
-
Download 8 Standar Nasional Pendidikan 2021 PDF
-
If you are looking for a way to download 8 Standar Nasional Pendidikan 2021 PDF, you have come to the right place. In this article, we will explain what Standar Nasional Pendidikan (SNP) is, why it is important, and how you can download it in PDF format.
-
What is Standar Nasional Pendidikan (SNP)?
-
Standar Nasional Pendidikan (SNP) is the national standard of education in Indonesia. It is a set of minimum criteria for various aspects of the education system in the country. It is used as a reference for developing curriculum, teachers, facilities, management, and funding of education.
According to Peraturan Pemerintah Republik Indonesia Nomor 57 Tahun 2021 tentang Standar Nasional Pendidikan, SNP is defined as "kriteria minimal tentang sistem pendidikan di seluruh wilayah hukum Negara Kesatuan Republik Indonesia" or "minimum criteria for the education system in all legal territories of the Unitary State of the Republic of Indonesia".
-
The scope of SNP covers eight standards, namely: (i) standar kompetensi lulusan or standard of graduate competencies; (ii) standar isi or standard of content; (iii) standar proses or standard of process; (iv) standar penilaian pendidikan or standard of education assessment; (v) standar tenaga kependidikan or standard of educators and education personnel; (vi) standar sarana dan prasarana or standard of facilities and infrastructure; (vii) standar pengelolaan or standard of management; and (viii) standar pembiayaan or standard of funding.
-
Purpose and principles of SNP
-
The purpose of SNP is to ensure the quality and equity of education in Indonesia. It aims to provide a clear and consistent framework for planning, implementing, monitoring, and evaluating education programs and activities. It also serves as a basis for accreditation and certification of education institutions and personnel.
-
-
The development and implementation of SNP are guided by ten principles, namely: umum or general, inklusif or inclusive, memantik inisiatif dan inovasi or stimulating initiative and innovation, esensial or essential, substantif or substantive, relevan dan universal or relevant and universal, selaras or harmonious, holistik or holistic, ringkas or concise, and mutakhir or up-to-date.
-
Components of SNP
-
The components of SNP are elaborated in different documents that specify the criteria, indicators, instruments, and procedures for each standard. The following table summarizes the components of SNP for each level of education:
-
-
| Level of Education | Components |
| --- | --- |
| Pendidikan Anak Usia Dini Formal dan Nonformal (Formal and Nonformal Early Childhood Education) | Standar Kompetensi Lulusan, Standar Isi, Standar Proses, Standar Penilaian, Standar Tenaga Kependidikan, Standar Sarana dan Prasarana, Standar Pengelolaan, and Standar Pembiayaan PAUD Formal dan Nonformal |
| Pendidikan Dasar (Basic Education) | Standar Kompetensi Lulusan, Standar Isi, Standar Proses, Standar Penilaian, Standar Tenaga Kependidikan, Standar Sarana dan Prasarana, Standar Pengelolaan, and Standar Pembiayaan Pendidikan Dasar |
| Pendidikan Menengah (Secondary Education) | Standar Kompetensi Lulusan, Standar Isi, Standar Proses, Standar Penilaian, Standar Tenaga Kependidikan, Standar Sarana dan Prasarana, Standar Pengelolaan, and Standar Pembiayaan Pendidikan Menengah |
| Pendidikan Tinggi (Higher Education) | Standar Nasional Pendidikan Tinggi (SNPT), Standar Nasional Akreditasi Institusi (SNAI), Standar Nasional Akreditasi Program Studi (SNAPS), Standar Nasional Akreditasi Lembaga Penelitian dan Pengabdian kepada Masyarakat (SNALPPM), and Standar Nasional Akreditasi Lembaga Layanan Dukungan (SNALLD) |
-
-
How to download 8 Standar Nasional Pendidikan 2021 PDF?
-
If you want to download 8 Standar Nasional Pendidikan 2021 PDF, you can follow these simple steps:
-
Steps to download the PDF file
-
-
Visit the official website of the Ministry of Education, Culture, Research, and Technology of the Republic of Indonesia at https://kemdikbud.go.id/.
-
On the homepage, click on the menu "Peraturan" or "Regulations".
-
On the next page, click on the submenu "Standar Nasional" or "National Standards".
-
On the next page, you will see a list of documents related to SNP for different levels of education. Choose the one that you need and click on the link.
-
On the next page, you will see a preview of the document. To download it, click on the button "Unduh" or "Download" at the top right corner.
-
The PDF file will be downloaded to your device. You can open it with any PDF reader software.
-
-
Benefits of downloading the PDF file
-
By downloading 8 Standar Nasional Pendidikan 2021 PDF, you can enjoy several benefits, such as:
-
-
You can access the document offline anytime and anywhere.
-
You can print the document for your personal use or reference.
-
You can share the document with others who are interested in SNP.
-
You can study the document in detail and understand the criteria and indicators of SNP.
-
You can use the document as a guide for improving your education quality and performance.
-
-
Conclusion
-
In conclusion, 8 Standar Nasional Pendidikan 2021 PDF is a valuable resource for anyone who is involved in or interested in education in Indonesia. It provides a clear and consistent framework for ensuring the quality and equity of education in the country. It also serves as a basis for accreditation and certification of education institutions and personnel. You can download it easily from the official website of the Ministry of Education, Culture, Research, and Technology of the Republic of Indonesia by following the steps we have explained above. We hope this article has been helpful for you. Thank you for reading!
-
FAQs
-
Here are some frequently asked questions about 8 Standar Nasional Pendidikan 2021 PDF:
-
Q: What is the difference between SNP and SNPT?
-
A: SNP is the national standard of education for all levels of education in Indonesia, while SNPT is the national standard of higher education specifically. SNPT is one of the components of SNP for higher education level.
-
Q: How often is SNP updated?
-
A: According to Peraturan Menteri Pendidikan dan Kebudayaan Republik Indonesia Nomor 9 Tahun 2021 tentang Penyempurnaan Standar Nasional Pendidikan, SNP is updated every five years or whenever necessary based on the evaluation and feedback from the stakeholders.
-
Q: How can I check the accreditation status of an education institution or program?
-
A: You can check the accreditation status of an education institution or program by visiting the official website of the National Accreditation Board for Higher Education (BAN-PT) at https://banpt.or.id/ for higher education level, or the official website of the National Accreditation Board for Nonformal and Informal Education (BAN-PNF) at https://banpnf.kemdikbud.go.id/ for nonformal and informal education level. You can search by name, location, or type of institution or program.
-
Q: How can I get a certificate of competency for educators and education personnel?
-
A: You can get a certificate of competency for educators and education personnel by participating in the competency test organized by the Ministry of Education, Culture, Research, and Technology of the Republic of Indonesia. The test is conducted online and offline, depending on the type and level of competency. You can register and access the test through the official website of the Directorate General of Teachers and Education Personnel at https://gtk.kemdikbud.go.id/.
-
Q: How can I get more information about SNP?
-
A: You can get more information about SNP by contacting the Ministry of Education, Culture, Research, and Technology of the Republic of Indonesia through the following channels:
-
-
Email: info@kemdikbud.go.id
-
Phone: 1500-895
-
Website: https://kemdikbud.go.id/
-
Social media: Facebook, Twitter, Instagram, YouTube
-
-
-
\ No newline at end of file
diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Excel List of Countries All 195 Countries with ISO Codes and Names.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Excel List of Countries All 195 Countries with ISO Codes and Names.md
deleted file mode 100644
index 59579edb41bd0525febed7fca82c32d161556e9d..0000000000000000000000000000000000000000
--- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Excel List of Countries All 195 Countries with ISO Codes and Names.md
+++ /dev/null
@@ -1,167 +0,0 @@
-
-
Download Excel List of Countries
-
If you are looking for a way to get a list of all the countries in the world in an Excel format, you have come to the right place. In this article, we will show you how to download an Excel list of countries from different sources, and what the benefits and drawbacks of each source are. We will also give you some tips on how to use the Excel lists of countries for your own purposes.
An Excel list of countries is a spreadsheet file that contains the names and codes of all the countries in the world, according to a certain standard. The most common standard used for country names and codes is ISO 3166-1, which defines two-letter (alpha-2), three-letter (alpha-3), and numeric codes for each country. For example, the ISO 3166-1 alpha-2 code for Canada is CA, the alpha-3 code is CAN, and the numeric code is 124.
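To make the relationship between the three code forms concrete, here is a minimal Python sketch; the dictionary name and the choice of countries are only illustrative, while the code values themselves are the standard ISO 3166-1 assignments:

```python
# Minimal sketch: a few countries with their ISO 3166-1 alpha-2, alpha-3 and numeric codes.
iso_3166_1 = {
    "Canada":    ("CA", "CAN", 124),
    "Indonesia": ("ID", "IDN", 360),
    "Germany":   ("DE", "DEU", 276),
}

alpha2, alpha3, numeric = iso_3166_1["Canada"]
print(alpha2, alpha3, numeric)  # CA CAN 124
```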
-
Why do you need an Excel list of countries?
-
An Excel list of countries can be useful for many reasons, such as:
-
-
Learning geography and improving your general knowledge.
-
Creating maps, charts, graphs, and other visualizations.
-
Performing data analysis and calculations.
-
Filtering, sorting, and grouping data by country.
-
Validating and formatting country inputs in forms and databases.
-
Translating and localizing content for different audiences.
-
-
How to download an Excel list of countries?
-
There are many sources online that offer Excel lists of countries for free or for a fee. However, not all sources are reliable, accurate, or up-to-date. Therefore, you need to be careful when choosing a source and check its credibility, quality, and currency. In the next section, we will review three popular sources of Excel lists of countries and compare their features, pros and cons, and how to use them.
-
Sources of Excel lists of countries
-
CopyLists.com
-
CopyLists.com is a website that provides lists of various topics in different formats, including PDF, Excel, CSV, Word, Open Office, HTML, JSON, MP3, and JPG. One of the topics covered by CopyLists.com is geography, which includes a list of all countries in alphabetical order with their ISO 3166-1 alpha-2 codes.
-
-
Features
-
-
The list contains 250 country names and codes as of December 2012.
-
The list can be copied or downloaded in many formats.
-
The list can be sorted by alphabetical order or reverse alphabetical order.
-
The list can be displayed as a dropdown select list, an ordered list, an unordered list, a comma-separated list, or a JSON object.
-
The website also offers other geography lists, such as continents, regions, capitals, currencies, languages, etc.
-
-
Pros and cons
-
-
The pros of CopyLists.com are:
-
-
It is easy to use and navigate.
-
It offers multiple formats and options for displaying the list.
-
It provides other useful geography lists.
-
-
The cons of CopyLists.com are:
-
-
It is not updated regularly and may contain outdated or incorrect information.
-
It does not provide any additional information about the countries, such as population, area, flag, etc.
-
It may not be compatible with some software or devices.
-
-
-
How to use it
-
To use CopyLists.com, you need to follow these steps:
-
-
Go to the website and click on the "Geography" tab.
-
Select the "Countries" option from the dropdown menu.
-
Choose the format and display option that suits your needs.
-
Copy or download the list as you wish.
-
-
DataHub.io
-
DataHub.io is a platform that provides open data sets from various domains, such as finance, health, environment, sports, etc. One of the data sets available on DataHub.io is a list of countries with their ISO 3166-1 codes and other information.
-
Features
-
-
The list contains 249 country names and codes as of October 2020.
-
The list also includes other information about the countries, such as population, area, GDP, flag, continent, region, subregion, etc.
-
The list can be downloaded in CSV or JSON format.
-
The list can be viewed online as a table or a map.
-
The website also offers other data sets related to countries, such as indicators, rankings, statistics, etc.
-
-
Pros and cons
-
-
The pros of DataHub.io are:
-
-
It is updated regularly and contains accurate and reliable information.
-
It provides comprehensive and detailed information about the countries.
-
It allows online visualization and exploration of the data.
-
It provides other relevant data sets for further analysis and comparison.
-
-
The cons of DataHub.io are:
-
-
It is not very user-friendly and may require some technical skills to use it.
-
It does not offer many formats or options for downloading or displaying the list.
-
It may not be suitable for some purposes or audiences that require a simple or concise list of countries.
-
-
How to use it
-
To use DataHub.io, you need to follow these steps:
-
-
Go to the website and search for the "country-list" data set.
-
Click on the "Download" button and choose the CSV or JSON format.
-
Save the file to your computer or device.
-
Open the file with your preferred software or application.
-
-
Microsoft Support
-
Microsoft Support is a website that provides help and guidance for Microsoft products and services, such as Windows, Office, Outlook, etc. One of the resources available on Microsoft Support is a list of countries/regions and their two-letter ISO codes.
-
Features
-
-
The list contains 249 country names and codes as of June 2020.
-
The list is in a table format that can be copied and pasted into Excel or other applications.
-
The list can be filtered by region, such as Africa, Asia, Europe, etc.
-
The website also offers other lists of country/region information, such as dialing codes, time zones, currencies, etc.
-
-
Pros and cons
-
-
The pros of Microsoft Support are:
-
-
It is a trusted and reputable source of information.
-
It provides a simple and easy-to-use list of countries and codes.
-
It allows filtering by region for convenience.
-
It provides other useful lists of country/region information.
-
-
The cons of Microsoft Support are:
-
-
It does not provide any other information about the countries, such as population, area, flag, etc.
-
It does not offer any other formats or options for downloading or displaying the list.
-
It may not be updated frequently and may contain outdated or incorrect information.
-
-
Q: How can I add more information to an Excel list of countries, such as population, area, etc.?
-
A: You can use a data source that provides more information about the countries, such as DataHub.io or World Bank. You can also use a VLOOKUP function in Excel to match the country names or codes with the additional information from another data source.
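If you prefer to do the join outside of Excel, the same VLOOKUP idea can be expressed with pandas. The sketch below is only illustrative: the file names and the shared "Code" column are assumptions, so rename them to match the headers of your actual downloads.

```python
# A VLOOKUP-style join in pandas. File and column names are placeholders --
# adjust them to whatever your two sources actually use.
# (The rough Excel equivalent would be something like =VLOOKUP(A2, Sheet2!A:B, 2, FALSE).)
import pandas as pd

countries = pd.read_csv("country-list.csv")      # base list: names and ISO codes
extra = pd.read_csv("country-indicators.csv")    # e.g. population, area, GDP

# Left join on the shared code column, keeping every row of the base list.
enriched = countries.merge(extra, on="Code", how="left")

# Write the result back to Excel (requires an Excel writer such as openpyxl).
enriched.to_excel("countries_enriched.xlsx", index=False)
```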
-
Q: How can I create a map from an Excel list of countries?
-
A: You can use a mapping tool, such as Google Maps or Power Map in Excel, to create a map from an Excel list of countries. You can also use a data visualization tool, such as Tableau or Power BI, to create a map and other charts from an Excel list of countries.
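The tools above are all point-and-click; as a programmatic alternative (not one of the tools named in the answer), here is a hedged sketch using the plotly library. It assumes your sheet has been exported to CSV with a three-letter ISO code column named "Code" and a numeric column named "Population" to colour by; both names are placeholders.

```python
# Sketch: draw a world choropleth from a country list with plotly express.
# Column names ("Code", "Population") are assumptions -- rename to match your file.
import pandas as pd
import plotly.express as px

df = pd.read_csv("country-list.csv")
fig = px.choropleth(df, locations="Code", locationmode="ISO-3", color="Population")
fig.show()
```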
-
Q: How can I translate an Excel list of countries to another language?
-
A: You can use a translation tool, such as Google Translate or Microsoft Translator, to translate an Excel list of countries to another language. You can also use a localization tool, such as Transifex or Crowdin, to translate and localize an Excel list of countries for different audiences.
-
-
\ No newline at end of file
diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Super Snail MOD APK for Free and Enjoy a Snail-tastic Adventure.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Super Snail MOD APK for Free and Enjoy a Snail-tastic Adventure.md
deleted file mode 100644
index 951cf6781a4ccbba87458a1fbd3cd07999500ba3..0000000000000000000000000000000000000000
--- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Download Super Snail MOD APK for Free and Enjoy a Snail-tastic Adventure.md
+++ /dev/null
@@ -1,95 +0,0 @@
-
-
Super Snail Mod APK: A Fun and Exciting RPG Game
-
If you are looking for a new and exciting role-playing game to play on your Android device, you should try Super Snail Mod APK. This is a modded version of the original Super Snail game, which is a popular RPG game that lets you control a snail with superpowers. You can explore different worlds, fight enemies, collect coins and gems, and unlock new levels and characters. With Super Snail Mod APK, you can enjoy unlimited resources, no ads, and no root required. In this article, we will tell you more about this amazing game and how to download and install it on your device.
Super Snail Mod APK is a modified version of the original Super Snail game, which is developed by Moddroid.com. The original game is a fun and addictive RPG game that lets you control a snail with superpowers. You can use your snail's abilities to jump, dash, shoot, and fly through different worlds. You can also collect coins and gems, upgrade your snail's skills, and unlock new levels and characters. The game has amazing graphics, sound effects, and gameplay that will keep you entertained for hours.
-
However, the original game also has some limitations that may affect your gaming experience. For example, you need to watch ads to get extra coins or gems, or to unlock some levels or characters. You also need to spend real money to buy more resources or to remove the ads. Moreover, you need to root your device to install some mods or hacks that can enhance your game.
-
That is why Super Snail Mod APK was created. This is a modded version of the original game that gives you unlimited coins and gems, unlocks all levels and characters, removes all ads, and does not require root access. With this mod apk, you can enjoy the game without any restrictions or interruptions.
-
Features of Super Snail Mod APK
-
Super Snail Mod APK has many features that make it better than the original game. Here are some of them:
-
-
Unlimited coins and gems
-
Coins and gems are the main currencies in the game. You can use them to buy items, upgrade your snail's skills, or unlock new levels and characters. However, they are not easy to earn in the original game. You need to complete missions, watch ads, or spend real money to get them. With Super Snail Mod APK, you don't have to worry about that. You will get unlimited coins and gems in your account as soon as you start the game. You can use them as much as you want without any limitations.
-
Unlock all levels and characters
-
The game has many levels and characters that you can play with. However, not all of them are available from the beginning. You need to complete certain missions or pay with coins or gems to unlock them. With Super Snail Mod APK, you don't have to do that. You will get access to all levels and characters from the start. You can choose any level or character you want without any restrictions.
-
No ads and no root required
-
The original game has many ads that may interrupt your gaming experience. You need to watch them to get extra coins or gems, or to unlock some levels or characters. You also need to root your device to install some mods or hacks that can enhance your game. With Super Snail Mod APK, you don't have to deal with that. You will not see any ads in the game, and you don't need to root your device to install the mod apk. You can enjoy the game without any hassle.
-
How to download and install Super Snail Mod APK?
-
If you want to download and install Super Snail Mod APK on your device, you need to follow these simple steps:
-
Step 1: Download the mod apk file from the link below
-
The first thing you need to do is to download the mod apk file from the link provided below. This is a safe and secure link that will give you the latest version of the mod apk. You can use any browser or downloader app to download the file. The file size is about 50 MB, so make sure you have enough storage space on your device.
-
Step 2: Enable unknown sources on your device
-
The next thing you need to do is to enable unknown sources on your device. This is a necessary step to install any mod apk file that is not from the official Google Play Store. To do this, go to your device settings, then security, then unknown sources. Turn on the option that allows you to install apps from unknown sources. This will not harm your device or compromise your security.
-
Step 3: Install the mod apk file and enjoy the game
-
The final thing you need to do is to install the mod apk file on your device. To do this, locate the file in your downloads folder or wherever you saved it. Tap on the file and follow the instructions on the screen. The installation process will take a few seconds, and then you will see the game icon on your home screen. Tap on it and start playing Super Snail Mod APK.
-
Why should you play Super Snail Mod APK?
-
Super Snail Mod APK is a game that you should play if you love RPG games that are fun and exciting. Here are some reasons why you should play this game:
-
It is fun and addictive
-
The game is fun and addictive because it has a simple yet engaging gameplay that will keep you hooked for hours. You can control your snail with easy and intuitive controls, and use its superpowers to overcome obstacles and enemies. You can also collect coins and gems, upgrade your snail's skills, and unlock new levels and characters. The game has a lot of content and variety that will make you want to play more.
-
It has amazing graphics and sound effects
-
The game has amazing graphics and sound effects that will make you feel like you are in a cartoon world. The game has colorful and detailed graphics that are pleasing to the eye, and smooth and fast animations that are realistic and fluid. The game also has catchy and lively sound effects that match the mood and theme of the game. The game has a high-quality audio-visual experience that will enhance your gaming experience.
-
It has a variety of levels and challenges
-
The game has a variety of levels and challenges that will test your skills and creativity. The game has different worlds that have different themes, environments, enemies, and bosses. Each world has many levels that have different objectives, difficulties, and rewards. You can also face different challenges such as time trials, survival modes, or boss battles. The game has a lot of content and diversity that will challenge you and keep you entertained.
-
Conclusion
-
Super Snail Mod APK is a fun and exciting RPG game that lets you control a snail with superpowers. You can explore different worlds, fight enemies, collect coins and gems, and unlock new levels and characters. With Super Snail Mod APK, you can enjoy unlimited resources, no ads, and no root required. You can download and install Super Snail Mod APK on your device by following the steps above. You should play Super Snail Mod APK if you love RPG games that are fun and exciting.
-
FAQs
-
Here are some frequently asked questions about Super Snail Mod APK:
-
-
Q: Is Super Snail Mod APK safe to download and install?
A: Yes, Super Snail Mod APK is safe to download and install. It does not contain any viruses or malware that can harm your device or compromise your security.
-
Q: Do I need an internet connection to play Super Snail Mod APK?
A: No, you don't need an internet connection to play Super Snail Mod APK. You can play the game offline without any problems.
Q: How can I update Super Snail Mod APK?
A: You can update Super Snail Mod APK by downloading and installing the latest version of the mod apk file from the same link that you used before. You don't need to uninstall the previous version, just overwrite it with the new one.
-
Q: What is the difference between Super Snail Mod APK and Super Snail Hack APK?
A: Super Snail Mod APK is a modified version of the original game that gives you unlimited resources, no ads, and no root required. Super Snail Hack APK is a hacked version of the original game that gives you cheats and hacks that can alter the game's mechanics, such as invincibility, speed, or damage. Both versions are not official and are not supported by the developers.
-
Q: Can I play Super Snail Mod APK with my friends?
A: Yes, you can play Super Snail Mod APK with your friends. The game has a multiplayer mode that lets you play with other players online. You can join or create a room, invite your friends, and compete or cooperate with them. You can also chat with them and share your scores and achievements.
-
-
I hope this article has helped you learn more about Super Snail Mod APK and how to download and install it on your device. If you have any questions or feedback, please leave a comment below. Thank you for reading and have fun playing Super Snail Mod APK.
-
-
\ No newline at end of file
diff --git a/spaces/fffiloni/Video-Matting-Anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.cpp b/spaces/fffiloni/Video-Matting-Anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.cpp
deleted file mode 100644
index 551243fdadfd1682b5dc6628623b67a79b3f6c74..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/Video-Matting-Anything/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/*!
-**************************************************************************************************
-* Deformable DETR
-* Copyright (c) 2020 SenseTime. All Rights Reserved.
-* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-**************************************************************************************************
-* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
-**************************************************************************************************
-*/
-
-#include <vector>
-
-#include <ATen/ATen.h>
-#include <ATen/cuda/CUDAContext.h>
-
-namespace groundingdino {
-
-at::Tensor
-ms_deform_attn_cpu_forward(
- const at::Tensor &value,
- const at::Tensor &spatial_shapes,
- const at::Tensor &level_start_index,
- const at::Tensor &sampling_loc,
- const at::Tensor &attn_weight,
- const int im2col_step)
-{
- AT_ERROR("Not implement on cpu");
-}
-
-std::vector<at::Tensor>
-ms_deform_attn_cpu_backward(
- const at::Tensor &value,
- const at::Tensor &spatial_shapes,
- const at::Tensor &level_start_index,
- const at::Tensor &sampling_loc,
- const at::Tensor &attn_weight,
- const at::Tensor &grad_output,
- const int im2col_step)
-{
- AT_ERROR("Not implement on cpu");
-}
-
-} // namespace groundingdino
diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/content-disposition/index.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/content-disposition/index.js
deleted file mode 100644
index ecec899a992d46f2e903a87475b1c342f2ce4d30..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/content-disposition/index.js
+++ /dev/null
@@ -1,458 +0,0 @@
-/*!
- * content-disposition
- * Copyright(c) 2014-2017 Douglas Christopher Wilson
- * MIT Licensed
- */
-
-'use strict'
-
-/**
- * Module exports.
- * @public
- */
-
-module.exports = contentDisposition
-module.exports.parse = parse
-
-/**
- * Module dependencies.
- * @private
- */
-
-var basename = require('path').basename
-var Buffer = require('safe-buffer').Buffer
-
-/**
- * RegExp to match non attr-char, *after* encodeURIComponent (i.e. not including "%")
- * @private
- */
-
-var ENCODE_URL_ATTR_CHAR_REGEXP = /[\x00-\x20"'()*,/:;<=>?@[\\\]{}\x7f]/g // eslint-disable-line no-control-regex
-
-/**
- * RegExp to match percent encoding escape.
- * @private
- */
-
-var HEX_ESCAPE_REGEXP = /%[0-9A-Fa-f]{2}/
-var HEX_ESCAPE_REPLACE_REGEXP = /%([0-9A-Fa-f]{2})/g
-
-/**
- * RegExp to match non-latin1 characters.
- * @private
- */
-
-var NON_LATIN1_REGEXP = /[^\x20-\x7e\xa0-\xff]/g
-
-/**
- * RegExp to match quoted-pair in RFC 2616
- *
- * quoted-pair = "\" CHAR
- * CHAR = <any US-ASCII character (octets 0 - 127)>
- * @private
- */
-
-var QESC_REGEXP = /\\([\u0000-\u007f])/g // eslint-disable-line no-control-regex
-
-/**
- * RegExp to match chars that must be quoted-pair in RFC 2616
- * @private
- */
-
-var QUOTE_REGEXP = /([\\"])/g
-
-/**
- * RegExp for various RFC 2616 grammar
- *
- * parameter = token "=" ( token | quoted-string )
- * token = 1*<any CHAR except CTLs or separators>
- * separators = "(" | ")" | "<" | ">" | "@"
- * | "," | ";" | ":" | "\" | <">
- * | "/" | "[" | "]" | "?" | "="
- * | "{" | "}" | SP | HT
- * quoted-string = ( <"> *(qdtext | quoted-pair ) <"> )
- * qdtext = <any TEXT except <">>
- * quoted-pair = "\" CHAR
- * CHAR = <any US-ASCII character (octets 0 - 127)>
- * TEXT = <any OCTET except CTLs, but including LWS>
- * LWS = [CRLF] 1*( SP | HT )
- * CRLF = CR LF
- * CR = <US-ASCII CR, carriage return (13)>
- * LF = <US-ASCII LF, linefeed (10)>
- * SP = <US-ASCII SP, space (32)>
- * HT = <US-ASCII HT, horizontal-tab (9)>
- * CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
- * OCTET = <any 8-bit sequence of data>
- * @private
- */
-
-var PARAM_REGEXP = /;[\x09\x20]*([!#$%&'*+.0-9A-Z^_`a-z|~-]+)[\x09\x20]*=[\x09\x20]*("(?:[\x20!\x23-\x5b\x5d-\x7e\x80-\xff]|\\[\x20-\x7e])*"|[!#$%&'*+.0-9A-Z^_`a-z|~-]+)[\x09\x20]*/g // eslint-disable-line no-control-regex
-var TEXT_REGEXP = /^[\x20-\x7e\x80-\xff]+$/
-var TOKEN_REGEXP = /^[!#$%&'*+.0-9A-Z^_`a-z|~-]+$/
-
-/**
- * RegExp for various RFC 5987 grammar
- *
- * ext-value = charset "'" [ language ] "'" value-chars
- * charset = "UTF-8" / "ISO-8859-1" / mime-charset
- * mime-charset = 1*mime-charsetc
- * mime-charsetc = ALPHA / DIGIT
- * / "!" / "#" / "$" / "%" / "&"
- * / "+" / "-" / "^" / "_" / "`"
- * / "{" / "}" / "~"
- * language = ( 2*3ALPHA [ extlang ] )
- * / 4ALPHA
- * / 5*8ALPHA
- * extlang = *3( "-" 3ALPHA )
- * value-chars = *( pct-encoded / attr-char )
- * pct-encoded = "%" HEXDIG HEXDIG
- * attr-char = ALPHA / DIGIT
- * / "!" / "#" / "$" / "&" / "+" / "-" / "."
- * / "^" / "_" / "`" / "|" / "~"
- * @private
- */
-
-var EXT_VALUE_REGEXP = /^([A-Za-z0-9!#$%&+\-^_`{}~]+)'(?:[A-Za-z]{2,3}(?:-[A-Za-z]{3}){0,3}|[A-Za-z]{4,8}|)'((?:%[0-9A-Fa-f]{2}|[A-Za-z0-9!#$&+.^_`|~-])+)$/
-
-/**
- * RegExp for various RFC 6266 grammar
- *
- * disposition-type = "inline" | "attachment" | disp-ext-type
- * disp-ext-type = token
- * disposition-parm = filename-parm | disp-ext-parm
- * filename-parm = "filename" "=" value
- * | "filename*" "=" ext-value
- * disp-ext-parm = token "=" value
- * | ext-token "=" ext-value
- * ext-token = <the characters in token, followed by "*">
- * @private
- */
-
-var DISPOSITION_TYPE_REGEXP = /^([!#$%&'*+.0-9A-Z^_`a-z|~-]+)[\x09\x20]*(?:$|;)/ // eslint-disable-line no-control-regex
-
-/**
- * Create an attachment Content-Disposition header.
- *
- * @param {string} [filename]
- * @param {object} [options]
- * @param {string} [options.type=attachment]
- * @param {string|boolean} [options.fallback=true]
- * @return {string}
- * @public
- */
-
-function contentDisposition (filename, options) {
- var opts = options || {}
-
- // get type
- var type = opts.type || 'attachment'
-
- // get parameters
- var params = createparams(filename, opts.fallback)
-
- // format into string
- return format(new ContentDisposition(type, params))
-}
-
-/**
- * Create parameters object from filename and fallback.
- *
- * @param {string} [filename]
- * @param {string|boolean} [fallback=true]
- * @return {object}
- * @private
- */
-
-function createparams (filename, fallback) {
- if (filename === undefined) {
- return
- }
-
- var params = {}
-
- if (typeof filename !== 'string') {
- throw new TypeError('filename must be a string')
- }
-
- // fallback defaults to true
- if (fallback === undefined) {
- fallback = true
- }
-
- if (typeof fallback !== 'string' && typeof fallback !== 'boolean') {
- throw new TypeError('fallback must be a string or boolean')
- }
-
- if (typeof fallback === 'string' && NON_LATIN1_REGEXP.test(fallback)) {
- throw new TypeError('fallback must be ISO-8859-1 string')
- }
-
- // restrict to file base name
- var name = basename(filename)
-
- // determine if name is suitable for quoted string
- var isQuotedString = TEXT_REGEXP.test(name)
-
- // generate fallback name
- var fallbackName = typeof fallback !== 'string'
- ? fallback && getlatin1(name)
- : basename(fallback)
- var hasFallback = typeof fallbackName === 'string' && fallbackName !== name
-
- // set extended filename parameter
- if (hasFallback || !isQuotedString || HEX_ESCAPE_REGEXP.test(name)) {
- params['filename*'] = name
- }
-
- // set filename parameter
- if (isQuotedString || hasFallback) {
- params.filename = hasFallback
- ? fallbackName
- : name
- }
-
- return params
-}
-
-/**
- * Format object to Content-Disposition header.
- *
- * @param {object} obj
- * @param {string} obj.type
- * @param {object} [obj.parameters]
- * @return {string}
- * @private
- */
-
-function format (obj) {
- var parameters = obj.parameters
- var type = obj.type
-
- if (!type || typeof type !== 'string' || !TOKEN_REGEXP.test(type)) {
- throw new TypeError('invalid type')
- }
-
- // start with normalized type
- var string = String(type).toLowerCase()
-
- // append parameters
- if (parameters && typeof parameters === 'object') {
- var param
- var params = Object.keys(parameters).sort()
-
- for (var i = 0; i < params.length; i++) {
- param = params[i]
-
- var val = param.substr(-1) === '*'
- ? ustring(parameters[param])
- : qstring(parameters[param])
-
- string += '; ' + param + '=' + val
- }
- }
-
- return string
-}
-
-/**
- * Decode a RFC 5987 field value (gracefully).
- *
- * @param {string} str
- * @return {string}
- * @private
- */
-
-function decodefield (str) {
- var match = EXT_VALUE_REGEXP.exec(str)
-
- if (!match) {
- throw new TypeError('invalid extended field value')
- }
-
- var charset = match[1].toLowerCase()
- var encoded = match[2]
- var value
-
- // to binary string
- var binary = encoded.replace(HEX_ESCAPE_REPLACE_REGEXP, pdecode)
-
- switch (charset) {
- case 'iso-8859-1':
- value = getlatin1(binary)
- break
- case 'utf-8':
- value = Buffer.from(binary, 'binary').toString('utf8')
- break
- default:
- throw new TypeError('unsupported charset in extended field')
- }
-
- return value
-}
-
-/**
- * Get ISO-8859-1 version of string.
- *
- * @param {string} val
- * @return {string}
- * @private
- */
-
-function getlatin1 (val) {
- // simple Unicode -> ISO-8859-1 transformation
- return String(val).replace(NON_LATIN1_REGEXP, '?')
-}
-
-/**
- * Parse Content-Disposition header string.
- *
- * @param {string} string
- * @return {object}
- * @public
- */
-
-function parse (string) {
- if (!string || typeof string !== 'string') {
- throw new TypeError('argument string is required')
- }
-
- var match = DISPOSITION_TYPE_REGEXP.exec(string)
-
- if (!match) {
- throw new TypeError('invalid type format')
- }
-
- // normalize type
- var index = match[0].length
- var type = match[1].toLowerCase()
-
- var key
- var names = []
- var params = {}
- var value
-
- // calculate index to start at
- index = PARAM_REGEXP.lastIndex = match[0].substr(-1) === ';'
- ? index - 1
- : index
-
- // match parameters
- while ((match = PARAM_REGEXP.exec(string))) {
- if (match.index !== index) {
- throw new TypeError('invalid parameter format')
- }
-
- index += match[0].length
- key = match[1].toLowerCase()
- value = match[2]
-
- if (names.indexOf(key) !== -1) {
- throw new TypeError('invalid duplicate parameter')
- }
-
- names.push(key)
-
- if (key.indexOf('*') + 1 === key.length) {
- // decode extended value
- key = key.slice(0, -1)
- value = decodefield(value)
-
- // overwrite existing value
- params[key] = value
- continue
- }
-
- if (typeof params[key] === 'string') {
- continue
- }
-
- if (value[0] === '"') {
- // remove quotes and escapes
- value = value
- .substr(1, value.length - 2)
- .replace(QESC_REGEXP, '$1')
- }
-
- params[key] = value
- }
-
- if (index !== -1 && index !== string.length) {
- throw new TypeError('invalid parameter format')
- }
-
- return new ContentDisposition(type, params)
-}
-
-/**
- * Percent decode a single character.
- *
- * @param {string} str
- * @param {string} hex
- * @return {string}
- * @private
- */
-
-function pdecode (str, hex) {
- return String.fromCharCode(parseInt(hex, 16))
-}
-
-/**
- * Percent encode a single character.
- *
- * @param {string} char
- * @return {string}
- * @private
- */
-
-function pencode (char) {
- return '%' + String(char)
- .charCodeAt(0)
- .toString(16)
- .toUpperCase()
-}
-
-/**
- * Quote a string for HTTP.
- *
- * @param {string} val
- * @return {string}
- * @private
- */
-
-function qstring (val) {
- var str = String(val)
-
- return '"' + str.replace(QUOTE_REGEXP, '\\$1') + '"'
-}
-
-/**
- * Encode a Unicode string for HTTP (RFC 5987).
- *
- * @param {string} val
- * @return {string}
- * @private
- */
-
-function ustring (val) {
- var str = String(val)
-
- // percent encode as UTF-8
- var encoded = encodeURIComponent(str)
- .replace(ENCODE_URL_ATTR_CHAR_REGEXP, pencode)
-
- return 'UTF-8\'\'' + encoded
-}
-
-/**
- * Class for parsed Content-Disposition header for v8 optimization
- *
- * @public
- * @param {string} type
- * @param {object} parameters
- * @constructor
- */
-
-function ContentDisposition (type, parameters) {
- this.type = type
- this.parameters = parameters
-}
diff --git a/spaces/fffiloni/instant-TTS-Bark-cloning/checkpoints/blank.md b/spaces/fffiloni/instant-TTS-Bark-cloning/checkpoints/blank.md
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/flowers-team/SocialAISchool/utils/multimodalutils.py b/spaces/flowers-team/SocialAISchool/utils/multimodalutils.py
deleted file mode 100644
index 2fbde04f41f6be2652bc59004a8ef17871a82768..0000000000000000000000000000000000000000
--- a/spaces/flowers-team/SocialAISchool/utils/multimodalutils.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from enum import IntEnum
-import numpy as np
-import gym.spaces as spaces
-import torch
-
-raise DeprecationWarning("Do not use this. Grammar is defined in the env class; SocialAIGrammar is socialaigrammar.py")
-
-# class Grammar(object):
-#
-# templates = ["Where is ", "Who is"]
-# things = ["me", "exit", "you", "him", "task"]
-#
-# grammar_action_space = spaces.MultiDiscrete([len(templates), len(things)])
-#
-# @classmethod
-# def construct_utterance(cls, action):
-# return cls.templates[int(action[0])] + " " + cls.things[int(action[1])] + ". "
diff --git a/spaces/freddyaboulton/gradio_pdf/src/README.md b/spaces/freddyaboulton/gradio_pdf/src/README.md
deleted file mode 100644
index 19ed193b7d6f5ae09bd4ef94056e4b27838bb521..0000000000000000000000000000000000000000
--- a/spaces/freddyaboulton/gradio_pdf/src/README.md
+++ /dev/null
@@ -1,40 +0,0 @@
-
-# gradio_pdf
-Display PDFs in Gradio!
-
-## Example usage
-
-```python
-
-import gradio as gr
-from gradio_pdf import PDF
-from pdf2image import convert_from_path
-from transformers import pipeline
-from pathlib import Path
-
-dir_ = Path(__file__).parent
-
-p = pipeline(
- "document-question-answering",
- model="impira/layoutlm-document-qa",
-)
-
-def qa(question: str, doc: str) -> str:
- img = convert_from_path(doc)[0]
- output = p(img, question)
- return sorted(output, key=lambda x: x["score"], reverse=True)[0]['answer']
-
-
-demo = gr.Interface(
- qa,
- [gr.Textbox(label="Question"), PDF(label="Document")],
- gr.Textbox(),
- examples=[["What is the total gross worth?", str(dir_ / "invoice_2.pdf")],
- ["Whos is being invoiced?", str(dir_ / "sample_invoice.pdf")]]
-)
-
-demo.launch()
-```
-
-## Demo
-
\ No newline at end of file
diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/runner/default_constructor.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/runner/default_constructor.py
deleted file mode 100644
index 3f1f5b44168768dfda3947393a63a6cf9cf50b41..0000000000000000000000000000000000000000
--- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmcv/runner/default_constructor.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from .builder import RUNNER_BUILDERS, RUNNERS
-
-
-@RUNNER_BUILDERS.register_module()
-class DefaultRunnerConstructor:
- """Default constructor for runners.
-
- Customize an existing `Runner` such as `EpochBasedRunner` through a `RunnerConstructor`.
- For example, we can inject new properties and functions into `Runner`.
-
- Example:
- >>> from annotator.uniformer.mmcv.runner import RUNNER_BUILDERS, build_runner
- >>> # Define a new RunnerConstructor
- >>> @RUNNER_BUILDERS.register_module()
- >>> class MyRunnerConstructor:
- ... def __init__(self, runner_cfg, default_args=None):
- ... if not isinstance(runner_cfg, dict):
- ... raise TypeError('runner_cfg should be a dict',
- ... f'but got {type(runner_cfg)}')
- ... self.runner_cfg = runner_cfg
- ... self.default_args = default_args
- ...
- ... def __call__(self):
- ... runner = RUNNERS.build(self.runner_cfg,
- ... default_args=self.default_args)
- ... # Add new properties for existing runner
- ... runner.my_name = 'my_runner'
- ... runner.my_function = lambda self: print(self.my_name)
- ... ...
- >>> # build your runner
- >>> runner_cfg = dict(type='EpochBasedRunner', max_epochs=40,
- ... constructor='MyRunnerConstructor')
- >>> runner = build_runner(runner_cfg)
- """
-
- def __init__(self, runner_cfg, default_args=None):
- if not isinstance(runner_cfg, dict):
- raise TypeError('runner_cfg should be a dict',
- f'but got {type(runner_cfg)}')
- self.runner_cfg = runner_cfg
- self.default_args = default_args
-
- def __call__(self):
- return RUNNERS.build(self.runner_cfg, default_args=self.default_args)
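-
-# Usage sketch (illustrative; `model` and `logger` below are placeholders, not defined here):
-#   runner_cfg = dict(type='EpochBasedRunner', max_epochs=40)
-#   runner = DefaultRunnerConstructor(runner_cfg, default_args=dict(model=model, logger=logger))()
-#   # ...which is equivalent to RUNNERS.build(runner_cfg, default_args=dict(model=model, logger=logger))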
diff --git a/spaces/givenvessel399/M.me/README.md b/spaces/givenvessel399/M.me/README.md
deleted file mode 100644
index a0871e108ae02a7dbd0153de21a9a3e318d7e8a5..0000000000000000000000000000000000000000
--- a/spaces/givenvessel399/M.me/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Zenml Server
-emoji: 🧘
-colorFrom: purple
-colorTo: green
-sdk: docker
-pinned: false
-app_port: 8080
-license: apache-2.0
-duplicated_from: zenml/zenml
----
diff --git a/spaces/glyszt/vt/vtoonify/train_vtoonify_t.py b/spaces/glyszt/vt/vtoonify/train_vtoonify_t.py
deleted file mode 100644
index 147d5f38a5b25822ab05f089173cd96c6aa22c12..0000000000000000000000000000000000000000
--- a/spaces/glyszt/vt/vtoonify/train_vtoonify_t.py
+++ /dev/null
@@ -1,432 +0,0 @@
-import os
-#os.environ['CUDA_VISIBLE_DEVICES'] = "0"
-import argparse
-import math
-import random
-
-import numpy as np
-import torch
-from torch import nn, optim
-from torch.nn import functional as F
-from torch.utils import data
-import torch.distributed as dist
-from torchvision import transforms, utils
-from tqdm import tqdm
-from PIL import Image
-from util import *
-from model.stylegan import lpips
-from model.stylegan.model import Generator, Downsample
-from model.vtoonify import VToonify, ConditionalDiscriminator
-from model.bisenet.model import BiSeNet
-from model.simple_augment import random_apply_affine
-from model.stylegan.distributed import (
- get_rank,
- synchronize,
- reduce_loss_dict,
- reduce_sum,
- get_world_size,
-)
-
-# In the paper, --weight for each style is set as follows,
-# cartoon: default
-# caricature: default
-# pixar: 1 1 1 1 1 1 1 1 1 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5
-# comic: 0.5 0.5 0.5 0.5 0.5 0.5 0.5 1 1 1 1 1 1 1 1 1 1 1
-# arcane: 0.5 0.5 0.5 0.5 0.5 0.5 0.5 1 1 1 1 1 1 1 1 1 1 1
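-# For example, an illustrative invocation for the pixar style (the checkpoint path is an assumption):
-# python train_vtoonify_t.py --name vtoonify_t_pixar \
-#     --finetunegan_path ./checkpoint/pixar/finetune-000600.pt \
-#     --weight 1 1 1 1 1 1 1 1 1 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5 0.5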
-
-class TrainOptions():
- def __init__(self):
-
- self.parser = argparse.ArgumentParser(description="Train VToonify-T")
- self.parser.add_argument("--iter", type=int, default=2000, help="total training iterations")
- self.parser.add_argument("--batch", type=int, default=8, help="batch sizes for each gpus")
- self.parser.add_argument("--lr", type=float, default=0.0001, help="learning rate")
- self.parser.add_argument("--local_rank", type=int, default=0, help="local rank for distributed training")
- self.parser.add_argument("--start_iter", type=int, default=0, help="start iteration")
- self.parser.add_argument("--save_every", type=int, default=30000, help="interval of saving a checkpoint")
- self.parser.add_argument("--save_begin", type=int, default=30000, help="when to start saving a checkpoint")
- self.parser.add_argument("--log_every", type=int, default=200, help="interval of saving an intermediate image result")
-
- self.parser.add_argument("--adv_loss", type=float, default=0.01, help="the weight of adv loss")
- self.parser.add_argument("--grec_loss", type=float, default=0.1, help="the weight of mse recontruction loss")
- self.parser.add_argument("--perc_loss", type=float, default=0.01, help="the weight of perceptual loss")
- self.parser.add_argument("--tmp_loss", type=float, default=1.0, help="the weight of temporal consistency loss")
-
- self.parser.add_argument("--encoder_path", type=str, default=None, help="path to the pretrained encoder model")
- self.parser.add_argument("--direction_path", type=str, default='./checkpoint/directions.npy', help="path to the editing direction latents")
- self.parser.add_argument("--stylegan_path", type=str, default='./checkpoint/stylegan2-ffhq-config-f.pt', help="path to the stylegan model")
- self.parser.add_argument("--finetunegan_path", type=str, default='./checkpoint/cartoon/finetune-000600.pt', help="path to the finetuned stylegan model")
- self.parser.add_argument("--weight", type=float, nargs=18, default=[1]*9+[0]*9, help="the weight for blending two models")
- self.parser.add_argument("--faceparsing_path", type=str, default='./checkpoint/faceparsing.pth', help="path of the face parsing model")
- self.parser.add_argument("--style_encoder_path", type=str, default='./checkpoint/encoder.pt', help="path of the style encoder")
-
- self.parser.add_argument("--name", type=str, default='vtoonify_t_cartoon', help="saved model name")
- self.parser.add_argument("--pretrain", action="store_true", help="if true, only pretrain the encoder")
-
- def parse(self):
- self.opt = self.parser.parse_args()
- if self.opt.encoder_path is None:
- self.opt.encoder_path = os.path.join('./checkpoint/', self.opt.name, 'pretrain.pt')
- args = vars(self.opt)
- if self.opt.local_rank == 0:
- print('Load options')
- for name, value in sorted(args.items()):
- print('%s: %s' % (str(name), str(value)))
- return self.opt
-
-
-# Pretrain the encoder E of VToonify.
-# We train E so that its last-layer feature matches the original 8th-layer input feature of G1.
-# See "Model initialization" in Sec. 4.1.2 for details.
-def pretrain(args, generator, g_optim, g_ema, parsingpredictor, down, directions, basemodel, device):
- pbar = range(args.iter)
-
- if get_rank() == 0:
- pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.01)
-
- recon_loss = torch.tensor(0.0, device=device)
- loss_dict = {}
-
- if args.distributed:
- g_module = generator.module
- else:
- g_module = generator
-
- accum = 0.5 ** (32 / (10 * 1000))
-
- requires_grad(g_module.encoder, True)
-
- for idx in pbar:
- i = idx + args.start_iter
-
- if i > args.iter:
- print("Done!")
- break
-
- with torch.no_grad():
- # during pretraining, no geometric transformations are applied.
- noise_sample = torch.randn(args.batch, 512).cuda()
- ws_ = basemodel.style(noise_sample).unsqueeze(1).repeat(1,18,1) # random w
- ws_[:, 3:7] += directions[torch.randint(0, directions.shape[0], (args.batch,)), 3:7] # w''=w'=w+n
- img_gen, _ = basemodel([ws_], input_is_latent=True, truncation=0.5, truncation_latent=0) # image part of x'
- img_gen = torch.clamp(img_gen, -1, 1).detach()
- img_gen512 = down(img_gen.detach())
- img_gen256 = down(img_gen512.detach()) # image part of x'_down
- mask512 = parsingpredictor(2*torch.clamp(img_gen512, -1, 1))[0]
- real_input = torch.cat((img_gen256, down(mask512)/16.0), dim=1).detach() # x'_down
- # f_G1^(8)(w'')
- real_feat, real_skip = g_ema.generator([ws_], input_is_latent=True, return_feature_ind = 6, truncation=0.5, truncation_latent=0)
- real_feat = real_feat.detach()
- real_skip = real_skip.detach()
-
- # f_E^(last)(x'_down)
- fake_feat, fake_skip = generator(real_input, style=None, return_feat=True)
-
- # L_E in Eq.(1)
- recon_loss = F.mse_loss(fake_feat, real_feat) + F.mse_loss(fake_skip, real_skip)
-
- loss_dict["emse"] = recon_loss
-
- generator.zero_grad()
- recon_loss.backward()
- g_optim.step()
-
- accumulate(g_ema.encoder, g_module.encoder, accum)
-
- loss_reduced = reduce_loss_dict(loss_dict)
-
- emse_loss_val = loss_reduced["emse"].mean().item()
-
- if get_rank() == 0:
- pbar.set_description(
- (
- f"iter: {i:d}; emse: {emse_loss_val:.3f}"
- )
- )
-
- if ((i+1) >= args.save_begin and (i+1) % args.save_every == 0) or (i+1) == args.iter:
- if (i+1) == args.iter:
- savename = f"checkpoint/%s/pretrain.pt"%(args.name)
- else:
- savename = f"checkpoint/%s/pretrain-%05d.pt"%(args.name, i+1)
- torch.save(
- {
- #"g": g_module.encoder.state_dict(),
- "g_ema": g_ema.encoder.state_dict(),
- },
- savename,
- )
-
-
-# generate paired data and train vtoonify, see Sec. 4.1.2 for the detail
-def train(args, generator, discriminator, g_optim, d_optim, g_ema, percept, parsingpredictor, down, pspencoder, directions, basemodel, device):
- pbar = range(args.iter)
-
- if get_rank() == 0:
- pbar = tqdm(pbar, initial=args.start_iter, smoothing=0.01, ncols=120, dynamic_ncols=False)
-
- d_loss = torch.tensor(0.0, device=device)
- g_loss = torch.tensor(0.0, device=device)
- grec_loss = torch.tensor(0.0, device=device)
- gfeat_loss = torch.tensor(0.0, device=device)
- temporal_loss = torch.tensor(0.0, device=device)
- loss_dict = {}
-
- if args.distributed:
- g_module = generator.module
- d_module = discriminator.module
-
- else:
- g_module = generator
- d_module = discriminator
-
- accum = 0.5 ** (32 / (10 * 1000))
-
- for idx in pbar:
- i = idx + args.start_iter
-
- if i > args.iter:
- print("Done!")
- break
-
- ###### This part is for data generation. Generate pair (x, y, w'') as in Fig. 5 of the paper
- with torch.no_grad():
- noise_sample = torch.randn(args.batch, 512).cuda()
- wc = basemodel.style(noise_sample).unsqueeze(1).repeat(1,18,1) # random w
- wc[:, 3:7] += directions[torch.randint(0, directions.shape[0], (args.batch,)), 3:7] # w'=w+n
- wc = wc.detach()
- xc, _ = basemodel([wc], input_is_latent=True, truncation=0.5, truncation_latent=0)
- xc = torch.clamp(xc, -1, 1).detach() # x'
- xl = pspencoder(F.adaptive_avg_pool2d(xc, 256))
- xl = basemodel.style(xl.reshape(xl.shape[0]*xl.shape[1], xl.shape[2])).reshape(xl.shape) # E_s(x'_down)
- xl = torch.cat((wc[:,0:7]*0.5, xl[:,7:18]), dim=1).detach() # w'' = concatenate w' and E_s(x'_down)
- xs, _ = g_ema.generator([xl], input_is_latent=True)
- xs = torch.clamp(xs, -1, 1).detach() # y'
- # during training, random geometric transformations are applied.
- imgs, _ = random_apply_affine(torch.cat((xc.detach(),xs), dim=1), 0.2, None)
- real_input1024 = imgs[:,0:3].detach() # image part of x
- real_input512 = down(real_input1024).detach()
- real_input256 = down(real_input512).detach()
- mask512 = parsingpredictor(2*real_input512)[0]
- mask256 = down(mask512).detach()
- mask = F.adaptive_avg_pool2d(mask512, 1024).detach() # parsing part of x
- real_output = imgs[:,3:].detach() # y
- real_input = torch.cat((real_input256, mask256/16.0), dim=1) # x_down
- # for log, sample a fixed input-output pair (x_down, y, w'')
- if idx == 0 or i == 0:
- samplein = real_input.clone().detach()
- sampleout = real_output.clone().detach()
- samplexl = xl.clone().detach()
-
- ###### This part is for training discriminator
-
- requires_grad(g_module.encoder, False)
- requires_grad(g_module.fusion_out, False)
- requires_grad(g_module.fusion_skip, False)
- requires_grad(discriminator, True)
-
- fake_output = generator(real_input, xl)
- fake_pred = discriminator(F.adaptive_avg_pool2d(fake_output, 256))
- real_pred = discriminator(F.adaptive_avg_pool2d(real_output, 256))
-
- # L_adv in Eq.(3)
- d_loss = d_logistic_loss(real_pred, fake_pred) * args.adv_loss
- loss_dict["d"] = d_loss
-
- discriminator.zero_grad()
- d_loss.backward()
- d_optim.step()
-
- ###### This part is for training generator (encoder and fusion modules)
-
- requires_grad(g_module.encoder, True)
- requires_grad(g_module.fusion_out, True)
- requires_grad(g_module.fusion_skip, True)
- requires_grad(discriminator, False)
-
- fake_output = generator(real_input, xl)
- fake_pred = discriminator(F.adaptive_avg_pool2d(fake_output, 256))
- # L_adv in Eq.(3)
- g_loss = g_nonsaturating_loss(fake_pred) * args.adv_loss
- # L_rec in Eq.(2)
- grec_loss = F.mse_loss(fake_output, real_output) * args.grec_loss
- gfeat_loss = percept(F.adaptive_avg_pool2d(fake_output, 512), # 1024 would run out of memory
- F.adaptive_avg_pool2d(real_output, 512)).sum() * args.perc_loss # 256 would give blurry outputs
-
- loss_dict["g"] = g_loss
- loss_dict["gr"] = grec_loss
- loss_dict["gf"] = gfeat_loss
-
- w = random.randint(0,1024-896)
- h = random.randint(0,1024-896)
- crop_input = torch.cat((real_input1024[:,:,w:w+896,h:h+896], mask[:,:,w:w+896,h:h+896]/16.0), dim=1).detach()
- crop_input = down(down(crop_input))
- crop_fake_output = fake_output[:,:,w:w+896,h:h+896]
- fake_crop_output = generator(crop_input, xl)
- # L_tmp in Eq.(4), gradually increase the weight of L_tmp
- temporal_loss = ((fake_crop_output-crop_fake_output)**2).mean() * max(idx/(args.iter/2.0)-1, 0) * args.tmp_loss
- loss_dict["tp"] = temporal_loss
-
- generator.zero_grad()
- (g_loss + grec_loss + gfeat_loss + temporal_loss).backward()
- g_optim.step()
-
- accumulate(g_ema.encoder, g_module.encoder, accum)
- accumulate(g_ema.fusion_out, g_module.fusion_out, accum)
- accumulate(g_ema.fusion_skip, g_module.fusion_skip, accum)
-
- loss_reduced = reduce_loss_dict(loss_dict)
-
- d_loss_val = loss_reduced["d"].mean().item()
- g_loss_val = loss_reduced["g"].mean().item()
- gr_loss_val = loss_reduced["gr"].mean().item()
- gf_loss_val = loss_reduced["gf"].mean().item()
- tmp_loss_val = loss_reduced["tp"].mean().item()
-
- if get_rank() == 0:
- pbar.set_description(
- (
- f"iter: {i:d}; advd: {d_loss_val:.3f}; advg: {g_loss_val:.3f}; mse: {gr_loss_val:.3f}; "
- f"perc: {gf_loss_val:.3f}; tmp: {tmp_loss_val:.3f}"
- )
- )
-
- if i % args.log_every == 0 or (i+1) == args.iter:
- with torch.no_grad():
- g_ema.eval()
- sample = g_ema(samplein, samplexl)
- sample = F.interpolate(torch.cat((sampleout, sample), dim=0), 256)
- utils.save_image(
- sample,
- f"log/%s/%05d.jpg"%(args.name, i),
- nrow=int(args.batch),
- normalize=True,
- range=(-1, 1),
- )
-
- if ((i+1) >= args.save_begin and (i+1) % args.save_every == 0) or (i+1) == args.iter:
- if (i+1) == args.iter:
- savename = f"checkpoint/%s/vtoonify.pt"%(args.name)
- else:
- savename = f"checkpoint/%s/vtoonify_%05d.pt"%(args.name, i+1)
- torch.save(
- {
- #"g": g_module.state_dict(),
- #"d": d_module.state_dict(),
- "g_ema": g_ema.state_dict(),
- },
- savename,
- )
-
-
-
-if __name__ == "__main__":
-
- device = "cuda"
- parser = TrainOptions()
- args = parser.parse()
- if args.local_rank == 0:
- print('*'*98)
- if not os.path.exists("log/%s/"%(args.name)):
- os.makedirs("log/%s/"%(args.name))
- if not os.path.exists("checkpoint/%s/"%(args.name)):
- os.makedirs("checkpoint/%s/"%(args.name))
-
- n_gpu = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
- args.distributed = n_gpu > 1
-
- if args.distributed:
- torch.cuda.set_device(args.local_rank)
- torch.distributed.init_process_group(backend="nccl", init_method="env://")
- synchronize()
-
- generator = VToonify(backbone = 'toonify').to(device)
- generator.apply(weights_init)
- g_ema = VToonify(backbone = 'toonify').to(device)
- g_ema.eval()
-
- basemodel = Generator(1024, 512, 8, 2).to(device) # G0
- finetunemodel = Generator(1024, 512, 8, 2).to(device)
- basemodel.load_state_dict(torch.load(args.stylegan_path, map_location=lambda storage, loc: storage)['g_ema'])
- finetunemodel.load_state_dict(torch.load(args.finetunegan_path, map_location=lambda storage, loc: storage)['g_ema'])
- fused_state_dict = blend_models(finetunemodel, basemodel, args.weight) # G1
- generator.generator.load_state_dict(fused_state_dict) # load G1
- g_ema.generator.load_state_dict(fused_state_dict)
- requires_grad(basemodel, False)
- requires_grad(generator.generator, False)
- requires_grad(g_ema.generator, False)
-
- if not args.pretrain:
- generator.encoder.load_state_dict(torch.load(args.encoder_path, map_location=lambda storage, loc: storage)["g_ema"])
- # we initialize the fusion modules to map f_G \otimes f_E to f_G.
- for k in generator.fusion_out:
- k.weight.data *= 0.01
- k.weight[:,0:k.weight.shape[0],1,1].data += torch.eye(k.weight.shape[0]).cuda()
- for k in generator.fusion_skip:
- k.weight.data *= 0.01
- k.weight[:,0:k.weight.shape[0],1,1].data += torch.eye(k.weight.shape[0]).cuda()
-
- accumulate(g_ema.encoder, generator.encoder, 0)
- accumulate(g_ema.fusion_out, generator.fusion_out, 0)
- accumulate(g_ema.fusion_skip, generator.fusion_skip, 0)
-
- g_parameters = list(generator.encoder.parameters())
- if not args.pretrain:
- g_parameters = g_parameters + list(generator.fusion_out.parameters()) + list(generator.fusion_skip.parameters())
-
- g_optim = optim.Adam(
- g_parameters,
- lr=args.lr,
- betas=(0.9, 0.99),
- )
-
- if args.distributed:
- generator = nn.parallel.DistributedDataParallel(
- generator,
- device_ids=[args.local_rank],
- output_device=args.local_rank,
- broadcast_buffers=False,
- find_unused_parameters=True,
- )
-
- parsingpredictor = BiSeNet(n_classes=19)
- parsingpredictor.load_state_dict(torch.load(args.faceparsing_path, map_location=lambda storage, loc: storage))
- parsingpredictor.to(device).eval()
- requires_grad(parsingpredictor, False)
-
- # We apply Gaussian blur to the images to avoid flickering caused by downsampling.
- down = Downsample(kernel=[1, 3, 3, 1], factor=2).to(device)
- requires_grad(down, False)
-
- directions = torch.tensor(np.load(args.direction_path)).to(device)
-
- if not args.pretrain:
- discriminator = ConditionalDiscriminator(256).to(device)
-
- d_optim = optim.Adam(
- discriminator.parameters(),
- lr=args.lr,
- betas=(0.9, 0.99),
- )
-
- if args.distributed:
- discriminator = nn.parallel.DistributedDataParallel(
- discriminator,
- device_ids=[args.local_rank],
- output_device=args.local_rank,
- broadcast_buffers=False,
- find_unused_parameters=True,
- )
-
- percept = lpips.PerceptualLoss(model="net-lin", net="vgg", use_gpu=device.startswith("cuda"), gpu_ids=[args.local_rank])
- requires_grad(percept.model.net, False)
-
- pspencoder = load_psp_standalone(args.style_encoder_path, device)
-
- if args.local_rank == 0:
- print('Models and data successfully loaded!')
-
- if args.pretrain:
- pretrain(args, generator, g_optim, g_ema, parsingpredictor, down, directions, basemodel, device)
- else:
- train(args, generator, discriminator, g_optim, d_optim, g_ema, percept, parsingpredictor, down, pspencoder, directions, basemodel, device)
diff --git a/spaces/godot-demo/godot-2d/index.html b/spaces/godot-demo/godot-2d/index.html
deleted file mode 100644
index c340312652b556eb304720810a5326758e5dec9d..0000000000000000000000000000000000000000
--- a/spaces/godot-demo/godot-2d/index.html
+++ /dev/null
@@ -1,247 +0,0 @@
-
- dodge_3.2x
-
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Disk Drill 4.0.487.0 Crack Recover Any File From PC (100 Work) MacOSX REPACK.md b/spaces/gotiQspiryo/whisper-ui/examples/Disk Drill 4.0.487.0 Crack Recover Any File From PC (100 Work) MacOSX REPACK.md
deleted file mode 100644
index f548bbe206074fcbeaf6eeb35ea9542ae65837f9..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Disk Drill 4.0.487.0 Crack Recover Any File From PC (100 Work) MacOSX REPACK.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
Disk Drill 4.0.487.0 crack Recover Any File from PC (100 work) MacOSX
-
-Disk Drill 4.0.487.0 Crack Recover Any File From PC (100 Work) MacOSX Disk Drill PRO 2.0.0.339 Crack is a free and famous data recovery ... If you connect ... 4d29de3e1b
-
-
-
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/How To Use Controller On Desmume.md b/spaces/gotiQspiryo/whisper-ui/examples/How To Use Controller On Desmume.md
deleted file mode 100644
index 079fefc779dfdc187faa7b23ee45a2f7b7e2c3f8..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/How To Use Controller On Desmume.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
- )
-}
diff --git a/spaces/huaiji3y/bingo-Public/src/components/ui/badge.tsx b/spaces/huaiji3y/bingo-Public/src/components/ui/badge.tsx
deleted file mode 100644
index d9a84b394090e5b4b3bd34f6135b9a2f2ead0aa2..0000000000000000000000000000000000000000
--- a/spaces/huaiji3y/bingo-Public/src/components/ui/badge.tsx
+++ /dev/null
@@ -1,36 +0,0 @@
-import * as React from 'react'
-import { cva, type VariantProps } from 'class-variance-authority'
-
-import { cn } from '@/lib/utils'
-
-const badgeVariants = cva(
- 'inline-flex items-center rounded-full border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2',
- {
- variants: {
- variant: {
- default:
- 'border-transparent bg-primary text-primary-foreground hover:bg-primary/80',
- secondary:
- 'border-transparent bg-secondary text-secondary-foreground hover:bg-secondary/80',
- destructive:
- 'border-transparent bg-destructive text-destructive-foreground hover:bg-destructive/80',
- outline: 'text-foreground'
- }
- },
- defaultVariants: {
- variant: 'default'
- }
- }
-)
-
-export interface BadgeProps
-  extends React.HTMLAttributes<HTMLDivElement>,
-    VariantProps<typeof badgeVariants> {}
-
-function Badge({ className, variant, ...props }: BadgeProps) {
-  return (
-    <div className={cn(badgeVariants({ variant }), className)} {...props} />
-  )
-}
-
-export { Badge, badgeVariants }
diff --git a/spaces/huggingface-projects/color-palette-generator-sd/frontend/vite.config.dev.ts b/spaces/huggingface-projects/color-palette-generator-sd/frontend/vite.config.dev.ts
deleted file mode 100644
index bc127c858452aa79bccb57ff8b1b85c9a2874d04..0000000000000000000000000000000000000000
--- a/spaces/huggingface-projects/color-palette-generator-sd/frontend/vite.config.dev.ts
+++ /dev/null
@@ -1,18 +0,0 @@
-import { sveltekit } from '@sveltejs/kit/vite';
-import type { UserConfig } from 'vite';
-
-const config: UserConfig = {
- plugins: [sveltekit()],
- server: {
- proxy: {
- '/moon': {
- target: 'https://huggingface.co',
- changeOrigin: true,
- cookieDomainRewrite: 'localhost',
- rewrite: (path) => path.replace(/^\/moon/, '')
- }
- }
- }
-};
-
-export default config;
diff --git a/spaces/huggingface-projects/deepfloydif-bot/deepfloydif.py b/spaces/huggingface-projects/deepfloydif-bot/deepfloydif.py
deleted file mode 100644
index 9bfe176611f10a273cae0fde03df5877b5d69180..0000000000000000000000000000000000000000
--- a/spaces/huggingface-projects/deepfloydif-bot/deepfloydif.py
+++ /dev/null
@@ -1,301 +0,0 @@
-import asyncio
-import glob
-import os
-import pathlib
-import random
-import threading
-
-import gradio as gr
-import discord
-from gradio_client import Client
-from PIL import Image
-from discord.ext import commands
-from discord.ui import Button, View
-
-HF_TOKEN = os.getenv("HF_TOKEN")
-deepfloydif_client = Client("huggingface-projects/IF", HF_TOKEN)
-DISCORD_TOKEN = os.getenv("DISCORD_TOKEN")
-intents = discord.Intents.all()
-bot = commands.Bot(command_prefix="/", intents=intents)
-
-
-@bot.event
-async def on_ready():
- print(f"Logged in as {bot.user} (ID: {bot.user.id})")
- synced = await bot.tree.sync()
- print(f"Synced commands: {', '.join([s.name for s in synced])}.")
- print("------")
-
-
-@bot.hybrid_command(
- name="deepfloydif",
- description="Enter a prompt to generate an image! Can generate realistic text, too!",
-)
-async def deepfloydif(ctx, prompt: str):
- """DeepfloydIF stage 1 generation"""
- try:
- await deepfloydif_generate64(ctx, prompt)
- except Exception as e:
- print(f"Error: {e}")
-
-
-def deepfloydif_generate64_inference(prompt):
- """Generates four images based on a prompt"""
- negative_prompt = ""
- seed = random.randint(0, 1000)
- number_of_images = 4
- guidance_scale = 7
- custom_timesteps_1 = "smart50"
- number_of_inference_steps = 50
- (
- stage_1_images,
- stage_1_param_path,
- path_for_upscale256_upscaling,
- ) = deepfloydif_client.predict(
- prompt,
- negative_prompt,
- seed,
- number_of_images,
- guidance_scale,
- custom_timesteps_1,
- number_of_inference_steps,
- api_name="/generate64",
- )
- return [stage_1_images, stage_1_param_path, path_for_upscale256_upscaling]
-
-
-def deepfloydif_upscale256_inference(index, path_for_upscale256_upscaling):
- """Upscales one of the images from deepfloydif_generate64_inference based on the chosen index"""
- selected_index_for_upscale256 = index
- seed_2 = 0
- guidance_scale_2 = 4
- custom_timesteps_2 = "smart50"
- number_of_inference_steps_2 = 50
- result_path = deepfloydif_client.predict(
- path_for_upscale256_upscaling,
- selected_index_for_upscale256,
- seed_2,
- guidance_scale_2,
- custom_timesteps_2,
- number_of_inference_steps_2,
- api_name="/upscale256",
- )
- return result_path
-
-
-def deepfloydif_upscale1024_inference(index, path_for_upscale256_upscaling, prompt):
- """Upscales to stage 2, then stage 3"""
- selected_index_for_upscale256 = index
- seed_2 = 0 # default seed for stage 2 256 upscaling
- guidance_scale_2 = 4 # default for stage 2
- custom_timesteps_2 = "smart50" # default for stage 2
- number_of_inference_steps_2 = 50 # default for stage 2
- negative_prompt = "" # empty (not used, could add in the future)
-
- seed_3 = 0 # default for stage 3 1024 upscaling
- guidance_scale_3 = 9 # default for stage 3
- number_of_inference_steps_3 = 40 # default for stage 3
-
- result_path = deepfloydif_client.predict(
- path_for_upscale256_upscaling,
- selected_index_for_upscale256,
- seed_2,
- guidance_scale_2,
- custom_timesteps_2,
- number_of_inference_steps_2,
- prompt,
- negative_prompt,
- seed_3,
- guidance_scale_3,
- number_of_inference_steps_3,
- api_name="/upscale1024",
- )
- return result_path
-
-
-def load_image(png_files, stage_1_images):
- """Opens images as variables so we can combine them later"""
- results = []
- for file in png_files:
- png_path = os.path.join(stage_1_images, file)
- results.append(Image.open(png_path))
- return results
-
-
-def combine_images(png_files, stage_1_images, partial_path):
- if os.environ.get("TEST_ENV") == "True":
- print("Combining images for deepfloydif_generate64")
- images = load_image(png_files, stage_1_images)
- combined_image = Image.new("RGB", (images[0].width * 2, images[0].height * 2))
- combined_image.paste(images[0], (0, 0))
- combined_image.paste(images[1], (images[0].width, 0))
- combined_image.paste(images[2], (0, images[0].height))
- combined_image.paste(images[3], (images[0].width, images[0].height))
- combined_image_path = os.path.join(stage_1_images, f"{partial_path}.png")
- combined_image.save(combined_image_path)
- return combined_image_path
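-
-# Layout sketch (illustrative): the four stage-1 images are tiled as
-#   [0][1]
-#   [2][3]
-# matching the ↖ ↗ ↙ ↘ buttons below, whose custom_ids 0-3 select an image for upscaling.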
-
-
-async def deepfloydif_generate64(ctx, prompt):
- """DeepfloydIF command (generate images with realistic text using slash commands)"""
- try:
- if ctx.guild.id == 879548962464493619:
- if ctx.channel.id != 1119313215675973714:
- return
- channel = ctx.channel
- # interaction.response message can't be used to create a thread, so we create another message
- message = await ctx.send(f"**{prompt}** - {ctx.author.mention} (generating...)")
-
- loop = asyncio.get_running_loop()
- result = await loop.run_in_executor(None, deepfloydif_generate64_inference, prompt)
- stage_1_images = result[0]
- path_for_upscale256_upscaling = result[2]
-
- partial_path = pathlib.Path(path_for_upscale256_upscaling).name
- png_files = list(glob.glob(f"{stage_1_images}/**/*.png"))
-
- if png_files:
- await message.delete()
- combined_image_path = combine_images(png_files, stage_1_images, partial_path)
- if os.environ.get("TEST_ENV") == "True":
- print("Images combined for deepfloydif_generate64")
-
- with Image.open(combined_image_path) as img:
- width, height = img.size
- new_width = width * 3
- new_height = height * 3
- resized_img = img.resize((new_width, new_height))
- x2_combined_image_path = combined_image_path
- resized_img.save(x2_combined_image_path)
-
- # making image bigger, more readable
- with open(x2_combined_image_path, "rb") as f: # was combined_image_path
- button1 = Button(custom_id="0", emoji="↖")
- button2 = Button(custom_id="1", emoji="↗")
- button3 = Button(custom_id="2", emoji="↙")
- button4 = Button(custom_id="3", emoji="↘")
-
- async def button_callback(interaction):
- index = int(interaction.data["custom_id"]) # 0,1,2,3
-
- await interaction.response.send_message(
- f"{interaction.user.mention} (upscaling...)", ephemeral=True
- )
- result_path = await deepfloydif_upscale256(index, path_for_upscale256_upscaling)
-
- # create and use upscale 1024 button
- with open(result_path, "rb") as f:
- upscale1024 = Button(label="High-quality upscale (x4)", custom_id=str(index))
- upscale1024.callback = upscale1024_callback
- view = View(timeout=None)
- view.add_item(upscale1024)
-
- await interaction.delete_original_response()
- await channel.send(
- content=(
- f"{interaction.user.mention} Here is the upscaled image! Click the button"
- " to upscale even more!"
- ),
- file=discord.File(f, f"{prompt}.png"),
- view=view,
- )
-
- async def upscale1024_callback(interaction):
- index = int(interaction.data["custom_id"])
-
- await interaction.response.send_message(
- f"{interaction.user.mention} (upscaling...)", ephemeral=True
- )
- result_path = await deepfloydif_upscale1024(index, path_for_upscale256_upscaling, prompt)
-
- with open(result_path, "rb") as f:
- await interaction.delete_original_response()
- await channel.send(
- content=f"{interaction.user.mention} Here's your high-quality x16 image!",
- file=discord.File(f, f"{prompt}.png"),
- )
-
- button1.callback = button_callback
- button2.callback = button_callback
- button3.callback = button_callback
- button4.callback = button_callback
-
- view = View(timeout=None)
- view.add_item(button1)
- view.add_item(button2)
- view.add_item(button3)
- view.add_item(button4)
-
- # could store this message as combined_image_dfif in case it's useful for future testing
- await channel.send(
- f"**{prompt}** - {ctx.author.mention} Click a button to upscale! (make larger + enhance quality)",
- file=discord.File(f, f"{partial_path}.png"),
- view=view,
- )
- else:
- await ctx.send(f"{ctx.author.mention} No PNG files were found, cannot post them!")
-
- except Exception as e:
- print(f"Error: {e}")
-
-
-async def deepfloydif_upscale256(index: int, path_for_upscale256_upscaling):
- """upscaling function for images generated using /deepfloydif"""
- try:
- loop = asyncio.get_running_loop()
- result_path = await loop.run_in_executor(
- None, deepfloydif_upscale256_inference, index, path_for_upscale256_upscaling
- )
- return result_path
-
- except Exception as e:
- print(f"Error: {e}")
-
-
-async def deepfloydif_upscale1024(index: int, path_for_upscale256_upscaling, prompt):
- """upscaling function for images generated using /deepfloydif"""
- try:
- loop = asyncio.get_running_loop()
- result_path = await loop.run_in_executor(
- None, deepfloydif_upscale1024_inference, index, path_for_upscale256_upscaling, prompt
- )
- return result_path
-
- except Exception as e:
- print(f"Error: {e}")
-
-
-def run_bot():
- bot.run(DISCORD_TOKEN)
-
-
-threading.Thread(target=run_bot).start()
-
-
-welcome_message = """
-## Add this bot to your server by clicking this link:
-
-https://discord.com/api/oauth2/authorize?client_id=1154395078735953930&permissions=51200&scope=bot
-
-## How to use it?
-
-The bot can be triggered via `/deepfloydif` followed by your text prompt.
-
-This will generate images based on the text prompt. You can upscale the images using the buttons up to 16x!
-
-⚠️ Note ⚠️: Please make sure this bot's command does not share its name with another command in your server.
-
-⚠️ Note ⚠️: Bot commands do not work in DMs with the bot as of now.
-"""
-
-
-with gr.Blocks() as demo:
- gr.Markdown(f"""
- # Discord bot of https://huggingface.co/spaces/DeepFloyd/IF
- {welcome_message}
- """)
-
-
-demo.queue(concurrency_count=100)
-demo.queue(max_size=100)
-demo.launch()
diff --git a/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/models/arcface_torch/utils/utils_logging.py b/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/models/arcface_torch/utils/utils_logging.py
deleted file mode 100644
index 823771b7d7c45fd30fe7d5284cb52ee6ad17c834..0000000000000000000000000000000000000000
--- a/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/models/arcface_torch/utils/utils_logging.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import logging
-import os
-import sys
-
-
-class AverageMeter(object):
- """Computes and stores the average and current value"""
-
- def __init__(self):
- self.val = None
- self.avg = None
- self.sum = None
- self.count = None
- self.reset()
-
- def reset(self):
- self.val = 0
- self.avg = 0
- self.sum = 0
- self.count = 0
-
- def update(self, val, n=1):
- self.val = val
- self.sum += val * n
- self.count += n
- self.avg = self.sum / self.count
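-
-# Usage sketch (illustrative): track a running mean across batches.
-#   meter = AverageMeter()
-#   meter.update(0.5, n=32); meter.update(0.3, n=32)
-#   meter.avg  # -> 0.4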
-
-
-def init_logging(rank, models_root):
- if rank == 0:
- log_root = logging.getLogger()
- log_root.setLevel(logging.INFO)
- formatter = logging.Formatter("Training: %(asctime)s-%(message)s")
- handler_file = logging.FileHandler(os.path.join(models_root, "training.log"))
- handler_stream = logging.StreamHandler(sys.stdout)
- handler_file.setFormatter(formatter)
- handler_stream.setFormatter(formatter)
- log_root.addHandler(handler_file)
- log_root.addHandler(handler_stream)
- log_root.info("rank_id: %d" % rank)
diff --git a/spaces/imseldrith/BotX/Uploader/functions/help_ytdl.py b/spaces/imseldrith/BotX/Uploader/functions/help_ytdl.py
deleted file mode 100644
index b5f1c3aac1f91714fa901eddeac656f99dddddae..0000000000000000000000000000000000000000
--- a/spaces/imseldrith/BotX/Uploader/functions/help_ytdl.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# MIT License
-
-# Copyright (c) 2022 Hash Minner
-
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE
-
-import os
-import time
-import requests
-import logging
-
-from urllib.parse import urlparse
-
-logging.basicConfig(level=logging.DEBUG,
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-logger = logging.getLogger(__name__)
-
-
-def get_file_extension_from_url(url):
- url_path = urlparse(url).path
- basename = os.path.basename(url_path)
- return basename.split(".")[-1]
-
-
-def get_resolution(info_dict):
-    """Return (width, height) for a video info dict (as produced by youtube-dl/yt-dlp)."""
-    if {"width", "height"} <= info_dict.keys():
-        width = int(info_dict['width'])
-        height = int(info_dict['height'])
-    # Standard YouTube resolutions: https://support.google.com/youtube/answer/6375112
-    elif info_dict['height'] == 1080:
-        width = 1920
-        height = 1080
-    elif info_dict['height'] == 720:
-        width = 1280
-        height = 720
-    elif info_dict['height'] == 480:
-        width = 854
-        height = 480
-    elif info_dict['height'] == 360:
-        width = 640
-        height = 360
-    elif info_dict['height'] == 240:
-        width = 426
-        height = 240
-    else:
-        # Fallback for non-standard heights: assume a 16:9 aspect ratio
-        # so the function never returns unbound variables.
-        height = int(info_dict['height'])
-        width = int(round(height * 16 / 9))
-    return (width, height)
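-
-# Example (illustrative): get_resolution({'height': 720}) returns (1280, 720).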
diff --git a/spaces/inamXcontru/PoeticTTS/Discografia Completa Emilio Santiago Torrent Onde Encontrar os lbuns de Emlio Santiago Online.md b/spaces/inamXcontru/PoeticTTS/Discografia Completa Emilio Santiago Torrent Onde Encontrar os lbuns de Emlio Santiago Online.md
deleted file mode 100644
index 64041c25961606a5b1e217f45afb3e672aed5495..0000000000000000000000000000000000000000
--- a/spaces/inamXcontru/PoeticTTS/Discografia Completa Emilio Santiago Torrent Onde Encontrar os lbuns de Emlio Santiago Online.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
CRACK GoldenSoftware Voxler v3.3.1843 with Key [iahq76] Libro Administracion De Sueldos Y Salarios Augusto Alvarez Pdf green lantern 2 full movie in hindi download The Amazing Spider - Man mp4 movie download in hindi Kill The Rapist movie in hindi 720p torrent Dalam mobil merawanin anak kecil gustavo santaolalla discografia completa descargar flowjo 10 v10 pc crack counter strike 1.6 orange box download Libro de reingenieria de michael hammer y james champy pdf gratis
discografia completa emilio santiago torrent Kranti hd movies download 720p memories of murder dual audio hindi-745 mac os x lion 10.7 highly compressed pc tom m apostol calculus volume 2 solution manual mbs series stallion breeding farm jebanje zene i konja video psa telecharger caneco bt 54 crack 20 golmaal 1 hindi movie free download material science and metallurgy kodgire pdf free download
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/CRACK Buell Motorcycles - Ecm SPY Setup.md b/spaces/inplisQlawa/anything-midjourney-v4-1/CRACK Buell Motorcycles - Ecm SPY Setup.md
deleted file mode 100644
index a311e28ad6ebcb1085ae5cef6e004bb81f23f004..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/CRACK Buell Motorcycles - Ecm SPY Setup.md
+++ /dev/null
@@ -1,28 +0,0 @@
-
-
How to Set Up Your Buell Motorcycle with Ecm SPY
-
Ecm SPY is a software tool that allows you to monitor and adjust the parameters of your Buell motorcycle's electronic control module (ECM). You can use it to optimize the performance, fuel efficiency, and emissions of your bike. You can also diagnose and clear trouble codes, reset the adaptive fuel value (AFV), and perform other functions.
-
To use Ecm SPY, you need a compatible Buell motorcycle, a laptop or tablet with Windows operating system, a USB cable, and an interface device that connects your bike's diagnostic port to your computer. You also need to download and install the Ecm SPY software from https://www.ecmspy.com/.
Here are the steps to set up your Buell motorcycle with Ecm SPY:
-
-
Turn off your bike's ignition and connect the interface device to the diagnostic port under the seat. The diagnostic port is a black 6-pin connector that is usually covered by a rubber cap.
-
Connect the USB cable from the interface device to your computer. Make sure the drivers for the interface device are installed correctly.
-
Launch the Ecm SPY software on your computer. You should see a welcome screen with some options.
-
Select "Connect" to establish communication with your bike's ECM. You should see a green light on the bottom right corner of the screen indicating that the connection is successful.
-
Select "Read" to download the current configuration of your bike's ECM. This will take a few seconds and you will see a progress bar on the screen.
-
Select "Save" to save a backup copy of your original configuration. This is important in case you want to restore it later.
-
Select "Tune" to access the tuning options. You can adjust various parameters such as fuel maps, ignition maps, idle speed, rev limiter, fan activation temperature, and more. You can also view real-time data from your bike's sensors such as engine speed, throttle position, air temperature, oxygen sensor voltage, and more.
-
After making any changes, select "Write" to upload them to your bike's ECM. This will take a few seconds and you will see a progress bar on the screen.
-
Select "Disconnect" to end communication with your bike's ECM. You can then turn on your bike's ignition and test the results.
-
-
Note: Before making any changes, make sure you understand what each parameter does and how it affects your bike's performance. Some changes may require additional modifications such as exhaust system, air filter, spark plugs, etc. Some changes may also affect your bike's emissions and warranty. Always make small changes at a time and test them carefully. If you encounter any problems, you can always restore your original configuration by selecting "Open" and choosing the backup file you saved earlier.
Benefits of Ecm SPY for Buell Motorcycles
-
Ecm SPY is not only a tool for tuning your Buell motorcycle, but also a tool for improving its reliability, safety, and comfort. By using Ecm SPY, you can enjoy the following benefits:
-
-
You can monitor the health of your bike's sensors and detect any faults or errors that may affect its performance or cause damage. For example, you can check the oxygen sensor voltage, the intake air temperature, the engine temperature, the battery voltage, and more. You can also clear any trouble codes that may appear on your dashboard.
-
You can optimize your bike's fuel delivery and ignition timing to match your riding style, preferences, and conditions. You can adjust the fuel maps and ignition maps for each cylinder and each throttle position. You can also modify other parameters such as idle speed, rev limiter, exhaust valve switching points, fan activation temperature, and more. You can achieve better throttle response, smoother power delivery, increased torque and horsepower, improved fuel economy, and reduced emissions.
-
You can reset your bike's adaptive fuel value (AFV) to 100% whenever you want. The AFV is a parameter that adjusts the fuel delivery based on the feedback from the oxygen sensor. Over time, the AFV may drift away from 100% due to various factors such as altitude, temperature, fuel quality, etc. This may result in suboptimal performance or increased emissions. By resetting the AFV to 100%, you can ensure that your bike is running at its best.
-
You can customize your bike's features and functions to suit your needs and preferences. For example, you can enable or disable the exhaust valve that controls the sound and back pressure of your exhaust system. You can also change the fan activation temperature to keep your engine cooler or warmer depending on the weather. You can also enable or disable the diagnostic mode that allows you to test various components of your bike such as lights, gauges, switches, etc.
-
-
Ecm SPY is a powerful and versatile tool that can help you get the most out of your Buell motorcycle. By using Ecm SPY regularly, you can keep your bike in top condition and enjoy its full potential.
d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Crysis3originkeycrack VERIFIED.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Crysis3originkeycrack VERIFIED.md
deleted file mode 100644
index 5885635aa7010a40c8f3f6218b4cedaac8d6cdb3..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Crysis3originkeycrack VERIFIED.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- d5da3c52bf
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Laurell K Hamilton Anita Blake Series Epub Files.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Laurell K Hamilton Anita Blake Series Epub Files.md
deleted file mode 100644
index 7237f32b3244426733866dc35d208fa45864a6de..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Laurell K Hamilton Anita Blake Series Epub Files.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-January 12, 2554 B.C. - Traffic is the first film released in 2011. channel - to interview the superstar of Malayalam films, Sidharth Shankar (Rahman). Siddharth (Skt. Siddhartha) - "one who passed through the trap." The name "Sidharth" worn by many Buddhist teachers, 8a78ff9644
-
-
-
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Dhoom 3 720p Full Movie Download.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Dhoom 3 720p Full Movie Download.md
deleted file mode 100644
index c2427e0145813c4c80a450f12939b03f1a5fd496..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Dhoom 3 720p Full Movie Download.md
+++ /dev/null
@@ -1,8 +0,0 @@
-
-
dhoom 3 hindi full movie free download full movie online hd. dhoom 3 hindi movie free download full movie online dhoom 3 hindi full movie free download. dhoom 3 hindi full movie free download. dhoom 3 hindi full movie free online free download. full dhoom 3 hindi movie free download dhoom 3 hindi full movie free download. dhoom 3 full movie watch full dhoom 3 hindi movie free download. dhoom 3 hindi full movie free download in hd. dhoom 3 hindi movie free download in hd format. watch full dhoom 3 hindi full movie free in hd watch full dhoom 3 hindi full movie free in hd download. dhoom 3 hindi full movie free online.
-
dhoom 3 hindi full movie download free full movie online download free download full dhoom 3 hindi movie. dhoom 3 hindi movie free free download. dhoom 3 hindi full movie watch full dhoom 3 hindi full movie free download. full dhoom 3 hindi movie free download in hd format download hd. watch full dhoom 3 hindi full movie free online now. watch full hindi movie dhoom 3. free dhoom 3 download movie online for free. watch dhoom 3 full movie online free in high quality online 720p hd.
watch the full song titled 'chaand baithiye' from the movie 'dhoom:3' at 720p and 1080p resolution. here's a high quality audio version of the song 'chaand baithiye' from the film 'dhoom:3' in. stop idling - grab your phone and enjoy watching the song 'chaand baithiye' on youtube or on any other app which has a 720p or 1080p movie player.
-
watch the full song 'shikdum' from the film 'dhoom' in high definition. whether it's the sharpness of the resolution or the high quality of the sound. listen to the songs 'shikdum' and 'jaani tujh koi' from the movie 'dhoom'. download the full length movies right away using a lightning fast download speed of up to 9.7mb/s. search hd movies from youtube, mp3, avi, mov, mp4 and other formats at any time without any interruption.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Kettler World Tours 2.0 Download Crack 17 [WORK].md b/spaces/lincquiQcaudo/Top-20-Diffusion/Kettler World Tours 2.0 Download Crack 17 [WORK].md
deleted file mode 100644
index d4cbb36d6134f93fb55353943e51bd69e09786a3..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Kettler World Tours 2.0 Download Crack 17 [WORK].md
+++ /dev/null
@@ -1,29 +0,0 @@
-
-
-Hello Neighbor Alpha 1 mod apk v1.5.8 Mod Money APK
-Hello Neighbor Mod Apk is a game in which the player has to break into the house of his rival and find out what the owner is hiding.
-You have to be very careful as he will chase you and kill you.
-Hello Neighbor Mod apk is one of the most addictive and realistic adventure games, which is why it enjoys a very high reputation among users.
-If you want to know all the secrets that your neighbor is hiding, you will need this game and, of course, this one. and applications for mobile systems and devices based on Windows Mobile, Android, BlackBerry and Symbian operating systems, as well as iOS and Windows Phone.
-In addition to developing applications for mobile devices, GX Developer provides the design, testing, and debugging features required to deploy mobile applications to Windows 7 and Windows 8 environments.Benefits: • Intuitive tool with more than 1000 built-in functions to automate the development of applications and applications for mobile platforms. •
-The new project creation feature allows you to quickly and easily create projects for Windows, Windows Phone, BlackBerry, Android, and iPhone. •
-The modeling and analysis tool allows you to create basic applications for mobile platforms. •
-The project management tool allows you to create, manage and debug your own mobile application projects. •
-The database management tool allows you to use one database to manage all your mobile application projects.
-Installation: Launch GX Developer, then on the Tools tab select Projects > Create New Project > Mobile Project.
-Launch the app and follow the instructions to start creating a mobile app.
-Note. To learn how to create apps for Windows, Windows Phone, BlackBerry, Android, and iOS with GX Developer, visit http://www.gxdev.com/pages/topics/mobile-devices-software.
-Download GX Developer Mobile for Windows 7: http://windows.microsoft.com/en-us/windows/products/products.gx.com-windows7.exe
-Download GX Developer Mobile for Windows 8.1: http://windows.microsoft.com/en-us/windows/products/products.gx.com-windows8.1.exe
-Download GX Developer Mobile for Windows 10: http://windows. microsoft.com/en-us/windows/products/products.gx.com-windows10.exe
-Download GX Developer Mobile for OS X: http://mac.gxdev.com
-Download GX Developer Mobile for Android: http://android.gxdev.com
-Download GX Developer Mobile for iOS: http://www.gxdev.com/pages/topics/mobile-devices-ios.
-GX Developer User Guide at www.gxdev.com/pages/mobile-devices-guide.
-• Additional Tools: When adding a new mobile application to a project in GX Developer, you can choose from a different set of development tools that are best suited for building a specific application.
-• Source Codes: When creating a new mobile application for Android GX Developer, you can choose between Android Studio and GX Visual Studio.
-Android Studio is primarily for developers, while GX Visual Studio allows developers to access GX Developer resources.
-You can get Android Studio here: http://download.android.com/tools/studio.html. 8a78ff9644
-
-
-
diff --git a/spaces/lllqqq/so-vits-svc-models-pcr/modules/F0Predictor/__init__.py b/spaces/lllqqq/so-vits-svc-models-pcr/modules/F0Predictor/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/ltgoslo/ssa-perin/model/module/edge_classifier.py b/spaces/ltgoslo/ssa-perin/model/module/edge_classifier.py
deleted file mode 100644
index 2670a7536d035823584921c1196f98c92963329f..0000000000000000000000000000000000000000
--- a/spaces/ltgoslo/ssa-perin/model/module/edge_classifier.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python3
-# coding=utf-8
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from model.module.biaffine import Biaffine
-
-
-class EdgeClassifier(nn.Module):
- def __init__(self, dataset, args, initialize: bool, presence: bool, label: bool):
- super(EdgeClassifier, self).__init__()
-
- self.presence = presence
- if self.presence:
- if initialize:
- presence_init = torch.tensor([dataset.edge_presence_freq])
- presence_init = (presence_init / (1.0 - presence_init)).log()
- else:
- presence_init = None
-
- self.edge_presence = EdgeBiaffine(
- args.hidden_size, args.hidden_size_edge_presence, 1, args.dropout_edge_presence, bias_init=presence_init
- )
-
- self.label = label
- if self.label:
- label_init = (dataset.edge_label_freqs / (1.0 - dataset.edge_label_freqs)).log() if initialize else None
- n_labels = len(dataset.edge_label_field.vocab)
- self.edge_label = EdgeBiaffine(
- args.hidden_size, args.hidden_size_edge_label, n_labels, args.dropout_edge_label, bias_init=label_init
- )
-
- def forward(self, x):
- presence, label = None, None
-
- if self.presence:
- presence = self.edge_presence(x).squeeze(-1) # shape: (B, T, T)
- if self.label:
- label = self.edge_label(x) # shape: (B, T, T, O_1)
-
- return presence, label
-
-
-class EdgeBiaffine(nn.Module):
- def __init__(self, hidden_dim, bottleneck_dim, output_dim, dropout, bias_init=None):
- super(EdgeBiaffine, self).__init__()
- self.hidden = nn.Linear(hidden_dim, 2 * bottleneck_dim)
- self.output = Biaffine(bottleneck_dim, output_dim, bias_init=bias_init)
- self.dropout = nn.Dropout(dropout)
-
- def forward(self, x):
- x = self.dropout(F.elu(self.hidden(x))) # shape: (B, T, 2H)
- predecessors, current = x.chunk(2, dim=-1) # shape: (B, T, H), (B, T, H)
- edge = self.output(current, predecessors) # shape: (B, T, T, O)
- return edge
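-
-# Shape sketch (illustrative): for input x of shape (B, T, hidden_dim),
-# EdgeBiaffine returns edge scores of shape (B, T, T, output_dim), so EdgeClassifier
-# yields presence logits of shape (B, T, T) and label logits of shape (B, T, T, n_labels).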
diff --git a/spaces/lunbot/add/README.md b/spaces/lunbot/add/README.md
deleted file mode 100644
index 289dd8677982de91835dc7ccce1fd073139a8e1d..0000000000000000000000000000000000000000
--- a/spaces/lunbot/add/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Add
-emoji: 🌖
-colorFrom: pink
-colorTo: indigo
-sdk: static
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/magicr/BuboGPT/eval_scripts/conversation.py b/spaces/magicr/BuboGPT/eval_scripts/conversation.py
deleted file mode 100644
index 3118b813475b5913e2b62ab97115b0ba9ebde07a..0000000000000000000000000000000000000000
--- a/spaces/magicr/BuboGPT/eval_scripts/conversation.py
+++ /dev/null
@@ -1,216 +0,0 @@
-import dataclasses
-from copy import deepcopy
-from types import SimpleNamespace
-from typing import List, Union, Dict, Tuple
-import numpy as np
-
-import torch
-from PIL import Image
-from torch import nn, Tensor
-from transformers import StoppingCriteria, StoppingCriteriaList
-
-from eval_scripts.eval_utils import load_image, load_audio
-from imagebind.models.image_bind import ModalityType
-from bubogpt import BaseProcessor
-
-Roles = SimpleNamespace(
- HUMAN="Human",
- ASSISTANT="Assistant"
-)
-
-
-class Message:
- def __init__(self, role: str, content: Union[str, None]):
- self.role = role
- self.content = content
-
-
-@dataclasses.dataclass
-class Conversation:
- """A class that keeps all conversation history."""
- system: str
- messages: List[Message]
- sep: str = "###"
-
- def get_prompt(self):
- ret = self.system + self.sep
- for message in self.messages:
- if message.content:
- ret += message.role + ": " + message.content + self.sep
- else:
- ret += message.role + ":"
- return ret
-
- def append_message(self, role, content):
- self.messages.append(Message(role, content))
-
- def copy(self):
- return Conversation(
- system=self.system,
- messages=deepcopy(self.messages),
- sep=self.sep)
-
- def dict(self):
- return {
- "system": self.system,
- "messages": [(msg.role, msg.content) for msg in self.messages],
- "sep": self.sep
- }
-
-
-class StoppingCriteriaSub(StoppingCriteria):
- def __init__(self, stops=[], encounters=1):
- super().__init__()
- self.stops = stops
-
- def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor):
- for stop in self.stops:
- if torch.all((stop == input_ids[0][-len(stop):])).item():
- return True
-
- return False
-
-
-CONV_X = Conversation(
- # system="Give the following ..."
- # "You will be able to ... once I provide it to you. Please answer my questions.",
- system="Give the following image: ImageContent or audio: . "
- "You will be able to see the image/audio once I provide it to you. Please answer my questions.",
- messages=[],
- sep="###",
-)
-
-
-# TODO: If needed and possible, rewrite this file and re-organize the definition of components.
-
-class DummyChat:
- def __init__(self, dummy_answer=None, *args, **kwargs):
- self.dummy_answer = dummy_answer
-
- def ask(self, text, conversation):
- conversation.append_message(Roles.HUMAN, text)
-
- def answer(self, *args, **kwargs):
- if self.dummy_answer is not None:
- return self.dummy_answer, None
- else:
- print(kwargs)
- return kwargs["conversation"].messages[-1].content, None
-
- def upload_img(self, *args, **kwargs):
- pass
-
- def upload_aud(self, *args, **kwargs):
- pass
-
-
-
-class Chat:
- def __init__(self,
- model: nn.Module,
- processors: Dict[str, BaseProcessor],
- device: str = 'cuda:0'
- ):
- self.device = device
- self.model = model
- self.processors = processors
- stop_words_ids = [torch.tensor([835]).to(self.device),
- torch.tensor([2277, 29937]).to(self.device)] # '###' can be encoded in two different ways.
- self.stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)])
- self.just_uploaded = False
-
- def ask(self, text, conversation):
- # NOTE: the hard code for postfix is removed.
- # end_token = ''
- # if len(conversation.messages) > 0 and conversation.messages[-1].role == Roles.HUMAN \
- # and conversation.messages[-1].content[-len(end_token):] == end_token:
- if self.just_uploaded:
- conversation.messages[-1].content = ' '.join([conversation.messages[-1].content, text])
- self.just_uploaded = False
- else:
- conversation.append_message(Roles.HUMAN, text)
-
- def answer(self, conversation, emb_list, max_new_tokens=300, num_beams=1, min_length=1, top_p=0.9,
- repetition_penalty=1.0, length_penalty=1, temperature=1.0, max_length=2000):
- # Generate an answer written by LLaMA
- conversation.append_message(Roles.ASSISTANT, None)
- embs = self.get_context_emb(conversation, emb_list)
-
- current_max_len = embs.shape[1] + max_new_tokens
- if current_max_len - max_length > 0:
- print('Warning: The number of tokens in current conversation exceeds the max length. '
- 'The model will not see the contexts outside the range.')
- begin_idx = max(0, current_max_len - max_length)
-
- embs = embs[:, begin_idx:]
-
- outputs = self.model.llama_model.generate(
- inputs_embeds=embs,
- max_new_tokens=max_new_tokens,
- stopping_criteria=self.stopping_criteria,
- num_beams=num_beams,
- do_sample=True,
- min_length=min_length,
- top_p=top_p,
- repetition_penalty=repetition_penalty,
- length_penalty=length_penalty,
- temperature=temperature,
- )
- output_token = outputs[0]
-        if output_token[0] == 0:  # the model might output an unknown token at the beginning; remove it
- output_token = output_token[1:]
- if output_token[0] == 1: # some users find that there is a start token at the beginning. remove it
- output_token = output_token[1:]
- output_text = self.model.llama_tokenizer.decode(output_token, add_special_tokens=False)
- output_text = output_text.split('###')[0] # remove the stop sign '###'
- output_text = output_text.split('Assistant:')[-1].strip()
- conversation.messages[-1].content = output_text
- return output_text, output_token.cpu().numpy()
-
- def upload_img(self, image: Union[str, Image.Image, Tensor], conversation: Conversation, emb_list: List[Tensor]):
- # Upload Image, Encode Image and Create a new message from human.
- image = load_image(image, self.processors[ModalityType.VISION]).to(self.device)
- if hasattr(self.model, "encode_img"):
-            # To be compatible with MiniGPT-4
- image_emb, _ = self.model.encode_img(image)
- else:
- all_embeddings = self.model.encode_inputs({ModalityType.VISION: image})
- image_emb = all_embeddings[ModalityType.VISION]
- emb_list.append(image_emb)
- conversation.append_message(Roles.HUMAN, "")
- self.just_uploaded = True
-
- # def upload_img_mini(self, image: Union[str, Image.Image, Tensor], conversation: Conversation, emb_list: List[Tensor]):
- # # Upload Image, Encode Image and Create a new message from human.
- # image = load_image(image, self.processors[ModalityType.VISION]).to(self.device)
- # image_emb, _ = self.model.encode_img(image)
- # emb_list.append(image_emb)
- # conversation.append_message(Roles.HUMAN, "")
-
- def upload_aud(self, audio: Union[str, Tuple[int, np.ndarray]], conversation: Conversation, emb_list: List[Tensor]):
- # Upload Audio, Encode Audio and Create a new message from human.
- audio = load_audio(audio, self.processors[ModalityType.AUDIO]).to(self.device)
- audio = audio.float()
- all_embeddings = self.model.encode_inputs({ModalityType.AUDIO: audio})
- audio_emb = all_embeddings[ModalityType.AUDIO]
- emb_list.append(audio_emb)
- conversation.append_message(Roles.HUMAN, "")
- self.just_uploaded = True
-
- def get_context_emb(self, conversation: Conversation, emb_list: List[Tensor]):
- # Insert the embeddings into the prompts and queries.
- # NOTE: Assume the placeholders have been aligned to the embeddings!
- prompt = conversation.get_prompt()
- print(prompt)
- prompt_segs = prompt.split('')
- assert len(prompt_segs) == len(emb_list) + 1, "Unmatched numbers of placeholders and embeddings."
- seg_tokens = [
- self.model.llama_tokenizer(
- seg, return_tensors="pt", add_special_tokens=i == 0).to(self.device).input_ids
- # only add bos to the first seg
- for i, seg in enumerate(prompt_segs)
- ]
- seg_embs = [self.model.llama_model.model.embed_tokens(seg_t) for seg_t in seg_tokens]
- mixed_embs = [emb for pair in zip(seg_embs[:-1], emb_list) for emb in pair] + [seg_embs[-1]]
- mixed_embs = torch.cat(mixed_embs, dim=1)
- return mixed_embs
\ No newline at end of file
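For readers skimming this deleted module, the key move in `get_context_emb` is interleaving the token embeddings of prompt segments with pre-computed media embeddings. Below is a minimal standalone sketch of that pattern; it assumes a generic Hugging Face causal LM (`model`) and its `tokenizer`, and the names `placeholder` and `media_embs` are illustrative, not part of the original file.

```python
import torch

def interleave_prompt_embeddings(prompt, placeholder, media_embs, model, tokenizer, device="cpu"):
    # Split the prompt at each media placeholder; k placeholders -> k + 1 text segments.
    segs = prompt.split(placeholder)
    assert len(segs) == len(media_embs) + 1, "Unmatched numbers of placeholders and embeddings."
    # Tokenize each text segment; only the first segment gets the BOS token.
    seg_ids = [
        tokenizer(seg, return_tensors="pt", add_special_tokens=(i == 0)).input_ids.to(device)
        for i, seg in enumerate(segs)
    ]
    seg_embs = [model.get_input_embeddings()(ids) for ids in seg_ids]
    # Interleave: text0, media0, text1, media1, ..., textK.
    mixed = [emb for pair in zip(seg_embs[:-1], media_embs) for emb in pair] + [seg_embs[-1]]
    return torch.cat(mixed, dim=1)  # shape (1, total_len, hidden_dim)
```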
diff --git a/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/data/degradations.py b/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/data/degradations.py
deleted file mode 100644
index 578967483e20c969931dc6082c9b007ea9f1c714..0000000000000000000000000000000000000000
--- a/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/data/degradations.py
+++ /dev/null
@@ -1,765 +0,0 @@
-import cv2
-import math
-import numpy as np
-import random
-import torch
-from scipy import special
-from scipy.stats import multivariate_normal
-from torchvision.transforms.functional_tensor import rgb_to_grayscale
-
-# -------------------------------------------------------------------- #
-# --------------------------- blur kernels --------------------------- #
-# -------------------------------------------------------------------- #
-
-
-# --------------------------- util functions --------------------------- #
-def sigma_matrix2(sig_x, sig_y, theta):
- """Calculate the rotated sigma matrix (two dimensional matrix).
-
- Args:
- sig_x (float):
- sig_y (float):
- theta (float): Radian measurement.
-
- Returns:
- ndarray: Rotated sigma matrix.
- """
- d_matrix = np.array([[sig_x**2, 0], [0, sig_y**2]])
- u_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
- return np.dot(u_matrix, np.dot(d_matrix, u_matrix.T))
-
-
-def mesh_grid(kernel_size):
- """Generate the mesh grid, centering at zero.
-
- Args:
- kernel_size (int):
-
- Returns:
- xy (ndarray): with the shape (kernel_size, kernel_size, 2)
- xx (ndarray): with the shape (kernel_size, kernel_size)
- yy (ndarray): with the shape (kernel_size, kernel_size)
- """
- ax = np.arange(-kernel_size // 2 + 1., kernel_size // 2 + 1.)
- xx, yy = np.meshgrid(ax, ax)
- xy = np.hstack((xx.reshape((kernel_size * kernel_size, 1)), yy.reshape(kernel_size * kernel_size,
- 1))).reshape(kernel_size, kernel_size, 2)
- return xy, xx, yy
-
-
-def pdf2(sigma_matrix, grid):
- """Calculate PDF of the bivariate Gaussian distribution.
-
- Args:
- sigma_matrix (ndarray): with the shape (2, 2)
- grid (ndarray): generated by :func:`mesh_grid`,
- with the shape (K, K, 2), K is the kernel size.
-
- Returns:
-        kernel (ndarray): un-normalized kernel.
- """
- inverse_sigma = np.linalg.inv(sigma_matrix)
- kernel = np.exp(-0.5 * np.sum(np.dot(grid, inverse_sigma) * grid, 2))
- return kernel
-
-
-def cdf2(d_matrix, grid):
- """Calculate the CDF of the standard bivariate Gaussian distribution.
- Used in skewed Gaussian distribution.
-
- Args:
-        d_matrix (ndarray): skew matrix.
- grid (ndarray): generated by :func:`mesh_grid`,
- with the shape (K, K, 2), K is the kernel size.
-
- Returns:
- cdf (ndarray): skewed cdf.
- """
- rv = multivariate_normal([0, 0], [[1, 0], [0, 1]])
- grid = np.dot(grid, d_matrix)
- cdf = rv.cdf(grid)
- return cdf
-
-
-def bivariate_Gaussian(kernel_size, sig_x, sig_y, theta, grid=None, isotropic=True):
- """Generate a bivariate isotropic or anisotropic Gaussian kernel.
-
-    In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` are ignored.
-
- Args:
- kernel_size (int):
- sig_x (float):
- sig_y (float):
- theta (float): Radian measurement.
- grid (ndarray, optional): generated by :func:`mesh_grid`,
- with the shape (K, K, 2), K is the kernel size. Default: None
- isotropic (bool):
-
- Returns:
- kernel (ndarray): normalized kernel.
- """
- if grid is None:
- grid, _, _ = mesh_grid(kernel_size)
- if isotropic:
- sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
- else:
- sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
- kernel = pdf2(sigma_matrix, grid)
- kernel = kernel / np.sum(kernel)
- return kernel
-
-
-def bivariate_generalized_Gaussian(kernel_size, sig_x, sig_y, theta, beta, grid=None, isotropic=True):
- """Generate a bivariate generalized Gaussian kernel.
- Described in `Parameter Estimation For Multivariate Generalized
- Gaussian Distributions`_
-    by Pascal et al. (2013).
-
-    In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` are ignored.
-
- Args:
- kernel_size (int):
- sig_x (float):
- sig_y (float):
- theta (float): Radian measurement.
- beta (float): shape parameter, beta = 1 is the normal distribution.
- grid (ndarray, optional): generated by :func:`mesh_grid`,
- with the shape (K, K, 2), K is the kernel size. Default: None
-
- Returns:
- kernel (ndarray): normalized kernel.
-
- .. _Parameter Estimation For Multivariate Generalized Gaussian
- Distributions: https://arxiv.org/abs/1302.6498
- """
- if grid is None:
- grid, _, _ = mesh_grid(kernel_size)
- if isotropic:
- sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
- else:
- sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
- inverse_sigma = np.linalg.inv(sigma_matrix)
- kernel = np.exp(-0.5 * np.power(np.sum(np.dot(grid, inverse_sigma) * grid, 2), beta))
- kernel = kernel / np.sum(kernel)
- return kernel
-
-
-def bivariate_plateau(kernel_size, sig_x, sig_y, theta, beta, grid=None, isotropic=True):
-    """Generate a plateau-like isotropic or anisotropic kernel.
-    1 / (1 + x^beta)
-
- Ref: https://stats.stackexchange.com/questions/203629/is-there-a-plateau-shaped-distribution
-
-    In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` are ignored.
-
- Args:
- kernel_size (int):
- sig_x (float):
- sig_y (float):
- theta (float): Radian measurement.
- beta (float): shape parameter, beta = 1 is the normal distribution.
- grid (ndarray, optional): generated by :func:`mesh_grid`,
- with the shape (K, K, 2), K is the kernel size. Default: None
-
- Returns:
- kernel (ndarray): normalized kernel.
- """
- if grid is None:
- grid, _, _ = mesh_grid(kernel_size)
- if isotropic:
- sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
- else:
- sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
- inverse_sigma = np.linalg.inv(sigma_matrix)
- kernel = np.reciprocal(np.power(np.sum(np.dot(grid, inverse_sigma) * grid, 2), beta) + 1)
- kernel = kernel / np.sum(kernel)
- return kernel
-
-
-def random_bivariate_Gaussian(kernel_size,
- sigma_x_range,
- sigma_y_range,
- rotation_range,
- noise_range=None,
- isotropic=True):
- """Randomly generate bivariate isotropic or anisotropic Gaussian kernels.
-
-    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` are ignored.
-
-    Args:
-        kernel_size (int):
-        sigma_x_range (tuple): [0.6, 5]
-        sigma_y_range (tuple): [0.6, 5]
-        rotation_range (tuple): [-math.pi, math.pi]
-        noise_range (tuple, optional): multiplicative kernel noise,
- [0.75, 1.25]. Default: None
-
- Returns:
- kernel (ndarray):
- """
- assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
- assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
- sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
- if isotropic is False:
- assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
- assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
- sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
- rotation = np.random.uniform(rotation_range[0], rotation_range[1])
- else:
- sigma_y = sigma_x
- rotation = 0
-
- kernel = bivariate_Gaussian(kernel_size, sigma_x, sigma_y, rotation, isotropic=isotropic)
-
- # add multiplicative noise
- if noise_range is not None:
- assert noise_range[0] < noise_range[1], 'Wrong noise range.'
- noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
- kernel = kernel * noise
- kernel = kernel / np.sum(kernel)
- return kernel
-
-
-def random_bivariate_generalized_Gaussian(kernel_size,
- sigma_x_range,
- sigma_y_range,
- rotation_range,
- beta_range,
- noise_range=None,
- isotropic=True):
- """Randomly generate bivariate generalized Gaussian kernels.
-
-    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` are ignored.
-
-    Args:
-        kernel_size (int):
-        sigma_x_range (tuple): [0.6, 5]
-        sigma_y_range (tuple): [0.6, 5]
-        rotation_range (tuple): [-math.pi, math.pi]
-        beta_range (tuple): [0.5, 8]
-        noise_range (tuple, optional): multiplicative kernel noise,
- [0.75, 1.25]. Default: None
-
- Returns:
- kernel (ndarray):
- """
- assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
- assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
- sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
- if isotropic is False:
- assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
- assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
- sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
- rotation = np.random.uniform(rotation_range[0], rotation_range[1])
- else:
- sigma_y = sigma_x
- rotation = 0
-
- # assume beta_range[0] < 1 < beta_range[1]
- if np.random.uniform() < 0.5:
- beta = np.random.uniform(beta_range[0], 1)
- else:
- beta = np.random.uniform(1, beta_range[1])
-
- kernel = bivariate_generalized_Gaussian(kernel_size, sigma_x, sigma_y, rotation, beta, isotropic=isotropic)
-
- # add multiplicative noise
- if noise_range is not None:
- assert noise_range[0] < noise_range[1], 'Wrong noise range.'
- noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
- kernel = kernel * noise
- kernel = kernel / np.sum(kernel)
- return kernel
-
-
-def random_bivariate_plateau(kernel_size,
- sigma_x_range,
- sigma_y_range,
- rotation_range,
- beta_range,
- noise_range=None,
- isotropic=True):
- """Randomly generate bivariate plateau kernels.
-
-    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` are ignored.
-
-    Args:
-        kernel_size (int):
-        sigma_x_range (tuple): [0.6, 5]
-        sigma_y_range (tuple): [0.6, 5]
-        rotation_range (tuple): [-math.pi/2, math.pi/2]
-        beta_range (tuple): [1, 4]
-        noise_range (tuple, optional): multiplicative kernel noise,
- [0.75, 1.25]. Default: None
-
- Returns:
- kernel (ndarray):
- """
- assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
- assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
- sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
- if isotropic is False:
- assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
- assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
- sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
- rotation = np.random.uniform(rotation_range[0], rotation_range[1])
- else:
- sigma_y = sigma_x
- rotation = 0
-
- # TODO: this may be not proper
- if np.random.uniform() < 0.5:
- beta = np.random.uniform(beta_range[0], 1)
- else:
- beta = np.random.uniform(1, beta_range[1])
-
- kernel = bivariate_plateau(kernel_size, sigma_x, sigma_y, rotation, beta, isotropic=isotropic)
- # add multiplicative noise
- if noise_range is not None:
- assert noise_range[0] < noise_range[1], 'Wrong noise range.'
- noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
- kernel = kernel * noise
- kernel = kernel / np.sum(kernel)
-
- return kernel
-
-
-def random_mixed_kernels(kernel_list,
- kernel_prob,
- kernel_size=21,
- sigma_x_range=(0.6, 5),
- sigma_y_range=(0.6, 5),
- rotation_range=(-math.pi, math.pi),
- betag_range=(0.5, 8),
- betap_range=(0.5, 8),
- noise_range=None):
- """Randomly generate mixed kernels.
-
- Args:
-        kernel_list (tuple): a list of kernel type names; supported types are
-            ['iso', 'aniso', 'generalized_iso', 'generalized_aniso',
-            'plateau_iso', 'plateau_aniso']
-        kernel_prob (tuple): corresponding sampling probability for each
-            kernel type
-        kernel_size (int):
-        sigma_x_range (tuple): [0.6, 5]
-        sigma_y_range (tuple): [0.6, 5]
-        rotation_range (tuple): [-math.pi, math.pi]
-        betag_range (tuple): [0.5, 8], shape parameter for generalized Gaussian kernels
-        betap_range (tuple): [0.5, 8], shape parameter for plateau kernels
-        noise_range (tuple, optional): multiplicative kernel noise,
- [0.75, 1.25]. Default: None
-
- Returns:
- kernel (ndarray):
- """
- kernel_type = random.choices(kernel_list, kernel_prob)[0]
- if kernel_type == 'iso':
- kernel = random_bivariate_Gaussian(
- kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=True)
- elif kernel_type == 'aniso':
- kernel = random_bivariate_Gaussian(
- kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=False)
- elif kernel_type == 'generalized_iso':
- kernel = random_bivariate_generalized_Gaussian(
- kernel_size,
- sigma_x_range,
- sigma_y_range,
- rotation_range,
- betag_range,
- noise_range=noise_range,
- isotropic=True)
- elif kernel_type == 'generalized_aniso':
- kernel = random_bivariate_generalized_Gaussian(
- kernel_size,
- sigma_x_range,
- sigma_y_range,
- rotation_range,
- betag_range,
- noise_range=noise_range,
- isotropic=False)
- elif kernel_type == 'plateau_iso':
- kernel = random_bivariate_plateau(
- kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=True)
- elif kernel_type == 'plateau_aniso':
- kernel = random_bivariate_plateau(
- kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=False)
- return kernel
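A short usage sketch for `random_mixed_kernels`; the probability and range values below are illustrative only and are not taken from any particular config in this repository.

```python
import math

kernel = random_mixed_kernels(
    kernel_list=['iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'],
    kernel_prob=[0.45, 0.25, 0.12, 0.03, 0.12, 0.03],
    kernel_size=21,
    sigma_x_range=(0.2, 3),
    sigma_y_range=(0.2, 3),
    rotation_range=(-math.pi, math.pi),
    betag_range=(0.5, 4),
    betap_range=(1, 2),
    noise_range=None,
)
assert abs(kernel.sum() - 1.0) < 1e-6  # every branch normalizes the kernel to sum to 1
```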
-
-
-np.seterr(divide='ignore', invalid='ignore')
-
-
-def circular_lowpass_kernel(cutoff, kernel_size, pad_to=0):
- """2D sinc filter, ref: https://dsp.stackexchange.com/questions/58301/2-d-circularly-symmetric-low-pass-filter
-
- Args:
- cutoff (float): cutoff frequency in radians (pi is max)
- kernel_size (int): horizontal and vertical size, must be odd.
- pad_to (int): pad kernel size to desired size, must be odd or zero.
- """
- assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
- kernel = np.fromfunction(
- lambda x, y: cutoff * special.j1(cutoff * np.sqrt(
- (x - (kernel_size - 1) / 2)**2 + (y - (kernel_size - 1) / 2)**2)) / (2 * np.pi * np.sqrt(
- (x - (kernel_size - 1) / 2)**2 + (y - (kernel_size - 1) / 2)**2)), [kernel_size, kernel_size])
- kernel[(kernel_size - 1) // 2, (kernel_size - 1) // 2] = cutoff**2 / (4 * np.pi)
- kernel = kernel / np.sum(kernel)
- if pad_to > kernel_size:
- pad_size = (pad_to - kernel_size) // 2
- kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))
- return kernel
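Usage sketch for the 2D sinc filter; `img` is an assumed float32 HxWx3 image in [0, 1], not a variable defined in this module.

```python
import numpy as np
import cv2

omega_c = np.random.uniform(np.pi / 3, np.pi)              # cutoff frequency in radians
sinc_kernel = circular_lowpass_kernel(omega_c, kernel_size=21, pad_to=0)
blurred = cv2.filter2D(img, -1, sinc_kernel)                # ringing/overshoot grows as omega_c shrinks
```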
-
-
-# ------------------------------------------------------------- #
-# --------------------------- noise --------------------------- #
-# ------------------------------------------------------------- #
-
-# ----------------------- Gaussian Noise ----------------------- #
-
-
-def generate_gaussian_noise(img, sigma=10, gray_noise=False):
- """Generate Gaussian noise.
-
-    Args:
-        img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
-        sigma (float): Noise scale (measured in range 255). Default: 10.
-        gray_noise (bool): Whether to generate gray (channel-shared) noise. Default: False.
-
-    Returns:
-        (Numpy array): Generated noise, shape (h, w, c), float32.
- """
- if gray_noise:
- noise = np.float32(np.random.randn(*(img.shape[0:2]))) * sigma / 255.
- noise = np.expand_dims(noise, axis=2).repeat(3, axis=2)
- else:
- noise = np.float32(np.random.randn(*(img.shape))) * sigma / 255.
- return noise
-
-
-def add_gaussian_noise(img, sigma=10, clip=True, rounds=False, gray_noise=False):
- """Add Gaussian noise.
-
- Args:
- img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
- sigma (float): Noise scale (measured in range 255). Default: 10.
-
- Returns:
- (Numpy array): Returned noisy image, shape (h, w, c), range[0, 1],
- float32.
- """
- noise = generate_gaussian_noise(img, sigma, gray_noise)
- out = img + noise
- if clip and rounds:
- out = np.clip((out * 255.0).round(), 0, 255) / 255.
- elif clip:
- out = np.clip(out, 0, 1)
- elif rounds:
- out = (out * 255.0).round() / 255.
- return out
-
-
-def generate_gaussian_noise_pt(img, sigma=10, gray_noise=0):
-    """Generate Gaussian noise (PyTorch version).
-
-    Args:
-        img (Tensor): Shape (b, c, h, w), range [0, 1], float32.
-        sigma (float | Tensor): Noise scale (measured in range 255). Default: 10.
-        gray_noise (float | Tensor): 0-1 number or Tensor with shape (b). Default: 0.
-
-    Returns:
-        (Tensor): Generated noise, shape (b, c, h, w), float32.
- """
- b, _, h, w = img.size()
- if not isinstance(sigma, (float, int)):
- sigma = sigma.view(img.size(0), 1, 1, 1)
- if isinstance(gray_noise, (float, int)):
- cal_gray_noise = gray_noise > 0
- else:
- gray_noise = gray_noise.view(b, 1, 1, 1)
- cal_gray_noise = torch.sum(gray_noise) > 0
-
- if cal_gray_noise:
- noise_gray = torch.randn(*img.size()[2:4], dtype=img.dtype, device=img.device) * sigma / 255.
- noise_gray = noise_gray.view(b, 1, h, w)
-
- # always calculate color noise
- noise = torch.randn(*img.size(), dtype=img.dtype, device=img.device) * sigma / 255.
-
- if cal_gray_noise:
- noise = noise * (1 - gray_noise) + noise_gray * gray_noise
- return noise
-
-
-def add_gaussian_noise_pt(img, sigma=10, gray_noise=0, clip=True, rounds=False):
- """Add Gaussian noise (PyTorch version).
-
- Args:
- img (Tensor): Shape (b, c, h, w), range[0, 1], float32.
-        sigma (float | Tensor): Noise scale (measured in range 255). Default: 10.
-
- Returns:
- (Tensor): Returned noisy image, shape (b, c, h, w), range[0, 1],
- float32.
- """
- noise = generate_gaussian_noise_pt(img, sigma, gray_noise)
- out = img + noise
- if clip and rounds:
- out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
- elif clip:
- out = torch.clamp(out, 0, 1)
- elif rounds:
- out = (out * 255.0).round() / 255.
- return out
-
-
-# ----------------------- Random Gaussian Noise ----------------------- #
-def random_generate_gaussian_noise(img, sigma_range=(0, 10), gray_prob=0):
- sigma = np.random.uniform(sigma_range[0], sigma_range[1])
- if np.random.uniform() < gray_prob:
- gray_noise = True
- else:
- gray_noise = False
- return generate_gaussian_noise(img, sigma, gray_noise)
-
-
-def random_add_gaussian_noise(img, sigma_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
- noise = random_generate_gaussian_noise(img, sigma_range, gray_prob)
- out = img + noise
- if clip and rounds:
- out = np.clip((out * 255.0).round(), 0, 255) / 255.
- elif clip:
- out = np.clip(out, 0, 1)
- elif rounds:
- out = (out * 255.0).round() / 255.
- return out
-
-
-def random_generate_gaussian_noise_pt(img, sigma_range=(0, 10), gray_prob=0):
- sigma = torch.rand(
- img.size(0), dtype=img.dtype, device=img.device) * (sigma_range[1] - sigma_range[0]) + sigma_range[0]
- gray_noise = torch.rand(img.size(0), dtype=img.dtype, device=img.device)
- gray_noise = (gray_noise < gray_prob).float()
- return generate_gaussian_noise_pt(img, sigma, gray_noise)
-
-
-def random_add_gaussian_noise_pt(img, sigma_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
- noise = random_generate_gaussian_noise_pt(img, sigma_range, gray_prob)
- out = img + noise
- if clip and rounds:
- out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
- elif clip:
- out = torch.clamp(out, 0, 1)
- elif rounds:
- out = (out * 255.0).round() / 255.
- return out
-
-
-# ----------------------- Poisson (Shot) Noise ----------------------- #
-
-
-def generate_poisson_noise(img, scale=1.0, gray_noise=False):
- """Generate poisson noise.
-
- Ref: https://github.com/scikit-image/scikit-image/blob/main/skimage/util/noise.py#L37-L219
-
- Args:
- img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
- scale (float): Noise scale. Default: 1.0.
- gray_noise (bool): Whether generate gray noise. Default: False.
-
- Returns:
- (Numpy array): Returned noisy image, shape (h, w, c), range[0, 1],
- float32.
- """
- if gray_noise:
- img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
- # round and clip image for counting vals correctly
- img = np.clip((img * 255.0).round(), 0, 255) / 255.
- vals = len(np.unique(img))
- vals = 2**np.ceil(np.log2(vals))
- out = np.float32(np.random.poisson(img * vals) / float(vals))
- noise = out - img
- if gray_noise:
- noise = np.repeat(noise[:, :, np.newaxis], 3, axis=2)
- return noise * scale
-
-
-def add_poisson_noise(img, scale=1.0, clip=True, rounds=False, gray_noise=False):
- """Add poisson noise.
-
- Args:
- img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
- scale (float): Noise scale. Default: 1.0.
- gray_noise (bool): Whether generate gray noise. Default: False.
-
- Returns:
- (Numpy array): Returned noisy image, shape (h, w, c), range[0, 1],
- float32.
- """
- noise = generate_poisson_noise(img, scale, gray_noise)
- out = img + noise
- if clip and rounds:
- out = np.clip((out * 255.0).round(), 0, 255) / 255.
- elif clip:
- out = np.clip(out, 0, 1)
- elif rounds:
- out = (out * 255.0).round() / 255.
- return out
-
-
-def generate_poisson_noise_pt(img, scale=1.0, gray_noise=0):
- """Generate a batch of poisson noise (PyTorch version)
-
- Args:
- img (Tensor): Input image, shape (b, c, h, w), range [0, 1], float32.
- scale (float | Tensor): Noise scale. Number or Tensor with shape (b).
- Default: 1.0.
- gray_noise (float | Tensor): 0-1 number or Tensor with shape (b).
- 0 for False, 1 for True. Default: 0.
-
- Returns:
- (Tensor): Returned noisy image, shape (b, c, h, w), range[0, 1],
- float32.
- """
- b, _, h, w = img.size()
- if isinstance(gray_noise, (float, int)):
- cal_gray_noise = gray_noise > 0
- else:
- gray_noise = gray_noise.view(b, 1, 1, 1)
- cal_gray_noise = torch.sum(gray_noise) > 0
- if cal_gray_noise:
- img_gray = rgb_to_grayscale(img, num_output_channels=1)
- # round and clip image for counting vals correctly
- img_gray = torch.clamp((img_gray * 255.0).round(), 0, 255) / 255.
- # use for-loop to get the unique values for each sample
- vals_list = [len(torch.unique(img_gray[i, :, :, :])) for i in range(b)]
- vals_list = [2**np.ceil(np.log2(vals)) for vals in vals_list]
- vals = img_gray.new_tensor(vals_list).view(b, 1, 1, 1)
- out = torch.poisson(img_gray * vals) / vals
- noise_gray = out - img_gray
- noise_gray = noise_gray.expand(b, 3, h, w)
-
- # always calculate color noise
- # round and clip image for counting vals correctly
- img = torch.clamp((img * 255.0).round(), 0, 255) / 255.
- # use for-loop to get the unique values for each sample
- vals_list = [len(torch.unique(img[i, :, :, :])) for i in range(b)]
- vals_list = [2**np.ceil(np.log2(vals)) for vals in vals_list]
- vals = img.new_tensor(vals_list).view(b, 1, 1, 1)
- out = torch.poisson(img * vals) / vals
- noise = out - img
- if cal_gray_noise:
- noise = noise * (1 - gray_noise) + noise_gray * gray_noise
- if not isinstance(scale, (float, int)):
- scale = scale.view(b, 1, 1, 1)
- return noise * scale
-
-
-def add_poisson_noise_pt(img, scale=1.0, clip=True, rounds=False, gray_noise=0):
- """Add poisson noise to a batch of images (PyTorch version).
-
- Args:
- img (Tensor): Input image, shape (b, c, h, w), range [0, 1], float32.
- scale (float | Tensor): Noise scale. Number or Tensor with shape (b).
- Default: 1.0.
- gray_noise (float | Tensor): 0-1 number or Tensor with shape (b).
- 0 for False, 1 for True. Default: 0.
-
- Returns:
- (Tensor): Returned noisy image, shape (b, c, h, w), range[0, 1],
- float32.
- """
- noise = generate_poisson_noise_pt(img, scale, gray_noise)
- out = img + noise
- if clip and rounds:
- out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
- elif clip:
- out = torch.clamp(out, 0, 1)
- elif rounds:
- out = (out * 255.0).round() / 255.
- return out
-
-
-# ----------------------- Random Poisson (Shot) Noise ----------------------- #
-
-
-def random_generate_poisson_noise(img, scale_range=(0, 1.0), gray_prob=0):
- scale = np.random.uniform(scale_range[0], scale_range[1])
- if np.random.uniform() < gray_prob:
- gray_noise = True
- else:
- gray_noise = False
- return generate_poisson_noise(img, scale, gray_noise)
-
-
-def random_add_poisson_noise(img, scale_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
- noise = random_generate_poisson_noise(img, scale_range, gray_prob)
- out = img + noise
- if clip and rounds:
- out = np.clip((out * 255.0).round(), 0, 255) / 255.
- elif clip:
- out = np.clip(out, 0, 1)
- elif rounds:
- out = (out * 255.0).round() / 255.
- return out
-
-
-def random_generate_poisson_noise_pt(img, scale_range=(0, 1.0), gray_prob=0):
- scale = torch.rand(
- img.size(0), dtype=img.dtype, device=img.device) * (scale_range[1] - scale_range[0]) + scale_range[0]
- gray_noise = torch.rand(img.size(0), dtype=img.dtype, device=img.device)
- gray_noise = (gray_noise < gray_prob).float()
- return generate_poisson_noise_pt(img, scale, gray_noise)
-
-
-def random_add_poisson_noise_pt(img, scale_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
- noise = random_generate_poisson_noise_pt(img, scale_range, gray_prob)
- out = img + noise
- if clip and rounds:
- out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
- elif clip:
- out = torch.clamp(out, 0, 1)
- elif rounds:
- out = (out * 255.0).round() / 255.
- return out
-
-
-# ------------------------------------------------------------------------ #
-# --------------------------- JPEG compression --------------------------- #
-# ------------------------------------------------------------------------ #
-
-
-def add_jpg_compression(img, quality=90):
- """Add JPG compression artifacts.
-
- Args:
- img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
- quality (float): JPG compression quality. 0 for lowest quality, 100 for
- best quality. Default: 90.
-
- Returns:
- (Numpy array): Returned image after JPG, shape (h, w, c), range[0, 1],
- float32.
- """
- img = np.clip(img, 0, 1)
- encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
- _, encimg = cv2.imencode('.jpg', img * 255., encode_param)
- img = np.float32(cv2.imdecode(encimg, 1)) / 255.
- return img
-
-
-def random_add_jpg_compression(img, quality_range=(90, 100)):
- """Randomly add JPG compression artifacts.
-
- Args:
- img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
- quality_range (tuple[float] | list[float]): JPG compression quality
- range. 0 for lowest quality, 100 for best quality.
- Default: (90, 100).
-
- Returns:
- (Numpy array): Returned image after JPG, shape (h, w, c), range[0, 1],
- float32.
- """
- quality = np.random.uniform(quality_range[0], quality_range[1])
- return add_jpg_compression(img, quality)
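Taken together, the helpers above compose into a simple first-order degradation pass: blur with a random kernel, add Gaussian or Poisson noise, then JPEG-compress. The sketch below is illustrative only (the parameter ranges are made up and `img` is assumed to be a float32 HxWx3 image in [0, 1]); it is not the exact pipeline used by any model in this repository.

```python
import math
import cv2
import numpy as np

def simple_degrade(img):
    # 1) blur with a randomly sampled mixed kernel
    kernel = random_mixed_kernels(
        ['iso', 'aniso'], [0.7, 0.3], kernel_size=21,
        sigma_x_range=(0.2, 3), sigma_y_range=(0.2, 3),
        rotation_range=(-math.pi, math.pi))
    img = cv2.filter2D(img, -1, kernel)
    # 2) add either Gaussian or Poisson noise
    if np.random.uniform() < 0.5:
        img = random_add_gaussian_noise(img, sigma_range=(1, 30), gray_prob=0.4)
    else:
        img = random_add_poisson_noise(img, scale_range=(0.05, 3), gray_prob=0.4)
    # 3) JPEG compression artifacts
    return random_add_jpg_compression(img, quality_range=(30, 95))
```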
diff --git a/spaces/meraih/English-Japanese-Anime-TTS/text/__init__.py b/spaces/meraih/English-Japanese-Anime-TTS/text/__init__.py
deleted file mode 100644
index 4e69c354dd24e3243980236eca962cd5945a92fc..0000000000000000000000000000000000000000
--- a/spaces/meraih/English-Japanese-Anime-TTS/text/__init__.py
+++ /dev/null
@@ -1,32 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-from text import cleaners
-
-
-def text_to_sequence(text, symbols, cleaner_names):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
-      text: string to convert to a sequence
-      symbols: list of symbols used to map characters to IDs
-      cleaner_names: names of the cleaner functions to run the text through
- Returns:
- List of integers corresponding to the symbols in the text
- '''
- _symbol_to_id = {s: i for i, s in enumerate(symbols)}
-
- sequence = []
-
- clean_text = _clean_text(text, cleaner_names)
- for symbol in clean_text:
- if symbol not in _symbol_to_id.keys():
- continue
- symbol_id = _symbol_to_id[symbol]
- sequence += [symbol_id]
- return sequence
-
-
-def _clean_text(text, cleaner_names):
- for name in cleaner_names:
- cleaner = getattr(cleaners, name)
- if not cleaner:
- raise Exception('Unknown cleaner: %s' % name)
- text = cleaner(text)
- return text
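A toy usage sketch of this module: the symbol set below is made up, and `basic_cleaners` is assumed to exist in the accompanying keithito-style `cleaners` module.

```python
symbols = list("_-!'(),.:;? abcdefghijklmnopqrstuvwxyz")
ids = text_to_sequence("Hello there!", symbols, ["basic_cleaners"])
# Characters missing from `symbols` are silently skipped rather than raising an error.
```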
diff --git a/spaces/merve/hidden-bias/source/anonymization/make-sliders.js b/spaces/merve/hidden-bias/source/anonymization/make-sliders.js
deleted file mode 100644
index 72f6dfd7c96d6c74cfb35db5854f06b668bf3d46..0000000000000000000000000000000000000000
--- a/spaces/merve/hidden-bias/source/anonymization/make-sliders.js
+++ /dev/null
@@ -1,139 +0,0 @@
-window.makeSliders = function(){
- var rv = {
- population: 144,
- headsProb: .5,
- }
-
- rv.updateHeadsProb = (headsProb) => {
- rv.headsProb = headsProb
- updateSliderPos()
-
-
- estimates.updateEstimates()
- estimates.render()
- }
-
- rv.updatePopulation = (population) => {
- rv.population = population
- updateSliderPos()
-
-
- var scale = d3.clamp(0, 13 / Math.sqrt(population), 1)
- sel.studentGroup.st({
- transformOrigin: 'top',
- transformOrigin: c.width/2 + 'px ' + 160 + 'px',
- transform: `scale(${scale})`
- })
-
- estimates.updateEstimates()
- estimates.render()
-
- sel.student.classed('inactive',(d, i) => i >= population)
- }
-
- rv.updatePopulationSlider = (val) => {
- rv.updatePopulation(val)
- }
-
- rv.updateNoiseSlider = (val) => {
- rv.updateHeadsProb(val)
- }
-
- var updateSliderPos = (function(){
- var width = d3.clamp(50, window.innerWidth/2 - 40, 145)
- var height = 30
- var color = '#007276'
-
- var sliderVals = {
- population: {
- key: 'population',
- textFn: d => rv.population + ' students' ,
- r: [144, 756],
- v: 144,
- stepFn: d => rv.updatePopulation(Math.round(d.v/2)*2),
- },
- headsProb: {
- key: 'headsProb',
- textFn: d => d3.format('.1%')(rv.headsProb) + ' chance of heads',
- r: [.2, .5],
- v: .5,
- stepFn: d => rv.updateHeadsProb(d.v),
- }
- }
- var sliders = [sliderVals.headsProb, sliderVals.population, sliderVals.headsProb]
- sliders.forEach(d => {
- d.s = d3.scaleLinear().domain(d.r).range([0, width])
- })
-
- var sliderSel = d3.selectAll('.slide-container-population,.slide-container-heads-prob').html('')
- .data(sliders)
- .classed('slider', true)
- .st({
- display: 'inline-block',
- width: width,
- paddingRight: (d, i) => i == 1 ? 40 : 0,
- marginTop: 20,
- })
-
- var textSel = sliderSel.append('div.slider-label-container')
- .st({marginBottom: -5})
-
- var svgSel = sliderSel.append('svg').at({width, height})
- .on('click', function(d){
- d.v = d.s.invert(d3.mouse(this)[0])
- d.stepFn(d)
- })
- .st({
- cursor: 'pointer'
- })
- .append('g').translate(height/2, 1)
- svgSel.append('rect').at({width, height, y: -height/2, fill: 'rgba(0,0,0,0)'})
-
- svgSel.append('path').at({
- d: `M 0 -.5 H ${width}`,
- stroke: color,
- strokeWidth: 1
- })
-
- var leftPathSel = svgSel.append('path').at({
- d: `M 0 -.5 H ${width}`,
- stroke: color,
- strokeWidth: 3
- })
-
-
- var drag = d3.drag()
- .on('drag', function(d){
- var x = d3.mouse(this)[0]
- d.v = d3.clamp(d3.min(d.r), d.s.invert(x), d3.max(d.r))
- d.stepFn(d)
- })
-
- var rectSel = svgSel.append('rect')
- .at({
- width: height/2 - 1,
- height: height/2 - 1,
- stroke: color,
- strokeWidth: 3,
- fill: '#fff',
- })
- .translate([-height/4, -height/4])
- .call(drag)
-
- return isDrag => {
- rectSel.at({x: d => Math.round(d.s(rv[d.key]))})
- textSel.text(d => d.textFn(d))
-
- leftPathSel.at({d: d => `M 0 -.5 H ${d.s(rv[d.key])}`})
- }
- })()
- updateSliderPos()
-
-
- return rv
-}
-
-
-
-
-if (window.init) window.init()
\ No newline at end of file
diff --git a/spaces/michaljunczyk/pl-asr-bigos-workspace/generate_asr_prompts.py b/spaces/michaljunczyk/pl-asr-bigos-workspace/generate_asr_prompts.py
deleted file mode 100644
index 82909934a1fed43edeac7ec2b63dc1cc3aa7790b..0000000000000000000000000000000000000000
--- a/spaces/michaljunczyk/pl-asr-bigos-workspace/generate_asr_prompts.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import openai
-import os
-
-openai.api_key = os.getenv("OPENAI_API_KEY")
-
-def prompt_gpt_completion(input):
- print("TODO")
-
- # temperature = 0-1 -> controls randomness. 0 -> repetitive, deterministic 1-> random
- # max_tokens = up to 2048 (or 4k shared between prompt and completion). One token is around 4 characters in English.
- # top_p = controls diversity by nucleus sampling. If set to 0.5 only half of all likelihood-weighted options are considered
- # frequency_penalty = penalize using frequently used words in the text generated so far
- # presence_penalty = penalize using words already used in generated text
-
-def prompt_gpt_asr_prompts(lang_code, domain, nr_of_prompts):
- print(f"Generating {nr_of_prompts} prompts for lang_code: {lang_code} and domain: {domain}")
- if(lang_code == "pl"):
- prompt = f"""Jesteś lingwistą tworzącym zdania do korpusu nagrań mowy. Zdania, które tworzysz są poprawne językowo, zróżnicowane semantycznie oraz zbilansowane fonetycznie.\n
-    Twoim zadaniem jest napisanie określonej przez użytkownika liczby zdań.
-    Zdania dotyczą domeny lub zagadnienia wskazanego przez użytkownika.
- Zdania mają postać pytań i odpowiedzi o wzrastającym poziomie trudności.
- Pierwsze zdania dotyczą ogólnych zagadnień, kolejne stopniowo coraz trudniejszych.
- Przykład:
- Input: Wygeneruj 7 zdań dla domeny "brydż sportowy".
- Output:
- Q1: Co to jest brydż sportowy?
- A1: To logiczna gra karciana, w której bierze udział czterech graczy tworzących dwie rywalizujące ze sobą pary.
- Q2: Czy brydż to sport?
- A2: Tak, brydż został oficjalnie przyjęty do Międzynarodowego Komitetu Olimpijskiego jako pełnoprawna dyscyplina sportowa.
- Q3: Czy warto grać w brydża?
- A3: Tak, 20 lat temu prof. Marian Cleeves Diamond wykazała, że gra w brydża zmniejsza ryzyko zachorowania na chorobę Alzheimera aż o 75 proc. Inne badania naukowe wykazały, że gra w brydża przynosi wymierne korzyści dla sprawności umysłowej i zdrowia mózgu u seniorów.
- Q4: Co decyduje o sukcesie w brydżu?
- A4: Umiejętności oraz sprawność umysłowa, a nie przypadek czy psychologiczne rozegranie przeciwnika jak w pokerze.
- Q5: Jakie znane osoby grają w brydża?
- A5: Bill Gates, Warren Buffett, Witold Bielecki, Renata Dancewicz, Zbigniew Boniek, Katarzyna Grochola
- Q6: Jaki jest cel gry?
- A6: Wygranie jak największej liczby lew.
- Q7: Co to jest lewa?
- A7: To cztery karty, umieszczone na stoliku zgodnie z ruchem wskazówek zegara. Lewę wygrywa gracz, którego karta miała najwyższą wartość. Gracz, który zdobył lewę zaczyna następna turę, czyli rzuca kartę jako pierwszy.
-
- Wygeneruj {nr_of_prompts} zdań dla \"{domain}\" wedle wskazanego wzorca"""
-
- elif(lang_code == "en"):
- prompt = ""
- else:
- print("unknown language!")
- return
-
- response = openai.Completion.create(
- model="text-davinci-003",
- prompt=prompt,
- temperature=0.3,
- max_tokens=2048,
- top_p=1,
- frequency_penalty=0.5,
- presence_penalty=0,
- stop=["You:"]
- )
- return(response)
-
-def save_data_to_txt(data, file_path):
-    with open(file_path, 'w', encoding='utf-8') as file:  # explicit UTF-8 so Polish text is written correctly on every platform
- file.write(data)
- print(f"The data has been saved to {file_path} successfully.")
-
-lang_code = "pl"
-domain = "nauka gry w brydża sportowego dla początkujących"
-nr_of_prompts = 5
-response=prompt_gpt_asr_prompts(lang_code, domain, nr_of_prompts)
-prompts = response.choices[0].text
-
-# Example usage
-save_data_to_txt(prompts, 'prompts.txt')
-
-
-
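A hypothetical post-processing step (not part of the original script) that splits the generated text back into question/answer pairs so they can be reviewed before recording.

```python
import re

def parse_qa_pairs(text):
    questions = dict(re.findall(r"^Q(\d+):\s*(.+)$", text, flags=re.MULTILINE))
    answers = dict(re.findall(r"^A(\d+):\s*(.+)$", text, flags=re.MULTILINE))
    # Pair Q1 with A1, Q2 with A2, ... and keep only complete pairs.
    return [(questions[i], answers[i]) for i in sorted(questions, key=int) if i in answers]

pairs = parse_qa_pairs(prompts)
```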
diff --git a/spaces/mikaelbhai/GPTBhai_TextToImage_DreamStudio/app.py b/spaces/mikaelbhai/GPTBhai_TextToImage_DreamStudio/app.py
deleted file mode 100644
index f50a1292ebd2a5eb7de2ced705a35d09cea44d38..0000000000000000000000000000000000000000
--- a/spaces/mikaelbhai/GPTBhai_TextToImage_DreamStudio/app.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import io
-import os
-
-import requests
-import gradio as gr
-from PIL import Image
-import numpy as np
-
-# Dream Studio API key (read from the environment instead of hard-coding a secret in the source)
-api_key = os.environ.get("DREAM_API_KEY")
-
-def generate_image(prompt):
- # Set up the request headers
- headers = {
- "Content-Type": "application/json",
- "Authorization": f"Bearer {api_key}"
- }
-
- # Set up the request data
- data = {
- "model": "stable-diffusion",
- "prompt": prompt,
- "steps": 100,
- "batch_size": 1,
- "gamma": 0.99,
- "device": "cpu"
- }
-
- # Send the request to Dream's API
- response = requests.post("https://api.dream.co/stable-diffusion/generate", json=data, headers=headers)
- response.raise_for_status()
-
- # Extract the image URL from the response
- image_url = response.json()["data"][0]["url"]
-
- # Download and display the image using PIL and Gradio
- image_bytes = requests.get(image_url).content
- image = Image.open(io.BytesIO(image_bytes))
- image_arr = np.array(image)
- return image_arr
-
-iface = gr.Interface(fn=generate_image, inputs="text", outputs="image", title="bhAI (text to image using Dream Studio's Stable Difusion)", description="Enter a prompt to generate an image.")
-iface.launch()
diff --git a/spaces/mikeee/wizardlm-1.0-uncensored-llama2-13b-ggmlv3/examples_list.py b/spaces/mikeee/wizardlm-1.0-uncensored-llama2-13b-ggmlv3/examples_list.py
deleted file mode 100644
index 8b0343b337df4a89295d8ebe96eccf9f3710bc9c..0000000000000000000000000000000000000000
--- a/spaces/mikeee/wizardlm-1.0-uncensored-llama2-13b-ggmlv3/examples_list.py
+++ /dev/null
@@ -1,43 +0,0 @@
-etext = """In America, where cars are an important part of the national psyche, a decade ago people had suddenly started to drive less, which had not happened since the oil shocks of the 1970s. """
-examples_list = [
- ["What NFL team won the Super Bowl in the year Justin Bieber was born?"],
- [
- "What NFL team won the Super Bowl in the year Justin Bieber was born? Think step by step."
- ],
- ["How to pick a lock? Provide detailed steps."],
- [
- "If it takes 10 hours to dry 10 clothes, assuming all the clothes are hung together at the same time for drying , then how long will it take to dry a cloth?"
- ],
- [
- "If it takes 10 hours to dry 10 clothes, assuming all the clothes are hung together at the same time for drying and there is only space to hang 10 clothes, then how long will it take to dry 23 clothes? Think step by step."
- ],
- ["is infinity + 1 bigger than infinity?"],
- ["Explain the plot of Cinderella in a sentence."],
- [
- "How long does it take to become proficient in French, and what are the best methods for retaining information?"
- ],
- ["What are some common mistakes to avoid when writing code?"],
- ["Build a prompt to generate a beautiful portrait of a horse"],
- ["Suggest four metaphors to describe the benefits of AI"],
- ["Write a pop song about leaving home for the sandy beaches."],
- ["Write a summary demonstrating my ability to tame lions"],
- ["鲁迅和周树人什么关系? 说中文。"],
- ["鲁迅和周树人什么关系?"],
- ["鲁迅和周树人什么关系? 用英文回答。"],
- ["从前有一头牛,这头牛后面有什么?"],
- ["正无穷大加一大于正无穷大吗?"],
- ["正无穷大加正无穷大大于正无穷大吗?"],
- ["-2的平方根等于什么?"],
- ["树上有5只鸟,猎人开枪打死了一只。树上还有几只鸟?"],
- ["树上有11只鸟,猎人开枪打死了一只。树上还有几只鸟?提示:需考虑鸟可能受惊吓飞走。"],
- ["以红楼梦的行文风格写一张委婉的请假条。不少于320字。"],
-    [f"Translate the following to Chinese. List 2 variants: \n{etext}"],
- [f"{etext} 翻成中文,列出3个版本。"],
- [f"{etext} \n 翻成中文,保留原意,但使用文学性的语言。不要写解释。列出3个版本。"],
- ["假定 1 + 2 = 4, 试求 7 + 8。"],
- ["给出判断一个数是不是质数的 javascript 码。"],
- ["给出实现python 里 range(10)的 javascript 码。"],
- ["给出实现python 里 [*(range(10)]的 javascript 码。"],
- ["Erkläre die Handlung von Cinderella in einem Satz."],
- ["Erkläre die Handlung von Cinderella in einem Satz. Auf Deutsch."],
-]
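For context, lists shaped like this are typically wired into a Gradio app through the `examples` argument. A minimal sketch follows; the `predict` stub is a placeholder, not the space's real inference function.

```python
import gradio as gr
from examples_list import examples_list

def predict(prompt: str) -> str:
    return prompt  # placeholder for the actual model call

# Each example is a one-element list, matching the single "text" input component.
demo = gr.Interface(fn=predict, inputs="text", outputs="text", examples=examples_list)
demo.launch()
```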
diff --git a/spaces/milyiyo/reimagine-it/captioning/data/__init__.py b/spaces/milyiyo/reimagine-it/captioning/data/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/milyiyo/reimagine-it/captioning/utils/resnet_utils.py b/spaces/milyiyo/reimagine-it/captioning/utils/resnet_utils.py
deleted file mode 100644
index e1df171ab75700352333f6af5d59f751819b57f6..0000000000000000000000000000000000000000
--- a/spaces/milyiyo/reimagine-it/captioning/utils/resnet_utils.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-class myResnet(nn.Module):
- def __init__(self, resnet):
- super(myResnet, self).__init__()
- self.resnet = resnet
-
- def forward(self, img, att_size=14):
- x = img.unsqueeze(0)
-
- x = self.resnet.conv1(x)
- x = self.resnet.bn1(x)
- x = self.resnet.relu(x)
- x = self.resnet.maxpool(x)
-
- x = self.resnet.layer1(x)
- x = self.resnet.layer2(x)
- x = self.resnet.layer3(x)
- x = self.resnet.layer4(x)
-
- fc = x.mean(3).mean(2).squeeze()
- att = F.adaptive_avg_pool2d(x,[att_size,att_size]).squeeze().permute(1, 2, 0)
-
- return fc, att
-
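Usage sketch: wrap any torchvision ResNet and pull a global feature plus a spatial feature grid from a single preprocessed image. Weights and input here are random, for shape illustration only.

```python
import torch
import torchvision.models as models

resnet = models.resnet101()             # in practice, load pretrained weights
net = myResnet(resnet).eval()
img = torch.randn(3, 224, 224)          # one preprocessed image, (C, H, W)
with torch.no_grad():
    fc, att = net(img, att_size=14)
print(fc.shape, att.shape)              # torch.Size([2048]), torch.Size([14, 14, 2048])
```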
diff --git a/spaces/mindspore-ai/Wukong-Huahua/app.py b/spaces/mindspore-ai/Wukong-Huahua/app.py
deleted file mode 100644
index 6d26b972485677aa400f3187f3f1fa0a569a66d8..0000000000000000000000000000000000000000
--- a/spaces/mindspore-ai/Wukong-Huahua/app.py
+++ /dev/null
@@ -1,110 +0,0 @@
-import os
-import requests
-import json
-import gradio as gr
-from PIL import Image
-from io import BytesIO
-
-
-def generate_figure(style, desc):
- url = os.environ["req_url"]
-
- requests_json = {
- "style": style,
- "desc": desc
- }
-
- headers = {
- "Content-Type": "application/json",
- }
-
- response = requests.post(url, json=requests_json, headers=headers, verify=False)
-
- status = response.status_code
- assert status == 201
-
- response = json.loads(response.text)
-
- url_dict = response["data"]["pictures"]
- image_list = []
- for k in url_dict:
- image_list.append(Image.open(BytesIO(requests.get(url_dict[k]).content)))
-
- return image_list
-
-
-def read_content(file_path: str) -> str:
- with open(file_path, 'r', encoding='utf-8') as f:
- content = f.read()
-
- return content
-
-
-examples_style = ["宫崎骏", "新海诚", "梵高", "赛博朋克", "水彩", "莫奈"]
-examples_desc = ["城市夜景", "海滩 美景", "一只猫", "摩天大楼", "鸢尾花", "秋水共长天一色"]
-
-css = """
-.gradio-container {background-image: url('file=./background.jpg'); background-size:cover; background-repeat: no-repeat;}
-
-#generate {
- background: linear-gradient(#D8C5EB, #C5E8EB,#90CCF6);
- border: 1px solid #C5E8EB;
- border-radius: 8px;
- color: #26498B
-}
-"""
-
-# warm up
-generate_figure("梵高", "一只猫")
-
-with gr.Blocks(css=css) as demo:
- gr.HTML(read_content("./header.html"))
-
-    gr.Markdown("# MindSpore Wukong-Huahua "
-                "\nWukong-Huahua is a diffusion-based model that performs the text-to-image task in Chinese. "
-                "It was developed by Huawei Noah's Ark Lab in cooperation with "
-                "the Distributed & Parallel Software Lab and the Ascend Product Development Unit. "
-                "It was trained on the Wukong dataset and implemented with MindSpore + Ascend, "
-                "a combined software and hardware solution. Welcome to try Wukong-Huahua on our online platform.")
-
- with gr.Tab("图片生成 (Figure Generation)"):
-
- style_input = gr.Textbox(lines=1,
- placeholder="输入中文风格描述",
- label="Input the style of figure you want to generate. (use Chinese better)",
- elem_id="style-input")
- gr.Examples(
- examples=examples_style,
- inputs=style_input,
- )
- with gr.Row():
- gr.Markdown(" *** ")
- desc_input = gr.Textbox(lines=1,
- placeholder="输入中文图片描述",
- label="Input a sentence to describe the figure you want to generate. "
- "(use Chinese better)")
- gr.Examples(
- examples=examples_desc,
- inputs=desc_input,
- )
- generate_button = gr.Button("Generate", elem_id="generate")
- with gr.Row():
- img_output1 = gr.Image(type="pil")
- img_output2 = gr.Image(type="pil")
- img_output3 = gr.Image(type="pil")
- img_output4 = gr.Image(type="pil")
-
- with gr.Accordion("Open for More!"):
- gr.Markdown("- If you want to know more about the foundation models of MindSpore, please visit "
- "[The Foundation Models Platform for Mindspore](https://xihe.mindspore.cn/)")
- gr.Markdown("- If you want to know more about MindSpore-related diffusion models, please visit "
- "[minddiffusion](https://github.com/mindspore-lab/minddiffusion)")
- gr.Markdown("- Try [Wukong-Huahua model on the Foundation Models Platform for Mindspore]"
- "(https://xihe.mindspore.cn/modelzoo/wukong)")
-
- generate_button.click(generate_figure,
- inputs=[style_input, desc_input],
- outputs=[img_output1, img_output2, img_output3, img_output4])
-
-demo.queue(concurrency_count=5)
-demo.launch(enable_queue=True)
\ No newline at end of file
diff --git a/spaces/mkmenta/try-gpt-1-and-gpt-2/app.py b/spaces/mkmenta/try-gpt-1-and-gpt-2/app.py
deleted file mode 100644
index fa4656f55ed102ff4c696647bc704ab16927ffab..0000000000000000000000000000000000000000
--- a/spaces/mkmenta/try-gpt-1-and-gpt-2/app.py
+++ /dev/null
@@ -1,96 +0,0 @@
-"""GPT-1 and GPT-2 Text Generation demo."""
-import gradio as gr
-from torch.cuda import is_available
-from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, GPT2Tokenizer, GPT2LMHeadModel
-
-
-tokenizer = None
-model = None
-loaded_model = None
-
-
-def load_model(model_name):
- """Loads the model and tokenizer from HuggingFace."""
- global tokenizer, model, loaded_model
- loaded_model = model_name
- huggingface_model_name = model_name.split('(')[1][:-1]
- if huggingface_model_name == 'openai-gpt': # GPT-1
- tokenizer = OpenAIGPTTokenizer.from_pretrained(huggingface_model_name)
- model = OpenAIGPTLMHeadModel.from_pretrained(huggingface_model_name)
- else: # GPT-2
- tokenizer = GPT2Tokenizer.from_pretrained(huggingface_model_name)
- model = GPT2LMHeadModel.from_pretrained(huggingface_model_name)
- # Load model in CUDA if available
- if is_available():
- model = model.cuda()
-
-
-def generate(inp, model_name, temperature, top_p, rep_pty, max_length):
- """Generates text using the given model and parameters."""
- if loaded_model != model_name:
- load_model(model_name)
- inputs = tokenizer.encode(inp, return_tensors='pt')
- if is_available():
- inputs = inputs.cuda()
-    outputs = model.generate(inputs,
-                             max_length=max_length,
-                             do_sample=True,  # temperature and top_p only take effect when sampling is enabled
-                             temperature=temperature,
-                             num_return_sequences=1,
-                             top_p=top_p,
-                             repetition_penalty=rep_pty)
- out = tokenizer.decode(outputs[0], skip_special_tokens=True)
- if 'GPT-1' in model_name:
- out = out.replace(inp.lower(), "")
- else:
- out = out.replace(inp, "")
- return out
-
-
-SAMPLE_INPUT = (
- "In a shocking finding, scientists discovered a herd of unicorns living in a remote,"
- " previously unexplored valley, in the Andes Mountains. Even more surprising to the"
- " researchers was the fact that the unicorns spoke perfect English."
-)
-
-with gr.Blocks() as demo:
- gr.Markdown("# 🦄 Try GPT-1 and GPT-2")
- with gr.Row():
- with gr.Column(scale=4):
- inp = gr.Textbox(label="Input text:",
- placeholder="Enter some text to get started.",
- value=SAMPLE_INPUT,
- lines=10)
- out = gr.Textbox(label="Generated text:", lines=25)
- with gr.Column(scale=1):
- with gr.Row(scale=1):
- model_name = gr.Dropdown(label="Select a model:",
- choices=['GPT-2 XL (gpt2-xl)',
- 'GPT-2 L (gpt2-large)',
- 'GPT-2 M (gpt2-medium)',
- 'GPT-2 S (gpt2)',
- 'GPT-1 (openai-gpt)'],
- value='GPT-2 XL (gpt2-xl)')
- btn_run = gr.Button("Generate")
- temperature = gr.Slider(
- label="Temperature",
- info=("Degree of randomness in the output, where higher values make it more unpredictable"
- " and creative, while lower values make it more deterministic and focused."),
- minimum=0.01, maximum=3.0, step=0.01, value=0.7)
- top_p = gr.Slider(
- label="Top-p",
- info=("If set to float < 1, only the most probable tokens with probabilities that add up"
- " to `top_p` or higher are kept for generation."),
- minimum=0.01, maximum=1.0, step=0.01, value=.9)
- rep_pty = gr.Slider(label="Repetition Penalty",
- info="Token repetition penalty. 1.0 means no penalty.",
- minimum=1.0, maximum=2.0, step=0.01, value=1.2)
- max_length = gr.Number(label="Max Length",
- info="The maximum length of the sequence to be generated.",
- minimum=1, maximum=1024, value=256, precision=0)
- # Fill the rest of the column with blank space
- # (I didn't find a better way to do this)
- with gr.Row(scale=1000):
- gr.Markdown()
- btn_run.click(fn=generate, inputs=[inp, model_name, temperature, top_p, rep_pty, max_length], outputs=out)
-
-demo.launch()
diff --git a/spaces/ml-energy/leaderboard/sharegpt/extract_first.py b/spaces/ml-energy/leaderboard/sharegpt/extract_first.py
deleted file mode 100644
index 0fd733e1caf4456f8f02d33e36acd2216cea8971..0000000000000000000000000000000000000000
--- a/spaces/ml-energy/leaderboard/sharegpt/extract_first.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import argparse
-import json
-
-
-def extract_first_sen(content):
- result = []
- for item in content:
- tmp = item
- tmp['conversations'] = [item['conversations'][0]]
- result.append(tmp)
- return result
-
-
-def main(args):
- content = json.load(open(args["in_file"], "r"))
-    content = extract_first_sen(content)
- json.dump(content, open(args["out_file"], "w"), indent=2)
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--in-file", type=str, default = 'sg_90k_part1_html_cleaned_lang.json' )
- parser.add_argument("--out-file", type=str, default = "sg_90k_part1_html_cleaned_lang_first.json")
- args = parser.parse_args()
- main(vars(args))
-
-
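Expected data shape, as a hypothetical minimal example; the field names follow the common ShareGPT dump format and are not taken from this repository.

```python
records = [
    {"id": "abc", "conversations": [
        {"from": "human", "value": "Hi"},
        {"from": "gpt", "value": "Hello!"},
    ]},
]
print(extract_first_sen(records)[0]["conversations"])
# -> [{'from': 'human', 'value': 'Hi'}]  (only the first turn of each record is kept)
```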
diff --git a/spaces/mohdelgaar/Clinical_Decisions/README.md b/spaces/mohdelgaar/Clinical_Decisions/README.md
deleted file mode 100644
index 42f35486fe10c3aac34ea647d3388eb00a5dc638..0000000000000000000000000000000000000000
--- a/spaces/mohdelgaar/Clinical_Decisions/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Clinical Decisions
-emoji: ⚕️
-colorFrom: indigo
-colorTo: pink
-sdk: gradio
-sdk_version: 3.21.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/morinop/BetterSelfie/README.md b/spaces/morinop/BetterSelfie/README.md
deleted file mode 100644
index dd0d087311c08eabd0c890f23111641d091f6c84..0000000000000000000000000000000000000000
--- a/spaces/morinop/BetterSelfie/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: BetterSelfie
-emoji: ⚡
-colorFrom: indigo
-colorTo: pink
-sdk: gradio
-sdk_version: 3.19.1
-app_file: app.py
-pinned: false
-license: openrail
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/mosaicml/mpt-30b-chat/app.py b/spaces/mosaicml/mpt-30b-chat/app.py
deleted file mode 100644
index c47859b7fae79cd5c63935dbb5b39e35994a6be3..0000000000000000000000000000000000000000
--- a/spaces/mosaicml/mpt-30b-chat/app.py
+++ /dev/null
@@ -1,219 +0,0 @@
-import os
-import time
-
-import gradio as gr
-from mcli import predict
-
-
-URL = os.environ.get("URL")
-if URL is None:
- raise ValueError("URL environment variable must be set")
-if os.environ.get("MOSAICML_API_KEY") is None:
-    raise ValueError("MOSAICML_API_KEY environment variable must be set")
-
-
-class Chat:
- default_system_prompt = "A conversation between a user and an LLM-based AI assistant. The assistant gives helpful and honest answers."
- system_format = "<|im_start|>system\n{}<|im_end|>\n"
-
- def __init__(self, system: str = None, user: str = None, assistant: str = None) -> None:
- if system is not None:
- self.set_system_prompt(system)
- else:
- self.reset_system_prompt()
- self.user = user if user else "<|im_start|>user\n{}<|im_end|>\n"
- self.assistant = assistant if assistant else "<|im_start|>assistant\n{}<|im_end|>\n"
- self.response_prefix = self.assistant.split("{}")[0]
-
- def set_system_prompt(self, system_prompt):
- # self.system = self.system_format.format(system_prompt)
- return system_prompt
-
- def reset_system_prompt(self):
- return self.set_system_prompt(self.default_system_prompt)
-
- def history_as_formatted_str(self, system, history) -> str:
- system = self.system_format.format(system)
- text = system + "".join(
- [
- "\n".join(
- [
- self.user.format(item[0]),
- self.assistant.format(item[1]),
- ]
- )
- for item in history[:-1]
- ]
- )
- text += self.user.format(history[-1][0])
- text += self.response_prefix
- # stopgap solution to too long sequences
- if len(text) > 4500:
- # delete from the middle between <|im_start|> and <|im_end|>
- # find the middle ones, then expand out
- start = text.find("<|im_start|>", 139)
- end = text.find("<|im_end|>", 139)
- while end < len(text) and len(text) > 4500:
- end = text.find("<|im_end|>", end + 1)
- text = text[:start] + text[end + 1 :]
- if len(text) > 4500:
- # the nice way didn't work, just truncate
- # deleting the beginning
- text = text[-4500:]
-
- return text
-
- def clear_history(self, history):
- return []
-
- def turn(self, user_input: str):
- self.user_turn(user_input)
- return self.bot_turn()
-
- def user_turn(self, user_input: str, history):
- history.append([user_input, ""])
- return user_input, history
-
- def bot_turn(self, system, history):
- conversation = self.history_as_formatted_str(system, history)
- assistant_response = call_inf_server(conversation)
- history[-1][-1] = assistant_response
- print(system)
- print(history)
- return "", history
-
-
-def call_inf_server(prompt):
- try:
- response = predict(
- URL,
- {"inputs": [prompt], "temperature": 0.2, "top_p": 0.9, "output_len": 512},
- timeout=70,
- )
- # print(f'prompt: {prompt}')
- # print(f'len(prompt): {len(prompt)}')
- response = response["outputs"][0]
- # print(f'len(response): {len(response)}')
- # remove spl tokens from prompt
- spl_tokens = ["<|im_start|>", "<|im_end|>"]
- clean_prompt = prompt.replace(spl_tokens[0], "").replace(spl_tokens[1], "")
- return response[len(clean_prompt) :] # remove the prompt
- except Exception as e:
- # assume it is our error
- # just wait and try one more time
- print(e)
- time.sleep(1)
- response = predict(
- URL,
- {"inputs": [prompt], "temperature": 0.2, "top_p": 0.9, "output_len": 512},
- timeout=70,
- )
- # print(response)
- response = response["outputs"][0]
- return response[len(prompt) :] # remove the prompt
-
-
-with gr.Blocks(
- theme=gr.themes.Soft(),
- css=".disclaimer {font-variant-caps: all-small-caps;}",
-) as demo:
- gr.Markdown(
- """
MosaicML MPT-30B-Chat
-
- This demo is of [MPT-30B-Chat](https://huggingface.co/mosaicml/mpt-30b-chat). It is based on [MPT-30B](https://huggingface.co/mosaicml/mpt-30b) fine-tuned on approximately 300,000 turns of high-quality conversations, and is powered by [MosaicML Inference](https://www.mosaicml.com/inference).
-
- If you're interested in [training](https://www.mosaicml.com/training) and [deploying](https://www.mosaicml.com/inference) your own MPT or LLMs, [sign up](https://forms.mosaicml.com/demo?utm_source=huggingface&utm_medium=referral&utm_campaign=mpt-30b) for the MosaicML platform.
-
-"""
- )
- conversation = Chat()
- chatbot = gr.Chatbot().style(height=500)
- with gr.Row():
- with gr.Column():
- msg = gr.Textbox(
- label="Chat Message Box",
- placeholder="Chat Message Box",
- show_label=False,
- ).style(container=False)
- with gr.Column():
- with gr.Row():
- submit = gr.Button("Submit")
- stop = gr.Button("Stop")
- clear = gr.Button("Clear")
- with gr.Row():
- with gr.Accordion("Advanced Options:", open=False):
- with gr.Row():
- with gr.Column(scale=2):
- system = gr.Textbox(
- label="System Prompt",
- value=Chat.default_system_prompt,
- show_label=False,
- ).style(container=False)
- with gr.Column():
- with gr.Row():
- change = gr.Button("Change System Prompt")
- reset = gr.Button("Reset System Prompt")
- with gr.Row():
- gr.Markdown(
- "Disclaimer: MPT-30B can produce factually incorrect output, and should not be relied on to produce "
- "factually accurate information. MPT-30B was trained on various public datasets; while great efforts "
- "have been taken to clean the pretraining data, it is possible that this model could generate lewd, "
- "biased, or otherwise offensive outputs.",
- elem_classes=["disclaimer"],
- )
- with gr.Row():
- gr.Markdown(
- "[Privacy policy](https://gist.github.com/samhavens/c29c68cdcd420a9aa0202d0839876dac)",
- elem_classes=["disclaimer"],
- )
-
- submit_event = msg.submit(
- fn=conversation.user_turn,
- inputs=[msg, chatbot],
- outputs=[msg, chatbot],
- queue=False,
- ).then(
- fn=conversation.bot_turn,
- inputs=[system, chatbot],
- outputs=[msg, chatbot],
- queue=True,
- )
- submit_click_event = submit.click(
- fn=conversation.user_turn,
- inputs=[msg, chatbot],
- outputs=[msg, chatbot],
- queue=False,
- ).then(
- fn=conversation.bot_turn,
- inputs=[system, chatbot],
- outputs=[msg, chatbot],
- queue=True,
- )
- stop.click(
- fn=None,
- inputs=None,
- outputs=None,
- cancels=[submit_event, submit_click_event],
- queue=False,
- )
- clear.click(lambda: None, None, chatbot, queue=False).then(
- fn=conversation.clear_history,
- inputs=[chatbot],
- outputs=[chatbot],
- queue=False,
- )
- change.click(
- fn=conversation.set_system_prompt,
- inputs=[system],
- outputs=[system],
- queue=False,
- )
- reset.click(
- fn=conversation.reset_system_prompt,
- inputs=[],
- outputs=[system],
- queue=False,
- )
-
-
-demo.queue(max_size=36, concurrency_count=14).launch(debug=True)
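
The `history_as_formatted_str` method above keeps the formatted prompt under a hard-coded 4500-character budget by splicing old turns out of the middle of the conversation before falling back to plain tail truncation. Below is a minimal standalone sketch of that idea; the 4500-character budget and the offset 139 (which appears to skip past the formatted default system prompt) are taken from the code above, while the function name and the extra guards are illustrative, not part of the Space.

```python
def truncate_prompt(text: str, budget: int = 4500, keep_prefix: int = 139) -> str:
    """Drop whole old turns from the middle of a ChatML-style prompt until it fits."""
    if len(text) <= budget:
        return text
    start = text.find("<|im_start|>", keep_prefix)  # first turn after the system block
    if start != -1:
        while len(text) > budget:
            end = text.find("<|im_end|>", start + 1)
            if end == -1:
                break
            # splice out one old turn, keeping the system prompt and the recent tail
            text = text[:start] + text[end + len("<|im_end|>"):]
    if len(text) > budget:
        text = text[-budget:]  # last resort: keep only the most recent characters
    return text
```
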
diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/speech_to_text/modules/augmented_memory_attention.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/models/speech_to_text/modules/augmented_memory_attention.py
deleted file mode 100644
index e7465bc889fd1ba6ca2c60905a2eb6ff5cc62b9d..0000000000000000000000000000000000000000
--- a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/speech_to_text/modules/augmented_memory_attention.py
+++ /dev/null
@@ -1,488 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from typing import Tuple, List
-
-import torch
-import torch.nn.functional as F
-from fairseq.models import FairseqEncoder
-from fairseq.models.speech_to_text import (
- ConvTransformerEncoder,
-)
-from fairseq.models.speech_to_text.utils import attention_suppression
-from fairseq.models.speech_to_text.utils import (
- lengths_to_encoder_padding_mask,
- segments_to_sequence,
- sequence_to_segments,
-)
-from fairseq.modules import MultiheadAttention, TransformerEncoderLayer
-from torch import nn, Tensor
-
-# ------------------------------------------------------------------------------
-# AugmentedMemoryConvTransformerEncoder
-# ------------------------------------------------------------------------------
-
-
-class AugmentedMemoryConvTransformerEncoder(ConvTransformerEncoder):
- def __init__(self, args):
- super().__init__(args)
-
- args.encoder_stride = self.stride()
-
- self.left_context = args.left_context // args.encoder_stride
-
- self.right_context = args.right_context // args.encoder_stride
-
- self.left_context_after_stride = args.left_context // args.encoder_stride
- self.right_context_after_stride = args.right_context // args.encoder_stride
-
- self.transformer_layers = nn.ModuleList([])
- self.transformer_layers.extend(
- [
- AugmentedMemoryTransformerEncoderLayer(args)
- for i in range(args.encoder_layers)
- ]
- )
-
- def stride(self):
- # Hard coded here. Should infer from convs in future
- stride = 4
- return stride
-
- def forward(self, src_tokens, src_lengths, states=None):
- """Encode input sequence.
- :param torch.Tensor xs: input tensor
- :param torch.Tensor masks: input mask
- :return: position embedded tensor and mask
- :rtype Tuple[torch.Tensor, torch.Tensor]:
- """
- bsz, max_seq_len, _ = src_tokens.size()
- x = (
- src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
- .transpose(1, 2)
- .contiguous()
- )
- x = self.conv(x)
- bsz, _, output_seq_len, _ = x.size()
- x = x.transpose(1, 2).transpose(0, 1).contiguous().view(output_seq_len, bsz, -1)
- x = self.out(x)
- x = self.embed_scale * x
-
- subsampling_factor = 1.0 * max_seq_len / output_seq_len
- input_lengths = torch.max(
- (src_lengths.float() / subsampling_factor).ceil().long(),
- x.size(0) * src_lengths.new_ones([src_lengths.size(0)]).long(),
- )
-
- encoder_padding_mask, _ = lengths_to_encoder_padding_mask(
- input_lengths, batch_first=True
- )
-
- # TODO: fix positional embedding
- positions = self.embed_positions(encoder_padding_mask).transpose(0, 1)
-
- x += positions
- x = F.dropout(x, p=self.dropout, training=self.training)
-
- # State to store memory banks etc.
- if states is None:
- states = [
- {"memory_banks": None, "encoder_states": None}
- for i in range(len(self.transformer_layers))
- ]
-
- for i, layer in enumerate(self.transformer_layers):
- # x size:
- # (self.left_size + self.segment_size + self.right_size)
- # / self.stride, num_heads, dim
- # TODO: Consider mask here
- x = layer(x, states[i])
- states[i]["encoder_states"] = x[
- self.left_context_after_stride : -self.right_context_after_stride
- ]
-
- lengths = (
- (
- ~encoder_padding_mask[
- :, self.left_context_after_stride : -self.right_context_after_stride
- ]
- )
- .sum(dim=1, keepdim=True)
- .long()
- )
-
- return states[-1]["encoder_states"], lengths, states
-
-
-# ------------------------------------------------------------------------------
-# AugmentedMemoryTransformerEncoderLayer
-# ------------------------------------------------------------------------------
-class AugmentedMemoryTransformerEncoderLayer(TransformerEncoderLayer):
- def __init__(self, args):
- super().__init__(args)
-
- self.left_context = args.left_context // args.encoder_stride
- self.right_context = args.right_context // args.encoder_stride
-
- def forward(self, x, state):
-
- length, batch_size, x_dim = x.size()
-
- residual = x
-
- if self.normalize_before:
- x = self.self_attn_layer_norm(x)
-
- # init_state
- if state.get("memory_banks", None) is None:
- state["memory_banks"] = []
-
- # TODO: research a new sum_query method
- seg_start = self.left_context
- seg_end = length - self.right_context
- if seg_start < seg_end:
- summarization_query = torch.mean(x[seg_start:seg_end], keepdim=True, dim=0)
- else:
- summarization_query = x.new_zeros(1, batch_size, x_dim)
-
- x = torch.cat([x, summarization_query], dim=0)
-
- x = self.self_attn(input_and_summary=x, state=state)
-
- x = self.dropout_module(x)
- x = residual + x
-
- if not self.normalize_before:
- x = self.self_attn_layer_norm(x)
-
- residual = x
- if self.normalize_before:
- x = self.final_layer_norm(x)
-
- x = self.activation_fn(self.fc1(x))
- x = self.activation_dropout_module(x)
- x = self.fc2(x)
- x = self.dropout_module(x)
- x = residual + x
- if not self.normalize_before:
- x = self.final_layer_norm(x)
-
- return x
-
- def build_self_attention(self, embed_dim, args):
- return AugmentedMemoryMultiheadAttention(
- embed_dim=embed_dim,
- num_heads=args.encoder_attention_heads,
- dropout=args.attention_dropout,
- self_attention=True,
- q_noise=self.quant_noise,
- qn_block_size=self.quant_noise_block_size,
- tanh_on_mem=True,
- max_memory_size=args.max_memory_size,
- )
-
-
-# ------------------------------------------------------------------------------
-# AugmentedMemoryMultiheadAttention
-# ------------------------------------------------------------------------------
-class AugmentedMemoryMultiheadAttention(MultiheadAttention):
- """
- Augmented Memory Attention from
- Streaming Transformer-based Acoustic Models
- Using Self-attention with Augmented Memory
- https://arxiv.org/abs/2005.08042
- """
-
- def __init__(
- self,
- embed_dim,
- num_heads,
- kdim=None,
- vdim=None,
- dropout=0.0,
- bias=True,
- add_bias_kv=False,
- add_zero_attn=False,
- self_attention=False,
- encoder_decoder_attention=False,
- q_noise=0.0,
- qn_block_size=8,
- tanh_on_mem=False,
- memory_dim=None,
- std_scale=0.5, # 0.5 based on https://arxiv.org/abs/2005.09137
- max_memory_size=-1,
- disable_mem_on_mem_attn=True,
- ):
- super().__init__(
- embed_dim,
- num_heads,
- kdim,
- vdim,
- dropout,
- bias,
- add_bias_kv,
- add_zero_attn,
- self_attention,
- encoder_decoder_attention,
- q_noise,
- qn_block_size,
- )
-
- self.memory_dim = memory_dim if memory_dim is not None else embed_dim
- self.std_scale = std_scale
- self.disable_mem_on_mem_attn = disable_mem_on_mem_attn
-
- # This Operator was used for factorization in PySpeech
- self.v2e = lambda x: x
-
- if tanh_on_mem:
- self.squash_mem = torch.tanh
- self.nonlinear_squash_mem = True
- else:
- self.squash_mem = lambda x: x
- self.nonlinear_squash_mem = False
-
- self.max_memory_size = max_memory_size
-
- def forward(self, input_and_summary, state):
- """
- input: Encoder states of current segment with left or right context,
- plus one summarization query
-
- """
-
- length, batch_size, _ = input_and_summary.shape
- length = length - 1 # exclude the summarization query (the last position)
-
- memory = state["memory_banks"]
- # TODO: positional embedding on memory
-
- if self.max_memory_size > -1 and len(memory) > self.max_memory_size:
- # TODO: need to fix here
- if self.max_memory_size == 0:
- memory = memory.new_zeros(1, memory.size(1), self.memory_dim)
- else:
- memory = memory[-self.max_memory_size :]
-
- memory_and_input = torch.cat(memory + [input_and_summary[:-1]], dim=0)
- input_and_sum_query = input_and_summary
-
- q = self.q_proj(self.v2e(input_and_sum_query))
- k = self.k_proj(self.v2e(memory_and_input))
- v = self.v_proj(self.v2e(memory_and_input))
-
- q = (
- q.contiguous()
- .view(-1, batch_size * self.num_heads, self.head_dim)
- .transpose(0, 1)
- * self.scaling
- )
- k = (
- k.contiguous()
- .view(-1, batch_size * self.num_heads, self.head_dim)
- .transpose(0, 1)
- )
-
- v = (
- v.contiguous()
- .view(-1, batch_size * self.num_heads, self.head_dim)
- .transpose(0, 1)
- )
-
- attention_weights = torch.bmm(q, k.transpose(1, 2))
-
- if self.disable_mem_on_mem_attn:
- attention_weights = self.suppress_mem_on_mem_attention(
- batch_size, self.num_heads, len(memory), attention_weights
- )
-
- if self.std_scale is not None:
- attention_weights = attention_suppression(attention_weights, self.std_scale)
-
- assert list(attention_weights.shape) == [
- batch_size * self.num_heads,
- length + 1,
- length + len(memory),
- ]
-
- attention_weights = torch.nn.functional.softmax(
- attention_weights.float(), dim=-1
- ).type_as(attention_weights)
-
- attention_probs = self.dropout_module(attention_weights)
-
- # [B*n_head, T+1, T+mem] x [B*n_head, T+mem, d_head] -> [B*n_head, T+1, d_head]
- attention = torch.bmm(attention_probs, v)
-
- assert list(attention.shape) == [
- batch_size * self.num_heads,
- length + 1,
- self.head_dim,
- ]
-
- attention = (
- attention.transpose(0, 1)
- .contiguous()
- .view(length + 1, batch_size, self.embed_dim)
- )
-
- output_and_memory = self.out_proj(attention)
-
- next_m = output_and_memory[-1:]
- next_m = self.squash_mem(next_m)
- output = output_and_memory[:-1]
-
- state["memory_banks"].append(next_m)
-
- return output
-
- def suppress_mem_on_mem_attention(
- self, B: int, num_heads: int, mem_size: int, attention_weight: Tensor
- ):
- """
- Arguments:
- - B: batch size
- - num_heads: number of attention heads
- - mem_size: size of memory bank
- - attention_weight: a [B*num_heads, T + 1, T + mem_size] tensor
-
- Return:
- modified attention_weight with [B*num_heads, -1, :mem_size] = -inf
- """
- attention_weight[:, -1, :mem_size] = float("-inf")
- return attention_weight
-
-
-# ------------------------------------------------------------------------------
-# SequenceEncoder
-# ------------------------------------------------------------------------------
-class SequenceEncoder(FairseqEncoder):
- """
- SequenceEncoder encodes sequences.
-
- More specifically, `src_tokens` and `src_lengths` in `forward()` should
- describe a batch of "complete" sequences rather than segments.
-
- Segment-by-segment inference can be triggered by `segment_size`:
- 1) `segment_size` is None:
- SequenceEncoder treats the input sequence as one single segment.
- 2) `segment_size` is not None (some int instead):
- SequenceEncoder does the following:
- 1. breaks the input sequence into several segments
- 2. runs inference on each segment and collects the outputs
- 3. concatenates the segment outputs into the output sequence.
- Note that `segment_size` here should not include the additional left/right
- context; for example, to infer with an LC-BLSTM whose middle chunk size is
- 100 and whose right context is 20, `segment_size` should be 100.
- """
-
- def __init__(self, args, module):
- super().__init__(None)
-
- self.module = module
- self.input_time_axis = 1
- self.output_time_axis = 0
- self.segment_size = args.segment_size
- self.left_context = args.left_context
- self.right_context = args.right_context
-
- def forward(
- self,
- src_tokens: Tensor,
- src_lengths: Tensor,
- states=None,
- ):
-
- seg_src_tokens_lengths = sequence_to_segments(
- sequence=src_tokens,
- time_axis=self.input_time_axis,
- lengths=src_lengths,
- segment_size=self.segment_size,
- extra_left_context=self.left_context,
- extra_right_context=self.right_context,
- )
-
- seg_encoder_states_lengths: List[Tuple[Tensor, Tensor]] = []
-
- for seg_src_tokens, seg_src_lengths in seg_src_tokens_lengths:
- (seg_encoder_states, seg_enc_lengths, states) = self.module(
- seg_src_tokens,
- seg_src_lengths,
- states=states,
- )
-
- seg_encoder_states_lengths.append((seg_encoder_states, seg_enc_lengths))
-
- encoder_out, enc_lengths = segments_to_sequence(
- segments=seg_encoder_states_lengths, time_axis=self.output_time_axis
- )
-
- encoder_padding_mask, _ = lengths_to_encoder_padding_mask(
- enc_lengths, batch_first=True
- )
-
- if not encoder_padding_mask.any():
- encoder_padding_mask = None
-
- return {
- "encoder_out": [encoder_out],
- "encoder_padding_mask": [encoder_padding_mask],
- "encoder_embedding": [],
- "encoder_states": [states],
- "src_tokens": [],
- "src_lengths": [],
- }
-
- def incremental_encode(
- self,
- seg_src_tokens: Tensor,
- seg_src_lengths: Tensor,
- states=None,
- ):
- """
- Unlike the forward function, this function takes segmented speech
- as input and appends the encoder states to the previous states.
- """
- (seg_encoder_states, seg_enc_lengths, states) = self.module(
- seg_src_tokens,
- seg_src_lengths,
- states=states,
- )
- return seg_encoder_states, seg_enc_lengths, states
-
-
-# ------------------------------------------------------------------------------
-# Augmented memory model decorator
-# ------------------------------------------------------------------------------
-def augmented_memory(klass):
- class StreamSeq2SeqModel(klass):
- @staticmethod
- def add_args(parser):
- super(StreamSeq2SeqModel, StreamSeq2SeqModel).add_args(parser)
- parser.add_argument(
- "--segment-size", type=int, required=True, help="Length of the segment."
- )
- parser.add_argument(
- "--left-context",
- type=int,
- default=0,
- help="Left context for the segment.",
- )
- parser.add_argument(
- "--right-context",
- type=int,
- default=0,
- help="Right context for the segment.",
- )
- parser.add_argument(
- "--max-memory-size",
- type=int,
- default=-1,
- help="Right context for the segment.",
- )
-
- StreamSeq2SeqModel.__name__ = klass.__name__
- return StreamSeq2SeqModel
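
The `SequenceEncoder` docstring above describes segment-by-segment inference: `segment_size` covers only the middle chunk, and extra left/right context frames are attached to each segment before it is fed to the streaming encoder. The sketch below only illustrates that slicing scheme; it is not the fairseq `sequence_to_segments` implementation, and the function name and shapes are illustrative.

```python
import torch

def split_with_context(x: torch.Tensor, segment_size: int,
                       left_context: int, right_context: int):
    """x: (batch, time, feat) -> list of segments of length <= left + segment + right."""
    segments = []
    total = x.size(1)
    for start in range(0, total, segment_size):
        lo = max(0, start - left_context)
        hi = min(total, start + segment_size + right_context)
        segments.append(x[:, lo:hi])
    return segments

# e.g. a 10-frame utterance, segment_size=4 and 2 frames of context on each side
chunks = split_with_context(torch.randn(1, 10, 80), segment_size=4,
                            left_context=2, right_context=2)
print([c.shape[1] for c in chunks])  # [6, 8, 4]
```
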
diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/grad_multiply.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/grad_multiply.py
deleted file mode 100644
index 08d15f55dfda9c61a1cf8641ea31424fe1d97f57..0000000000000000000000000000000000000000
--- a/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/grad_multiply.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-
-
-class GradMultiply(torch.autograd.Function):
- @staticmethod
- def forward(ctx, x, scale):
- ctx.scale = scale
- res = x.new(x)
- return res
-
- @staticmethod
- def backward(ctx, grad):
- return grad * ctx.scale, None
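
A quick way to see what `GradMultiply` does: the forward pass is an identity copy, while the backward pass scales incoming gradients by `scale`. The check below assumes the class is importable as `fairseq.modules.grad_multiply.GradMultiply` (the path of the deleted file); otherwise, paste the class definition alongside it.

```python
import torch
from fairseq.modules.grad_multiply import GradMultiply  # assumed import path

x = torch.ones(3, requires_grad=True)
y = GradMultiply.apply(x, 0.5)   # values unchanged in the forward pass
y.sum().backward()               # gradients scaled by 0.5 in the backward pass

print(torch.allclose(y, x))      # True
print(x.grad)                    # tensor([0.5000, 0.5000, 0.5000])
```
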
diff --git a/spaces/mshukor/UnIVAL/models/taming/modules/discriminator/model.py b/spaces/mshukor/UnIVAL/models/taming/modules/discriminator/model.py
deleted file mode 100644
index dfe70a392608ede979303a2a8df21ffea17d89d1..0000000000000000000000000000000000000000
--- a/spaces/mshukor/UnIVAL/models/taming/modules/discriminator/model.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import functools
-import torch.nn as nn
-
-
-from models.taming.modules.util import ActNorm
-
-
-def weights_init(m):
- classname = m.__class__.__name__
- if classname.find('Conv') != -1:
- nn.init.normal_(m.weight.data, 0.0, 0.02)
- elif classname.find('BatchNorm') != -1:
- nn.init.normal_(m.weight.data, 1.0, 0.02)
- nn.init.constant_(m.bias.data, 0)
-
-
-class NLayerDiscriminator(nn.Module):
- """Defines a PatchGAN discriminator as in Pix2Pix
- --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
- """
- def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):
- """Construct a PatchGAN discriminator
- Parameters:
- input_nc (int) -- the number of channels in input images
- ndf (int) -- the number of filters in the last conv layer
- n_layers (int) -- the number of conv layers in the discriminator
- norm_layer -- normalization layer
- """
- super(NLayerDiscriminator, self).__init__()
- if not use_actnorm:
- norm_layer = nn.BatchNorm2d
- else:
- norm_layer = ActNorm
- if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
- use_bias = norm_layer.func != nn.BatchNorm2d
- else:
- use_bias = norm_layer != nn.BatchNorm2d
-
- kw = 4
- padw = 1
- sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
- nf_mult = 1
- nf_mult_prev = 1
- for n in range(1, n_layers): # gradually increase the number of filters
- nf_mult_prev = nf_mult
- nf_mult = min(2 ** n, 8)
- sequence += [
- nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
- norm_layer(ndf * nf_mult),
- nn.LeakyReLU(0.2, True)
- ]
-
- nf_mult_prev = nf_mult
- nf_mult = min(2 ** n_layers, 8)
- sequence += [
- nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
- norm_layer(ndf * nf_mult),
- nn.LeakyReLU(0.2, True)
- ]
-
- sequence += [
- nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
- self.main = nn.Sequential(*sequence)
-
- def forward(self, input):
- """Standard forward."""
- return self.main(input)
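
As a usage note for the PatchGAN discriminator above: it maps an RGB image to a one-channel grid of patch logits rather than a single scalar. The sketch below is hypothetical; the import path simply mirrors the deleted file's location, and the exact output grid size depends on the input resolution and `n_layers`.

```python
import torch
from models.taming.modules.discriminator.model import NLayerDiscriminator, weights_init

disc = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3, use_actnorm=False)
disc.apply(weights_init)                      # Conv/BatchNorm init as defined above

logits = disc(torch.randn(4, 3, 256, 256))    # batch of 4 RGB images
print(logits.shape)                           # e.g. torch.Size([4, 1, 30, 30])
```
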
diff --git a/spaces/nakas/Time-Domain-Audio-Style-Transfer/audio_style_transfer/__init__.py b/spaces/nakas/Time-Domain-Audio-Style-Transfer/audio_style_transfer/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/nasa-cisto-data-science-group/satvision-base-demo/pytorch-caney/requirements/README.md b/spaces/nasa-cisto-data-science-group/satvision-base-demo/pytorch-caney/requirements/README.md
deleted file mode 100644
index e006d062a8828d17be29b379a605c3ba9c6b5332..0000000000000000000000000000000000000000
--- a/spaces/nasa-cisto-data-science-group/satvision-base-demo/pytorch-caney/requirements/README.md
+++ /dev/null
@@ -1,35 +0,0 @@
-# Requirements
-
-pytorch-caney can be installed and used via anaconda environments and containers.
-A Docker container is provided, and this same container can be converted
-to a Singularity container without losing any functionality.
-
-CPU support is limited and the author does not provide any guarantee of usability.
-
-## Architecture
-
-The container is built on top of NVIDIA NGC PyTorch containers.
-
-This application is powered by the PyTorch and PyTorch Lightning AI/ML backends.
-
-## Example to Download the Container via Singularity
-
-```bash
-module load singularity
-singularity build --sandbox pytorch-caney docker://nasanccs/pytorch-caney:latest
-```
-
-## Example to Install Anaconda Environment
-
-```bash
-git clone git@github.com:nasa-nccs-hpda/pytorch-caney.git
-cd pytorch-caney; conda env create -f requirements/environment_gpu.yml;
-conda activate pytorch-caney
-```
-
-## Container Usage
-
-As an example, you can shell into the container:
-
-```bash
-singularity shell --nv -B /path/to/container/pytorch-caney
-```
diff --git a/spaces/nasttam/Image-and-3D-Model-Creator/PIFu/lib/renderer/gl/cam_render.py b/spaces/nasttam/Image-and-3D-Model-Creator/PIFu/lib/renderer/gl/cam_render.py
deleted file mode 100644
index 7b766af057b9c052388aceb152b0191fa2e4ea25..0000000000000000000000000000000000000000
--- a/spaces/nasttam/Image-and-3D-Model-Creator/PIFu/lib/renderer/gl/cam_render.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from .render import Render
-
-GLUT = None
-
-class CamRender(Render):
- def __init__(self, width=1600, height=1200, name='Cam Renderer',
- program_files=['simple.fs', 'simple.vs'], color_size=1, ms_rate=1, egl=False):
- Render.__init__(self, width, height, name, program_files, color_size, ms_rate=ms_rate, egl=egl)
- self.camera = None
-
- if not egl:
- global GLUT
- import OpenGL.GLUT as GLUT
- GLUT.glutDisplayFunc(self.display)
- GLUT.glutKeyboardFunc(self.keyboard)
-
- def set_camera(self, camera):
- self.camera = camera
- self.projection_matrix, self.model_view_matrix = camera.get_gl_matrix()
-
- def keyboard(self, key, x, y):
- # up
- eps = 1
- # print(key)
- if key == b'w':
- self.camera.center += eps * self.camera.direction
- elif key == b's':
- self.camera.center -= eps * self.camera.direction
- if key == b'a':
- self.camera.center -= eps * self.camera.right
- elif key == b'd':
- self.camera.center += eps * self.camera.right
- if key == b' ':
- self.camera.center += eps * self.camera.up
- elif key == b'x':
- self.camera.center -= eps * self.camera.up
- elif key == b'i':
- self.camera.near += 0.1 * eps
- self.camera.far += 0.1 * eps
- elif key == b'o':
- self.camera.near -= 0.1 * eps
- self.camera.far -= 0.1 * eps
-
- self.projection_matrix, self.model_view_matrix = self.camera.get_gl_matrix()
-
- def show(self):
- if GLUT is not None:
- GLUT.glutMainLoop()
diff --git a/spaces/nathanTQ/ChatDev/camel/agents/tool_agents/base.py b/spaces/nathanTQ/ChatDev/camel/agents/tool_agents/base.py
deleted file mode 100644
index a06c72e421b448263f681fe79d566a9a53d7ae4f..0000000000000000000000000000000000000000
--- a/spaces/nathanTQ/ChatDev/camel/agents/tool_agents/base.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-# Licensed under the Apache License, Version 2.0 (the “License”);
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an “AS IS” BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-from camel.agents import BaseAgent
-
-
-class BaseToolAgent(BaseAgent):
- r"""Creates a :obj:`BaseToolAgent` object with the specified name and
- description.
-
- Args:
- name (str): The name of the tool agent.
- description (str): The description of the tool agent.
- """
-
- def __init__(self, name: str, description: str) -> None:
-
- self.name = name
- self.description = description
-
- def __str__(self) -> str:
- return f"{self.name}: {self.description}"
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/1996 - Anthology Vol.2 Disc 2.zip.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/1996 - Anthology Vol.2 Disc 2.zip.md
deleted file mode 100644
index 4f1b14c116730b4e282a8af59b18f2ad4c23e53a..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/1996 - Anthology Vol.2 Disc 2.zip.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
Review: The Beatles - Anthology 2 (1996)
-
The Beatles are arguably the most influential and popular band in the history of rock music. Their legacy is undeniable and their music is timeless. But what about the songs that never made it to their official albums? The songs that were left unfinished, rejected, or performed live only once? The songs that show a different side of the Fab Four, their creative process, their experimentation, and their evolution?
-
That's where Anthology 2 comes in. Anthology 2 is the second installment of a three-part series of compilation albums that collect rare and unreleased material from the Beatles' career. It covers the period from 1965 to 1968, arguably the most productive and innovative phase of the band's history. It includes alternate takes, demos, live recordings, and studio outtakes from albums such as Rubber Soul, Revolver, Sgt. Pepper's Lonely Hearts Club Band, and Magical Mystery Tour.
Anthology 2 offers a fascinating glimpse into the Beatles' musical journey, from their early rock and roll roots to their psychedelic experimentation. It reveals how some of their classic songs were shaped and refined through multiple versions and revisions. It also showcases some of their lesser-known gems that deserve more attention and appreciation.
-
The album opens with "Real Love", one of the two new songs that reunited the surviving Beatles in 1995. Based on a demo by John Lennon, the song features Paul McCartney, George Harrison, and Ringo Starr adding vocals and instruments to complete it. It's a touching tribute to Lennon's memory and a testament to the enduring bond between the bandmates.
-
The rest of the album is divided into two discs, each containing 25 tracks. The first disc focuses on the years 1965 and 1966, when the Beatles were still touring and making movies. It contains some of their most energetic and catchy songs, such as "I'm Down", "Ticket to Ride", "Help!", and "She's a Woman". It also features some of their more introspective and experimental songs, such as "Yesterday", "Norwegian Wood", "Tomorrow Never Knows", and "Eleanor Rigby". Some of the highlights include:
-
-
"Yes It Is", an early attempt at a three-part harmony ballad that was later replaced by "This Boy".
-
"If You've Got Trouble", a rockabilly number that was intended to be Ringo's vocal spot on Help!, but was scrapped for being too weak.
-
"That Means a Lot", a song that Paul wrote for Help!, but was dissatisfied with. He later gave it to P.J. Proby, who had a minor hit with it.
-
"12-Bar Original", an instrumental jam that was recorded for Rubber Soul, but was deemed too boring and generic.
-
"And Your Bird Can Sing", a sarcastic song that features one of George's most intricate guitar parts.
-
"Taxman", an early version of George's political protest song that has a different solo and lyrics.
-
-
The second disc covers the years 1967 and 1968, when the Beatles stopped touring and focused on studio work. It contains some of their most ambitious and groundbreaking songs, such as "Strawberry Fields Forever", "Penny Lane", "A Day in the Life", "Lucy in the Sky with Diamonds", and "I Am the Walrus". It also features some of their more playful and whimsical songs, such as "Good Morning Good Morning", "Being for the Benefit of Mr. Kite!", "You Know My Name (Look Up the Number)", and "Hello Goodbye". Some of the highlights include:
-
-
"Strawberry Fields Forever (Demo Sequence)", a medley of John's home recordings that show how he developed his cec2833e83
-
-
\ No newline at end of file
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Activador.Windows.7 Serial Key.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Activador.Windows.7 Serial Key.md
deleted file mode 100644
index dc86d2b72a0848b4aebbbabace823d8351620677..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Activador.Windows.7 Serial Key.md
+++ /dev/null
@@ -1,40 +0,0 @@
-
How to Activate Windows 7 with a Serial Key
-
If you have installed Windows 7 on your computer, you may need to activate it with a serial key to enjoy all its features and benefits. A serial key is a 25-digit code that you can enter during the installation or activation process of Windows 7. It verifies that your copy of Windows 7 is genuine and not pirated.
-
There are different ways to find or get a serial key for Windows 7. You can buy one from an official retailer or online store, or you can use a free one from the internet. However, using a free serial key may not be legal or safe, as it may contain malware or viruses, or it may be already used by someone else. Therefore, it is recommended to use a genuine serial key that you have purchased or obtained legally.
To activate Windows 7 with a serial key, you need to follow these steps:
-
-
Click on the Start button and type "activate" in the search box. Then click on "Activate Windows" from the list of results.
-
Enter your serial key in the box and click on "Next". If you have an internet connection, Windows will automatically verify your serial key and activate your Windows 7. If you don't have an internet connection, you can choose to activate by phone.
-
If you choose to activate by phone, you will see a toll-free number that you can call and follow the instructions. You will need to provide the installation ID that is displayed on your screen and enter the confirmation ID that you will receive from the automated system.
-
Once you have entered the confirmation ID, click on "Next" and your Windows 7 will be activated.
-
-
Congratulations! You have successfully activated Windows 7 with a serial key. You can now enjoy all the features and benefits of Windows 7 on your computer.
-
Some of the features and benefits of Windows 7 are:
-
-
A user-friendly and customizable interface that allows you to personalize your desktop, taskbar, start menu, and themes.
-
A fast and reliable performance that optimizes your memory, CPU, and disk usage.
-
A secure and stable system that protects your data and privacy with built-in firewall, antivirus, encryption, and backup tools.
-
A rich and diverse multimedia experience that supports various formats of audio, video, and images.
-
A wide range of applications and games that are compatible with Windows 7 and enhance your productivity and entertainment.
-
-
Windows 7 is one of the most popular and widely used operating systems in the world. It has received positive reviews from critics and users alike for its design, functionality, and performance. If you have not activated your Windows 7 yet, do it now with a serial key and enjoy all its features and benefits.
-
-
\ No newline at end of file
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Problem-oriented Medical Diagnosis Friedman Ebook.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Problem-oriented Medical Diagnosis Friedman Ebook.md
deleted file mode 100644
index 5d731f1e7a68797cabcc78dfab05d969e1467567..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Problem-oriented Medical Diagnosis Friedman Ebook.md
+++ /dev/null
@@ -1,13 +0,0 @@
-
-
Problem-oriented Medical Diagnosis by Henry Harold Friedman: A Review
-
Problem-oriented Medical Diagnosis is a popular book that provides a step-by-step guide to the diagnostic workup of 75 of the most common clinical problems in internal medicine. The book focuses on the analysis of the presenting signs and symptoms, history, and physical examination and the appropriate use of laboratory and radiologic studies to reach a definitive diagnosis. Each chapter presents detailed information in an easy-to-follow outline format.
The book was first published in 1976 and has been updated several times since then. The latest edition is the seventh one, which was published in 2001 by Lippincott Williams & Wilkins. The book is intended for residents, medical students, and nurse practitioners who need a practical and concise reference for clinical problem-solving. It is also a valuable, time-saving memory aid for practicing physicians who want to refresh their knowledge and skills.
-
The book covers a wide range of topics, such as general problems (fatigue, fever of unknown origin, weight loss, etc.), dermatologic problems (urticaria, angioedema, etc.), cardiovascular problems (hypertension, heart murmurs, etc.), respiratory problems (wheezing, hemoptysis, etc.), gastrointestinal problems (nausea, vomiting, diarrhea, etc.), hematologic problems (anemia, bleeding disorders, etc.), renal and electrolyte problems (blood gas and acid-base abnormalities, etc.), musculoskeletal problems (myalgia, low back pain, etc.), and endocrine and metabolic problems (hypoglycemia, diabetes mellitus, etc.). The book also includes a chapter on sleep disorders and a chapter on acquired immunodeficiency syndrome.
-
The book has received positive reviews from users who praised its clarity, comprehensiveness, and usefulness. Some users also appreciated its spiral binding and pocket size that make it easy to carry and use. However, some users also noted that the book is outdated in some aspects and that it does not cover some important topics such as infectious diseases, neurologic disorders, or psychiatric disorders. Some users also suggested that the book could be improved by adding more illustrations, tables, algorithms, or case studies.
-
-
Overall, Problem-oriented Medical Diagnosis is a classic book that has helped many medical professionals and students in their clinical practice and education. It is a reliable and handy source of information for common medical problems that can be encountered in internal medicine. However, it is not a comprehensive or authoritative textbook that can replace other sources of medical knowledge. It is also not a substitute for clinical judgment or experience that are essential for making accurate diagnoses and providing optimal care.
If you are interested in learning more about Problem-oriented Medical Diagnosis, you can find it online or in your local library. You can also check out other books by Henry Harold Friedman, such as Clinical Methods: The History, Physical, and Laboratory Examinations and Essentials of Clinical Examination Handbook. These books can help you improve your clinical skills and knowledge.
-
Alternatively, you can also explore other sources of medical information, such as journals, websites, podcasts, or online courses. These sources can provide you with the latest updates and evidence-based guidelines on various medical topics. However, you should always be careful and critical when evaluating the quality and reliability of these sources. You should also consult with your mentors, colleagues, or supervisors before applying any new information to your practice.
-
Remember that medical diagnosis is not a simple or straightforward process. It requires a combination of knowledge, skills, experience, and intuition. It also involves communication, collaboration, and ethical decision-making. You should always strive to learn from your patients, your peers, and your own mistakes. You should also seek feedback and guidance from others who can help you grow as a clinician. By doing so, you can provide the best possible care for your patients and yourself.
-
-
\ No newline at end of file
diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/tools/deploy/export_model.py b/spaces/nikitaPDL2023/assignment4/detectron2/tools/deploy/export_model.py
deleted file mode 100644
index f507dffe56a4121756874186eacdc9be0cbcdee1..0000000000000000000000000000000000000000
--- a/spaces/nikitaPDL2023/assignment4/detectron2/tools/deploy/export_model.py
+++ /dev/null
@@ -1,240 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) Facebook, Inc. and its affiliates.
-import argparse
-import os
-from typing import Dict, List, Tuple
-import torch
-from torch import Tensor, nn
-
-import detectron2.data.transforms as T
-from detectron2.checkpoint import DetectionCheckpointer
-from detectron2.config import get_cfg
-from detectron2.data import build_detection_test_loader, detection_utils
-from detectron2.evaluation import COCOEvaluator, inference_on_dataset, print_csv_format
-from detectron2.export import (
- STABLE_ONNX_OPSET_VERSION,
- TracingAdapter,
- dump_torchscript_IR,
- scripting_with_instances,
-)
-from detectron2.modeling import GeneralizedRCNN, RetinaNet, build_model
-from detectron2.modeling.postprocessing import detector_postprocess
-from detectron2.projects.point_rend import add_pointrend_config
-from detectron2.structures import Boxes
-from detectron2.utils.env import TORCH_VERSION
-from detectron2.utils.file_io import PathManager
-from detectron2.utils.logger import setup_logger
-
-
-def setup_cfg(args):
- cfg = get_cfg()
- # cuda context is initialized before creating dataloader, so we don't fork anymore
- cfg.DATALOADER.NUM_WORKERS = 0
- add_pointrend_config(cfg)
- cfg.merge_from_file(args.config_file)
- cfg.merge_from_list(args.opts)
- cfg.freeze()
- return cfg
-
-
-def export_caffe2_tracing(cfg, torch_model, inputs):
- from detectron2.export import Caffe2Tracer
-
- tracer = Caffe2Tracer(cfg, torch_model, inputs)
- if args.format == "caffe2":
- caffe2_model = tracer.export_caffe2()
- caffe2_model.save_protobuf(args.output)
- # draw the caffe2 graph
- caffe2_model.save_graph(os.path.join(args.output, "model.svg"), inputs=inputs)
- return caffe2_model
- elif args.format == "onnx":
- import onnx
-
- onnx_model = tracer.export_onnx()
- onnx.save(onnx_model, os.path.join(args.output, "model.onnx"))
- elif args.format == "torchscript":
- ts_model = tracer.export_torchscript()
- with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f:
- torch.jit.save(ts_model, f)
- dump_torchscript_IR(ts_model, args.output)
-
-
-# experimental. API not yet final
-def export_scripting(torch_model):
- assert TORCH_VERSION >= (1, 8)
- fields = {
- "proposal_boxes": Boxes,
- "objectness_logits": Tensor,
- "pred_boxes": Boxes,
- "scores": Tensor,
- "pred_classes": Tensor,
- "pred_masks": Tensor,
- "pred_keypoints": torch.Tensor,
- "pred_keypoint_heatmaps": torch.Tensor,
- }
- assert args.format == "torchscript", "Scripting only supports torchscript format."
-
- class ScriptableAdapterBase(nn.Module):
- # Use this adapter to workaround https://github.com/pytorch/pytorch/issues/46944
- # by not returning instances but dicts. Otherwise the exported model is not deployable
- def __init__(self):
- super().__init__()
- self.model = torch_model
- self.eval()
-
- if isinstance(torch_model, GeneralizedRCNN):
-
- class ScriptableAdapter(ScriptableAdapterBase):
- def forward(self, inputs: Tuple[Dict[str, torch.Tensor]]) -> List[Dict[str, Tensor]]:
- instances = self.model.inference(inputs, do_postprocess=False)
- return [i.get_fields() for i in instances]
-
- else:
-
- class ScriptableAdapter(ScriptableAdapterBase):
- def forward(self, inputs: Tuple[Dict[str, torch.Tensor]]) -> List[Dict[str, Tensor]]:
- instances = self.model(inputs)
- return [i.get_fields() for i in instances]
-
- ts_model = scripting_with_instances(ScriptableAdapter(), fields)
- with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f:
- torch.jit.save(ts_model, f)
- dump_torchscript_IR(ts_model, args.output)
- # TODO inference in Python now missing postprocessing glue code
- return None
-
-
-# experimental. API not yet final
-def export_tracing(torch_model, inputs):
- assert TORCH_VERSION >= (1, 8)
- image = inputs[0]["image"]
- inputs = [{"image": image}] # remove other unused keys
-
- if isinstance(torch_model, GeneralizedRCNN):
-
- def inference(model, inputs):
- # use do_postprocess=False so it returns ROI mask
- inst = model.inference(inputs, do_postprocess=False)[0]
- return [{"instances": inst}]
-
- else:
- inference = None # assume that we just call the model directly
-
- traceable_model = TracingAdapter(torch_model, inputs, inference)
-
- if args.format == "torchscript":
- ts_model = torch.jit.trace(traceable_model, (image,))
- with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f:
- torch.jit.save(ts_model, f)
- dump_torchscript_IR(ts_model, args.output)
- elif args.format == "onnx":
- with PathManager.open(os.path.join(args.output, "model.onnx"), "wb") as f:
- torch.onnx.export(traceable_model, (image,), f, opset_version=STABLE_ONNX_OPSET_VERSION)
- logger.info("Inputs schema: " + str(traceable_model.inputs_schema))
- logger.info("Outputs schema: " + str(traceable_model.outputs_schema))
-
- if args.format != "torchscript":
- return None
- if not isinstance(torch_model, (GeneralizedRCNN, RetinaNet)):
- return None
-
- def eval_wrapper(inputs):
- """
- The exported model does not contain the final resize step, which is typically
- unused in deployment but needed for evaluation. We add it manually here.
- """
- input = inputs[0]
- instances = traceable_model.outputs_schema(ts_model(input["image"]))[0]["instances"]
- postprocessed = detector_postprocess(instances, input["height"], input["width"])
- return [{"instances": postprocessed}]
-
- return eval_wrapper
-
-
-def get_sample_inputs(args):
-
- if args.sample_image is None:
- # get a first batch from dataset
- data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
- first_batch = next(iter(data_loader))
- return first_batch
- else:
- # get a sample data
- original_image = detection_utils.read_image(args.sample_image, format=cfg.INPUT.FORMAT)
- # Do same preprocessing as DefaultPredictor
- aug = T.ResizeShortestEdge(
- [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
- )
- height, width = original_image.shape[:2]
- image = aug.get_transform(original_image).apply_image(original_image)
- image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
-
- inputs = {"image": image, "height": height, "width": width}
-
- # Sample ready
- sample_inputs = [inputs]
- return sample_inputs
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(description="Export a model for deployment.")
- parser.add_argument(
- "--format",
- choices=["caffe2", "onnx", "torchscript"],
- help="output format",
- default="torchscript",
- )
- parser.add_argument(
- "--export-method",
- choices=["caffe2_tracing", "tracing", "scripting"],
- help="Method to export models",
- default="tracing",
- )
- parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
- parser.add_argument("--sample-image", default=None, type=str, help="sample image for input")
- parser.add_argument("--run-eval", action="store_true")
- parser.add_argument("--output", help="output directory for the converted model")
- parser.add_argument(
- "opts",
- help="Modify config options using the command-line",
- default=None,
- nargs=argparse.REMAINDER,
- )
- args = parser.parse_args()
- logger = setup_logger()
- logger.info("Command line arguments: " + str(args))
- PathManager.mkdirs(args.output)
- # Disable re-specialization on new shapes. Otherwise --run-eval will be slow
- torch._C._jit_set_bailout_depth(1)
-
- cfg = setup_cfg(args)
-
- # create a torch model
- torch_model = build_model(cfg)
- DetectionCheckpointer(torch_model).resume_or_load(cfg.MODEL.WEIGHTS)
- torch_model.eval()
-
- # convert and save model
- if args.export_method == "caffe2_tracing":
- sample_inputs = get_sample_inputs(args)
- exported_model = export_caffe2_tracing(cfg, torch_model, sample_inputs)
- elif args.export_method == "scripting":
- exported_model = export_scripting(torch_model)
- elif args.export_method == "tracing":
- sample_inputs = get_sample_inputs(args)
- exported_model = export_tracing(torch_model, sample_inputs)
-
- # run evaluation with the converted model
- if args.run_eval:
- assert exported_model is not None, (
- "Python inference is not yet implemented for "
- f"export_method={args.export_method}, format={args.format}."
- )
- logger.info("Running evaluation ... this takes a long time if you export to CPU.")
- dataset = cfg.DATASETS.TEST[0]
- data_loader = build_detection_test_loader(cfg, dataset)
- # NOTE: hard-coded evaluator. change to the evaluator for your dataset
- evaluator = COCOEvaluator(dataset, output_dir=args.output)
- metrics = inference_on_dataset(exported_model, data_loader, evaluator)
- print_csv_format(metrics)
- logger.info("Success.")
diff --git a/spaces/nomic-ai/mosaicml_dolly_hhrlhf/README.md b/spaces/nomic-ai/mosaicml_dolly_hhrlhf/README.md
deleted file mode 100644
index f18f14e462332a1f159e9ec75234a6233f84045f..0000000000000000000000000000000000000000
--- a/spaces/nomic-ai/mosaicml_dolly_hhrlhf/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: mosaicml/dolly_hhrlhf
-emoji: 🗺️
-colorFrom: purple
-colorTo: red
-sdk: static
-pinned: false
----
diff --git a/spaces/nota-ai/compressed-wav2lip/models/wav2lip_noRes.py b/spaces/nota-ai/compressed-wav2lip/models/wav2lip_noRes.py
deleted file mode 100644
index e0c01b7887c08acca83c4f53bc01b1a03fd5ed6c..0000000000000000000000000000000000000000
--- a/spaces/nota-ai/compressed-wav2lip/models/wav2lip_noRes.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import torch
-from torch import nn
-from torch.nn import functional as F
-import math
-
-from .conv import Conv2dTranspose, Conv2d, nonorm_Conv2d
-
-
-class Wav2Lip_noRes(nn.Module):
- def __init__(self, nef=16, naf=32, ndf=32, x_size=96):
- super(Wav2Lip_noRes, self).__init__()
-
- if x_size == 96:
- self.ker_sz_last = 3
- elif x_size == 128:
- self.ker_sz_last = 4
-
- self.face_encoder_blocks = nn.ModuleList([
- nn.Sequential(Conv2d(6, nef, kernel_size=7, stride=1, padding=3)), # 96,96
-
- nn.Sequential(Conv2d(nef, nef * 2, kernel_size=3, stride=2, padding=1),), # 48,48
-
- nn.Sequential(Conv2d(nef * 2, nef * 4, kernel_size=3, stride=2, padding=1),), # 24,24
-
- nn.Sequential(Conv2d(nef * 4, nef * 8, kernel_size=3, stride=2, padding=1),), # 12,12
-
- nn.Sequential(Conv2d(nef * 8, nef * 16, kernel_size=3, stride=2, padding=1),), # 6,6
-
- nn.Sequential(Conv2d(nef * 16, nef * 32, kernel_size=3, stride=2, padding=1),), # 3,3
-
- nn.Sequential(Conv2d(nef * 32, nef * 32, kernel_size=self.ker_sz_last, stride=1, padding=0), # 1, 1
- Conv2d(nef * 32, nef * 32, kernel_size=1, stride=1, padding=0)), ])
-
- self.audio_encoder = nn.Sequential(
- Conv2d(1, naf, kernel_size=3, stride=1, padding=1),
-
- Conv2d(naf, naf * 2, kernel_size=3, stride=(3, 1), padding=1),
-
- Conv2d(naf * 2, naf * 4, kernel_size=3, stride=3, padding=1),
-
- Conv2d(naf * 4, naf * 8, kernel_size=3, stride=(3, 2), padding=1),
-
- Conv2d(naf * 8, naf * 16, kernel_size=3, stride=1, padding=0),
- Conv2d(naf * 16, naf * 16, kernel_size=1, stride=1, padding=0), )
-
- self.face_decoder_blocks = nn.ModuleList([
- nn.Sequential(Conv2d(naf * 16, naf * 16, kernel_size=1, stride=1, padding=0), ),
-
- nn.Sequential(Conv2dTranspose(nef * 32 + naf * 16, ndf * 16, kernel_size=self.ker_sz_last, stride=1, padding=0),),
- # 3,3 # 512+512 = 1024
-
- nn.Sequential(
- Conv2dTranspose(nef * 32 + ndf * 16, ndf * 16, kernel_size=3, stride=2, padding=1, output_padding=1),), # 6, 6
- # 512+512 = 1024
-
- nn.Sequential(
- Conv2dTranspose(nef * 16 + ndf * 16, ndf * 12, kernel_size=3, stride=2, padding=1, output_padding=1),), # 12, 12
- # 256+512 = 768
-
- nn.Sequential(
- Conv2dTranspose(nef * 8 + ndf * 12, ndf * 8, kernel_size=3, stride=2, padding=1, output_padding=1),), # 24, 24
- # 128+384 = 512
-
- nn.Sequential(
- Conv2dTranspose(nef * 4 + ndf * 8, ndf * 4, kernel_size=3, stride=2, padding=1, output_padding=1),), # 48, 48
- # 64+256 = 320
-
- nn.Sequential(
- Conv2dTranspose(nef * 2 + ndf * 4, ndf * 2, kernel_size=3, stride=2, padding=1, output_padding=1),), # 96,96
- # 32+128 = 160
- ])
-
- self.output_block = nn.Sequential(Conv2d(nef + ndf * 2, ndf, kernel_size=3, stride=1, padding=1), # 16+64 = 80
- nn.Conv2d(ndf, 3, kernel_size=1, stride=1, padding=0),
- nn.Sigmoid())
-
- def forward(self, audio_sequences, face_sequences):
- # audio_sequences = (B, T, 1, 80, 16)
- B = audio_sequences.size(0)
-
- input_dim_size = len(face_sequences.size())
- if input_dim_size > 4:
- audio_sequences = torch.cat([audio_sequences[:, i] for i in range(audio_sequences.size(1))], dim=0)
- face_sequences = torch.cat([face_sequences[:, :, i] for i in range(face_sequences.size(2))], dim=0)
-
- audio_embedding = self.audio_encoder(audio_sequences) # B, 512, 1, 1
-
- feats = []
- x = face_sequences
- for f in self.face_encoder_blocks:
- x = f(x)
- feats.append(x)
-
- x = audio_embedding
-
- for f in self.face_decoder_blocks:
-
- x = f(x)
- try:
- x = torch.cat((x, feats[-1]), dim=1)
- except Exception as e:
- print(x.size())
- print(feats[-1].size())
- raise e
-
- feats.pop()
-
- x = self.output_block(x)
-
- if input_dim_size > 4:
- x = torch.split(x, B, dim=0) # [(B, C, H, W)]
- outputs = torch.stack(x, dim=2) # (B, C, T, H, W)
-
- else:
- outputs = x
-
- return outputs
-
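
Based on the shape comments in `forward` above, a hedged smoke test for `Wav2Lip_noRes` looks like the sketch below: audio comes in as `(B, T, 1, 80, 16)` mel windows and faces as `(B, 6, T, H, W)` (six channels because, as in the original Wav2Lip, a reference frame and a masked frame are stacked on the channel axis). The import path is assumed from the repo layout.

```python
import torch
from models.wav2lip_noRes import Wav2Lip_noRes  # assumed import path

model = Wav2Lip_noRes(x_size=96).eval()
audio = torch.randn(2, 5, 1, 80, 16)   # B=2 clips, T=5 mel windows each
faces = torch.randn(2, 6, 5, 96, 96)   # B=2, 6 input channels, T=5, 96x96 crops

with torch.no_grad():
    out = model(audio, faces)

print(out.shape)  # torch.Size([2, 3, 5, 96, 96]): RGB frames per time step
```
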
diff --git a/spaces/ntt123/Vietnam-male-voice-TTS/models.py b/spaces/ntt123/Vietnam-male-voice-TTS/models.py
deleted file mode 100644
index 54702f7161052c45f947a075b167422463e52b38..0000000000000000000000000000000000000000
--- a/spaces/ntt123/Vietnam-male-voice-TTS/models.py
+++ /dev/null
@@ -1,489 +0,0 @@
-import math
-
-import torch
-from torch import nn
-from torch.nn import Conv1d, Conv2d, ConvTranspose1d
-from torch.nn import functional as F
-from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
-from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
-
-import attentions
-import commons
-import modules
-from commons import get_padding, init_weights
-from flow import ResidualCouplingBlock
-
-
-class PriorEncoder(nn.Module):
- def __init__(
- self,
- n_vocab,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- ):
- super().__init__()
- self.n_vocab = n_vocab
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
-
- self.emb = nn.Embedding(n_vocab, hidden_channels)
- nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
- self.pre_attn_encoder = attentions.Encoder(
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers // 2,
- kernel_size,
- p_dropout,
- )
- self.post_attn_encoder = attentions.Encoder(
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers - n_layers // 2,
- kernel_size,
- p_dropout,
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, y_lengths, attn):
- x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.pre_attn_encoder(x * x_mask, x_mask)
- y = torch.einsum("bht,blt->bhl", x, attn)
- y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, y.size(2)), 1).to(
- y.dtype
- )
- y = self.post_attn_encoder(y * y_mask, y_mask)
- stats = self.proj(y) * y_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return y, m, logs, y_mask
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
-
-class Generator(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=0,
- ):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- print("Removing weight norm...")
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList(
- [
- norm_f(
- Conv2d(
- 1,
- 32,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 32,
- 128,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 128,
- 512,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 512,
- 1024,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 1024,
- 1024,
- (kernel_size, 1),
- 1,
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- ]
- )
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
- self.convs = nn.ModuleList(
- [
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ]
- )
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2, 3, 5, 7, 11]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
- ]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class SynthesizerTrn(nn.Module):
- """
- Synthesizer for Training
- """
-
- def __init__(
- self,
- n_vocab,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- n_speakers=0,
- gin_channels=0,
- **kwargs
- ):
- super().__init__()
- self.n_vocab = n_vocab
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.n_speakers = n_speakers
- self.gin_channels = gin_channels
-
- self.enc_p = PriorEncoder(
- n_vocab,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- self.dec = Generator(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 2, 4, gin_channels=gin_channels
- )
-
- if n_speakers > 1:
- self.emb_g = nn.Embedding(n_speakers, gin_channels)
-
- def forward(self, x, x_lengths, attn, y, y_lengths, sid=None):
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, y_lengths, attn=attn)
- if self.n_speakers > 0:
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
- else:
- g = None
-
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
-
- z_slice, ids_slice = commons.rand_slice_segments(
- z, y_lengths, self.segment_size
- )
- o = self.dec(z_slice, g=g)
- l_length = None
- return (
- o,
- l_length,
- attn,
- ids_slice,
- x_mask,
- y_mask,
- (z, z_p, m_p, logs_p, m_q, logs_q),
- )
-
- def infer(
- self,
- x,
- x_lengths,
- y_lengths,
- attn,
- sid=None,
- noise_scale=1,
- max_len=None,
- ):
- x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, y_lengths, attn=attn)
- if self.n_speakers > 0:
- g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
- else:
- g = None
-
- y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, attn.shape[1]), 1).to(
- x_mask.dtype
- )
- z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
- z = self.flow(z_p, y_mask, g=g, reverse=True)
- o = self.dec((z * y_mask)[:, :, :max_len], g=g)
- return o, attn, y_mask, (z, z_p, m_p, logs_p)
-
-
-class DurationNet(torch.nn.Module):
- def __init__(self, vocab_size: int, dim: int, num_layers=2):
- super().__init__()
- self.embed = torch.nn.Embedding(vocab_size, embedding_dim=dim)
- self.rnn = torch.nn.GRU(
- dim,
- dim,
- num_layers=num_layers,
- batch_first=True,
- bidirectional=True,
- dropout=0.2,
- )
- self.proj = torch.nn.Linear(2 * dim, 1)
-
- def forward(self, token, lengths):
- x = self.embed(token)
- lengths = lengths.long().cpu()
- x = pack_padded_sequence(
- x, lengths=lengths, batch_first=True, enforce_sorted=False
- )
- x, _ = self.rnn(x)
- x, _ = pad_packed_sequence(x, batch_first=True, total_length=token.shape[1])
- x = self.proj(x)
- x = torch.nn.functional.softplus(x)
- return x
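# Editor's illustration (not part of the deleted file above): the DurationNet defined at the
# end of that file predicts per-token durations from variable-length token sequences by
# packing them before the GRU. This minimal sketch reproduces that pack/pad round trip with
# hypothetical sizes so the shape handling is easy to follow; it assumes only PyTorch.
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

vocab_size, dim, batch, max_len = 50, 8, 3, 6             # hypothetical sizes
tokens = torch.randint(0, vocab_size, (batch, max_len))   # padded token ids
lengths = torch.tensor([6, 4, 2])                         # true length of each sequence

embed = torch.nn.Embedding(vocab_size, dim)
rnn = torch.nn.GRU(dim, dim, batch_first=True, bidirectional=True)
proj = torch.nn.Linear(2 * dim, 1)

x = embed(tokens)                                          # [batch, max_len, dim]
packed = pack_padded_sequence(x, lengths, batch_first=True, enforce_sorted=False)
out, _ = rnn(packed)
out, _ = pad_packed_sequence(out, batch_first=True, total_length=max_len)
durations = torch.nn.functional.softplus(proj(out))       # positive durations, [batch, max_len, 1]
print(durations.shape)                                     # torch.Size([3, 6, 1])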
diff --git a/spaces/nupurkmr9/custom-diffusion/README.md b/spaces/nupurkmr9/custom-diffusion/README.md
deleted file mode 100644
index cbfb32a6202a7cb7088bb6c14ab66fb049161ce1..0000000000000000000000000000000000000000
--- a/spaces/nupurkmr9/custom-diffusion/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Custom-Diffusion + SD Training
-emoji: 🏢
-colorFrom: red
-colorTo: purple
-sdk: gradio
-sdk_version: 3.12.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/oguzakif/video-object-remover/SiamMask/tools/__init__.py b/spaces/oguzakif/video-object-remover/SiamMask/tools/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/oliver2023/chatgpt-on-wechat/lib/itchat/components/__init__.py b/spaces/oliver2023/chatgpt-on-wechat/lib/itchat/components/__init__.py
deleted file mode 100644
index 0fc321c81714371663450658651697f3209237cb..0000000000000000000000000000000000000000
--- a/spaces/oliver2023/chatgpt-on-wechat/lib/itchat/components/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from .contact import load_contact
-from .hotreload import load_hotreload
-from .login import load_login
-from .messages import load_messages
-from .register import load_register
-
-def load_components(core):
- load_contact(core)
- load_hotreload(core)
- load_login(core)
- load_messages(core)
- load_register(core)
diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/models/vq_model.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/models/vq_model.py
deleted file mode 100644
index 0c15300af2135a681a1d863ff246a3d67f16a9ac..0000000000000000000000000000000000000000
--- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/models/vq_model.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from dataclasses import dataclass
-from typing import Optional, Tuple, Union
-
-import torch
-import torch.nn as nn
-
-from ..configuration_utils import ConfigMixin, register_to_config
-from ..utils import BaseOutput
-from ..utils.accelerate_utils import apply_forward_hook
-from .modeling_utils import ModelMixin
-from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
-
-
-@dataclass
-class VQEncoderOutput(BaseOutput):
- """
- Output of VQModel encoding method.
-
- Args:
- latents (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
- The encoded output sample from the last layer of the model.
- """
-
- latents: torch.FloatTensor
-
-
-class VQModel(ModelMixin, ConfigMixin):
- r"""
- A VQ-VAE model for decoding latent representations.
-
-    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
- for all models (such as downloading or saving).
-
- Parameters:
- in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
- out_channels (int, *optional*, defaults to 3): Number of channels in the output.
- down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
- Tuple of downsample block types.
- up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
- Tuple of upsample block types.
- block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
- Tuple of block output channels.
- act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
- latent_channels (`int`, *optional*, defaults to `3`): Number of channels in the latent space.
- sample_size (`int`, *optional*, defaults to `32`): Sample input size.
- num_vq_embeddings (`int`, *optional*, defaults to `256`): Number of codebook vectors in the VQ-VAE.
- vq_embed_dim (`int`, *optional*): Hidden dim of codebook vectors in the VQ-VAE.
- scaling_factor (`float`, *optional*, defaults to `0.18215`):
- The component-wise standard deviation of the trained latent space computed using the first batch of the
- training set. This is used to scale the latent space to have unit variance when training the diffusion
- model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
- diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
- / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
- Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
- """
-
- @register_to_config
- def __init__(
- self,
- in_channels: int = 3,
- out_channels: int = 3,
- down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
- up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
- block_out_channels: Tuple[int] = (64,),
- layers_per_block: int = 1,
- act_fn: str = "silu",
- latent_channels: int = 3,
- sample_size: int = 32,
- num_vq_embeddings: int = 256,
- norm_num_groups: int = 32,
- vq_embed_dim: Optional[int] = None,
- scaling_factor: float = 0.18215,
- norm_type: str = "group", # group, spatial
- ):
- super().__init__()
-
- # pass init params to Encoder
- self.encoder = Encoder(
- in_channels=in_channels,
- out_channels=latent_channels,
- down_block_types=down_block_types,
- block_out_channels=block_out_channels,
- layers_per_block=layers_per_block,
- act_fn=act_fn,
- norm_num_groups=norm_num_groups,
- double_z=False,
- )
-
- vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
-
- self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
- self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
- self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
-
- # pass init params to Decoder
- self.decoder = Decoder(
- in_channels=latent_channels,
- out_channels=out_channels,
- up_block_types=up_block_types,
- block_out_channels=block_out_channels,
- layers_per_block=layers_per_block,
- act_fn=act_fn,
- norm_num_groups=norm_num_groups,
- norm_type=norm_type,
- )
-
- @apply_forward_hook
- def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
- h = self.encoder(x)
- h = self.quant_conv(h)
-
- if not return_dict:
- return (h,)
-
- return VQEncoderOutput(latents=h)
-
- @apply_forward_hook
- def decode(
- self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
- ) -> Union[DecoderOutput, torch.FloatTensor]:
- # also go through quantization layer
- if not force_not_quantize:
- quant, _, _ = self.quantize(h)
- else:
- quant = h
- quant2 = self.post_quant_conv(quant)
- dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
-
- if not return_dict:
- return (dec,)
-
- return DecoderOutput(sample=dec)
-
- def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
- r"""
- The [`VQModel`] forward method.
-
- Args:
- sample (`torch.FloatTensor`): Input sample.
- return_dict (`bool`, *optional*, defaults to `True`):
-                Whether or not to return a [`models.vae.DecoderOutput`] instead of a plain tuple.
-
-        Returns:
-            [`~models.vae.DecoderOutput`] or `tuple`:
-                If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple`
- is returned.
- """
- x = sample
- h = self.encode(x).latents
- dec = self.decode(h).sample
-
- if not return_dict:
- return (dec,)
-
- return DecoderOutput(sample=dec)
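# Editor's sketch (not from the deleted file above): the VQModel docstring describes scaling
# latents by `scaling_factor` before they are passed to a diffusion model and unscaling them
# before decoding. The helpers below show only that round trip with plain tensors; the value
# 0.18215 comes from the docstring, the shapes are made up for illustration.
import torch

scaling_factor = 0.18215

def to_diffusion_space(latents: torch.Tensor) -> torch.Tensor:
    # scale encoder output so the diffusion model sees roughly unit-variance latents
    return latents * scaling_factor

def to_decoder_space(latents: torch.Tensor) -> torch.Tensor:
    # undo the scaling before handing latents back to the decoder
    return latents / scaling_factor

z = torch.randn(1, 3, 32, 32)  # hypothetical latent batch
assert torch.allclose(to_decoder_space(to_diffusion_space(z)), z, atol=1e-6)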
diff --git a/spaces/parkyzh/bingo/src/components/chat-history.tsx b/spaces/parkyzh/bingo/src/components/chat-history.tsx
deleted file mode 100644
index feb81de66562edda8f40d3c0cc717202c92b6509..0000000000000000000000000000000000000000
--- a/spaces/parkyzh/bingo/src/components/chat-history.tsx
+++ /dev/null
@@ -1,48 +0,0 @@
-import { IconEdit, IconTrash, IconMore, IconDownload } from "./ui/icons"
-
-export function ChatHistory() {
- return (
-    {/* Original JSX markup was lost during extraction. The component rendered a chat-history
-        panel: a "历史记录" (history) header and an entry titled "无标题的聊天" (untitled chat)
-        timestamped "上午1:42" (1:42 AM); the imported IconEdit / IconTrash / IconMore /
-        IconDownload buttons appeared in this markup. */}
- )
-}
diff --git a/spaces/paulbricman/velma/src/baselines.py b/spaces/paulbricman/velma/src/baselines.py
deleted file mode 100644
index 2709037e4abeaef2ea6883bd0ba71bbc0dc4e984..0000000000000000000000000000000000000000
--- a/spaces/paulbricman/velma/src/baselines.py
+++ /dev/null
@@ -1,61 +0,0 @@
-from sentence_transformers import CrossEncoder, SentenceTransformer
-from sentence_transformers.util import semantic_search
-from src.util import softmax
-
-
-def infer_nli(context, statements, nli=None, mode='relative'):
- '''
- Baseline inference by abduction using vanilla deductive NLI.
-
- Args:
- context (str): Background context treated as premise in abduction.
- statements (list): List of statements to rank by likelihood of being entailed.
- nli (sentence_transformers.CrossEncoder): CrossEncoder to use.
-        mode (str): "relative" yields entailment probs across the statement pair,
-            while "absolute" yields entailment and contradiction probs for the original statement.
-
- Returns:
-        (list): List of probabilities (one per statement in "relative" mode; [entailment, contradiction] for the first statement in "absolute" mode).
- '''
- assert mode in [
- 'relative', 'absolute'], 'Invalid mode, should be "relative" or "absolute".'
-
- if not nli:
- nli = CrossEncoder('cross-encoder/nli-deberta-v3-base')
-
- if mode == 'relative':
- pairs = [(context, e) for e in statements]
- scores = nli.predict(pairs)
- scores = softmax([e[1] for e in scores], 0.1)
- elif mode == 'absolute':
- pair = (context, statements[0])
- scores = nli.predict(pair)
- scores = softmax([scores[1], scores[0]], 0.5)
-
- return scores
-
-
-def infer_embs(context, statements, encoder=None):
- '''
- Baseline inference by abduction using semantic similarity between context and statements.
-
- Args:
- context (str): Background context treated as premise in abduction.
- statements (list): List of statements to rank by semantic similarity.
- encoder (sentence_transformers.SentenceTransformer): SentenceTransformer to use.
-
- Returns:
- (list): List of semantic similarity probabilities for each statement.
- '''
- if not encoder:
- encoder = SentenceTransformer('all-MiniLM-L6-v2')
-
- context_emb = encoder.encode(context)
- statement_embs = encoder.encode(statements)
-
- results = semantic_search(context_emb, statement_embs)[0]
- results = sorted(results, key=lambda x: x['corpus_id'])
- results = [e['score'] for e in results]
- results = softmax(results, 0.2)
-
- return results
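# Editor's sketch: both baselines above pass their raw scores through a temperature-scaled
# softmax imported from src.util (not shown in this diff). The helper below is an assumed
# re-implementation of that idea, used only to show how a lower temperature sharpens the
# ranking over candidate statements; the real src.util.softmax may differ in detail.
import math

def temperature_softmax(scores, temperature=1.0):
    # divide by the temperature, then apply a numerically stable softmax
    scaled = [s / temperature for s in scores]
    m = max(scaled)
    exps = [math.exp(s - m) for s in scaled]
    total = sum(exps)
    return [e / total for e in exps]

raw = [0.2, 1.3, 0.9]                 # hypothetical entailment / similarity scores
print(temperature_softmax(raw, 1.0))  # soft ranking over the statements
print(temperature_softmax(raw, 0.1))  # near one-hot on the highest-scoring statement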
diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/cli/spinners.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/cli/spinners.py
deleted file mode 100644
index cf2b976f377c2656afb3d84add8d30b0fc280c03..0000000000000000000000000000000000000000
--- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/cli/spinners.py
+++ /dev/null
@@ -1,159 +0,0 @@
-import contextlib
-import itertools
-import logging
-import sys
-import time
-from typing import IO, Generator, Optional
-
-from pip._internal.utils.compat import WINDOWS
-from pip._internal.utils.logging import get_indentation
-
-logger = logging.getLogger(__name__)
-
-
-class SpinnerInterface:
- def spin(self) -> None:
- raise NotImplementedError()
-
- def finish(self, final_status: str) -> None:
- raise NotImplementedError()
-
-
-class InteractiveSpinner(SpinnerInterface):
- def __init__(
- self,
- message: str,
- file: Optional[IO[str]] = None,
- spin_chars: str = "-\\|/",
- # Empirically, 8 updates/second looks nice
- min_update_interval_seconds: float = 0.125,
- ):
- self._message = message
- if file is None:
- file = sys.stdout
- self._file = file
- self._rate_limiter = RateLimiter(min_update_interval_seconds)
- self._finished = False
-
- self._spin_cycle = itertools.cycle(spin_chars)
-
- self._file.write(" " * get_indentation() + self._message + " ... ")
- self._width = 0
-
- def _write(self, status: str) -> None:
- assert not self._finished
- # Erase what we wrote before by backspacing to the beginning, writing
- # spaces to overwrite the old text, and then backspacing again
- backup = "\b" * self._width
- self._file.write(backup + " " * self._width + backup)
- # Now we have a blank slate to add our status
- self._file.write(status)
- self._width = len(status)
- self._file.flush()
- self._rate_limiter.reset()
-
- def spin(self) -> None:
- if self._finished:
- return
- if not self._rate_limiter.ready():
- return
- self._write(next(self._spin_cycle))
-
- def finish(self, final_status: str) -> None:
- if self._finished:
- return
- self._write(final_status)
- self._file.write("\n")
- self._file.flush()
- self._finished = True
-
-
-# Used for dumb terminals, non-interactive installs (no tty), etc.
-# We still print updates occasionally (once every 60 seconds by default) to
-# act as a keep-alive for systems like Travis-CI that take lack-of-output as
-# an indication that a task has frozen.
-class NonInteractiveSpinner(SpinnerInterface):
- def __init__(self, message: str, min_update_interval_seconds: float = 60.0) -> None:
- self._message = message
- self._finished = False
- self._rate_limiter = RateLimiter(min_update_interval_seconds)
- self._update("started")
-
- def _update(self, status: str) -> None:
- assert not self._finished
- self._rate_limiter.reset()
- logger.info("%s: %s", self._message, status)
-
- def spin(self) -> None:
- if self._finished:
- return
- if not self._rate_limiter.ready():
- return
- self._update("still running...")
-
- def finish(self, final_status: str) -> None:
- if self._finished:
- return
- self._update(f"finished with status '{final_status}'")
- self._finished = True
-
-
-class RateLimiter:
- def __init__(self, min_update_interval_seconds: float) -> None:
- self._min_update_interval_seconds = min_update_interval_seconds
- self._last_update: float = 0
-
- def ready(self) -> bool:
- now = time.time()
- delta = now - self._last_update
- return delta >= self._min_update_interval_seconds
-
- def reset(self) -> None:
- self._last_update = time.time()
-
-
-@contextlib.contextmanager
-def open_spinner(message: str) -> Generator[SpinnerInterface, None, None]:
- # Interactive spinner goes directly to sys.stdout rather than being routed
- # through the logging system, but it acts like it has level INFO,
- # i.e. it's only displayed if we're at level INFO or better.
- # Non-interactive spinner goes through the logging system, so it is always
- # in sync with logging configuration.
- if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO:
- spinner: SpinnerInterface = InteractiveSpinner(message)
- else:
- spinner = NonInteractiveSpinner(message)
- try:
- with hidden_cursor(sys.stdout):
- yield spinner
- except KeyboardInterrupt:
- spinner.finish("canceled")
- raise
- except Exception:
- spinner.finish("error")
- raise
- else:
- spinner.finish("done")
-
-
-HIDE_CURSOR = "\x1b[?25l"
-SHOW_CURSOR = "\x1b[?25h"
-
-
-@contextlib.contextmanager
-def hidden_cursor(file: IO[str]) -> Generator[None, None, None]:
- # The Windows terminal does not support the hide/show cursor ANSI codes,
- # even via colorama. So don't even try.
- if WINDOWS:
- yield
- # We don't want to clutter the output with control characters if we're
- # writing to a file, or if the user is running with --quiet.
- # See https://github.com/pypa/pip/issues/3418
- elif not file.isatty() or logger.getEffectiveLevel() > logging.INFO:
- yield
- else:
- file.write(HIDE_CURSOR)
- try:
- yield
- finally:
- file.write(SHOW_CURSOR)
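# Editor's sketch: the RateLimiter above throttles spinner redraws (8 updates/second for the
# interactive spinner, one log line per minute for the non-interactive one). This standalone
# loop shows the same time-based gating pattern outside of pip; the interval is arbitrary.
import time

class SimpleRateLimiter:
    def __init__(self, min_interval: float) -> None:
        self._min_interval = min_interval
        self._last = 0.0

    def ready(self) -> bool:
        return time.time() - self._last >= self._min_interval

    def reset(self) -> None:
        self._last = time.time()

limiter = SimpleRateLimiter(0.5)
for _ in range(5):
    if limiter.ready():
        print("tick")      # fires at most once every 0.5 seconds
        limiter.reset()
    time.sleep(0.2)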
diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/distlib/index.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/distlib/index.py
deleted file mode 100644
index 9b6d129ed690361770738bec73f44ba7e10a21c5..0000000000000000000000000000000000000000
--- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/distlib/index.py
+++ /dev/null
@@ -1,508 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2013 Vinay Sajip.
-# Licensed to the Python Software Foundation under a contributor agreement.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-import hashlib
-import logging
-import os
-import shutil
-import subprocess
-import tempfile
-try:
- from threading import Thread
-except ImportError: # pragma: no cover
- from dummy_threading import Thread
-
-from . import DistlibException
-from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
- urlparse, build_opener, string_types)
-from .util import zip_dir, ServerProxy
-
-logger = logging.getLogger(__name__)
-
-DEFAULT_INDEX = 'https://pypi.org/pypi'
-DEFAULT_REALM = 'pypi'
-
-class PackageIndex(object):
- """
- This class represents a package index compatible with PyPI, the Python
- Package Index.
- """
-
- boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
-
- def __init__(self, url=None):
- """
- Initialise an instance.
-
- :param url: The URL of the index. If not specified, the URL for PyPI is
- used.
- """
- self.url = url or DEFAULT_INDEX
- self.read_configuration()
- scheme, netloc, path, params, query, frag = urlparse(self.url)
- if params or query or frag or scheme not in ('http', 'https'):
- raise DistlibException('invalid repository: %s' % self.url)
- self.password_handler = None
- self.ssl_verifier = None
- self.gpg = None
- self.gpg_home = None
- with open(os.devnull, 'w') as sink:
- # Use gpg by default rather than gpg2, as gpg2 insists on
- # prompting for passwords
- for s in ('gpg', 'gpg2'):
- try:
- rc = subprocess.check_call([s, '--version'], stdout=sink,
- stderr=sink)
- if rc == 0:
- self.gpg = s
- break
- except OSError:
- pass
-
- def _get_pypirc_command(self):
- """
- Get the distutils command for interacting with PyPI configurations.
- :return: the command.
- """
- from .util import _get_pypirc_command as cmd
- return cmd()
-
- def read_configuration(self):
- """
- Read the PyPI access configuration as supported by distutils. This populates
- ``username``, ``password``, ``realm`` and ``url`` attributes from the
- configuration.
- """
- from .util import _load_pypirc
- cfg = _load_pypirc(self)
- self.username = cfg.get('username')
- self.password = cfg.get('password')
- self.realm = cfg.get('realm', 'pypi')
- self.url = cfg.get('repository', self.url)
-
- def save_configuration(self):
- """
- Save the PyPI access configuration. You must have set ``username`` and
- ``password`` attributes before calling this method.
- """
- self.check_credentials()
- from .util import _store_pypirc
- _store_pypirc(self)
-
- def check_credentials(self):
- """
- Check that ``username`` and ``password`` have been set, and raise an
- exception if not.
- """
- if self.username is None or self.password is None:
- raise DistlibException('username and password must be set')
- pm = HTTPPasswordMgr()
- _, netloc, _, _, _, _ = urlparse(self.url)
- pm.add_password(self.realm, netloc, self.username, self.password)
- self.password_handler = HTTPBasicAuthHandler(pm)
-
- def register(self, metadata): # pragma: no cover
- """
- Register a distribution on PyPI, using the provided metadata.
-
- :param metadata: A :class:`Metadata` instance defining at least a name
- and version number for the distribution to be
- registered.
- :return: The HTTP response received from PyPI upon submission of the
- request.
- """
- self.check_credentials()
- metadata.validate()
- d = metadata.todict()
- d[':action'] = 'verify'
- request = self.encode_request(d.items(), [])
- response = self.send_request(request)
- d[':action'] = 'submit'
- request = self.encode_request(d.items(), [])
- return self.send_request(request)
-
- def _reader(self, name, stream, outbuf):
- """
-        Thread runner for reading lines of output from a subprocess into a buffer.
-
- :param name: The logical name of the stream (used for logging only).
-        :param stream: The stream to read from. This will typically be a pipe
- connected to the output stream of a subprocess.
- :param outbuf: The list to append the read lines to.
- """
- while True:
- s = stream.readline()
- if not s:
- break
- s = s.decode('utf-8').rstrip()
- outbuf.append(s)
- logger.debug('%s: %s' % (name, s))
- stream.close()
-
- def get_sign_command(self, filename, signer, sign_password, keystore=None): # pragma: no cover
- """
- Return a suitable command for signing a file.
-
- :param filename: The pathname to the file to be signed.
- :param signer: The identifier of the signer of the file.
- :param sign_password: The passphrase for the signer's
- private key used for signing.
- :param keystore: The path to a directory which contains the keys
- used in verification. If not specified, the
- instance's ``gpg_home`` attribute is used instead.
- :return: The signing command as a list suitable to be
- passed to :class:`subprocess.Popen`.
- """
- cmd = [self.gpg, '--status-fd', '2', '--no-tty']
- if keystore is None:
- keystore = self.gpg_home
- if keystore:
- cmd.extend(['--homedir', keystore])
- if sign_password is not None:
- cmd.extend(['--batch', '--passphrase-fd', '0'])
- td = tempfile.mkdtemp()
- sf = os.path.join(td, os.path.basename(filename) + '.asc')
- cmd.extend(['--detach-sign', '--armor', '--local-user',
- signer, '--output', sf, filename])
- logger.debug('invoking: %s', ' '.join(cmd))
- return cmd, sf
-
- def run_command(self, cmd, input_data=None):
- """
-        Run a command in a child process, passing it any input data specified.
-
- :param cmd: The command to run.
- :param input_data: If specified, this must be a byte string containing
- data to be sent to the child process.
- :return: A tuple consisting of the subprocess' exit code, a list of
- lines read from the subprocess' ``stdout``, and a list of
- lines read from the subprocess' ``stderr``.
- """
- kwargs = {
- 'stdout': subprocess.PIPE,
- 'stderr': subprocess.PIPE,
- }
- if input_data is not None:
- kwargs['stdin'] = subprocess.PIPE
- stdout = []
- stderr = []
- p = subprocess.Popen(cmd, **kwargs)
- # We don't use communicate() here because we may need to
- # get clever with interacting with the command
- t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
- t1.start()
- t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
- t2.start()
- if input_data is not None:
- p.stdin.write(input_data)
- p.stdin.close()
-
- p.wait()
- t1.join()
- t2.join()
- return p.returncode, stdout, stderr
-
- def sign_file(self, filename, signer, sign_password, keystore=None): # pragma: no cover
- """
- Sign a file.
-
- :param filename: The pathname to the file to be signed.
- :param signer: The identifier of the signer of the file.
- :param sign_password: The passphrase for the signer's
- private key used for signing.
- :param keystore: The path to a directory which contains the keys
- used in signing. If not specified, the instance's
- ``gpg_home`` attribute is used instead.
- :return: The absolute pathname of the file where the signature is
- stored.
- """
- cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
- keystore)
- rc, stdout, stderr = self.run_command(cmd,
- sign_password.encode('utf-8'))
- if rc != 0:
- raise DistlibException('sign command failed with error '
- 'code %s' % rc)
- return sig_file
-
- def upload_file(self, metadata, filename, signer=None, sign_password=None,
- filetype='sdist', pyversion='source', keystore=None):
- """
- Upload a release file to the index.
-
- :param metadata: A :class:`Metadata` instance defining at least a name
- and version number for the file to be uploaded.
- :param filename: The pathname of the file to be uploaded.
- :param signer: The identifier of the signer of the file.
- :param sign_password: The passphrase for the signer's
- private key used for signing.
- :param filetype: The type of the file being uploaded. This is the
- distutils command which produced that file, e.g.
- ``sdist`` or ``bdist_wheel``.
- :param pyversion: The version of Python which the release relates
- to. For code compatible with any Python, this would
- be ``source``, otherwise it would be e.g. ``3.2``.
- :param keystore: The path to a directory which contains the keys
- used in signing. If not specified, the instance's
- ``gpg_home`` attribute is used instead.
- :return: The HTTP response received from PyPI upon submission of the
- request.
- """
- self.check_credentials()
- if not os.path.exists(filename):
- raise DistlibException('not found: %s' % filename)
- metadata.validate()
- d = metadata.todict()
- sig_file = None
- if signer:
- if not self.gpg:
- logger.warning('no signing program available - not signed')
- else:
- sig_file = self.sign_file(filename, signer, sign_password,
- keystore)
- with open(filename, 'rb') as f:
- file_data = f.read()
- md5_digest = hashlib.md5(file_data).hexdigest()
- sha256_digest = hashlib.sha256(file_data).hexdigest()
- d.update({
- ':action': 'file_upload',
- 'protocol_version': '1',
- 'filetype': filetype,
- 'pyversion': pyversion,
- 'md5_digest': md5_digest,
- 'sha256_digest': sha256_digest,
- })
- files = [('content', os.path.basename(filename), file_data)]
- if sig_file:
- with open(sig_file, 'rb') as f:
- sig_data = f.read()
- files.append(('gpg_signature', os.path.basename(sig_file),
- sig_data))
- shutil.rmtree(os.path.dirname(sig_file))
- request = self.encode_request(d.items(), files)
- return self.send_request(request)
-
- def upload_documentation(self, metadata, doc_dir): # pragma: no cover
- """
- Upload documentation to the index.
-
- :param metadata: A :class:`Metadata` instance defining at least a name
- and version number for the documentation to be
- uploaded.
- :param doc_dir: The pathname of the directory which contains the
- documentation. This should be the directory that
- contains the ``index.html`` for the documentation.
- :return: The HTTP response received from PyPI upon submission of the
- request.
- """
- self.check_credentials()
- if not os.path.isdir(doc_dir):
- raise DistlibException('not a directory: %r' % doc_dir)
- fn = os.path.join(doc_dir, 'index.html')
- if not os.path.exists(fn):
- raise DistlibException('not found: %r' % fn)
- metadata.validate()
- name, version = metadata.name, metadata.version
- zip_data = zip_dir(doc_dir).getvalue()
- fields = [(':action', 'doc_upload'),
- ('name', name), ('version', version)]
- files = [('content', name, zip_data)]
- request = self.encode_request(fields, files)
- return self.send_request(request)
-
- def get_verify_command(self, signature_filename, data_filename,
- keystore=None):
- """
- Return a suitable command for verifying a file.
-
- :param signature_filename: The pathname to the file containing the
- signature.
- :param data_filename: The pathname to the file containing the
- signed data.
- :param keystore: The path to a directory which contains the keys
- used in verification. If not specified, the
- instance's ``gpg_home`` attribute is used instead.
- :return: The verifying command as a list suitable to be
- passed to :class:`subprocess.Popen`.
- """
- cmd = [self.gpg, '--status-fd', '2', '--no-tty']
- if keystore is None:
- keystore = self.gpg_home
- if keystore:
- cmd.extend(['--homedir', keystore])
- cmd.extend(['--verify', signature_filename, data_filename])
- logger.debug('invoking: %s', ' '.join(cmd))
- return cmd
-
- def verify_signature(self, signature_filename, data_filename,
- keystore=None):
- """
- Verify a signature for a file.
-
- :param signature_filename: The pathname to the file containing the
- signature.
- :param data_filename: The pathname to the file containing the
- signed data.
- :param keystore: The path to a directory which contains the keys
- used in verification. If not specified, the
- instance's ``gpg_home`` attribute is used instead.
- :return: True if the signature was verified, else False.
- """
- if not self.gpg:
- raise DistlibException('verification unavailable because gpg '
- 'unavailable')
- cmd = self.get_verify_command(signature_filename, data_filename,
- keystore)
- rc, stdout, stderr = self.run_command(cmd)
- if rc not in (0, 1):
- raise DistlibException('verify command failed with error '
- 'code %s' % rc)
- return rc == 0
-
- def download_file(self, url, destfile, digest=None, reporthook=None):
- """
-        This is a convenience method for downloading a file from a URL.
- Normally, this will be a file from the index, though currently
- no check is made for this (i.e. a file can be downloaded from
- anywhere).
-
- The method is just like the :func:`urlretrieve` function in the
- standard library, except that it allows digest computation to be
- done during download and checking that the downloaded data
- matched any expected value.
-
- :param url: The URL of the file to be downloaded (assumed to be
- available via an HTTP GET request).
- :param destfile: The pathname where the downloaded file is to be
- saved.
- :param digest: If specified, this must be a (hasher, value)
- tuple, where hasher is the algorithm used (e.g.
- ``'md5'``) and ``value`` is the expected value.
- :param reporthook: The same as for :func:`urlretrieve` in the
- standard library.
- """
- if digest is None:
- digester = None
- logger.debug('No digest specified')
- else:
- if isinstance(digest, (list, tuple)):
- hasher, digest = digest
- else:
- hasher = 'md5'
- digester = getattr(hashlib, hasher)()
- logger.debug('Digest specified: %s' % digest)
- # The following code is equivalent to urlretrieve.
- # We need to do it this way so that we can compute the
- # digest of the file as we go.
- with open(destfile, 'wb') as dfp:
- # addinfourl is not a context manager on 2.x
- # so we have to use try/finally
- sfp = self.send_request(Request(url))
- try:
- headers = sfp.info()
- blocksize = 8192
- size = -1
- read = 0
- blocknum = 0
- if "content-length" in headers:
- size = int(headers["Content-Length"])
- if reporthook:
- reporthook(blocknum, blocksize, size)
- while True:
- block = sfp.read(blocksize)
- if not block:
- break
- read += len(block)
- dfp.write(block)
- if digester:
- digester.update(block)
- blocknum += 1
- if reporthook:
- reporthook(blocknum, blocksize, size)
- finally:
- sfp.close()
-
- # check that we got the whole file, if we can
- if size >= 0 and read < size:
- raise DistlibException(
- 'retrieval incomplete: got only %d out of %d bytes'
- % (read, size))
- # if we have a digest, it must match.
- if digester:
- actual = digester.hexdigest()
- if digest != actual:
- raise DistlibException('%s digest mismatch for %s: expected '
- '%s, got %s' % (hasher, destfile,
- digest, actual))
- logger.debug('Digest verified: %s', digest)
-
- def send_request(self, req):
- """
- Send a standard library :class:`Request` to PyPI and return its
- response.
-
- :param req: The request to send.
- :return: The HTTP response from PyPI (a standard library HTTPResponse).
- """
- handlers = []
- if self.password_handler:
- handlers.append(self.password_handler)
- if self.ssl_verifier:
- handlers.append(self.ssl_verifier)
- opener = build_opener(*handlers)
- return opener.open(req)
-
- def encode_request(self, fields, files):
- """
- Encode fields and files for posting to an HTTP server.
-
- :param fields: The fields to send as a list of (fieldname, value)
- tuples.
- :param files: The files to send as a list of (fieldname, filename,
-                      file_bytes) tuples.
- """
- # Adapted from packaging, which in turn was adapted from
- # http://code.activestate.com/recipes/146306
-
- parts = []
- boundary = self.boundary
- for k, values in fields:
- if not isinstance(values, (list, tuple)):
- values = [values]
-
- for v in values:
- parts.extend((
- b'--' + boundary,
- ('Content-Disposition: form-data; name="%s"' %
- k).encode('utf-8'),
- b'',
- v.encode('utf-8')))
- for key, filename, value in files:
- parts.extend((
- b'--' + boundary,
- ('Content-Disposition: form-data; name="%s"; filename="%s"' %
- (key, filename)).encode('utf-8'),
- b'',
- value))
-
- parts.extend((b'--' + boundary + b'--', b''))
-
- body = b'\r\n'.join(parts)
- ct = b'multipart/form-data; boundary=' + boundary
- headers = {
- 'Content-type': ct,
- 'Content-length': str(len(body))
- }
- return Request(self.url, body, headers)
-
- def search(self, terms, operator=None): # pragma: no cover
- if isinstance(terms, string_types):
- terms = {'name': terms}
- rpc_proxy = ServerProxy(self.url, timeout=3.0)
- try:
- return rpc_proxy.search(terms, operator or 'and')
- finally:
- rpc_proxy('close')()
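# Editor's sketch: PackageIndex.download_file above computes a digest (md5 by default) while
# the response bytes are streamed to the destination file, then compares it with the expected
# value. The loop below mirrors that hash-as-you-stream pattern against an in-memory source so
# it runs without any network access; the payload and expected digest are made up.
import hashlib
import io

payload = b"example payload " * 1000
expected = hashlib.sha256(payload).hexdigest()

source = io.BytesIO(payload)   # stands in for the HTTP response
sink = io.BytesIO()            # stands in for the destination file
digester = hashlib.sha256()

while True:
    block = source.read(8192)
    if not block:
        break
    sink.write(block)
    digester.update(block)

assert digester.hexdigest() == expected, "digest mismatch"
print("digest verified:", expected[:16], "...")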
diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/__main__.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/__main__.py
deleted file mode 100644
index 270629fd8067bfc20ed4a0b39d9897791ffa93ab..0000000000000000000000000000000000000000
--- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/__main__.py
+++ /dev/null
@@ -1,274 +0,0 @@
-import colorsys
-import io
-from time import process_time
-
-from pip._vendor.rich import box
-from pip._vendor.rich.color import Color
-from pip._vendor.rich.console import Console, ConsoleOptions, Group, RenderableType, RenderResult
-from pip._vendor.rich.markdown import Markdown
-from pip._vendor.rich.measure import Measurement
-from pip._vendor.rich.pretty import Pretty
-from pip._vendor.rich.segment import Segment
-from pip._vendor.rich.style import Style
-from pip._vendor.rich.syntax import Syntax
-from pip._vendor.rich.table import Table
-from pip._vendor.rich.text import Text
-
-
-class ColorBox:
- def __rich_console__(
- self, console: Console, options: ConsoleOptions
- ) -> RenderResult:
- for y in range(0, 5):
- for x in range(options.max_width):
- h = x / options.max_width
- l = 0.1 + ((y / 5) * 0.7)
- r1, g1, b1 = colorsys.hls_to_rgb(h, l, 1.0)
- r2, g2, b2 = colorsys.hls_to_rgb(h, l + 0.7 / 10, 1.0)
- bgcolor = Color.from_rgb(r1 * 255, g1 * 255, b1 * 255)
- color = Color.from_rgb(r2 * 255, g2 * 255, b2 * 255)
- yield Segment("▄", Style(color=color, bgcolor=bgcolor))
- yield Segment.line()
-
- def __rich_measure__(
- self, console: "Console", options: ConsoleOptions
- ) -> Measurement:
- return Measurement(1, options.max_width)
-
-
-def make_test_card() -> Table:
- """Get a renderable that demonstrates a number of features."""
- table = Table.grid(padding=1, pad_edge=True)
- table.title = "Rich features"
- table.add_column("Feature", no_wrap=True, justify="center", style="bold red")
- table.add_column("Demonstration")
-
- color_table = Table(
- box=None,
- expand=False,
- show_header=False,
- show_edge=False,
- pad_edge=False,
- )
- color_table.add_row(
- (
- "✓ [bold green]4-bit color[/]\n"
- "✓ [bold blue]8-bit color[/]\n"
- "✓ [bold magenta]Truecolor (16.7 million)[/]\n"
- "✓ [bold yellow]Dumb terminals[/]\n"
- "✓ [bold cyan]Automatic color conversion"
- ),
- ColorBox(),
- )
-
- table.add_row("Colors", color_table)
-
- table.add_row(
- "Styles",
- "All ansi styles: [bold]bold[/], [dim]dim[/], [italic]italic[/italic], [underline]underline[/], [strike]strikethrough[/], [reverse]reverse[/], and even [blink]blink[/].",
- )
-
- lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque in metus sed sapien ultricies pretium a at justo. Maecenas luctus velit et auctor maximus."
- lorem_table = Table.grid(padding=1, collapse_padding=True)
- lorem_table.pad_edge = False
- lorem_table.add_row(
- Text(lorem, justify="left", style="green"),
- Text(lorem, justify="center", style="yellow"),
- Text(lorem, justify="right", style="blue"),
- Text(lorem, justify="full", style="red"),
- )
- table.add_row(
- "Text",
- Group(
- Text.from_markup(
- """Word wrap text. Justify [green]left[/], [yellow]center[/], [blue]right[/] or [red]full[/].\n"""
- ),
- lorem_table,
- ),
- )
-
- def comparison(renderable1: RenderableType, renderable2: RenderableType) -> Table:
- table = Table(show_header=False, pad_edge=False, box=None, expand=True)
- table.add_column("1", ratio=1)
- table.add_column("2", ratio=1)
- table.add_row(renderable1, renderable2)
- return table
-
- table.add_row(
- "Asian\nlanguage\nsupport",
- ":flag_for_china: 该库支持中文,日文和韩文文本!\n:flag_for_japan: ライブラリは中国語、日本語、韓国語のテキストをサポートしています\n:flag_for_south_korea: 이 라이브러리는 중국어, 일본어 및 한국어 텍스트를 지원합니다",
- )
-
- markup_example = (
- "[bold magenta]Rich[/] supports a simple [i]bbcode[/i]-like [b]markup[/b] for [yellow]color[/], [underline]style[/], and emoji! "
- ":+1: :apple: :ant: :bear: :baguette_bread: :bus: "
- )
- table.add_row("Markup", markup_example)
-
- example_table = Table(
- show_edge=False,
- show_header=True,
- expand=False,
- row_styles=["none", "dim"],
- box=box.SIMPLE,
- )
- example_table.add_column("[green]Date", style="green", no_wrap=True)
- example_table.add_column("[blue]Title", style="blue")
- example_table.add_column(
- "[cyan]Production Budget",
- style="cyan",
- justify="right",
- no_wrap=True,
- )
- example_table.add_column(
- "[magenta]Box Office",
- style="magenta",
- justify="right",
- no_wrap=True,
- )
- example_table.add_row(
- "Dec 20, 2019",
- "Star Wars: The Rise of Skywalker",
- "$275,000,000",
- "$375,126,118",
- )
- example_table.add_row(
- "May 25, 2018",
- "[b]Solo[/]: A Star Wars Story",
- "$275,000,000",
- "$393,151,347",
- )
- example_table.add_row(
- "Dec 15, 2017",
- "Star Wars Ep. VIII: The Last Jedi",
- "$262,000,000",
- "[bold]$1,332,539,889[/bold]",
- )
- example_table.add_row(
- "May 19, 1999",
- "Star Wars Ep. [b]I[/b]: [i]The phantom Menace",
- "$115,000,000",
- "$1,027,044,677",
- )
-
- table.add_row("Tables", example_table)
-
- code = '''\
-def iter_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
- """Iterate and generate a tuple with a flag for last value."""
- iter_values = iter(values)
- try:
- previous_value = next(iter_values)
- except StopIteration:
- return
- for value in iter_values:
- yield False, previous_value
- previous_value = value
- yield True, previous_value'''
-
- pretty_data = {
- "foo": [
- 3.1427,
- (
- "Paul Atreides",
- "Vladimir Harkonnen",
- "Thufir Hawat",
- ),
- ],
- "atomic": (False, True, None),
- }
- table.add_row(
- "Syntax\nhighlighting\n&\npretty\nprinting",
- comparison(
- Syntax(code, "python3", line_numbers=True, indent_guides=True),
- Pretty(pretty_data, indent_guides=True),
- ),
- )
-
- markdown_example = """\
-# Markdown
-
-Supports much of the *markdown* __syntax__!
-
-- Headers
-- Basic formatting: **bold**, *italic*, `code`
-- Block quotes
-- Lists, and more...
- """
- table.add_row(
- "Markdown", comparison("[cyan]" + markdown_example, Markdown(markdown_example))
- )
-
- table.add_row(
- "+more!",
- """Progress bars, columns, styled logging handler, tracebacks, etc...""",
- )
- return table
-
-
-if __name__ == "__main__": # pragma: no cover
-
- console = Console(
- file=io.StringIO(),
- force_terminal=True,
- )
- test_card = make_test_card()
-
- # Print once to warm cache
- start = process_time()
- console.print(test_card)
- pre_cache_taken = round((process_time() - start) * 1000.0, 1)
-
- console.file = io.StringIO()
-
- start = process_time()
- console.print(test_card)
- taken = round((process_time() - start) * 1000.0, 1)
-
- c = Console(record=True)
- c.print(test_card)
-
- print(f"rendered in {pre_cache_taken}ms (cold cache)")
- print(f"rendered in {taken}ms (warm cache)")
-
- from pip._vendor.rich.panel import Panel
-
- console = Console()
-
- sponsor_message = Table.grid(padding=1)
- sponsor_message.add_column(style="green", justify="right")
- sponsor_message.add_column(no_wrap=True)
-
- sponsor_message.add_row(
- "Textualize",
- "[u blue link=https://github.com/textualize]https://github.com/textualize",
- )
- sponsor_message.add_row(
- "Twitter",
- "[u blue link=https://twitter.com/willmcgugan]https://twitter.com/willmcgugan",
- )
-
- intro_message = Text.from_markup(
- """\
-We hope you enjoy using Rich!
-
-Rich is maintained with [red]:heart:[/] by [link=https://www.textualize.io]Textualize.io[/]
-
-- Will McGugan"""
- )
-
- message = Table.grid(padding=2)
- message.add_column()
- message.add_column(no_wrap=True)
- message.add_row(intro_message, sponsor_message)
-
- console.print(
- Panel.fit(
- message,
- box=box.ROUNDED,
- padding=(1, 2),
- title="[b red]Thanks for trying out Rich!",
- border_style="bright_blue",
- ),
- justify="center",
- )
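# Editor's sketch: the vendored rich demo above builds its feature card from Table.grid and
# Panel renderables. This smaller example exercises the same public API, assuming the
# standalone `rich` package is installed rather than pip's vendored copy; the table contents
# are invented for the example.
from rich.console import Console
from rich.table import Table

table = Table(title="Release files")
table.add_column("File", style="cyan", no_wrap=True)
table.add_column("Size", justify="right", style="magenta")
table.add_row("pkg-1.0.tar.gz", "12 kB")
table.add_row("pkg-1.0-py3-none-any.whl", "9 kB")

Console().print(table)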
diff --git a/spaces/power2/JoJoGan-powerhow2/e4e/configs/paths_config.py b/spaces/power2/JoJoGan-powerhow2/e4e/configs/paths_config.py
deleted file mode 100644
index 4604f6063b8125364a52a492de52fcc54004f373..0000000000000000000000000000000000000000
--- a/spaces/power2/JoJoGan-powerhow2/e4e/configs/paths_config.py
+++ /dev/null
@@ -1,28 +0,0 @@
-dataset_paths = {
- # Face Datasets (In the paper: FFHQ - train, CelebAHQ - test)
- 'ffhq': '',
- 'celeba_test': '',
-
- # Cars Dataset (In the paper: Stanford cars)
- 'cars_train': '',
- 'cars_test': '',
-
- # Horse Dataset (In the paper: LSUN Horse)
- 'horse_train': '',
- 'horse_test': '',
-
- # Church Dataset (In the paper: LSUN Church)
- 'church_train': '',
- 'church_test': '',
-
- # Cats Dataset (In the paper: LSUN Cat)
- 'cats_train': '',
- 'cats_test': ''
-}
-
-model_paths = {
- 'stylegan_ffhq': 'pretrained_models/stylegan2-ffhq-config-f.pt',
- 'ir_se50': 'pretrained_models/model_ir_se50.pth',
- 'shape_predictor': 'pretrained_models/shape_predictor_68_face_landmarks.dat',
- 'moco': 'pretrained_models/moco_v2_800ep_pretrain.pth'
-}
diff --git a/spaces/ppsingh/cpu-demo/appStore/netzero.py b/spaces/ppsingh/cpu-demo/appStore/netzero.py
deleted file mode 100644
index fc963f98247011670e74108ffb8d0d2e829ed085..0000000000000000000000000000000000000000
--- a/spaces/ppsingh/cpu-demo/appStore/netzero.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# set path
-import glob, os, sys;
-sys.path.append('../utils')
-
-#import needed libraries
-import seaborn as sns
-import matplotlib.pyplot as plt
-import numpy as np
-import pandas as pd
-import streamlit as st
-# from st_aggrid import AgGrid
-# from st_aggrid.shared import ColumnsAutoSizeMode
-from utils.netzero_classifier import netzero_classification
-from utils.netzero_classifier import runNetZeroPreprocessingPipeline, load_netzeroClassifier
-# from utils.keyword_extraction import textrank
-import logging
-logger = logging.getLogger(__name__)
-from utils.config import get_classifier_params
-from io import BytesIO
-import xlsxwriter
-import plotly.express as px
-
-
-# Declare all the necessary variables
-classifier_identifier = 'netzero'
-params = get_classifier_params(classifier_identifier)
-
-# Labels dictionary ###
-_lab_dict = {
- 'NEGATIVE':'NO NETZERO TARGET',
- 'NETZERO':'NETZERO TARGET',
- }
-
-
-@st.cache_data
-def to_excel(df):
- len_df = len(df)
- output = BytesIO()
- writer = pd.ExcelWriter(output, engine='xlsxwriter')
- df.to_excel(writer, index=False, sheet_name='Sheet1')
- workbook = writer.book
- worksheet = writer.sheets['Sheet1']
- worksheet.data_validation('E2:E{}'.format(len_df),
- {'validate': 'list',
- 'source': ['No', 'Yes', 'Discard']})
- writer.save()
- processed_data = output.getvalue()
- return processed_data
-
-def app():
-
- #### APP INFO #####
- with st.container():
- st.markdown("
NetZero Target Extraction
", unsafe_allow_html=True)
- st.write(' ')
- st.write(' ')
-
- with st.expander("ℹ️ - About this app", expanded=False):
-
- st.write(
- """
-                The **NetZero Extraction** app is an easy-to-use interface built \
-                    in Streamlit for analyzing policy documents and classifying \
-                    whether each paragraph/text in the document contains any \
-                    net-zero-target-related information. It was developed by the \
-                    GIZ Data Service Center, GFA, IKI Tracs, SV Klima and SPA. \n
- """)
- st.write("""**Document Processing:** The Uploaded/Selected document is \
- automatically cleaned and split into paragraphs with a maximum \
- length of 60 words using a Haystack preprocessing pipeline. The \
- length of 60 is an empirical value which should reflect the length \
- of a “context” and should limit the paragraph length deviation. \
- However, since we want to respect the sentence boundary the limit \
- can breach and hence this limit of 60 is tentative. \n
- """)
-
- st.write("")
-
- ### Main app code ###
- with st.container():
- if st.button("RUN NetZero Related Paragraph Extractions"):
- if 'key2' not in st.session_state:
- st.session_state['key2'] = None
-
- if 'filepath' in st.session_state:
- file_name = st.session_state['filename']
- file_path = st.session_state['filepath']
-
- # Do the preprocessing of the PDF
-
- all_documents = runNetZeroPreprocessingPipeline(file_name= file_name,
- file_path= file_path, split_by= params['split_by'],
- split_length= params['split_length'],
- split_respect_sentence_boundary= params['split_respect_sentence_boundary'],
- split_overlap= params['split_overlap'], remove_punc= params['remove_punc'])
-
- # st.dataframe(all_documents['documents'])
-
- # Load the classifier model
-
- classifier = load_netzeroClassifier(classifier_name=params['model_name'])
- st.session_state['{}_classifier'.format(classifier_identifier)] = classifier
-
- if len(all_documents['documents']) > 100:
- warning_msg = ": This might take sometime, please sit back and relax."
- else:
- warning_msg = ""
-
- # #st.write(all_documents['documents'],_lab_dict,classifier_identifier,params['threshold'])
- # with st.spinner("Running Target Related Paragraph Extractions{}".format(warning_msg)):
-
- df = netzero_classification(haystack_doc=all_documents['documents'],
- threshold= params['threshold'])
- st.session_state.key2 = df
- hits = df[df['Target Label'] == 'NETZERO']
- range_val = min(5,len(hits))
- if range_val !=0:
- count_df = df['Target Label'].value_counts()
- count_df = count_df.rename('count')
- count_df = count_df.rename_axis('Target Label').reset_index()
- count_df['Label_def'] = count_df['Target Label'].apply(lambda x: _lab_dict[x])
-
- fig = px.bar(count_df, y="Label_def", x="count", orientation='h', height =200)
- c1, c2 = st.columns([1,1])
- with c1:
- st.plotly_chart(fig,use_container_width= True)
-
- hits = hits.sort_values(by=['Relevancy'], ascending=False)
- st.write("")
- st.markdown("###### Top few NetZero Target Classified paragraph/text results ######")
- range_val = min(5,len(hits))
- for i in range(range_val):
- # the page number reflects the page that contains the main paragraph
- # according to split limit, the overlapping part can be on a separate page
- st.write('**Result {}** `page {}` (Relevancy Score: {:.2f})'.format(i+1,hits.iloc[i]['page'],hits.iloc[i]['Relevancy']))
- st.write("\t Text: \t{}".format(hits.iloc[i]['text']))
- else:
- st.info("🤔 No Netzero target found")
- df['Validation'] = 'No'
- df_xlsx = to_excel(df)
- st.download_button(label='📥 Download Current Result',
- data=df_xlsx ,
- file_name= 'file_target.xlsx')
-
-
- else:
- st.info("🤔 No document found, please try to upload it at the sidebar!")
- logging.warning("Terminated as no document provided")
-
- # # Creating truth value dataframe
- # if 'key2' in st.session_state:
- # if st.session_state.key2 is not None:
- # df = st.session_state.key2
- # st.markdown("###### Select the threshold for classifier ######")
- # c1, c2 = st.columns([1,1])
-
- # netzero_df = df[df['Target Label'] == 'NETZERO'].reset_index(drop = True)
- # if len(netzero_df) >0:
- # with c1:
- # threshold = st.slider("Threshold", min_value=0.00, max_value=1.0,
- # step=0.01, value=0.5,
- # help = "Keep High Value if want refined result, low if dont want to miss anything" )
-
- # # creating the dataframe for value counts of Labels, along with 'title' of Labels
- # temp = df[df['Relevancy']>threshold]
- # count_df = temp['Target Label'].value_counts()
- # count_df = count_df.rename('count')
- # count_df = count_df.rename_axis('Target Label').reset_index()
- # count_df['Label_def'] = count_df['Target Label'].apply(lambda x: _lab_dict[x])
-
- # plt.rcParams['font.size'] = 25
- # colors = plt.get_cmap('Blues')(np.linspace(0.2, 0.7, len(count_df)))
- # # plot
- # fig, ax = plt.subplots()
- # ax.pie(count_df['count'], colors=colors, radius=2, center=(4, 4),
- # wedgeprops={"linewidth": 1, "edgecolor": "white"},
- # textprops={'fontsize': 14},
- # frame=False,labels =list(count_df.Label_def),
- # labeldistance=1.2)
- # st.markdown("#### Anything related to NetZero Targets? ####")
-
- # c4, c5, c6 = st.columns([1,2,2])
-
- # with c5:
- # st.pyplot(fig)
- # with c6:
- # st.write(count_df[['Label_def','count']])
-
- # st.write("")
-
- # st.markdown("###### Top few NetZero Target Classified paragraph/text results ######")
-
- # st.dataframe(netzero_df.head())
- # else:
- # st.write("🤔 No Results found")
-
-
- # df['Validation'] = 'No'
- # df_xlsx = to_excel(df)
- # st.download_button(label='📥 Download Current Result',
- # data=df_xlsx ,
- # file_name= 'file_netzero.xlsx')
-
-
-
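# Editor's sketch: the app text above says documents are cleaned and split into paragraphs of
# at most ~60 words while respecting sentence boundaries (the app does this with a Haystack
# preprocessing pipeline that is not shown in this diff). The function below is a simplified,
# dependency-free stand-in for that idea: it packs whole sentences into chunks until the word
# budget would be exceeded, so a single long sentence can still overshoot the limit.
import re

def split_into_chunks(text: str, max_words: int = 60) -> list[str]:
    sentences = re.split(r"(?<=[.!?])\s+", text.strip())
    chunks, current, count = [], [], 0
    for sentence in sentences:
        n = len(sentence.split())
        if current and count + n > max_words:
            chunks.append(" ".join(current))
            current, count = [], 0
        current.append(sentence)
        count += n
    if current:
        chunks.append(" ".join(current))
    return chunks

sample = "Net zero by 2050 is the stated target. Interim milestones apply from 2030. " * 12
print([len(chunk.split()) for chunk in split_into_chunks(sample)])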
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/misc/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/misc/__init__.py
deleted file mode 100644
index 156cb232a7aa80eee1526c7598f72043de10473f..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/misc/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Empty __init__.py file to signal Python this directory is a package."""
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Index-ee671302.css b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Index-ee671302.css
deleted file mode 100644
index af5a805e103c8201c62cc9716cf483ae315d6cbf..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Index-ee671302.css
+++ /dev/null
@@ -1 +0,0 @@
-input.svelte-8ywr9r{--ring-color:transparent;position:relative;box-shadow:var(--input-shadow);border:1px solid var(--checkbox-border-color);border-radius:var(--radius-xs);background-color:var(--checkbox-background-color);line-height:var(--line-sm);width:18px!important;height:18px!important}input.svelte-8ywr9r:checked,input.svelte-8ywr9r:checked:hover,input.svelte-8ywr9r:checked:focus{border-color:var(--checkbox-border-color-selected);background-image:var(--checkbox-check);background-color:var(--checkbox-background-color-selected)}input.svelte-8ywr9r:hover{border-color:var(--checkbox-border-color-hover);background-color:var(--checkbox-background-color-hover)}input.svelte-8ywr9r:focus{border-color:var(--checkbox-border-color-focus);background-color:var(--checkbox-background-color-focus)}.disabled.svelte-8ywr9r{cursor:not-allowed;border-color:var(--checkbox-border-color-hover);background-color:var(--checkbox-background-color-hover)}input.svelte-8ywr9r:disabled:checked,input.svelte-8ywr9r:disabled:checked:hover,.disabled.svelte-8ywr9r:checked:focus{opacity:.8!important;cursor:not-allowed}.icon.svelte-19ypun1.svelte-19ypun1{display:inline-block;width:18px;height:18px;padding:3px 2px 3px 3px;margin:0;flex-grow:0;display:inline-flex;justify-content:center;align-items:center;border-radius:2px;cursor:pointer;transition:.1s}.file-icon.svelte-19ypun1.svelte-19ypun1{display:inline-block;height:20px;margin:0;flex-grow:0;display:inline-flex;justify-content:center;align-items:center;transition:.1s}.file-icon.svelte-19ypun1 img.svelte-19ypun1{width:100%;height:100%}.icon.svelte-19ypun1.svelte-19ypun1:hover{background:#eee}.icon.svelte-19ypun1:hover>*{color:var(--block-info-text-color)}.icon.svelte-19ypun1>*{transform:rotate(90deg);transform-origin:40% 50%;transition:.2s;color:var(--color-accent)}.hidden.svelte-19ypun1>*{transform:rotate(0);color:var(--body-text-color-subdued)}ul.svelte-19ypun1.svelte-19ypun1{margin-left:26px;padding-left:0;list-style:none}li.svelte-19ypun1.svelte-19ypun1{padding-left:0;align-items:center;margin:8px 0;font-family:var(--font-mono);font-size:var(--scale-00)}.wrap.svelte-19ypun1.svelte-19ypun1{display:flex;gap:8px;align-items:center}.file-wrap.svelte-qyxej8{height:100%;overflow:auto}
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/utils-c3e3db58.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/utils-c3e3db58.js
deleted file mode 100644
index 7e55a15877f82ade9d225bcc133a44888f7f17e1..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/utils-c3e3db58.js
+++ /dev/null
@@ -1,2 +0,0 @@
-class s extends Error{constructor(t){super(t),this.name="ShareError"}}async function y(o,t){if(window.__gradio_space__==null)throw new s("Must be on Spaces to share.");let e,r,n;if(t==="url"){const i=await fetch(o);e=await i.blob(),r=i.headers.get("content-type")||"",n=i.headers.get("content-disposition")||""}else e=d(o),r=o.split(";")[0].split(":")[1],n="file"+r.split("/")[1];const a=new File([e],n,{type:r}),c=await fetch("https://huggingface.co/uploads",{method:"POST",body:a,headers:{"Content-Type":a.type,"X-Requested-With":"XMLHttpRequest"}});if(!c.ok){if(c.headers.get("content-type")?.includes("application/json")){const i=await c.json();throw new s(`Upload failed: ${i.error}`)}throw new s("Upload failed.")}return await c.text()}function d(o){for(var t=o.split(","),e=t[0].match(/:(.*?);/)[1],r=atob(t[1]),n=r.length,a=new Uint8Array(n);n--;)a[n]=r.charCodeAt(n);return new Blob([a],{type:e})}function f(o){o.addEventListener("click",t);async function t(e){const r=e.composedPath(),[n]=r.filter(a=>a?.tagName==="BUTTON"&&a.classList.contains("copy_code_button"));if(n){let a=function(p){p.style.opacity="1",setTimeout(()=>{p.style.opacity="0"},2e3)};e.stopImmediatePropagation();const c=n.parentElement.innerText.trim(),l=Array.from(n.children)[1];await u(c)&&a(l)}}return{destroy(){o.removeEventListener("click",t)}}}async function u(o){let t=!1;if("clipboard"in navigator)await navigator.clipboard.writeText(o),t=!0;else{const e=document.createElement("textarea");e.value=o,e.style.position="absolute",e.style.left="-999999px",document.body.prepend(e),e.select();try{document.execCommand("copy"),t=!0}catch(r){console.error(r),t=!1}finally{e.remove()}}return t}export{s as S,f as c,y as u};
-//# sourceMappingURL=utils-c3e3db58.js.map
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Example-ee935b0c.css b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Example-ee935b0c.css
deleted file mode 100644
index 259b46e48a07998f806756482a6f76510a64a6be..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Example-ee935b0c.css
+++ /dev/null
@@ -1 +0,0 @@
-div.svelte-h6ogpl{width:var(--size-10);height:var(--size-10)}.table.svelte-h6ogpl{margin:0 auto}
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/lib/polynomial.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/lib/polynomial.py
deleted file mode 100644
index 3b8db2a9512694c8148cd6e3538c70087e3cd1a8..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/lib/polynomial.py
+++ /dev/null
@@ -1,1453 +0,0 @@
-"""
-Functions to operate on polynomials.
-
-"""
-__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
- 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
- 'polyfit', 'RankWarning']
-
-import functools
-import re
-import warnings
-
-from .._utils import set_module
-import numpy.core.numeric as NX
-
-from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
- ones)
-from numpy.core import overrides
-from numpy.lib.twodim_base import diag, vander
-from numpy.lib.function_base import trim_zeros
-from numpy.lib.type_check import iscomplex, real, imag, mintypecode
-from numpy.linalg import eigvals, lstsq, inv
-
-
-array_function_dispatch = functools.partial(
- overrides.array_function_dispatch, module='numpy')
-
-
-@set_module('numpy')
-class RankWarning(UserWarning):
- """
- Issued by `polyfit` when the Vandermonde matrix is rank deficient.
-
- For more information, a way to suppress the warning, and an example of
- `RankWarning` being issued, see `polyfit`.
-
- """
- pass
-
-
-def _poly_dispatcher(seq_of_zeros):
- return seq_of_zeros
-
-
-@array_function_dispatch(_poly_dispatcher)
-def poly(seq_of_zeros):
- """
- Find the coefficients of a polynomial with the given sequence of roots.
-
- .. note::
- This forms part of the old polynomial API. Since version 1.4, the
- new polynomial API defined in `numpy.polynomial` is preferred.
- A summary of the differences can be found in the
- :doc:`transition guide `.
-
- Returns the coefficients of the polynomial whose leading coefficient
- is one for the given sequence of zeros (multiple roots must be included
- in the sequence as many times as their multiplicity; see Examples).
- A square matrix (or array, which will be treated as a matrix) can also
- be given, in which case the coefficients of the characteristic polynomial
- of the matrix are returned.
-
- Parameters
- ----------
- seq_of_zeros : array_like, shape (N,) or (N, N)
- A sequence of polynomial roots, or a square array or matrix object.
-
- Returns
- -------
- c : ndarray
- 1D array of polynomial coefficients from highest to lowest degree:
-
- ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
- where c[0] always equals 1.
-
- Raises
- ------
- ValueError
- If input is the wrong shape (the input must be a 1-D or square
- 2-D array).
-
- See Also
- --------
- polyval : Compute polynomial values.
- roots : Return the roots of a polynomial.
- polyfit : Least squares polynomial fit.
- poly1d : A one-dimensional polynomial class.
-
- Notes
- -----
- Specifying the roots of a polynomial still leaves one degree of
- freedom, typically represented by an undetermined leading
- coefficient. [1]_ In the case of this function, that coefficient -
- the first one in the returned array - is always taken as one. (If
- for some reason you have one other point, the only automatic way
- presently to leverage that information is to use ``polyfit``.)
-
- The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
- matrix **A** is given by
-
- :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
-
- where **I** is the `n`-by-`n` identity matrix. [2]_
-
- References
- ----------
- .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
- Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
-
- .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
- Academic Press, pg. 182, 1980.
-
- Examples
- --------
- Given a sequence of a polynomial's zeros:
-
- >>> np.poly((0, 0, 0)) # Multiple root example
- array([1., 0., 0., 0.])
-
- The line above represents z**3 + 0*z**2 + 0*z + 0.
-
- >>> np.poly((-1./2, 0, 1./2))
- array([ 1. , 0. , -0.25, 0. ])
-
- The line above represents z**3 - z/4
-
- >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0]))
- array([ 1. , -0.77086955, 0.08618131, 0. ]) # random
-
- Given a square array object:
-
- >>> P = np.array([[0, 1./3], [-1./2, 0]])
- >>> np.poly(P)
- array([1. , 0. , 0.16666667])
-
- Note how in all cases the leading coefficient is always 1.
-
- """
- seq_of_zeros = atleast_1d(seq_of_zeros)
- sh = seq_of_zeros.shape
-
- if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
- seq_of_zeros = eigvals(seq_of_zeros)
- elif len(sh) == 1:
- dt = seq_of_zeros.dtype
- # Let object arrays slip through, e.g. for arbitrary precision
- if dt != object:
- seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
- else:
- raise ValueError("input must be 1d or non-empty square 2d array.")
-
- if len(seq_of_zeros) == 0:
- return 1.0
- dt = seq_of_zeros.dtype
- a = ones((1,), dtype=dt)
- for zero in seq_of_zeros:
- a = NX.convolve(a, array([1, -zero], dtype=dt), mode='full')
-
- if issubclass(a.dtype.type, NX.complexfloating):
- # if complex roots are all complex conjugates, the roots are real.
- roots = NX.asarray(seq_of_zeros, complex)
- if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())):
- a = a.real.copy()
-
- return a
-
-
-def _roots_dispatcher(p):
- return p
-
-
-@array_function_dispatch(_roots_dispatcher)
-def roots(p):
- """
- Return the roots of a polynomial with coefficients given in p.
-
- .. note::
- This forms part of the old polynomial API. Since version 1.4, the
- new polynomial API defined in `numpy.polynomial` is preferred.
- A summary of the differences can be found in the
- :doc:`transition guide `.
-
- The values in the rank-1 array `p` are coefficients of a polynomial.
- If the length of `p` is n+1 then the polynomial is described by::
-
- p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
-
- Parameters
- ----------
- p : array_like
- Rank-1 array of polynomial coefficients.
-
- Returns
- -------
- out : ndarray
- An array containing the roots of the polynomial.
-
- Raises
- ------
- ValueError
- When `p` cannot be converted to a rank-1 array.
-
- See also
- --------
- poly : Find the coefficients of a polynomial with a given sequence
- of roots.
- polyval : Compute polynomial values.
- polyfit : Least squares polynomial fit.
- poly1d : A one-dimensional polynomial class.
-
- Notes
- -----
- The algorithm relies on computing the eigenvalues of the
- companion matrix [1]_.
-
- References
- ----------
- .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
- Cambridge University Press, 1999, pp. 146-7.
-
- Examples
- --------
- >>> coeff = [3.2, 2, 1]
- >>> np.roots(coeff)
- array([-0.3125+0.46351241j, -0.3125-0.46351241j])
-
- """
- # If input is scalar, this makes it an array
- p = atleast_1d(p)
- if p.ndim != 1:
- raise ValueError("Input must be a rank-1 array.")
-
- # find non-zero array entries
- non_zero = NX.nonzero(NX.ravel(p))[0]
-
- # Return an empty array if polynomial is all zeros
- if len(non_zero) == 0:
- return NX.array([])
-
- # find the number of trailing zeros -- this is the number of roots at 0.
- trailing_zeros = len(p) - non_zero[-1] - 1
-
- # strip leading and trailing zeros
- p = p[int(non_zero[0]):int(non_zero[-1])+1]
-
- # casting: if incoming array isn't floating point, make it floating point.
- if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
- p = p.astype(float)
-
- N = len(p)
- if N > 1:
- # build companion matrix and find its eigenvalues (the roots)
- A = diag(NX.ones((N-2,), p.dtype), -1)
- A[0,:] = -p[1:] / p[0]
- roots = eigvals(A)
- else:
- roots = NX.array([])
-
- # tack any zeros onto the back of the array
- roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
- return roots
-
-
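To make the companion-matrix note in the roots docstring concrete, here is a small standalone check (an illustration, not part of the deleted file): for p(x) = x**2 - 3*x + 2 the eigenvalues of the companion matrix, built exactly as in the function above, are the roots 1 and 2.

import numpy as np

p = np.array([1.0, -3.0, 2.0])        # coefficients, highest degree first
A = np.diag(np.ones(len(p) - 2), -1)  # ones on the first sub-diagonal
A[0, :] = -p[1:] / p[0]               # first row from the normalized coefficients
print(np.sort(np.linalg.eigvals(A)))  # [1. 2.]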
-def _polyint_dispatcher(p, m=None, k=None):
- return (p,)
-
-
-@array_function_dispatch(_polyint_dispatcher)
-def polyint(p, m=1, k=None):
- """
- Return an antiderivative (indefinite integral) of a polynomial.
-
- .. note::
- This forms part of the old polynomial API. Since version 1.4, the
- new polynomial API defined in `numpy.polynomial` is preferred.
- A summary of the differences can be found in the
- :doc:`transition guide `.
-
- The returned order `m` antiderivative `P` of polynomial `p` satisfies
- :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
- integration constants `k`. The constants determine the low-order
- polynomial part
-
- .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
-
- of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
-
- Parameters
- ----------
- p : array_like or poly1d
- Polynomial to integrate.
- A sequence is interpreted as polynomial coefficients, see `poly1d`.
- m : int, optional
- Order of the antiderivative. (Default: 1)
- k : list of `m` scalars or scalar, optional
- Integration constants. They are given in the order of integration:
- those corresponding to highest-order terms come first.
-
- If ``None`` (default), all constants are assumed to be zero.
- If `m = 1`, a single scalar can be given instead of a list.
-
- See Also
- --------
- polyder : derivative of a polynomial
- poly1d.integ : equivalent method
-
- Examples
- --------
- The defining property of the antiderivative:
-
- >>> p = np.poly1d([1,1,1])
- >>> P = np.polyint(p)
- >>> P
- poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary
- >>> np.polyder(P) == p
- True
-
- The integration constants default to zero, but can be specified:
-
- >>> P = np.polyint(p, 3)
- >>> P(0)
- 0.0
- >>> np.polyder(P)(0)
- 0.0
- >>> np.polyder(P, 2)(0)
- 0.0
- >>> P = np.polyint(p, 3, k=[6,5,3])
- >>> P
- poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary
-
- Note that 3 = 6 / 2!, and that the constants are given in the order of
- integrations. Constant of the highest-order polynomial term comes first:
-
- >>> np.polyder(P, 2)(0)
- 6.0
- >>> np.polyder(P, 1)(0)
- 5.0
- >>> P(0)
- 3.0
-
- """
- m = int(m)
- if m < 0:
- raise ValueError("Order of integral must be positive (see polyder)")
- if k is None:
- k = NX.zeros(m, float)
- k = atleast_1d(k)
- if len(k) == 1 and m > 1:
- k = k[0]*NX.ones(m, float)
- if len(k) < m:
- raise ValueError(
- "k must be a scalar or a rank-1 array of length 1 or >m.")
-
- truepoly = isinstance(p, poly1d)
- p = NX.asarray(p)
- if m == 0:
- if truepoly:
- return poly1d(p)
- return p
- else:
- # Note: this must work also with object and integer arrays
- y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
- val = polyint(y, m - 1, k=k[1:])
- if truepoly:
- return poly1d(val)
- return val
-
-
-def _polyder_dispatcher(p, m=None):
- return (p,)
-
-
-@array_function_dispatch(_polyder_dispatcher)
-def polyder(p, m=1):
- """
- Return the derivative of the specified order of a polynomial.
-
- .. note::
- This forms part of the old polynomial API. Since version 1.4, the
- new polynomial API defined in `numpy.polynomial` is preferred.
- A summary of the differences can be found in the
- :doc:`transition guide `.
-
- Parameters
- ----------
- p : poly1d or sequence
- Polynomial to differentiate.
- A sequence is interpreted as polynomial coefficients, see `poly1d`.
- m : int, optional
- Order of differentiation (default: 1)
-
- Returns
- -------
- der : poly1d
- A new polynomial representing the derivative.
-
- See Also
- --------
- polyint : Anti-derivative of a polynomial.
- poly1d : Class for one-dimensional polynomials.
-
- Examples
- --------
- The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
-
- >>> p = np.poly1d([1,1,1,1])
- >>> p2 = np.polyder(p)
- >>> p2
- poly1d([3, 2, 1])
-
- which evaluates to:
-
- >>> p2(2.)
- 17.0
-
- We can verify this, approximating the derivative with
- ``(f(x + h) - f(x))/h``:
-
- >>> (p(2. + 0.001) - p(2.)) / 0.001
- 17.007000999997857
-
- The fourth-order derivative of a 3rd-order polynomial is zero:
-
- >>> np.polyder(p, 2)
- poly1d([6, 2])
- >>> np.polyder(p, 3)
- poly1d([6])
- >>> np.polyder(p, 4)
- poly1d([0])
-
- """
- m = int(m)
- if m < 0:
- raise ValueError("Order of derivative must be positive (see polyint)")
-
- truepoly = isinstance(p, poly1d)
- p = NX.asarray(p)
- n = len(p) - 1
- y = p[:-1] * NX.arange(n, 0, -1)
- if m == 0:
- val = p
- else:
- val = polyder(y, m - 1)
- if truepoly:
- val = poly1d(val)
- return val
-
-
-def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None):
- return (x, y, w)
-
-
-@array_function_dispatch(_polyfit_dispatcher)
-def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
- """
- Least squares polynomial fit.
-
- .. note::
- This forms part of the old polynomial API. Since version 1.4, the
- new polynomial API defined in `numpy.polynomial` is preferred.
- A summary of the differences can be found in the
- :doc:`transition guide `.
-
- Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
- to points `(x, y)`. Returns a vector of coefficients `p` that minimises
- the squared error in the order `deg`, `deg-1`, ... `0`.
-
- The `Polynomial.fit ` class
- method is recommended for new code as it is more stable numerically. See
- the documentation of the method for more information.
-
- Parameters
- ----------
- x : array_like, shape (M,)
- x-coordinates of the M sample points ``(x[i], y[i])``.
- y : array_like, shape (M,) or (M, K)
- y-coordinates of the sample points. Several data sets of sample
- points sharing the same x-coordinates can be fitted at once by
- passing in a 2D-array that contains one dataset per column.
- deg : int
- Degree of the fitting polynomial
- rcond : float, optional
- Relative condition number of the fit. Singular values smaller than
- this relative to the largest singular value will be ignored. The
- default value is len(x)*eps, where eps is the relative precision of
- the float type, about 2e-16 in most cases.
- full : bool, optional
- Switch determining nature of return value. When it is False (the
- default) just the coefficients are returned, when True diagnostic
- information from the singular value decomposition is also returned.
- w : array_like, shape (M,), optional
- Weights. If not None, the weight ``w[i]`` applies to the unsquared
- residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
- chosen so that the errors of the products ``w[i]*y[i]`` all have the
- same variance. When using inverse-variance weighting, use
- ``w[i] = 1/sigma(y[i])``. The default value is None.
- cov : bool or str, optional
- If given and not `False`, return not just the estimate but also its
- covariance matrix. By default, the covariance are scaled by
-        covariance matrix. By default, the covariance is scaled by
- to be unreliable except in a relative sense and everything is scaled
- such that the reduced chi2 is unity. This scaling is omitted if
- ``cov='unscaled'``, as is relevant for the case that the weights are
- w = 1/sigma, with sigma known to be a reliable estimate of the
- uncertainty.
-
- Returns
- -------
- p : ndarray, shape (deg + 1,) or (deg + 1, K)
- Polynomial coefficients, highest power first. If `y` was 2-D, the
- coefficients for `k`-th data set are in ``p[:,k]``.
-
- residuals, rank, singular_values, rcond
- These values are only returned if ``full == True``
-
- - residuals -- sum of squared residuals of the least squares fit
- - rank -- the effective rank of the scaled Vandermonde
- coefficient matrix
- - singular_values -- singular values of the scaled Vandermonde
- coefficient matrix
- - rcond -- value of `rcond`.
-
- For more details, see `numpy.linalg.lstsq`.
-
- V : ndarray, shape (M,M) or (M,M,K)
- Present only if ``full == False`` and ``cov == True``. The covariance
- matrix of the polynomial coefficient estimates. The diagonal of
- this matrix are the variance estimates for each coefficient. If y
- is a 2-D array, then the covariance matrix for the `k`-th data set
- are in ``V[:,:,k]``
-
-
- Warns
- -----
- RankWarning
- The rank of the coefficient matrix in the least-squares fit is
- deficient. The warning is only raised if ``full == False``.
-
- The warnings can be turned off by
-
- >>> import warnings
- >>> warnings.simplefilter('ignore', np.RankWarning)
-
- See Also
- --------
- polyval : Compute polynomial values.
- linalg.lstsq : Computes a least-squares fit.
- scipy.interpolate.UnivariateSpline : Computes spline fits.
-
- Notes
- -----
- The solution minimizes the squared error
-
- .. math::
- E = \\sum_{j=0}^k |p(x_j) - y_j|^2
-
- in the equations::
-
- x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
- x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
- ...
- x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
-
- The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
-
- `polyfit` issues a `RankWarning` when the least-squares fit is badly
- conditioned. This implies that the best fit is not well-defined due
- to numerical error. The results may be improved by lowering the polynomial
- degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
- can also be set to a value smaller than its default, but the resulting
- fit may be spurious: including contributions from the small singular
- values can add numerical noise to the result.
-
- Note that fitting polynomial coefficients is inherently badly conditioned
- when the degree of the polynomial is large or the interval of sample points
- is badly centered. The quality of the fit should always be checked in these
- cases. When polynomial fits are not satisfactory, splines may be a good
- alternative.
-
- References
- ----------
- .. [1] Wikipedia, "Curve fitting",
- https://en.wikipedia.org/wiki/Curve_fitting
- .. [2] Wikipedia, "Polynomial interpolation",
- https://en.wikipedia.org/wiki/Polynomial_interpolation
-
- Examples
- --------
- >>> import warnings
- >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
- >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
- >>> z = np.polyfit(x, y, 3)
- >>> z
- array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary
-
- It is convenient to use `poly1d` objects for dealing with polynomials:
-
- >>> p = np.poly1d(z)
- >>> p(0.5)
- 0.6143849206349179 # may vary
- >>> p(3.5)
- -0.34732142857143039 # may vary
- >>> p(10)
- 22.579365079365115 # may vary
-
- High-order polynomials may oscillate wildly:
-
- >>> with warnings.catch_warnings():
- ... warnings.simplefilter('ignore', np.RankWarning)
- ... p30 = np.poly1d(np.polyfit(x, y, 30))
- ...
- >>> p30(4)
- -0.80000000000000204 # may vary
- >>> p30(5)
- -0.99999999999999445 # may vary
- >>> p30(4.5)
- -0.10547061179440398 # may vary
-
- Illustration:
-
- >>> import matplotlib.pyplot as plt
- >>> xp = np.linspace(-2, 6, 100)
- >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
- >>> plt.ylim(-2,2)
- (-2, 2)
- >>> plt.show()
-
- """
- order = int(deg) + 1
- x = NX.asarray(x) + 0.0
- y = NX.asarray(y) + 0.0
-
- # check arguments.
- if deg < 0:
- raise ValueError("expected deg >= 0")
- if x.ndim != 1:
- raise TypeError("expected 1D vector for x")
- if x.size == 0:
- raise TypeError("expected non-empty vector for x")
- if y.ndim < 1 or y.ndim > 2:
- raise TypeError("expected 1D or 2D array for y")
- if x.shape[0] != y.shape[0]:
- raise TypeError("expected x and y to have same length")
-
- # set rcond
- if rcond is None:
- rcond = len(x)*finfo(x.dtype).eps
-
- # set up least squares equation for powers of x
- lhs = vander(x, order)
- rhs = y
-
- # apply weighting
- if w is not None:
- w = NX.asarray(w) + 0.0
- if w.ndim != 1:
- raise TypeError("expected a 1-d array for weights")
- if w.shape[0] != y.shape[0]:
- raise TypeError("expected w and y to have the same length")
- lhs *= w[:, NX.newaxis]
- if rhs.ndim == 2:
- rhs *= w[:, NX.newaxis]
- else:
- rhs *= w
-
- # scale lhs to improve condition number and solve
- scale = NX.sqrt((lhs*lhs).sum(axis=0))
- lhs /= scale
- c, resids, rank, s = lstsq(lhs, rhs, rcond)
- c = (c.T/scale).T # broadcast scale coefficients
-
- # warn on rank reduction, which indicates an ill conditioned matrix
- if rank != order and not full:
- msg = "Polyfit may be poorly conditioned"
- warnings.warn(msg, RankWarning, stacklevel=2)
-
- if full:
- return c, resids, rank, s, rcond
- elif cov:
- Vbase = inv(dot(lhs.T, lhs))
- Vbase /= NX.outer(scale, scale)
- if cov == "unscaled":
- fac = 1
- else:
- if len(x) <= order:
- raise ValueError("the number of data points must exceed order "
- "to scale the covariance matrix")
- # note, this used to be: fac = resids / (len(x) - order - 2.0)
-            # it was decided that the "- 2" (originally justified by "Bayesian
- # uncertainty analysis") is not what the user expects
- # (see gh-11196 and gh-11197)
- fac = resids / (len(x) - order)
- if y.ndim == 1:
- return c, Vbase * fac
- else:
- return c, Vbase[:,:, NX.newaxis] * fac
- else:
- return c
-
-
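The polyfit docstring above points to numpy.polynomial.Polynomial.fit as the recommended replacement. A brief sketch of the correspondence, reusing the sample data from the docstring example (illustrative only; exact floating-point values may vary):

import numpy as np
from numpy.polynomial import Polynomial

x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])

p_old = np.poly1d(np.polyfit(x, y, 3))  # legacy API implemented in this file
p_new = Polynomial.fit(x, y, 3)         # preferred modern API

# Both are the same least-squares cubic. Note the conventions differ:
# poly1d stores coefficients highest-degree first, Polynomial lowest-degree
# first, and Polynomial.fit works in a scaled/shifted domain by default.
assert np.allclose(p_old(x), p_new(x))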
-def _polyval_dispatcher(p, x):
- return (p, x)
-
-
-@array_function_dispatch(_polyval_dispatcher)
-def polyval(p, x):
- """
- Evaluate a polynomial at specific values.
-
- .. note::
- This forms part of the old polynomial API. Since version 1.4, the
- new polynomial API defined in `numpy.polynomial` is preferred.
- A summary of the differences can be found in the
- :doc:`transition guide `.
-
- If `p` is of length N, this function returns the value:
-
- ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
-
- If `x` is a sequence, then ``p(x)`` is returned for each element of ``x``.
- If `x` is another polynomial then the composite polynomial ``p(x(t))``
- is returned.
-
- Parameters
- ----------
- p : array_like or poly1d object
- 1D array of polynomial coefficients (including coefficients equal
- to zero) from highest degree to the constant term, or an
- instance of poly1d.
- x : array_like or poly1d object
- A number, an array of numbers, or an instance of poly1d, at
- which to evaluate `p`.
-
- Returns
- -------
- values : ndarray or poly1d
- If `x` is a poly1d instance, the result is the composition of the two
- polynomials, i.e., `x` is "substituted" in `p` and the simplified
- result is returned. In addition, the type of `x` - array_like or
- poly1d - governs the type of the output: `x` array_like => `values`
- array_like, `x` a poly1d object => `values` is also.
-
- See Also
- --------
- poly1d: A polynomial class.
-
- Notes
- -----
- Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
- for polynomials of high degree the values may be inaccurate due to
- rounding errors. Use carefully.
-
- If `x` is a subtype of `ndarray` the return value will be of the same type.
-
- References
- ----------
- .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
- trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
- Reinhold Co., 1985, pg. 720.
-
- Examples
- --------
- >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
- 76
- >>> np.polyval([3,0,1], np.poly1d(5))
- poly1d([76])
- >>> np.polyval(np.poly1d([3,0,1]), 5)
- 76
- >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
- poly1d([76])
-
- """
- p = NX.asarray(p)
- if isinstance(x, poly1d):
- y = 0
- else:
- x = NX.asanyarray(x)
- y = NX.zeros_like(x)
- for pv in p:
- y = y * x + pv
- return y
-
-
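The Notes section of polyval refers to Horner's scheme; written out by hand it is just the repeated multiply-and-add recurrence y <- y*x + c over the coefficients, highest degree first. A minimal standalone version (illustrative, not part of the file):

def horner(coeffs, x):
    # Same recurrence as the loop in polyval above.
    y = 0
    for c in coeffs:
        y = y * x + c
    return y

assert horner([3, 0, 1], 5) == 76  # matches the np.polyval([3, 0, 1], 5) docstring example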
-def _binary_op_dispatcher(a1, a2):
- return (a1, a2)
-
-
-@array_function_dispatch(_binary_op_dispatcher)
-def polyadd(a1, a2):
- """
- Find the sum of two polynomials.
-
- .. note::
- This forms part of the old polynomial API. Since version 1.4, the
- new polynomial API defined in `numpy.polynomial` is preferred.
- A summary of the differences can be found in the
- :doc:`transition guide `.
-
- Returns the polynomial resulting from the sum of two input polynomials.
- Each input must be either a poly1d object or a 1D sequence of polynomial
- coefficients, from highest to lowest degree.
-
- Parameters
- ----------
- a1, a2 : array_like or poly1d object
- Input polynomials.
-
- Returns
- -------
- out : ndarray or poly1d object
- The sum of the inputs. If either input is a poly1d object, then the
- output is also a poly1d object. Otherwise, it is a 1D array of
- polynomial coefficients from highest to lowest degree.
-
- See Also
- --------
- poly1d : A one-dimensional polynomial class.
- poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
-
- Examples
- --------
- >>> np.polyadd([1, 2], [9, 5, 4])
- array([9, 6, 6])
-
- Using poly1d objects:
-
- >>> p1 = np.poly1d([1, 2])
- >>> p2 = np.poly1d([9, 5, 4])
- >>> print(p1)
- 1 x + 2
- >>> print(p2)
- 2
- 9 x + 5 x + 4
- >>> print(np.polyadd(p1, p2))
- 2
- 9 x + 6 x + 6
-
- """
- truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
- a1 = atleast_1d(a1)
- a2 = atleast_1d(a2)
- diff = len(a2) - len(a1)
- if diff == 0:
- val = a1 + a2
- elif diff > 0:
- zr = NX.zeros(diff, a1.dtype)
- val = NX.concatenate((zr, a1)) + a2
- else:
- zr = NX.zeros(abs(diff), a2.dtype)
- val = a1 + NX.concatenate((zr, a2))
- if truepoly:
- val = poly1d(val)
- return val
-
-
-@array_function_dispatch(_binary_op_dispatcher)
-def polysub(a1, a2):
- """
- Difference (subtraction) of two polynomials.
-
- .. note::
- This forms part of the old polynomial API. Since version 1.4, the
- new polynomial API defined in `numpy.polynomial` is preferred.
- A summary of the differences can be found in the
- :doc:`transition guide `.
-
- Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
- `a1` and `a2` can be either array_like sequences of the polynomials'
- coefficients (including coefficients equal to zero), or `poly1d` objects.
-
- Parameters
- ----------
- a1, a2 : array_like or poly1d
- Minuend and subtrahend polynomials, respectively.
-
- Returns
- -------
- out : ndarray or poly1d
- Array or `poly1d` object of the difference polynomial's coefficients.
-
- See Also
- --------
- polyval, polydiv, polymul, polyadd
-
- Examples
- --------
- .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
-
- >>> np.polysub([2, 10, -2], [3, 10, -4])
- array([-1, 0, 2])
-
- """
- truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
- a1 = atleast_1d(a1)
- a2 = atleast_1d(a2)
- diff = len(a2) - len(a1)
- if diff == 0:
- val = a1 - a2
- elif diff > 0:
- zr = NX.zeros(diff, a1.dtype)
- val = NX.concatenate((zr, a1)) - a2
- else:
- zr = NX.zeros(abs(diff), a2.dtype)
- val = a1 - NX.concatenate((zr, a2))
- if truepoly:
- val = poly1d(val)
- return val
-
-
-@array_function_dispatch(_binary_op_dispatcher)
-def polymul(a1, a2):
- """
- Find the product of two polynomials.
-
- .. note::
- This forms part of the old polynomial API. Since version 1.4, the
- new polynomial API defined in `numpy.polynomial` is preferred.
- A summary of the differences can be found in the
- :doc:`transition guide `.
-
- Finds the polynomial resulting from the multiplication of the two input
- polynomials. Each input must be either a poly1d object or a 1D sequence
- of polynomial coefficients, from highest to lowest degree.
-
- Parameters
- ----------
- a1, a2 : array_like or poly1d object
- Input polynomials.
-
- Returns
- -------
- out : ndarray or poly1d object
- The polynomial resulting from the multiplication of the inputs. If
- either inputs is a poly1d object, then the output is also a poly1d
- object. Otherwise, it is a 1D array of polynomial coefficients from
- highest to lowest degree.
-
- See Also
- --------
- poly1d : A one-dimensional polynomial class.
- poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
- convolve : Array convolution. Same output as polymul, but has parameter
- for overlap mode.
-
- Examples
- --------
- >>> np.polymul([1, 2, 3], [9, 5, 1])
- array([ 9, 23, 38, 17, 3])
-
- Using poly1d objects:
-
- >>> p1 = np.poly1d([1, 2, 3])
- >>> p2 = np.poly1d([9, 5, 1])
- >>> print(p1)
- 2
- 1 x + 2 x + 3
- >>> print(p2)
- 2
- 9 x + 5 x + 1
- >>> print(np.polymul(p1, p2))
- 4 3 2
- 9 x + 23 x + 38 x + 17 x + 3
-
- """
- truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
- a1, a2 = poly1d(a1), poly1d(a2)
- val = NX.convolve(a1, a2)
- if truepoly:
- val = poly1d(val)
- return val
-
-
-def _polydiv_dispatcher(u, v):
- return (u, v)
-
-
-@array_function_dispatch(_polydiv_dispatcher)
-def polydiv(u, v):
- """
- Returns the quotient and remainder of polynomial division.
-
- .. note::
- This forms part of the old polynomial API. Since version 1.4, the
- new polynomial API defined in `numpy.polynomial` is preferred.
- A summary of the differences can be found in the
- :doc:`transition guide `.
-
- The input arrays are the coefficients (including any coefficients
- equal to zero) of the "numerator" (dividend) and "denominator"
- (divisor) polynomials, respectively.
-
- Parameters
- ----------
- u : array_like or poly1d
- Dividend polynomial's coefficients.
-
- v : array_like or poly1d
- Divisor polynomial's coefficients.
-
- Returns
- -------
- q : ndarray
- Coefficients, including those equal to zero, of the quotient.
- r : ndarray
- Coefficients, including those equal to zero, of the remainder.
-
- See Also
- --------
- poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub
- polyval
-
- Notes
- -----
- Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
- not equal `v.ndim`. In other words, all four possible combinations -
- ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
- ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
-
- Examples
- --------
- .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
-
- >>> x = np.array([3.0, 5.0, 2.0])
- >>> y = np.array([2.0, 1.0])
- >>> np.polydiv(x, y)
- (array([1.5 , 1.75]), array([0.25]))
-
- """
- truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
- u = atleast_1d(u) + 0.0
- v = atleast_1d(v) + 0.0
- # w has the common type
- w = u[0] + v[0]
- m = len(u) - 1
- n = len(v) - 1
- scale = 1. / v[0]
- q = NX.zeros((max(m - n + 1, 1),), w.dtype)
- r = u.astype(w.dtype)
- for k in range(0, m-n+1):
- d = scale * r[k]
- q[k] = d
- r[k:k+n+1] -= d*v
- while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
- r = r[1:]
- if truepoly:
- return poly1d(q), poly1d(r)
- return q, r
-
-_poly_mat = re.compile(r"\*\*([0-9]*)")
-def _raise_power(astr, wrap=70):
- n = 0
- line1 = ''
- line2 = ''
- output = ' '
- while True:
- mat = _poly_mat.search(astr, n)
- if mat is None:
- break
- span = mat.span()
- power = mat.groups()[0]
- partstr = astr[n:span[0]]
- n = span[1]
- toadd2 = partstr + ' '*(len(power)-1)
- toadd1 = ' '*(len(partstr)-1) + power
- if ((len(line2) + len(toadd2) > wrap) or
- (len(line1) + len(toadd1) > wrap)):
- output += line1 + "\n" + line2 + "\n "
- line1 = toadd1
- line2 = toadd2
- else:
- line2 += partstr + ' '*(len(power)-1)
- line1 += ' '*(len(partstr)-1) + power
- output += line1 + "\n" + line2
- return output + astr[n:]
-
-
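_raise_power is the undocumented helper behind poly1d.__str__: it lifts every **N exponent onto a separate line above the expression. A quick illustration of its input and output, assuming the function as defined above:

print(_raise_power("1 x**2 + 2 x + 3"))
# prints (exponent raised above its term):
#    2
# 1 x + 2 x + 3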
-@set_module('numpy')
-class poly1d:
- """
- A one-dimensional polynomial class.
-
- .. note::
- This forms part of the old polynomial API. Since version 1.4, the
- new polynomial API defined in `numpy.polynomial` is preferred.
- A summary of the differences can be found in the
- :doc:`transition guide `.
-
- A convenience class, used to encapsulate "natural" operations on
- polynomials so that said operations may take on their customary
- form in code (see Examples).
-
- Parameters
- ----------
- c_or_r : array_like
- The polynomial's coefficients, in decreasing powers, or if
- the value of the second parameter is True, the polynomial's
- roots (values where the polynomial evaluates to 0). For example,
- ``poly1d([1, 2, 3])`` returns an object that represents
- :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
- one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
- r : bool, optional
- If True, `c_or_r` specifies the polynomial's roots; the default
- is False.
- variable : str, optional
- Changes the variable used when printing `p` from `x` to `variable`
- (see Examples).
-
- Examples
- --------
- Construct the polynomial :math:`x^2 + 2x + 3`:
-
- >>> p = np.poly1d([1, 2, 3])
- >>> print(np.poly1d(p))
- 2
- 1 x + 2 x + 3
-
- Evaluate the polynomial at :math:`x = 0.5`:
-
- >>> p(0.5)
- 4.25
-
- Find the roots:
-
- >>> p.r
- array([-1.+1.41421356j, -1.-1.41421356j])
- >>> p(p.r)
- array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # may vary
-
- These numbers in the previous line represent (0, 0) to machine precision
-
- Show the coefficients:
-
- >>> p.c
- array([1, 2, 3])
-
- Display the order (the leading zero-coefficients are removed):
-
- >>> p.order
- 2
-
- Show the coefficient of the k-th power in the polynomial
- (which is equivalent to ``p.c[-(i+1)]``):
-
- >>> p[1]
- 2
-
- Polynomials can be added, subtracted, multiplied, and divided
- (returns quotient and remainder):
-
- >>> p * p
- poly1d([ 1, 4, 10, 12, 9])
-
- >>> (p**3 + 4) / p
- (poly1d([ 1., 4., 10., 12., 9.]), poly1d([4.]))
-
- ``asarray(p)`` gives the coefficient array, so polynomials can be
- used in all functions that accept arrays:
-
- >>> p**2 # square of polynomial
- poly1d([ 1, 4, 10, 12, 9])
-
- >>> np.square(p) # square of individual coefficients
- array([1, 4, 9])
-
- The variable used in the string representation of `p` can be modified,
- using the `variable` parameter:
-
- >>> p = np.poly1d([1,2,3], variable='z')
- >>> print(p)
- 2
- 1 z + 2 z + 3
-
- Construct a polynomial from its roots:
-
- >>> np.poly1d([1, 2], True)
- poly1d([ 1., -3., 2.])
-
- This is the same polynomial as obtained by:
-
- >>> np.poly1d([1, -1]) * np.poly1d([1, -2])
- poly1d([ 1, -3, 2])
-
- """
- __hash__ = None
-
- @property
- def coeffs(self):
- """ The polynomial coefficients """
- return self._coeffs
-
- @coeffs.setter
- def coeffs(self, value):
- # allowing this makes p.coeffs *= 2 legal
- if value is not self._coeffs:
- raise AttributeError("Cannot set attribute")
-
- @property
- def variable(self):
- """ The name of the polynomial variable """
- return self._variable
-
- # calculated attributes
- @property
- def order(self):
- """ The order or degree of the polynomial """
- return len(self._coeffs) - 1
-
- @property
- def roots(self):
- """ The roots of the polynomial, where self(x) == 0 """
- return roots(self._coeffs)
-
-    # our internal _coeffs property needs to be backed by __dict__['coeffs'] for
- # scipy to work correctly.
- @property
- def _coeffs(self):
- return self.__dict__['coeffs']
- @_coeffs.setter
- def _coeffs(self, coeffs):
- self.__dict__['coeffs'] = coeffs
-
- # alias attributes
- r = roots
- c = coef = coefficients = coeffs
- o = order
-
- def __init__(self, c_or_r, r=False, variable=None):
- if isinstance(c_or_r, poly1d):
- self._variable = c_or_r._variable
- self._coeffs = c_or_r._coeffs
-
- if set(c_or_r.__dict__) - set(self.__dict__):
- msg = ("In the future extra properties will not be copied "
- "across when constructing one poly1d from another")
- warnings.warn(msg, FutureWarning, stacklevel=2)
- self.__dict__.update(c_or_r.__dict__)
-
- if variable is not None:
- self._variable = variable
- return
- if r:
- c_or_r = poly(c_or_r)
- c_or_r = atleast_1d(c_or_r)
- if c_or_r.ndim > 1:
- raise ValueError("Polynomial must be 1d only.")
- c_or_r = trim_zeros(c_or_r, trim='f')
- if len(c_or_r) == 0:
- c_or_r = NX.array([0], dtype=c_or_r.dtype)
- self._coeffs = c_or_r
- if variable is None:
- variable = 'x'
- self._variable = variable
-
- def __array__(self, t=None):
- if t:
- return NX.asarray(self.coeffs, t)
- else:
- return NX.asarray(self.coeffs)
-
- def __repr__(self):
- vals = repr(self.coeffs)
- vals = vals[6:-1]
- return "poly1d(%s)" % vals
-
- def __len__(self):
- return self.order
-
- def __str__(self):
- thestr = "0"
- var = self.variable
-
- # Remove leading zeros
- coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
- N = len(coeffs)-1
-
- def fmt_float(q):
- s = '%.4g' % q
- if s.endswith('.0000'):
- s = s[:-5]
- return s
-
- for k, coeff in enumerate(coeffs):
- if not iscomplex(coeff):
- coefstr = fmt_float(real(coeff))
- elif real(coeff) == 0:
- coefstr = '%sj' % fmt_float(imag(coeff))
- else:
- coefstr = '(%s + %sj)' % (fmt_float(real(coeff)),
- fmt_float(imag(coeff)))
-
- power = (N-k)
- if power == 0:
- if coefstr != '0':
- newstr = '%s' % (coefstr,)
- else:
- if k == 0:
- newstr = '0'
- else:
- newstr = ''
- elif power == 1:
- if coefstr == '0':
- newstr = ''
- elif coefstr == 'b':
- newstr = var
- else:
- newstr = '%s %s' % (coefstr, var)
- else:
- if coefstr == '0':
- newstr = ''
- elif coefstr == 'b':
- newstr = '%s**%d' % (var, power,)
- else:
- newstr = '%s %s**%d' % (coefstr, var, power)
-
- if k > 0:
- if newstr != '':
- if newstr.startswith('-'):
- thestr = "%s - %s" % (thestr, newstr[1:])
- else:
- thestr = "%s + %s" % (thestr, newstr)
- else:
- thestr = newstr
- return _raise_power(thestr)
-
- def __call__(self, val):
- return polyval(self.coeffs, val)
-
- def __neg__(self):
- return poly1d(-self.coeffs)
-
- def __pos__(self):
- return self
-
- def __mul__(self, other):
- if isscalar(other):
- return poly1d(self.coeffs * other)
- else:
- other = poly1d(other)
- return poly1d(polymul(self.coeffs, other.coeffs))
-
- def __rmul__(self, other):
- if isscalar(other):
- return poly1d(other * self.coeffs)
- else:
- other = poly1d(other)
- return poly1d(polymul(self.coeffs, other.coeffs))
-
- def __add__(self, other):
- other = poly1d(other)
- return poly1d(polyadd(self.coeffs, other.coeffs))
-
- def __radd__(self, other):
- other = poly1d(other)
- return poly1d(polyadd(self.coeffs, other.coeffs))
-
- def __pow__(self, val):
- if not isscalar(val) or int(val) != val or val < 0:
- raise ValueError("Power to non-negative integers only.")
- res = [1]
- for _ in range(val):
- res = polymul(self.coeffs, res)
- return poly1d(res)
-
- def __sub__(self, other):
- other = poly1d(other)
- return poly1d(polysub(self.coeffs, other.coeffs))
-
- def __rsub__(self, other):
- other = poly1d(other)
- return poly1d(polysub(other.coeffs, self.coeffs))
-
- def __div__(self, other):
- if isscalar(other):
- return poly1d(self.coeffs/other)
- else:
- other = poly1d(other)
- return polydiv(self, other)
-
- __truediv__ = __div__
-
- def __rdiv__(self, other):
- if isscalar(other):
- return poly1d(other/self.coeffs)
- else:
- other = poly1d(other)
- return polydiv(other, self)
-
- __rtruediv__ = __rdiv__
-
- def __eq__(self, other):
- if not isinstance(other, poly1d):
- return NotImplemented
- if self.coeffs.shape != other.coeffs.shape:
- return False
- return (self.coeffs == other.coeffs).all()
-
- def __ne__(self, other):
- if not isinstance(other, poly1d):
- return NotImplemented
- return not self.__eq__(other)
-
-
- def __getitem__(self, val):
- ind = self.order - val
- if val > self.order:
- return self.coeffs.dtype.type(0)
- if val < 0:
- return self.coeffs.dtype.type(0)
- return self.coeffs[ind]
-
- def __setitem__(self, key, val):
- ind = self.order - key
- if key < 0:
- raise ValueError("Does not support negative powers.")
- if key > self.order:
- zr = NX.zeros(key-self.order, self.coeffs.dtype)
- self._coeffs = NX.concatenate((zr, self.coeffs))
- ind = 0
- self._coeffs[ind] = val
- return
-
- def __iter__(self):
- return iter(self.coeffs)
-
- def integ(self, m=1, k=0):
- """
- Return an antiderivative (indefinite integral) of this polynomial.
-
- Refer to `polyint` for full documentation.
-
- See Also
- --------
- polyint : equivalent function
-
- """
- return poly1d(polyint(self.coeffs, m=m, k=k))
-
- def deriv(self, m=1):
- """
- Return a derivative of this polynomial.
-
- Refer to `polyder` for full documentation.
-
- See Also
- --------
- polyder : equivalent function
-
- """
- return poly1d(polyder(self.coeffs, m=m))
-
-# Stuff to do on module import
-
-warnings.simplefilter('always', RankWarning)
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/tests/test_public_api.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/tests/test_public_api.py
deleted file mode 100644
index 54bf3dacf9722004d51cb13d8b5dd7c1105a655a..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/tests/test_public_api.py
+++ /dev/null
@@ -1,551 +0,0 @@
-import sys
-import sysconfig
-import subprocess
-import pkgutil
-import types
-import importlib
-import warnings
-
-import numpy as np
-import numpy
-import pytest
-from numpy.testing import IS_WASM
-
-try:
- import ctypes
-except ImportError:
- ctypes = None
-
-
-def check_dir(module, module_name=None):
- """Returns a mapping of all objects with the wrong __module__ attribute."""
- if module_name is None:
- module_name = module.__name__
- results = {}
- for name in dir(module):
- item = getattr(module, name)
- if (hasattr(item, '__module__') and hasattr(item, '__name__')
- and item.__module__ != module_name):
- results[name] = item.__module__ + '.' + item.__name__
- return results
-
-
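check_dir (defined just above) flags every public attribute whose __module__ does not match the module it is reached from, which is how the tests below detect accidental re-exports. A tiny illustration with a hypothetical module (not part of the test file):

import math
import types

fake = types.ModuleType("fake_public_module")
fake.sqrt = math.sqrt       # a re-exported object keeps __module__ == 'math'

print(check_dir(fake))      # {'sqrt': 'math.sqrt'}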
-def test_numpy_namespace():
- # None of these objects are publicly documented to be part of the main
- # NumPy namespace (some are useful though, others need to be cleaned up)
- undocumented = {
- '_add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc',
- 'add_docstring': 'numpy.core._multiarray_umath.add_docstring',
- 'add_newdoc': 'numpy.core.function_base.add_newdoc',
- 'add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc',
- 'byte_bounds': 'numpy.lib.utils.byte_bounds',
- 'compare_chararrays': 'numpy.core._multiarray_umath.compare_chararrays',
- 'deprecate': 'numpy.lib.utils.deprecate',
- 'deprecate_with_doc': 'numpy.lib.utils.deprecate_with_doc',
- 'disp': 'numpy.lib.function_base.disp',
- 'fastCopyAndTranspose': 'numpy.core._multiarray_umath.fastCopyAndTranspose',
- 'get_array_wrap': 'numpy.lib.shape_base.get_array_wrap',
- 'get_include': 'numpy.lib.utils.get_include',
- 'recfromcsv': 'numpy.lib.npyio.recfromcsv',
- 'recfromtxt': 'numpy.lib.npyio.recfromtxt',
- 'safe_eval': 'numpy.lib.utils.safe_eval',
- 'set_string_function': 'numpy.core.arrayprint.set_string_function',
- 'show_config': 'numpy.__config__.show',
- 'show_runtime': 'numpy.lib.utils.show_runtime',
- 'who': 'numpy.lib.utils.who',
- }
- # We override dir to not show these members
- allowlist = undocumented
- bad_results = check_dir(np)
- # pytest gives better error messages with the builtin assert than with
- # assert_equal
- assert bad_results == allowlist
-
-
-@pytest.mark.skipif(IS_WASM, reason="can't start subprocess")
-@pytest.mark.parametrize('name', ['testing'])
-def test_import_lazy_import(name):
- """Make sure we can actually use the modules we lazy load.
-
- While not exported as part of the public API, it was accessible. With the
-    use of __getattr__ and __dir__, this isn't always true. It can happen that
-    an infinite recursion occurs.
-
- This is the only way I found that would force the failure to appear on the
- badly implemented code.
-
- We also test for the presence of the lazily imported modules in dir
-
- """
- exe = (sys.executable, '-c', "import numpy; numpy." + name)
- result = subprocess.check_output(exe)
- assert not result
-
- # Make sure they are still in the __dir__
- assert name in dir(np)
-
-
-def test_dir_testing():
-    """Assert that the output of dir(np) contains each attribute only once
-    (no duplicates such as "testing"/"tester")."""
- assert len(dir(np)) == len(set(dir(np)))
-
-
-def test_numpy_linalg():
- bad_results = check_dir(np.linalg)
- assert bad_results == {}
-
-
-def test_numpy_fft():
- bad_results = check_dir(np.fft)
- assert bad_results == {}
-
-
-@pytest.mark.skipif(ctypes is None,
- reason="ctypes not available in this python")
-def test_NPY_NO_EXPORT():
- cdll = ctypes.CDLL(np.core._multiarray_tests.__file__)
- # Make sure an arbitrary NPY_NO_EXPORT function is actually hidden
- f = getattr(cdll, 'test_not_exported', None)
- assert f is None, ("'test_not_exported' is mistakenly exported, "
- "NPY_NO_EXPORT does not work")
-
-
-# Historically NumPy has not used leading underscores for private submodules
-# much. This has resulted in lots of things that look like public modules
-# (i.e. things that can be imported as `import numpy.somesubmodule.somefile`),
-# but were never intended to be public. The PUBLIC_MODULES list contains
-# modules that are either public because they were meant to be, or because they
-# contain public functions/objects that aren't present in any other namespace
-# for whatever reason and therefore should be treated as public.
-#
-# The PRIVATE_BUT_PRESENT_MODULES list contains modules that look public (lack
-# of underscores) but should not be used. For many of those modules the
-# current status is fine. For others it may make sense to work on making them
-# private, to clean up our public API and avoid confusion.
-PUBLIC_MODULES = ['numpy.' + s for s in [
- "array_api",
- "array_api.linalg",
- "ctypeslib",
- "doc",
- "doc.constants",
- "doc.ufuncs",
- "dtypes",
- "exceptions",
- "f2py",
- "fft",
- "lib",
- "lib.format", # was this meant to be public?
- "lib.mixins",
- "lib.recfunctions",
- "lib.scimath",
- "lib.stride_tricks",
- "linalg",
- "ma",
- "ma.extras",
- "ma.mrecords",
- "matlib",
- "polynomial",
- "polynomial.chebyshev",
- "polynomial.hermite",
- "polynomial.hermite_e",
- "polynomial.laguerre",
- "polynomial.legendre",
- "polynomial.polynomial",
- "random",
- "testing",
- "testing.overrides",
- "typing",
- "typing.mypy_plugin",
- "version" # Should be removed for NumPy 2.0
-]]
-if sys.version_info < (3, 12):
- PUBLIC_MODULES += [
- 'numpy.' + s for s in [
- "distutils",
- "distutils.cpuinfo",
- "distutils.exec_command",
- "distutils.misc_util",
- "distutils.log",
- "distutils.system_info",
- ]
- ]
-
-
-
-PUBLIC_ALIASED_MODULES = [
- "numpy.char",
- "numpy.emath",
- "numpy.rec",
-]
-
-
-PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [
- "compat",
- "compat.py3k",
- "conftest",
- "core",
- "core.arrayprint",
- "core.defchararray",
- "core.einsumfunc",
- "core.fromnumeric",
- "core.function_base",
- "core.getlimits",
- "core.memmap",
- "core.multiarray",
- "core.numeric",
- "core.numerictypes",
- "core.overrides",
- "core.records",
- "core.shape_base",
- "core.umath",
- "f2py.auxfuncs",
- "f2py.capi_maps",
- "f2py.cb_rules",
- "f2py.cfuncs",
- "f2py.common_rules",
- "f2py.crackfortran",
- "f2py.diagnose",
- "f2py.f2py2e",
- "f2py.f90mod_rules",
- "f2py.func2subr",
- "f2py.rules",
- "f2py.symbolic",
- "f2py.use_rules",
- "fft.helper",
- "lib.arraypad",
- "lib.arraysetops",
- "lib.arrayterator",
- "lib.function_base",
- "lib.histograms",
- "lib.index_tricks",
- "lib.nanfunctions",
- "lib.npyio",
- "lib.polynomial",
- "lib.shape_base",
- "lib.twodim_base",
- "lib.type_check",
- "lib.ufunclike",
- "lib.user_array", # note: not in np.lib, but probably should just be deleted
- "lib.utils",
- "linalg.lapack_lite",
- "linalg.linalg",
- "ma.core",
- "ma.testutils",
- "ma.timer_comparison",
- "matrixlib",
- "matrixlib.defmatrix",
- "polynomial.polyutils",
- "random.mtrand",
- "random.bit_generator",
- "testing.print_coercion_tables",
-]]
-if sys.version_info < (3, 12):
- PRIVATE_BUT_PRESENT_MODULES += [
- 'numpy.' + s for s in [
- "distutils.armccompiler",
- "distutils.fujitsuccompiler",
- "distutils.ccompiler",
- 'distutils.ccompiler_opt',
- "distutils.command",
- "distutils.command.autodist",
- "distutils.command.bdist_rpm",
- "distutils.command.build",
- "distutils.command.build_clib",
- "distutils.command.build_ext",
- "distutils.command.build_py",
- "distutils.command.build_scripts",
- "distutils.command.build_src",
- "distutils.command.config",
- "distutils.command.config_compiler",
- "distutils.command.develop",
- "distutils.command.egg_info",
- "distutils.command.install",
- "distutils.command.install_clib",
- "distutils.command.install_data",
- "distutils.command.install_headers",
- "distutils.command.sdist",
- "distutils.conv_template",
- "distutils.core",
- "distutils.extension",
- "distutils.fcompiler",
- "distutils.fcompiler.absoft",
- "distutils.fcompiler.arm",
- "distutils.fcompiler.compaq",
- "distutils.fcompiler.environment",
- "distutils.fcompiler.g95",
- "distutils.fcompiler.gnu",
- "distutils.fcompiler.hpux",
- "distutils.fcompiler.ibm",
- "distutils.fcompiler.intel",
- "distutils.fcompiler.lahey",
- "distutils.fcompiler.mips",
- "distutils.fcompiler.nag",
- "distutils.fcompiler.none",
- "distutils.fcompiler.pathf95",
- "distutils.fcompiler.pg",
- "distutils.fcompiler.nv",
- "distutils.fcompiler.sun",
- "distutils.fcompiler.vast",
- "distutils.fcompiler.fujitsu",
- "distutils.from_template",
- "distutils.intelccompiler",
- "distutils.lib2def",
- "distutils.line_endings",
- "distutils.mingw32ccompiler",
- "distutils.msvccompiler",
- "distutils.npy_pkg_config",
- "distutils.numpy_distribution",
- "distutils.pathccompiler",
- "distutils.unixccompiler",
- ]
- ]
-
-
-def is_unexpected(name):
- """Check if this needs to be considered."""
- if '._' in name or '.tests' in name or '.setup' in name:
- return False
-
- if name in PUBLIC_MODULES:
- return False
-
- if name in PUBLIC_ALIASED_MODULES:
- return False
-
- if name in PRIVATE_BUT_PRESENT_MODULES:
- return False
-
- return True
-
-
-# These are present in a directory with an __init__.py but cannot be imported
-# code_generators/ isn't installed, but present for an inplace build
-SKIP_LIST = [
- "numpy.core.code_generators",
- "numpy.core.code_generators.genapi",
- "numpy.core.code_generators.generate_umath",
- "numpy.core.code_generators.ufunc_docstrings",
- "numpy.core.code_generators.generate_numpy_api",
- "numpy.core.code_generators.generate_ufunc_api",
- "numpy.core.code_generators.numpy_api",
- "numpy.core.code_generators.generate_umath_doc",
- "numpy.core.code_generators.verify_c_api_version",
- "numpy.core.cversions",
- "numpy.core.generate_numpy_api",
- "numpy.core.umath_tests",
-]
-if sys.version_info < (3, 12):
- SKIP_LIST += ["numpy.distutils.msvc9compiler"]
-
-
-# suppressing warnings from deprecated modules
-@pytest.mark.filterwarnings("ignore:.*np.compat.*:DeprecationWarning")
-def test_all_modules_are_expected():
- """
- Test that we don't add anything that looks like a new public module by
- accident. Check is based on filenames.
- """
-
- modnames = []
- for _, modname, ispkg in pkgutil.walk_packages(path=np.__path__,
- prefix=np.__name__ + '.',
- onerror=None):
- if is_unexpected(modname) and modname not in SKIP_LIST:
- # We have a name that is new. If that's on purpose, add it to
- # PUBLIC_MODULES. We don't expect to have to add anything to
- # PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name!
- modnames.append(modname)
-
- if modnames:
- raise AssertionError(f'Found unexpected modules: {modnames}')
-
-
-# Stuff that clearly shouldn't be in the API and is detected by the next test
-# below
-SKIP_LIST_2 = [
- 'numpy.math',
- 'numpy.doc.constants.re',
- 'numpy.doc.constants.textwrap',
- 'numpy.lib.emath',
- 'numpy.lib.math',
- 'numpy.matlib.char',
- 'numpy.matlib.rec',
- 'numpy.matlib.emath',
- 'numpy.matlib.exceptions',
- 'numpy.matlib.math',
- 'numpy.matlib.linalg',
- 'numpy.matlib.fft',
- 'numpy.matlib.random',
- 'numpy.matlib.ctypeslib',
- 'numpy.matlib.ma',
-]
-if sys.version_info < (3, 12):
- SKIP_LIST_2 += [
- 'numpy.distutils.log.sys',
- 'numpy.distutils.log.logging',
- 'numpy.distutils.log.warnings',
- ]
-
-
-def test_all_modules_are_expected_2():
- """
- Method checking all objects. The pkgutil-based method in
- `test_all_modules_are_expected` does not catch imports into a namespace,
-    only filenames. So this test is more thorough, and checks things like:
-
- import .lib.scimath as emath
-
- To check if something in a module is (effectively) public, one can check if
- there's anything in that namespace that's a public function/object but is
- not exposed in a higher-level namespace. For example for a `numpy.lib`
- submodule::
-
- mod = np.lib.mixins
- for obj in mod.__all__:
- if obj in np.__all__:
- continue
- elif obj in np.lib.__all__:
- continue
-
- else:
- print(obj)
-
- """
-
- def find_unexpected_members(mod_name):
- members = []
- module = importlib.import_module(mod_name)
- if hasattr(module, '__all__'):
- objnames = module.__all__
- else:
- objnames = dir(module)
-
- for objname in objnames:
- if not objname.startswith('_'):
- fullobjname = mod_name + '.' + objname
- if isinstance(getattr(module, objname), types.ModuleType):
- if is_unexpected(fullobjname):
- if fullobjname not in SKIP_LIST_2:
- members.append(fullobjname)
-
- return members
-
- unexpected_members = find_unexpected_members("numpy")
- for modname in PUBLIC_MODULES:
- unexpected_members.extend(find_unexpected_members(modname))
-
- if unexpected_members:
- raise AssertionError("Found unexpected object(s) that look like "
- "modules: {}".format(unexpected_members))
-
-
-def test_api_importable():
- """
- Check that all submodules listed higher up in this file can be imported
-
- Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may
- simply need to be removed from the list (deprecation may or may not be
- needed - apply common sense).
- """
- def check_importable(module_name):
- try:
- importlib.import_module(module_name)
- except (ImportError, AttributeError):
- return False
-
- return True
-
- module_names = []
- for module_name in PUBLIC_MODULES:
- if not check_importable(module_name):
- module_names.append(module_name)
-
- if module_names:
- raise AssertionError("Modules in the public API that cannot be "
- "imported: {}".format(module_names))
-
- for module_name in PUBLIC_ALIASED_MODULES:
- try:
- eval(module_name)
- except AttributeError:
- module_names.append(module_name)
-
- if module_names:
- raise AssertionError("Modules in the public API that were not "
- "found: {}".format(module_names))
-
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', category=DeprecationWarning)
- warnings.filterwarnings('always', category=ImportWarning)
- for module_name in PRIVATE_BUT_PRESENT_MODULES:
- if not check_importable(module_name):
- module_names.append(module_name)
-
- if module_names:
- raise AssertionError("Modules that are not really public but looked "
- "public and can not be imported: "
- "{}".format(module_names))
-
-
-@pytest.mark.xfail(
- sysconfig.get_config_var("Py_DEBUG") not in (None, 0, "0"),
- reason=(
- "NumPy possibly built with `USE_DEBUG=True ./tools/travis-test.sh`, "
- "which does not expose the `array_api` entry point. "
- "See https://github.com/numpy/numpy/pull/19800"
- ),
-)
-def test_array_api_entry_point():
- """
- Entry point for Array API implementation can be found with importlib and
- returns the numpy.array_api namespace.
- """
- # For a development install that did not go through meson-python,
- # the entrypoint will not have been installed. So ensure this test fails
- # only if numpy is inside site-packages.
- numpy_in_sitepackages = sysconfig.get_path('platlib') in np.__file__
-
- eps = importlib.metadata.entry_points()
- try:
- xp_eps = eps.select(group="array_api")
- except AttributeError:
- # The select interface for entry_points was introduced in py3.10,
- # deprecating its dict interface. We fallback to dict keys for finding
- # Array API entry points so that running this test in <=3.9 will
- # still work - see https://github.com/numpy/numpy/pull/19800.
- xp_eps = eps.get("array_api", [])
- if len(xp_eps) == 0:
- if numpy_in_sitepackages:
- msg = "No entry points for 'array_api' found"
- raise AssertionError(msg) from None
- return
-
- try:
- ep = next(ep for ep in xp_eps if ep.name == "numpy")
- except StopIteration:
- if numpy_in_sitepackages:
- msg = "'numpy' not in array_api entry points"
- raise AssertionError(msg) from None
- return
-
- xp = ep.load()
- msg = (
- f"numpy entry point value '{ep.value}' "
- "does not point to our Array API implementation"
- )
- assert xp is numpy.array_api, msg
-
-
-@pytest.mark.parametrize("name", [
- 'ModuleDeprecationWarning', 'VisibleDeprecationWarning',
- 'ComplexWarning', 'TooHardError', 'AxisError'])
-def test_moved_exceptions(name):
- # These were moved to the exceptions namespace, but currently still
- # available
- assert name in np.__all__
- assert name not in np.__dir__()
- # Fetching works, but __module__ is set correctly:
- assert getattr(np, name).__module__ == "numpy.exceptions"
- assert name in np.exceptions.__all__
- getattr(np.exceptions, name)
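
As a side note on the entry-point lookup used in `test_array_api_entry_point` above: it has to cope with the `importlib.metadata` API change introduced in Python 3.10. A minimal stand-alone sketch of the same pattern (outside pytest) might look like this; the `"array_api"` group name is taken from the test, everything else is illustrative.

```python
import importlib.metadata


def find_entry_point(group: str, name: str):
    """Return the loaded entry point, or None if it is not installed."""
    eps = importlib.metadata.entry_points()
    try:
        candidates = eps.select(group=group)   # Python >= 3.10
    except AttributeError:
        candidates = eps.get(group, [])        # legacy dict interface, Python <= 3.9
    for ep in candidates:
        if ep.name == name:
            return ep.load()
    return None


xp = find_entry_point("array_api", "numpy")
print(xp)  # the numpy.array_api namespace if the entry point is installed, else None
```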
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/openai/api_resources/chat_completion.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/openai/api_resources/chat_completion.py
deleted file mode 100644
index 7e55f9e38fcf791a5655dc810f0eca6748cecb5a..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/openai/api_resources/chat_completion.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import time
-
-from openai import util
-from openai.api_resources.abstract.engine_api_resource import EngineAPIResource
-from openai.error import TryAgain
-
-
-class ChatCompletion(EngineAPIResource):
- engine_required = False
- OBJECT_NAME = "chat.completions"
-
- @classmethod
- def create(cls, *args, **kwargs):
- """
- Creates a new chat completion for the provided messages and parameters.
-
- See https://platform.openai.com/docs/api-reference/chat/create
- for a list of valid parameters.
- """
- start = time.time()
- timeout = kwargs.pop("timeout", None)
-
- while True:
- try:
- return super().create(*args, **kwargs)
- except TryAgain as e:
- if timeout is not None and time.time() > start + timeout:
- raise
-
- util.log_info("Waiting for model to warm up", error=e)
-
- @classmethod
- async def acreate(cls, *args, **kwargs):
- """
- Creates a new chat completion for the provided messages and parameters.
-
- See https://platform.openai.com/docs/api-reference/chat/create
- for a list of valid parameters.
- """
- start = time.time()
- timeout = kwargs.pop("timeout", None)
-
- while True:
- try:
- return await super().acreate(*args, **kwargs)
- except TryAgain as e:
- if timeout is not None and time.time() > start + timeout:
- raise
-
- util.log_info("Waiting for model to warm up", error=e)
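
For context, this is roughly how the retry-with-timeout wrapper above would be called with the legacy (pre-1.0) openai SDK that this file belongs to; the model name, prompt, and key handling are placeholders, not part of the original file.

```python
import openai

openai.api_key = "sk-..."  # placeholder; normally read from an environment variable

# `timeout` is consumed by the ChatCompletion.create wrapper above: it keeps
# retrying on TryAgain (model still warming up) until roughly this many seconds
# have elapsed, then re-raises.
response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    timeout=30,
)
print(response["choices"][0]["message"]["content"])
```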
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_timestamp.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_timestamp.py
deleted file mode 100644
index 2f73e3d58b5167959f504bb2f486a992e17ec12c..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/methods/test_to_timestamp.py
+++ /dev/null
@@ -1,154 +0,0 @@
-from datetime import timedelta
-
-import numpy as np
-import pytest
-
-from pandas import (
- DataFrame,
- DatetimeIndex,
- PeriodIndex,
- Series,
- Timedelta,
- date_range,
- period_range,
- to_datetime,
-)
-import pandas._testing as tm
-
-
-def _get_with_delta(delta, freq="A-DEC"):
- return date_range(
- to_datetime("1/1/2001") + delta,
- to_datetime("12/31/2009") + delta,
- freq=freq,
- )
-
-
-class TestToTimestamp:
- def test_to_timestamp(self, frame_or_series):
- K = 5
- index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
- obj = DataFrame(
- np.random.default_rng(2).standard_normal((len(index), K)),
- index=index,
- columns=["A", "B", "C", "D", "E"],
- )
- obj["mix"] = "a"
- obj = tm.get_obj(obj, frame_or_series)
-
- exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC")
- exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns")
- result = obj.to_timestamp("D", "end")
- tm.assert_index_equal(result.index, exp_index)
- tm.assert_numpy_array_equal(result.values, obj.values)
- if frame_or_series is Series:
- assert result.name == "A"
-
- exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN")
- result = obj.to_timestamp("D", "start")
- tm.assert_index_equal(result.index, exp_index)
-
- result = obj.to_timestamp(how="start")
- tm.assert_index_equal(result.index, exp_index)
-
- delta = timedelta(hours=23)
- result = obj.to_timestamp("H", "end")
- exp_index = _get_with_delta(delta)
- exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns")
- tm.assert_index_equal(result.index, exp_index)
-
- delta = timedelta(hours=23, minutes=59)
- result = obj.to_timestamp("T", "end")
- exp_index = _get_with_delta(delta)
- exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns")
- tm.assert_index_equal(result.index, exp_index)
-
- result = obj.to_timestamp("S", "end")
- delta = timedelta(hours=23, minutes=59, seconds=59)
- exp_index = _get_with_delta(delta)
- exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
- tm.assert_index_equal(result.index, exp_index)
-
- def test_to_timestamp_columns(self):
- K = 5
- index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
- df = DataFrame(
- np.random.default_rng(2).standard_normal((len(index), K)),
- index=index,
- columns=["A", "B", "C", "D", "E"],
- )
- df["mix"] = "a"
-
- # columns
- df = df.T
-
- exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC")
- exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns")
- result = df.to_timestamp("D", "end", axis=1)
- tm.assert_index_equal(result.columns, exp_index)
- tm.assert_numpy_array_equal(result.values, df.values)
-
- exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN")
- result = df.to_timestamp("D", "start", axis=1)
- tm.assert_index_equal(result.columns, exp_index)
-
- delta = timedelta(hours=23)
- result = df.to_timestamp("H", "end", axis=1)
- exp_index = _get_with_delta(delta)
- exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns")
- tm.assert_index_equal(result.columns, exp_index)
-
- delta = timedelta(hours=23, minutes=59)
- result = df.to_timestamp("T", "end", axis=1)
- exp_index = _get_with_delta(delta)
- exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns")
- tm.assert_index_equal(result.columns, exp_index)
-
- result = df.to_timestamp("S", "end", axis=1)
- delta = timedelta(hours=23, minutes=59, seconds=59)
- exp_index = _get_with_delta(delta)
- exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
- tm.assert_index_equal(result.columns, exp_index)
-
- result1 = df.to_timestamp("5t", axis=1)
- result2 = df.to_timestamp("t", axis=1)
- expected = date_range("2001-01-01", "2009-01-01", freq="AS")
- assert isinstance(result1.columns, DatetimeIndex)
- assert isinstance(result2.columns, DatetimeIndex)
- tm.assert_numpy_array_equal(result1.columns.asi8, expected.asi8)
- tm.assert_numpy_array_equal(result2.columns.asi8, expected.asi8)
- # PeriodIndex.to_timestamp always use 'infer'
- assert result1.columns.freqstr == "AS-JAN"
- assert result2.columns.freqstr == "AS-JAN"
-
- def test_to_timestamp_invalid_axis(self):
- index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
- obj = DataFrame(
- np.random.default_rng(2).standard_normal((len(index), 5)), index=index
- )
-
- # invalid axis
- with pytest.raises(ValueError, match="axis"):
- obj.to_timestamp(axis=2)
-
- def test_to_timestamp_hourly(self, frame_or_series):
- index = period_range(freq="H", start="1/1/2001", end="1/2/2001")
- obj = Series(1, index=index, name="foo")
- if frame_or_series is not Series:
- obj = obj.to_frame()
-
- exp_index = date_range("1/1/2001 00:59:59", end="1/2/2001 00:59:59", freq="H")
- result = obj.to_timestamp(how="end")
- exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
- tm.assert_index_equal(result.index, exp_index)
- if frame_or_series is Series:
- assert result.name == "foo"
-
- def test_to_timestamp_raises(self, index, frame_or_series):
- # GH#33327
- obj = frame_or_series(index=index, dtype=object)
-
- if not isinstance(index, PeriodIndex):
- msg = f"unsupported Type {type(index).__name__}"
- with pytest.raises(TypeError, match=msg):
- obj.to_timestamp()
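
As a quick illustration of the behaviour these tests pin down, converting a Period-indexed frame to timestamps at the start versus the end of each period looks like the sketch below. It uses the same legacy `"A"` annual alias as the tests; newer pandas versions prefer `"Y"`.

```python
import numpy as np
import pandas as pd

idx = pd.period_range(start="2001", end="2003", freq="A")
df = pd.DataFrame(np.arange(len(idx)), index=idx, columns=["x"])

start = df.to_timestamp(how="start")   # 2001-01-01, 2002-01-01, 2003-01-01
end = df.to_timestamp("D", how="end")  # last nanosecond of each year
print(start.index)
print(end.index)
```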
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_insert.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_insert.py
deleted file mode 100644
index cedf8cd54b81e8e622f8ef99b7750d8c31ca38b0..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/datetimes/methods/test_insert.py
+++ /dev/null
@@ -1,256 +0,0 @@
-from datetime import datetime
-
-import numpy as np
-import pytest
-import pytz
-
-from pandas import (
- NA,
- DatetimeIndex,
- Index,
- NaT,
- Timestamp,
- date_range,
-)
-import pandas._testing as tm
-
-
-class TestInsert:
- @pytest.mark.parametrize("null", [None, np.nan, np.datetime64("NaT"), NaT, NA])
- @pytest.mark.parametrize("tz", [None, "UTC", "US/Eastern"])
- def test_insert_nat(self, tz, null):
- # GH#16537, GH#18295 (test missing)
-
- idx = DatetimeIndex(["2017-01-01"], tz=tz)
- expected = DatetimeIndex(["NaT", "2017-01-01"], tz=tz)
- if tz is not None and isinstance(null, np.datetime64):
- expected = Index([null, idx[0]], dtype=object)
-
- res = idx.insert(0, null)
- tm.assert_index_equal(res, expected)
-
- @pytest.mark.parametrize("tz", [None, "UTC", "US/Eastern"])
- def test_insert_invalid_na(self, tz):
- idx = DatetimeIndex(["2017-01-01"], tz=tz)
-
- item = np.timedelta64("NaT")
- result = idx.insert(0, item)
- expected = Index([item] + list(idx), dtype=object)
- tm.assert_index_equal(result, expected)
-
- def test_insert_empty_preserves_freq(self, tz_naive_fixture):
- # GH#33573
- tz = tz_naive_fixture
- dti = DatetimeIndex([], tz=tz, freq="D")
- item = Timestamp("2017-04-05").tz_localize(tz)
-
- result = dti.insert(0, item)
- assert result.freq == dti.freq
-
- # But not when we insert an item that doesn't conform to freq
- dti = DatetimeIndex([], tz=tz, freq="W-THU")
- result = dti.insert(0, item)
- assert result.freq is None
-
- def test_insert(self):
- idx = DatetimeIndex(["2000-01-04", "2000-01-01", "2000-01-02"], name="idx")
-
- result = idx.insert(2, datetime(2000, 1, 5))
- exp = DatetimeIndex(
- ["2000-01-04", "2000-01-01", "2000-01-05", "2000-01-02"], name="idx"
- )
- tm.assert_index_equal(result, exp)
-
- # insertion of non-datetime should coerce to object index
- result = idx.insert(1, "inserted")
- expected = Index(
- [
- datetime(2000, 1, 4),
- "inserted",
- datetime(2000, 1, 1),
- datetime(2000, 1, 2),
- ],
- name="idx",
- )
- assert not isinstance(result, DatetimeIndex)
- tm.assert_index_equal(result, expected)
- assert result.name == expected.name
-
- idx = date_range("1/1/2000", periods=3, freq="M", name="idx")
-
- # preserve freq
- expected_0 = DatetimeIndex(
- ["1999-12-31", "2000-01-31", "2000-02-29", "2000-03-31"],
- name="idx",
- freq="M",
- )
- expected_3 = DatetimeIndex(
- ["2000-01-31", "2000-02-29", "2000-03-31", "2000-04-30"],
- name="idx",
- freq="M",
- )
-
- # reset freq to None
- expected_1_nofreq = DatetimeIndex(
- ["2000-01-31", "2000-01-31", "2000-02-29", "2000-03-31"],
- name="idx",
- freq=None,
- )
- expected_3_nofreq = DatetimeIndex(
- ["2000-01-31", "2000-02-29", "2000-03-31", "2000-01-02"],
- name="idx",
- freq=None,
- )
-
- cases = [
- (0, datetime(1999, 12, 31), expected_0),
- (-3, datetime(1999, 12, 31), expected_0),
- (3, datetime(2000, 4, 30), expected_3),
- (1, datetime(2000, 1, 31), expected_1_nofreq),
- (3, datetime(2000, 1, 2), expected_3_nofreq),
- ]
-
- for n, d, expected in cases:
- result = idx.insert(n, d)
- tm.assert_index_equal(result, expected)
- assert result.name == expected.name
- assert result.freq == expected.freq
-
- # reset freq to None
- result = idx.insert(3, datetime(2000, 1, 2))
- expected = DatetimeIndex(
- ["2000-01-31", "2000-02-29", "2000-03-31", "2000-01-02"],
- name="idx",
- freq=None,
- )
- tm.assert_index_equal(result, expected)
- assert result.name == expected.name
- assert result.freq is None
-
- for tz in ["US/Pacific", "Asia/Singapore"]:
- idx = date_range("1/1/2000 09:00", periods=6, freq="H", tz=tz, name="idx")
- # preserve freq
- expected = date_range(
- "1/1/2000 09:00", periods=7, freq="H", tz=tz, name="idx"
- )
- for d in [
- Timestamp("2000-01-01 15:00", tz=tz),
- pytz.timezone(tz).localize(datetime(2000, 1, 1, 15)),
- ]:
- result = idx.insert(6, d)
- tm.assert_index_equal(result, expected)
- assert result.name == expected.name
- assert result.freq == expected.freq
- assert result.tz == expected.tz
-
- expected = DatetimeIndex(
- [
- "2000-01-01 09:00",
- "2000-01-01 10:00",
- "2000-01-01 11:00",
- "2000-01-01 12:00",
- "2000-01-01 13:00",
- "2000-01-01 14:00",
- "2000-01-01 10:00",
- ],
- name="idx",
- tz=tz,
- freq=None,
- )
- # reset freq to None
- for d in [
- Timestamp("2000-01-01 10:00", tz=tz),
- pytz.timezone(tz).localize(datetime(2000, 1, 1, 10)),
- ]:
- result = idx.insert(6, d)
- tm.assert_index_equal(result, expected)
- assert result.name == expected.name
- assert result.tz == expected.tz
- assert result.freq is None
-
- # TODO: also changes DataFrame.__setitem__ with expansion
- def test_insert_mismatched_tzawareness(self):
- # see GH#7299
- idx = date_range("1/1/2000", periods=3, freq="D", tz="Asia/Tokyo", name="idx")
-
- # mismatched tz-awareness
- item = Timestamp("2000-01-04")
- result = idx.insert(3, item)
- expected = Index(
- list(idx[:3]) + [item] + list(idx[3:]), dtype=object, name="idx"
- )
- tm.assert_index_equal(result, expected)
-
- # mismatched tz-awareness
- item = datetime(2000, 1, 4)
- result = idx.insert(3, item)
- expected = Index(
- list(idx[:3]) + [item] + list(idx[3:]), dtype=object, name="idx"
- )
- tm.assert_index_equal(result, expected)
-
- # TODO: also changes DataFrame.__setitem__ with expansion
- def test_insert_mismatched_tz(self):
- # see GH#7299
- # pre-2.0 with mismatched tzs we would cast to object
- idx = date_range("1/1/2000", periods=3, freq="D", tz="Asia/Tokyo", name="idx")
-
- # mismatched tz -> cast to object (could reasonably cast to same tz or UTC)
- item = Timestamp("2000-01-04", tz="US/Eastern")
- result = idx.insert(3, item)
- expected = Index(
- list(idx[:3]) + [item.tz_convert(idx.tz)] + list(idx[3:]),
- name="idx",
- )
- assert expected.dtype == idx.dtype
- tm.assert_index_equal(result, expected)
-
- item = datetime(2000, 1, 4, tzinfo=pytz.timezone("US/Eastern"))
- result = idx.insert(3, item)
- expected = Index(
- list(idx[:3]) + [item.astimezone(idx.tzinfo)] + list(idx[3:]),
- name="idx",
- )
- assert expected.dtype == idx.dtype
- tm.assert_index_equal(result, expected)
-
- @pytest.mark.parametrize(
- "item", [0, np.int64(0), np.float64(0), np.array(0), np.timedelta64(456)]
- )
- def test_insert_mismatched_types_raises(self, tz_aware_fixture, item):
- # GH#33703 dont cast these to dt64
- tz = tz_aware_fixture
- dti = date_range("2019-11-04", periods=9, freq="-1D", name=9, tz=tz)
-
- result = dti.insert(1, item)
-
- if isinstance(item, np.ndarray):
- assert item.item() == 0
- expected = Index([dti[0], 0] + list(dti[1:]), dtype=object, name=9)
- else:
- expected = Index([dti[0], item] + list(dti[1:]), dtype=object, name=9)
-
- tm.assert_index_equal(result, expected)
-
- def test_insert_castable_str(self, tz_aware_fixture):
- # GH#33703
- tz = tz_aware_fixture
- dti = date_range("2019-11-04", periods=3, freq="-1D", name=9, tz=tz)
-
- value = "2019-11-05"
- result = dti.insert(0, value)
-
- ts = Timestamp(value).tz_localize(tz)
- expected = DatetimeIndex([ts] + list(dti), dtype=dti.dtype, name=9)
- tm.assert_index_equal(result, expected)
-
- def test_insert_non_castable_str(self, tz_aware_fixture):
- # GH#33703
- tz = tz_aware_fixture
- dti = date_range("2019-11-04", periods=3, freq="-1D", name=9, tz=tz)
-
- value = "foo"
- result = dti.insert(0, value)
-
- expected = Index(["foo"] + list(dti), dtype=object, name=9)
- tm.assert_index_equal(result, expected)
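
The freq-preservation rules exercised above boil down to this: inserting a value that still fits the index's frequency keeps `freq`, anything else resets it to `None`. A small sketch, using the legacy `"M"` month-end alias that the tests use:

```python
import pandas as pd

dti = pd.date_range("2000-01-31", periods=3, freq="M", name="idx")

kept = dti.insert(0, pd.Timestamp("1999-12-31"))   # conforms to month-end spacing
print(kept.freq)    # <MonthEnd>

reset = dti.insert(1, pd.Timestamp("2000-01-15"))  # mid-month value breaks the pattern
print(reset.freq)   # None
```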
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/typer/_compat_utils.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/typer/_compat_utils.py
deleted file mode 100644
index 637e8ceb0d6f808fe47d94b9771768a196aedc18..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/typer/_compat_utils.py
+++ /dev/null
@@ -1,5 +0,0 @@
-import click
-
-
-def _get_click_major() -> int:
- return int(click.__version__.split(".")[0])
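
Splitting `click.__version__` on dots works as long as the string starts with an integer component. A slightly more general sketch, assuming the third-party `packaging` library is available (an extra dependency, not used by the original helper), would parse the full PEP 440 version instead:

```python
import click
from packaging.version import Version  # assumption: packaging is installed


def get_click_major() -> int:
    # Equivalent to the deleted helper, but tolerant of any PEP 440 version string.
    return Version(click.__version__).major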
diff --git a/spaces/pseudolab/schoolrecord_gen/app.py b/spaces/pseudolab/schoolrecord_gen/app.py
deleted file mode 100644
index 51f2d14e4f362e8892b73564e12492e9fb9f023c..0000000000000000000000000000000000000000
--- a/spaces/pseudolab/schoolrecord_gen/app.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import streamlit as st
-import openai
-import json
-from annotated_text import annotated_text
-import os
-import achivenment_standards as data
-
-
-# OpenAI API configuration (the key is read from an environment variable)
-openai.api_key = os.getenv("OPENAI_API_KEY")
-
-# Build the inference function using GPT
-def generate_annotated_text(text):
- response = openai.ChatCompletion.create(
- model="gpt-3.5-turbo-16k",
- messages=[
- {
- "role": "system",
- "content": "성취기준 기반 학생의 특성 및 활동 평가 생성\n성취기준을 입력하시면, 해당 성취기준에 기반한 학생의 특성 및 활동에 대한 평가를 annotated_text 형식으로 제공합니다. 성취기준을 보며 학생의 특정 활동, 성취 수준, 교사의 총평, 그리고 학생의 역량을 고려하여 체계적으로 구성된 출력을 제공합니다. 주어는 반드시 생략합니다. \n\n예제:\n입력: ```성취기준: [6국01-07]상대가 처한 상황을 이해하고 공감하며 듣는 태도를 지닌다, [6국01-02] 의견을 제시하고 함께 조정하며 토의한다.```\n출력: ```annotated_text(\n (\"평소 자신의 생각을 일목요연하게 정리하는 습관이 있음.\", \"역량\", \"rgba(255, 0, 0, 0.3)\"),\n (\"사회 현안에 관한 주장하는 글쓰기를 잘함.\", \"성취수준\", \"rgba(0, 0, 255, 0.3)\"),\n (\"친구의 고민을 해결해주는 역할극에서 상대방을 배려하여 해결 가능한 방안을 제안함.\", \"수행\", \"rgba(0, 128, 0, 0.3)\"),\n (\"상대가 처한 상황을 이해하고 공감하는 태도를 가지고 친구들과 원만한 관계를 맺고 갈등을 조정함.\", \"교사총평\", \"rgba(128, 128, 128, 0.3)\"),\n (\"중간 놀이 시간에 운동장을 사용하는 방법 정하기를 주제로 한 토의에서 알맞은 근거와 뒷받침할 수 있는 자료를 토대로 자신의 의견을 타당하게 제시하면서 다른 사람의 의견을 능동적으로 수용하고 효과적으로 의견을 조정하는 능력을 보임.\", \"수행\", \"rgba(0, 128, 0, 0.3)\"),\n (\"상대의 의견을 존중하고 협력하는 태도를 보임.\", \"역량\", \"rgba(255, 0, 0, 0.3)\")\n)\n```"
- },
- {
- "role": "user",
- "content": text
- }
- ],
- temperature=1,
- max_tokens=10000,
- top_p=1,
- frequency_penalty=0,
- presence_penalty=0
- )
- return response['choices'][0]['message']['content']
-
-
-
-# Streamlit app title and description
-st.title("성취기준 기반 학생의 특성 및 활동 평가 생성")
-st.write("성취기준을 입력하시면, 해당 성취기준에 기반한 학생의 특성 및 활동에 대한 평가를 \n\n [학생 활동, 성취 수준, 교사의 총평, 학생 역량] 4가지 요소를 조합하여 제공합니다.")
-
-# Load the achievement-standard data
-achievement_standards = data.achievement_standards
-
-# Grade-group selection dropdown
-grade_group = st.selectbox("학년군을 선택하세요:", list(achievement_standards.keys()))
-
-# Subject list for the selected grade group
-subject_list = list(achievement_standards[grade_group].keys())
-subject = st.selectbox("과목을 선택하세요:", subject_list)
-
-# Achievement standards for the selected subject
-selected_standards = achievement_standards[grade_group][subject]
-selected_standard = st.selectbox("성취기준을 선택하세요:", selected_standards)
-
-# Use the selected achievement standard as the default value of the text input
-achievement_standard = st.text_input("성취기준 입력:", value=selected_standard)
-
-
-# Behavior when the "평가 생성" (generate evaluation) button is clicked
-if st.button("평가 생성"):
- with st.spinner('답변 생성중...'):
- result = generate_annotated_text(achievement_standard)
-        # Display the result
- exec(result.replace('```', ''))
-
-
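
One note on the `exec(result...)` call above: it executes whatever the model returns as Python code. A more defensive sketch, assuming the reply keeps the exact `annotated_text((...), (...))` wrapper requested in the system prompt, pulls the tuples out with `ast.literal_eval` instead of executing them:

```python
import ast
from annotated_text import annotated_text


def render_annotated(result: str) -> None:
    """Render the model's annotated_text(...) reply without exec()."""
    cleaned = result.replace("```", "").strip()
    # Keep only the argument list between the outermost parentheses.
    inner = cleaned[cleaned.index("(") + 1 : cleaned.rindex(")")]
    segments = ast.literal_eval(f"[{inner}]")  # list of (text, label, color) tuples
    annotated_text(*segments)
```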
diff --git a/spaces/pycoming/bingo/src/components/learn-more.tsx b/spaces/pycoming/bingo/src/components/learn-more.tsx
deleted file mode 100644
index a64459ee7900a612292e117a6bda96ee9260990f..0000000000000000000000000000000000000000
--- a/spaces/pycoming/bingo/src/components/learn-more.tsx
+++ /dev/null
@@ -1,39 +0,0 @@
-import React from 'react'
-import { SourceAttribution } from '@/lib/bots/bing/types'
-
-export interface LearnMoreProps {
- sourceAttributions?: SourceAttribution[]
-}
-
-export function LearnMore({ sourceAttributions }: LearnMoreProps) {
- if (!sourceAttributions?.length) {
- return null
- }
-
- return (
-
- )
-}
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Buku Statistika Untuk Penelitian Sugiyono Pdf.md b/spaces/quidiaMuxgu/Expedit-SAM/Buku Statistika Untuk Penelitian Sugiyono Pdf.md
deleted file mode 100644
index 40b90fbde1b44cec468a0ae1ceca73e2169fe6b2..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/Buku Statistika Untuk Penelitian Sugiyono Pdf.md
+++ /dev/null
@@ -1,53 +0,0 @@
-
-
Buku Statistika Untuk Penelitian Sugiyono PDF: Apa yang Perlu Anda Ketahui?
-
-
Statistika adalah ilmu yang mempelajari cara mengumpulkan, menganalisis, dan menyajikan data secara sistematis dan objektif. Statistika sangat penting untuk berbagai bidang penelitian, baik ilmu sosial, ilmu alam, maupun ilmu terapan. Dengan statistika, peneliti dapat menguji hipotesis, menarik kesimpulan, dan memberikan rekomendasi berdasarkan bukti empiris.
-
-
Namun, statistika tidaklah mudah dipahami dan diterapkan oleh banyak orang. Banyak istilah, rumus, dan teknik yang harus dipelajari dan dipraktikkan dengan benar. Oleh karena itu, dibutuhkan buku yang dapat menjelaskan konsep dan aplikasi statistika secara mudah, jelas, dan lengkap.
Salah satu buku yang direkomendasikan untuk mempelajari statistika adalah Buku Statistika Untuk Penelitian karya Prof. Dr. Sugiyono. Buku ini merupakan buku ajar yang ditujukan untuk mahasiswa, dosen, peneliti, dan praktisi yang ingin memahami dan menguasai statistika untuk penelitian.
-
-
Apa saja isi dari Buku Statistika Untuk Penelitian Sugiyono PDF?
-
-
Buku Statistika Untuk Penelitian Sugiyono PDF terdiri dari sembilan bab yang membahas berbagai topik seputar statistika untuk penelitian. Berikut adalah ringkasan isi dari masing-masing bab:
-
-
-
Bab 1: Penelitian dan Statistika. Bab ini menjelaskan pengertian statistika, variabel penelitian, dan paradigma penelitian.
-
Bab 2: Statistika Deskriptif. Bab ini membahas cara penyajian data dalam bentuk tabel, grafik, dan diagram, serta cara menghitung ukuran gejala pusat (modus, median, mean) dan ukuran variasi kelompok (rentang data, varians, standar deviasi).
-
Bab 3: Populasi, Sampel dan Pengujian Normalitas Data. Bab ini membahas konsep populasi dan sampel dalam penelitian, teknik sampling (probability sampling dan nonprobability sampling), cara menentukan ukuran sampel, cara mengambil anggota sampel, dan cara menguji normalitas data.
-
Bab 4: Konsep Dasar Pengujian Hipotesis. Bab ini menjelaskan tiga bentuk rumusan hipotesis (deskriptif, komparatif, hubungan), taraf kesalahan dalam pengujian hipotesis (alpha dan beta), dan dua kesalahan dalam pengujian hipotesis (kesalahan tipe I dan tipe II).
-
Bab 5: Pengujian Hipotesis Deskriptif (Satu Sampel). Bab ini membahas cara menguji hipotesis deskriptif dengan menggunakan statistik parametris (uji dua fihak dan uji satu fihak) dan statistik nonparametris (test binomial, chi kuadrat, run test).
-
Bab 6: Pengujian Hipotesis Komparatif. Bab ini membahas cara menguji hipotesis komparatif dengan menggunakan statistik parametris (t-test untuk sampel berkorelasi dan sampel independen; anova untuk sampel berkorelasi dan sampel independen) dan statistik nonparametris (wilcoxon signed rank test; mann whitney test; friedman test; kruskal wallis test).
-
Bab 7: Pengujian Hipotesis Asosiatif. Bab ini membahas cara menguji hipotesis asosiatif dengan menggunakan statistik parametris (korelasi product moment; korelasi ganda; korelasi parsial) dan statistik nonparametris (koefisien kontingensi; koefisien spearman rank; koefisien kendal tau).
-
Bab 8: Analisis Regresi. Bab ini membahas cara melakukan analisis regresi linier sederhana dan analisis regresi linier berganda.
-
Bab 9: Analisis Multivariat Lanjut. Bab ini membahas cara melakukan analisis multivariat lanjut seperti analisis faktor; analisis diskriminan; analisis kluster; analisis konjoin; analisis varians multivariat; manova; ancova.
-
-
-
Bagaimana cara mendapatkan Buku Statistika Untuk Penelitian Sugiyono PDF?
-
-
Anda dapat mendapatkan Buku Statistika Untuk Penelitian Sugiyono PDF dengan beberapa cara berikut:
-
-
-
Membeli buku cetaknya di toko buku online atau offline yang menjual buku-buku akademik.
-
Mengunduh buku elektroniknya di situs-situs yang menyediakan layanan download ebook gratis atau berbayar.
-
Membaca buku elektroniknya di platform-platform yang menyediakan layanan baca ebook online gratis atau berbayar.
-
-
-
Sebelum Anda mendapatkan Buku Statistika Untuk Penelitian Sugiyono PDF dengan cara apapun, pastikan Anda memeriksa keaslian dan kualitas buku tersebut. Jangan sampai Anda mendapatkan buku yang palsu atau tidak lengkap yang dapat merugikan Anda secara akademis maupun finansial.
-
-
Apa manfaat dari Buku Statistika Untuk Penelitian Sugiyono PDF?
-
-
Buku Statistika Untuk Penelitian Sugiyono PDF memiliki banyak manfaat bagi Anda yang ingin mempelajari statistika untuk penelitian. Berikut adalah beberapa manfaatnya:
-
-
-
Buku ini ditulis oleh seorang ahli statistika yang berpengalaman dalam bidang pendidikan dan penelitian.
-
Buku ini disusun secara sistematis dan logis sesuai dengan tahapan penelitian.
-
Buku ini menggunakan bahasa yang mudah dipahami oleh pembaca awam maupun mahir.
-
Buku ini dilengkapi dengan contoh-contoh perhitungan dan aplikasi statistika dalam berbagai bidang penelitian.
-
Buku ini menyediakan latihan-latihan soal dan jawaban untuk menguji pemahaman pembaca.
-
-
-
Dengan demikian, Buku Statistika Untuk Penelitian Sugiyono PDF dapat menjadi sumber belajar yang efektif dan efisien bagi Anda yang ingin memahami dan menguasai statistika untuk penelitian.
3cee63e6c2
-
-
\ No newline at end of file
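
Since Chapter 2 of the book deals with descriptive statistics, a tiny Python illustration of the quantities it lists (central tendency and dispersion) may help; the data values below are made up for the example.

```python
import statistics as st

data = [7, 8, 8, 9, 10, 12, 15]

print("mean    :", st.mean(data))
print("median  :", st.median(data))
print("mode    :", st.mode(data))
print("range   :", max(data) - min(data))
print("variance:", st.variance(data))  # sample variance
print("std dev :", st.stdev(data))     # sample standard deviation
```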
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Car Mechanic Simulator 2014 Torent Tpb.md b/spaces/quidiaMuxgu/Expedit-SAM/Car Mechanic Simulator 2014 Torent Tpb.md
deleted file mode 100644
index 44da3146ab866b0e8755ca9d235b5de5cd6582da..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/Car Mechanic Simulator 2014 Torent Tpb.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
https://itsupportnetwork.com/crack-top-admiscorar/. http://jltcommunity.com/advert/5nine-manager-serial-crack-keygen-install/ version] intettedibcem wakelet lisrel 8.8 full version free 15 crack admiscorar xforce keygen fabrication camduct 2013 key crack recover my photos 446 hex workshop professional 6.7.0.5247 keygen-embrace keygen wakelet.com useseetroroubsinigh wakelet.comcrerbaneouboxorseft wakelet.com cibamedoprewmemocet download
-
best site to get notepad++ pro 2017 keygen windows 7 ultimate edition socialcam online meet my gurl football roblox badge 2016 07 keygen 2018 tool to hack theunlimitedversionkeinionfirefox-2.0-osx-high-sierra-1x. http://eservice.com/crack-optitex-96-windows-7-61-jartab-hack-tool-get-keygen-free/ https://www.trello.com/c/jkbxlnqe/58-crack-optitex-96-windows-7-61-jartab-hack-tool-get-keygen-free.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Edius 65 Activation Error Code 1500 Solution.md b/spaces/quidiaMuxgu/Expedit-SAM/Edius 65 Activation Error Code 1500 Solution.md
deleted file mode 100644
index ddd8a5169a8be2b810d96394332101dea3257a6a..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/Edius 65 Activation Error Code 1500 Solution.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
metal gear solid 2 was released in may 2004 in japan, with the title metal gear solid 2: substance in north america and europe. it was developed by kojima productions, and was published by konami. development began in november 2001, and it was released for the playstation 2 in japan on may 22, 2004. the playstation 2 version was later released in pal regions on may 22, 2005, and in north america on july 8, 2005. the pc version was released in north america on may 29, 2005. in japan, the game was released on the xbox 360 and playstation 3 on november 13, 2005. the pc release was canceled in that region. the xbox 360 and playstation 3 versions were eventually released in pal regions on november 18, 2006.
the game was released on the playstation portable (psp) on december 10, 2006 in north america and pal regions, and on december 12, 2006 in japan. on march 11, 2009, the psp version was re-released as metal gear solid 2: substance: digital version, and it was released for playstation 3 on april 29, 2010 in north america and pal regions. unlike the psp version, this digital version is only compatible with the playstation 3, not the playstation portable. the north american playstation 3 version was released on october 30, 2010.
-
metal gear solid 2's story centers around solid snake, a cia trainee who is tasked with infiltrating the metadefense systems corporation to steal a metal gear prototype. after snake discovers the plan to use the metal gear prototype to launch a nuclear strike, he is sent to infiltrate the company and steal the metal gear. along the way, he learns that the metal gear may be a weapon to be used by the army of the patriots, a group of anti-government terrorists who are out to destroy america. throughout the game, snake meets up with his former mentor fox hounds, who has joined the patriots as well. the player also meets his mentor and rival quiet, who is on snake's side. other characters include the narcissistic otacon, who works for the patriots, and the psychotic colonel ocelot, who is the leader of the patriots' special forces unit, dead cell. snake must infiltrate several of their training facilities to obtain the metal gear and complete his mission.
Visit This Site Nice Post Kingsoft Office 2013 Free Edition Crack Luma3DS Capture Pro.2011 Version Cracked JVC Everio Camera Free 7Digital Photo Editor Keygen Oxygen 7 Crack
-
Visit This Site Nice Post Delta Force: Urban Warfare Crack Thermaltake Core X370GTX-Gaming-x-Plus-16G Memory Card Case Black Aluminum USB 3.0 10GB/16GB/32GB Type-C Mobile PCI Card Docking Station for Samsung Galaxy Note 3 / 3GS Galaxy Note 4 / Note 10.1 / LG G3 / Lenovo ThinkPad Tablet LACIE Nano LTO4 6TB External Hard Drive Hard Disk Recovery Cryptowall 5.6 Crack JetBrains ReSharper Ultimate 10 Crack How to gain parental control over an iPad that has no password. The problem is that there is no password on any of the kids iPads and so i have no way of knowing who is using it. I can’t install a parental control app because the kids don’t know the password that protects the device and a factory reset doesn’t work because it has no password. iTunes has no parental control settings, nor can it even be used to lock down an iPad or iPhone because you need a password that is set by the parents and cannot be used to lock down the device.
Foxit Reader is a fast PDF viewer that includes annotation capabilities, Javascript support, interactive form filler and built-in text converter. The interface includes text reading, two page view, rotation, autoscroll, text view, skins, measuring, text and background color replacement and supports drag-and-drop. Annotation tools include text sticky notes and standard text boxes, as well as form data import and export.
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/reha/Stick_Tech/utils.py b/spaces/reha/Stick_Tech/utils.py
deleted file mode 100644
index 3733a75111dc89cefa333b34933ae01623550ea7..0000000000000000000000000000000000000000
--- a/spaces/reha/Stick_Tech/utils.py
+++ /dev/null
@@ -1,338 +0,0 @@
-import os
-import glob
-import sys
-import argparse
-import logging
-import json
-import subprocess
-
-import librosa
-import numpy as np
-import torchaudio
-from scipy.io.wavfile import read
-import torch
-import torchvision
-from torch.nn import functional as F
-from commons import sequence_mask
-from hubert import hubert_model
-MATPLOTLIB_FLAG = False
-
-logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
-logger = logging
-
-f0_bin = 256
-f0_max = 1100.0
-f0_min = 50.0
-f0_mel_min = 1127 * np.log(1 + f0_min / 700)
-f0_mel_max = 1127 * np.log(1 + f0_max / 700)
-
-def f0_to_coarse(f0):
- is_torch = isinstance(f0, torch.Tensor)
- f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700)
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1
-
- f0_mel[f0_mel <= 1] = 1
- f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
-    f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(int)
- assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min())
- return f0_coarse
-
-
-def get_hubert_model(rank=None):
-
- hubert_soft = hubert_model.hubert_soft("hubert/hubert-soft-0d54a1f4.pt")
- if rank is not None:
- hubert_soft = hubert_soft.cuda(rank)
- return hubert_soft
-
-def get_hubert_content(hmodel, y=None, path=None):
- if path is not None:
- source, sr = torchaudio.load(path)
- source = torchaudio.functional.resample(source, sr, 16000)
- if len(source.shape) == 2 and source.shape[1] >= 2:
- source = torch.mean(source, dim=0).unsqueeze(0)
- else:
- source = y
- source = source.unsqueeze(0)
- with torch.inference_mode():
- units = hmodel.units(source)
- return units.transpose(1,2)
-
-
-def get_content(cmodel, y):
- with torch.no_grad():
- c = cmodel.extract_features(y.squeeze(1))[0]
- c = c.transpose(1, 2)
- return c
-
-
-
-def transform(mel, height): # 68-92
- #r = np.random.random()
- #rate = r * 0.3 + 0.85 # 0.85-1.15
- #height = int(mel.size(-2) * rate)
- tgt = torchvision.transforms.functional.resize(mel, (height, mel.size(-1)))
- if height >= mel.size(-2):
- return tgt[:, :mel.size(-2), :]
- else:
- silence = tgt[:,-1:,:].repeat(1,mel.size(-2)-height,1)
- silence += torch.randn_like(silence) / 10
- return torch.cat((tgt, silence), 1)
-
-
-def stretch(mel, width): # 0.5-2
- return torchvision.transforms.functional.resize(mel, (mel.size(-2), width))
-
-
-def load_checkpoint(checkpoint_path, model, optimizer=None):
- assert os.path.isfile(checkpoint_path)
- checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
- iteration = checkpoint_dict['iteration']
- learning_rate = checkpoint_dict['learning_rate']
- if iteration is None:
- iteration = 1
- if learning_rate is None:
- learning_rate = 0.0002
- if optimizer is not None and checkpoint_dict['optimizer'] is not None:
- optimizer.load_state_dict(checkpoint_dict['optimizer'])
- saved_state_dict = checkpoint_dict['model']
- if hasattr(model, 'module'):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
- new_state_dict= {}
- for k, v in state_dict.items():
- try:
- new_state_dict[k] = saved_state_dict[k]
-        except KeyError:
- logger.info("%s is not in the checkpoint" % k)
- new_state_dict[k] = v
- if hasattr(model, 'module'):
- model.module.load_state_dict(new_state_dict)
- else:
- model.load_state_dict(new_state_dict)
- logger.info("Loaded checkpoint '{}' (iteration {})" .format(
- checkpoint_path, iteration))
- return model, optimizer, learning_rate, iteration
-
-
-def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
- # ckptname = checkpoint_path.split(os.sep)[-1]
- # newest_step = int(ckptname.split(".")[0].split("_")[1])
- # val_steps = 2000
- # last_ckptname = checkpoint_path.replace(str(newest_step), str(newest_step - val_steps*3))
- # if newest_step >= val_steps*3:
- # os.system(f"rm {last_ckptname}")
- logger.info("Saving model and optimizer state at iteration {} to {}".format(
- iteration, checkpoint_path))
- if hasattr(model, 'module'):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
- torch.save({'model': state_dict,
- 'iteration': iteration,
- 'optimizer': optimizer.state_dict(),
- 'learning_rate': learning_rate}, checkpoint_path)
-
-
-def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
- for k, v in scalars.items():
- writer.add_scalar(k, v, global_step)
- for k, v in histograms.items():
- writer.add_histogram(k, v, global_step)
- for k, v in images.items():
- writer.add_image(k, v, global_step, dataformats='HWC')
- for k, v in audios.items():
- writer.add_audio(k, v, global_step, audio_sampling_rate)
-
-
-def latest_checkpoint_path(dir_path, regex="G_*.pth"):
- f_list = glob.glob(os.path.join(dir_path, regex))
- f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
- x = f_list[-1]
- print(x)
- return x
-
-
-def plot_spectrogram_to_numpy(spectrogram):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(10,2))
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
- interpolation='none')
- plt.colorbar(im, ax=ax)
- plt.xlabel("Frames")
- plt.ylabel("Channels")
- plt.tight_layout()
-
- fig.canvas.draw()
-    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def plot_alignment_to_numpy(alignment, info=None):
- global MATPLOTLIB_FLAG
- if not MATPLOTLIB_FLAG:
- import matplotlib
- matplotlib.use("Agg")
- MATPLOTLIB_FLAG = True
- mpl_logger = logging.getLogger('matplotlib')
- mpl_logger.setLevel(logging.WARNING)
- import matplotlib.pylab as plt
- import numpy as np
-
- fig, ax = plt.subplots(figsize=(6, 4))
- im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
- interpolation='none')
- fig.colorbar(im, ax=ax)
- xlabel = 'Decoder timestep'
- if info is not None:
- xlabel += '\n\n' + info
- plt.xlabel(xlabel)
- plt.ylabel('Encoder timestep')
- plt.tight_layout()
-
- fig.canvas.draw()
-    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
- plt.close()
- return data
-
-
-def load_wav_to_torch(full_path):
- sampling_rate, data = read(full_path)
- return torch.FloatTensor(data.astype(np.float32)), sampling_rate
-
-
-def load_filepaths_and_text(filename, split="|"):
- with open(filename, encoding='utf-8') as f:
- filepaths_and_text = [line.strip().split(split) for line in f]
- return filepaths_and_text
-
-
-def get_hparams(init=True):
- parser = argparse.ArgumentParser()
- parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
- help='JSON file for configuration')
- parser.add_argument('-m', '--model', type=str, required=True,
- help='Model name')
-
- args = parser.parse_args()
- model_dir = os.path.join("./logs", args.model)
-
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
-
- config_path = args.config
- config_save_path = os.path.join(model_dir, "config.json")
- if init:
- with open(config_path, "r") as f:
- data = f.read()
- with open(config_save_path, "w") as f:
- f.write(data)
- else:
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams = HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_dir(model_dir):
- config_save_path = os.path.join(model_dir, "config.json")
- with open(config_save_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams =HParams(**config)
- hparams.model_dir = model_dir
- return hparams
-
-
-def get_hparams_from_file(config_path):
- with open(config_path, "r") as f:
- data = f.read()
- config = json.loads(data)
-
- hparams =HParams(**config)
- return hparams
-
-
-def check_git_hash(model_dir):
- source_dir = os.path.dirname(os.path.realpath(__file__))
- if not os.path.exists(os.path.join(source_dir, ".git")):
- logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
- source_dir
- ))
- return
-
- cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
- path = os.path.join(model_dir, "githash")
- if os.path.exists(path):
- saved_hash = open(path).read()
- if saved_hash != cur_hash:
- logger.warn("git hash values are different. {}(saved) != {}(current)".format(
- saved_hash[:8], cur_hash[:8]))
- else:
- open(path, "w").write(cur_hash)
-
-
-def get_logger(model_dir, filename="train.log"):
- global logger
- logger = logging.getLogger(os.path.basename(model_dir))
- logger.setLevel(logging.DEBUG)
-
- formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
- if not os.path.exists(model_dir):
- os.makedirs(model_dir)
- h = logging.FileHandler(os.path.join(model_dir, filename))
- h.setLevel(logging.DEBUG)
- h.setFormatter(formatter)
- logger.addHandler(h)
- return logger
-
-
-class HParams():
- def __init__(self, **kwargs):
- for k, v in kwargs.items():
- if type(v) == dict:
- v = HParams(**v)
- self[k] = v
-
- def keys(self):
- return self.__dict__.keys()
-
- def items(self):
- return self.__dict__.items()
-
- def values(self):
- return self.__dict__.values()
-
- def __len__(self):
- return len(self.__dict__)
-
- def __getitem__(self, key):
- return getattr(self, key)
-
- def __setitem__(self, key, value):
- return setattr(self, key, value)
-
- def __contains__(self, key):
- return key in self.__dict__
-
- def __repr__(self):
- return self.__dict__.__repr__()
-
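
To make the `f0_to_coarse` mapping above concrete, here is a self-contained NumPy sketch of the same mel-scale quantisation into 256 coarse bins; the sample F0 values are arbitrary.

```python
import numpy as np

f0_min, f0_max, f0_bin = 50.0, 1100.0, 256
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
f0_mel_max = 1127 * np.log(1 + f0_max / 700)

f0 = np.array([0.0, 100.0, 440.0, 1100.0])  # Hz; 0 means unvoiced
f0_mel = 1127 * np.log(1 + f0 / 700)
voiced = f0_mel > 0
f0_mel[voiced] = (f0_mel[voiced] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1
coarse = np.rint(np.clip(f0_mel, 1, f0_bin - 1)).astype(int)
print(coarse)  # -> [  1  20 122 255]
```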
diff --git a/spaces/rehanuddin/03StreamlitVideoASRNLP/streaming.py b/spaces/rehanuddin/03StreamlitVideoASRNLP/streaming.py
deleted file mode 100644
index cc2048269b3e9ac09886471ef9b6dc681db09f25..0000000000000000000000000000000000000000
--- a/spaces/rehanuddin/03StreamlitVideoASRNLP/streaming.py
+++ /dev/null
@@ -1,66 +0,0 @@
-import subprocess
-
-import numpy as np
-
-
-def ffmpeg_stream(youtube_url, sampling_rate=16_000, chunk_duration_ms=5000, pad_duration_ms=200):
- """
- Helper function to read an audio file through ffmpeg.
- """
- chunk_len = int(sampling_rate * chunk_duration_ms / 1000)
- pad_len = int(sampling_rate * pad_duration_ms / 1000)
- read_chunk_len = chunk_len + pad_len * 2
-
- ar = f"{sampling_rate}"
- ac = "1"
- format_for_conversion = "f32le"
- dtype = np.float32
- size_of_sample = 4
-
- ffmpeg_command = [
- "ffmpeg",
- "-i",
- "pipe:",
- "-ac",
- ac,
- "-ar",
- ar,
- "-f",
- format_for_conversion,
- "-hide_banner",
- "-loglevel",
- "quiet",
- "pipe:1",
- ]
-
- ytdl_command = ["yt-dlp", "-f", "bestaudio", youtube_url, "--quiet", "-o", "-"]
-
- try:
- ffmpeg_process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=-1)
- ytdl_process = subprocess.Popen(ytdl_command, stdout=ffmpeg_process.stdin)
- except FileNotFoundError:
- raise ValueError("ffmpeg was not found but is required to stream audio files from filename")
-
- acc = b""
- leftover = np.zeros((0,), dtype=np.float32)
- while ytdl_process.poll() is None:
- buflen = read_chunk_len * size_of_sample
-
- raw = ffmpeg_process.stdout.read(buflen)
- if raw == b"":
- break
-
- if len(acc) + len(raw) > buflen:
- acc = raw
- else:
- acc += raw
-
- audio = np.frombuffer(acc, dtype=dtype)
- audio = np.concatenate([leftover, audio])
- if len(audio) < pad_len * 2:
- # TODO: handle end of stream better than this
- break
- yield audio
-
- leftover = audio[-pad_len * 2 :]
- read_chunk_len = chunk_len
\ No newline at end of file
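
A minimal usage sketch for the generator above; it assumes `ffmpeg` and `yt-dlp` are on PATH and that `ffmpeg_stream` from this module is importable, and the URL is just a placeholder.

```python
if __name__ == "__main__":
    url = "https://www.youtube.com/watch?v=XXXXXXXXXXX"  # placeholder video URL
    for i, chunk in enumerate(ffmpeg_stream(url, sampling_rate=16_000)):
        # Each chunk is a float32 numpy array of mono 16 kHz samples,
        # padded at both ends as configured by pad_duration_ms.
        print(f"chunk {i}: {len(chunk)} samples ({len(chunk) / 16_000:.2f}s)")
        if i >= 3:
            break
```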
diff --git a/spaces/riccorl/relik-entity-linking/relik/inference/data/tokenizers/spacy_tokenizer.py b/spaces/riccorl/relik-entity-linking/relik/inference/data/tokenizers/spacy_tokenizer.py
deleted file mode 100644
index b949216ed5cf152ae4a7722c4a6be3f883481db2..0000000000000000000000000000000000000000
--- a/spaces/riccorl/relik-entity-linking/relik/inference/data/tokenizers/spacy_tokenizer.py
+++ /dev/null
@@ -1,228 +0,0 @@
-import logging
-from typing import Dict, List, Tuple, Union
-
-import spacy
-
-# from ipa.common.utils import load_spacy
-from overrides import overrides
-from spacy.cli.download import download as spacy_download
-from spacy.tokens import Doc
-
-from relik.common.log import get_logger
-from relik.inference.data.objects import Word
-from relik.inference.data.tokenizers import SPACY_LANGUAGE_MAPPER
-from relik.inference.data.tokenizers.base_tokenizer import BaseTokenizer
-
-logger = get_logger(level=logging.DEBUG)
-
-# Spacy and Stanza stuff
-
-LOADED_SPACY_MODELS: Dict[Tuple[str, bool, bool, bool, bool], spacy.Language] = {}
-
-
-def load_spacy(
- language: str,
- pos_tags: bool = False,
- lemma: bool = False,
- parse: bool = False,
- split_on_spaces: bool = False,
-) -> spacy.Language:
- """
- Download and load spacy model.
-
- Args:
- language (:obj:`str`, defaults to :obj:`en`):
- Language of the text to tokenize.
- pos_tags (:obj:`bool`, optional, defaults to :obj:`False`):
- If :obj:`True`, performs POS tagging with spacy model.
- lemma (:obj:`bool`, optional, defaults to :obj:`False`):
- If :obj:`True`, performs lemmatization with spacy model.
- parse (:obj:`bool`, optional, defaults to :obj:`False`):
- If :obj:`True`, performs dependency parsing with spacy model.
- split_on_spaces (:obj:`bool`, optional, defaults to :obj:`False`):
- If :obj:`True`, will split by spaces without performing tokenization.
-
- Returns:
- :obj:`spacy.Language`: The spacy model loaded.
- """
- exclude = ["vectors", "textcat", "ner"]
- if not pos_tags:
- exclude.append("tagger")
- if not lemma:
- exclude.append("lemmatizer")
- if not parse:
- exclude.append("parser")
-
- # check if the model is already loaded
- # if so, there is no need to reload it
- spacy_params = (language, pos_tags, lemma, parse, split_on_spaces)
- if spacy_params not in LOADED_SPACY_MODELS:
- try:
- spacy_tagger = spacy.load(language, exclude=exclude)
- except OSError:
- logger.warning(
- "Spacy model '%s' not found. Downloading and installing.", language
- )
- spacy_download(language)
- spacy_tagger = spacy.load(language, exclude=exclude)
-
- # if everything is disabled, return only the tokenizer
- # for faster tokenization
- # TODO: is it really faster?
- # if len(exclude) >= 6:
- # spacy_tagger = spacy_tagger.tokenizer
- LOADED_SPACY_MODELS[spacy_params] = spacy_tagger
-
- return LOADED_SPACY_MODELS[spacy_params]
-
-
-class SpacyTokenizer(BaseTokenizer):
- """
- A :obj:`Tokenizer` that uses SpaCy to tokenizer and preprocess the text. It returns :obj:`Word` objects.
-
- Args:
- language (:obj:`str`, optional, defaults to :obj:`en`):
- Language of the text to tokenize.
- return_pos_tags (:obj:`bool`, optional, defaults to :obj:`False`):
- If :obj:`True`, performs POS tagging with spacy model.
- return_lemmas (:obj:`bool`, optional, defaults to :obj:`False`):
- If :obj:`True`, performs lemmatization with spacy model.
- return_deps (:obj:`bool`, optional, defaults to :obj:`False`):
- If :obj:`True`, performs dependency parsing with spacy model.
- split_on_spaces (:obj:`bool`, optional, defaults to :obj:`False`):
- If :obj:`True`, will split by spaces without performing tokenization.
- use_gpu (:obj:`bool`, optional, defaults to :obj:`False`):
-            If :obj:`True`, will load the spaCy pipeline on GPU.
- """
-
- def __init__(
- self,
- language: str = "en",
- return_pos_tags: bool = False,
- return_lemmas: bool = False,
- return_deps: bool = False,
- split_on_spaces: bool = False,
- use_gpu: bool = False,
- ):
- super(SpacyTokenizer, self).__init__()
- if language not in SPACY_LANGUAGE_MAPPER:
- raise ValueError(
- f"`{language}` language not supported. The supported "
- f"languages are: {list(SPACY_LANGUAGE_MAPPER.keys())}."
- )
- if use_gpu:
- # load the model on GPU
- # if the GPU is not available or not correctly configured,
- # it will rise an error
- spacy.require_gpu()
- self.spacy = load_spacy(
- SPACY_LANGUAGE_MAPPER[language],
- return_pos_tags,
- return_lemmas,
- return_deps,
- split_on_spaces,
- )
- self.split_on_spaces = split_on_spaces
-
- def __call__(
- self,
- texts: Union[str, List[str], List[List[str]]],
- is_split_into_words: bool = False,
- **kwargs,
- ) -> Union[List[Word], List[List[Word]]]:
- """
- Tokenize the input into single words using SpaCy models.
-
- Args:
- texts (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`):
- Text to tag. It can be a single string, a batch of string and pre-tokenized strings.
- is_split_into_words (:obj:`bool`, optional, defaults to :obj:`False`):
- If :obj:`True` and the input is a string, the input is split on spaces.
-
- Returns:
- :obj:`List[List[Word]]`: The input text tokenized in single words.
-
- Example::
-
- >>> from ipa import SpacyTokenizer
-
- >>> spacy_tokenizer = SpacyTokenizer(language="en", pos_tags=True, lemma=True)
- >>> spacy_tokenizer("Mary sold the car to John.")
-
- """
- # check if input is batched or a single sample
- is_batched = self.check_is_batched(texts, is_split_into_words)
- if is_batched:
- tokenized = self.tokenize_batch(texts)
- else:
- tokenized = self.tokenize(texts)
- return tokenized
-
- @overrides
- def tokenize(self, text: Union[str, List[str]]) -> List[Word]:
- if self.split_on_spaces:
- if isinstance(text, str):
- text = text.split(" ")
- spaces = [True] * len(text)
- text = Doc(self.spacy.vocab, words=text, spaces=spaces)
- return self._clean_tokens(self.spacy(text))
-
- @overrides
- def tokenize_batch(
- self, texts: Union[List[str], List[List[str]]]
- ) -> List[List[Word]]:
- if self.split_on_spaces:
- if isinstance(texts[0], str):
- texts = [text.split(" ") for text in texts]
- spaces = [[True] * len(text) for text in texts]
- texts = [
- Doc(self.spacy.vocab, words=text, spaces=space)
- for text, space in zip(texts, spaces)
- ]
- return [self._clean_tokens(tokens) for tokens in self.spacy.pipe(texts)]
-
- @staticmethod
- def _clean_tokens(tokens: Doc) -> List[Word]:
- """
- Converts spaCy tokens to :obj:`Word`.
-
- Args:
- tokens (:obj:`spacy.tokens.Doc`):
- Tokens from SpaCy model.
-
- Returns:
- :obj:`List[Word]`: The SpaCy model output converted into :obj:`Word` objects.
- """
- words = [
- Word(
- token.text,
- token.i,
- token.idx,
- token.idx + len(token),
- token.lemma_,
- token.pos_,
- token.dep_,
- token.head.i,
- )
- for token in tokens
- ]
- return words
-
-
-class WhitespaceSpacyTokenizer:
- """Simple white space tokenizer for SpaCy."""
-
- def __init__(self, vocab):
- self.vocab = vocab
-
- def __call__(self, text):
- if isinstance(text, str):
- words = text.split(" ")
- elif isinstance(text, list):
- words = text
- else:
- raise ValueError(
- f"text must be either `str` or `list`, found: `{type(text)}`"
- )
- spaces = [True] * len(words)
- return Doc(self.vocab, words=words, spaces=spaces)
diff --git a/spaces/rinong/StyleGAN-NADA/op/fused_act_cpu.py b/spaces/rinong/StyleGAN-NADA/op/fused_act_cpu.py
deleted file mode 100644
index f997dafdd53aa9f4bbe07af6746c67a2c6dcb4c7..0000000000000000000000000000000000000000
--- a/spaces/rinong/StyleGAN-NADA/op/fused_act_cpu.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import os
-
-import torch
-from torch import nn
-from torch.autograd import Function
-from torch.nn import functional as F
-
-
-module_path = os.path.dirname(__file__)
-
-
-class FusedLeakyReLU(nn.Module):
- def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
- super().__init__()
-
- self.bias = nn.Parameter(torch.zeros(channel))
- self.negative_slope = negative_slope
- self.scale = scale
-
- def forward(self, input):
- return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)
-
-def fused_leaky_relu(input, bias=None, negative_slope=0.2, scale=2 ** 0.5):
- if input.device.type == "cpu":
- if bias is not None:
- rest_dim = [1] * (input.ndim - bias.ndim - 1)
- return (
- F.leaky_relu(
- input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=0.2
- )
- * scale
- )
-
- else:
- return F.leaky_relu(input, negative_slope=0.2) * scale
-
- else:
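-        # Note: FusedLeakyReLUFunction is normally provided by the compiled CUDA
-        # extension; this CPU-only fallback module never defines or imports it,
-        # so reaching this branch with a non-CPU tensor would raise a NameError.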
- return FusedLeakyReLUFunction.apply(
- input.contiguous(), bias, negative_slope, scale
- )
-
diff --git a/spaces/robinhad/qirimtatar-tts/app.py b/spaces/robinhad/qirimtatar-tts/app.py
deleted file mode 100644
index dd3216428903de11439e7e0df723cc3b6e068c0b..0000000000000000000000000000000000000000
--- a/spaces/robinhad/qirimtatar-tts/app.py
+++ /dev/null
@@ -1,125 +0,0 @@
-from os import getenv
-from queue import Queue
-from threading import Thread
-from time import sleep
-from data_logger import log_data
-import gradio as gr
-from crh_transliterator.transliterator import transliterate
-from crh_preprocessor.preprocessor import preprocess
-from datetime import datetime
-
-import tempfile
-import gradio as gr
-from datetime import datetime
-from enum import Enum
-from crh_tts.tts import TTS, Voices
-from torch.cuda import is_available
-
-
-class VoiceOption(Enum):
- Sevil = "Севіль (жіночий) 👩"
- # Arslan = "Арслан (чоловічий) 👨"
- Eskander = "Ескандер (чоловічий) 👨"
- # Abibulla = "Абібулла (чоловічий) 👨"
-
-
-def check_thread(logging_queue: Queue):
- logging_callback = log_data(
- hf_token=getenv("HF_API_TOKEN"), dataset_name="crh-tts-output", private=False
- )
- while True:
- sleep(60)
- batch = []
- while not logging_queue.empty():
- batch.append(logging_queue.get())
-
- if len(batch) > 0:
- try:
- logging_callback(batch)
- except:
- print(
- "Error happened while pushing data to HF. Puttting items back in queue..."
- )
- for item in batch:
- logging_queue.put(item)
-
-
-if getenv("HF_API_TOKEN") is not None:
- log_queue = Queue()
- t = Thread(target=check_thread, args=(log_queue,))
- t.start()
-
-print(f"CUDA available? {is_available()}")
-
-
-badge = (
- "https://visitor-badge-reloaded.herokuapp.com/badge?page_id=robinhad.qirimtatar-tts"
-)
-
-crh_tts = TTS(use_cuda=is_available())
-
-
-def tts(text: str, voice: str):
- print("============================")
- print("Original text:", text)
- print("Voice", voice)
- print("Time:", datetime.utcnow())
-
- voice_mapping = {
- VoiceOption.Sevil.value: Voices.Sevil.value,
- # VoiceOption.Arslan.value: Voices.Arslan.value,
- VoiceOption.Eskander.value: Voices.Eskander.value,
- # VoiceOption.Abibulla.value: Voices.Abibulla.value,
- }
-
- speaker_name = voice_mapping[voice]
- if getenv("HF_API_TOKEN") is not None:
- log_queue.put([text, speaker_name, str(datetime.utcnow())])
- text_limit = 7200
- text = (
- text if len(text) < text_limit else text[0:text_limit]
- ) # mitigate crashes on hf space
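- # Transliterate and preprocess the input text before passing it to the TTS model.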
- result = transliterate(text)
- text = preprocess(result)
- with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
- _, text = crh_tts.tts(text, speaker_name, fp)
- return fp.name, text
-
-
-with open("README.md") as file:
- article = file.read()
- article = article[article.find("---\n", 4) + 5 : :]
-
-
-iface = gr.Interface(
- fn=tts,
- inputs=[
- gr.components.Textbox(
- label="Input",
- value="Qırımtatarlar! Селям! Ишлер насыл?",
- ),
- gr.components.Radio(
- label="Голос",
- choices=[option.value for option in VoiceOption],
- value=VoiceOption.Sevil.value,
- ),
- ],
- outputs=[
- gr.components.Audio(label="Output"),
- gr.components.Textbox(label="Оброблений текст"),
- ],
- title="Кримськотатарський синтез мовлення",
- description="Кримськотатарський Text-to-Speech за допомогою Coqui TTS",
- article=article + f'\n<img src="{badge}"/>',
- examples=[
- ["Selâm! İşler nasıl?", VoiceOption.Eskander.value],
- [
- "Qırımtatarlar üç subetnik gruppasından er birisiniñ (tatlar, noğaylar ve yalıboylular) öz şivesi bar.",
- VoiceOption.Sevil.value,
- ],
- ["Селям! Ишлер насыл?", VoiceOption.Sevil.value],
- ["Selâm! 123456789", VoiceOption.Eskander.value],
- ["Selâm! 1,2,3,4,5,6,789", VoiceOption.Eskander.value],
- ],
-)
-iface.launch()
diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/dense_heads/dense_test_mixins.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/dense_heads/dense_test_mixins.py
deleted file mode 100644
index 3421548955d62652ea3d6e65dec71253d021615a..0000000000000000000000000000000000000000
--- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/dense_heads/dense_test_mixins.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import sys
-from inspect import signature
-
-import torch
-from mmcv.ops import batched_nms
-
-from mmdet.core import bbox_mapping_back, merge_aug_proposals
-
-if sys.version_info >= (3, 7):
- from mmdet.utils.contextmanagers import completed
-
-
-class BBoxTestMixin(object):
- """Mixin class for testing det bboxes via DenseHead."""
-
- def simple_test_bboxes(self, feats, img_metas, rescale=False):
- """Test det bboxes without test-time augmentation, can be applied in
- DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``,
- etc.
-
- Args:
- feats (tuple[torch.Tensor]): Multi-level features from the
- upstream network, each is a 4D-tensor.
- img_metas (list[dict]): List of image information.
- rescale (bool, optional): Whether to rescale the results.
- Defaults to False.
-
- Returns:
- list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
- The first item is ``bboxes`` with shape (n, 5),
- where 5 represent (tl_x, tl_y, br_x, br_y, score).
- The shape of the second tensor in the tuple is ``labels``
- with shape (n,)
- """
- outs = self.forward(feats)
- results_list = self.get_bboxes(
- *outs, img_metas=img_metas, rescale=rescale)
- return results_list
-
- def aug_test_bboxes(self, feats, img_metas, rescale=False):
- """Test det bboxes with test time augmentation, can be applied in
- DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``,
- etc.
-
- Args:
- feats (list[Tensor]): the outer list indicates test-time
- augmentations and inner Tensor should have a shape NxCxHxW,
- which contains features for all images in the batch.
- img_metas (list[list[dict]]): the outer list indicates test-time
- augs (multiscale, flip, etc.) and the inner list indicates
- images in a batch. each dict has image information.
- rescale (bool, optional): Whether to rescale the results.
- Defaults to False.
-
- Returns:
- list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
- The first item is ``bboxes`` with shape (n, 5),
- where 5 represent (tl_x, tl_y, br_x, br_y, score).
- The shape of the second tensor in the tuple is ``labels``
- with shape (n,). The length of list should always be 1.
- """
- # check with_nms argument
- gb_sig = signature(self.get_bboxes)
- gb_args = [p.name for p in gb_sig.parameters.values()]
- gbs_sig = signature(self._get_bboxes_single)
- gbs_args = [p.name for p in gbs_sig.parameters.values()]
- assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \
- f'{self.__class__.__name__}' \
- ' does not support test-time augmentation'
-
- aug_bboxes = []
- aug_scores = []
- aug_labels = []
- for x, img_meta in zip(feats, img_metas):
- # only one image in the batch
- outs = self.forward(x)
- bbox_outputs = self.get_bboxes(
- *outs,
- img_metas=img_meta,
- cfg=self.test_cfg,
- rescale=False,
- with_nms=False)[0]
- aug_bboxes.append(bbox_outputs[0])
- aug_scores.append(bbox_outputs[1])
- if len(bbox_outputs) >= 3:
- aug_labels.append(bbox_outputs[2])
-
- # after merging, bboxes will be rescaled to the original image size
- merged_bboxes, merged_scores = self.merge_aug_bboxes(
- aug_bboxes, aug_scores, img_metas)
- merged_labels = torch.cat(aug_labels, dim=0) if aug_labels else None
-
- if merged_bboxes.numel() == 0:
- det_bboxes = torch.cat([merged_bboxes, merged_scores[:, None]], -1)
- return [
- (det_bboxes, merged_labels),
- ]
-
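- # Run NMS over the merged predictions from all augmentations and keep at most `max_per_img` detections.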
- det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores,
- merged_labels, self.test_cfg.nms)
- det_bboxes = det_bboxes[:self.test_cfg.max_per_img]
- det_labels = merged_labels[keep_idxs][:self.test_cfg.max_per_img]
-
- if rescale:
- _det_bboxes = det_bboxes
- else:
- _det_bboxes = det_bboxes.clone()
- _det_bboxes[:, :4] *= det_bboxes.new_tensor(
- img_metas[0][0]['scale_factor'])
-
- return [
- (_det_bboxes, det_labels),
- ]
-
- def simple_test_rpn(self, x, img_metas):
- """Test without augmentation, only for ``RPNHead`` and its variants,
- e.g., ``GARPNHead``, etc.
-
- Args:
- x (tuple[Tensor]): Features from the upstream network, each is
- a 4D-tensor.
- img_metas (list[dict]): Meta info of each image.
-
- Returns:
- list[Tensor]: Proposals of each image, each item has shape (n, 5),
- where 5 represent (tl_x, tl_y, br_x, br_y, score).
- """
- rpn_outs = self(x)
- proposal_list = self.get_bboxes(*rpn_outs, img_metas=img_metas)
- return proposal_list
-
- def aug_test_rpn(self, feats, img_metas):
- """Test with augmentation for only for ``RPNHead`` and its variants,
- e.g., ``GARPNHead``, etc.
-
- Args:
- feats (tuple[Tensor]): Features from the upstream network, each is
- a 4D-tensor.
- img_metas (list[dict]): Meta info of each image.
-
- Returns:
- list[Tensor]: Proposals of each image, each item has shape (n, 5),
- where 5 represent (tl_x, tl_y, br_x, br_y, score).
- """
- samples_per_gpu = len(img_metas[0])
- aug_proposals = [[] for _ in range(samples_per_gpu)]
- for x, img_meta in zip(feats, img_metas):
- proposal_list = self.simple_test_rpn(x, img_meta)
- for i, proposals in enumerate(proposal_list):
- aug_proposals[i].append(proposals)
- # reorganize the order of 'img_metas' to match the dimensions
- # of 'aug_proposals'
- aug_img_metas = []
- for i in range(samples_per_gpu):
- aug_img_meta = []
- for j in range(len(img_metas)):
- aug_img_meta.append(img_metas[j][i])
- aug_img_metas.append(aug_img_meta)
- # after merging, proposals will be rescaled to the original image size
- merged_proposals = [
- merge_aug_proposals(proposals, aug_img_meta, self.test_cfg)
- for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas)
- ]
- return merged_proposals
-
- if sys.version_info >= (3, 7):
-
- async def async_simple_test_rpn(self, x, img_metas):
- sleep_interval = self.test_cfg.pop('async_sleep_interval', 0.025)
- async with completed(
- __name__, 'rpn_head_forward',
- sleep_interval=sleep_interval):
- rpn_outs = self(x)
-
- proposal_list = self.get_bboxes(*rpn_outs, img_metas=img_metas)
- return proposal_list
-
- def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas):
- """Merge augmented detection bboxes and scores.
-
- Args:
- aug_bboxes (list[Tensor]): shape (n, 4*#class)
- aug_scores (list[Tensor] or None): shape (n, #class)
- img_metas (list[list[dict]]): Meta info of each augmented image, e.g. image shape, scale factor and flip.
-
- Returns:
- tuple[Tensor]: ``bboxes`` with shape (n,4), where
- 4 represent (tl_x, tl_y, br_x, br_y)
- and ``scores`` with shape (n,).
- """
- recovered_bboxes = []
- for bboxes, img_info in zip(aug_bboxes, img_metas):
- img_shape = img_info[0]['img_shape']
- scale_factor = img_info[0]['scale_factor']
- flip = img_info[0]['flip']
- flip_direction = img_info[0]['flip_direction']
- bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip,
- flip_direction)
- recovered_bboxes.append(bboxes)
- bboxes = torch.cat(recovered_bboxes, dim=0)
- if aug_scores is None:
- return bboxes
- else:
- scores = torch.cat(aug_scores, dim=0)
- return bboxes, scores
diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/utils/__init__.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/utils/__init__.py
deleted file mode 100644
index e74ba89e8c2101360d921a5f8437da48d0250e9a..0000000000000000000000000000000000000000
--- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/utils/__init__.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
-from .builder import build_linear_layer, build_transformer
-from .ckpt_convert import pvt_convert
-from .conv_upsample import ConvUpsample
-from .csp_layer import CSPLayer
-from .gaussian_target import gaussian_radius, gen_gaussian_target
-from .inverted_residual import InvertedResidual
-from .make_divisible import make_divisible
-from .misc import interpolate_as, sigmoid_geometric_mean
-from .normed_predictor import NormedConv2d, NormedLinear
-from .panoptic_gt_processing import preprocess_panoptic_gt
-from .point_sample import (get_uncertain_point_coords_with_randomness,
- get_uncertainty)
-from .positional_encoding import (LearnedPositionalEncoding,
- SinePositionalEncoding)
-from .res_layer import ResLayer, SimplifiedBasicBlock
-from .se_layer import DyReLU, SELayer
-from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
- DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
- nlc_to_nchw)
-
-__all__ = [
- 'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
- 'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
- 'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
- 'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
- 'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
- 'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
- 'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
- 'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean',
- 'preprocess_panoptic_gt', 'DyReLU',
- 'get_uncertain_point_coords_with_randomness', 'get_uncertainty'
-]
diff --git a/spaces/rorallitri/biomedical-language-models/logs/Download Free Plants Vs Zombies Full Version No Limited Time HOT!.md b/spaces/rorallitri/biomedical-language-models/logs/Download Free Plants Vs Zombies Full Version No Limited Time HOT!.md
deleted file mode 100644
index da499f26d89266b92cdff8965104d619d0866040..0000000000000000000000000000000000000000
--- a/spaces/rorallitri/biomedical-language-models/logs/Download Free Plants Vs Zombies Full Version No Limited Time HOT!.md
+++ /dev/null
@@ -1,10 +0,0 @@
-
-
Plants vs. Zombies Free Trial is a free PC version of Plants vs. Zombies, meant to advertise the full version of the game. The player can only play up to Level 3-4 in Adventure Mode in this version. After they finish Level 3-4, an advertisement called Upsell appears, suggests purchasing the full version, and automatically links the player to a way to get it. This version was removed in 2010, but a similar demo replaced it. That demo had a 1-hour time limit, after which it could no longer be played. It too is no longer available, since a demo version with no time limit replaced it; the 1-hour version was removed from PopCap's website in 2015.
-
- Venture to the Outer Edges of Neighborville Offline - Discover Giddy Park, PvE free-roam regions and Private Play modes all without an internet connection. - Customize Every Character for Battle - Earn in-game coins only through play, and spend them at the Rux store to unlock cool customization items, characters and perks. - Play with Your Favorite People -Party with up to three friends and goof around in Giddy Park or dive online into some 8v8 multiplayer in Turf Takeover, vanquishing opponents across a gigantic suburban skirmish. - Motion Controls & Touch Menus - Use motion controls for aiming, with a full range of settings to customize to your preference. Interact with UI menus throughout the game using touch input including map zoom. - Prize Map Selector - Choose from and interact with all 12 prize maps at any time, and exchange prize bulbs for unbeleafable rewards!
-
download free plants vs zombies full version no limited time
This game carries over most of the mechanics from the three previous installments; the gameplay works like a tower defense game in which players grow plants, using sun, to fight off a horde of zombies. Taking place between the events of Plants vs. Zombies 2 and Plants vs. Zombies 3, the plot centers around Crazy Dave and Penny travelling around the whole of Neighborville, sometimes visiting some really wacky places.
-
Plants vs. Zombies: World Traveler was a free-to-download game app that was available on the App Store, Google Play, and the game's website, similar to its predecessor. There was an HD version of this game, which was removed on June 1st, 2022. The main gameplay is similar to the previous installments: the player must defend their house from an abundance of zombies by growing plants, collecting sun to plant them according to the sun cost on their seed packets. It keeps some mechanics from the two previous installments, like Plant Food, the Endless Zone, mini-games, etc.
-
In May 2013, PopCap Games released a trailer revealing a sequel to the first game, titled Plants vs. Zombies 2: It's About Time.[12][13] The game was soft-launched for the iOS in Australia and New Zealand on July 10, 2013,[14] and was officially released on August 14, 2013, as a freemium title.[15] The game featured new locations and plants along with the addition of plant food, a power-up that can be used to enhance a plant for a short period and can either be bought using in-game currency or acquired by defeating zombies that are glowing green.[16] Along with these new add ons, the game continues to make updates from time to time. According to EA News, the Arena, and Penny's Pursuit, which is a sequel within the game, has been one of their latest major updates, aside from all the mini add ons.[17]
-
The plant expression system has recently drawn attention to the production of pharmaceutical and non-pharmaceutical proteins. In comparison to the mammalian or bacterial expression systems, protein expression in plants is much safer and more cost-effective. Although the protein yields of plant expression systems are relatively low, the process of growing plants is highly scalable. In addition, like other eukaryotic cells, plant cells are equipped with the mechanisms for posttranslational modifications, such as glycosylation in the ER, which is sometimes critical for the solubility and functionality of expressed proteins [12,13,14,15]. Moreover, several studies indicated that many proteins of different origins were fully functional when they were produced in plants [16,17,18,19,20]. Several pharmaceutical proteins such as vaccines, cytokines, and therapeutic antibodies have been successfully produced in plants such as tobacco, barley, carrot, potato, and maize [16,21,22,23,24,25,26,27]. Notably, transient expression systems using plant tissues rapidly provide large amounts of therapeutic proteins during epidemic or pandemic periods [28,29,30,31].
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/rorallitri/biomedical-language-models/logs/Libri Meriyll Permbajtja Viktor Canosinaj.rar.md b/spaces/rorallitri/biomedical-language-models/logs/Libri Meriyll Permbajtja Viktor Canosinaj.rar.md
deleted file mode 100644
index 37be97ac229e2e4951429d96f38112b62a94f3d6..0000000000000000000000000000000000000000
--- a/spaces/rorallitri/biomedical-language-models/logs/Libri Meriyll Permbajtja Viktor Canosinaj.rar.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-"""
-
-load_dotenv()
-openai.api_key = os.getenv('OPENAI_API_KEY')
-MODEL = "gpt-3.5-turbo"
-
-def get_filetext(filename, cache={}):
- if filename in cache:
- # If the file is already cached, return its contents from the cache
- return cache[filename]
- else:
- if not os.path.exists(filename):
- raise ValueError(f"ファイル '{filename}' が見つかりませんでした")
- with open(filename, "r") as f:
- text = f.read()
- # Cache the file contents
- cache[filename] = text
- return text
-
-class OpenAI:
-
- @classmethod
- def chat_completion(cls, prompt, start_with=""):
- constraints = get_filetext(filename = "constraints.md")
- template = get_filetext(filename = "template.md")
-
- # Define the payload for the ChatCompletion API request
- data = {
- "model": "gpt-3.5-turbo",
- "messages": [
- {"role": "system", "content": constraints}
- ,{"role": "system", "content": template}
- ,{"role": "assistant", "content": "Sure!"}
- ,{"role": "user", "content": prompt}
- ,{"role": "assistant", "content": start_with}
- ],
- }
-
- # Call the ChatCompletion API
- response = requests.post(
- "https://api.openai.com/v1/chat/completions",
- headers={
- "Content-Type": "application/json",
- "Authorization": f"Bearer {openai.api_key}"
- },
- json=data
- )
-
- # Extract the generated content from the ChatCompletion API response
- result = response.json()
- print(result)
- content = result["choices"][0]["message"]["content"].strip()
- return content
-
-class NajiminoAI:
-
- @classmethod
- def generate_emo_prompt(cls, user_message):
- template = get_filetext(filename="template.md")
- prompt = f"""
- {user_message}
- ---
- 上記を元に、下記テンプレートを埋めてください。
- ---
- {template}
- """
- return prompt
-
- @classmethod
- def generate_emo(cls, user_message):
- prompt = NajiminoAI.generate_emo_prompt(user_message);
- start_with = ""
- result = OpenAI.chat_completion(prompt=prompt, start_with=start_with)
- return result
-
-def main():
- iface = gr.Interface(fn=NajiminoAI.generate_emo,
- inputs=gr.Textbox(label=inputs_label),
- outputs=gr.Textbox(label=outputs_label),
- title=title,
- description=description,
- article=article,
- allow_flagging='never'
- )
-
- iface.launch()
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Chicken Gun 3.3.0 APK - A Hilarious and Exciting Game of Chicken vs Chicken for Android.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Chicken Gun 3.3.0 APK - A Hilarious and Exciting Game of Chicken vs Chicken for Android.md
deleted file mode 100644
index 27099a2d1f034e41074fc9922d114711d0799040..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Chicken Gun 3.3.0 APK - A Hilarious and Exciting Game of Chicken vs Chicken for Android.md
+++ /dev/null
@@ -1,114 +0,0 @@
-
-
Chicken Gun APK 3.3.0: A Fun and Crazy Multiplayer Shooter Game
-
If you are looking for a hilarious and action-packed shooter game that lets you play as armed chickens, then you should try Chicken Gun APK 3.3.0. This game is developed by ChaloApps and has over 50 million downloads on Google Play Store. In this game, you can join online matches with up to 10 players, customize your chicken with different weapons, beaks, sneakers, and caps, throw explosive eggs at your enemies, and chat with your teammates using voice chat. Chicken Gun APK 3.3.0 is the latest version of the game that was released on May 27, 2023, and it comes with some bug fixes and improvements.
-
What is Chicken Gun APK 3.3.0?
-
Chicken Gun APK 3.3.0 is an Android game that belongs to the action genre. It is a multiplayer shooter game that features chickens as the main characters. The game has two modes: team deathmatch and free for all. In team deathmatch, you can join a team of five chickens and fight against another team of five chickens in various maps. In free for all, you can fight against nine other chickens in a chaotic battle royale.
Chicken Gun APK 3.3.0 offers two game modes that you can choose from: team deathmatch and free for all. In team deathmatch, you can cooperate with your teammates to eliminate the enemy team and score more points than them. In free for all, you can compete with nine other chickens in a solo match where the last chicken standing wins.
-
Customizable chickens and weapons
-
One of the most fun aspects of Chicken Gun APK 3.3.0 is that you can customize your chicken with various items and accessories. You can change your chicken's weapon, beak, sneakers, and cap to suit your style and preference. You can also unlock new items by playing more matches and earning coins.
-
Explosive eggs and other items
-
Besides shooting your enemies with guns, you can also use explosive eggs to deal more damage and cause chaos in the battlefield. You can throw eggs at your enemies or at the environment to create explosions and traps. You can also use other items such as grenades, mines, rockets, and jetpacks to gain an advantage over your opponents.
-
Online multiplayer with voice chat
-
Chicken Gun APK 3.3.0 is an online multiplayer game that requires an internet connection to play. You can join matches with players from around the world or create your own private room to invite your friends. You can also communicate with your teammates using voice chat to coordinate your strategies and have fun.
-
Graphics and sound effects
-
Chicken Gun APK 3.3.0 has colorful and cartoonish graphics that make the game look appealing and amusing. The game also has realistic sound effects that add to the immersion and excitement of the game.
-
How to download and install Chicken Gun APK 3.3.0?
How to download and install Chicken Gun APK 3.3.0?
-
If you want to play Chicken Gun APK 3.3.0 on your Android device, you need to download and install the APK file from a trusted source. An APK file is a package file that contains the installation files and data of an Android app. You can download the APK file of Chicken Gun APK 3.3.0 from various websites that offer free and safe APK downloads, such as [APKPure], [APKMirror], or [Uptodown].
-
Requirements for Chicken Gun APK 3.3.0
-
Android version and device compatibility
-
Before you download and install Chicken Gun APK 3.3.0, you need to make sure that your device meets the minimum requirements for the game. According to the developer, Chicken Gun APK 3.3.0 requires Android 4.4 or higher and at least 1 GB of RAM to run smoothly. You also need to check if your device is compatible with the game by looking at the supported devices list on the Google Play Store page of the game.
-
Storage space and permissions
-
Another thing you need to consider before downloading and installing Chicken Gun APK 3.3.0 is the storage space and permissions that the game requires. According to the developer, Chicken Gun APK 3.3.0 requires about 100 MB of free storage space on your device. You also need to grant some permissions to the game, such as access to your microphone, camera, storage, and network. These permissions are necessary for the game to function properly and provide you with the best gaming experience.
-
chicken gun game download apk 3.3.0
-chicken gun mod apk 3.3.0 unlimited money
-chicken gun online multiplayer apk 3.3.0
-chicken gun latest version apk 3.3.0
-chicken gun apk 3.3.0 free download for android
-chicken gun xapk 3.3.0
-chicken gun apk 3.3.0 update
-chicken gun apk 3.3.0 hack
-chicken gun apk 3.3.0 offline
-chicken gun apk 3.3.0 no ads
-chicken gun apk 3.3.0 android 5.0+
-chicken gun apk 3.3.0 gameplay
-chicken gun apk 3.3.0 review
-chicken gun apk 3.3.0 new features
-chicken gun apk 3.3.0 install
-chicken gun apk 3.3.0 file size
-chicken gun apk 3.3.0 requirements
-chicken gun apk 3.3.0 tips and tricks
-chicken gun apk 3.3.0 cheats
-chicken gun apk 3.3.0 best weapons
-chicken gun apk 3.3.0 skins and outfits
-chicken gun apk 3.3.0 explosive eggs
-chicken gun apk 3.3.0 team mode
-chicken gun apk 3.3.0 solo mode
-chicken gun apk 3.3.0 fun and action
-chicken gun apk 3.3.0 how to play
-chicken gun apk 3.3.0 tutorial
-chicken gun apk 3.3.0 guide
-chicken gun apk 3.3.0 walkthrough
-chicken gun apk 3.3.0 challenges
-chicken gun apk 3.3.0 achievements
-chicken gun apk 3.3.0 leaderboard
-chicken gun apk 3.3.0 ranking system
-chicken gun apk 3.3.0 custom matches
-chicken gun apk 3.3.0 chat and voice chat
-chicken gun apk 3.3.0 friends and invite system
-chicken gun apk 3.3.0 support and feedback
-
Internet connection and battery life
-
Finally, you need to have a stable internet connection and enough battery life to play Chicken Gun APK 3.3.0 online with other players. The game is an online multiplayer game that requires an internet connection to join matches and chat with your teammates. You also need to have enough battery life on your device to avoid interruptions and low performance while playing the game.
-
Steps to download and install Chicken Gun APK 3.3.0
-
Download the APK file from a trusted source
-
The first step to download and install Chicken Gun APK 3.3.0 is to download the APK file from a trusted source, such as [APKPure], [APKMirror], or [Uptodown]. You can use your browser or a downloader app to download the APK file from these websites. Make sure that you download the latest version of the game, which is 3.3.0, and that the file size matches the one stated on the website.
-
Enable unknown sources in your device settings
-
The second step to download and install Chicken Gun APK 3.3.0 is to enable unknown sources in your device settings. This is because Android devices do not allow installing apps from sources other than the Google Play Store by default, for security reasons. To enable unknown sources, you need to go to your device settings, then security or privacy, then toggle on the option that says "allow installation of apps from unknown sources" or something similar. This will allow you to install apps from sources other than the Google Play Store.
-
Install the APK file and launch the game
-
The third and final step to download and install Chicken Gun APK 3.3.0 is to install the APK file and launch the game. To install the APK file, you need to locate it in your device's storage, then tap on it and follow the instructions on the screen. The installation process may take a few seconds or minutes, depending on your device's speed and performance. Once the installation is complete, you can launch the game by tapping on its icon on your home screen or app drawer. Enjoy playing Chicken Gun APK 3.3.0 online with other players!
-
Conclusion
-
Chicken Gun APK 3.3.0 is a fun and crazy multiplayer shooter game that lets you play as armed chickens in various maps and modes. You can customize your chicken with different weapons, beaks, sneakers, and caps, throw explosive eggs at your enemies, and chat with your teammates using voice chat. You can download and install Chicken Gun APK 3.3.0 on your Android device by following these steps: download the APK file from a trusted source, enable unknown sources in your device settings, install the APK file and launch the game. Chicken Gun APK 3.3.0 is a great game to play with your friends or with strangers online, as it offers a lot of fun and laughter. If you are a fan of shooter games and chickens, you should definitely give Chicken Gun APK 3.3.0 a try.
-
FAQs
-
Here are some frequently asked questions about Chicken Gun APK 3.3.0 that you may find helpful:
-
-
-
Question
-
Answer
-
-
-
Is Chicken Gun APK 3.3.0 safe to download and install?
-
Yes, Chicken Gun APK 3.3.0 is safe to download and install, as long as you download it from a trusted source, such as [APKPure], [APKMirror], or [Uptodown]. These websites scan the APK files for viruses and malware before uploading them, so you can be sure that they are clean and secure.
-
-
-
Is Chicken Gun APK 3.3.0 free to play?
-
Yes, Chicken Gun APK 3.3.0 is free to play, but it contains ads and in-app purchases that you can buy with real money. You can disable the ads by turning off your internet connection or by purchasing the ad-free version of the game for $2.99. You can also buy coins and gems with real money to unlock more items and accessories for your chicken.
-
-
-
How can I play Chicken Gun APK 3.3.0 with my friends?
-
You can play Chicken Gun APK 3.3.0 with your friends by creating a private room and inviting them to join using a code. To create a private room, you need to tap on the "create room" button on the main menu, then choose the map, mode, and time limit for your match. You will then see a code that you can share with your friends to invite them to your room. To join a private room, you need to tap on the "join room" button on the main menu, then enter the code that your friend gave you.
-
-
-
How can I use voice chat in Chicken Gun APK 3.3.0?
-
You can use voice chat in Chicken Gun APK 3.3.0 by tapping on the microphone icon on the top right corner of the screen during a match. You will then be able to talk to your teammates or opponents using your device's microphone. You can also mute or unmute yourself or other players by tapping on their names on the scoreboard.
-
-
-
What are some tips and tricks for playing Chicken Gun APK 3.3.0?
-
Some tips and tricks for playing Chicken Gun APK 3.3.0 are:
-
-
-
- Use different weapons and items depending on the map and mode you are playing.
-
- Aim for the head or the legs of your enemies to deal more damage and slow them down.
-
-
-
- Throw eggs at walls or floors to create explosions and traps for your enemies.
-
- Use jetpacks or rockets to fly over obstacles and surprise your enemies.
-
-
-
- Use voice chat to communicate with your teammates and coordinate your strategies.
-
- Have fun and enjoy the game!
-
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download FNAF AR APK and Experience the Real World Terror of Animatronics.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download FNAF AR APK and Experience the Real World Terror of Animatronics.md
deleted file mode 100644
index a097affbce1a1b1c66c2a8233374019afa620a0b..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download FNAF AR APK and Experience the Real World Terror of Animatronics.md
+++ /dev/null
@@ -1,121 +0,0 @@
-
-
How to Download and Play FNAF AR APK from Android Oyun Club
-
If you are a fan of horror games, you have probably heard of Five Nights at Freddy's (FNAF), a series of games where you have to survive the attacks of malfunctioning animatronics in a haunted pizzeria. But did you know that there is a new game in the FNAF franchise that uses augmented reality (AR) technology to bring the animatronics to your real world? It's called Five Nights at Freddy's AR: Special Delivery (FNAF AR), and it's one of the most terrifying and immersive games you can play on your mobile device.
In this article, we will show you how to download and play FNAF AR APK from Android Oyun Club, a website that offers free APK files for Android games. We will also tell you about the features and content of FNAF AR game, and give you some tips and tricks to survive the horrors that await you.
-
How to Install and Play FNAF AR on Android Devices
-
FNAF AR is an AR game that requires access to your device's GPS and camera, as well as a data connection (mobile or WiFi). It also requires a compatible device that supports ARCore, Google's platform for building AR experiences. You can check if your device is compatible here.
-
If your device meets these requirements, you can follow these steps to install and play FNAF AR on your Android device:
-
-
Go to Android Oyun Club, a website that offers free APK files for Android games.
-
Search for FNAF AR in the search bar, or go directly to this link.
-
Click on the green "Download" button, and wait for the APK file to be downloaded.
-
Once the download is complete, open the APK file and tap on "Install". You may need to enable "Unknown sources" in your device settings if you haven't done so before.
-
After the installation is done, tap on "Open" to launch the game.
-
Accept the terms of service and privacy policy, and grant the necessary permissions for the game to access your GPS, camera, microphone, storage, etc.
-
Create an account or log in with your existing account, and choose a username.
-
Enjoy the game!
-
-
Tips and Tricks to Survive the Animatronics
-
FNAF AR is not an easy game. You will have to face an endless stream of hostile animatronics that will follow you wherever you go, and try to survive their attacks by using your limited resources wisely. Here are some tips and tricks that may help you:
-
-
Use your flashlight sparingly. It drains your battery quickly, and it may reveal your location to the animatronics. Only use it when you need to find static or charge at them.
-
Learn how each animatronic behaves. Some of them have different patterns and sounds, and some of them may have special abilities or tricks to fool you. For example, Springtrap can fake his haywire, and Mangle can hang from the ceiling.
-
Listen carefully to the audio cues. The animatronics will make noises when they move, charge, or haywire. You can use these cues to locate them and react accordingly.
-
Look for the static. The static is a visual indicator of where the animatronic is. The more intense the static, the closer the animatronic is. You can use your flashlight to make the static more visible, but be careful not to drain your battery.
-
Don't look at the haywire. When an animatronic haywires, it will twitch and glitch out in front of you. If you look at it, you will lose instantly. You have to look away as fast as possible, and keep looking away until the haywire stops. Some animatronics may require you to wear a mask or hold still during their haywire.
-
Shock them when they charge. When an animatronic charges at you, it will run towards you with glowing eyes. This is your chance to shock them and end the encounter. You have to time your shock right, or you will miss and get jumpscared. Some animatronics may fake their charge or cloak themselves, so be alert.
-
-
Features and Content of FNAF AR Game
-
FNAF AR is not just a simple horror game. It has many features and content that make it a unique and enjoyable experience for fans of the FNAF franchise and AR games in general. Here are some of the features and content that you can find in FNAF AR game:
-
-
Augmented Reality Gameplay and Location-Based Mechanics
-
FNAF AR uses augmented reality technology to bring the animatronics to your real world. You can see them through your device's camera, and interact with them using your flashlight and shocker. The game also uses location-based mechanics, which means that the animatronics will follow you wherever you go, and that different locations may have different effects on the gameplay. For example, playing in a dark room may make it harder to see the static, while playing in a noisy place may make it harder to hear the audio cues.
-
Different Animatronics and Skins to Collect and Encounter
-
FNAF AR features many animatronics from the FNAF series, as well as some new ones exclusive to this game. Each animatronic has its own behavior, difficulty, and appearance. You can encounter them randomly or by using lures, which are items that attract specific animatronics to your location. You can also collect different skins for the animatronics, which are cosmetic variations that change their look and sometimes their behavior. Some skins are based on holidays, seasons, or events, while others are original creations by Illumix.
-
Tools and Resources to Use and Manage
-
FNAF AR requires you to use and manage various tools and resources in order to survive the animatronic attacks. These include:
-
-
-
Tool/Resource
-
Description
-
-
-
Battery
-
Your main resource that powers your flashlight and shocker. It drains over time and when you use your tools. You can recharge it by collecting batteries on the map or by buying them with Faz-Coins.
-
-
-
Flashlight
-
Your main tool that helps you find static and charge at the animatronics. It drains your battery when you use it.
-
-
-
Shocker
-
Your main weapon that shocks the animatronics when they charge at you. It drains your battery when you use it.
-
-
-
Mask
-
A tool that helps you avoid certain animatronics' haywire by wearing it when they glitch out. It does not drain your battery when you use it.
-
-
-
Transponder
-
A tool that reveals the location of all nearby animatronics on the map for a limited time. It does not drain your battery when you use it.
-
-
-
Radio Jammer
-
A tool that prevents new animatronics from appearing on the map for a limited time. It does not drain your battery when you use it.
-
-
-
Lures
-
Items that attract specific animatronics to your location for one encounter. You can get them by completing daily challenges, buying them with Faz-Coins, or receiving them as gifts from friends.
-
-
-
-
Faz-Coins
-
The in-game currency that you can use to buy batteries, lures, skins, and other items. You can earn them by completing daily challenges, watching ads, or buying them with real money.
-
-
-
Parts
-
Items that you can use to repair your animatronics or trade them for other items. You can get them by defeating animatronics, salvaging them, or buying them with Faz-Coins.
-
-
-
CPUs
-
Items that determine the behavior and difficulty of your animatronics. You can get them by defeating animatronics, salvaging them, or buying them with Faz-Coins.
-
-
-
Plushsuits
-
Items that determine the appearance and sound of your animatronics. You can get them by defeating animatronics, salvaging them, or buying them with Faz-Coins.
-
-
-
Mods
-
Items that enhance the performance and stats of your animatronics. You can get them by defeating animatronics, salvaging them, or buying them with Faz-Coins.
-
-
-
Remnant
-
A mysterious substance that powers your animatronics and protects you from the dark. You can collect it by using the flashlight on the map, or by defeating animatronics. You can also lose it by getting jumpscared or encountering Shadow Bonnie.
-
-
-
Customizable Animatronics and Multiplayer Mode
-
FNAF AR also allows you to create and customize your own animatronics using the parts, CPUs, plushsuits, and mods that you collect. You can mix and match different components to create unique combinations of appearance and behavior. You can also send your animatronics to your friends or other players around the world, and challenge them to survive your creations. You can also receive animatronics from other players, and try to defeat them for rewards. This adds a social and competitive element to the game, as well as a creative outlet for your imagination.
-
Conclusion
-
FNAF AR is a game that combines horror, augmented reality, and location-based mechanics to create a thrilling and immersive experience for mobile devices. It features many animatronics and skins from the FNAF series, as well as new ones exclusive to this game. It also offers various tools and resources to use and manage, as well as a customizable and multiplayer mode that lets you create and share your own animatronics with other players. If you are looking for a game that will scare you, challenge you, and entertain you, FNAF AR is the game for you.
-
If you want to download and play FNAF AR APK from Android Oyun Club, just follow the steps we have outlined in this article, and you will be ready to face the horrors that await you in your real world. But be warned: FNAF AR is not for the faint of heart. Are you brave enough to try it?
-
FAQs
-
Here are some of the frequently asked questions about FNAF AR game:
-
What is the difference between FNAF AR and other FNAF games?
-
FNAF AR is different from other FNAF games in several ways. First of all, it uses augmented reality technology to bring the animatronics to your real world, instead of confining you to a fixed location. Second of all, it uses location-based mechanics to make the gameplay more dynamic and unpredictable, instead of following a set pattern or schedule. Third of all, it allows you to customize and share your own animatronics with other players, instead of only facing the ones created by the developers.
-
Is FNAF AR free to play or does it have in-app purchases?
-
FNAF AR is free to play, but it does have in-app purchases that allow you to buy more batteries, lures, skins, parts, CPUs, plushsuits, mods, remnant, and Faz-Coins. These purchases are optional and not required to enjoy the game.
-
How can I get more parts, CPUs, and plushsuits in FNAF AR?
-
You can get more parts, CPUs, and plushsuits in FNAF AR by defeating animatronics that have them equipped. You can also salvage them from your own animatronics that you send out or receive from other players. Alternatively, you can buy them with Faz-Coins in the shop.
-
How can I send my animatronics to my friends or other players in FNAF AR?
-
You can send your animatronics to your friends or other players in FNAF AR by using the "Deploy" option in the workshop. You can choose which animatronic to send, and which friend or player to target. You can also use the "Random" option to send your animatronic to a random player. You can send up to four animatronics at a time, and you can recall them at any time. You can also receive animatronics from your friends or other players, and try to defeat them for rewards.
-
What are some of the best animatronics and skins in FNAF AR?
-
This is a subjective question, as different players may have different preferences and opinions on the best animatronics and skins in FNAF AR. However, some of the most popular and rare ones are:
-
-
Springtrap: A decayed and damaged version of Spring Bonnie, who is one of the most difficult and dangerous animatronics in the game. He can fake his haywire, cloak himself, and charge at you from any direction.
-
Golden Freddy: A mysterious and elusive version of Freddy Fazbear, who is one of the most coveted and sought-after animatronics in the game. He only appears during special events, and he can instantly end your streak if you fail to shock him.
-
Frostbear: A festive and frosty version of Freddy Fazbear, who is one of the coolest and most unique skins in the game. He has a snowy appearance, an icy voice, and a freezing effect that makes your screen frost over.
-
Flamethrower Endo: A fiery and explosive version of Endoskeleton, who is one of the newest and most exciting skins in the game. He has a burning appearance, a blazing voice, and a flaming effect that makes your screen catch fire.
-
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Supreme Duelist Stickman 1.9.2 Mod APK with Unlocked Features.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Supreme Duelist Stickman 1.9.2 Mod APK with Unlocked Features.md
deleted file mode 100644
index 40afd10dc1bf9c759d5ac2496335075ed350fbe8..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Supreme Duelist Stickman 1.9.2 Mod APK with Unlocked Features.md
+++ /dev/null
@@ -1,120 +0,0 @@
-
-
Supreme Duelist Stickman Mod APK 1.9 2: A Fun and Exciting Action Game
-
If you are looking for a simple but addictive action game, you should try Supreme Duelist Stickman. This game lets you control a stickman and fight against other stickmen in various modes and arenas. You can use different weapons, skills, and physics to defeat your opponents and have fun.
Supreme Duelist Stickman is a popular action game developed by Neron's Brother. It has more than 100 million downloads on Google Play Store and a rating of 4.3 out of 5 stars. The game is suitable for all ages and can be played offline or online with friends.
-
Features of Supreme Duelist Stickman
-
Some of the features that make Supreme Duelist Stickman a great game are:
-
-
Simple and intuitive controls: You can move your stickman with the joystick and attack with the buttons. You can also switch weapons, jump, and use skills with ease.
-
Various modes and arenas: You can choose from different modes such as single player, two players, survival, tournament, and online multiplayer. You can also select from different arenas such as rooftop, bridge, lava, ice, and more.
-
Different weapons and skills: You can equip your stickman with various weapons such as swords, guns, axes, hammers, spears, and more. You can also use skills such as teleportation, invisibility, shield, and more.
-
Realistic physics and ragdoll effects: The game uses realistic physics and ragdoll effects to make the fights more fun and dynamic. You can see your stickman fly, bounce, fall, and break in hilarious ways.
-
Customizable stickmen: You can customize your stickman's appearance by changing the color, hat, face, and weapon.
-
-
How to play Supreme Duelist Stickman
-
The gameplay of Supreme Duelist Stickman is simple and easy to learn. Here are the basic steps to play the game:
-
-
Select a mode and an arena.
-
Select your weapon and skill.
-
Move your stickman with the joystick and attack with the buttons.
-
Use your weapon and skill wisely to defeat your opponent.
-
Enjoy the physics and ragdoll effects.
-
-
What is Supreme Duelist Stickman Mod APK 1.9 2?
-
Supreme Duelist Stickman Mod APK 1.9 2 is a modified version of the original game that gives you some extra benefits. The mod apk is not available on Google Play Store, but you can download it from other sources for free.
-
Benefits of Supreme Duelist Stickman Mod APK 1.9 2
-
Some of the benefits that you can get from Supreme Duelist Stickman Mod APK 1.9 2 are:
-
-
All weapons and skills unlocked: You can access all the weapons and skills in the game without spending any money or coins.
-
No ads: You can enjoy the game without any annoying ads interrupting your gameplay.
-
No root required: You don't need to root your device to install or run the mod apk.
-
-How to download and install Supreme Duelist Stickman Mod APK 1.9 2
-
To download and install Supreme Duelist Stickman Mod APK 1.9 2, you need to follow these steps:
-
-
Click on the link below to download the mod apk file.
-
Allow unknown sources on your device settings.
-
Locate the downloaded file and tap on it to install it.
Why should you play Supreme Duelist Stickman Mod APK 1.9 2?
-
Supreme Duelist Stickman Mod APK 1.9 2 is a fun and exciting action game that you should play for many reasons. Here are some of them:
-
Pros and cons of Supreme Duelist Stickman Mod APK 1.9 2
-
Like any other game, Supreme Duelist Stickman Mod APK 1.9 2 has its pros and cons. Here are some of them:
-
supreme duelist stickman unlocked mod apk 1.9 2
-supreme duelist stickman 1.9 2 mod apk download
-supreme duelist stickman hack mod apk 1.9 2
-supreme duelist stickman mod apk 1.9 2 free
-supreme duelist stickman mod apk 1.9 2 latest version
-supreme duelist stickman mod apk 1.9 2 unlimited money
-supreme duelist stickman mod apk 1.9 2 android
-supreme duelist stickman mod apk 1.9 2 happymod
-supreme duelist stickman mod apk 1.9 2 online
-supreme duelist stickman mod apk 1.9 2 offline
-supreme duelist stickman mod apk 1.9 2 no ads
-supreme duelist stickman mod apk 1.9 2 all characters
-supreme duelist stickman mod apk 1.9 2 gameplay
-supreme duelist stickman mod apk 1.9 2 review
-supreme duelist stickman mod apk 1.9 2 features
-supreme duelist stickman mod apk 1.9 2 update
-supreme duelist stickman mod apk 1.9 2 cheats
-supreme duelist stickman mod apk 1.9 2 tips
-supreme duelist stickman mod apk 1.9 2 tricks
-supreme duelist stickman mod apk 1.9 2 guide
-supreme duelist stickman mod apk 1.9 2 tutorial
-supreme duelist stickman mod apk 1.9 2 install
-supreme duelist stickman mod apk 1.9 2 how to play
-supreme duelist stickman mod apk 1.9 2 best settings
-supreme duelist stickman mod apk 1.9 2 requirements
-supreme duelist stickman mod apk 1.9 2 size
-supreme duelist stickman mod apk 1.9 2 file
-supreme duelist stickman mod apk 1.9 2 link
-supreme duelist stickman mod apk 1.9 2 mirror
-supreme duelist stickman mod apk 1.9 2 alternative
-supreme duelist stickman mod apk version:1.9.2 [unlocked]
-download supreme duelist stickman v1.9.2 [mod]
-how to get supreme duelist stickman v1.9.2 [mod] for free
-what is new in supreme duelist stickman v1.9.2 [mod]
-is supreme duelist stickman v1.9.2 [mod] safe to use
-how to uninstall supreme duelist stickman v1.9.2 [mod]
-how to update supreme duelist stickman v1.9.2 [mod]
-how to fix supreme duelist stickman v1.9.2 [mod] errors
-how to hack supreme duelist stickman v1.9.2 [mod]
-how to unlock all modes in supreme duelist stickman v1.9.2 [mod]
-
-
Pros
Cons
-
- You can enjoy all the weapons and skills without spending money or coins.
- The game may not be compatible with some devices or versions.
-
- You can play the game without any ads disturbing you.
- The game may not be updated regularly or have some bugs.
-
- You can play the game offline or online with friends.
- The game may be detected as a virus or malware by some antivirus programs.
-
- You can customize your stickman's appearance and have fun with the physics and ragdoll effects.
- The game may be too easy or boring for some players who prefer more challenge or variety.
-
-
Tips and tricks for Supreme Duelist Stickman Mod APK 1.9 2
-
If you want to improve your skills and have more fun with Supreme Duelist Stickman Mod APK 1.9 2, you can use these tips and tricks:
-
-
- Experiment with different weapons and skills to find the ones that suit your style and strategy.
-
- Use the environment and objects to your advantage. You can push, pull, throw, or break them to create obstacles or opportunities for yourself or your opponent.
-
- Be aware of your health and stamina bars. Don't let them run out or you will be vulnerable to attacks.
-
- Use your skills wisely. Don't waste them on unnecessary situations or when they are on cooldown.
-
- Play with friends online or offline to have more fun and challenge. You can also chat, taunt, or cooperate with them.
-
-
Conclusion
-
Supreme Duelist Stickman Mod APK 1.9 2 is a fun and exciting action game that lets you control a stickman and fight against other stickmen in various modes and arenas. You can use different weapons, skills, and physics to defeat your opponents and have fun. You can also enjoy the mod features such as all weapons and skills unlocked, no ads, and no root required. If you are looking for a simple but addictive action game, you should try Supreme Duelist Stickman Mod APK 1.9 2.
-
FAQs
-
Here are some frequently asked questions about Supreme Duelist Stickman Mod APK 1.9 2:
-
-
Q: Is Supreme Duelist Stickman Mod APK 1.9 2 safe to download and install?
-
A: Yes, Supreme Duelist Stickman Mod APK 1.9 2 is safe to download and install as long as you get it from a trusted source. However, you should always scan the file before installing it and use it at your own risk.
-
Q: How can I play Supreme Duelist Stickman online with friends?
-
A: To play Supreme Duelist Stickman online with friends, you need to select the online multiplayer mode and create or join a room. You can also invite your friends by sharing the room code or link.
-
Q: How can I update Supreme Duelist Stickman Mod APK 1.9 2?
-
A: To update Supreme Duelist Stickman Mod APK 1.9 2, you need to check if there is a new version available on the source where you downloaded it from. If there is, you need to download and install it again. You may need to uninstall the previous version first.
-
Q: What are the alternatives to Supreme Duelist Stickman Mod APK 1.9 2?
-
A: Some of the alternatives to Supreme Duelist Stickman Mod APK 1.9 2 are Stickman Legends, Stick War: Legacy, Stick Fight: The Game, and Stickman Party.
-
Q: How can I contact the developer of Supreme Duelist Stickman Mod APK 1.9 2?
-
A: You can contact the developer of Supreme Duelist Stickman Mod APK 1.9 2 by sending an email to neronsbrother@gmail.com or visiting their Facebook page.
- 401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/skf15963/summary/fengshen/examples/clue_sim/loss.py b/spaces/skf15963/summary/fengshen/examples/clue_sim/loss.py
deleted file mode 100644
index 537e2347f65aa952b0eb852c23a39901b0fef52e..0000000000000000000000000000000000000000
--- a/spaces/skf15963/summary/fengshen/examples/clue_sim/loss.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# coding=utf-8
-# Copyright 2021 The IDEA Authors. All rights reserved.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import torch
-from torch.nn import functional as F
-
-
-class FocalLoss(torch.nn.Module):
- """Multi-class Focal loss implementation"""
-
- def __init__(self, gamma=2, weight=None, ignore_index=-100):
- super(FocalLoss, self).__init__()
- self.gamma = gamma
- self.weight = weight
- self.ignore_index = ignore_index
-
- def forward(self, input, target):
- """
- input: [N, C]
- target: [N, ]
- """
- logpt = F.log_softmax(input, dim=1)
- pt = torch.exp(logpt)
- logpt = (1-pt)**self.gamma * logpt
- loss = F.nll_loss(logpt, target, self.weight, ignore_index=self.ignore_index)
- return loss
-
-# Cross-entropy with label smoothing to prevent overfitting
-
-
-class LabelSmoothingCorrectionCrossEntropy(torch.nn.Module):
- def __init__(self, eps=0.1, reduction='mean', ignore_index=-100):
- super(LabelSmoothingCorrectionCrossEntropy, self).__init__()
- self.eps = eps
- self.reduction = reduction
- self.ignore_index = ignore_index
-
- def forward(self, output, target):
- c = output.size()[-1]
- log_preds = F.log_softmax(output, dim=-1)
- if self.reduction == 'sum':
- loss = -log_preds.sum()
- else:
- loss = -log_preds.sum(dim=-1)
- if self.reduction == 'mean':
- loss = loss.mean()
-
- # task specific
- labels_hat = torch.argmax(output, dim=1)
- lt_sum = labels_hat + target
- abs_lt_sub = abs(labels_hat - target)
- correction_loss = 0
- for i in range(c):
- if lt_sum[i] == 0:
- pass
- elif lt_sum[i] == 1:
- if abs_lt_sub[i] == 1:
- pass
- else:
- correction_loss -= self.eps*(0.5945275813408382)
- else:
- correction_loss += self.eps*(1/0.32447699714575207)
- correction_loss /= c
- # print(correction_loss)
- return loss*self.eps/c + (1-self.eps) * \
- F.nll_loss(log_preds, target, reduction=self.reduction, ignore_index=self.ignore_index) + correction_loss
diff --git a/spaces/skyxx/skyxxChat/locale/extract_locale.py b/spaces/skyxx/skyxxChat/locale/extract_locale.py
deleted file mode 100644
index 32b0924bd6dffe150cb3e481ddadef836b91b83c..0000000000000000000000000000000000000000
--- a/spaces/skyxx/skyxxChat/locale/extract_locale.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import os
-import json
-import re
-
-# Define regular expression patterns
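-# The pattern captures the quoted argument of i18n("...") and i18n("""...""") calls so the strings can be collected as translation keys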
-pattern = r'i18n\((\"{3}.*?\"{3}|\".*?\")\)'
-
-# Load the .py file
-with open('ChuanhuChatbot.py', 'r', encoding='utf-8') as f:
- contents = f.read()
-
-# Load the .py files in the modules folder
-for filename in os.listdir("modules"):
- if filename.endswith(".py"):
- with open(os.path.join("modules", filename), "r", encoding="utf-8") as f:
- contents += f.read()
-
-# Matching with regular expressions
-matches = re.findall(pattern, contents, re.DOTALL)
-
-# Convert to key/value pairs
-data = {match.strip('()"'): '' for match in matches}
-
-# Save as a JSON file
-with open('labels.json', 'w', encoding='utf-8') as f:
- json.dump(data, f, ensure_ascii=False, indent=4)
\ No newline at end of file
diff --git a/spaces/sneedium/dvatch_captcha_sneedium_old/transforms.py b/spaces/sneedium/dvatch_captcha_sneedium_old/transforms.py
deleted file mode 100644
index 5a7042f3368bc832566d5c22d1e18abe5d8547f5..0000000000000000000000000000000000000000
--- a/spaces/sneedium/dvatch_captcha_sneedium_old/transforms.py
+++ /dev/null
@@ -1,329 +0,0 @@
-import math
-import numbers
-import random
-
-import cv2
-import numpy as np
-from PIL import Image
-from torchvision import transforms
-from torchvision.transforms import Compose
-
-
-def sample_asym(magnitude, size=None):
- return np.random.beta(1, 4, size) * magnitude
-
-def sample_sym(magnitude, size=None):
- return (np.random.beta(4, 4, size=size) - 0.5) * 2 * magnitude
-
-def sample_uniform(low, high, size=None):
- return np.random.uniform(low, high, size=size)
-
-def get_interpolation(type='random'):
- if type == 'random':
- choice = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA]
- interpolation = choice[random.randint(0, len(choice)-1)]
- elif type == 'nearest': interpolation = cv2.INTER_NEAREST
- elif type == 'linear': interpolation = cv2.INTER_LINEAR
- elif type == 'cubic': interpolation = cv2.INTER_CUBIC
- elif type == 'area': interpolation = cv2.INTER_AREA
-    else: raise TypeError('Only nearest, linear, cubic and area interpolation types are supported!')
- return interpolation
-
-class CVRandomRotation(object):
- def __init__(self, degrees=15):
- assert isinstance(degrees, numbers.Number), "degree should be a single number."
- assert degrees >= 0, "degree must be positive."
- self.degrees = degrees
-
- @staticmethod
- def get_params(degrees):
- return sample_sym(degrees)
-
- def __call__(self, img):
- angle = self.get_params(self.degrees)
- src_h, src_w = img.shape[:2]
- M = cv2.getRotationMatrix2D(center=(src_w/2, src_h/2), angle=angle, scale=1.0)
- abs_cos, abs_sin = abs(M[0,0]), abs(M[0,1])
- dst_w = int(src_h * abs_sin + src_w * abs_cos)
- dst_h = int(src_h * abs_cos + src_w * abs_sin)
- M[0, 2] += (dst_w - src_w)/2
- M[1, 2] += (dst_h - src_h)/2
-
- flags = get_interpolation()
- return cv2.warpAffine(img, M, (dst_w, dst_h), flags=flags, borderMode=cv2.BORDER_REPLICATE)
-
-class CVRandomAffine(object):
- def __init__(self, degrees, translate=None, scale=None, shear=None):
- assert isinstance(degrees, numbers.Number), "degree should be a single number."
- assert degrees >= 0, "degree must be positive."
- self.degrees = degrees
-
- if translate is not None:
- assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
- "translate should be a list or tuple and it must be of length 2."
- for t in translate:
- if not (0.0 <= t <= 1.0):
- raise ValueError("translation values should be between 0 and 1")
- self.translate = translate
-
- if scale is not None:
- assert isinstance(scale, (tuple, list)) and len(scale) == 2, \
- "scale should be a list or tuple and it must be of length 2."
- for s in scale:
- if s <= 0:
- raise ValueError("scale values should be positive")
- self.scale = scale
-
- if shear is not None:
- if isinstance(shear, numbers.Number):
- if shear < 0:
- raise ValueError("If shear is a single number, it must be positive.")
- self.shear = [shear]
- else:
- assert isinstance(shear, (tuple, list)) and (len(shear) == 2), \
- "shear should be a list or tuple and it must be of length 2."
- self.shear = shear
- else:
- self.shear = shear
-
- def _get_inverse_affine_matrix(self, center, angle, translate, scale, shear):
- # https://github.com/pytorch/vision/blob/v0.4.0/torchvision/transforms/functional.py#L717
- from numpy import sin, cos, tan
-
- if isinstance(shear, numbers.Number):
- shear = [shear, 0]
-
-        if not (isinstance(shear, (tuple, list)) and len(shear) == 2):
- raise ValueError(
- "Shear should be a single value or a tuple/list containing " +
- "two values. Got {}".format(shear))
-
- rot = math.radians(angle)
- sx, sy = [math.radians(s) for s in shear]
-
- cx, cy = center
- tx, ty = translate
-
- # RSS without scaling
- a = cos(rot - sy) / cos(sy)
- b = -cos(rot - sy) * tan(sx) / cos(sy) - sin(rot)
- c = sin(rot - sy) / cos(sy)
- d = -sin(rot - sy) * tan(sx) / cos(sy) + cos(rot)
-
- # Inverted rotation matrix with scale and shear
- # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
- M = [d, -b, 0,
- -c, a, 0]
- M = [x / scale for x in M]
-
- # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
- M[2] += M[0] * (-cx - tx) + M[1] * (-cy - ty)
- M[5] += M[3] * (-cx - tx) + M[4] * (-cy - ty)
-
- # Apply center translation: C * RSS^-1 * C^-1 * T^-1
- M[2] += cx
- M[5] += cy
- return M
-
- @staticmethod
- def get_params(degrees, translate, scale_ranges, shears, height):
- angle = sample_sym(degrees)
- if translate is not None:
- max_dx = translate[0] * height
- max_dy = translate[1] * height
- translations = (np.round(sample_sym(max_dx)), np.round(sample_sym(max_dy)))
- else:
- translations = (0, 0)
-
- if scale_ranges is not None:
- scale = sample_uniform(scale_ranges[0], scale_ranges[1])
- else:
- scale = 1.0
-
- if shears is not None:
- if len(shears) == 1:
- shear = [sample_sym(shears[0]), 0.]
- elif len(shears) == 2:
- shear = [sample_sym(shears[0]), sample_sym(shears[1])]
- else:
- shear = 0.0
-
- return angle, translations, scale, shear
-
-
- def __call__(self, img):
- src_h, src_w = img.shape[:2]
- angle, translate, scale, shear = self.get_params(
- self.degrees, self.translate, self.scale, self.shear, src_h)
-
- M = self._get_inverse_affine_matrix((src_w/2, src_h/2), angle, (0, 0), scale, shear)
- M = np.array(M).reshape(2,3)
-
- startpoints = [(0, 0), (src_w - 1, 0), (src_w - 1, src_h - 1), (0, src_h - 1)]
- project = lambda x, y, a, b, c: int(a*x + b*y + c)
- endpoints = [(project(x, y, *M[0]), project(x, y, *M[1])) for x, y in startpoints]
-
- rect = cv2.minAreaRect(np.array(endpoints))
-        bbox = cv2.boxPoints(rect).astype(np.int64)  # np.int was removed in recent NumPy releases
- max_x, max_y = bbox[:, 0].max(), bbox[:, 1].max()
- min_x, min_y = bbox[:, 0].min(), bbox[:, 1].min()
-
- dst_w = int(max_x - min_x)
- dst_h = int(max_y - min_y)
- M[0, 2] += (dst_w - src_w) / 2
- M[1, 2] += (dst_h - src_h) / 2
-
- # add translate
- dst_w += int(abs(translate[0]))
- dst_h += int(abs(translate[1]))
- if translate[0] < 0: M[0, 2] += abs(translate[0])
- if translate[1] < 0: M[1, 2] += abs(translate[1])
-
- flags = get_interpolation()
- return cv2.warpAffine(img, M, (dst_w , dst_h), flags=flags, borderMode=cv2.BORDER_REPLICATE)
-
-class CVRandomPerspective(object):
- def __init__(self, distortion=0.5):
- self.distortion = distortion
-
- def get_params(self, width, height, distortion):
-        offset_h = sample_asym(distortion * height / 2, size=4).astype(np.int64)
-        offset_w = sample_asym(distortion * width / 2, size=4).astype(np.int64)
- topleft = ( offset_w[0], offset_h[0])
- topright = (width - 1 - offset_w[1], offset_h[1])
- botright = (width - 1 - offset_w[2], height - 1 - offset_h[2])
- botleft = ( offset_w[3], height - 1 - offset_h[3])
-
- startpoints = [(0, 0), (width - 1, 0), (width - 1, height - 1), (0, height - 1)]
- endpoints = [topleft, topright, botright, botleft]
- return np.array(startpoints, dtype=np.float32), np.array(endpoints, dtype=np.float32)
-
- def __call__(self, img):
- height, width = img.shape[:2]
- startpoints, endpoints = self.get_params(width, height, self.distortion)
- M = cv2.getPerspectiveTransform(startpoints, endpoints)
-
- # TODO: more robust way to crop image
- rect = cv2.minAreaRect(endpoints)
-        bbox = cv2.boxPoints(rect).astype(np.int64)
- max_x, max_y = bbox[:, 0].max(), bbox[:, 1].max()
- min_x, min_y = bbox[:, 0].min(), bbox[:, 1].min()
- min_x, min_y = max(min_x, 0), max(min_y, 0)
-
- flags = get_interpolation()
- img = cv2.warpPerspective(img, M, (max_x, max_y), flags=flags, borderMode=cv2.BORDER_REPLICATE)
- img = img[min_y:, min_x:]
- return img
-
-class CVRescale(object):
-
- def __init__(self, factor=4, base_size=(128, 512)):
- """ Define image scales using gaussian pyramid and rescale image to target scale.
-
- Args:
- factor: the decayed factor from base size, factor=4 keeps target scale by default.
- base_size: base size the build the bottom layer of pyramid
- """
- if isinstance(factor, numbers.Number):
- self.factor = round(sample_uniform(0, factor))
- elif isinstance(factor, (tuple, list)) and len(factor) == 2:
- self.factor = round(sample_uniform(factor[0], factor[1]))
- else:
- raise Exception('factor must be number or list with length 2')
- # assert factor is valid
- self.base_h, self.base_w = base_size[:2]
-
- def __call__(self, img):
- if self.factor == 0: return img
- src_h, src_w = img.shape[:2]
- cur_w, cur_h = self.base_w, self.base_h
- scale_img = cv2.resize(img, (cur_w, cur_h), interpolation=get_interpolation())
- for _ in range(self.factor):
- scale_img = cv2.pyrDown(scale_img)
- scale_img = cv2.resize(scale_img, (src_w, src_h), interpolation=get_interpolation())
- return scale_img
-
-class CVGaussianNoise(object):
- def __init__(self, mean=0, var=20):
- self.mean = mean
- if isinstance(var, numbers.Number):
- self.var = max(int(sample_asym(var)), 1)
- elif isinstance(var, (tuple, list)) and len(var) == 2:
- self.var = int(sample_uniform(var[0], var[1]))
- else:
-            raise Exception('var must be a number or a list of length 2')
-
- def __call__(self, img):
- noise = np.random.normal(self.mean, self.var**0.5, img.shape)
- img = np.clip(img + noise, 0, 255).astype(np.uint8)
- return img
-
-class CVMotionBlur(object):
- def __init__(self, degrees=12, angle=90):
- if isinstance(degrees, numbers.Number):
- self.degree = max(int(sample_asym(degrees)), 1)
- elif isinstance(degrees, (tuple, list)) and len(degrees) == 2:
- self.degree = int(sample_uniform(degrees[0], degrees[1]))
- else:
- raise Exception('degree must be number or list with length 2')
- self.angle = sample_uniform(-angle, angle)
-
- def __call__(self, img):
- M = cv2.getRotationMatrix2D((self.degree // 2, self.degree // 2), self.angle, 1)
- motion_blur_kernel = np.zeros((self.degree, self.degree))
- motion_blur_kernel[self.degree // 2, :] = 1
- motion_blur_kernel = cv2.warpAffine(motion_blur_kernel, M, (self.degree, self.degree))
- motion_blur_kernel = motion_blur_kernel / self.degree
- img = cv2.filter2D(img, -1, motion_blur_kernel)
- img = np.clip(img, 0, 255).astype(np.uint8)
- return img
-
-class CVGeometry(object):
- def __init__(self, degrees=15, translate=(0.3, 0.3), scale=(0.5, 2.),
- shear=(45, 15), distortion=0.5, p=0.5):
- self.p = p
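-        # NOTE: the geometric transform (rotation, affine or perspective) is chosen once here at construction time, not per image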
- type_p = random.random()
- if type_p < 0.33:
- self.transforms = CVRandomRotation(degrees=degrees)
- elif type_p < 0.66:
- self.transforms = CVRandomAffine(degrees=degrees, translate=translate, scale=scale, shear=shear)
- else:
- self.transforms = CVRandomPerspective(distortion=distortion)
-
- def __call__(self, img):
- if random.random() < self.p:
- img = np.array(img)
- return Image.fromarray(self.transforms(img))
- else: return img
-
-class CVDeterioration(object):
- def __init__(self, var, degrees, factor, p=0.5):
- self.p = p
- transforms = []
- if var is not None:
- transforms.append(CVGaussianNoise(var=var))
- if degrees is not None:
- transforms.append(CVMotionBlur(degrees=degrees))
- if factor is not None:
- transforms.append(CVRescale(factor=factor))
-
- random.shuffle(transforms)
- transforms = Compose(transforms)
- self.transforms = transforms
-
- def __call__(self, img):
- if random.random() < self.p:
- img = np.array(img)
- return Image.fromarray(self.transforms(img))
- else: return img
-
-
-class CVColorJitter(object):
- def __init__(self, brightness=0.5, contrast=0.5, saturation=0.5, hue=0.1, p=0.5):
- self.p = p
- self.transforms = transforms.ColorJitter(brightness=brightness, contrast=contrast,
- saturation=saturation, hue=hue)
-
- def __call__(self, img):
- if random.random() < self.p: return self.transforms(img)
- else: return img
diff --git a/spaces/sohojoe/soho-clip-embeddings-explorer/experimental/vision001.py b/spaces/sohojoe/soho-clip-embeddings-explorer/experimental/vision001.py
deleted file mode 100644
index 9c055daf353b15eaf4920f0e4b882748d03cc75f..0000000000000000000000000000000000000000
--- a/spaces/sohojoe/soho-clip-embeddings-explorer/experimental/vision001.py
+++ /dev/null
@@ -1,215 +0,0 @@
-from concurrent.futures import ThreadPoolExecutor, as_completed
-import json
-import os
-import time
-
-import numpy as np
-import requests
-import torch
-
-from clip_app_client import ClipAppClient
-from clip_retrieval.clip_client import ClipClient, Modality
-clip_retrieval_service_url = "https://knn.laion.ai/knn-service"
-map_clip_to_clip_retreval = {
- "ViT-L/14": "laion5B-L-14",
- "open_clip:ViT-H-14": "laion5B-H-14",
- "open_clip:ViT-L-14": "laion5B-L-14",
-}
-
-
-def safe_url(url):
- import urllib.parse
- url = urllib.parse.quote(url, safe=':/')
- # if url has two .jpg filenames, take the first one
- if url.count('.jpg') > 0:
- url = url.split('.jpg')[0] + '.jpg'
- return url
-
-# test_image_path = os.path.join(os.getcwd(), "images", "plant-001.png")
-test_image_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "images", "plant-001.jpeg")
-# test_image_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "images", "plant-002.jpeg")
-# test_image_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "images", "plant-002.jpeg")
-# test_image_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "images", "car-002.jpeg")
-
-app_client = ClipAppClient()
-clip_retrieval_client = ClipClient(
- url=clip_retrieval_service_url,
- indice_name=map_clip_to_clip_retreval[app_client.clip_model],
- # use_safety_model = False,
- # use_violence_detector = False,
- # use_mclip = False,
- num_images = 300,
- # modality = Modality.TEXT,
- # modality = Modality.TEXT,
- )
-preprocessed_image = app_client.preprocess_image(test_image_path)
-preprocessed_image_embeddings = app_client.preprocessed_image_to_embedding(preprocessed_image)
-print (f"embeddings: {preprocessed_image_embeddings.shape}")
-
-embedding_as_list = preprocessed_image_embeddings[0].tolist()
-results = clip_retrieval_client.query(embedding_input=embedding_as_list)
-
-# hints = ""
-# for result in results:
-# url = safe_url(result["url"])
-# similarty = float("{:.4f}".format(result["similarity"]))
-# title = result["caption"]
-# print (f"{similarty} \"{title}\" {url}")
-# if len(hints) > 0:
-# hints += f", \"{title}\""
-# else:
-# hints += f"\"{title}\""
-# print("---")
-# print(hints)
-
-image_labels = [r['caption'] for r in results]
-image_label_vectors = [app_client.text_to_embedding(label) for label in image_labels]
-image_label_vectors = torch.cat(image_label_vectors, dim=0)
-dot_product = torch.mm(image_label_vectors, preprocessed_image_embeddings.T)
-similarity_image_label = [(float("{:.4f}".format(dot_product[i][0])), image_labels[i]) for i in range(len(image_labels))]
-similarity_image_label.sort(reverse=True)
-for similarity, image_label in similarity_image_label:
- print (f"{similarity} {image_label}")
-
-print (f"----\n")
-
-# now do the same for images
-def _safe_image_url_to_embedding(url, safe_return):
- try:
- return app_client.image_url_to_embedding(url)
- except:
- return safe_return
-image_urls = [safe_url(r['url']) for r in results]
-image_vectors = [_safe_image_url_to_embedding(url, preprocessed_image_embeddings * 0) for url in image_urls]
-image_vectors = torch.cat(image_vectors, dim=0)
-dot_product = torch.mm(image_vectors, preprocessed_image_embeddings.T)
-similarity_image = [(float("{:.4f}".format(dot_product[i][0])), image_labels[i]) for i in range(len(image_labels))]
-similarity_image.sort(reverse=True)
-for similarity, image_label in similarity_image:
- print (f"{similarity} {image_label}")
-
-def mean_template(embeddings):
- template = torch.mean(embeddings, dim=0, keepdim=True)
- return template
-
-def principal_component_analysis_template(embeddings):
- mean = torch.mean(embeddings, dim=0)
- embeddings_centered = embeddings - mean # Subtract the mean
- u, s, v = torch.svd(embeddings_centered) # Perform SVD
- template = u[:, 0] # The first column of u gives the first principal component
- return template
-
-def clustering_templates(embeddings, n_clusters=5):
- from sklearn.cluster import KMeans
- import numpy as np
-
- kmeans = KMeans(n_clusters=n_clusters)
- embeddings_np = embeddings.numpy() # Convert to numpy
- clusters = kmeans.fit_predict(embeddings_np)
-
- templates = []
- for cluster in np.unique(clusters):
- cluster_mean = np.mean(embeddings_np[clusters == cluster], axis=0)
- templates.append(torch.from_numpy(cluster_mean)) # Convert back to tensor
- return templates
-
-# create a templates using clustering
-print(f"create a templates using clustering")
-merged_embeddings = torch.cat([image_label_vectors, image_vectors], dim=0)
-clusters = clustering_templates(merged_embeddings, n_clusters=5)
-# convert from list to 2d matrix
-clusters = torch.stack(clusters, dim=0)
-dot_product = torch.mm(clusters, preprocessed_image_embeddings.T)
-cluster_similarity = [(float("{:.4f}".format(dot_product[i][0])), i) for i in range(len(clusters))]
-cluster_similarity.sort(reverse=True)
-for similarity, idx in cluster_similarity:
- print (f"{similarity} {idx}")
-# template = highest scoring cluster
-# template = clusters[cluster_similarity[0][1]]
-template = preprocessed_image_embeddings * (len(clusters)-1)
-for i in range(1, len(clusters)):
- template -= clusters[cluster_similarity[i][1]]
-print("---")
-print(f"seaching based on template")
-results = clip_retrieval_client.query(embedding_input=template[0].tolist())
-hints = ""
-for result in results:
- url = safe_url(result["url"])
- similarty = float("{:.4f}".format(result["similarity"]))
- title = result["caption"]
- print (f"{similarty} \"{title}\" {url}")
- if len(hints) > 0:
- hints += f", \"{title}\""
- else:
- hints += f"\"{title}\""
-print(hints)
-
-
-# cluster_num = 1
-# for template in clusters:
-# print("---")
-# print(f"cluster {cluster_num} of {len(clusters)}")
-# results = clip_retrieval_client.query(embedding_input=template.tolist())
-# hints = ""
-# for result in results:
-# url = safe_url(result["url"])
-# similarty = float("{:.4f}".format(result["similarity"]))
-# title = result["caption"]
-# print (f"{similarty} \"{title}\" {url}")
-# if len(hints) > 0:
-# hints += f", \"{title}\""
-# else:
-# hints += f"\"{title}\""
-# print(hints)
-# cluster_num += 1
-
-
-# create a template
-# mean
-# image_label_template = mean_template(image_label_vectors)
-# image_template = mean_template(image_vectors)
-# pca
-# image_label_template = principal_component_analysis_template(image_label_vectors)
-# image_template = principal_component_analysis_template(image_vectors)
-# clustering
-# image_label_template = clustering_template(image_label_vectors)
-# image_template = clustering_template(image_vectors)
-
-# take the embedding and subtract the template
-# image_label_template = preprocessed_image_embeddings - image_label_template
-# image_template = preprocessed_image_embeddings - image_template
-# image_label_template = image_label_template - preprocessed_image_embeddings
-# image_template = image_template - preprocessed_image_embeddings
-# normalize
-# image_label_template = image_label_template / image_label_template.norm()
-# image_template = image_template / image_template.norm()
-
-# results = clip_retrieval_client.query(embedding_input=image_label_template[0].tolist())
-# hints = ""
-# print("---")
-# print("average of image labels")
-# for result in results:
-# url = safe_url(result["url"])
-# similarty = float("{:.4f}".format(result["similarity"]))
-# title = result["caption"]
-# print (f"{similarty} \"{title}\" {url}")
-# if len(hints) > 0:
-# hints += f", \"{title}\""
-# else:
-# hints += f"\"{title}\""
-# print(hints)
-
-# print("---")
-# print("average of images")
-# results = clip_retrieval_client.query(embedding_input=image_template[0].tolist())
-# hints = ""
-# for result in results:
-# url = safe_url(result["url"])
-# similarty = float("{:.4f}".format(result["similarity"]))
-# title = result["caption"]
-# print (f"{similarty} \"{title}\" {url}")
-# if len(hints) > 0:
-# hints += f", \"{title}\""
-# else:
-# hints += f"\"{title}\""
-# print(hints)
\ No newline at end of file
diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/layerdrop/README.md b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/layerdrop/README.md
deleted file mode 100644
index 4d48ee9615e1458e1e889635dc9938e427a7f64a..0000000000000000000000000000000000000000
--- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/layerdrop/README.md
+++ /dev/null
@@ -1,154 +0,0 @@
-# Reducing Transformer Depth on Demand with Structured Dropout (Fan et al., 2019)
-This page contains information for how to train models with LayerDrop, based on this [paper](https://arxiv.org/abs/1909.11556).
-
-## Citation:
-If you found this technique useful, please cite our paper:
-```bibtex
-@article{fan2019reducing,
- title={Reducing Transformer Depth on Demand with Structured Dropout},
- author={Fan, Angela and Grave, Edouard and Joulin, Armand},
- journal={arXiv preprint arXiv:1909.11556},
- year={2019}
-}
-```
-
-## Pre-trained models
-
-Model | Description | Download
----|---|---
-`layerdrop_wmt_en_de_12_6` | Transformer + LayerDrop 0.2 trained on WMT16 en-de with 12 encoder and 6 decoder layers | [layerdrop_wmt_en_de_12_6.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/layerdrop_wmt_en_de_12_6.tar.gz)
-`roberta_layerdrop.base` | RoBERTa Base + LayerDrop 0.2 | [roberta_layerdrop.base.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/roberta_layerdrop.base.qnli.tar.gz)
-`roberta_layerdrop.large` | RoBERTa Large + LayerDrop 0.2 | [roberta_layerdrop.large.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/roberta_layerdrop.large.tar.gz)
-`roberta_layerdrop.large.mnli` | `roberta_layerdrop.large` finetuned on [MNLI](http://www.nyu.edu/projects/bowman/multinli) | [roberta_layerdrop.large.mnli.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/roberta_layerdrop.large.mnli.tar.gz)
-`roberta_layerdrop.large.qnli` | `roberta_layerdrop.large` finetuned on [QNLI](https://arxiv.org/abs/1804.07461) | [roberta_layerdrop.large.qnli.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/roberta_layerdrop.large.qnli.tar.gz)
-
-
-Evaluate performance of these pre-trained models:
-```bash
-# Example for Machine Translation
-fairseq-generate /path/to/bped/wmt/data --path nmt_checkpoint.pt \
- --beam 8 --lenpen 0.4 \
- --batch-size 64 \
- --remove-bpe \
- --gen-subset test > wmt16_gen.txt
-bash scripts/compound_split_bleu.sh wmt16_gen.txt
-# prints BLEU4 = 30.17
-```
-
-```python
-# Example for RoBERTa + LayerDrop finetuned on MNLI:
-from fairseq.models.roberta import RobertaModel
-
-roberta_layerdrop = RobertaModel.from_pretrained(
- '/path/to/MNLI/model',
- checkpoint_file='mnli_checkpoint.pt',
- data_name_or_path='/path/to/MNLI/data/MNLI-bin'
-)
-label_map = {0: 'contradiction', 2: 'neutral', 1: 'entailment'}
-ncorrect, nsamples = 0, 0
-roberta_layerdrop.cuda()
-roberta_layerdrop.eval()
-with open('/path/to/MNLI/data/dev_matched.tsv') as fin:
- fin.readline()
- for index, line in enumerate(fin):
- tokens = line.strip().split('\t')
- sent1, sent2, target = tokens[8], tokens[9], tokens[-1]
- tokens = roberta_layerdrop.encode(sent1, sent2)
- prediction = roberta_layerdrop.predict('sentence_classification_head', tokens).argmax().item()
- prediction_label = label_map[prediction]
- ncorrect += int(prediction_label == target)
- nsamples += 1
-print('| Accuracy: ', float(ncorrect)/float(nsamples))
-# prints | Accuracy: 0.9026999490575649
-
-
-# Example for RoBERTa + LayerDrop finetuned on QNLI:
-roberta = RobertaModel.from_pretrained(
- '/path/to/QNLI/model',
- checkpoint_file='qnli_checkpoint.pt',
- data_name_or_path='/path/to/QNLI/data/QNLI-bin'
-)
-
-label_fn = lambda label: roberta.task.label_dictionary.string(
- [label + roberta.task.target_dictionary.nspecial]
-)
-ncorrect, nsamples = 0, 0
-roberta.cuda()
-roberta.eval()
-with open('/path/to/QNLI/data/dev.tsv') as fin:
- fin.readline()
- for index, line in enumerate(fin):
- tokens = line.strip().split('\t')
- sent1, sent2, target = tokens[1], tokens[2], tokens[3]
- tokens = roberta.encode(sent1, sent2)
- prediction = roberta.predict('sentence_classification_head', tokens).argmax().item()
- prediction_label = label_fn(prediction)
- ncorrect += int(prediction_label == target)
- nsamples += 1
-print('| Accuracy: ', float(ncorrect)/float(nsamples))
-# prints | Accuracy: 0.9480139117700896
-```
-
-
-## Example usage
-
-To train a model with LayerDrop, add the following flags. We recommend 0.2, a value that worked well in our experiments. For Language Models that are decoder-only, you need only the decoder flag. For RoBERTa, an encoder, you need only the encoder flag. The encoder and decoder LayerDrop values can be set differently.
-```
---encoder-layerdrop 0.2 --decoder-layerdrop 0.2
-```
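-
-For intuition, here is a minimal sketch of what structured layer dropout does during a training forward pass (an illustration only, not fairseq's implementation; `layers` stands in for an encoder or decoder layer stack):
-```python
-import torch
-
-def layerdrop_forward(x, layers, p=0.2, training=True):
-    # During training, skip each layer independently with probability p;
-    # at inference, all retained layers are applied as usual.
-    for layer in layers:
-        if training and torch.rand(1).item() < p:
-            continue  # drop this entire layer for the current forward pass
-        x = layer(x)
-    return x
-```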
-
-To prune a model that has been trained with LayerDrop, add the following flags followed by a comma separated list of which layers you would like to keep.
-```
---encoder-layers-to-keep 0,2,4,6,8,10,12,14 --decoder-layers-to-keep 0,2,4,6,8,10,12,14
-```
-Setting these flags should print a message such as:
-```
-| Pruning model to specified layer configuration
-```
-You should also see a smaller number of parameters in the model, for example the 16-Layer Transformer Language Model prints:
-```
-num. model params: 246933504
-```
-while a model pruned to 8 Layers prints:
-```
-num. model params: 146163712
-```
-
-If you would like to pick up training with a model that has been pruned, simply adding these flags is sufficient. If you would like to use a script that only does evaluation (no training), you may need to pass an override command. A specific example would be for language modeling:
-```bash
-fairseq-eval-lm /path/to/wikitext-103 \
- --path /path/to/model/checkpoint.pt \
- --model-overrides "{'decoder_layers_to_keep':'0,2,4,6,8,10,12,14'}"
-```
-This model override command overrides the training parameters and updates the model arguments so that the pruned model is run instead of the full model.
-
-## Reproduce Paper Results
-
-Looking to reproduce the results in the paper?
-
-1. For Translation on WMT16 en-de, we followed this setting [here](https://github.com/pytorch/fairseq/blob/main/examples/scaling_nmt/README.md)
-2. To train RoBERTa, we followed this setting [here](https://github.com/pytorch/fairseq/tree/main/examples/roberta)
-3. To train Language Models on Wikitext-103, we followed this setting [here](https://github.com/pytorch/fairseq/tree/main/examples/language_model)
-
-
-## Tips
-
-1. If you would like to train large models with better performance, LayerDrop should be set to a smaller value such as 0.1 or 0.2. Too much LayerDrop will mean the model has too much regularization, so may not reach the best performance. Since LayerDrop adds regularization, you may achieve the best performance by slightly reducing the amount of standard dropout (for example, reduce by 0.1).
-
-2. If you would like to train large models to be pruned and made smaller, LayerDrop should be set to a larger value such as 0.5 if you want to prune very aggressively (such as removing half the network or more). If you would like to prune fewer layers away, LayerDrop can be set to a smaller value such as 0.2. Our experiments were conducted with low values of LayerDrop (such as 0.1 and 0.2), for reference.
-
-3. When pruning layers at inference time, it is best to spread out the layers remaining so they are evenly spaced throughout the network. For example, if you want to remove 50% of the network, keeping every other layer is good.
-
-
-## FAQ
-
-1. How did the sharing layers experiment work? In an appendix (https://openreview.net/pdf?id=SylO2yStDr) we added an experiment on Wikitext-103 language modeling that combined LayerDrop with Weight Sharing. We shared chunks of 2 layers such that every other layer had shared weights. For example, if our network has layers 1 through 6, then layer 1 and 2 are shared, layer 3 and 4 are shared, and layer 5 and 6 are shared. A toy sketch of this sharing pattern is included after this FAQ.
-
-2. LayerDrop hasn't been helping in my setting? During training time, LayerDrop can help regularize your network. This is most important if your network is already overfitting - if your network is underfitting, it is possible LayerDrop is adding too much regularization. We recommend using smaller values (such as 0.1 or 0.2) and also decreasing the quantity of standard dropout (for example, reduce by 0.1).
-
-3. Can you train a model without LayerDrop and finetune with LayerDrop (e.g. for BERT)? In our experiments, we did not see great performance. Models such as RoBERTa have trained for a long time in the pre-training setting, so only finetuning with LayerDrop for a few epochs on a downstream task such as MNLI does not achieve the robustness required for successful pruning.
-
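-A toy sketch of the layer-sharing pattern described in question 1 (illustrative only, not the code used for the paper; `layer_factory` stands in for any callable that builds a single transformer layer):
-```python
-import torch.nn as nn
-
-def build_shared_stack(layer_factory, num_layers=6, chunk=2):
-    # Instantiate one real layer per chunk and reuse it within the chunk,
-    # so layers 1-2 share weights, layers 3-4 share weights, and so on.
-    layers = []
-    for i in range(num_layers):
-        if i % chunk == 0:
-            shared = layer_factory()
-        layers.append(shared)
-    return nn.ModuleList(layers)
-```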
-
-## Having an issue or have a question?
-
-Please open an issue in this repository with the details of your question. Thanks!
diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/multilingual/data_scripts/download_ted_and_extract.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/multilingual/data_scripts/download_ted_and_extract.py
deleted file mode 100644
index eb756680fa7dc31a14ba45c216776a6d60c16b60..0000000000000000000000000000000000000000
--- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/multilingual/data_scripts/download_ted_and_extract.py
+++ /dev/null
@@ -1,338 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-
-import itertools
-import os
-import csv
-from collections import defaultdict
-from six.moves import zip
-import io
-import wget
-import sys
-
-from subprocess import check_call, check_output
-
-# scripts and data locations
-CWD = os.getcwd()
-UTILS = f"{CWD}/utils"
-
-MOSES = f"{UTILS}/mosesdecoder"
-
-WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None)
-
-if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip():
-    print('Please specify your working directory root in the OS environment variable WORKDIR_ROOT. Exiting...')
- sys.exit(-1)
-
-
-# please download mosesdecoder here:
-detok_cmd = f'{MOSES}/scripts/tokenizer/detokenizer.perl'
-
-
-def call(cmd):
- print(f"Executing: {cmd}")
- check_call(cmd, shell=True)
-
-class MultiLingualAlignedCorpusReader(object):
- """A class to read TED talk dataset
- """
-
- def __init__(self, corpus_path, delimiter='\t',
- target_token=True, bilingual=True, corpus_type='file',
- lang_dict={'source': ['fr'], 'target': ['en']},
- eval_lang_dict=None, zero_shot=False,
- detok=True,
- ):
-
- self.empty_line_flag = 'NULL'
- self.corpus_path = corpus_path
- self.delimiter = delimiter
- self.bilingual = bilingual
- self.lang_dict = lang_dict
- self.lang_set = set()
- self.target_token = target_token
- self.zero_shot = zero_shot
- self.eval_lang_dict = eval_lang_dict
- self.corpus_type = corpus_type
- self.detok = detok
-
- for list_ in self.lang_dict.values():
- for lang in list_:
- self.lang_set.add(lang)
-
- self.data = dict()
- self.data['train'] = self.read_aligned_corpus(split_type='train')
- self.data['test'] = self.read_aligned_corpus(split_type='test')
- self.data['dev'] = self.read_aligned_corpus(split_type='dev')
-
- def read_data(self, file_loc_):
- data_list = list()
- with io.open(file_loc_, 'r', encoding='utf8') as fp:
- for line in fp:
- try:
- text = line.strip()
- except IndexError:
- text = self.empty_line_flag
- data_list.append(text)
- return data_list
-
- def filter_text(self, dict_):
- if self.target_token:
- field_index = 1
- else:
- field_index = 0
- data_dict = defaultdict(list)
- list1 = dict_['source']
- list2 = dict_['target']
- for sent1, sent2 in zip(list1, list2):
- try:
- src_sent = ' '.join(sent1.split()[field_index: ])
- except IndexError:
- src_sent = 'NULL'
-
- if src_sent.find(self.empty_line_flag) != -1 or len(src_sent) == 0:
- continue
-
- elif sent2.find(self.empty_line_flag) != -1 or len(sent2) == 0:
- continue
-
- else:
- data_dict['source'].append(sent1)
- data_dict['target'].append(sent2)
- return data_dict
-
- def read_file(self, split_type, data_type):
- return self.data[split_type][data_type]
-
- def save_file(self, path_, split_type, data_type, lang):
- tok_file = tok_file_name(path_, lang)
- with io.open(tok_file, 'w', encoding='utf8') as fp:
- for line in self.data[split_type][data_type]:
- fp.write(line + '\n')
- if self.detok:
- de_tok(tok_file, lang)
-
- def add_target_token(self, list_, lang_id):
- new_list = list()
- token = '__' + lang_id + '__'
- for sent in list_:
- new_list.append(token + ' ' + sent)
- return new_list
-
- def read_from_single_file(self, path_, s_lang, t_lang):
- data_dict = defaultdict(list)
- with io.open(path_, 'r', encoding='utf8') as fp:
- reader = csv.DictReader(fp, delimiter='\t', quoting=csv.QUOTE_NONE)
- for row in reader:
- data_dict['source'].append(row[s_lang])
- data_dict['target'].append(row[t_lang])
-
- if self.target_token:
- text = self.add_target_token(data_dict['source'], t_lang)
- data_dict['source'] = text
-
- return data_dict['source'], data_dict['target']
-
- def read_aligned_corpus(self, split_type='train'):
- data_dict = defaultdict(list)
- iterable = []
- s_list = []
- t_list = []
-
- if self.zero_shot:
- if split_type == "train":
- iterable = zip(self.lang_dict['source'], self.lang_dict['target'])
- else:
- iterable = zip(self.eval_lang_dict['source'], self.eval_lang_dict['target'])
-
- elif self.bilingual:
- iterable = itertools.product(self.lang_dict['source'], self.lang_dict['target'])
-
- for s_lang, t_lang in iterable:
- if s_lang == t_lang:
- continue
- if self.corpus_type == 'file':
- split_type_file_path = os.path.join(self.corpus_path,
- "all_talks_{}.tsv".format(split_type))
- s_list, t_list = self.read_from_single_file(split_type_file_path,
- s_lang=s_lang,
- t_lang=t_lang)
- data_dict['source'] += s_list
- data_dict['target'] += t_list
- new_data_dict = self.filter_text(data_dict)
- return new_data_dict
-
-
-def read_langs(corpus_path):
- split_type_file_path = os.path.join(corpus_path, 'extracted',
- "all_talks_dev.tsv")
- with io.open(split_type_file_path, 'r', encoding='utf8') as fp:
- reader = csv.DictReader(fp, delimiter='\t', quoting=csv.QUOTE_NONE)
- header = next(reader)
- return [k for k in header.keys() if k != 'talk_name']
-
-def extra_english(corpus_path, split):
- split_type_file_path = os.path.join(corpus_path,
- f"all_talks_{split}.tsv")
- output_split_type_file_path = os.path.join(corpus_path,
- f"all_talks_{split}.en")
- with io.open(split_type_file_path, 'r', encoding='utf8') as fp, io.open(output_split_type_file_path, 'w', encoding='utf8') as fw:
- reader = csv.DictReader(fp, delimiter='\t', quoting=csv.QUOTE_NONE)
- for row in reader:
- line = row['en']
- fw.write(line + '\n')
- de_tok(output_split_type_file_path, 'en')
-
-
-
-def tok_file_name(filename, lang):
- seps = filename.split('.')
- seps.insert(-1, 'tok')
- tok_file = '.'.join(seps)
- return tok_file
-
-def de_tok(tok_file, lang):
- # seps = tok_file.split('.')
- # seps.insert(-1, 'detok')
- # de_tok_file = '.'.join(seps)
- de_tok_file = tok_file.replace('.tok.', '.')
- cmd = 'perl {detok_cmd} -l {lang} < {tok_file} > {de_tok_file}'.format(
- detok_cmd=detok_cmd, tok_file=tok_file,
- de_tok_file=de_tok_file, lang=lang[:2])
- call(cmd)
-
-def extra_bitex(
- ted_data_path,
- lsrc_lang,
- ltrg_lang,
- target_token,
- output_data_path,
-):
- def get_ted_lang(lang):
- long_langs = ['pt-br', 'zh-cn', 'zh-tw', 'fr-ca']
- if lang[:5] in long_langs:
- return lang[:5]
- elif lang[:4] =='calv':
- return lang[:5]
- elif lang in ['pt_BR', 'zh_CN', 'zh_TW', 'fr_CA']:
- return lang.lower().replace('_', '-')
- return lang[:2]
- src_lang = get_ted_lang(lsrc_lang)
- trg_lang = get_ted_lang(ltrg_lang)
- train_lang_dict={'source': [src_lang], 'target': [trg_lang]}
- eval_lang_dict = {'source': [src_lang], 'target': [trg_lang]}
-
- obj = MultiLingualAlignedCorpusReader(corpus_path=ted_data_path,
- lang_dict=train_lang_dict,
- target_token=target_token,
- corpus_type='file',
- eval_lang_dict=eval_lang_dict,
- zero_shot=False,
- bilingual=True)
-
- os.makedirs(output_data_path, exist_ok=True)
- lsrc_lang = lsrc_lang.replace('-', '_')
- ltrg_lang = ltrg_lang.replace('-', '_')
- obj.save_file(output_data_path + f"/train.{lsrc_lang}-{ltrg_lang}.{lsrc_lang}",
- split_type='train', data_type='source', lang=src_lang)
- obj.save_file(output_data_path + f"/train.{lsrc_lang}-{ltrg_lang}.{ltrg_lang}",
- split_type='train', data_type='target', lang=trg_lang)
-
- obj.save_file(output_data_path + f"/test.{lsrc_lang}-{ltrg_lang}.{lsrc_lang}",
- split_type='test', data_type='source', lang=src_lang)
- obj.save_file(output_data_path + f"/test.{lsrc_lang}-{ltrg_lang}.{ltrg_lang}",
- split_type='test', data_type='target', lang=trg_lang)
-
- obj.save_file(output_data_path + f"/valid.{lsrc_lang}-{ltrg_lang}.{lsrc_lang}",
- split_type='dev', data_type='source', lang=src_lang)
- obj.save_file(output_data_path + f"/valid.{lsrc_lang}-{ltrg_lang}.{ltrg_lang}",
- split_type='dev', data_type='target', lang=trg_lang)
-
-
-def bar_custom(current, total, width=80):
- print("Downloading: %d%% [%d / %d] Ks" % (current / total * 100, current / 1000, total / 1000), end='\r')
-
-
-def download_and_extract(download_to, extract_to):
- url = 'http://phontron.com/data/ted_talks.tar.gz'
- filename = f"{download_to}/ted_talks.tar.gz"
- if os.path.exists(filename):
-        print(f'{filename} has already been downloaded, skipping download')
- else:
- filename = wget.download(url, filename, bar=bar_custom)
- if os.path.exists(f'{extract_to}/all_talks_train.tsv'):
-        print('Already extracted, skipping extraction')
- else:
- extract_cmd = f'tar xzfv "{filename}" -C "{extract_to}"'
- call(extract_cmd)
-
-
-if __name__ == "__main__":
- import argparse
- parser = argparse.ArgumentParser()
- parser.add_argument('--ted_data_path', type=str, default=WORKDIR_ROOT, required=False)
- parser.add_argument(
- '--direction-list',
- type=str,
- # default=None,
- #for ML50
- default=(
- "bn_IN-en_XX,he_IL-en_XX,fa_IR-en_XX,id_ID-en_XX,sv_SE-en_XX,pt_XX-en_XX,ka_GE-en_XX,ka_GE-en_XX,th_TH-en_XX,"
- "mr_IN-en_XX,hr_HR-en_XX,uk_UA-en_XX,az_AZ-en_XX,mk_MK-en_XX,gl_ES-en_XX,sl_SI-en_XX,mn_MN-en_XX,"
- #non-english directions
- # "fr_XX-de_DE," # replaced with wmt20
- # "ja_XX-ko_KR,es_XX-pt_XX,ru_RU-sv_SE,hi_IN-bn_IN,id_ID-ar_AR,cs_CZ-pl_PL,ar_AR-tr_TR"
- ),
- required=False)
- parser.add_argument('--target-token', action='store_true', default=False)
- parser.add_argument('--extract-all-english', action='store_true', default=False)
-
- args = parser.parse_args()
-
- import sys
- import json
-
- # TED Talks data directory
- ted_data_path = args.ted_data_path
-
- download_to = f'{ted_data_path}/downloads'
- extract_to = f'{ted_data_path}/extracted'
-
- #DESTDIR=${WORKDIR_ROOT}/ML50/raw/
- output_path = f'{ted_data_path}/ML50/raw'
- os.makedirs(download_to, exist_ok=True)
- os.makedirs(extract_to, exist_ok=True)
- os.makedirs(output_path, exist_ok=True)
- download_and_extract(download_to, extract_to)
-
-
- if args.extract_all_english:
- for split in ['train', 'dev', 'test']:
- extra_english(ted_data_path, split)
- exit(0)
- if args.direction_list is not None:
- directions = args.direction_list.strip().split(',')
- directions = [tuple(d.strip().split('-', 1)) for d in directions if d]
- else:
- langs = read_langs(ted_data_path)
- # directions = [
- # '{}.{}'.format(src, tgt)
- # for src in langs
- # for tgt in langs
- # if src < tgt
- # ]
- directions = [('en', tgt) for tgt in langs if tgt != 'en']
- print(f'num directions={len(directions)}: {directions}')
-
- for src_lang, trg_lang in directions:
- print('--working on {}-{}'.format(src_lang, trg_lang))
- extra_bitex(
- extract_to,
- src_lang,
- trg_lang,
- target_token=args.target_token,
- output_data_path=output_path
- )
diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/criterions/label_smoothed_cross_entropy_latency_augmented.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/criterions/label_smoothed_cross_entropy_latency_augmented.py
deleted file mode 100644
index 223a16f740c10b58ea45a0390814363e7b5f68b8..0000000000000000000000000000000000000000
--- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/criterions/label_smoothed_cross_entropy_latency_augmented.py
+++ /dev/null
@@ -1,233 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from dataclasses import dataclass, field
-import torch
-from fairseq import metrics, utils
-from fairseq.criterions import register_criterion
-from fairseq.criterions.label_smoothed_cross_entropy import (
- LabelSmoothedCrossEntropyCriterion,
- LabelSmoothedCrossEntropyCriterionConfig
-)
-
-try:
- from simuleval.metrics.latency import (
- AverageLagging,
- AverageProportion,
- DifferentiableAverageLagging
- )
- LATENCY_METRICS = {
- "average_lagging": AverageLagging,
- "average_proportion": AverageProportion,
- "differentiable_average_lagging": DifferentiableAverageLagging,
- }
-except ImportError:
- LATENCY_METRICS = None
-
-
-@dataclass
-class LabelSmoothedCrossEntropyCriterionLatencyAugmentConfig(
- LabelSmoothedCrossEntropyCriterionConfig
-):
- latency_avg_weight: float = field(
- default=0.0,
-        metadata={"help": "weight for average latency loss."},
- )
- latency_var_weight: float = field(
- default=0.0,
-        metadata={"help": "weight for variance latency loss."},
- )
- latency_avg_type: str = field(
- default="differentiable_average_lagging",
- metadata={"help": "latency type for average loss"},
- )
- latency_var_type: str = field(
- default="variance_delay",
-        metadata={"help": "latency type for variance loss"},
- )
- latency_gather_method: str = field(
- default="weighted_average",
- metadata={"help": "method to gather latency loss for all heads"},
- )
- latency_update_after: int = field(
- default=0,
- metadata={"help": "Add latency loss after certain steps"},
- )
-
-@register_criterion(
- "latency_augmented_label_smoothed_cross_entropy",
- dataclass=LabelSmoothedCrossEntropyCriterionLatencyAugmentConfig
-)
-class LatencyAugmentedLabelSmoothedCrossEntropyCriterion(
- LabelSmoothedCrossEntropyCriterion
-):
- def __init__(
- self,
- task,
- sentence_avg,
- label_smoothing,
- ignore_prefix_size,
- report_accuracy,
- latency_avg_weight,
- latency_var_weight,
- latency_avg_type,
- latency_var_type,
- latency_gather_method,
- latency_update_after,
- ):
- super().__init__(
- task, sentence_avg, label_smoothing, ignore_prefix_size, report_accuracy
- )
- assert LATENCY_METRICS is not None, "Please make sure SimulEval is installed."
-
- self.latency_avg_weight = latency_avg_weight
- self.latency_var_weight = latency_var_weight
- self.latency_avg_type = latency_avg_type
- self.latency_var_type = latency_var_type
- self.latency_gather_method = latency_gather_method
- self.latency_update_after = latency_update_after
-
- def forward(self, model, sample, reduce=True):
- net_output = model(**sample["net_input"])
- # 1. Compute cross entropy loss
- loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
-
- # 2. Compute cross latency loss
- latency_loss, expected_latency, expected_delays_var = self.compute_latency_loss(
- model, sample, net_output
- )
-
- if self.latency_update_after > 0:
- num_updates = getattr(model.decoder, "num_updates", None)
- assert num_updates is not None, (
- "model.decoder doesn't have attribute 'num_updates'"
- )
- if num_updates <= self.latency_update_after:
- latency_loss = 0
-
- loss += latency_loss
-
- sample_size = (
- sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
- )
-
- logging_output = {
- "loss": loss.data,
- "nll_loss": nll_loss.data,
- "ntokens": sample["ntokens"],
- "nsentences": sample["target"].size(0),
- "sample_size": sample_size,
- "latency": expected_latency,
- "delays_var": expected_delays_var,
- "latency_loss": latency_loss,
- }
-
- if self.report_accuracy:
- n_correct, total = self.compute_accuracy(model, net_output, sample)
- logging_output["n_correct"] = utils.item(n_correct.data)
- logging_output["total"] = utils.item(total.data)
- return loss, sample_size, logging_output
-
- def compute_latency_loss(self, model, sample, net_output):
- assert (
- net_output[-1].encoder_padding_mask is None
- or not net_output[-1].encoder_padding_mask[:, 0].any()
- ), (
- "Only right padding on source is supported."
- )
- # 1. Obtain the expected alignment
- alpha_list = [item["alpha"] for item in net_output[1].attn_list]
- num_layers = len(alpha_list)
- bsz, num_heads, tgt_len, src_len = alpha_list[0].size()
-
- # bsz * num_layers * num_heads, tgt_len, src_len
- alpha_all = torch.cat(alpha_list, dim=1).view(-1, tgt_len, src_len)
-
- # 2 compute expected delays
- # bsz * num_heads * num_layers, tgt_len, src_len for MMA
- steps = (
- torch.arange(1, 1 + src_len)
- .unsqueeze(0)
- .unsqueeze(1)
- .expand_as(alpha_all)
- .type_as(alpha_all)
- )
-
- expected_delays = torch.sum(steps * alpha_all, dim=-1)
-
- target_padding_mask = (
- model.get_targets(sample, net_output)
- .eq(self.padding_idx)
- .unsqueeze(1)
- .expand(bsz, num_layers * num_heads, tgt_len)
- .contiguous()
- .view(-1, tgt_len)
- )
-
- src_lengths = (
- sample["net_input"]["src_lengths"]
- .unsqueeze(1)
- .expand(bsz, num_layers * num_heads)
- .contiguous()
- .view(-1)
- )
- expected_latency = LATENCY_METRICS[self.latency_avg_type](
- expected_delays, src_lengths, None,
- target_padding_mask=target_padding_mask
- )
-
- # 2.1 average expected latency of heads
- # bsz, num_layers * num_heads
- expected_latency = expected_latency.view(bsz, -1)
- if self.latency_gather_method == "average":
- # bsz * tgt_len
- expected_latency = expected_delays.mean(dim=1)
- elif self.latency_gather_method == "weighted_average":
- weights = torch.nn.functional.softmax(expected_latency, dim=1)
- expected_latency = torch.sum(expected_latency * weights, dim=1)
- elif self.latency_gather_method == "max":
- expected_latency = expected_latency.max(dim=1)[0]
- else:
- raise NotImplementedError
-
- expected_latency = expected_latency.sum()
- avg_loss = self.latency_avg_weight * expected_latency
-
- # 2.2 variance of expected delays
- expected_delays_var = (
- expected_delays.view(bsz, -1, tgt_len).var(dim=1).mean(dim=1)
- )
- expected_delays_var = expected_delays_var.sum()
-        var_loss = self.latency_var_weight * expected_delays_var
-
- # 3. Final loss
- latency_loss = avg_loss + var_loss
-
- return latency_loss, expected_latency, expected_delays_var
-
- @classmethod
- def reduce_metrics(cls, logging_outputs) -> None:
- super().reduce_metrics(logging_outputs)
- latency = sum(
- log.get("latency", 0) for log in logging_outputs
- )
- delays_var = sum(
- log.get("delays_var", 0) for log in logging_outputs
- )
- latency_loss = sum(
- log.get("latency_loss", 0) for log in logging_outputs
- )
- nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
- metrics.log_scalar(
- "latency", latency.float() / nsentences, nsentences, round=3
- )
- metrics.log_scalar(
- "delays_var", delays_var / nsentences,
- nsentences, round=3
- )
- metrics.log_scalar(
- "latency_loss", latency_loss / nsentences,
- nsentences, round=3
- )
diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/models/hubert/__init__.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/models/hubert/__init__.py
deleted file mode 100644
index a1b0eabbdbcaf12b15bb96b329ab1e276256f79a..0000000000000000000000000000000000000000
--- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/models/hubert/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from .hubert import * # noqa
-from .hubert_asr import * # noqa
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Crime And Punishment Pevear And Volokhonsky Epub LINK.md b/spaces/stomexserde/gpt4-ui/Examples/Crime And Punishment Pevear And Volokhonsky Epub LINK.md
deleted file mode 100644
index ad787dc25eeccdd4bc2f0558241b7a4b5fbe2097..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Crime And Punishment Pevear And Volokhonsky Epub LINK.md
+++ /dev/null
@@ -1,41 +0,0 @@
-
-
-
Crime and Punishment by Fyodor Dostoevsky: The Best Translation for Modern Readers
-
-
Crime and Punishment is one of the most celebrated novels of all time, written by the Russian author Fyodor Dostoevsky. It tells the story of Raskolnikov, a poor student who commits a murder and theft, and his psychological struggle with guilt, redemption, and love. The novel explores themes such as free will, morality, alienation, and suffering.
-
-
But how can modern readers enjoy this masterpiece of literature in its full glory? The answer is simple: by choosing the best translation available. And that is the one by Richard Pevear and Larissa Volokhonsky.
Richard Pevear and Larissa Volokhonsky are a married couple who have translated many Russian classics into English, including works by Tolstoy, Chekhov, Gogol, and Bulgakov. They have won several awards for their translations, such as the PEN/Book-of-the-Month Club Prize and the French Prix du Meilleur Livre Étranger.
-
-
They are widely praised for their faithful and lively renditions of the original texts, capturing the tone, style, humor, and nuances of the authors. They also provide extensive notes, introductions, and annotations to help readers understand the historical and cultural context of the novels.
-
-
Why is their translation of Crime and Punishment the best?
-
-
Their translation of Crime and Punishment was first published in 1993, and has been updated in 2012 to honor the 200th anniversary of Dostoevsky's birth. It is hailed by critics and readers alike as "the best [translation] currently available" (Washington Post Book World), "a miracle" (Newsweek), and "a work of art" (New York Times Book Review).
-
-
Some of the reasons why their translation is superior to others are:
-
-
-
-
They preserve the original structure and syntax of Dostoevsky's sentences, which are often long, complex, and expressive.
-
They retain the Russian names and terms without anglicizing or simplifying them, which helps convey the authentic flavor and atmosphere of the novel.
-
They avoid modernizing or smoothing out Dostoevsky's language, which is rich in idioms, slang, dialects, and archaisms.
-
They convey the emotional intensity, psychological depth, and philosophical subtlety of Dostoevsky's characters and dialogues.
-
They reproduce the dramatic effects, suspense, and humor of Dostoevsky's narration.
-
-
-
How can you get their translation of Crime and Punishment?
-
-
Their translation of Crime and Punishment is available in various formats: hardcover, paperback, ebook, and audiobook. You can find it online or in your local bookstore. Here are some of the places where you can get it:
-
-
-
ebook: You can download their translation as an epub file from Archive.org, where you can also read it online or borrow it for free. You can also buy it from Google Books, where you can read a sample before purchasing it.
-
audiobook: You can listen to their translation as an audiobook from Amazon.com, where you can get it for free with your Audible trial. The audiobook is narrated by George Guidall, a renowned voice actor who has recorded over 1,300 books.
The core rulebook of Deadlands Hell On Earth contains everything you need to create and run a campaign in the Wasted West, a land of mutants, monsters, and madmen. The book includes:
-
-
A detailed history of the setting, from the Weird West of Deadlands Classic to the Last War and the Reckoning that unleashed hell on earth.
-
A description of the major factions and locations of the Wasted West, such as the Combine, the Templars, the City o' Sin, and the Iron Oasis.
-
Rules for creating and playing different types of characters, such as cyborgs, junkers, sykers, doomsayers, and harrowed.
-
A comprehensive list of skills, edges, hindrances, gear, vehicles, and weapons for surviving the Wasted West.
-
A system for generating random encounters and hazards in the Wasted West.
-
A bestiary of creatures and enemies that inhabit the Wasted West, from rad zombies to road gangs to reckoners.
-
Guidelines for running a campaign in Deadlands Hell On Earth, including tips on adventure design, mood, tone, and genre conventions.
-
An introductory adventure called "Apocalypse Now!" that introduces the players to the Wasted West and its dangers.
-
-
The core rulebook of Deadlands Hell On Earth is available as a pdf file from various online sources. The pdf file is scanned from the original print version and has crisp text and artwork. The pdf file also includes bookmarks for easy navigation. The pdf file is compatible with any device that can read pdf files.
-
If you are looking for a post-apocalyptic RPG that combines horror, western, and steampunk elements, you might want to check out Deadlands Hell On Earth. The core rulebook provides you with everything you need to create and run a campaign in the Wasted West. You can find more information about Deadlands Hell On Earth and other related products at Pinnacle Entertainment Group's website[^2^].
-
-
-
Deadlands Hell On Earth also offers a variety of adventures and modules for the game master and the players to enjoy. Some of these adventures are standalone scenarios, while others are part of a larger campaign or metaplot. Some of the adventures and modules available for Deadlands Hell On Earth are:
-
-
The Boise Horror: A horror-themed adventure that pits the characters against a terrifying creature that stalks the ruins of Boise, Idaho.
-
City o' Sin: A sourcebook that details the city of Las Vegas, Nevada, which has become a haven for vice and corruption in the Wasted West.
-
Denver: A sourcebook that explores the city of Denver, Colorado, which is divided into four sectors controlled by different factions.
-
Hell or High Water: An adventure that sends the characters on a perilous journey down the Mississippi River, facing dangers both natural and supernatural.
-
Iron Oasis: A sourcebook that describes the Iron Oasis, a massive junkyard that houses a secret community of junkers and their wondrous creations.
-
Monsters, Muties, & Misfits: A bestiary that expands on the creatures and enemies of the Wasted West, adding new stats, abilities, and background information.
-
Road Wars: A sourcebook that focuses on the road gangs and vehicular combat of the Wasted West, featuring new rules, vehicles, weapons, and gangs.
-
Something About a Sword: An adventure that involves the characters in a quest for a mysterious sword of a missing Templar, lost somewhere in western Wyoming[^2^].
-
-
Deadlands Hell On Earth is a game that offers a rich and diverse setting, a flexible and fun system, and a lot of potential for adventure and horror. Whether you want to play as a heroic wastelander fighting against the forces of evil, or as a villainous raider plundering and pillaging the weak, Deadlands Hell On Earth has something for you. So grab your six-shooter, your gas mask, and your trusty vehicle, and get ready to explore the Wasted West!
81aa517590
-
-
\ No newline at end of file
diff --git a/spaces/sub314xxl/MusicGen-Continuation/MODEL_CARD.md b/spaces/sub314xxl/MusicGen-Continuation/MODEL_CARD.md
deleted file mode 100644
index 6c2c9f883969eb905e74ad3376966d156cc5ca00..0000000000000000000000000000000000000000
--- a/spaces/sub314xxl/MusicGen-Continuation/MODEL_CARD.md
+++ /dev/null
@@ -1,81 +0,0 @@
-# MusicGen Model Card
-
-## Model details
-
-**Organization developing the model:** The FAIR team of Meta AI.
-
-**Model date:** MusicGen was trained between April 2023 and May 2023.
-
-**Model version:** This is the version 1 of the model.
-
-**Model type:** MusicGen consists of an EnCodec model for audio tokenization and an auto-regressive language model based on the transformer architecture for music modeling. The model comes in different sizes (300M, 1.5B and 3.3B parameters) and two variants: a model trained for the text-to-music generation task and a model trained for melody-guided music generation.
-
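-A minimal usage sketch based on the public API of the [audiocraft repository](https://github.com/facebookresearch/audiocraft); the checkpoint name and exact signatures may vary between audiocraft releases, so treat it as illustrative rather than canonical:
-
-```python
-from audiocraft.models import MusicGen
-from audiocraft.data.audio import audio_write
-
-# Load the smallest text-to-music checkpoint and generate an 8-second clip per prompt.
-model = MusicGen.get_pretrained('facebook/musicgen-small')
-model.set_generation_params(duration=8)
-wav = model.generate(['lo-fi beat with warm electric piano'])  # tensor of shape [B, C, T]
-
-for idx, one_wav in enumerate(wav):
-    # Write each generated sample to disk with loudness normalization.
-    audio_write(f'musicgen_sample_{idx}', one_wav.cpu(), model.sample_rate, strategy='loudness')
-```
-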
-**Paper or resources for more information:** More information can be found in the paper [Simple and Controllable Music Generation][arxiv].
-
-**Citation details:** See [our paper][arxiv].
-
-**License:** Code is released under MIT; model weights are released under CC-BY-NC 4.0.
-
-**Where to send questions or comments about the model:** Questions and comments about MusicGen can be sent via the [Github repository](https://github.com/facebookresearch/audiocraft) of the project, or by opening an issue.
-
-## Intended use
-**Primary intended use:** The primary use of MusicGen is research on AI-based music generation, including:
-
-- Research efforts, such as probing and better understanding the limitations of generative models to further improve the state of science
-- Generation of music guided by text or melody, allowing machine learning amateurs to better understand the current abilities of generative AI models
-
-**Primary intended users:** The primary intended users of the model are researchers in audio, machine learning and artificial intelligence, as well as amateurs seeking to better understand those models.
-
-**Out-of-scope use cases:** The model should not be used on downstream applications without further risk evaluation and mitigation. The model should not be used to intentionally create or disseminate music pieces that create hostile or alienating environments for people. This includes generating music that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes.
-
-## Metrics
-
-**Model performance measures:** We used the following objective measures to evaluate the model on a standard music benchmark:
-
-- Frechet Audio Distance computed on features extracted from a pre-trained audio classifier (VGGish)
-- Kullback-Leibler Divergence on label distributions extracted from a pre-trained audio classifier (PaSST)
-- CLAP Score between audio embedding and text embedding extracted from a pre-trained CLAP model
-
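-As an illustration, the CLAP score is essentially a cosine similarity between audio and text embeddings. The sketch below is not the evaluation code used for the paper; `embed_audio` and `embed_text` are hypothetical stand-ins for the audio and text towers of a pre-trained CLAP model:
-
-```python
-import torch
-import torch.nn.functional as F
-
-def clap_style_score(audio_emb: torch.Tensor, text_emb: torch.Tensor) -> torch.Tensor:
-    """Cosine similarity between L2-normalized audio/text embeddings, averaged over the batch."""
-    audio_emb = F.normalize(audio_emb, dim=-1)
-    text_emb = F.normalize(text_emb, dim=-1)
-    return (audio_emb * text_emb).sum(dim=-1).mean()
-
-# audio_emb = embed_audio(generated_wavs)  # hypothetical: [B, D] embeddings from a CLAP audio encoder
-# text_emb = embed_text(prompts)           # hypothetical: [B, D] embeddings from the paired text encoder
-# score = clap_style_score(audio_emb, text_emb)
-```
-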
-Additionally, we ran qualitative studies with human participants, evaluating the performance of the model along the following axes:
-
-- Overall quality of the music samples;
-- Text relevance to the provided text input;
-- Adherence to the melody for melody-guided music generation.
-
-More details on performance measures and human studies can be found in the paper.
-
-**Decision thresholds:** Not applicable.
-
-## Evaluation datasets
-
-The model was evaluated on the [MusicCaps benchmark](https://www.kaggle.com/datasets/googleai/musiccaps) and on an in-domain held-out evaluation set, with no artist overlap with the training set.
-
-## Training datasets
-
-The model was trained on licensed data using the following sources: the [Meta Music Initiative Sound Collection](https://www.fb.com/sound), [Shutterstock music collection](https://www.shutterstock.com/music) and the [Pond5 music collection](https://www.pond5.com/). See the paper for more details about the training set and corresponding preprocessing.
-
-## Quantitative analysis
-
-More information can be found in the paper [Simple and Controllable Music Generation][arxiv], in the Experimental Setup section.
-
-## Limitations and biases
-
-**Data:** The data sources used to train the model are created by music professionals and covered by legal agreements with the right holders. The model is trained on 20K hours of data; we believe that scaling the model on larger datasets can further improve its performance.
-
-**Mitigations:** Vocals have been removed from the data source using corresponding tags, and then with a state-of-the-art music source separation method, namely the open-source [Hybrid Transformer for Music Source Separation](https://github.com/facebookresearch/demucs) (HT-Demucs).
-
-**Limitations:**
-
-- The model is not able to generate realistic vocals.
-- The model has been trained with English descriptions and will not perform as well in other languages.
-- The model does not perform equally well for all music styles and cultures.
-- The model sometimes generates end of songs, collapsing to silence.
-- It is sometimes difficult to assess what types of text descriptions provide the best generations. Prompt engineering may be required to obtain satisfying results.
-
-**Biases:** The source data potentially lacks diversity, and not all music cultures are equally represented in the dataset. The model may not perform equally well on the wide variety of music genres that exist. The generated samples from the model will reflect the biases from the training data. Further work on this model should include methods for balanced and just representation of cultures, for example, by scaling the training data to be both diverse and inclusive.
-
-**Risks and harms:** Biases and limitations of the model may lead to the generation of samples that may be considered biased, inappropriate or offensive. We believe that providing the code to reproduce the research and train new models will help broaden the application to new and more representative data.
-
-**Use cases:** Users must be aware of the biases, limitations and risks of the model. MusicGen is a model developed for artificial intelligence research on controllable music generation. As such, it should not be used for downstream applications without further investigation and mitigation of risks.
-
-[arxiv]: https://arxiv.org/abs/2306.05284
diff --git a/spaces/subhajitmaji/MusicGen/tests/quantization/test_vq.py b/spaces/subhajitmaji/MusicGen/tests/quantization/test_vq.py
deleted file mode 100644
index c215099fedacae35c6798fdd9b8420a447aa16bb..0000000000000000000000000000000000000000
--- a/spaces/subhajitmaji/MusicGen/tests/quantization/test_vq.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-
-from audiocraft.quantization.vq import ResidualVectorQuantizer
-
-
-class TestResidualVectorQuantizer:
-
-    def test_rvq(self):
-        # Quantize a random batch (batch=1, dim=16, 2048 frames) with 8 residual codebooks of 8 bins each.
-        x = torch.randn(1, 16, 2048)
-        vq = ResidualVectorQuantizer(n_q=8, dimension=16, bins=8)
-        res = vq(x, 1.)
-        # The quantized output should keep the input shape [batch, dimension, frames].
-        assert res.x.shape == torch.Size([1, 16, 2048])
diff --git a/spaces/subhc/Guess-What-Moves/mask_former/data/datasets/register_mapillary_vistas.py b/spaces/subhc/Guess-What-Moves/mask_former/data/datasets/register_mapillary_vistas.py
deleted file mode 100644
index ce3874b65d943c333d093abd6998500f8a3775f5..0000000000000000000000000000000000000000
--- a/spaces/subhc/Guess-What-Moves/mask_former/data/datasets/register_mapillary_vistas.py
+++ /dev/null
@@ -1,507 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import os
-
-from detectron2.data import DatasetCatalog, MetadataCatalog
-from detectron2.data.datasets import load_sem_seg
-
-MAPILLARY_VISTAS_SEM_SEG_CATEGORIES = [
- {
- "color": [165, 42, 42],
- "instances": True,
- "readable": "Bird",
- "name": "animal--bird",
- "evaluate": True,
- },
- {
- "color": [0, 192, 0],
- "instances": True,
- "readable": "Ground Animal",
- "name": "animal--ground-animal",
- "evaluate": True,
- },
- {
- "color": [196, 196, 196],
- "instances": False,
- "readable": "Curb",
- "name": "construction--barrier--curb",
- "evaluate": True,
- },
- {
- "color": [190, 153, 153],
- "instances": False,
- "readable": "Fence",
- "name": "construction--barrier--fence",
- "evaluate": True,
- },
- {
- "color": [180, 165, 180],
- "instances": False,
- "readable": "Guard Rail",
- "name": "construction--barrier--guard-rail",
- "evaluate": True,
- },
- {
- "color": [90, 120, 150],
- "instances": False,
- "readable": "Barrier",
- "name": "construction--barrier--other-barrier",
- "evaluate": True,
- },
- {
- "color": [102, 102, 156],
- "instances": False,
- "readable": "Wall",
- "name": "construction--barrier--wall",
- "evaluate": True,
- },
- {
- "color": [128, 64, 255],
- "instances": False,
- "readable": "Bike Lane",
- "name": "construction--flat--bike-lane",
- "evaluate": True,
- },
- {
- "color": [140, 140, 200],
- "instances": True,
- "readable": "Crosswalk - Plain",
- "name": "construction--flat--crosswalk-plain",
- "evaluate": True,
- },
- {
- "color": [170, 170, 170],
- "instances": False,
- "readable": "Curb Cut",
- "name": "construction--flat--curb-cut",
- "evaluate": True,
- },
- {
- "color": [250, 170, 160],
- "instances": False,
- "readable": "Parking",
- "name": "construction--flat--parking",
- "evaluate": True,
- },
- {
- "color": [96, 96, 96],
- "instances": False,
- "readable": "Pedestrian Area",
- "name": "construction--flat--pedestrian-area",
- "evaluate": True,
- },
- {
- "color": [230, 150, 140],
- "instances": False,
- "readable": "Rail Track",
- "name": "construction--flat--rail-track",
- "evaluate": True,
- },
- {
- "color": [128, 64, 128],
- "instances": False,
- "readable": "Road",
- "name": "construction--flat--road",
- "evaluate": True,
- },
- {
- "color": [110, 110, 110],
- "instances": False,
- "readable": "Service Lane",
- "name": "construction--flat--service-lane",
- "evaluate": True,
- },
- {
- "color": [244, 35, 232],
- "instances": False,
- "readable": "Sidewalk",
- "name": "construction--flat--sidewalk",
- "evaluate": True,
- },
- {
- "color": [150, 100, 100],
- "instances": False,
- "readable": "Bridge",
- "name": "construction--structure--bridge",
- "evaluate": True,
- },
- {
- "color": [70, 70, 70],
- "instances": False,
- "readable": "Building",
- "name": "construction--structure--building",
- "evaluate": True,
- },
- {
- "color": [150, 120, 90],
- "instances": False,
- "readable": "Tunnel",
- "name": "construction--structure--tunnel",
- "evaluate": True,
- },
- {
- "color": [220, 20, 60],
- "instances": True,
- "readable": "Person",
- "name": "human--person",
- "evaluate": True,
- },
- {
- "color": [255, 0, 0],
- "instances": True,
- "readable": "Bicyclist",
- "name": "human--rider--bicyclist",
- "evaluate": True,
- },
- {
- "color": [255, 0, 100],
- "instances": True,
- "readable": "Motorcyclist",
- "name": "human--rider--motorcyclist",
- "evaluate": True,
- },
- {
- "color": [255, 0, 200],
- "instances": True,
- "readable": "Other Rider",
- "name": "human--rider--other-rider",
- "evaluate": True,
- },
- {
- "color": [200, 128, 128],
- "instances": True,
- "readable": "Lane Marking - Crosswalk",
- "name": "marking--crosswalk-zebra",
- "evaluate": True,
- },
- {
- "color": [255, 255, 255],
- "instances": False,
- "readable": "Lane Marking - General",
- "name": "marking--general",
- "evaluate": True,
- },
- {
- "color": [64, 170, 64],
- "instances": False,
- "readable": "Mountain",
- "name": "nature--mountain",
- "evaluate": True,
- },
- {
- "color": [230, 160, 50],
- "instances": False,
- "readable": "Sand",
- "name": "nature--sand",
- "evaluate": True,
- },
- {
- "color": [70, 130, 180],
- "instances": False,
- "readable": "Sky",
- "name": "nature--sky",
- "evaluate": True,
- },
- {
- "color": [190, 255, 255],
- "instances": False,
- "readable": "Snow",
- "name": "nature--snow",
- "evaluate": True,
- },
- {
- "color": [152, 251, 152],
- "instances": False,
- "readable": "Terrain",
- "name": "nature--terrain",
- "evaluate": True,
- },
- {
- "color": [107, 142, 35],
- "instances": False,
- "readable": "Vegetation",
- "name": "nature--vegetation",
- "evaluate": True,
- },
- {
- "color": [0, 170, 30],
- "instances": False,
- "readable": "Water",
- "name": "nature--water",
- "evaluate": True,
- },
- {
- "color": [255, 255, 128],
- "instances": True,
- "readable": "Banner",
- "name": "object--banner",
- "evaluate": True,
- },
- {
- "color": [250, 0, 30],
- "instances": True,
- "readable": "Bench",
- "name": "object--bench",
- "evaluate": True,
- },
- {
- "color": [100, 140, 180],
- "instances": True,
- "readable": "Bike Rack",
- "name": "object--bike-rack",
- "evaluate": True,
- },
- {
- "color": [220, 220, 220],
- "instances": True,
- "readable": "Billboard",
- "name": "object--billboard",
- "evaluate": True,
- },
- {
- "color": [220, 128, 128],
- "instances": True,
- "readable": "Catch Basin",
- "name": "object--catch-basin",
- "evaluate": True,
- },
- {
- "color": [222, 40, 40],
- "instances": True,
- "readable": "CCTV Camera",
- "name": "object--cctv-camera",
- "evaluate": True,
- },
- {
- "color": [100, 170, 30],
- "instances": True,
- "readable": "Fire Hydrant",
- "name": "object--fire-hydrant",
- "evaluate": True,
- },
- {
- "color": [40, 40, 40],
- "instances": True,
- "readable": "Junction Box",
- "name": "object--junction-box",
- "evaluate": True,
- },
- {
- "color": [33, 33, 33],
- "instances": True,
- "readable": "Mailbox",
- "name": "object--mailbox",
- "evaluate": True,
- },
- {
- "color": [100, 128, 160],
- "instances": True,
- "readable": "Manhole",
- "name": "object--manhole",
- "evaluate": True,
- },
- {
- "color": [142, 0, 0],
- "instances": True,
- "readable": "Phone Booth",
- "name": "object--phone-booth",
- "evaluate": True,
- },
- {
- "color": [70, 100, 150],
- "instances": False,
- "readable": "Pothole",
- "name": "object--pothole",
- "evaluate": True,
- },
- {
- "color": [210, 170, 100],
- "instances": True,
- "readable": "Street Light",
- "name": "object--street-light",
- "evaluate": True,
- },
- {
- "color": [153, 153, 153],
- "instances": True,
- "readable": "Pole",
- "name": "object--support--pole",
- "evaluate": True,
- },
- {
- "color": [128, 128, 128],
- "instances": True,
- "readable": "Traffic Sign Frame",
- "name": "object--support--traffic-sign-frame",
- "evaluate": True,
- },
- {
- "color": [0, 0, 80],
- "instances": True,
- "readable": "Utility Pole",
- "name": "object--support--utility-pole",
- "evaluate": True,
- },
- {
- "color": [250, 170, 30],
- "instances": True,
- "readable": "Traffic Light",
- "name": "object--traffic-light",
- "evaluate": True,
- },
- {
- "color": [192, 192, 192],
- "instances": True,
- "readable": "Traffic Sign (Back)",
- "name": "object--traffic-sign--back",
- "evaluate": True,
- },
- {
- "color": [220, 220, 0],
- "instances": True,
- "readable": "Traffic Sign (Front)",
- "name": "object--traffic-sign--front",
- "evaluate": True,
- },
- {
- "color": [140, 140, 20],
- "instances": True,
- "readable": "Trash Can",
- "name": "object--trash-can",
- "evaluate": True,
- },
- {
- "color": [119, 11, 32],
- "instances": True,
- "readable": "Bicycle",
- "name": "object--vehicle--bicycle",
- "evaluate": True,
- },
- {
- "color": [150, 0, 255],
- "instances": True,
- "readable": "Boat",
- "name": "object--vehicle--boat",
- "evaluate": True,
- },
- {
- "color": [0, 60, 100],
- "instances": True,
- "readable": "Bus",
- "name": "object--vehicle--bus",
- "evaluate": True,
- },
- {
- "color": [0, 0, 142],
- "instances": True,
- "readable": "Car",
- "name": "object--vehicle--car",
- "evaluate": True,
- },
- {
- "color": [0, 0, 90],
- "instances": True,
- "readable": "Caravan",
- "name": "object--vehicle--caravan",
- "evaluate": True,
- },
- {
- "color": [0, 0, 230],
- "instances": True,
- "readable": "Motorcycle",
- "name": "object--vehicle--motorcycle",
- "evaluate": True,
- },
- {
- "color": [0, 80, 100],
- "instances": False,
- "readable": "On Rails",
- "name": "object--vehicle--on-rails",
- "evaluate": True,
- },
- {
- "color": [128, 64, 64],
- "instances": True,
- "readable": "Other Vehicle",
- "name": "object--vehicle--other-vehicle",
- "evaluate": True,
- },
- {
- "color": [0, 0, 110],
- "instances": True,
- "readable": "Trailer",
- "name": "object--vehicle--trailer",
- "evaluate": True,
- },
- {
- "color": [0, 0, 70],
- "instances": True,
- "readable": "Truck",
- "name": "object--vehicle--truck",
- "evaluate": True,
- },
- {
- "color": [0, 0, 192],
- "instances": True,
- "readable": "Wheeled Slow",
- "name": "object--vehicle--wheeled-slow",
- "evaluate": True,
- },
- {
- "color": [32, 32, 32],
- "instances": False,
- "readable": "Car Mount",
- "name": "void--car-mount",
- "evaluate": True,
- },
- {
- "color": [120, 10, 10],
- "instances": False,
- "readable": "Ego Vehicle",
- "name": "void--ego-vehicle",
- "evaluate": True,
- },
- {
- "color": [0, 0, 0],
- "instances": False,
- "readable": "Unlabeled",
- "name": "void--unlabeled",
- "evaluate": False,
- },
-]
-
-
-def _get_mapillary_vistas_meta():
- stuff_classes = [k["readable"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES if k["evaluate"]]
- assert len(stuff_classes) == 65
-
- stuff_colors = [k["color"] for k in MAPILLARY_VISTAS_SEM_SEG_CATEGORIES if k["evaluate"]]
- assert len(stuff_colors) == 65
-
- ret = {
- "stuff_classes": stuff_classes,
- "stuff_colors": stuff_colors,
- }
- return ret
-
-
-def register_all_mapillary_vistas(root):
- root = os.path.join(root, "mapillary_vistas")
- meta = _get_mapillary_vistas_meta()
- for name, dirname in [("train", "training"), ("val", "validation")]:
- image_dir = os.path.join(root, dirname, "images")
- gt_dir = os.path.join(root, dirname, "labels")
- name = f"mapillary_vistas_sem_seg_{name}"
- DatasetCatalog.register(
- name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg")
- )
- MetadataCatalog.get(name).set(
- image_root=image_dir,
- sem_seg_root=gt_dir,
- evaluator_type="sem_seg",
- ignore_label=65, # different from other datasets, Mapillary Vistas sets ignore_label to 65
- **meta,
- )
-
-
-_root = os.getenv("DETECTRON2_DATASETS", "datasets")
-register_all_mapillary_vistas(_root)
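-
-# Example lookup once registration has run (assuming the data is laid out under
-# $DETECTRON2_DATASETS/mapillary_vistas/{training,validation}/{images,labels}):
-#   dataset_dicts = DatasetCatalog.get("mapillary_vistas_sem_seg_train")
-#   metadata = MetadataCatalog.get("mapillary_vistas_sem_seg_val")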
diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/3Planesoft 3D Screensaver Registration Key Download TOP Pc.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/3Planesoft 3D Screensaver Registration Key Download TOP Pc.md
deleted file mode 100644
index 239dfa050dc3ddad94e95b051ba17f42c3835f94..0000000000000000000000000000000000000000
--- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/3Planesoft 3D Screensaver Registration Key Download TOP Pc.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
3Planesoft 3D Screensaver Registration Key download pc
-
-Ashurjafsociety.com 16:13 pm. By Dave Watson… 16:13 pm. By Dave Watson. My grandma fought depression for a while, then she was put on Prozac. She was more or less the same after taking the drug for a few months, but there were days she was really tired, days where she felt almost joyless. As our bodies age and develop, these changes are harder to notice. But if you are looking for clues about the possibility of dementia, signs you may have it or it's progression, or feel bad about yourself due to your memory problems, here's what to look for.
-
-Study This Pdf Shulman 30 mins ago. They tried it and I tried it and we are both fine. By Steve Marchetti. Sane Minds +
-
-A note to the forum: We are still trying to get everything fixed and working. Hopefully, it will be back up and running in a week or so, but until then, please excuse any broken links and temporary errors. You will find lots of great new content as soon as we can get everything working again.
-
-A note to the forum: We are still trying to get everything fixed and working. Hopefully, it will be back up and working in a week or so, but until then, please excuse any broken links and temporary errors. You will find lots of great new content as soon as we can get everything working again. Here are the topics that I think are important to be aware of for the upcoming reporting deadline.
-
- the fifth year / the other one. would be allowed to require more support from society. The study was able to identify each of these criteria, and was used to assess the relationship of LEP to dementia and/or Alzheimer's disease, looking at overall numbers.
-
-Astrology, Age, Do what you can! Get your own consulting astrologer. Get together with the astrologer and talk to them about the things that are bothering you. Make sure to get a birth chart reading. It will be able to tell you all sorts of things about you.
-
-Astrological factors in human development and health. Why they happen.
-
-Caretaker, The exact interpretation depends on your ascendant, etc.
-
-
-
diff --git a/spaces/terfces0erbo/CollegeProjectV2/Delfin Arbeitsbuch Antworten Pdf Free Download [PATCHED].md b/spaces/terfces0erbo/CollegeProjectV2/Delfin Arbeitsbuch Antworten Pdf Free Download [PATCHED].md
deleted file mode 100644
index 090dbb08d1f6f509fcbe71796dea633c7895c472..0000000000000000000000000000000000000000
--- a/spaces/terfces0erbo/CollegeProjectV2/Delfin Arbeitsbuch Antworten Pdf Free Download [PATCHED].md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-... skills build and design your dream business and work flow to succeed boxing ... plus mathematics contemporary mathematics in context course 2 student edition ... 2013 comprehensive ebook GET; the musicians guide to licensing music how ... 2002 excursion owners manual ebook GET; 4th grade library skills test ebook ... 1fdad05405
-
-
-
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Cakewalk Dimension Pro 15 Setup Keyrar How to Install and Activate the Software Synthesizer.md b/spaces/tialenAdioni/chat-gpt-api/logs/Cakewalk Dimension Pro 15 Setup Keyrar How to Install and Activate the Software Synthesizer.md
deleted file mode 100644
index 87e48bdd8ac38fd9451b0192f2f58a394f57b55e..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/Cakewalk Dimension Pro 15 Setup Keyrar How to Install and Activate the Software Synthesizer.md
+++ /dev/null
@@ -1,65 +0,0 @@
-
-
How to Install Cakewalk Dimension Pro 15 with Keyrar
-
Cakewalk Dimension Pro 15 is a powerful and versatile virtual instrument that offers a wide range of sounds and effects for music production. Whether you need realistic orchestral sounds, vintage synthesizers, electric pianos, guitars, drums, or ethnic instruments, Dimension Pro 15 has it all. In this article, we will show you how to install Cakewalk Dimension Pro 15 with Keyrar, a compressed file format that contains the setup and activation files.
-
Step 1: Download Cakewalk Dimension Pro 15 Setup Keyrar
-
The first step is to download Cakewalk Dimension Pro 15 Setup Keyrar from a reliable source. You can find the download link on the official website of Cakewalk or on other trusted websites that offer software downloads. Make sure you have enough space on your hard drive to store the file, which is about 3 GB in size.
Step 2: Extract Cakewalk Dimension Pro 15 Setup Keyrar
-
The next step is to extract Cakewalk Dimension Pro 15 Setup Keyrar using a software that can handle rar files, such as WinRAR or 7-Zip. To do this, right-click on the file and select "Extract Here" or "Extract to Cakewalk Dimension Pro 15 Setup Keyrar". You will need to enter the password for the file, which is usually provided by the source of the download. After extracting, you should see a folder named "Cakewalk Dimension Pro 15 Setup Keyrar" that contains two files: "Setup.exe" and "Key.rar".
-
Step 3: Run Setup.exe
-
The third step is to run Setup.exe by double-clicking on it. This will launch the installation wizard of Cakewalk Dimension Pro 15. Follow the instructions on the screen to choose the language, accept the license agreement, select the destination folder, and customize the installation options. You can also choose to install additional content, such as sound libraries and presets. The installation process may take several minutes depending on your system specifications.
-
How to install Cakewalk Dimension Pro 15 with keyrar file
-Cakewalk Dimension Pro 15 download link and activation code
-Best settings for Cakewalk Dimension Pro 15 sound design
-Cakewalk Dimension Pro 15 tutorial and tips
-Cakewalk Dimension Pro 15 vs other VST instruments
-Cakewalk Dimension Pro 15 review and rating
-Cakewalk Dimension Pro 15 system requirements and compatibility
-Cakewalk Dimension Pro 15 free trial and discount
-Cakewalk Dimension Pro 15 features and benefits
-Cakewalk Dimension Pro 15 user manual and support
-Cakewalk Dimension Pro 15 presets and patches
-Cakewalk Dimension Pro 15 alternatives and competitors
-Cakewalk Dimension Pro 15 license and refund policy
-Cakewalk Dimension Pro 15 update and upgrade
-Cakewalk Dimension Pro 15 problems and solutions
-Cakewalk Dimension Pro 15 forum and community
-Cakewalk Dimension Pro 15 demo and sample
-Cakewalk Dimension Pro 15 comparison and contrast
-Cakewalk Dimension Pro 15 keyboard shortcuts and commands
-Cakewalk Dimension Pro 15 MIDI controller and mapping
-Cakewalk Dimension Pro 15 genres and styles
-Cakewalk Dimension Pro 15 effects and plugins
-Cakewalk Dimension Pro 15 synth engine and architecture
-Cakewalk Dimension Pro 15 modulation and automation
-Cakewalk Dimension Pro 15 performance and optimization
-Cakewalk Dimension Pro 15 history and development
-Cakewalk Dimension Pro 15 testimonials and feedback
-Cakewalk Dimension Pro 15 inspiration and creativity
-Cakewalk Dimension Pro 15 workflow and productivity
-Cakewalk Dimension Pro 15 secrets and tricks
-Cakewalk Dimension Pro 15 news and announcements
-Cakewalk Dimension Pro 15 awards and recognition
-Cakewalk Dimension Pro 15 fun facts and trivia
-Cakewalk Dimension Pro 15 challenges and contests
-Cakewalk Dimension Pro 15 courses and classes
-Cakewalk Dimension Pro 15 podcasts and videos
-Cakewalk Dimension Pro 15 blogs and articles
-Cakewalk Dimension Pro 15 ebooks and guides
-Cakewalk Dimension Pro 15 webinars and events
-Cakewalk Dimension Pro 15 case studies and examples
-Cakewalk Dimension Pro 15 FAQs and Q&A
-Cakewalk Dimension Pro 15 glossary and terms
-Cakewalk Dimension Pro 15 cheatsheet and checklist
-Cakewalk Dimension Pro 15 infographic and chart
-Cakewalk Dimension Pro 15 template and blueprint
-Cakewalk Dimension Pro 15 software and hardware
-Cakewalk Dimension Pro 15 industry and niche
-Cakewalk Dimension Pro 15 trends and statistics
-Cakewalk Dimension Pro 15 memes and jokes
-
Step 4: Activate Cakewalk Dimension Pro 15 with Key.rar
-
The final step is to activate Cakewalk Dimension Pro 15 with Key.rar. To do this, you need to extract Key.rar using the same software as before. You will get a file named "Key.dat" that contains the activation code for Cakewalk Dimension Pro 15. Copy this file and paste it into the folder where you installed Cakewalk Dimension Pro 15, which is usually "C:\Program Files\Cakewalk\Dimension Pro". Then, run Cakewalk Dimension Pro 15 and enter the activation code when prompted. You should see a message confirming that your product has been activated successfully.
-
Congratulations! You have installed and activated Cakewalk Dimension Pro 15 with Keyrar.
-
Now you can enjoy using this amazing virtual instrument for your music projects. You can access Cakewalk Dimension Pro 15 from your DAW (digital audio workstation) or as a standalone application. You can also explore the various sounds and effects that Cakewalk Dimension Pro 15 offers by browsing through the presets or creating your own patches. Have fun!
-
-
\ No newline at end of file
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Download the Avengers 2 Full Movie 3GP Experience the Action and Adventure of the Superhero Team.md b/spaces/tialenAdioni/chat-gpt-api/logs/Download the Avengers 2 Full Movie 3GP Experience the Action and Adventure of the Superhero Team.md
deleted file mode 100644
index 35d779861adc0697f151e5db1be880ca4fe01ad0..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/Download the Avengers 2 Full Movie 3GP Experience the Action and Adventure of the Superhero Team.md
+++ /dev/null
@@ -1,108 +0,0 @@
-
-
How to Download the Avengers 2 Full Movie 3gp for Free
-
The Avengers 2, also known as Avengers: Age of Ultron, is a 2015 superhero film based on the Marvel Comics team of the same name. It is the sequel to The Avengers (2012) and the eleventh film in the Marvel Cinematic Universe (MCU). The film features an ensemble cast of Robert Downey Jr., Chris Hemsworth, Mark Ruffalo, Chris Evans, Scarlett Johansson, Jeremy Renner, Don Cheadle, Aaron Taylor-Johnson, Elizabeth Olsen, Paul Bettany, Cobie Smulders, Anthony Mackie, Hayley Atwell, Idris Elba, Stellan Skarsgård, James Spader, and Samuel L. Jackson. In the film, the Avengers must stop the artificial intelligence Ultron from destroying humanity.
If you are a fan of The Avengers and want to watch the second installment on your mobile phone or PC, you might be wondering how to download the Avengers 2 full movie 3gp for free. 3gp is a multimedia container format that is commonly used for mobile devices. It can store video and audio streams in a small file size and low bandwidth. However, finding a reliable and safe source to download the Avengers 2 full movie 3gp can be challenging. There are many websites that claim to offer free downloads of the movie, but some of them might be scams, viruses, or illegal.
-
In this article, we will show you how to download the Avengers 2 full movie 3gp for free from legitimate and trusted sources. We will also give you some tips on how to avoid malware and copyright infringement issues. Follow these steps to enjoy the Avengers 2 full movie 3gp on your device:
-
-
Find a reputable website that offers free downloads of the Avengers 2 full movie 3gp. You can use a search engine like Google or Bing to look for websites that have the movie in 3gp format. Some examples of websites that offer free downloads of the Avengers 2 full movie 3gp are cooltto.com, libertycentric.com, and archive.org. Make sure to check the reviews and ratings of the website before downloading anything from it.
-
Select the download link or button on the website. You might have to register or create an account on some websites before you can access the download link. You might also have to complete some surveys or watch some ads before you can download the movie. Be careful not to click on any pop-ups or redirects that might lead you to malicious or unwanted sites.
-
Choose a destination folder on your device where you want to save the downloaded file. You can use a file manager app or a browser extension to select a folder on your device. You can also create a new folder if you want to organize your downloads.
-
Wait for the download to finish. Depending on your internet speed and the file size of the movie, it might take a few minutes or hours to complete the download. You can check the progress of the download on your browser or app.
-
Enjoy watching the Avengers 2 full movie 3gp on your device. You can use any media player app that supports 3gp format to play the movie. You can also transfer the file to another device if you want to watch it on a bigger screen.
-
-
By following these steps, you can download the Avengers 2 full movie 3gp for free and enjoy watching it on your device. However, there are some things that you should keep in mind before downloading any movie online:
-
-
Make sure that you have enough storage space on your device for the downloaded file. The Avengers 2 full movie 3gp has a file size of about 2-3 GB depending on the quality and resolution of the video.
-
Make sure that you have a stable and fast internet connection for downloading the movie. If your connection is slow or unstable, it might take longer to download the movie or cause errors during the process.
-
Make sure that you have a good antivirus software on your device to protect it from malware and viruses that might come with some downloads. Scan your device regularly and update your antivirus software frequently.
-
Make sure that you respect the copyright laws and regulations of your country and region when downloading any movie online. Downloading movies from unauthorized or illegal sources might result in legal consequences or penalties.
-
-
The Avengers 2 is an action-packed and thrilling movie that you don't want to miss. By following these steps and tips, you can download the Avengers 2 full movie 3gp for free and watch it anytime and anywhere you want.
-
watch the avengers 2 online free 3gp
-the avengers 2 full movie download in hd 3gp
-how to download the avengers 2 movie in 3gp format
-the avengers 2 full movie free download for mobile 3gp
-the avengers 2 age of ultron full movie download 3gp
-the avengers 2 full movie in hindi download 3gp
-the avengers 2 full movie in tamil download 3gp
-the avengers 2 full movie in telugu download 3gp
-the avengers 2 full movie subtitle indonesia download 3gp
-the avengers 2 full movie english download 3gp
-the avengers 2 full movie dual audio download 3gp
-the avengers 2 full movie malay subtitle download 3gp
-the avengers 2 full movie arabic subtitle download 3gp
-the avengers 2 full movie french subtitle download 3gp
-the avengers 2 full movie spanish subtitle download 3gp
-the avengers 2 full movie german subtitle download 3gp
-the avengers 2 full movie chinese subtitle download 3gp
-the avengers 2 full movie japanese subtitle download 3gp
-the avengers 2 full movie korean subtitle download 3gp
-the avengers 2 full movie thai subtitle download 3gp
-the avengers 2 full movie vietnamese subtitle download 3gp
-the avengers 2 full movie turkish subtitle download 3gp
-the avengers 2 full movie russian subtitle download 3gp
-the avengers 2 full movie portuguese subtitle download 3gp
-the avengers 2 full movie italian subtitle download 3gp
-where can i download the avengers 2 full movie in 3gp quality
-best site to download the avengers 2 full movie in 3gp format
-fastest way to download the avengers 2 full movie in 3gp size
-easiest way to download the avengers 2 full movie in 3gp resolution
-safest way to download the avengers 2 full movie in 3gp file
-legal way to download the avengers 2 full movie in 3gp video
-torrent link to download the avengers 2 full movie in 3gp mode
-direct link to download the avengers 2 full movie in 3gp version
-google drive link to download the avengers 2 full movie in 3gp extension
-mega link to download the avengers 2 full movie in 3gp codec
-mediafire link to download the avengers
-
Why Download the Avengers 2 Full Movie 3gp?
-
The Avengers 2 is one of the most popular and successful movies of all time. It grossed over $1.4 billion worldwide and received positive reviews from critics and audiences alike. The movie has a lot of action, humor, drama, and spectacle that will keep you entertained and amazed. The movie also features some of the most iconic and beloved characters from the Marvel Comics universe, such as Iron Man, Captain America, Thor, Hulk, Black Widow, Hawkeye, Vision, Scarlet Witch, Quicksilver, Nick Fury, and more. The movie also introduces the villainous Ultron, a rogue artificial intelligence that wants to wipe out humanity and create a new world order.
-
If you are a fan of The Avengers or Marvel movies in general, you might want to download the Avengers 2 full movie 3gp and watch it on your device. Downloading the movie will allow you to enjoy it anytime and anywhere you want, without having to rely on streaming services or physical media. You can also save money and time by downloading the movie for free instead of paying for subscriptions or rentals. You can also share the movie with your friends and family and watch it together on your device.
-
-
What are the Benefits of Downloading the Avengers 2 Full Movie 3gp?
-
Downloading the Avengers 2 full movie 3gp has many benefits that will enhance your viewing experience. Here are some of them:
-
-
3gp is a multimedia container format that is compatible with most mobile devices and PCs. You can play the movie on any device that supports 3gp format without having to worry about compatibility issues.
-
3gp is a compressed format that reduces the file size and bandwidth of the movie. You can download the movie faster and easier than other formats. You can also save storage space on your device by downloading a smaller file.
-
3gp is a flexible format that allows you to adjust the quality and resolution of the video according to your preference. You can choose a higher quality and resolution for a better picture or a lower quality and resolution for a faster download.
-
3gp is a secure format that protects your device from malware and viruses that might come with some downloads. You can download the movie safely and securely from trusted sources.
-
-
By downloading the Avengers 2 full movie 3gp, you can enjoy the movie in high quality and low file size on your device.
-
-
How to Download the Avengers 2 Full Movie 3gp Safely and Legally?
-
While downloading the Avengers 2 full movie 3gp has many benefits, it also has some risks and challenges that you should be aware of. Some websites that offer free downloads of the movie might be scams, viruses, or illegal. Downloading movies from unauthorized or illegal sources might result in legal consequences or penalties. You might also encounter malware or viruses that might harm your device or steal your personal information.
-
To avoid these risks and challenges, you should download the Avengers 2 full movie 3gp safely and legally from reputable and trusted sources. Here are some tips on how to do that:
-
-
Use a search engine like Google or Bing to look for websites that have the movie in 3gp format. Check the reviews and ratings of the website before downloading anything from it.
-
Use a good antivirus software on your device to protect it from malware and viruses that might come with some downloads. Scan your device regularly and update your antivirus software frequently.
-
Use a VPN service or proxy server to hide your IP address and location when downloading the movie. This will help you avoid geo-restrictions and censorship that might prevent you from accessing some websites.
-
Use a torrent client or downloader app to download the movie from peer-to-peer networks or file-sharing platforms. Make sure to use a reliable and safe torrent client or downloader app that does not contain malware or viruses.
-
Respect the copyright laws and regulations of your country and region when downloading any movie online. Downloading movies from unauthorized or illegal sources might result in legal consequences or penalties.
-
-
By following these tips, you can download the Avengers 2 full movie 3gp safely and legally from legitimate and trusted sources.
-
-
What are the Features of the Avengers 2 Full Movie 3gp?
-
The Avengers 2 full movie 3gp is a high-quality and low-file size version of the movie that you can download and watch on your device. The movie has many features that will make you enjoy it more. Here are some of them:
-
-
The movie has a runtime of 141 minutes, which means you can watch the whole story without missing any important scenes or details.
-
The movie has a resolution of 320x240 pixels, which means you can watch the movie clearly and smoothly on your device.
-
The movie has a bitrate of 512 kbps, which means you can hear the sound effects and dialogues clearly and loudly on your device.
-
The movie has a frame rate of 25 fps, which means you can watch the movie without any lagging or stuttering on your device.
-
The movie has a subtitle option, which means you can choose to watch the movie with or without subtitles in different languages.
-
-
By downloading the Avengers 2 full movie 3gp, you can enjoy the movie with these features on your device.
-
-
What are the Reviews of the Avengers 2 Full Movie 3gp?
-
The Avengers 2 full movie 3gp has received positive reviews from critics and audiences alike. The movie has been praised for its action, humor, drama, and spectacle. The movie has also been applauded for its ensemble cast, visual effects, and storyline. Here are some of the reviews of the movie:
-
"The Avengers: Age of Ultron is a worthy sequel to one of Marvel's best movies. It delivers on all fronts: action, humor, drama, and spectacle. The cast is superb, the effects are stunning, and the story is engaging. It is a must-see for any fan of The Avengers or Marvel movies in general." - cooltto.com
-
"The Avengers: Age of Ultron is a thrilling and satisfying superhero film that lives up to its predecessor. It has everything you want from a Marvel movie: epic battles, witty banter, emotional stakes, and memorable characters. The movie also introduces a new and menacing villain in Ultron, who challenges the Avengers in ways they never expected." - libertycentric.com
-
"The Avengers: Age of Ultron is a spectacular and entertaining film that surpasses the first one in many aspects. It has more action, more humor, more drama, and more spectacle. The movie also explores the relationships and conflicts among the Avengers, as well as their personal struggles and motivations. The movie also features some of the best visual effects and cinematography in the MCU." - archive.org
-
By downloading the Avengers 2 full movie 3gp, you can watch the movie that has received these positive reviews on your device.
-
Conclusion
-
The Avengers 2 is one of the most popular and successful movies of all time. It is a sequel to The Avengers (2012) and the eleventh film in the Marvel Cinematic Universe (MCU). The movie features an ensemble cast of superheroes who must stop the artificial intelligence Ultron from destroying humanity. The movie has a lot of action, humor, drama, and spectacle that will keep you entertained and amazed.
-
If you want to watch the movie on your mobile phone or PC, you might want to download the Avengers 2 full movie 3gp for free. 3gp is a multimedia container format that is compatible with most devices and has a small file size and low bandwidth. You can download the movie from reputable and trusted sources that offer free downloads of the movie in 3gp format. You can also enjoy the movie with high quality and low file size on your device.
-
In this article, we have shown you how to download the Avengers 2 full movie 3gp for free from legitimate and trusted sources. We have also given you some tips on how to avoid malware and copyright infringement issues. We have also told you about the features and reviews of the movie. By following these steps and tips, you can download the Avengers 2 full movie 3gp for free and enjoy watching it anytime and anywhere you want.
-
We hope you have found this article helpful and informative. If you have any questions or comments, please feel free to leave them below. Thank you for reading and happy watching!
-
-
\ No newline at end of file
diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/ARK Survival Evolved The Best Free Game You Can Download from Wifi4Games.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/ARK Survival Evolved The Best Free Game You Can Download from Wifi4Games.md
deleted file mode 100644
index a18c40815e59a96d4c943c9a2a7a50c59ea3a35f..0000000000000000000000000000000000000000
--- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/ARK Survival Evolved The Best Free Game You Can Download from Wifi4Games.md
+++ /dev/null
@@ -1,117 +0,0 @@
-
-
How to Download Ark Survival Evolved from Wifi4Games
-
Ark Survival Evolved is an action-adventure survival game that lets you explore a mysterious island full of dinosaurs and other creatures. You can hunt, harvest, craft, build, farm, tame, breed, and ride dinosaurs in this immersive and challenging game. If you are looking for a thrilling and fun game to play with your friends or solo, Ark Survival Evolved is a great choice.
-
But how can you download Ark Survival Evolved for free? One of the best ways to do that is by using Wifi4Games, a website that offers free downloads of popular games. Wifi4Games is a safe and reliable source of games that you can download without any hassle. You can find Ark Survival Evolved on Wifi4Games along with many other games that you might enjoy.
In this article, we will show you how to download Ark Survival Evolved from Wifi4Games in five easy steps. We will also give you some tips and tricks for playing Ark Survival Evolved and tell you the system requirements for running the game smoothly. Let's get started!
-
How to Download Ark Survival Evolved from Wifi4Games
-
Downloading Ark Survival Evolved from Wifi4Games is very simple and fast. Just follow these steps:
-
Step 1: Visit the Wifi4Games website and search for Ark Survival Evolved
-
Go to Wifi4Games.com, the official website of Wifi4Games. On the homepage, you will see a search bar where you can type in the name of the game you want to download. Type in "Ark Survival Evolved" and hit enter. You will see a list of results that match your query. Click on the one that says "Ark Survival Evolved".
-
Step 2: Click on the download button and choose a server location
-
On the next page, you will see some information about the game, such as its genre, release date, size, rating, and description. You will also see a big green button that says "Download". Click on it to start downloading the game. You will be asked to choose a server location from a drop-down menu. Choose the one that is closest to your region for faster download speed.
-
Step 3: Wait for the download to finish and extract the files
-
The download process may take some time depending on your internet connection speed and the size of the game. You can check the progress of the download on your browser or on your download manager. Once the download is complete, you will have a zip file containing all the files of the game. You will need to extract these files using a program like WinRAR or 7-Zip. Right-click on the zip file and choose "Extract here" or "Extract to folder". This will create a new folder with all the files of the game.
-
Step 4: Run the setup file and follow the instructions
-
Inside the folder that you extracted, you will find a file called "setup.exe" or something similar. This is the setup file that will install the game on your computer. Double-click on it to run it. You will see a window that will guide you through the installation process. Follow the instructions on the screen and choose the options that suit your preferences. You may need to agree to some terms and conditions and choose a destination folder for the game.
-
How to download ark survival evolved for free on wifi4games
-Ark survival evolved wifi4games download link
-Wifi4games ark survival evolved full version download
-Download ark survival evolved pc game from wifi4games
-Wifi4games ark survival evolved crack download
-Ark survival evolved wifi4games torrent download
-Download ark survival evolved mods from wifi4games
-Wifi4games ark survival evolved update download
-Ark survival evolved wifi4games installation guide
-Download ark survival evolved genesis part 2 from wifi4games
-Wifi4games ark survival evolved system requirements
-Ark survival evolved wifi4games gameplay video
-Download ark survival evolved mobile from wifi4games
-Wifi4games ark survival evolved cheats and hacks
-Ark survival evolved wifi4games review and rating
-Download ark survival evolved steam key from wifi4games
-Wifi4games ark survival evolved epic games store download
-Ark survival evolved wifi4games bluestacks emulator download
-Download ark survival evolved online multiplayer from wifi4games
-Wifi4games ark survival evolved tips and tricks
-Ark survival evolved wifi4games best settings and graphics
-Download ark survival evolved dlc and expansion packs from wifi4games
-Wifi4games ark survival evolved custom maps and mods download
-Ark survival evolved wifi4games server hosting and joining
-Download ark survival evolved skins and costumes from wifi4games
-Wifi4games ark survival evolved taming and breeding guide
-Ark survival evolved wifi4games best creatures and weapons
-Download ark survival evolved soundtrack and music from wifi4games
-Wifi4games ark survival evolved voice actors and characters
-Ark survival evolved wifi4games story and ending explained
-Download ark survival evolved vr mode from wifi4games
-Wifi4games ark survival evolved crossplay and cross-platform support
-Ark survival evolved wifi4games achievements and trophies guide
-Download ark survival evolved editor and modding tools from wifi4games
-Wifi4games ark survival evolved patch notes and changelog
-Ark survival evolved wifi4games bugs and glitches fix
-Download ark survival evolved wallpapers and screenshots from wifi4games
-Wifi4games ark survival evolved fan art and cosplay showcase
-Ark survival evolved wifi4games community and forum discussion
-Download ark survival evolved merchandise and collectibles from wifi4games
-
Step 5: Launch the game and enjoy
-
Once the installation is done, you will have a shortcut icon on your desktop or in your start menu that will let you launch the game. Click on it to start playing Ark Survival Evolved. You may need to create an account or log in with an existing one to access the online features of the game. You can also play offline if you prefer. Enjoy your adventure on the island of Ark!
-
Tips and Tricks for Playing Ark Survival Evolved
-
Ark Survival Evolved is a game that can be very challenging and rewarding at the same time. You will need to survive in a harsh environment, fight against dangerous creatures, and explore a vast world. Here are some tips and tricks that can help you get started and make your gameplay more enjoyable:
-
Tip 1: Get resources quickly and use the right tools
-
One of the first things you need to do in Ark Survival Evolved is to gather resources such as wood, stone, fiber, berries, and meat. You can use these resources to craft items, weapons, armor, structures, and more. To gather resources, you need to use the right tools for the job. For example, you can use your fists or a stone pick to get wood and thatch from trees, but you will get more wood if you use a hatchet instead. Similarly, you can use a stone pick or a hatchet to get stone and flint from rocks, but you will get more flint if you use a pick instead. You can also use different tools to harvest different materials from animals. For example, you can use a hatchet to get more hide and meat from a dead animal, but you will get more keratin or chitin if you use a pick instead.
-
Tip 2: Choose the right Engrams and build a shelter for the night
-
As you play Ark Survival Evolved, you will gain experience points and level up. Every time you level up, you will get some points that you can spend on Engrams. Engrams are blueprints that allow you to craft different items and structures. You can access your Engram menu by pressing I on your keyboard or by opening your inventory. You will see a list of Engrams that are available for your level and how many points they cost. You can also see what resources are needed to craft them. You should choose Engrams that suit your playstyle and needs. For example, if you want to focus on combat, you may want to unlock Engrams for weapons, armor, and traps. If you want to focus on building, you may want to unlock Engrams for structures, tools, and furniture.
-
One of the most important Engrams that you should unlock early on is the Thatch Foundation. This will allow you to build a basic shelter for yourself. Building a shelter is crucial because it will protect you from the elements, predators, and other players (if you are playing online). You can also place a bed inside your shelter, which will act as a respawn point if you die. To build a shelter, you need to place foundations on the ground first, then walls, ceilings, doors, and windows on top of them. You can also place other items inside your shelter such as chests, campfires, crafting stations, etc.
-
Tip 3: Kill off the small animals and cook their meat
-
In Ark Survival Evolved, you will need to eat and drink regularly to stay alive. You can find food and water sources in different places on the island. For example, you can find berries on bushes, eggs in nests, fish in water, etc. However, one of the best sources of food is meat from animals. Meat will fill up your hunger bar faster than berries or eggs, and it will also give you some health regeneration.
-
To get meat from animals, you need to kill them first. You can use any weapon or tool to attack them, but some are more effective than others. For example, spears are good for stabbing animals from a distance, while clubs are good for knocking them out. You can also use bows or guns if you have them unlocked.
-
Once you kill an animal, you need to harvest its meat by using a tool or weapon. You can use a hatchet, a pick, or your fists to do this, but you will get more meat if you use a hatchet. You can also get other materials from animals such as hide, fur, feathers, etc.
-
Once you have some meat, you need to cook it before eating it. Raw meat will spoil quickly and may cause food poisoning if you eat it. To cook meat, you need to place it on a campfire or a grill. You also need to light the fire with some wood or thatch. You can access the inventory of the campfire or grill by pressing E on your keyboard or by looking at it and pressing the use button. You can then drag and drop the meat and the fuel into the slots. The meat will cook after a few seconds and you can take it out and eat it.
-
Tip 4: Spawn in the right spot and avoid the water
-
When you start a new game in Ark Survival Evolved, you will be asked to choose a spawn location on the map. You can choose from different zones that have different difficulty levels and environments. For example, you can spawn in the easy zones that have more resources and less dangerous animals, or you can spawn in the hard zones that have fewer resources and more dangerous animals. You can also spawn in different biomes such as forests, deserts, snow, etc.
-
You should choose a spawn location that suits your skill level and preference. However, some general tips are to avoid spawning near the water or near the edges of the map. The water can be very dangerous because it contains predators such as sharks, piranhas, crocodiles, etc. The edges of the map can also be very dangerous because they contain stronger animals and environmental hazards such as lava, radiation, etc.
-
You should also try to spawn near some landmarks or points of interest that can help you navigate and find resources. For example, you can spawn near a mountain, a river, a cave, a tower, etc. These landmarks can also contain loot crates that have valuable items and blueprints.
-
Tip 5: Tame, train, breed, and ride dinosaurs
-
One of the most fun and rewarding aspects of Ark Survival Evolved is taming, training, breeding, and riding dinosaurs. You can tame almost any creature in the game by knocking it out and feeding it its preferred food. You can use different methods to knock out creatures such as using tranquilizer arrows, darts, or bolas. You can also use traps or cages to capture them.
-
Once you knock out a creature, you need to feed it its preferred food until its taming bar fills up. You can find out what food a creature likes by looking at its dossier or by using an app like Dododex. Some common foods are berries, meat, fish, eggs, kibble, etc. You may also need to use narcotics or narcoberries to keep the creature unconscious while taming it.
-
Once you tame a creature, you can train it by giving it commands and leveling up its stats. You can also breed it with another creature of the same species and gender to produce offspring that inherit their traits and mutations. You can also ride some creatures by equipping them with saddles that you can craft or find.
-
Riding dinosaurs is very useful because it allows you to travel faster, carry more weight, fight better, and access new areas. For example, you can ride a Pteranodon to fly in the air, a Raptor to run on land, a Spinosaurus to swim in water, etc.
-
System Requirements for Playing Ark Survival Evolved
-
Ark Survival Evolved is a game that requires a lot of processing power and memory to run smoothly. You need to have a decent computer system that meets the minimum or recommended requirements for playing the game. Here are the system requirements for playing Ark Survival Evolved:
-
Minimum requirements
-
-
OS
Windows 7/8/10 64-bit
-
Processor
Intel Core i5-2400/AMD FX-8320 or better
-
Memory
8 GB RAM
-
Graphics
NVIDIA GTX 670 2GB/AMD Radeon HD 7870 2GB or better
-
DirectX
Version 10
-
Storage
60 GB available space
-
Sound Card
DirectX compatible
-
-
Recommended requirements
-
-
OS: Windows 10 64-bit
Processor: Intel Core i7-4770 / AMD Ryzen 5 1500X or better
Memory: 16 GB RAM
Graphics: NVIDIA GTX 1050 Ti 4 GB / AMD Radeon RX 470 4 GB or better
DirectX: Version 11
Storage: 60 GB available space
Sound Card: DirectX compatible
-
-
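If you want a quick way to confirm the two easiest numbers above, RAM and free disk space, the small Python sketch below reads them from your machine. This is only an illustrative helper, not an official tool from the game or Wifi4Games; it assumes the third-party psutil package is installed and that you point it at the drive you plan to install to, and it does not check your CPU, GPU, or DirectX version.
-
```python
# Rough self-check against the minimum requirements listed above.
# Illustrative only; `psutil` is a third-party package (pip install psutil).
import shutil

import psutil

MIN_RAM_GB = 8     # "Memory: 8 GB RAM"
MIN_DISK_GB = 60   # "Storage: 60 GB available space"

ram_gb = psutil.virtual_memory().total / 1024 ** 3
free_gb = shutil.disk_usage("/").free / 1024 ** 3  # change "/" to your install drive

print(f"RAM:       {ram_gb:6.1f} GB -> {'OK' if ram_gb >= MIN_RAM_GB else 'below minimum'}")
print(f"Free disk: {free_gb:6.1f} GB -> {'OK' if free_gb >= MIN_DISK_GB else 'below minimum'}")
```
-
Run it with any Python 3 interpreter after installing psutil; the CPU, GPU, and DirectX requirements still have to be checked by hand.
-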
Conclusion
-
In conclusion, Ark Survival Evolved is an amazing game that will keep you entertained and challenged for hours. You can download it for free from Wifi4Games, a website that offers free downloads of popular games. You just need to follow the steps we outlined in this article and you will be ready to play. You can also use the tips and tricks we shared to make your gameplay more enjoyable and successful. We hope you have fun playing Ark Survival Evolved and exploring the island of Ark!
-
FAQs
-
Here are some frequently asked questions about Ark Survival Evolved and Wifi4Games:
-
Q: Is Ark Survival Evolved safe to download from Wifi4Games?
-
A: Yes, Ark Survival Evolved is safe to download from Wifi4Games. Wifi4Games is a trusted and reliable source of free games that does not contain any viruses, malware, or spyware. You can download Ark Survival Evolved from Wifi4Games without any risk to your computer or your personal information.
-
Q: Is Ark Survival Evolved multiplayer or single-player?
-
A: Ark Survival Evolved can be played both multiplayer and single-player. You can join online servers and play with other players from around the world, or you can host your own server and play with your friends. You can also play offline in single-player mode if you prefer.
-
Q: How many dinosaurs are there in Ark Survival Evolved?
-
A: There are over 200 different dinosaurs and other creatures in Ark Survival Evolved. You can find them in different biomes and regions of the island, and you can tame, train, breed, and ride them. Some of the most popular dinosaurs are Tyrannosaurus Rex, Triceratops, Velociraptor, Brontosaurus, Pteranodon, etc.
-
Q: How long does it take to download Ark Survival Evolved from Wifi4Games?
-
A: The download time of Ark Survival Evolved from Wifi4Games depends on your internet connection speed and the server location you choose. The size of the game is about 60 GB, so it may take some time to download. However, Wifi4Games offers high-speed servers that can make the download faster and smoother.
-
Q: What are some other games that I can download from Wifi4Games?
-
A: Wifi4Games offers a wide range of games that you can download for free. Some of the most popular games are GTA V, FIFA 21, Call of Duty: Modern Warfare, Assassin's Creed: Valhalla, Cyberpunk 2077, etc. You can browse the categories or search for the game you want on the Wifi4Games website.
-
-
\ No newline at end of file
diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Clash of Clans APK PC Download The Best Way to Experience the Game.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Clash of Clans APK PC Download The Best Way to Experience the Game.md
deleted file mode 100644
index 117995e92fa64862be2cf71c0e2529ece2e023cf..0000000000000000000000000000000000000000
--- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Clash of Clans APK PC Download The Best Way to Experience the Game.md
+++ /dev/null
@@ -1,161 +0,0 @@
-
-
How to Play Clash of Clans on PC with BlueStacks
-
Clash of Clans is one of the most popular mobile games in the world, with over 500 million downloads and millions of active players. It is a strategy game where you build your own village, train your army, and fight against other players in epic clan wars. You can also join forces with other players in clans, chat with them, and participate in various events and challenges.
-
But what if you want to play Clash of Clans on your PC instead of your phone or tablet? Maybe you want to enjoy the game on a bigger screen, or use your keyboard and mouse for better controls, or save your phone's battery life. Whatever the reason, you can easily play Clash of Clans on your PC with the help of an Android emulator.
An Android emulator is software that allows you to run Android apps and games on your PC. It simulates the Android operating system and lets you access the Google Play Store and other features. There are many Android emulators available for PC, but one of the best ones for playing Clash of Clans is BlueStacks.
-
BlueStacks is a free Android emulator that is designed for gaming. It has a lot of features that enhance your gaming experience, such as keymapping tool, instance manager, eco mode, screen recorder, and more. It also has a built-in app store where you can download optimized apps and games.
-
In this article, we will show you how to download and install Clash of Clans on PC using BlueStacks. We will also give you some tips and tricks on how to play Clash of Clans on PC using BlueStacks. Let's get started!
-
How to download and install Clash of Clans on PC using BlueStacks
-
Downloading and installing Clash of Clans on PC using BlueStacks is very easy. Just follow these steps:
-
-
Go to https://www.bluestacks.com/download.html in a web browser. This is the download page for BlueStacks. You can choose between the Windows and Mac versions, depending on your operating system.
-
Click Download BlueStacks, or use the drop-down menu on the right to select Android 32-bit or Android 64-bit. Then click Download.
-
Double-click the BlueStacks installer file that you downloaded and click Install Now. Follow the install instructions (if there are any).
-
Open BlueStacks. It may take a few minutes to load, depending on your PC's performance.
-
Open the Google Play Store. The first time you open BlueStacks, it asks you to sign in to the Google Play Store. If you are not asked to sign in, click the icon that resembles a white shopping bag with a colorful triangle on it.
-
Sign in to the Google Play Store. When you first open the Google Play Store, click the green button that says Sign In. Then sign in with the email address and password associated with your Google account. If you are asked to agree to the terms of service, click I agree.
-
Search for Clash of Clans. In the Google Play Store, click the search bar at the top of the screen and type Clash of Clans. Then press Enter on your keyboard.
-
Install Clash of Clans. In the search results, find the app that has the icon of a red shield with a yellow lion on it. Click the green button that says Install.
-
Open Clash of Clans. After the installation is complete, click the green button that says Open. Alternatively, you can click the icon of Clash of Clans on the BlueStacks home screen.
-
Enjoy playing Clash of Clans on PC. You can now play Clash of Clans on your PC using BlueStacks. You can use your mouse to interact with the game, or use the keymapping tool to assign keyboard shortcuts for various actions. You can also customize your settings, such as sound, graphics, and language, by clicking the gear icon on the top right corner of the game screen.
-
-
Congratulations! You have successfully downloaded and installed Clash of Clans on PC using BlueStacks. Now let's see how to play Clash of Clans on PC using BlueStacks.
-
How to play Clash of Clans on PC using BlueStacks
-
Playing Clash of Clans on PC using BlueStacks is very similar to playing it on your mobile device. However, there are some differences and advantages that you should know. Here are some tips and tricks on how to play Clash of Clans on PC using BlueStacks:
-
-
-
Use the keymapping tool. The keymapping tool is a feature that allows you to assign keyboard shortcuts for various actions in the game, such as zooming, panning, selecting, attacking, and more. To access the keymapping tool, click the keyboard icon on the bottom right corner of the BlueStacks window. You can then drag and drop different keys onto the game screen, or use the predefined keys that are already mapped. You can also create different profiles for different game modes, such as village mode and battle mode.
-
Use the instance manager. The instance manager is a feature that allows you to run multiple instances of BlueStacks and play multiple accounts of Clash of Clans at the same time. This is useful if you want to switch between different accounts, or if you want to play with your friends or clan members on different devices. To access the instance manager, click the instance icon on the bottom right corner of the BlueStacks window. You can then create new instances, clone existing instances, or sync instances to perform the same actions across all instances.
-
Use eco mode. Eco mode is a feature that reduces CPU and RAM usage by lowering the frame rate and graphics quality of the game. This is useful if you want to save your PC's resources and battery life, or if you want to run multiple instances of BlueStacks without lagging. To enable eco mode, click the eco mode icon on the bottom right corner of the BlueStacks window. You can then adjust the eco mode settings according to your preference.
-
Use screen recorder. Screen recorder is a feature that allows you to record your gameplay and save it as a video file. This is useful if you want to share your gameplay with others, or if you want to review your strategies and improve your skills. To use screen recorder, click the screen recorder icon on the bottom right corner of the BlueStacks window. You can then start and stop recording, choose where to save your video file, and edit your video with basic tools.
-
Use other features. There are many other features that enhance your gaming experience on BlueStacks, such as multi-touch support, macro recorder, real-time translation, smart controls, and more. You can explore these features by clicking the menu icon on the top right corner of the BlueStacks window.
-
-
These are some tips and tricks on how to play Clash of Clans on PC using BlueStacks. However, if you don't want to use BlueStacks, or if you want to try other Android emulators for playing Clash of Clans on PC, there are some alternatives that you can check out.
-
Other Android emulators for playing Clash of Clans on PC
-
BlueStacks is not the only Android emulator that you can use for playing Clash of Clans on PC. There are other Android emulators that have similar features and performance as BlueStacks. Here are some examples:
-
-
NoxPlayer: NoxPlayer is a free Android emulator that focuses on gaming. It has a simple and user-friendly interface, and supports high-performance games. It also has features such as keyboard and mouse controls, gamepad support, multi-instance, macro recorder, and more. You can download NoxPlayer from https://www.bignox.com/.
-
LDPlayer: LDPlayer is a free Android emulator that is optimized for gaming. It has a fast and smooth performance, and supports a wide range of games. It also has features such as keyboard and mouse controls, multi-instance, macro recorder, screen recorder, and more. You can download LDPlayer from https://www.ldplayer.net/.
-
MEmu: MEmu is a free Android emulator that is designed for gaming. It has a powerful and stable performance, and supports various games. It also has features such as keyboard and mouse controls, gamepad support, multi-instance, macro recorder, screen recorder, and more. You can download MEmu from https://www.memuplay.com/.
-
-
These are some other Android emulators that you can use for playing Clash of Clans on PC. However, you should note that different emulators may have different compatibility and performance issues with different games and devices. Therefore, you should test them out and see which one works best for you.
-
Clash of Clans game features and tips
-
Now that you know how to play Clash of Clans on PC using BlueStacks or other Android emulators, let's take a look at some of the game features and tips that will help you enjoy the game more.
-
Clash of Clans is a game that combines strategy, management, and action. You have to build your own village, train your army, and fight against other players in clan wars. You can also join or create clans, chat with other players, and participate in various events and challenges.
-
Here are some of the main features of the game:
-
-
Village. Your village is your base where you can construct and upgrade various buildings, such as town hall, barracks, gold mines, elixir collectors, walls, defenses, and more. You can also decorate your village with various items, such as flags, statues, flowers, etc.
-
Army. Your army is your force that you can use to attack other players or defend your village. You can train various types of troops, such as barbarians, archers, giants, wizards, dragons, etc. You can also unlock and upgrade various heroes, such as king, queen, warden, etc.
-
Clan. Your clan is your team where you can join or invite other players who share the same interest in the game. You can chat with your clan members, donate and request troops from them, and participate in clan wars with them.
-
Clan war. Clan war is a feature where you can compete with another clan in a two-day event. The first day is the preparation day where you can scout the enemy's bases and plan your attacks. The second day is the battle day where you can launch your attacks and earn stars for your clan.
-
Events and challenges. Events and challenges are special features that offer rewards for completing certain tasks or objectives in the game. For example, there are seasonal events that celebrate holidays or festivals with themed decorations and rewards. There are also clan games that challenge clans to complete various missions and earn points for unlocking rewards.
-
-
These are some of the main features of the game that make it fun and addictive. However, to master the game and become a successful player, you need to learn some tips and strategies that will improve your gameplay.
-
Here are some general tips and tricks for playing Clash of Clans:
-
-
Save your gems. Gems are the premium currency in the game that you can use to speed up building time, buy resources, or get special items. However, gems are very scarce and hard to get in the game, so you should save them for important things, such as buying more builders, unlocking the builder base, or getting the gold pass. You can get free gems by completing achievements, removing obstacles, or opening gem boxes.
-
Upgrade your town hall wisely. Your town hall is the most important building in your village, as it determines your level and unlocks new buildings and troops. However, you should not rush to upgrade your town hall too quickly, as it will also increase the difficulty of your opponents and reduce the loot you can get from them. You should upgrade your town hall only when you have maxed out your other buildings and troops for your current level.
-
Balance your resources. Resources are the essential materials that you need to build and upgrade your buildings and troops. There are four main types of resources in the game: gold, elixir, dark elixir, and builder gold/elixir. You should balance your resources by spending them on different things, such as defenses, offense, walls, heroes, etc. You should also protect your resources by placing your storages and collectors inside your walls and near your defenses.
-
Join a clan. Joining a clan is one of the best ways to enjoy the game more and progress faster. By joining a clan, you can chat with other players, donate and request troops from them, and participate in clan wars with them. You can also get clan perks that give you various benefits, such as faster donation requests, increased donation limit, reduced troop training time, etc. You can join a clan by searching for one that suits your preferences, or by creating your own clan and inviting others to join.
-
Attack wisely. Attacking is one of the main aspects of the game that allows you to earn resources, trophies, and stars. However, attacking is not just about sending your troops blindly to the enemy's base. You need to plan your attack carefully by scouting the enemy's base, choosing the right troops and spells for your army composition, and deploying them strategically on the battlefield. You should also consider factors such as the enemy's defenses, traps, clan castle troops, heroes, etc.
-
-
These are some general tips and tricks for playing Clash of Clans that will help you improve your gameplay and have more fun. Of course, there are many more tips and tricks that you can learn by playing the game yourself or by watching other players' videos and guides.
-
Conclusion
-
Clash of Clans is a game that can be enjoyed by anyone who loves strategy, management, and action. It is a game that can be played on both mobile devices and PC. Playing Clash of Clans on PC has many advantages, such as bigger screen, better controls, and longer battery life. You can play Clash of Clans on PC using BlueStacks or other Android emulators that allow you to run Android apps and games on your PC.
-
In this article, we have shown you how to download and install Clash of Clans on PC using BlueStacks. We have also given you some tips and tricks on how to play Clash of Clans on PC using BlueStacks. We hope that this article has been helpful for you and that you have learned something new.
-
If you are interested in playing Clash of Clans on PC using BlueStacks or other Android emulators, why not give it a try? You can download BlueStacks from https://www.bluestacks.com/download.html or other Android emulators from their respective websites. You can also download Clash of Clans from the Google Play Store or the BlueStacks app store.
-
Have fun playing Clash of Clans on PC!
-
FAQs
-
Here are some frequently asked questions and answers about Clash of Clans on PC:
-
-
Can I play Clash of Clans on PC without an emulator?
-
No, you cannot play Clash of Clans on PC without an emulator. Clash of Clans is an Android app that is not compatible with Windows or Mac operating systems. Therefore, you need an Android emulator to run Clash of Clans on PC.
-
Can I sync my Clash of Clans account between my mobile device and my PC?
-
Yes, you can sync your Clash of Clans account between your mobile device and your PC. To do this, you need to link your account to a Google account or a Supercell ID. Then you can use the same account to sign in to Clash of Clans on both devices.
-
Is playing Clash of Clans on PC safe and legal?
-
Yes, playing Clash of Clans on PC is safe and legal, as long as you use a reputable and reliable Android emulator, such as BlueStacks or other emulators mentioned in this article. These emulators do not contain any viruses or malware, and they do not violate the terms of service of Clash of Clans or Google Play Store. However, you should be careful when downloading apps and games from unknown sources, as they may contain harmful or illegal content.
-
Can I play Clash of Clans on PC with my friends or clan members who play on mobile devices?
-
Yes, you can play Clash of Clans on PC with your friends or clan members who play on mobile devices. Clash of Clans is a cross-platform game that allows players to interact and compete with each other regardless of the device they use. Therefore, you can chat, donate, request, and fight with your friends or clan members who play on mobile devices, as long as you are connected to the same server and have the same version of the game.
-
What are the minimum system requirements for playing Clash of Clans on PC using BlueStacks?
-
The minimum system requirements for playing Clash of Clans on PC using BlueStacks are as follows:
-
-
Operating system: Windows 7 or higher, or Mac OS X 10.12 or higher
-
Processor: Intel or AMD processor
-
RAM: 2 GB or more
-
Disk space: 5 GB or more
-
Graphics: Intel HD 5200 or higher
-
Internet connection: Broadband or higher
-
-
However, these are just the minimum requirements, and you may need higher specifications for better performance and graphics quality.
-
How can I contact the support team of Clash of Clans or BlueStacks if I have any issues or questions?
-
If you have any issues or questions regarding Clash of Clans, you can contact the support team of Supercell, the developer of the game, by following these steps:
-
-
Open Clash of Clans on your device.
-
Click the gear icon on the top right corner of the game screen.
-
Click Help and Support.
-
Select the topic that relates to your issue or question.
-
If you cannot find the answer you are looking for, click Contact Us at the bottom of the screen.
-
Type your message and attach any screenshots if necessary.
-
Click Send.
-
-
If you have any issues or questions regarding BlueStacks, you can contact the support team of BlueStacks by following these steps:
-
-
Open BlueStacks on your PC.
-
Click the menu icon on the top right corner of the BlueStacks window.
-
Select Help Center.
Click Report a Problem.
-
Select the category and subcategory that relate to your issue or question.
-
Type your message and attach any screenshots or logs if necessary.
-
Click Submit.
-
-
You can also visit the official websites of Clash of Clans (https://clashofclans.com/) or BlueStacks (https://www.bluestacks.com/) for more information and resources.
-
-
This is the end of the article. We hope you have enjoyed reading it and learned something new. Have a nice day!
-
-
\ No newline at end of file
diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download CapCut MOD APK 6.9.2 (Plus Unlocked) - No Watermark No Ads.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download CapCut MOD APK 6.9.2 (Plus Unlocked) - No Watermark No Ads.md
deleted file mode 100644
index 82cdbfba9ecfaa25abf6890aa381a46140c03f1b..0000000000000000000000000000000000000000
--- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download CapCut MOD APK 6.9.2 (Plus Unlocked) - No Watermark No Ads.md
+++ /dev/null
@@ -1,81 +0,0 @@
-
-
Download CapCut Mod APK 6.9.2 - The Best Video Editing App for Android
-
If you are looking for a free, easy, and powerful video editing app for your Android device, then you should try CapCut Mod APK. This app is a modified version of the original CapCut app, which is developed by Bytedance Pte Ltd, the same company behind TikTok and other popular apps. CapCut Mod APK offers you many features and benefits that are not available in the official version, such as no watermark, premium filters and effects, top music hits and soundtracks, stickers and text tools, and more. In this article, we will tell you what CapCut Mod APK is, what its features are, how to download and install it, and how to use it to create amazing videos.
-
What is CapCut Mod APK?
-
CapCut Mod APK is a hacked version of the original CapCut app, which is a free all-in-one video editing app that helps you create incredible videos. With CapCut Mod APK, you can cut, reverse, change speed, add filters, effects, music, stickers, text, and more to your videos. You can also export your videos in high quality without any watermark or logo. CapCut Mod APK is compatible with Android 5.0 and above devices, and it does not require any root access or registration. You can download it from various third-party sources on the internet. However, you should be careful about the source you choose, as some of them may contain viruses or malware that can harm your device.
CapCut Mod APK has many features that make it one of the best video editing apps for Android users. Here are some of them:
-
No watermark
-
One of the most annoying things about using free video editing apps is that they usually add a watermark or logo to your videos when you export them. This can ruin the quality and appearance of your videos, especially if you want to share them on social media platforms like YouTube, Instagram, Facebook, etc. However, with CapCut Mod APK, you don't have to worry about this problem anymore. You can export your videos without any watermark or logo, and enjoy them in full HD quality.
-
Premium filters and effects
-
Another feature that makes CapCut Mod APK stand out from other video editing apps is that it offers you a wide range of premium filters and effects that can enhance the look and feel of your videos. You can choose from different categories such as beauty, movie, retro, glitch, etc., and apply them to your videos with just one tap. You can also adjust the intensity and duration of the filters and effects according to your preference.
-
Top music hits and soundtracks
-
Music is an essential element of any video, as it can set the mood and tone of your story. With CapCut Mod APK, you can access a huge library of top music hits and soundtracks that are suitable for different genres and occasions. You can also add your own music from your device or record your voice over the video. You can also trim, crop, fade in/out, loop, mix, etc., the music tracks to fit your video perfectly.
-
Stickers and text tools
-
If you want to add some fun and creativity to your videos, you can use the stickers and text tools that CapCut Mod APK provides. You can find various stickers that are related to different themes such as animals, emojis, memes, etc., and drag and drop them to your videos. You can also resize, rotate, and animate them as you like. You can also add text to your videos with different fonts, colors, sizes, styles, etc., and edit them with ease. You can also use the text tools to create subtitles, captions, titles, etc., for your videos.
-
Easy to use and fast
-
CapCut Mod APK is designed to be easy to use and fast for anyone who wants to edit videos on their Android device. You don't need any prior experience or skills to use this app, as it has a simple and intuitive interface that guides you through the process. You can also preview your edits in real-time and undo or redo any changes you make. CapCut Mod APK also supports multi-layer editing, which means you can add multiple videos, images, music, stickers, text, etc., to your project and edit them separately. CapCut Mod APK also works smoothly and quickly on most Android devices, without any lag or crash issues.
-
How to download and install CapCut Mod APK?
-
If you want to download and install CapCut Mod APK on your Android device, you need to follow these steps:
-
-
Step 1: Download the APK file from a trusted source
-
The first step is to download the APK file of CapCut Mod APK from a trusted source on the internet. You can use the links we provided above, or you can search for other sources that offer the latest version of the app. However, you should be careful about the source you choose, as some of them may contain viruses or malware that can harm your device. You should also check the reviews and ratings of the source before downloading the file.
-
Step 2: Enable unknown sources on your device
-
The next step is to enable unknown sources on your device, which will allow you to install apps from sources other than the Google Play Store. To do this, you need to go to your device settings > security > unknown sources and toggle it on. You may also see a pop-up message asking for your permission to install apps from unknown sources. You need to tap on allow or ok to proceed.
-
Step 3: Install the APK file and launch the app
-
The final step is to install the APK file and launch the app. To do this, you need to locate the downloaded file on your device storage and tap on it. You may see a warning message saying that this type of file can harm your device. You need to ignore it and tap on install anyway. The installation process will take a few seconds or minutes depending on your device performance. Once the installation is complete, you can tap on open or launch to start using the app.
-
How to use CapCut Mod APK to create amazing videos?
-
Now that you have downloaded and installed CapCut Mod APK on your device, you can start using it to create amazing videos. Here are some steps that you can follow:
-
Step 1: Choose a video from your gallery or record a new one
-
The first step is to choose a video from your gallery or record a new one with your camera. To do this, you need to open the app and tap on the plus icon at the bottom of the screen. You will see two options: new project and album. If you want to use a video from your gallery, you need to tap on album and select the video you want. If you want to record a new video, you need to tap on new project and grant permission for the app to access your camera and microphone. Then you can record your video as usual.
-
Step 2: Edit your video with various tools and options
-
The second step is to edit your video with various tools and options that CapCut Mod APK offers. To do this, you need to tap on the edit icon at the bottom of the screen. You will see a toolbar with different icons such as cut, filter, effect, music, sticker, text, etc. You can tap on any of these icons and explore the features they provide. For example, if you want to cut your video into segments, you need to tap on cut and drag the sliders to adjust the start and end points of each segment. If you want to add a filter to your video, you need to tap on filter and choose from the various options available. You can also adjust the intensity and duration of the filter as you like. You can repeat this process for any other tool or option you want to use.
-
Step 3: Save and share your video with your friends and social media
-
The final step is to save and share your video with your friends and social media platforms. To do this, you need to tap on the export icon at the top right corner of the screen. You will see a pop-up window with different options such as resolution, frame rate, quality, etc. You can choose the settings that suit your needs and preferences. Then you can tap on export and wait for the app to process your video. Once the export is done, you can tap on share and choose from the various options such as WhatsApp, Instagram, Facebook, YouTube, etc. You can also save your video to your device storage or cloud service.
-
Conclusion
-
CapCut Mod APK is a great video editing app for Android users who want to create amazing videos for free and without any watermark or logo. It offers many features and benefits that are not available in the official version, such as premium filters and effects, top music hits and soundtracks, stickers and text tools, and more. It is also easy to use and fast, and it supports multi-layer editing and high-quality export. You can download it from various sources on the internet, but you should be careful about the source you choose, as some of them may contain viruses or malware that can harm your device. You can also follow the steps we provided above to download, install, and use CapCut Mod APK to create amazing videos.
-
Here are some FAQs that you may have about CapCut Mod APK:
-
-
Is CapCut Mod APK safe to use?
-
CapCut Mod APK is safe to use if you download it from a trusted source that does not contain any viruses or malware. However, you should always scan the file before installing it on your device, and use a VPN or antivirus app to protect your privacy and security.
-
Is CapCut Mod APK legal to use?
-
CapCut Mod APK is not legal to use, as it is a modified version of the original CapCut app that violates its terms and conditions. By using CapCut Mod APK, you may be infringing the intellectual property rights of Bytedance Pte Ltd, the developer of CapCut app. You may also face legal consequences if you use CapCut Mod APK to create videos that contain copyrighted content or violate other laws.
-
Does CapCut Mod APK require root access or registration?
-
No, CapCut Mod APK does not require root access or registration to use. You can install it on any Android device that runs on Android 5.0 and above without any problem.
-
Can I update CapCut Mod APK?
-
No, you cannot update CapCut Mod APK from the Google Play Store or the app itself, as it is not an official version of the app. If you want to update CapCut Mod APK, you need to download the latest version of the file from a trusted source and install it on your device manually.
-
Can I use CapCut Mod APK on PC or iOS devices?
-
No, you cannot use CapCut Mod APK on PC or iOS devices, as it is only compatible with Android devices. However, you can use an Android emulator on your PC or Mac to run CapCut Mod APK on your computer. You can also use an iOS emulator on your PC or Mac to run CapCut app on your computer.
How to Download and Install Figma for Mac
-
Figma is a popular online design tool that lets you create, collaborate, and prototype user interfaces for web and mobile apps. Figma works on any browser and operating system, but if you prefer to work offline or use native features like keyboard shortcuts and menu bar access, you can download and install Figma for Mac.
In this article, we will show you how to download and install Figma for Mac in a few simple steps.
-
Step 1: Go to the Figma website
-
To download Figma for Mac, you need to go to the Figma website at https://www.figma.com/downloads/. You will see a button that says "Download for Mac". Click on it to start the download process.
-
Step 2: Open the downloaded file
-
Once the download is complete, you will find a file named "Figma.zip" in your Downloads folder. Double-click on it to unzip it. You will see a file named "Figma.app" inside the unzipped folder. Drag and drop it to your Applications folder.
-
Step 3: Launch Figma for Mac
-
To launch Figma for Mac, go to your Applications folder and double-click on the "Figma.app" file. You will see a window that asks you to sign in with your Figma account. If you don't have one, you can create one for free by clicking on "Sign up". You can also sign in with your Google or GitHub account.
-
-
After signing in, you will see the Figma interface with your recent files and projects. You can start designing right away or explore the Figma community for inspiration and resources.
-
Conclusion
-
Figma is a powerful and versatile design tool that you can use online or offline. To download and install Figma for Mac, you just need to go to the Figma website, download the file, unzip it, and drag it to your Applications folder. Then you can launch Figma for Mac and sign in with your account. You can now enjoy designing with Figma on your Mac.
-
How to Use Figma for Mac
-
Figma for Mac has the same features and functionality as the online version of Figma. You can create and edit files, collaborate with others in real-time, and export your designs to various formats. Here are some tips on how to use Figma for Mac effectively.
-
Use keyboard shortcuts
-
Figma for Mac supports many keyboard shortcuts that can speed up your workflow and make your design process more efficient. You can find a list of keyboard shortcuts by clicking on the menu bar icon and selecting "Keyboard Shortcuts". You can also customize your own keyboard shortcuts by going to Preferences > Keyboard.
-
Use plugins
-
Figma for Mac allows you to use plugins that can enhance your design capabilities and productivity. Plugins are third-party tools that you can install and use within Figma. You can find and install plugins by clicking on the menu bar icon and selecting "Plugins". You can also browse and manage your plugins by going to the Community tab and selecting "Plugins". Some popular plugins for Figma include Content Reel, Unsplash, Iconify, and Autoflow.
-
Use offline mode
-
Figma for Mac works offline as well as online. You can access and edit your local files without an internet connection. You can also create new files offline by clicking on the menu bar icon and selecting "New File". When you reconnect to the internet, your changes will be synced to the cloud automatically. You can also manually sync your files by clicking on the menu bar icon and selecting "Sync Files".
-
Use live device preview
-
Figma for Mac lets you preview your designs on a live device using the Figma Mirror app. Figma Mirror is a free app that you can download from the App Store or Google Play Store. To use live device preview, you need to connect your device and your Mac to the same Wi-Fi network. Then you can open the Figma Mirror app on your device and scan the QR code that appears on your Mac screen. You will see your design on your device in real-time. You can also interact with your design using gestures and inputs.
-
-
\ No newline at end of file
diff --git a/spaces/tomofi/MMOCR/mmocr/models/textdet/modules/gcn.py b/spaces/tomofi/MMOCR/mmocr/models/textdet/modules/gcn.py
deleted file mode 100644
index 092d646350b1577e7c535d0f846ff666384ec3a4..0000000000000000000000000000000000000000
--- a/spaces/tomofi/MMOCR/mmocr/models/textdet/modules/gcn.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torch.nn import init
-
-
-class MeanAggregator(nn.Module):
-    """Aggregate node features over the graph: a batched matrix product with
-    the (typically row-normalized) adjacency matrix A, i.e. a weighted or mean
-    pooling of each node's neighbourhood."""
-
-    def forward(self, features, A):
-        # features: (B, N, D), A: (B, N, N) -> aggregated features: (B, N, D)
-        x = torch.bmm(A, features)
-        return x
-
-
-class GraphConv(nn.Module):
-    """Graph convolution layer: concatenates each node's own feature with its
-    aggregated neighbourhood feature and applies a learned linear map + ReLU."""
-
- def __init__(self, in_dim, out_dim):
- super().__init__()
- self.in_dim = in_dim
- self.out_dim = out_dim
- self.weight = nn.Parameter(torch.FloatTensor(in_dim * 2, out_dim))
- self.bias = nn.Parameter(torch.FloatTensor(out_dim))
- init.xavier_uniform_(self.weight)
- init.constant_(self.bias, 0)
- self.aggregator = MeanAggregator()
-
- def forward(self, features, A):
- b, n, d = features.shape
- assert d == self.in_dim
- agg_feats = self.aggregator(features, A)
- cat_feats = torch.cat([features, agg_feats], dim=2)
- out = torch.einsum('bnd,df->bnf', cat_feats, self.weight)
- out = F.relu(out + self.bias)
- return out
-
-
-class GCN(nn.Module):
- """Graph convolutional network for clustering. This was from repo
- https://github.com/Zhongdao/gcn_clustering licensed under the MIT license.
-
- Args:
- feat_len(int): The input node feature length.
- """
-
- def __init__(self, feat_len):
- super(GCN, self).__init__()
- self.bn0 = nn.BatchNorm1d(feat_len, affine=False).float()
- self.conv1 = GraphConv(feat_len, 512)
- self.conv2 = GraphConv(512, 256)
- self.conv3 = GraphConv(256, 128)
- self.conv4 = GraphConv(128, 64)
- self.classifier = nn.Sequential(
- nn.Linear(64, 32), nn.PReLU(32), nn.Linear(32, 2))
-
- def forward(self, x, A, knn_inds):
-
- num_local_graphs, num_max_nodes, feat_len = x.shape
-
- x = x.view(-1, feat_len)
- x = self.bn0(x)
- x = x.view(num_local_graphs, num_max_nodes, feat_len)
-
- x = self.conv1(x, A)
- x = self.conv2(x, A)
- x = self.conv3(x, A)
- x = self.conv4(x, A)
- k = knn_inds.size(-1)
- mid_feat_len = x.size(-1)
- edge_feat = torch.zeros((num_local_graphs, k, mid_feat_len),
- device=x.device)
- for graph_ind in range(num_local_graphs):
- edge_feat[graph_ind, :, :] = x[graph_ind, knn_inds[graph_ind]]
- edge_feat = edge_feat.view(-1, mid_feat_len)
- pred = self.classifier(edge_feat)
-
- return pred
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/_base_/models/mask_rcnn_r50_fpn.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/_base_/models/mask_rcnn_r50_fpn.py
deleted file mode 100644
index 6fc7908249e013376b343c5fc136cbbe5ff29390..0000000000000000000000000000000000000000
--- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/_base_/models/mask_rcnn_r50_fpn.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# model settings
-model = dict(
- type='MaskRCNN',
- pretrained='torchvision://resnet50',
- backbone=dict(
- type='ResNet',
- depth=50,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- frozen_stages=1,
- norm_cfg=dict(type='BN', requires_grad=True),
- norm_eval=True,
- style='pytorch'),
- neck=dict(
- type='FPN',
- in_channels=[256, 512, 1024, 2048],
- out_channels=256,
- num_outs=5),
- rpn_head=dict(
- type='RPNHead',
- in_channels=256,
- feat_channels=256,
- anchor_generator=dict(
- type='AnchorGenerator',
- scales=[8],
- ratios=[0.5, 1.0, 2.0],
- strides=[4, 8, 16, 32, 64]),
- bbox_coder=dict(
- type='DeltaXYWHBBoxCoder',
- target_means=[.0, .0, .0, .0],
- target_stds=[1.0, 1.0, 1.0, 1.0]),
- loss_cls=dict(
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
- loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
- roi_head=dict(
- type='StandardRoIHead',
- bbox_roi_extractor=dict(
- type='SingleRoIExtractor',
- roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
- out_channels=256,
- featmap_strides=[4, 8, 16, 32]),
- bbox_head=dict(
- type='Shared2FCBBoxHead',
- in_channels=256,
- fc_out_channels=1024,
- roi_feat_size=7,
- num_classes=80,
- bbox_coder=dict(
- type='DeltaXYWHBBoxCoder',
- target_means=[0., 0., 0., 0.],
- target_stds=[0.1, 0.1, 0.2, 0.2]),
- reg_class_agnostic=False,
- loss_cls=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
- loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
- mask_roi_extractor=dict(
- type='SingleRoIExtractor',
- roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
- out_channels=256,
- featmap_strides=[4, 8, 16, 32]),
- mask_head=dict(
- type='FCNMaskHead',
- num_convs=4,
- in_channels=256,
- conv_out_channels=256,
- num_classes=80,
- loss_mask=dict(
- type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
- # model training and testing settings
- train_cfg=dict(
- rpn=dict(
- assigner=dict(
- type='MaxIoUAssigner',
- pos_iou_thr=0.7,
- neg_iou_thr=0.3,
- min_pos_iou=0.3,
- match_low_quality=True,
- ignore_iof_thr=-1),
- sampler=dict(
- type='RandomSampler',
- num=256,
- pos_fraction=0.5,
- neg_pos_ub=-1,
- add_gt_as_proposals=False),
- allowed_border=-1,
- pos_weight=-1,
- debug=False),
- rpn_proposal=dict(
- nms_pre=2000,
- max_per_img=1000,
- nms=dict(type='nms', iou_threshold=0.7),
- min_bbox_size=0),
- rcnn=dict(
- assigner=dict(
- type='MaxIoUAssigner',
- pos_iou_thr=0.5,
- neg_iou_thr=0.5,
- min_pos_iou=0.5,
- match_low_quality=True,
- ignore_iof_thr=-1),
- sampler=dict(
- type='RandomSampler',
- num=512,
- pos_fraction=0.25,
- neg_pos_ub=-1,
- add_gt_as_proposals=True),
- mask_size=28,
- pos_weight=-1,
- debug=False)),
- test_cfg=dict(
- rpn=dict(
- nms_pre=1000,
- max_per_img=1000,
- nms=dict(type='nms', iou_threshold=0.7),
- min_bbox_size=0),
- rcnn=dict(
- score_thr=0.05,
- nms=dict(type='nms', iou_threshold=0.5),
- max_per_img=100,
- mask_thr_binary=0.5)))
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/bbox/assigners/__init__.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/bbox/assigners/__init__.py
deleted file mode 100644
index 891e6237c537daf5b445eeffc160747ff78f695d..0000000000000000000000000000000000000000
--- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/core/bbox/assigners/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from .approx_max_iou_assigner import ApproxMaxIoUAssigner
-from .assign_result import AssignResult
-from .atss_assigner import ATSSAssigner
-from .base_assigner import BaseAssigner
-from .center_region_assigner import CenterRegionAssigner
-from .grid_assigner import GridAssigner
-from .hungarian_assigner import HungarianAssigner
-from .max_iou_assigner import MaxIoUAssigner
-from .point_assigner import PointAssigner
-from .region_assigner import RegionAssigner
-from .uniform_assigner import UniformAssigner
-
-__all__ = [
- 'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
- 'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
- 'HungarianAssigner', 'RegionAssigner', 'UniformAssigner'
-]
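To show how these exports are typically consumed, here is a small hypothetical example, again assuming an MMDetection 2.x install, that builds a MaxIoUAssigner from a config dict and assigns a few toy candidate boxes to a single ground-truth box.

```python
# Hypothetical usage of the assigners exported above (MMDetection 2.x assumed).
import torch

from mmdet.core import build_assigner

assigner = build_assigner(
    dict(type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0.3))

priors = torch.tensor([[0., 0., 10., 10.],
                       [0., 0., 5., 5.],
                       [20., 20., 30., 30.]])   # candidate boxes, xyxy
gt_bboxes = torch.tensor([[0., 0., 9., 9.]])    # one ground-truth box
gt_labels = torch.tensor([1])

result = assigner.assign(priors, gt_bboxes, gt_labels=gt_labels)
print(result.gt_inds)  # >0: 1-based gt index, 0: negative, -1: ignored / unassigned
```

In AssignResult, gt_inds holds 1-based ground-truth indices for positives, 0 for negatives, and -1 for boxes left unassigned or ignored.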
diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/gfl_head.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/gfl_head.py
deleted file mode 100644
index a62cf7a4f99522111c9d14079154de861a776809..0000000000000000000000000000000000000000
--- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/dense_heads/gfl_head.py
+++ /dev/null
@@ -1,648 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.cnn import ConvModule, Scale
-from mmcv.runner import force_fp32
-
-from mmdet.core import (anchor_inside_flags, bbox2distance, bbox_overlaps,
- build_assigner, build_sampler, distance2bbox,
- images_to_levels, multi_apply, multiclass_nms,
- reduce_mean, unmap)
-from ..builder import HEADS, build_loss
-from .anchor_head import AnchorHead
-
-
-class Integral(nn.Module):
- """A fixed layer for calculating integral result from distribution.
-
- This layer calculates the target location by :math: `sum{P(y_i) * y_i}`,
-    where P(y_i) denotes the softmax vector that represents the discrete
-    distribution, and y_i denotes the discrete set,
-    usually {0, 1, 2, ..., reg_max}.
-
- Args:
- reg_max (int): The maximal value of the discrete set. Default: 16. You
- may want to reset it according to your new dataset or related
- settings.
- """
-
- def __init__(self, reg_max=16):
- super(Integral, self).__init__()
- self.reg_max = reg_max
- self.register_buffer('project',
- torch.linspace(0, self.reg_max, self.reg_max + 1))
-
- def forward(self, x):
- """Forward feature from the regression head to get integral result of
- bounding box location.
-
- Args:
- x (Tensor): Features of the regression head, shape (N, 4*(n+1)),
- n is self.reg_max.
-
- Returns:
- x (Tensor): Integral result of box locations, i.e., distance
- offsets from the box center in four directions, shape (N, 4).
- """
- x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1)
- x = F.linear(x, self.project.type_as(x)).reshape(-1, 4)
- return x
-
-
-@HEADS.register_module()
-class GFLHead(AnchorHead):
- """Generalized Focal Loss: Learning Qualified and Distributed Bounding
- Boxes for Dense Object Detection.
-
-    The GFL head structure is similar to ATSS; however, GFL uses
- 1) joint representation for classification and localization quality, and
- 2) flexible General distribution for bounding box locations,
- which are supervised by
- Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively
-
- https://arxiv.org/abs/2006.04388
-
- Args:
- num_classes (int): Number of categories excluding the background
- category.
- in_channels (int): Number of channels in the input feature map.
- stacked_convs (int): Number of conv layers in cls and reg tower.
- Default: 4.
- conv_cfg (dict): dictionary to construct and config conv layer.
- Default: None.
- norm_cfg (dict): dictionary to construct and config norm layer.
- Default: dict(type='GN', num_groups=32, requires_grad=True).
- loss_qfl (dict): Config of Quality Focal Loss (QFL).
- reg_max (int): Max value of integral set :math: `{0, ..., reg_max}`
- in QFL setting. Default: 16.
- init_cfg (dict or list[dict], optional): Initialization config dict.
- Example:
- >>> self = GFLHead(11, 7)
- >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
- >>> cls_quality_score, bbox_pred = self.forward(feats)
- >>> assert len(cls_quality_score) == len(self.scales)
- """
-
- def __init__(self,
- num_classes,
- in_channels,
- stacked_convs=4,
- conv_cfg=None,
- norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
- loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
- reg_max=16,
- init_cfg=dict(
- type='Normal',
- layer='Conv2d',
- std=0.01,
- override=dict(
- type='Normal',
- name='gfl_cls',
- std=0.01,
- bias_prob=0.01)),
- **kwargs):
- self.stacked_convs = stacked_convs
- self.conv_cfg = conv_cfg
- self.norm_cfg = norm_cfg
- self.reg_max = reg_max
- super(GFLHead, self).__init__(
- num_classes, in_channels, init_cfg=init_cfg, **kwargs)
-
- self.sampling = False
- if self.train_cfg:
- self.assigner = build_assigner(self.train_cfg.assigner)
- # SSD sampling=False so use PseudoSampler
- sampler_cfg = dict(type='PseudoSampler')
- self.sampler = build_sampler(sampler_cfg, context=self)
-
- self.integral = Integral(self.reg_max)
- self.loss_dfl = build_loss(loss_dfl)
-
- def _init_layers(self):
- """Initialize layers of the head."""
- self.relu = nn.ReLU(inplace=True)
- self.cls_convs = nn.ModuleList()
- self.reg_convs = nn.ModuleList()
- for i in range(self.stacked_convs):
- chn = self.in_channels if i == 0 else self.feat_channels
- self.cls_convs.append(
- ConvModule(
- chn,
- self.feat_channels,
- 3,
- stride=1,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg))
- self.reg_convs.append(
- ConvModule(
- chn,
- self.feat_channels,
- 3,
- stride=1,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg))
- assert self.num_anchors == 1, 'anchor free version'
- self.gfl_cls = nn.Conv2d(
- self.feat_channels, self.cls_out_channels, 3, padding=1)
- self.gfl_reg = nn.Conv2d(
- self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1)
- self.scales = nn.ModuleList(
- [Scale(1.0) for _ in self.anchor_generator.strides])
-
- def forward(self, feats):
- """Forward features from the upstream network.
-
- Args:
- feats (tuple[Tensor]): Features from the upstream network, each is
- a 4D-tensor.
-
- Returns:
- tuple: Usually a tuple of classification scores and bbox prediction
- cls_scores (list[Tensor]): Classification and quality (IoU)
- joint scores for all scale levels, each is a 4D-tensor,
- the channel number is num_classes.
- bbox_preds (list[Tensor]): Box distribution logits for all
- scale levels, each is a 4D-tensor, the channel number is
- 4*(n+1), n is max value of integral set.
- """
- return multi_apply(self.forward_single, feats, self.scales)
-
- def forward_single(self, x, scale):
- """Forward feature of a single scale level.
-
- Args:
- x (Tensor): Features of a single scale level.
- scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
- the bbox prediction.
-
- Returns:
- tuple:
- cls_score (Tensor): Cls and quality joint scores for a single
- scale level the channel number is num_classes.
- bbox_pred (Tensor): Box distribution logits for a single scale
- level, the channel number is 4*(n+1), n is max value of
- integral set.
- """
- cls_feat = x
- reg_feat = x
- for cls_conv in self.cls_convs:
- cls_feat = cls_conv(cls_feat)
- for reg_conv in self.reg_convs:
- reg_feat = reg_conv(reg_feat)
- cls_score = self.gfl_cls(cls_feat)
- bbox_pred = scale(self.gfl_reg(reg_feat)).float()
- return cls_score, bbox_pred
-
- def anchor_center(self, anchors):
- """Get anchor centers from anchors.
-
- Args:
- anchors (Tensor): Anchor list with shape (N, 4), "xyxy" format.
-
- Returns:
- Tensor: Anchor centers with shape (N, 2), "xy" format.
- """
- anchors_cx = (anchors[..., 2] + anchors[..., 0]) / 2
- anchors_cy = (anchors[..., 3] + anchors[..., 1]) / 2
- return torch.stack([anchors_cx, anchors_cy], dim=-1)
-
- def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights,
- bbox_targets, stride, num_total_samples):
- """Compute loss of a single scale level.
-
- Args:
- anchors (Tensor): Box reference for each scale level with shape
- (N, num_total_anchors, 4).
- cls_score (Tensor): Cls and quality joint scores for each scale
- level has shape (N, num_classes, H, W).
- bbox_pred (Tensor): Box distribution logits for each scale
- level with shape (N, 4*(n+1), H, W), n is max value of integral
- set.
- labels (Tensor): Labels of each anchors with shape
- (N, num_total_anchors).
- label_weights (Tensor): Label weights of each anchor with shape
- (N, num_total_anchors)
-            bbox_targets (Tensor): BBox regression targets of each anchor with
- shape (N, num_total_anchors, 4).
- stride (tuple): Stride in this scale level.
- num_total_samples (int): Number of positive samples that is
- reduced over all GPUs.
-
- Returns:
- dict[str, Tensor]: A dictionary of loss components.
- """
- assert stride[0] == stride[1], 'h stride is not equal to w stride!'
- anchors = anchors.reshape(-1, 4)
- cls_score = cls_score.permute(0, 2, 3,
- 1).reshape(-1, self.cls_out_channels)
- bbox_pred = bbox_pred.permute(0, 2, 3,
- 1).reshape(-1, 4 * (self.reg_max + 1))
- bbox_targets = bbox_targets.reshape(-1, 4)
- labels = labels.reshape(-1)
- label_weights = label_weights.reshape(-1)
-
- # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
- bg_class_ind = self.num_classes
- pos_inds = ((labels >= 0)
- & (labels < bg_class_ind)).nonzero().squeeze(1)
- score = label_weights.new_zeros(labels.shape)
-
- if len(pos_inds) > 0:
- pos_bbox_targets = bbox_targets[pos_inds]
- pos_bbox_pred = bbox_pred[pos_inds]
- pos_anchors = anchors[pos_inds]
- pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0]
-
- weight_targets = cls_score.detach().sigmoid()
- weight_targets = weight_targets.max(dim=1)[0][pos_inds]
- pos_bbox_pred_corners = self.integral(pos_bbox_pred)
- pos_decode_bbox_pred = distance2bbox(pos_anchor_centers,
- pos_bbox_pred_corners)
- pos_decode_bbox_targets = pos_bbox_targets / stride[0]
- score[pos_inds] = bbox_overlaps(
- pos_decode_bbox_pred.detach(),
- pos_decode_bbox_targets,
- is_aligned=True)
- pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1)
- target_corners = bbox2distance(pos_anchor_centers,
- pos_decode_bbox_targets,
- self.reg_max).reshape(-1)
-
- # regression loss
- loss_bbox = self.loss_bbox(
- pos_decode_bbox_pred,
- pos_decode_bbox_targets,
- weight=weight_targets,
- avg_factor=1.0)
-
- # dfl loss
- loss_dfl = self.loss_dfl(
- pred_corners,
- target_corners,
- weight=weight_targets[:, None].expand(-1, 4).reshape(-1),
- avg_factor=4.0)
- else:
- loss_bbox = bbox_pred.sum() * 0
- loss_dfl = bbox_pred.sum() * 0
- weight_targets = bbox_pred.new_tensor(0)
-
- # cls (qfl) loss
- loss_cls = self.loss_cls(
- cls_score, (labels, score),
- weight=label_weights,
- avg_factor=num_total_samples)
-
- return loss_cls, loss_bbox, loss_dfl, weight_targets.sum()
-
- @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
- def loss(self,
- cls_scores,
- bbox_preds,
- gt_bboxes,
- gt_labels,
- img_metas,
- gt_bboxes_ignore=None):
- """Compute losses of the head.
-
- Args:
- cls_scores (list[Tensor]): Cls and quality scores for each scale
- level, with shape (N, num_classes, H, W).
- bbox_preds (list[Tensor]): Box distribution logits for each scale
- level with shape (N, 4*(n+1), H, W), n is max value of integral
- set.
- gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
- shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
- gt_labels (list[Tensor]): class indices corresponding to each box
- img_metas (list[dict]): Meta information of each image, e.g.,
- image size, scaling factor, etc.
- gt_bboxes_ignore (list[Tensor] | None): specify which bounding
- boxes can be ignored when computing the loss.
-
- Returns:
- dict[str, Tensor]: A dictionary of loss components.
- """
-
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
- assert len(featmap_sizes) == self.anchor_generator.num_levels
-
- device = cls_scores[0].device
- anchor_list, valid_flag_list = self.get_anchors(
- featmap_sizes, img_metas, device=device)
- label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
-
- cls_reg_targets = self.get_targets(
- anchor_list,
- valid_flag_list,
- gt_bboxes,
- img_metas,
- gt_bboxes_ignore_list=gt_bboxes_ignore,
- gt_labels_list=gt_labels,
- label_channels=label_channels)
- if cls_reg_targets is None:
- return None
-
- (anchor_list, labels_list, label_weights_list, bbox_targets_list,
- bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
-
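- # average the positive sample count across GPUs and clamp it to at least 1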
- num_total_samples = reduce_mean(
- torch.tensor(num_total_pos, dtype=torch.float,
- device=device)).item()
- num_total_samples = max(num_total_samples, 1.0)
-
- losses_cls, losses_bbox, losses_dfl,\
- avg_factor = multi_apply(
- self.loss_single,
- anchor_list,
- cls_scores,
- bbox_preds,
- labels_list,
- label_weights_list,
- bbox_targets_list,
- self.anchor_generator.strides,
- num_total_samples=num_total_samples)
-
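- # normalize the box and DFL losses by the total quality weight gathered across GPUs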
- avg_factor = sum(avg_factor)
- avg_factor = reduce_mean(avg_factor).clamp_(min=1).item()
- losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox))
- losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl))
- return dict(
- loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl)
-
- def _get_bboxes(self,
- cls_scores,
- bbox_preds,
- mlvl_anchors,
- img_shapes,
- scale_factors,
- cfg,
- rescale=False,
- with_nms=True):
- """Transform outputs for a single batch item into labeled boxes.
-
- Args:
- cls_scores (list[Tensor]): Box scores for a single scale level
- with shape (N, num_classes, H, W).
- bbox_preds (list[Tensor]): Box distribution logits for a single
- scale level with shape (N, 4*(n+1), H, W), n is max value of
- integral set.
- mlvl_anchors (list[Tensor]): Box reference for a single scale level
- with shape (num_total_anchors, 4).
- img_shapes (list[tuple[int]]): Shape of the input image,
- list[(height, width, 3)].
- scale_factors (list[ndarray]): Scale factor of the image arranged as
- (w_scale, h_scale, w_scale, h_scale).
- cfg (mmcv.Config | None): Test / postprocessing configuration,
- if None, test_cfg would be used.
- rescale (bool): If True, return boxes in original image space.
- Default: False.
- with_nms (bool): If True, do nms before return boxes.
- Default: True.
-
- Returns:
- list[tuple[Tensor, Tensor]]: Each item in result_list is a 2-tuple.
- The first item is an (n, 5) tensor, whose columns are
- (tl_x, tl_y, br_x, br_y, score), with the score between 0 and 1.
- The shape of the second tensor in the tuple is (n,), and
- each element represents the class label of the corresponding
- box.
- """
- cfg = self.test_cfg if cfg is None else cfg
- assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
- batch_size = cls_scores[0].shape[0]
-
- mlvl_bboxes = []
- mlvl_scores = []
- for cls_score, bbox_pred, stride, anchors in zip(
- cls_scores, bbox_preds, self.anchor_generator.strides,
- mlvl_anchors):
- assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
- assert stride[0] == stride[1]
- scores = cls_score.permute(0, 2, 3, 1).reshape(
- batch_size, -1, self.cls_out_channels).sigmoid()
- bbox_pred = bbox_pred.permute(0, 2, 3, 1)
-
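- # decode the distribution logits to distances and scale by the stride to get pixel offsets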
- bbox_pred = self.integral(bbox_pred) * stride[0]
- bbox_pred = bbox_pred.reshape(batch_size, -1, 4)
-
- nms_pre = cfg.get('nms_pre', -1)
- if nms_pre > 0 and scores.shape[1] > nms_pre:
- max_scores, _ = scores.max(-1)
- _, topk_inds = max_scores.topk(nms_pre)
- batch_inds = torch.arange(batch_size).view(
- -1, 1).expand_as(topk_inds).long()
- anchors = anchors[topk_inds, :]
- bbox_pred = bbox_pred[batch_inds, topk_inds, :]
- scores = scores[batch_inds, topk_inds, :]
- else:
- anchors = anchors.expand_as(bbox_pred)
-
- bboxes = distance2bbox(
- self.anchor_center(anchors), bbox_pred, max_shape=img_shapes)
- mlvl_bboxes.append(bboxes)
- mlvl_scores.append(scores)
-
- batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
- if rescale:
- batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
- scale_factors).unsqueeze(1)
-
- batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
- # Add a dummy background class to the backend when using sigmoid
- # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
- # BG cat_id: num_class
- padding = batch_mlvl_scores.new_zeros(batch_size,
- batch_mlvl_scores.shape[1], 1)
- batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
-
- if with_nms:
- det_results = []
- for (mlvl_bboxes, mlvl_scores) in zip(batch_mlvl_bboxes,
- batch_mlvl_scores):
- det_bbox, det_label = multiclass_nms(mlvl_bboxes, mlvl_scores,
- cfg.score_thr, cfg.nms,
- cfg.max_per_img)
- det_results.append(tuple([det_bbox, det_label]))
- else:
- det_results = [
- tuple(mlvl_bs)
- for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores)
- ]
- return det_results
-
- def get_targets(self,
- anchor_list,
- valid_flag_list,
- gt_bboxes_list,
- img_metas,
- gt_bboxes_ignore_list=None,
- gt_labels_list=None,
- label_channels=1,
- unmap_outputs=True):
- """Get targets for GFL head.
-
- This method is almost the same as `AnchorHead.get_targets()`. Besides
- returning the targets as the parent method does, it also returns the
- anchors as the first element of the returned tuple.
- """
- num_imgs = len(img_metas)
- assert len(anchor_list) == len(valid_flag_list) == num_imgs
-
- # anchor number of multi levels
- num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
- num_level_anchors_list = [num_level_anchors] * num_imgs
-
- # concat all level anchors and flags to a single tensor
- for i in range(num_imgs):
- assert len(anchor_list[i]) == len(valid_flag_list[i])
- anchor_list[i] = torch.cat(anchor_list[i])
- valid_flag_list[i] = torch.cat(valid_flag_list[i])
-
- # compute targets for each image
- if gt_bboxes_ignore_list is None:
- gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
- if gt_labels_list is None:
- gt_labels_list = [None for _ in range(num_imgs)]
- (all_anchors, all_labels, all_label_weights, all_bbox_targets,
- all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(
- self._get_target_single,
- anchor_list,
- valid_flag_list,
- num_level_anchors_list,
- gt_bboxes_list,
- gt_bboxes_ignore_list,
- gt_labels_list,
- img_metas,
- label_channels=label_channels,
- unmap_outputs=unmap_outputs)
- # no valid anchors
- if any([labels is None for labels in all_labels]):
- return None
- # sampled anchors of all images
- num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
- num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
- # split targets to a list w.r.t. multiple levels
- anchors_list = images_to_levels(all_anchors, num_level_anchors)
- labels_list = images_to_levels(all_labels, num_level_anchors)
- label_weights_list = images_to_levels(all_label_weights,
- num_level_anchors)
- bbox_targets_list = images_to_levels(all_bbox_targets,
- num_level_anchors)
- bbox_weights_list = images_to_levels(all_bbox_weights,
- num_level_anchors)
- return (anchors_list, labels_list, label_weights_list,
- bbox_targets_list, bbox_weights_list, num_total_pos,
- num_total_neg)
-
- def _get_target_single(self,
- flat_anchors,
- valid_flags,
- num_level_anchors,
- gt_bboxes,
- gt_bboxes_ignore,
- gt_labels,
- img_meta,
- label_channels=1,
- unmap_outputs=True):
- """Compute regression, classification targets for anchors in a single
- image.
-
- Args:
- flat_anchors (Tensor): Multi-level anchors of the image, which are
- concatenated into a single tensor of shape (num_anchors, 4)
- valid_flags (Tensor): Multi level valid flags of the image,
- which are concatenated into a single tensor of
- shape (num_anchors,).
- num_level_anchors (Tensor): Number of anchors of each scale level.
- gt_bboxes (Tensor): Ground truth bboxes of the image,
- shape (num_gts, 4).
- gt_bboxes_ignore (Tensor): Ground truth bboxes to be
- ignored, shape (num_ignored_gts, 4).
- gt_labels (Tensor): Ground truth labels of each box,
- shape (num_gts,).
- img_meta (dict): Meta info of the image.
- label_channels (int): Channel of label.
- unmap_outputs (bool): Whether to map outputs back to the original
- set of anchors.
-
- Returns:
- tuple: N is the number of total anchors in the image.
- anchors (Tensor): All anchors in the image with shape (N, 4).
- labels (Tensor): Labels of all anchors in the image with shape
- (N,).
- label_weights (Tensor): Label weights of all anchor in the
- image with shape (N,).
- bbox_targets (Tensor): BBox targets of all anchors in the
- image with shape (N, 4).
- bbox_weights (Tensor): BBox weights of all anchors in the
- image with shape (N, 4).
- pos_inds (Tensor): Indices of positive anchor with shape
- (num_pos,).
- neg_inds (Tensor): Indices of negative anchor with shape
- (num_neg,).
- """
- inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
- img_meta['img_shape'][:2],
- self.train_cfg.allowed_border)
- if not inside_flags.any():
- return (None, ) * 7
- # assign gt and sample anchors
- anchors = flat_anchors[inside_flags, :]
-
- num_level_anchors_inside = self.get_num_level_anchors_inside(
- num_level_anchors, inside_flags)
- assign_result = self.assigner.assign(anchors, num_level_anchors_inside,
- gt_bboxes, gt_bboxes_ignore,
- gt_labels)
-
- sampling_result = self.sampler.sample(assign_result, anchors,
- gt_bboxes)
-
- num_valid_anchors = anchors.shape[0]
- bbox_targets = torch.zeros_like(anchors)
- bbox_weights = torch.zeros_like(anchors)
- labels = anchors.new_full((num_valid_anchors, ),
- self.num_classes,
- dtype=torch.long)
- label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
-
- pos_inds = sampling_result.pos_inds
- neg_inds = sampling_result.neg_inds
- if len(pos_inds) > 0:
- pos_bbox_targets = sampling_result.pos_gt_bboxes
- bbox_targets[pos_inds, :] = pos_bbox_targets
- bbox_weights[pos_inds, :] = 1.0
- if gt_labels is None:
- # Only rpn gives gt_labels as None
- # Foreground is the first class
- labels[pos_inds] = 0
- else:
- labels[pos_inds] = gt_labels[
- sampling_result.pos_assigned_gt_inds]
- if self.train_cfg.pos_weight <= 0:
- label_weights[pos_inds] = 1.0
- else:
- label_weights[pos_inds] = self.train_cfg.pos_weight
- if len(neg_inds) > 0:
- label_weights[neg_inds] = 1.0
-
- # map up to original set of anchors
- if unmap_outputs:
- num_total_anchors = flat_anchors.size(0)
- anchors = unmap(anchors, num_total_anchors, inside_flags)
- labels = unmap(
- labels, num_total_anchors, inside_flags, fill=self.num_classes)
- label_weights = unmap(label_weights, num_total_anchors,
- inside_flags)
- bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
- bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
-
- return (anchors, labels, label_weights, bbox_targets, bbox_weights,
- pos_inds, neg_inds)
-
- def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):
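- # count how many anchors of each scale level fall inside the valid image region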
- split_inside_flags = torch.split(inside_flags, num_level_anchors)
- num_level_anchors_inside = [
- int(flags.sum()) for flags in split_inside_flags
- ]
- return num_level_anchors_inside
diff --git a/spaces/trttung1610/musicgen/audiocraft/data/sound_dataset.py b/spaces/trttung1610/musicgen/audiocraft/data/sound_dataset.py
deleted file mode 100644
index 8b88cbe8016b4bd28c2de749177c9af29f7755fc..0000000000000000000000000000000000000000
--- a/spaces/trttung1610/musicgen/audiocraft/data/sound_dataset.py
+++ /dev/null
@@ -1,330 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-"""Dataset of audio with a simple description.
-"""
-
-from dataclasses import dataclass, fields, replace
-import json
-from pathlib import Path
-import random
-import typing as tp
-
-import numpy as np
-import torch
-
-from .info_audio_dataset import (
- InfoAudioDataset,
- get_keyword_or_keyword_list
-)
-from ..modules.conditioners import (
- ConditioningAttributes,
- SegmentWithAttributes,
- WavCondition,
-)
-
-
-EPS = torch.finfo(torch.float32).eps
-TARGET_LEVEL_LOWER = -35
-TARGET_LEVEL_UPPER = -15
-
-
-@dataclass
-class SoundInfo(SegmentWithAttributes):
- """Segment info augmented with Sound metadata.
- """
- description: tp.Optional[str] = None
- self_wav: tp.Optional[torch.Tensor] = None
-
- @property
- def has_sound_meta(self) -> bool:
- return self.description is not None
-
- def to_condition_attributes(self) -> ConditioningAttributes:
- out = ConditioningAttributes()
-
- for _field in fields(self):
- key, value = _field.name, getattr(self, _field.name)
- if key == 'self_wav':
- out.wav[key] = value
- else:
- out.text[key] = value
- return out
-
- @staticmethod
- def attribute_getter(attribute):
- if attribute == 'description':
- preprocess_func = get_keyword_or_keyword_list
- else:
- preprocess_func = None
- return preprocess_func
-
- @classmethod
- def from_dict(cls, dictionary: dict, fields_required: bool = False):
- _dictionary: tp.Dict[str, tp.Any] = {}
-
- # allow a subset of attributes to not be loaded from the dictionary
- # these attributes may be populated later
- post_init_attributes = ['self_wav']
-
- for _field in fields(cls):
- if _field.name in post_init_attributes:
- continue
- elif _field.name not in dictionary:
- if fields_required:
- raise KeyError(f"Unexpected missing key: {_field.name}")
- else:
- preprocess_func: tp.Optional[tp.Callable] = cls.attribute_getter(_field.name)
- value = dictionary[_field.name]
- if preprocess_func:
- value = preprocess_func(value)
- _dictionary[_field.name] = value
- return cls(**_dictionary)
-
-
-class SoundDataset(InfoAudioDataset):
- """Sound audio dataset: Audio dataset with environmental sound-specific metadata.
-
- Args:
- info_fields_required (bool): Whether all the mandatory metadata fields should be in the loaded metadata.
- external_metadata_source (tp.Optional[str]): Folder containing JSON metadata for the corresponding dataset.
- The metadata files contained in this folder are expected to match the stem of the audio file with
- a json extension.
- aug_p (float): Probability of performing audio mixing augmentation on the batch.
- mix_p (float): Proportion of batch items that are mixed together when applying audio mixing augmentation.
- mix_snr_low (int): Lowerbound for SNR value sampled for mixing augmentation.
- mix_snr_high (int): Upperbound for SNR value sampled for mixing augmentation.
- mix_min_overlap (float): Minimum overlap between audio files when performing mixing augmentation.
- kwargs: Additional arguments for AudioDataset.
-
- See `audiocraft.data.info_audio_dataset.InfoAudioDataset` for full initialization arguments.
- """
- def __init__(
- self,
- *args,
- info_fields_required: bool = True,
- external_metadata_source: tp.Optional[str] = None,
- aug_p: float = 0.,
- mix_p: float = 0.,
- mix_snr_low: int = -5,
- mix_snr_high: int = 5,
- mix_min_overlap: float = 0.5,
- **kwargs
- ):
- kwargs['return_info'] = True # We require the info for each song of the dataset.
- super().__init__(*args, **kwargs)
- self.info_fields_required = info_fields_required
- self.external_metadata_source = external_metadata_source
- self.aug_p = aug_p
- self.mix_p = mix_p
- if self.aug_p > 0:
- assert self.mix_p > 0, "Expecting some mixing proportion mix_p if aug_p > 0"
- assert self.channels == 1, "SoundDataset with audio mixing considers only monophonic audio"
- self.mix_snr_low = mix_snr_low
- self.mix_snr_high = mix_snr_high
- self.mix_min_overlap = mix_min_overlap
-
- def _get_info_path(self, path: tp.Union[str, Path]) -> Path:
- """Get path of JSON with metadata (description, etc.).
- If a JSON with the same stem as 'path' exists next to the audio file, it is used.
- Otherwise, the JSON is looked up in the external metadata source folder, if one is set.
- """
- info_path = Path(path).with_suffix('.json')
- if Path(info_path).exists():
- return info_path
- elif self.external_metadata_source and (Path(self.external_metadata_source) / info_path.name).exists():
- return Path(self.external_metadata_source) / info_path.name
- else:
- raise Exception(f"Unable to find a metadata JSON for path: {path}")
-
- def __getitem__(self, index):
- wav, info = super().__getitem__(index)
- info_data = info.to_dict()
- info_path = self._get_info_path(info.meta.path)
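- # merge the paired JSON metadata (description, etc.) with the segment info when it exists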
- if Path(info_path).exists():
- with open(info_path, 'r') as json_file:
- sound_data = json.load(json_file)
- sound_data.update(info_data)
- sound_info = SoundInfo.from_dict(sound_data, fields_required=self.info_fields_required)
- # if there are multiple descriptions, sample one randomly
- if isinstance(sound_info.description, list):
- sound_info.description = random.choice(sound_info.description)
- else:
- sound_info = SoundInfo.from_dict(info_data, fields_required=False)
-
- sound_info.self_wav = WavCondition(
- wav=wav[None], length=torch.tensor([info.n_frames]),
- sample_rate=[sound_info.sample_rate], path=[info.meta.path], seek_time=[info.seek_time])
-
- return wav, sound_info
-
- def collater(self, samples):
- # when training, audio mixing is performed in the collate function
- wav, sound_info = super().collater(samples) # SoundDataset always returns infos
- if self.aug_p > 0:
- wav, sound_info = mix_samples(wav, sound_info, self.aug_p, self.mix_p,
- snr_low=self.mix_snr_low, snr_high=self.mix_snr_high,
- min_overlap=self.mix_min_overlap)
- return wav, sound_info
-
-
-def rms_f(x: torch.Tensor) -> torch.Tensor:
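- # root-mean-square along the time axis; x has shape [B, T]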
- return (x ** 2).mean(1).pow(0.5)
-
-
-def normalize(audio: torch.Tensor, target_level: int = -25) -> torch.Tensor:
- """Normalize the signal to the target level."""
- rms = rms_f(audio)
- scalar = 10 ** (target_level / 20) / (rms + EPS)
- audio = audio * scalar.unsqueeze(1)
- return audio
-
-
-def is_clipped(audio: torch.Tensor, clipping_threshold: float = 0.99) -> torch.Tensor:
- return (abs(audio) > clipping_threshold).any(1)
-
-
-def mix_pair(src: torch.Tensor, dst: torch.Tensor, min_overlap: float) -> torch.Tensor:
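- # mix dst into src at a random offset that leaves at least a min_overlap fraction of src for the overlap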
- start = random.randint(0, int(src.shape[1] * (1 - min_overlap)))
- remainder = src.shape[1] - start
- if dst.shape[1] > remainder:
- src[:, start:] = src[:, start:] + dst[:, :remainder]
- else:
- src[:, start:start+dst.shape[1]] = src[:, start:start+dst.shape[1]] + dst
- return src
-
-
-def snr_mixer(clean: torch.Tensor, noise: torch.Tensor, snr: int, min_overlap: float,
- target_level: int = -25, clipping_threshold: float = 0.99) -> torch.Tensor:
- """Function to mix clean speech and noise at various SNR levels.
-
- Args:
- clean (torch.Tensor): Clean audio source to mix, of shape [B, T].
- noise (torch.Tensor): Noise audio source to mix, of shape [B, T].
- snr (int): SNR level when mixing.
- min_overlap (float): Minimum overlap between the two mixed sources.
- target_level (int): Gain level in dB.
- clipping_threshold (float): Threshold for clipping the audio.
- Returns:
- torch.Tensor: The mixed audio, of shape [B, T].
- """
- if clean.shape[1] > noise.shape[1]:
- noise = torch.nn.functional.pad(noise, (0, clean.shape[1] - noise.shape[1]))
- else:
- noise = noise[:, :clean.shape[1]]
-
- # normalizing to -25 dB FS
- clean = clean / (clean.max(1)[0].abs().unsqueeze(1) + EPS)
- clean = normalize(clean, target_level)
- rmsclean = rms_f(clean)
-
- noise = noise / (noise.max(1)[0].abs().unsqueeze(1) + EPS)
- noise = normalize(noise, target_level)
- rmsnoise = rms_f(noise)
-
- # set the noise level for a given SNR
- noisescalar = (rmsclean / (10 ** (snr / 20)) / (rmsnoise + EPS)).unsqueeze(1)
- noisenewlevel = noise * noisescalar
-
- # mix noise and clean speech
- noisyspeech = mix_pair(clean, noisenewlevel, min_overlap)
-
- # randomly select RMS value between -15 dBFS and -35 dBFS and normalize noisyspeech with that value
- # clipping can occasionally happen here, but with very low probability, so it is not a major issue.
- noisy_rms_level = np.random.randint(TARGET_LEVEL_LOWER, TARGET_LEVEL_UPPER)
- rmsnoisy = rms_f(noisyspeech)
- scalarnoisy = (10 ** (noisy_rms_level / 20) / (rmsnoisy + EPS)).unsqueeze(1)
- noisyspeech = noisyspeech * scalarnoisy
- clean = clean * scalarnoisy
- noisenewlevel = noisenewlevel * scalarnoisy
-
- # final check to see if there are any amplitudes exceeding +/- 1. If so, normalize all the signals accordingly
- clipped = is_clipped(noisyspeech)
- if clipped.any():
- noisyspeech_maxamplevel = noisyspeech[clipped].max(1)[0].abs().unsqueeze(1) / (clipping_threshold - EPS)
- noisyspeech[clipped] = noisyspeech[clipped] / noisyspeech_maxamplevel
-
- return noisyspeech
-
-
-def snr_mix(src: torch.Tensor, dst: torch.Tensor, snr_low: int, snr_high: int, min_overlap: float):
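- # sample a random integer SNR in [snr_low, snr_high) and mix the two sources at that level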
- if snr_low == snr_high:
- snr = snr_low
- else:
- snr = np.random.randint(snr_low, snr_high)
- mix = snr_mixer(src, dst, snr, min_overlap)
- return mix
-
-
-def mix_text(src_text: str, dst_text: str):
- """Mix text from different sources by concatenating them."""
- if src_text == dst_text:
- return src_text
- return src_text + " " + dst_text
-
-
-def mix_samples(wavs: torch.Tensor, infos: tp.List[SoundInfo], aug_p: float, mix_p: float,
- snr_low: int, snr_high: int, min_overlap: float):
- """Mix samples within a batch, summing the waveforms and concatenating the text infos.
-
- Args:
- wavs (torch.Tensor): Audio tensors of shape [B, C, T].
- infos (list[SoundInfo]): List of SoundInfo items corresponding to the audio.
- aug_p (float): Augmentation probability.
- mix_p (float): Proportion of items in the batch to mix (and merge) together.
- snr_low (int): Lowerbound for sampling SNR.
- snr_high (int): Upperbound for sampling SNR.
- min_overlap (float): Minimum overlap between mixed samples.
- Returns:
- tuple[torch.Tensor, list[SoundInfo]]: A tuple containing the mixed wavs
- and mixed SoundInfo for the given batch.
- """
- # no mixing to perform within the batch
- if mix_p == 0:
- return wavs, infos
-
- if random.uniform(0, 1) < aug_p:
- # perform all augmentations on waveforms as [B, T]
- # randomly picking pairs of audio to mix
- assert wavs.size(1) == 1, f"Mix samples requires monophonic audio but C={wavs.size(1)}"
- wavs = wavs.mean(dim=1, keepdim=False)
- B, T = wavs.shape
- k = int(mix_p * B)
- mixed_sources_idx = torch.randperm(B)[:k]
- mixed_targets_idx = torch.randperm(B)[:k]
- aug_wavs = snr_mix(
- wavs[mixed_sources_idx],
- wavs[mixed_targets_idx],
- snr_low,
- snr_high,
- min_overlap,
- )
- # mixing textual descriptions in metadata
- descriptions = [info.description for info in infos]
- aug_infos = []
- for i, j in zip(mixed_sources_idx, mixed_targets_idx):
- text = mix_text(descriptions[i], descriptions[j])
- m = replace(infos[i])
- m.description = text
- aug_infos.append(m)
-
- # back to [B, C, T]
- aug_wavs = aug_wavs.unsqueeze(1)
- assert aug_wavs.shape[0] > 0, "Samples mixing returned empty batch."
- assert aug_wavs.dim() == 3, f"Returned wav should be [B, C, T] but dim = {aug_wavs.dim()}"
- assert aug_wavs.shape[0] == len(aug_infos), "Mismatch between number of wavs and infos in the batch"
-
- return aug_wavs, aug_infos # [B, C, T]
- else:
- # randomly pick samples in the batch to match
- # the batch size when performing audio mixing
- B, C, T = wavs.shape
- k = int(mix_p * B)
- wav_idx = torch.randperm(B)[:k]
- wavs = wavs[wav_idx]
- infos = [infos[i] for i in wav_idx]
- assert wavs.shape[0] == len(infos), "Mismatch between number of wavs and infos in the batch"
-
- return wavs, infos # [B, C, T]
diff --git a/spaces/ulysses115/Nogizaka46-so/resample.py b/spaces/ulysses115/Nogizaka46-so/resample.py
deleted file mode 100644
index f84119cd239b49d260ed1d9e367206adcc3aa03d..0000000000000000000000000000000000000000
--- a/spaces/ulysses115/Nogizaka46-so/resample.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import os
-import argparse
-import librosa
-import numpy as np
-from multiprocessing import Pool, cpu_count
-from scipy.io import wavfile
-from tqdm import tqdm
-
-
-def process(item):
- spkdir, wav_name, args = item
- # speakers 's5', 'p280' and 'p315' are excluded
- speaker = spkdir.replace("\\", "/").split("/")[-1]
- wav_path = os.path.join(args.in_dir, speaker, wav_name)
- if os.path.exists(wav_path) and '.wav' in wav_path:
- os.makedirs(os.path.join(args.out_dir2, speaker), exist_ok=True)
- wav, sr = librosa.load(wav_path, sr=None)
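- # trim silence, scale down peaks above 1.0, resample to the target rate, then peak-normalize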
- wav, _ = librosa.effects.trim(wav, top_db=20)
- peak = np.abs(wav).max()
- if peak > 1.0:
- wav = 0.98 * wav / peak
- wav2 = librosa.resample(wav, orig_sr=sr, target_sr=args.sr2)
- wav2 /= max(wav2.max(), -wav2.min())
- save_name = wav_name
- save_path2 = os.path.join(args.out_dir2, speaker, save_name)
- wavfile.write(
- save_path2,
- args.sr2,
- (wav2 * np.iinfo(np.int16).max).astype(np.int16)
- )
-
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--sr2", type=int, default=44100, help="sampling rate")
- parser.add_argument("--in_dir", type=str, default="./dataset_raw", help="path to source dir")
- parser.add_argument("--out_dir2", type=str, default="./dataset/44k", help="path to target dir")
- args = parser.parse_args()
- num_processes = cpu_count() - 2 if cpu_count() > 4 else 1
- pool = Pool(processes=num_processes)
-
- for speaker in os.listdir(args.in_dir):
- spk_dir = os.path.join(args.in_dir, speaker)
- if os.path.isdir(spk_dir):
- print(spk_dir)
- for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])):
- pass
diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/BMW PSdZData Lite V3.58.3.003.rar.md b/spaces/usbethFlerru/sovits-modelsV2/example/BMW PSdZData Lite V3.58.3.003.rar.md
deleted file mode 100644
index debdab6010d02dfd6734f11e7554356ef3b30088..0000000000000000000000000000000000000000
--- a/spaces/usbethFlerru/sovits-modelsV2/example/BMW PSdZData Lite V3.58.3.003.rar.md
+++ /dev/null
@@ -1,35 +0,0 @@
-
-
How to Download and Install BMW PSdZData Lite v3.58.3.003.rar
-
BMW PSdZData Lite v3.58.3.003.rar is a compressed file that contains the latest version of the Psdzdata software for BMW coding and programming. Psdzdata is a collection of data files that are used by the E-Sys software to access the modules and features of BMW vehicles.
-
If you want to download and install BMW PSdZData Lite v3.58.3.003.rar, you need to follow these steps:
Go to this link and register an account on the MHH AUTO forum[^1^]. This is a trustworthy automotive forum where you can find many useful resources and tools for BMW and other brands.
-
After registering, log in to your account and go to the same link again. You will see a post by AutoExe3219 that contains an attachment called BMW 3.58.3.003 Psdzdata Lite.rar[^1^]. This is the file you need to download.
-
Click on the attachment and you will be redirected to a download page. You may need to enter a captcha code or wait for a few seconds before the download starts.
-
Save the file to your computer and extract it using a program like WinRAR or 7-Zip. You will get a folder called BMW 3.58.3.003 Psdzdata Lite that contains several subfolders with different data files.
-
Copy the folder BMW 3.58.3.003 Psdzdata Lite to the location where you have installed E-Sys on your computer. Usually, this is C:\ESysData\Psdzdata or C:\Program Files (x86)\E-Sys\Psdzdata.
-
Launch E-Sys and go to Options > Settings > Psdzdata path. Browse to the folder BMW 3.58.3.003 Psdzdata Lite and select it as your Psdzdata path.
-
Click OK and restart E-Sys. You have successfully installed BMW PSdZData Lite v3.58.3.003.rar on your computer.
-
-
Note: The password for the file BMW 3.58.3.003 Psdzdata Lite.rar is not available publicly. You need to send a private message to AutoExe3219 on the MHH AUTO forum and ask for it politely[^1^]. You also need to press the thanks and reputation buttons on his post to show your appreciation.
-
-
How to Use BMW PSdZData Lite v3.58.3.003.rar for Coding and Programming
-
After installing BMW PSdZData Lite v3.58.3.003.rar on your computer, you can use it to code and program your BMW vehicle using E-Sys. Coding is the process of changing the settings and features of your vehicle, such as enabling or disabling functions, changing the appearance of the display, adjusting the sound system, etc. Programming is the process of updating the software and firmware of your vehicle's modules, such as the engine control unit (ECU), the transmission control unit (TCU), the body control module (BCM), etc.
-
To use BMW PSdZData Lite v3.58.3.003.rar for coding and programming, you need to follow these steps:
-
-
Connect your BMW vehicle to your computer using an OBD2 cable or a wireless adapter. Make sure your vehicle is in ignition mode and your computer has a stable power supply.
-
Launch E-Sys and click on Connect. Select your vehicle's chassis type from the drop-down menu and click on Read VIN. E-Sys will automatically detect your vehicle's identification number (VIN) and show it on the screen.
-
Click on Connect again and select your connection type from the drop-down menu. You can choose between Gateway URL (for wireless connection) or OBD (for cable connection). E-Sys will establish a connection with your vehicle and show its status on the screen.
-
Click on Expert Mode and select Coding or Programming from the left panel. Coding will allow you to change the settings and features of your vehicle, while Programming will allow you to update the software and firmware of your vehicle's modules.
-
For Coding, select Read Coding Data from the top menu and wait for E-Sys to read the data from your vehicle. You will see a list of modules and their codes on the right panel. You can browse through the modules and their codes and change them according to your preferences. You can also use a cheat sheet or a code list to find out what each code means and what it does.
-
After changing the codes, select Activate FA (VO) from the top menu and wait for E-Sys to activate the changes on your vehicle. You will see a message saying FA (VO) activated successfully on the screen.
-
Select Code FDL from the top menu and select the modules that you have changed from the list. Click on Execute Job and wait for E-Sys to code the modules on your vehicle. You will see a message saying Coding ended successfully on the screen.
-
For Programming, select Read SVT (VCM) from the top menu and wait for E-Sys to read the data from your vehicle. You will see a list of modules and their versions on the right panel. You can browse through the modules and their versions and check if they need an update or not.
-
If a module needs an update, select it from the list and click on Detect CAF for SWE from the bottom menu. E-Sys will detect the correct software version for the module and show it on the screen.
-
Select Write CAFD / CAFD from SWE from the bottom menu and click on Execute Job. E-Sys will write the new software version to the module and show a message saying Write CAFD / CAFD from SWE ended successfully on the screen.
-
Repeat steps 9 and 10 for all the modules that need an update.
-
-
Note: Coding and programming are advanced procedures that require some knowledge and experience. If you are not sure what you are doing, you may cause damage to your vehicle or void its warranty. It is recommended that you backup your original data before making any changes and consult a professional if you encounter any problems.
d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/vaibhavarduino/anime-plus/e4e/utils/train_utils.py b/spaces/vaibhavarduino/anime-plus/e4e/utils/train_utils.py
deleted file mode 100644
index 0c55177f7442010bc1fcc64de3d142585c22adc0..0000000000000000000000000000000000000000
--- a/spaces/vaibhavarduino/anime-plus/e4e/utils/train_utils.py
+++ /dev/null
@@ -1,13 +0,0 @@
-
-def aggregate_loss_dict(agg_loss_dict):
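- # collect every value logged under each key across outputs, then replace each list with its mean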
- mean_vals = {}
- for output in agg_loss_dict:
- for key in output:
- mean_vals[key] = mean_vals.setdefault(key, []) + [output[key]]
- for key in mean_vals:
- if len(mean_vals[key]) > 0:
- mean_vals[key] = sum(mean_vals[key]) / len(mean_vals[key])
- else:
- print('{} has no value'.format(key))
- mean_vals[key] = 0
- return mean_vals
diff --git a/spaces/visakh7843/Sheet_Music_Generator/app.py b/spaces/visakh7843/Sheet_Music_Generator/app.py
deleted file mode 100644
index 2759270b58ebcd3831bc26a0a53af30b91bce129..0000000000000000000000000000000000000000
--- a/spaces/visakh7843/Sheet_Music_Generator/app.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from music import music_gen  # Function to generate music based on ABC files
-import gradio as gr
-import os
-from MC.markov_chain import main_markov #Function to generate music based on midi files
-
-keysignature = ["C","G","D","A","No selection"]
-difficulty = ["beginner","intermediate","expert"]
-timesignature = ['3/4','4/4','2/2','2/4']
-
-# output = gr.Gallery() if GlobalUIGallery else "image"
-
-# interface = gr.Interface(fn = music_gen,
-# inputs=[gr.Radio(difficulty,label="Difficulty"),
-# gr.Radio(timesignature,label="Time Signature"),
-# gr.Dropdown(keysignature,label="Key Signature")],
-# outputs = [gr.Gallery(label="Sheet Music"),gr.Audio(label="Audio")],
-# title="Sheet Music Generation for Sight-Reading",
-# description="TO be added")
-# interface.launch(inline=False)
-
-with gr.Blocks() as demo:
-
- gr.Markdown("""
- ## Sight-reading generator for sheet music.
- Markov models generate sheet music based on different input
- parameters given by the user.
- """)
- with gr.Tabs():
- with gr.TabItem("ABC Model"):
- gr.Markdown("N-grams model using ABC data as training data.")
- with gr.Row():
- with gr.Column():
- difficulty_input_abc = gr.Radio(difficulty,label="Difficulty") #input
- time_sig_input_abc = gr.Radio(timesignature,label="Time Signature") #input
- key_sig_input_abc = gr.Dropdown(keysignature,label="Key Signature") #input
- with gr.Row():
- abc_button = gr.Button("Create Music!!") #Submit
-
- with gr.Column():
- output_gallery_abc = gr.Gallery(label="Sheet Music")
- output_audio_abc = gr.Audio(label="Audio")
- with gr.TabItem("MIDI Model"):
- gr.Markdown("Markov model using MIDI fata as training data.")
- with gr.Row():
- with gr.Column():
- # difficulty_input_midi = gr.Radio(difficulty,label="Difficulty") #input
- time_sig_input_midi = gr.Radio(timesignature,label="Time Signature") #input
- # key_sig_input_midi = gr.Dropdown(keysignature,label="Key Signature") #input
- with gr.Row():
- midi_button = gr.Button("Create Music!!") #Submit
-
- with gr.Column():
- output_gallery_midi = gr.Gallery(label="Sheet Music")
- output_audio_midi = gr.Audio(label="Audio")
-
- abc_button.click(music_gen, inputs=[difficulty_input_abc,time_sig_input_abc,key_sig_input_abc], outputs=[output_gallery_abc,output_audio_abc])
- midi_button.click(main_markov, inputs= time_sig_input_midi, outputs=[output_gallery_midi,output_audio_midi])
-
-if __name__ == "__main__":
- demo.launch()
-
diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/runner/hooks/logger/dvclive.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/runner/hooks/logger/dvclive.py
deleted file mode 100644
index 687cdc58c0336c92b1e4f9a410ba67ebaab2bc7a..0000000000000000000000000000000000000000
--- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/runner/hooks/logger/dvclive.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from ...dist_utils import master_only
-from ..hook import HOOKS
-from .base import LoggerHook
-
-
-@HOOKS.register_module()
-class DvcliveLoggerHook(LoggerHook):
- """Class to log metrics with dvclive.
-
- It requires `dvclive`_ to be installed.
-
- Args:
- path (str): Directory where dvclive will write TSV log files.
- interval (int): Logging interval (every k iterations).
- Default 10.
- ignore_last (bool): Ignore the log of last iterations in each epoch
- if less than `interval`.
- Default: True.
- reset_flag (bool): Whether to clear the output buffer after logging.
- Default: True.
- by_epoch (bool): Whether EpochBasedRunner is used.
- Default: True.
-
- .. _dvclive:
- https://dvc.org/doc/dvclive
- """
-
- def __init__(self,
- path,
- interval=10,
- ignore_last=True,
- reset_flag=True,
- by_epoch=True):
-
- super(DvcliveLoggerHook, self).__init__(interval, ignore_last,
- reset_flag, by_epoch)
- self.path = path
- self.import_dvclive()
-
- def import_dvclive(self):
- try:
- import dvclive
- except ImportError:
- raise ImportError(
- 'Please run "pip install dvclive" to install dvclive')
- self.dvclive = dvclive
-
- @master_only
- def before_run(self, runner):
- self.dvclive.init(self.path)
-
- @master_only
- def log(self, runner):
- tags = self.get_loggable_tags(runner)
- if tags:
- for k, v in tags.items():
- self.dvclive.log(k, v, step=self.get_iter(runner))
diff --git a/spaces/wu981526092/Stereotype_Detection/stereotype_detector/__init__.py b/spaces/wu981526092/Stereotype_Detection/stereotype_detector/__init__.py
deleted file mode 100644
index 31cab56cf08995147bd626a1054da44719d8d191..0000000000000000000000000000000000000000
--- a/spaces/wu981526092/Stereotype_Detection/stereotype_detector/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .stereotype_detector import Detector
\ No newline at end of file
diff --git a/spaces/xelu3banh/dpt-depth01/app.py b/spaces/xelu3banh/dpt-depth01/app.py
deleted file mode 100644
index d53cd25e9a32ed9f2b8c670cb4e9b6f00b05ec82..0000000000000000000000000000000000000000
--- a/spaces/xelu3banh/dpt-depth01/app.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import gradio as gr
-from transformers import DPTFeatureExtractor, DPTForDepthEstimation
-import torch
-import numpy as np
-from PIL import Image
-
-#torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')
-
-feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-large")
-model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")
-
-def process_image(image):
- # prepare image for the model
- encoding = feature_extractor(image, return_tensors="pt")
-
- # forward pass
- with torch.no_grad():
- outputs = model(**encoding)
- predicted_depth = outputs.predicted_depth
-
- # interpolate to original size
- prediction = torch.nn.functional.interpolate(
- predicted_depth.unsqueeze(1),
- size=image.size[::-1],
- mode="bicubic",
- align_corners=False,
- ).squeeze()
- output = prediction.cpu().numpy()
- formatted = (output * 255 / np.max(output)).astype('uint8')
- img = Image.fromarray(formatted)
- return img
-
-
-title = "Demo: zero-shot depth estimation with DPT"
-description = "Demo for Intel's DPT, a Dense Prediction Transformer for state-of-the-art dense prediction tasks such as semantic segmentation and depth estimation."
-
-
-iface = gr.Interface(fn=process_image,
- inputs=gr.inputs.Image(type="pil"),
- outputs=gr.outputs.Image(type="pil", label="predicted depth"),
- title=title,
- description=description,
- enable_queue=True)
-iface.launch(debug=True)
\ No newline at end of file
diff --git a/spaces/xin/PatentSolver/App/bin/constants.py b/spaces/xin/PatentSolver/App/bin/constants.py
deleted file mode 100644
index e3098815491f7f621646078b96858ec731ac6a01..0000000000000000000000000000000000000000
--- a/spaces/xin/PatentSolver/App/bin/constants.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import os
-import root_folder
-
-ROOT = root_folder.ROOT+"/"
-APP_FOLDER = ROOT+"/App/"
-ASSETS = APP_FOLDER+"assets/"
-DATA = ROOT+"Data/"
-DATA_INPUT = DATA+"input/"
-TEMP = DATA+"temp/"
-CORPUS = DATA+"corpus/"
-PRE = DATA+"pretreated/"
-DATA_OUTPUT = DATA+"output/"
-GRAPH_FOLDER = DATA+"graphs/"
-POLARITY_DATA = ASSETS+"polaritySets/"
diff --git a/spaces/xuetao/bingo3/src/components/chat-list.tsx b/spaces/xuetao/bingo3/src/components/chat-list.tsx
deleted file mode 100644
index 624a78ef0d7be0f1192cf02a81e2e9cf214cb193..0000000000000000000000000000000000000000
--- a/spaces/xuetao/bingo3/src/components/chat-list.tsx
+++ /dev/null
@@ -1,28 +0,0 @@
-import React from 'react'
-
-import { Separator } from '@/components/ui/separator'
-import { ChatMessage } from '@/components/chat-message'
-import { ChatMessageModel } from '@/lib/bots/bing/types'
-
-export interface ChatList {
- messages: ChatMessageModel[]
-}
-
-export function ChatList({ messages }: ChatList) {
- if (!messages.length) {
- return null
- }
-
- return (
-