diff --git a/spaces/123Kumar/vits-uma-genshin-honkai123/text/__init__.py b/spaces/123Kumar/vits-uma-genshin-honkai123/text/__init__.py
deleted file mode 100644
index 663c4b6416affb53c9dc56dddbc8b2b65d4bf518..0000000000000000000000000000000000000000
--- a/spaces/123Kumar/vits-uma-genshin-honkai123/text/__init__.py
+++ /dev/null
@@ -1,57 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-from text import cleaners
-from text.symbols import symbols
-
-
-# Mappings from symbol to numeric ID and vice versa:
-_symbol_to_id = {s: i for i, s in enumerate(symbols)}
-_id_to_symbol = {i: s for i, s in enumerate(symbols)}
-
-
-def text_to_sequence(text, symbols, cleaner_names):
-  '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
-    Args:
-      text: string to convert to a sequence
-      cleaner_names: names of the cleaner functions to run the text through
-    Returns:
-      List of integers corresponding to the symbols in the text
-  '''
-  _symbol_to_id = {s: i for i, s in enumerate(symbols)}
-  sequence = []
-
-  clean_text = _clean_text(text, cleaner_names)
-  for symbol in clean_text:
-    if symbol not in _symbol_to_id.keys():
-      continue
-    symbol_id = _symbol_to_id[symbol]
-    sequence += [symbol_id]
-  return sequence, clean_text
-
-
-def cleaned_text_to_sequence(cleaned_text):
-  '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
-    Args:
-      text: string to convert to a sequence
-    Returns:
-      List of integers corresponding to the symbols in the text
-  '''
-  sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()]
-  return sequence
-
-
-def sequence_to_text(sequence):
-  '''Converts a sequence of IDs back to a string'''
-  result = ''
-  for symbol_id in sequence:
-    s = _id_to_symbol[symbol_id]
-    result += s
-  return result
-
-
-def _clean_text(text, cleaner_names):
-  for name in cleaner_names:
-    cleaner = getattr(cleaners, name)
-    if not cleaner:
-      raise Exception('Unknown cleaner: %s' % name)
-    text = cleaner(text)
-  return text
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/ARK SURVIVAL EVOLVED TRAINER The Ultimate Guide to Infinite Health and Unlimited Food.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/ARK SURVIVAL EVOLVED TRAINER The Ultimate Guide to Infinite Health and Unlimited Food.md
deleted file mode 100644
index e255dee0aae930b27c6536bc15ad0f43d58a5a8a..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/ARK SURVIVAL EVOLVED TRAINER The Ultimate Guide to Infinite Health and Unlimited Food.md
+++ /dev/null
@@ -1,133 +0,0 @@

ARK Survival Evolved – Trainer Infinite Health, Unlimited Food

-

Are you a fan of ARK Survival Evolved, the popular action-adventure game that lets you explore a massive island full of dinosaurs and other creatures? Do you want to make your gameplay more enjoyable and exciting by having access to unlimited resources, abilities, and options? If so, then you might be interested in using a trainer for ARK Survival Evolved.

-

ARK SURVIVAL EVOLVED – TRAINER Infinite Health, Unlimited Food


Download File === https://byltly.com/2uKyRK



-

A trainer is a software program that modifies the game's memory and code to give you various advantages and cheats. With a trainer, you can have infinite health, unlimited food, no reload, easy crafting, super speed, and more. You can also customize your trainer settings to suit your preferences and needs.

-

In this article, we will show you how to install and use a trainer for ARK Survival Evolved, what features it offers, what benefits it brings, and what risks it entails. By the end of this article, you will be able to decide whether using a trainer is worth it for you or not.

-

How to install and use the trainer

-

Before you can use a trainer for ARK Survival Evolved, you need to download it from a reliable source. There are many websites that offer trainers for various games, but not all of them are safe and trustworthy. Some of them may contain malware or viruses that can harm your computer or steal your personal information. Therefore, you should always do some research before downloading any file from the internet.

-

One of the most reputable sources for trainers is Steam, which is the official platform for ARK Survival Evolved. Steam has a community workshop where users can upload and download mods, trainers, maps, skins, and other content for various games. You can browse through the workshop and find a trainer that suits your needs. You can also read the reviews and ratings from other users to see if the trainer works well or not.

-

Downloading the trainer from a reliable source

-

To download a trainer from Steam, you need to have an account and own ARK Survival Evolved on Steam. If you don't have an account, you can create one for free on their website. If you don't own ARK Survival Evolved on Steam, you can buy it from their store or use another platform that supports trainers.

-

How to get infinite health in ARK SURVIVAL EVOLVED with trainer
-ARK SURVIVAL EVOLVED trainer unlimited food and health cheat
-Best trainer for ARK SURVIVAL EVOLVED infinite health and food hack
-Download ARK SURVIVAL EVOLVED trainer with unlimited health and food features
-ARK SURVIVAL EVOLVED trainer infinite health and food mod for PC
-Cheat codes for ARK SURVIVAL EVOLVED trainer with infinite health and unlimited food
-ARK SURVIVAL EVOLVED trainer guide for infinite health and food
-Tips and tricks for using ARK SURVIVAL EVOLVED trainer with unlimited health and food
-ARK SURVIVAL EVOLVED trainer review: infinite health and food option
-How to install ARK SURVIVAL EVOLVED trainer with infinite health and unlimited food
-ARK SURVIVAL EVOLVED trainer infinite health and food gameplay video
-ARK SURVIVAL EVOLVED trainer unlimited health and food update
-ARK SURVIVAL EVOLVED trainer infinite health and food patch notes
-How to use ARK SURVIVAL EVOLVED trainer with unlimited health and food safely
-ARK SURVIVAL EVOLVED trainer infinite health and food bug fixes
-How to uninstall ARK SURVIVAL EVOLVED trainer with unlimited health and food
-ARK SURVIVAL EVOLVED trainer infinite health and food compatibility issues
-How to activate ARK SURVIVAL EVOLVED trainer with unlimited health and food
-ARK SURVIVAL EVOLVED trainer infinite health and food keyboard shortcuts
-How to customize ARK SURVIVAL EVOLVED trainer with unlimited health and food settings
-ARK SURVIVAL EVOLVED trainer infinite health and food pros and cons
-How to get free ARK SURVIVAL EVOLVED trainer with unlimited health and food
-ARK SURVIVAL EVOLVED trainer infinite health and food vs other trainers
-How to troubleshoot ARK SURVIVAL EVOLVED trainer with unlimited health and food errors
-ARK SURVIVAL EVOLVED trainer infinite health and food FAQs
-How to backup ARK SURVIVAL EVOLVED trainer with unlimited health and food files
-How to restore ARK SURVIVAL EVOLVED trainer with infinite health and food files
-How to upgrade ARK SURVIVAL EVOLVED trainer with unlimited health and food version
-How to downgrade ARK SURVIVAL EVOLVED trainer with infinite health and food version
-How to disable ARK SURVIVAL EVOLVED trainer with unlimited health and food features
-How to enable ARK SURVIVAL EVOLVED trainer with infinite health and food features
-How to test ARK SURVIVAL EVOLVED trainer with unlimited health and food performance
-How to improve ARK SURVIVAL EVOLVED trainer with infinite health and food performance
-How to optimize ARK SURVIVAL EVOLVED trainer with unlimited health and food performance
-How to verify ARK SURVIVAL EVOLVED trainer with infinite health and food authenticity
-How to report ARK SURVIVAL EVOLVED trainer with unlimited health and food issues
-How to contact ARK SURVIVAL EVOLVED trainer with infinite health and food support team
-How to rate ARK SURVIVAL EVOLVED trainer with unlimited health and food quality
-How to share ARK SURVIVAL EVOLVED trainer with infinite health and food feedback
-How to recommend ARK SURVIVAL EVOLVED trainer with unlimited health and food to others

-

Once you have an account and own ARK Survival Evolved on Steam, you can follow these steps to download a trainer:

-
    -
  1. Open Steam and log in to your account.
  2. Go to Library and select ARK Survival Evolved from your games list.
  3. Click on Workshop under Community Hub on the right side of the screen.
  4. Type "trainer" in the search box and press Enter.
  5. Choose a trainer that has good ratings, reviews, and compatibility with your game version.
  6. Click on Subscribe to download the trainer to your computer.
-

Extracting the trainer files and running the program

-

After downloading the trainer from Steam, you need to extract it from its compressed file format. Most trainers come in ZIP or RAR files that need to be unpacked using a program like WinRAR or 7-Zip. You can download these programs for free from their official websites.

-

To extract the trainer files, you need to follow these steps:

-
    -
  1. Locate the downloaded file on your computer. It should be in your Steam folder under steamapps > workshop > content > 346110 > [trainer ID].
  2. Right-click on the file and select Extract Here or Extract to [trainer name].
  3. A new folder with the same name as the file should appear in the same location.
  4. Open the folder and look for an executable file with the name of the trainer or something similar.
  5. Double-click on the file to run the program.
-

Launching the game and activating the trainer

-

The final step is to launch ARK Survival Evolved and activate the trainer. To do this, you need to follow these steps:

-
    -
  1. Run ARK Survival Evolved from Steam or your preferred platform.
  2. Wait for the game to load and start a new game or load an existing one.
  3. Alt-tab to switch back to your desktop or use Windows key + D.
  4. Run the trainer program if it's not already running.
  5. A window with various options should appear on your screen.
  6. Select the options that you want to enable by clicking on them or pressing their corresponding keys.
  7. You should hear a confirmation sound if an option is activated successfully.
  8. Switch back to ARK Survival Evolved by alt-tabbing or using Windows key + D again.
  9. You should see some indicators on your screen showing that the options are enabled.
  10. You can now enjoy playing ARK Survival Evolved with cheats!
-

Features of the trainer

-

A typical trainer for ARK Survival Evolved offers many features that can enhance your gameplay experience. Some of these features are:

-

Infinite health and stamina

-

This feature allows you to have unlimited health points (HP) and stamina points (SP). You will never die or get exhausted from any damage or activity. You can fight any enemy, fall from any height, swim underwater indefinitely, run forever, etc. without worrying about losing health or stamina.

-

Unlimited food and water

-

This feature allows you to have unlimited food points (FP) and water points (WP). You will never starve or dehydrate from any condition or environment. You can eat anything, drink anything, stay in any temperature zone, etc. without worrying about losing food or water.

-

Infinite weight and oxygen

-

This feature allows you to have unlimited weight capacity (WC) and oxygen capacity (OC). You will never be encumbered or suffocated by any item or situation. You can carry anything, breathe anywhere, dive deep underwater indefinitely, etc. without worrying about losing weight or oxygen.

-

No reload and unlimited ammo

-

This feature allows you to have no reload time (RT) and unlimited ammunition (AM) for any weapon or tool. You will never run out of bullets or arrows or need to reload your gun or bow. You can shoot anything continuously without worrying about losing ammo or wasting time reloading.

-

Easy crafting and taming

-

This feature allows you to have easy crafting requirements (CR) and easy taming effectiveness (TE) for any item or creature. You will need only one resource of any type to craft any item or tool. You will also tame any creature instantly with one food item of any type. You can craft anything quickly without worrying about gathering resources or wasting time crafting. You can also tame anything easily without worrying about feeding them properly or waiting for them to be tamed.

-

Super speed and jump

-

This feature allows you to move and jump much faster and higher than normal. You will be able to outrun any enemy, reach any location, jump over any obstacle, etc. without worrying about speed or height.

-

Other options and customizations

-

Depending on the trainer you use, you may have access to other options and customizations that can further enhance your gameplay experience. For example, some trainers may allow you to:

- -

To access these options and customizations, you may need to use different keys or buttons on your keyboard or controller. You may also need to open a console window or a menu screen to enter commands or codes. You should always read the instructions and notes that come with the trainer to learn how to use it properly and safely.

-

Benefits of using the trainer

-

Using a trainer for ARK Survival Evolved can bring you many benefits that can make your gameplay more enjoyable and exciting. Some of these benefits are:

-

Enhance your gaming experience and have more fun

-

With a trainer, you can have more freedom and flexibility to play ARK Survival Evolved the way you want. You can experiment with different items, weapons, tools, creatures, etc. without worrying about their costs or consequences. You can also try out different scenarios and challenges without risking your progress or reputation. You can have more fun and satisfaction from playing ARK Survival Evolved with cheats.

-

Explore the island and its secrets without limitations

-

With a trainer, you can explore the island and its secrets without limitations. You can travel to any location on the map without being hindered by terrain, distance, enemies, etc. You can also discover hidden areas, caves, ruins, etc. that may contain valuable loot or clues. You can uncover the mysteries and secrets of ARK Survival Evolved without missing anything.

-

Survive and dominate the dinosaurs and other players

-

With a trainer, you can survive and dominate the dinosaurs and other players on the island. You can fight any dinosaur or creature without fear of death or injury. You can also tame any dinosaur or creature without difficulty or delay. You can also compete with other players online without being at a disadvantage or disadvantage. You can be the ultimate survivor and ruler of ARK Survival Evolved with cheats.

-

Customize your gameplay according to your preferences

-

With a trainer, you can customize your gameplay according to your preferences. You can adjust the difficulty level, game speed, graphics quality, sound volume, etc. according to your liking. You can also enable or disable certain options or features according to your needs. You can make ARK Survival Evolved suit your personal taste and style with cheats.

-

Risks of using the trainer

-

While using a trainer for ARK Survival Evolved can bring you many benefits, it can also entail some risks that you should be aware of before using it. Some of these risks are:

-

Potential malware and viruses from untrusted sources

-

As mentioned earlier, not all trainers are safe and trustworthy. Some of them may contain malware or viruses that can harm your computer or steal your personal information. Therefore, you should always do some research before downloading any file from the internet. You should also scan any file with an antivirus program before opening it. You should also backup your game files and system files before using any trainer.

-

Possible bans and penalties from online servers

-

  • How do I install and use a trainer for ARK Survival Evolved?
    You need to download the trainer from a reliable source, extract it from its compressed file format, run the program, launch the game, and activate the options that you want to enable.
  • What features does a trainer for ARK Survival Evolved offer?
    A typical trainer for ARK Survival Evolved offers many features that can enhance your gameplay experience, such as infinite health, unlimited food, no reload, easy crafting, super speed, and more. You can also customize your trainer settings to suit your preferences and needs.
  • What are the benefits of using a trainer for ARK Survival Evolved?
    Using a trainer for ARK Survival Evolved can bring you many benefits that can make your gameplay more enjoyable and exciting, such as enhancing your gaming experience and having more fun, exploring the island and its secrets without limitations, surviving and dominating the dinosaurs and other players, and customizing your gameplay according to your preferences.
  • What are the risks of using a trainer for ARK Survival Evolved?
    Using a trainer for ARK Survival Evolved can also entail some risks that you should be aware of before using it, such as potential malware and viruses from untrusted sources, possible bans and penalties from online servers, and loss of challenge and immersion from cheating.

    0a6ba089eb
    -
    -
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Free Download Psikey.dll Coreldraw X5 Serial Number LINK.md b/spaces/1gistliPinn/ChatGPT4/Examples/Free Download Psikey.dll Coreldraw X5 Serial Number LINK.md
deleted file mode 100644
index 83765bc8a22454eb276448a9d7018f5405d3f342..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Free Download Psikey.dll Coreldraw X5 Serial Number LINK.md
+++ /dev/null
@@ -1,5 +0,0 @@
    -

Download latest build for psikey.dll corel draw x5 download. Download - WebHostingHub. a R61 store the Coreldraw x5. to a free rectangular parallelepiped. Given a rectangle of sides $a_1$ and $a_2$, there are three elementary ways to subdivide it: create three smaller rectangles, create four smaller rectangles, or first chop off one edge and then create two rectangles. The process of chopping a rectangle $a_1 \times a_2$ into smaller rectangles $a_1 \times a_i \times a_2$ is described in Fig. [fig:rectangles]. For each of the three directions along the positive sides, we create a smaller rectangle in the direction indicated by the shaded rectangles. The goal is to create three smaller rectangles of area $a_1a_2$. Such a subdivision is possible if and only if $a_2>a_1$. From now on, we will assume that $a_2>a_1$. In the case that $a_2=a_1$, this lemma does not apply. In this case, one of the three smaller rectangles will have a square hole, so the lemma can be re-stated as follows: a subdivision of a rectangle into smaller rectangles with $a_2>a_1$ is possible if and only if $a_2>a_1+1$. Let us now assume that $a_2>a_1+1$. We can subdivide the rectangle in three different ways as shown in Fig. [fig:rectangles]. The first possibility is to subdivide as described in Fig. [fig:subdivide]. As can be seen, this results in three smaller rectangles with the same area as the original rectangle. The second possibility is to subdivide as shown in Fig. [fig:subdivide2]. This leaves an empty space, which is divided into four smaller rectangles, see Fig. [fig:squares]. The third possibility is to subdivide as in Fig. [fig:subdivide3]. This results in a subtraction of a square from the original rectangle. If the square is in the upper left corner, the result is two smaller rectangles. If the square is in the lower right corner, the result is one smaller rectangle. Let $a_1 \times a_2 \times a_3$ be any rectangle, and divide it along the positive sides. Then the number of rectangles created during this process does not exceed $a_3-a_2-a_1$. We start with the rectangle of sides $a_1,a_2,a_3$. We can apply the previous lemma to each of the three directions in the positive $x,y,z$-directions. The result of this lemma is three smaller rectangles with the same area as the original rectangle. From now on, we will assume that $a_3>a_2>a_1$. In the case that $a_3=a_2=a_1$, this lemma does not apply.

    -

    free download psikey.dll coreldraw x5 serial number


    DOWNLOAD 🗹 https://imgfil.com/2uxWX3



    899543212b
    -
    -
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bubble Shooter Classic How to Download and Play the Most Addictive Game for Free.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bubble Shooter Classic How to Download and Play the Most Addictive Game for Free.md
deleted file mode 100644
index 74abf0e0e22c0b6bed8e7cb77590d8a707356485..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bubble Shooter Classic How to Download and Play the Most Addictive Game for Free.md
+++ /dev/null
@@ -1,225 +0,0 @@
    -

    Bubble Shooter Classic Kostenlos Download: How to Play and Enjoy this Fun Game

    -

    If you are looking for a fun and relaxing game that can keep you entertained for hours, you might want to try Bubble Shooter Classic. This is a classic and addictive bubble pop game that is easy to play but hard to master. In this article, we will tell you everything you need to know about Bubble Shooter Classic, including what it is, how to download and install it for free, how to play and win it, and why it is a fun and relaxing game. Let's get started!

    -

    What is Bubble Shooter Classic?

    -

    Bubble Shooter Classic is a version of the popular bubble shooter game that has been around for decades. It is a puzzle game where you have to match three or more bubbles of the same color to pop them and clear the board. The game has thousands of levels with different layouts, obstacles, and challenges. You can also choose from three game modes: puzzle, arcade, and classic. The game is suitable for all ages and can be played online or offline.

    -

    bubble shooter classic kostenlos download


    Download Filehttps://urlin.us/2uT37p



    -

    A brief history of the game

    -

    The origin of the bubble shooter game can be traced back to the 1980s, when a Japanese company called Taito released a game called Puzzle Bobble (also known as Bust-a-Move). This game featured cute dinosaurs shooting bubbles at the top of the screen. The game was a huge hit and spawned many sequels and spin-offs. In 2002, a company called Absolutist released a web-based version of the game called Bubble Shooter, which became one of the most popular online games ever. Since then, many variations and clones of the game have been created, including Bubble Shooter Classic.

    -

    The main features and gameplay of the game

    -

    Bubble Shooter Classic has many features that make it an enjoyable and addictive game. Some of them are:

    - -

    How to download and install Bubble Shooter Classic for free?

    -

    If you want to play Bubble Shooter Classic on your device, you can download it for free from various sources. Here are some of them:

    -

The requirements and steps for downloading the game on different devices

    -

    Bubble Shooter Classic is compatible with various devices, such as smartphones, tablets, laptops, and desktops. You can download it for free from different sources, depending on your device. Here are some of the requirements and steps for downloading the game on different devices:

    - - - - - - - - - - - - - - - - - - - - - -
| Device | Requirements | Steps |
| --- | --- | --- |
| Android smartphone or tablet | You need to have Android 4.1 or higher and at least 40 MB of free space on your device. | You can download the game from the Google Play Store by searching for "Bubble Shooter Classic" or by clicking on this link. You can also scan the QR code below to access the download page. Once you download the game, you can install it and start playing. |
| iOS smartphone or tablet | You need to have iOS 9.0 or later and at least 70 MB of free space on your device. | You can download the game from the App Store by searching for "Bubble Shooter Classic" or by clicking on this link. You can also scan the QR code below to access the download page. Once you download the game, you can install it and start playing. |
| Windows laptop or desktop | You need to have Windows 10 and at least 100 MB of free space on your device. | You can download the game from the Microsoft Store by searching for "Bubble Shooter Classic" or by clicking on this link. You can also scan the QR code below to access the download page. Once you download the game, you can install it and start playing. |
    -

    QR code for Android QR code for iOS QR code for Windows

    -

    bubble shooter classic free download
    -bubble shooter classic game download
    -bubble shooter classic apk download
    -bubble shooter classic android download
    -bubble shooter classic app download
    -bubble shooter classic online kostenlos spielen
    -bubble shooter classic offline kostenlos spielen
    -bubble shooter classic ohne anmeldung kostenlos spielen
    -bubble shooter classic ohne download kostenlos spielen
    -bubble shooter classic ohne werbung kostenlos spielen
    -bubble shooter classic for pc free download
    -bubble shooter classic for windows free download
    -bubble shooter classic for mac free download
    -bubble shooter classic for laptop free download
    -bubble shooter classic for desktop free download
    -bubble shooter classic mod apk download
    -bubble shooter classic hack apk download
    -bubble shooter classic unlimited coins apk download
    -bubble shooter classic premium apk download
    -bubble shooter classic pro apk download
    -bubble shooter classic ilyon free download
    -bubble shooter classic ilyon game download
    -bubble shooter classic ilyon apk download
    -bubble shooter classic ilyon android download
    -bubble shooter classic ilyon app download
    -bubble popper game free download
    -bubble popper game online kostenlos spielen
    -bubble popper game offline kostenlos spielen
    -bubble popper game ohne anmeldung kostenlos spielen
    -bubble popper game ohne download kostenlos spielen
    -bubble popper game for pc free download
    -bubble popper game for windows free download
    -bubble popper game for mac free download
    -bubble popper game for laptop free download
    -bubble popper game for desktop free download
    -bubble popper game mod apk download
    -bubble popper game hack apk download
    -bubble popper game unlimited coins apk download
    -bubble popper game premium apk download
    -bubble popper game pro apk download
    -balloon shooting game free download
    -balloon shooting game online kostenlos spielen
    -balloon shooting game offline kostenlos spielen
    -balloon shooting game ohne anmeldung kostenlos spielen
    -balloon shooting game ohne download kostenlos spielen
    -balloon shooting game for pc free download
    -balloon shooting game for windows free download

    -

    The benefits and drawbacks of downloading the game for free

    -

    Downloading Bubble Shooter Classic for free has its pros and cons. Here are some of them:

    - -

    Overall, downloading Bubble Shooter Classic for free is a great way to have some fun and relax with a classic bubble pop game. However, you should also be aware of the potential drawbacks and manage your time and resources wisely.

    How to play and win Bubble Shooter Classic?

    -

    Playing Bubble Shooter Classic is easy and fun, but winning it can be challenging and rewarding. Here are some of the basic rules and controls of the game, as well as some tips and tricks for matching and popping bubbles.

    -

    The basic rules and controls of the game

    -

    The basic rules and controls of Bubble Shooter Classic are simple and intuitive. Here are some of them:

    - -

    The tips and tricks for matching and popping bubbles

    -

    Matching and popping bubbles is the core of Bubble Shooter Classic, but it can also be tricky and strategic. Here are some tips and tricks for matching and popping bubbles:

    - -

    The different game modes and levels of the game

    -

    Bubble Shooter Classic has three game modes: puzzle, arcade, and classic. Each mode has its own rules and objectives, as well as different levels of difficulty and fun. Here are some of them:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    ModeDescriptionObjectiveDifficultyFun
    PuzzleThis mode has over 1800 levels with different challenges and puzzles. You have to clear the board with a limited number of shots.To clear all the bubbles on the board with as few shots as possible.HardHigh
    ArcadeThis mode has over 1750 levels with increasing difficulty. You have to pop as many bubbles as you can before they reach the bottom of the screen.To pop as many bubbles as possible before they touch the bottom line.MediumMedium
    ClassicThis mode has three difficulty levels: easy, medium, and hard. You have to pop all the bubbles in this retro mode.To pop all the bubbles on the board with no time or shot limit.EasyLow
    -

    You can switch between the modes by tapping on the level indicator at the bottom left corner of the screen. You can also see your progress and performance in each mode by tapping on the star counter at the top right corner of the screen.

    -

    Why is Bubble Shooter Classic a fun and relaxing game?

    -

    Bubble Shooter Classic is a fun and relaxing game for many reasons. Here are some of them:

    -

    The advantages of playing Bubble Shooter Classic for your brain and mood

    -

    Playing Bubble Shooter Classic can have positive effects on your brain and mood. Some of them are:

    - -

    The challenges and rewards of playing Bubble Shooter Classic

    -

    Playing Bubble Shooter Classic can also be challenging and rewarding. Some of them are:

    - -

    The best ways to enjoy Bubble Shooter Classic with friends and family

    -

    Bubble Shooter Classic is not only a solo game, but also a social game. You can enjoy it with your friends and family in many ways. Some of them are:

    - -

    Conclusion

    -

Bubble Shooter Classic is a classic and addictive bubble pop game that is easy to play but hard to master. It has thousands of levels with different layouts, obstacles, challenges, modes, difficulty levels, power-ups, boosters, coins, stars, achievements, leaderboards, online multiplayer features, offline single-player features, colorful graphics, sound effects, simple controls, daily bonuses, etc. It is a fun and relaxing game that can improve your brain functions and mood, reduce your stress, and boost your happiness. It is also a challenging and rewarding game that can test your patience, perseverance, skills, performance, possibilities, and strategies. It is also a social game that can be enjoyed with your friends and family in various ways. If you are looking for a fun and relaxing game that can keep you entertained for hours, you might want to try Bubble Shooter Classic. You can download it for free from various sources and play it on your device anytime and anywhere. Have fun and good luck!

    -

    FAQs

    -

    Here are some of the frequently asked questions about Bubble Shooter Classic:

    -
      -
  1. How do I get more coins and power-ups in Bubble Shooter Classic?
     You can get more coins and power-ups in Bubble Shooter Classic by playing the game, completing achievements, claiming daily bonuses, watching ads, or buying them with real money.
  2. How do I change the difficulty level or the game mode in Bubble Shooter Classic?
     You can change the difficulty level or the game mode in Bubble Shooter Classic by tapping on the level indicator at the bottom left corner of the screen. You can choose from easy, medium, or hard difficulty levels and from puzzle, arcade, or classic game modes.
  3. How do I play online multiplayer mode in Bubble Shooter Classic?
     You can play online multiplayer mode in Bubble Shooter Classic by tapping on the multiplayer button at the top center of the screen. You can invite your friends or join random players in a cooperative or competitive mode.
  4. How do I pause or resume the game in Bubble Shooter Classic?
     You can pause or resume the game in Bubble Shooter Classic by tapping on the pause button at the top center of the screen. You can also access the settings, sound, and help menus from there.
  5. How do I contact the support team or report a bug in Bubble Shooter Classic?
     You can contact the support team or report a bug in Bubble Shooter Classic by tapping on the help button at the top center of the screen. You can also email them at support@bubbleshooter.com or visit their website at www.bubbleshooter.com.

      -

    197e85843d
    -
    -
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Parking Multiplayer Mod APK How to Experience the Open World with Real Gas Stations and Car Services.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Parking Multiplayer Mod APK How to Experience the Open World with Real Gas Stations and Car Services.md
deleted file mode 100644
index 4550fa7bed400fa5b227805fbe8e40b34ba32a1c..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car Parking Multiplayer Mod APK How to Experience the Open World with Real Gas Stations and Car Services.md
+++ /dev/null
@@ -1,100 +0,0 @@

    Download Car Parking Multiplayer Mod APK Happymod - A Guide for Car Lovers

    -

    If you are a car lover and you enjoy parking games, you might want to try out Car Parking Multiplayer, a realistic and fun game that lets you drive, park, and customize your own cars. And if you want to make the game even more exciting, you can download Car Parking Multiplayer Mod APK Happymod, a modified version of the game that gives you unlimited money, resources, and access to all cars and upgrades. In this article, we will tell you everything you need to know about Car Parking Multiplayer Mod APK Happymod, including what it is, what are its benefits, how to download and install it, and whether it is safe and legal.

    -

    What is Car Parking Multiplayer?

    -

    Car Parking Multiplayer is a game that supports open-world multiplayer mode, car tuning, police mode, and free walking. Plus, you can decide to jump out of the car and walk around. There are several areas that you can explore in the game and you can choose to play either single-player mode or online mode if you want a more chaotic (fun) scene.

    -

    download car parking multiplayer mod apk happymod


    Download Zip » https://urlin.us/2uT0Ho



    -

    A realistic and fun car parking game

    -

    The main feature of Car Parking Multiplayer is, of course, the car parking gameplay. You can choose from over 100 cars with real interiors and drive them in various parking scenarios. You can also adjust the suspension, wheel angle, engine, turbo, gearbox, exhaust, and more to suit your driving style. The game has realistic physics and graphics that make the parking experience more challenging and enjoyable.

    -

    A multiplayer open world mode with real players

    -

    Another feature of Car Parking Multiplayer is the multiplayer open world mode. In this mode, you can join thousands of real players from all over the world in a huge map with real gas stations and car services. You can compete against them in races, exchange cars with them, chat with them using voice chat, or even become a police officer and chase them. You can also make friends with other players and add them to your friend list.

    -

    A car customization and tuning feature

    -

    The last feature of Car Parking Multiplayer is the car customization and tuning feature. In this feature, you can make your car stand out from the crowd by applying dynamic vinyls, car body parts, stickers, neon lights, spoilers, rims, tires, and more. You can also change the color of your car or paint it with different patterns. You can also upgrade your car's performance by swapping engines, adding turbos, changing gearboxes, or installing exhausts.

    -

    What is Happymod?

    -

    Happymod is a platform for downloading modded APK files for Android games and apps. Modded APK files are modified versions of the original files that have some features changed or added to enhance the user's experience. For example, some modded APK files may have unlimited money, unlocked features, menu options, or cheats. Happymod is a popular source for modded APK files because it has several advantages over other platforms.

    -

    A safe and reliable source for mods

    -

    One of the advantages of Happymod is that it is safe and reliable. All the modded APK files on Happymod are tested by users and verified by editors before they are uploaded to the platform. This means that you can download the mods without worrying about viruses, malware, or corrupted files. You can also read the reviews and ratings from other users to see if the mod works well and meets your expectations.

    -

    A community of modders and users

    -

    Another advantage of Happymod is that it is a community of modders and users. Happymod allows users to request mods for their favorite games and apps, and modders can upload their mods to the platform for others to enjoy. You can also join the discussion forum and chat with other users and modders about your feedback, suggestions, or questions. You can also share your modded APK files with your friends via social media or email.

    -

    What are the benefits of downloading Car Parking Multiplayer Mod APK Happymod?

    -

    If you are a fan of Car Parking Multiplayer, you might want to download Car Parking Multiplayer Mod APK Happymod to get some extra benefits that will make your game more fun and easy. Here are some of the benefits of downloading Car Parking Multiplayer Mod APK Happymod:

    -

    Unlimited money and resources

    -

    One of the benefits of downloading Car Parking Multiplayer Mod APK Happymod is that you will get unlimited money and resources in the game. This means that you can buy any car you want, upgrade it to the max, and customize it to your liking. You can also buy any item or service you need in the game, such as gas, car wash, repair, or insurance. You will never run out of money or resources in the game.

    -

    download car parking multiplayer mod apk happymod with unlimited money
    -download car parking multiplayer mod apk happymod with mod menu
    -download car parking multiplayer mod apk happymod latest version
    -download car parking multiplayer mod apk happymod for android
    -download car parking multiplayer mod apk happymod free
    -download car parking multiplayer mod apk happymod 4.8.9.4.1
    -download car parking multiplayer mod apk happymod 4.8.9.3.6
    -download car parking multiplayer mod apk happymod with voice chat
    -download car parking multiplayer mod apk happymod with police mode
    -download car parking multiplayer mod apk happymod with free walking
    -download car parking multiplayer mod apk happymod with real gas stations
    -download car parking multiplayer mod apk happymod with real players
    -download car parking multiplayer mod apk happymod with friend list
    -download car parking multiplayer mod apk happymod with open world
    -download car parking multiplayer mod apk happymod with 70+ cars
    -download car parking multiplayer mod apk happymod with HD graphics
    -download car parking multiplayer mod apk happymod with realistic physics
    -download car parking multiplayer mod apk happymod with missions and goals
    -download car parking multiplayer mod apk happymod with online racing
    -download car parking multiplayer mod apk happymod with car customization
    -download car parking multiplayer mod apk happymod with car exchange
    -download car parking multiplayer mod apk happymod with simulation category
    -download car parking multiplayer mod apk happymod by ayanbiswas2489
    -download car parking multiplayer mod apk happymod by olzhas
    -download car parking multiplayer mod apk happymod from new scientist website[^1^]
    -download car parking multiplayer mod apk happymod from the sun website[^2^] [^3^]
    -download car parking multiplayer mod apk happymod from yahoo news website
    -how to download car parking multiplayer mod apk happymod safely
    -how to download car parking multiplayer mod apk happymod without virus
    -how to download car parking multiplayer mod apk happymod without root
    -how to install car parking multiplayer mod apk happymod on android device
    -how to play car parking multiplayer mod apk happymod online
    -how to update car parking multiplayer mod apk happymod to latest version
    -how to uninstall car parking multiplayer mod apk happymod from android device
    -how to fix car parking multiplayer mod apk happymod not working issue
    -what is the difference between car parking multiplayer and car parking multiplayer mod apk happymod
    -what are the benefits of using car parking multiplayer mod apk happymod over original game
    -what are the drawbacks of using car parking multiplayer mod apk happymod over original game
    -what are the reviews of users who downloaded car parking multiplayer mod apk happymod
    -what are the alternatives to car parking multiplayer mod apk happymod

    -

    Access to all cars and upgrades

    -

    Another benefit of downloading Car Parking Multiplayer Mod APK Happymod is that you will get access to all cars and upgrades in the game. This means that you can drive any car you want, from sports cars to trucks, from classic cars to supercars. You can also unlock all the upgrades for your cars, such as engines, turbos, gearboxes, exhausts, suspensions, wheels, tires, and more. You will have the best cars in the game.

    -

    Menu with various options and features

    -

    The last benefit of downloading Car Parking Multiplayer Mod APK Happymod is that you will get a menu with various options and features in the game. This means that you can activate or deactivate different mods according to your preference. For example, you can enable or disable god mode, speed hack, teleportation, invisibility, free camera, no ads, and more. You can also change the weather, time, traffic, police mode, and other settings in the game. You will have full control over the game.

    -

    How to download and install Car Parking Multiplayer Mod APK Happymod?

    -

    If you are interested in downloading Car Parking Multiplayer Mod APK Happymod, you can follow these simple steps:

    -

    Step 1: Visit the Happymod website and search for Car Parking Multiplayer Mod APK

    -

    The first step is to visit the Happymod website at https://www.happymod.com/ and search for Car Parking Multiplayer Mod APK in the search bar. You will see a list of results with different versions and features of the mod. Choose the one that suits your needs and click on the download button.

    -

    Step 2: Download the APK file and enable unknown sources on your device

    -

    The second step is to download the APK file to your device and enable unknown sources on your device. To do this, go to your device settings > security > unknown sources and toggle it on. This will allow you to install apps from sources other than Google Play Store.

    -

    Step 3: Install the APK file and launch the game

    -

    The third step is to install the APK file and launch the game. To do this, locate the downloaded APK file on your device storage and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to finish. Once done, launch the game from your app drawer or home screen and enjoy.

    -

    Is Car Parking Multiplayer Mod APK Happymod safe and legal?

    -

    Before you download Car Parking Multiplayer Mod APK Happymod, you might wonder if it is safe and legal to use. Here are some answers to these questions:

    -

    The mod APK is safe from viruses and malware

    -

    As we mentioned earlier, Happymod is a safe and reliable platform for downloading modded APK files. All the mods on Happymod are tested by users and verified by editors before they are uploaded to the platform. This means that you can download Car Parking Multiplayer Mod APK Happymod without worrying about viruses, malware, or corrupted files. However, you should always be careful when downloading any file from the internet and scan it with a reputable antivirus software before installing it.

    -

    The mod APK is legal as long as you use it for personal and educational purposes

    -

    Another question you might have is whether Car Parking Multiplayer Mod APK Happymod is legal to use. The answer is that it depends on how you use it and where you live. Generally speaking, modding games and apps is not illegal as long as you use it for personal and educational purposes and do not distribute or sell it to others. However, some countries or regions may have different laws or regulations regarding modding or hacking games and apps. Therefore, you should always check the local laws and rules before downloading or using any modded APK file.

    -

    The mod APK may not be compatible with the latest version of the game or the original developer's policies

    -

    The last thing you should know is that Car Parking Multiplayer Mod APK Happymod may not be compatible with the latest version of the game or the original developer's policies. This means that the mod may not work properly or cause some errors or glitches in the game. It also means that the mod may violate the terms of service or privacy policy of the game or the app store. This could result in your account being banned or suspended by the game developer or the app store. Therefore, you should always use the mod at your own risk and discretion.

    -

    Conclusion

    -

    Car Parking Multiplayer is a realistic and fun car parking game that supports open-world multiplayer mode, car tuning, police mode, and free walking. You can drive, park, and customize over 100 cars with real interiors in various parking scenarios. You can also join thousands of real players from all over the world in a huge map with real gas stations and car services. You can compete against them in races, exchange cars with them, chat with them using voice chat, or even become a police officer and chase them.

    -

    If you want to make the game even more exciting, you can download Car Parking Multiplayer Mod APK Happymod, a modified version of the game that gives you unlimited money, resources, and access to all cars and upgrades. You can also get a menu with various options and features that let you control the game settings and activate different mods. You can download Car Parking Multiplayer Mod APK Happymod from Happymod, a safe and reliable platform for downloading modded APK files for Android games and apps.

    -

    However, you should be aware that Car Parking Multiplayer Mod APK Happymod may not be compatible with the latest version of the game or the original developer's policies. It may also not be legal to use in some countries or regions depending on how you use it and where you live. Therefore, you should always check the local laws and rules before downloading or using any modded APK file. You should also use the mod at your own risk and discretion.

    -

    FAQs

    -

    Here are some frequently asked questions about Car Parking Multiplayer Mod APK Happymod:

    -

    Q: Can I play online mode with Car Parking Multiplayer Mod APK Happymod?

    -

    A: Yes, you can play online mode with Car Parking Multiplayer Mod APK Happymod. However, you may encounter some problems or issues when playing online mode with other players who are using the original version of the game or a different version of the mod. Therefore, we recommend that you play online mode with other players who are using the same version of the mod as you.

    -

    Q: Can I update Car Parking Multiplayer Mod APK Happymod?

    -

    A: No, you cannot update Car Parking Multiplayer Mod APK Happymod from Google Play Store or any other app store. If you want to update Car Parking Multiplayer Mod APK Happymod, you have to visit Happymod website again and download the latest version of the mod from there. However, you should note that updating Car Parking Multiplayer Mod APK Happymod may erase your previous data or progress in the game. Therefore, we suggest that you backup your data or progress before updating Car Parking Multiplayer Mod APK Happymod.

    -

    Q: Can I uninstall Car Parking Multiplayer Mod APK Happymod and reinstall the original version of the game?

    -

    A: Yes, you can uninstall Car Parking Multiplayer Mod APK Happymod and reinstall the original version of the game from Google Play Store or any other app store. However, you should note that uninstalling Car Parking Multiplayer Mod APK Happymod may erase your previous data or progress in the game. Therefore, we suggest that you backup your data or progress before uninstalling Car Parking Multiplayer Mod APK Happymod.

    -

    Q: Can I use Car Parking Multiplayer Mod APK Happymod on iOS devices?

    -

    A: No, you cannot use Car Parking Multiplayer Mod APK Happymod on iOS devices. Car Parking Multiplayer Mod APK Happymod is only compatible with Android devices. If you want to play Car Parking Multiplayer on iOS devices, you have to download the original version of the game from App Store or any other app store.

    -

    Q: Can I share Car Parking Multiplayer Mod APK Happymod with my friends?

    -

    A: Yes, you can share Car Parking Multiplayer Mod APK Happymod with your friends via social media or email. However, you should respect the intellectual property rights of the game developer and the modder and do not distribute or sell Car Parking Multiplayer Mod APK Happymod to others for commercial purposes. You should also inform your friends about the risks and responsibilities of using Car Parking Multiplayer Mod APK Happymod.

    197e85843d
    -
    -
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash Royale Dinheiro Infinito baixe o apk e entre na arena com vantagem!.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash Royale Dinheiro Infinito baixe o apk e entre na arena com vantagem!.md
deleted file mode 100644
index 6e7c6c58eca332def07645965c28774a678e9c12..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash Royale Dinheiro Infinito baixe o apk e entre na arena com vantagem!.md
+++ /dev/null
@@ -1,139 +0,0 @@

Clash Royale Dinheiro Infinito Apk: How to Download and Play

-

Are you a fan of Clash Royale, the strategy and card game that has won over millions of players around the world? Would you like to have unlimited resources to build your deck, buy new cards, upgrade your troops, and challenge your opponents? Then you need to know about Clash Royale Dinheiro Infinito Apk, a mod apk that offers infinite money and gems so you can enjoy the game without limits. In this article, we will explain what Clash Royale is, what the mod apk is, how to download and install the game, and how to play with infinite money. Curious? Then keep reading and find out everything about this amazing game.

-

What is Clash Royale?

-

Clash Royale is a strategy and card game developed by Supercell, the same company behind other hits such as Clash of Clans, Brawl Stars, and Hay Day. The game was released in 2016 for Android and iOS, and since then it has become one of the most popular and profitable games in the world.

-

clash royale dinheiro infinito apk


Download ►►►►► https://urlin.us/2uSTSl


-

A strategy and card game

-

Clash Royale's gameplay is simple and intuitive. At the start of a match, there are three towers, one in the middle and two on the sides. Your goal is to lead your army to destroy the defense of the enemy's central tower while also protecting your own tower. To do this, you need to use cards that represent your troops, spells, and buildings. Each card has an elixir cost, which regenerates over time. You can have up to eight cards in your deck, but you can only use four at a time. A match lasts three minutes, but it can go into overtime in case of a tie.

-

A worldwide success

-

Clash Royale is a game that mixes elements of several genres, such as strategy, tower defense, collectibles, and online multiplayer. In addition, the game has colorful visuals, charismatic characters, fun sound effects, and an engaging soundtrack. All of this makes Clash Royale an addictive, challenging, and fun game. It is no surprise that the game has more than 500 million downloads on the Google Play Store, an average rating of 4.3 stars, and an active community of players who take part in tournaments, leagues, clans, and special events.

-

What is Clash Royale Dinheiro Infinito Apk?

-

Clash Royale Dinheiro Infinito Apk is a mod apk, that is, a modified version of the original game that offers unlimited resources to players.

With this mod apk, you can have infinite money and gems in the game, which means you can buy all the cards you want, upgrade your troops to the maximum, open every chest you find, and take part in every event and challenge without worrying about your balance. This way, you can build your ideal deck, try out new strategies, and have much more fun in the game.

-

The benefits and risks of using the mod apk

-

Clash Royale Dinheiro Infinito Apk has some benefits for players who want a freer and more varied experience in the game. Some of these benefits are:

-

However, Clash Royale Dinheiro Infinito Apk also has some risks that you should consider before downloading and installing the game. Some of these risks are:

-

How to download and install Clash Royale Dinheiro Infinito Apk?

-

If you have decided to download and install Clash Royale Dinheiro Infinito Apk, you need to follow a few simple steps to do so. But first, you need to check whether your device meets the minimum requirements to run the game.

-

The minimum requirements to run the game

-

To download and install Clash Royale Dinheiro Infinito Apk, you need an Android device with the following specifications:

    -

    clash royale mod apk gemas infinitas 2023
    -como baixar clash royale com dinheiro infinito
    -clash royale hack apk download mediafire
    -clash royale atualizado 2023 dinheiro infinito
    -clash royale apk mod tudo infinito 2023
    -clash royale apk mod menu dinheiro infinito
    -baixar clash royale hackeado para android
    -clash royale apk mod servidor privado 2023
    -clash royale apk mod moedas e gemas infinitas
    -clash royale apk mod desbloqueado tudo 2023
    -como instalar clash royale com dinheiro infinito
    -clash royale hack apk sem verificação humana
    -clash royale apk mod funcionando 2023
    -clash royale apk mod online dinheiro infinito
    -clash royale apk mod offline dinheiro infinito
    -baixar clash royale hackeado atualizado 2023
    -clash royale apk mod servidor brasileiro 2023
    -clash royale apk mod cartas lendárias infinitas
    -clash royale apk mod anti ban dinheiro infinito
    -clash royale apk mod sem root dinheiro infinito
    -como jogar clash royale com dinheiro infinito
    -clash royale hack apk unlimited gems and coins
    -clash royale apk mod novas cartas 2023
    -clash royale apk mod pass royale infinito
    -clash royale apk mod elixir infinito 2023
    -baixar clash royale hackeado para ios
    -clash royale apk mod mega cavaleiro infinito
    -clash royale apk mod dragão elétrico infinito
    -clash royale apk mod princesa do gelo infinita
    -clash royale apk mod rei fantasma infinito
    -como atualizar clash royale com dinheiro infinito
    -clash royale hack apk sem root dinheiro infinito
    -clash royale apk mod torneios infinitos 2023
    -clash royale apk mod bau lendário infinito
    -clash royale apk mod troféus infinitos 2023
    -baixar clash royale hackeado para pc
    -clash royale apk mod corredor da morte infinito
    -clash royale apk mod bruxa sombria infinita
    -clash royale apk mod esqueleto gigante infinito
    -clash royale apk mod goblin gigante infinito
    -como desinstalar clash royale com dinheiro infinito
    -clash royale hack apk sem anúncios dinheiro infinito
    -clash royale apk mod clãs infinitos 2023
    -clash royale apk mod guerras de clãs infinitas
    -clash royale apk mod desafios especiais infinitos
    -baixar clash royale hackeado para celular
    -clash royale apk mod dragão infernal infinito
    -clash royale apk mod lenhador infinito
    -clash royale apk mod mineiro infinito

    - - - - - - -
Requirement | Specification
Android version | 4.1 or higher
Free storage | At least 150 MB
Internet connection | Wi-Fi or mobile data
Permissions | Access to storage, camera, microphone, and location
    -

Step by step: how to download and install the mod apk

    -

After checking that your device meets the minimum requirements, you can follow the steps below to download and install Clash Royale Dinheiro Infinito Apk:

    -
      -
1. Go to a trusted site that offers the mod apk download. You can search Google for "Clash Royale Dinheiro Infinito Apk" or use one of the links below:
2. Tap the mod apk download button and wait for the file to be downloaded to your device. The file should be in .apk format and around 150 MB in size.
3. Before installing the mod apk, you need to enable the option to install apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and turn the option on.
4. After enabling the option, locate the mod apk file in your file manager and tap it to start the installation. Follow the on-screen instructions and wait for the installation to finish.
5. After the installation, you can open the game and enjoy the infinite gold and gems. You do not need to register or log in; just tap Play and start having fun.
    -
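As a companion to steps 1 and 2 above, here is a minimal Python sketch for checking the downloaded file before you sideload it. The file name and path are only examples (they do not come from any specific download site), so adjust them to wherever your browser saves the .apk:

```python
import hashlib
from pathlib import Path

# Example path to the downloaded file; adjust it to your own download location.
apk_path = Path("Download/clash-royale-dinheiro-infinito.apk")

# Quick sanity check: the steps above say the file should be around 150 MB.
size_mb = apk_path.stat().st_size / (1024 * 1024)
print(f"File size: {size_mb:.1f} MB")

# Compute a SHA-256 checksum so you can compare it against the one published
# by the download page, if the site provides one.
sha256 = hashlib.sha256()
with apk_path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)
print(f"SHA-256: {sha256.hexdigest()}")
```

This does not replace an antivirus scan; it only confirms that the file arrived intact and matches what the site claims to host.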

How to play Clash Royale Dinheiro Infinito Apk?

    -

Now that you have downloaded and installed Clash Royale Dinheiro Infinito Apk, you can play the game with unlimited resources. But first, you should know some differences between the original game and the mod apk, as well as some tips and tricks to do well in the game.

    -

The main differences between the original game and the mod apk

    -

Clash Royale Dinheiro Infinito Apk is very similar to the original game, but it has some differences you should know about. Some of these differences are:

    - -

Tips and tricks to do well in the game

    -

Despite the differences, Clash Royale Dinheiro Infinito Apk is still a strategy and card game that requires reasoning and planning. So here are some tips and tricks to help you do well in the game:

    - -

Conclusion

    -

Clash Royale is a strategy and card game that is a hit all over the world. With Clash Royale Dinheiro Infinito Apk, you can have infinite gold and gems in the game, and thus buy and upgrade every card you want, open every chest you find, and take part in every event and challenge without limits. However, you also need to know the benefits and risks of using the mod apk, how to download and install the game, and how to play with unlimited resources. With this information, you can decide whether or not it is worth downloading and installing Clash Royale Dinheiro Infinito Apk, and how to make the most of the game.

    -

So, did you enjoy the article? Have you already played Clash Royale Dinheiro Infinito Apk? What did you think of the game? Leave your comment below and share your opinion with us. And if you liked the article, do not forget to share it with your friends on social media. Thanks for reading and see you next time!

    -

    FAQs

    -

Here are some frequently asked questions about Clash Royale Dinheiro Infinito Apk:

    -

    197e85843d
    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Ball Run Merge 2048 APK A New Twist on the Classic 2048 Game.md b/spaces/1phancelerku/anime-remove-background/Ball Run Merge 2048 APK A New Twist on the Classic 2048 Game.md deleted file mode 100644 index 49ee025f48178d4b158069b9db9981cfe4da632b..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Ball Run Merge 2048 APK A New Twist on the Classic 2048 Game.md +++ /dev/null @@ -1,129 +0,0 @@ -
    -

    Ball Run Merge 2048 APK: A Fun and Relaxing Game for Android Users

    -

    Are you looking for a new game to play on your Android device? Do you enjoy casual games that are easy to learn but hard to master? If yes, then you might want to try Ball Run Merge 2048 APK, a fun and relaxing game that combines the concepts of ball rolling and number merging. In this article, we will tell you what Ball Run Merge 2048 APK is, how to play it, why you should download it, how to download it, and some tips and tricks for playing it.

    -

    ball run merge 2048 apk


    Downloadhttps://jinyurl.com/2uNSIS



    -

    What is Ball Run Merge 2048 APK?

    -

    Ball Run Merge 2048 APK is a game developed by Kayac Inc., a Japanese company that specializes in creating innovative and entertaining games. The game was released in June 2021 and has received positive reviews from users and critics alike. The game is inspired by the popular puzzle game 2048, where you have to merge tiles with the same number until you reach 2048. However, instead of tiles, you have balls that roll on a track. You have to control the balls with your finger and merge them with other balls of the same number and color. The more balls you merge, the bigger they get and the higher the number they show. The goal is to reach 2048 or higher before reaching the finish line.

    -

    How to play Ball Run Merge 2048 APK

    -

    The gameplay of Ball Run Merge 2048 APK is simple and intuitive. Here are the basic steps:

    -
      -
1. Tap and swipe to control the rolling ball.
2. While the ball is rolling, merge it with balls of the same number and color to eventually reach 2048.
3. Watch out for the obstacles, which will lower your ball's number.
4. The big prize is waiting for you at the finish line!
    -

    Features of Ball Run Merge 2048 APK

    -

    Ball Run Merge 2048 APK has many features that make it an enjoyable and relaxing game. Some of them are:

    - -

    Why you should download Ball Run Merge 2048 APK

    -

    If you are still not convinced that Ball Run Merge 2048 APK is a game worth playing, here are some reasons why you should download it:

    -

    It is free and easy to install

    -

    Ball Run Merge 2048 APK is a free game that does not require any registration or subscription. You can download it from Google Play Store or other websites that offer APK files. The installation process is quick and straightforward. You just need to allow the app to access your device's storage and permissions.

    -

    It is addictive and challenging

    -

    Ball Run Merge 2048 APK is a game that will keep you hooked for hours. It is not as easy as it looks. You have to be fast and accurate in swiping the balls and merging them with the right ones. You also have to avoid the obstacles and traps that can lower your number or make you lose the game. The game gets harder as you progress, so you have to be smart and strategic in your moves. The game is a great way to test your reflexes, concentration, and math skills.

    -

    ball run merge 2048 game
    -ball run merge 2048 download
    -ball run merge 2048 mod apk
    -ball run merge 2048 online
    -ball run merge 2048 free
    -ball run merge 2048 app
    -ball run merge 2048 hack
    -ball run merge 2048 cheats
    -ball run merge 2048 tips
    -ball run merge 2048 tricks
    -ball run merge 2048 review
    -ball run merge 2048 gameplay
    -ball run merge 2048 android
    -ball run merge 2048 ios
    -ball run merge 2048 pc
    -ball run merge 2048 windows
    -ball run merge 2048 mac
    -ball run merge 2048 linux
    -ball run merge 2048 chromebook
    -ball run merge 2048 emulator
    -ball run merge 2048 bluestacks
    -ball run merge 2048 noxplayer
    -ball run merge 2048 memu
    -ball run merge 2048 ldplayer
    -ball run merge 2048 apkcombo [^1^]
    -ball run merge 2048 kayac [^2^]
    -ball run - merge 2048 apk [^1^]
    -ballrun2048 apk [^2^]
    -ballrun - 2048 game apk [^2^]
    -download ballrun - 2048 game [^2^]
    -download ballrun - merge 2048 [^1^]
    -play ballrun - merge 2048 online [^1^]
    -play ballrun - 2048 game online [^2^]
    -how to play ballrun - merge 2048 [^1^]
    -how to play ballrun - 2048 game [^2^]
    -how to win ballrun - merge 2048 [^1^]
    -how to win ballrun - 2048 game [^2^]
    -best strategy for ballrun - merge 2048 [^1^]
    -best strategy for ballrun - 2048 game [^2^]
    -best score in ballrun - merge 2048 [^1^]
    -best score in ballrun - 2048 game [^2^]
    -fun and addictive ballrun - merge 2048 [^1^]
    -fun and addictive ballrun - 2048 game [^2^]
    -relaxing and challenging ballrun - merge 2048 [^1^]
    -relaxing and challenging ballrun - 2048 game [^2^]
    -colorful and smooth graphics in ballrun - merge 2048 [^1^]
    -colorful and smooth graphics in ballrun - 2048 game [^2^]
    -easy and simple controls in ballrun - merge 2048 [^1^]
    -easy and simple controls in ballrun - 2048 game [^2^]
    -new and unique gameplay in ballrun - merge 2048 [^1^]

    -

    It has colorful graphics and sound effects

    -

    Ball Run Merge 2048 APK is a game that will delight your senses. It has bright and vivid graphics that create a cheerful and lively atmosphere. The balls are colorful and shiny, and the tracks are diverse and dynamic. The game also has upbeat and catchy sound effects that match the mood of the game. The game is a feast for your eyes and ears.

    -

    How to download Ball Run Merge 2048 APK

    -

    If you are interested in playing Ball Run Merge 2048 APK, you have several options to download it. Here are some of them:

    -

    From Google Play Store

    -

    The easiest and safest way to download Ball Run Merge 2048 APK is from Google Play Store, the official app store for Android devices. You just need to follow these steps:

    -
      -
1. Open Google Play Store on your device.
2. Search for "Ball Run Merge 2048" or use this link: (https://play.google.com/store/apps/details?id=com.kayac.ball_run_merge_2048).
3. Tap on the "Install" button and wait for the download to finish.
4. Enjoy playing Ball Run Merge 2048 APK!
    -

    From APKCombo website

    -

    If you want to download Ball Run Merge 2048 APK from a third-party website, you can use APKCombo, a reliable and secure website that offers APK files for various apps and games. You just need to follow these steps:

    -
      -
1. Open your browser and go to this link: (https://apkcombo.com/ball-run-merge-2048/com.kayac.ball_run_merge_2048/).
2. Select the version of Ball Run Merge 2048 APK that you want to download.
3. Tap on the "Download APK" button and wait for the download to finish.
4. Before installing the app, make sure you enable "Unknown Sources" in your device's settings.
5. Locate the downloaded file in your device's storage and tap on it to install it.
6. Enjoy playing Ball Run Merge 2048 APK!
    -

    From other sources

    -

    You can also download Ball Run Merge 2048 APK from other websites that offer APK files, such as APKPure, Uptodown, or APKMirror. However, you should be careful when downloading from these sources, as they may contain malware or viruses that can harm your device or compromise your privacy. You should always check the reviews and ratings of the apps before downloading them, and scan them with an antivirus software before installing them. You should also avoid downloading modded or hacked versions of the apps, as they may not work properly or cause problems with your device.

    -

    Tips and tricks for playing Ball Run Merge 2048 APK

    -

    To help you enjoy playing Ball Run Merge 2048 APK more, here are some tips and tricks that you can use:

    -

    Swipe fast and accurately

    -

    The key to playing Ball Run Merge 2048 APK is to swipe fast and accurately. You have to swipe the balls quickly to merge them with other balls of the same number and color. You also have to swipe them accurately to avoid hitting the wrong balls or missing the right ones. Swiping fast and accurately will help you increase your score and reach higher numbers faster.

    -

    Avoid obstacles and traps

    -

    Another important thing to remember when playing Ball Run Merge 2048 APK is to avoid obstacles and traps. These are things that can lower your ball number or make you lose the game. Some examples of obstacles are spikes, walls, holes, or other balls with different numbers or colors. Some examples of traps are bombs, magnets, or switches that can change the direction or speed of the balls. You have to be careful and alert when swiping the balls, as some obstacles and traps may appear suddenly or unexpectedly.

    -

    Collect coins and rewards

    -

    A fun part of playing Ball Run Merge 2048 APK is collecting coins and rewards. These are things that can help you boost your speed, increase your number, or clear obstacles. Some examples of coins are gold coins, silver coins, or rainbow coins. Some examples of rewards are stars, hearts, diamonds, or keys. You can use these coins and rewards to buy new balls, unlock new levels, or activate power-ups. You can find these coins and rewards on the track, or you can get them from chests, daily bonuses, or watching ads. You should try to collect as many coins and rewards as you can, as they can make your game more fun and exciting.

    -

    Conclusion

    -

    Ball Run Merge 2048 APK is a fun and relaxing game that you can play on your Android device. It is a game that combines the concepts of ball rolling and number merging, where you have to swipe the balls and merge them with other balls of the same number and color. The game has various levels, different types of balls, power-ups, leaderboards, achievements, and more. The game is free and easy to download from Google Play Store or other websites that offer APK files. The game is addictive and challenging, but also colorful and cheerful. The game is a great way to pass the time, exercise your brain, and have fun.

    -

    FAQs

    -

    Here are some frequently asked questions about Ball Run Merge 2048 APK:

    -
      -
1. What is the highest number you can reach in Ball Run Merge 2048 APK?

The highest number you can reach in Ball Run Merge 2048 APK is 8192, which is the maximum number that can fit on a ball. However, reaching this number is very difficult and rare, as you have to merge many balls of the same number and color without hitting any obstacles or traps.

2. How do you get more coins and rewards in Ball Run Merge 2048 APK?

You can get more coins and rewards in Ball Run Merge 2048 APK by collecting them on the track, opening chests, getting daily bonuses, or watching ads. You can also get more coins and rewards by completing achievements or ranking high on the leaderboards.

3. How do you use power-ups in Ball Run Merge 2048 APK?

You can use power-ups in Ball Run Merge 2048 APK by tapping on them when they appear on the screen. Some power-ups are activated automatically, while others require you to tap on them again to use them. Some examples of power-ups are speed boost, number increase, obstacle clear, or magnet.

4. How do you unlock new levels and balls in Ball Run Merge 2048 APK?

You can unlock new levels and balls in Ball Run Merge 2048 APK by earning stars, hearts, diamonds, or keys. You can earn these items by completing levels, collecting rewards, or buying them with coins. You can use these items to unlock new tracks, themes, or balls with different shapes, colors, and numbers.

5. Is Ball Run Merge 2048 APK safe to download and play?

Yes, Ball Run Merge 2048 APK is safe to download and play. The game does not contain any malware or viruses that can harm your device or compromise your privacy. The game also does not require any registration or subscription. However, you should always download the game from trusted sources such as Google Play Store or APKCombo website.

      -

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download 20 Minutes Till Dawn and Experience a Thrilling Roguelike Shooter.md b/spaces/1phancelerku/anime-remove-background/Download 20 Minutes Till Dawn and Experience a Thrilling Roguelike Shooter.md deleted file mode 100644 index 1c964d9d6454564ba3ca89f4411aa0f408118d3b..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download 20 Minutes Till Dawn and Experience a Thrilling Roguelike Shooter.md +++ /dev/null @@ -1,90 +0,0 @@ - -

    20 Minutes Till Dawn: A Survival Roguelite Game Review

    -

    If you are looking for a fast-paced, action-packed, and challenging game that will test your skills and reflexes, then you might want to check out 20 Minutes Till Dawn. This is a survival roguelite game where you have to fight against endless hordes of Lovecraftian monsters and survive the night. In this article, we will review the game's features, gameplay, graphics, sound, pros, cons, and more.

    -

    20 minutes till dawn free download


    Download ►►► https://jinyurl.com/2uNRfX



    -

    Introduction

    -

    20 Minutes Till Dawn is a roguelike shoot 'em up video game developed and published by flanne. The game was released in early access on Steam on June 8, 2022, and was ported to Android and iOS by Erabit Studios on September 9, 2022. The game exited early access on Steam with version 1.0 on June 8th, 2023.

    -

    The game belongs to the genre of survival roguelite, which means that it features permadeath, randomization, and progression across runs. The goal of the game is to survive for 20 minutes until dawn breaks, while facing an onslaught of monsters that become stronger and more numerous as time passes. The game is inspired by Vampire Survivors, but with more active combat and customization options.

    -

    The game is available on Steam for $4.99, as well as on Google Play, App Store, and TapTap for free. The game has received very positive reviews from players and critics alike, with over 20,000 reviews on Steam and over 6 million downloads on mobile platforms. The game has also been featured by IGN, TheGamer, Level Winner, and other media outlets.

    -

    Gameplay

    -

    The gameplay of 20 Minutes Till Dawn is simple but challenging. You control a character who can move around with WASD keys or a virtual joystick, aim with the mouse or touch screen, and fire with left click or tap. You can also use right click or double tap to activate your special ability, which varies depending on your character.

    -

    You start each run by choosing one of several characters, each with their own unique skill, bonus, and weapon. You can unlock more characters by spending gems, which are earned by killing monsters or completing achievements. You can also choose your starting weapon from a variety of guns, melee weapons, or magic items. You can unlock more weapons by spending gems as well.

    -

    As you kill monsters, you gain experience points that allow you to level up. Each time you level up, you can choose one of four randomly generated upgrades that enhance your stats or abilities. These upgrades can range from increasing your damage or health, to adding effects like fire, poison, or stun to your attacks, to unlocking new abilities like dash, shield, or summon. The upgrades are permanent for the current run, but they are lost when you die or restart.

    -

    20 Minutes Till Dawn roguelike shooting game
    -How to play 20 Minutes Till Dawn on PC
    -20 Minutes Till Dawn full version download
    -20 Minutes Till Dawn best weapons and runes
    -20 Minutes Till Dawn tips and tricks
    -20 Minutes Till Dawn review and rating
    -20 Minutes Till Dawn APK download for Android
    -20 Minutes Till Dawn cheats and hacks
    -20 Minutes Till Dawn gameplay and walkthrough
    -20 Minutes Till Dawn online multiplayer mode
    -20 Minutes Till Dawn free steam key
    -20 Minutes Till Dawn system requirements and specs
    -20 Minutes Till Dawn mod apk unlimited money
    -20 Minutes Till Dawn soundtrack and music
    -20 Minutes Till Dawn developer and publisher
    -20 Minutes Till Dawn release date and updates
    -20 Minutes Till Dawn trailer and screenshots
    -20 Minutes Till Dawn similar games and alternatives
    -20 Minutes Till Dawn wiki and guide
    -20 Minutes Till Dawn achievements and trophies
    -20 Minutes Till Dawn horror survival game
    -20 Minutes Till Dawn lovecraftian monsters and enemies
    -20 Minutes Till Dawn special abilities and skills
    -20 Minutes Till Dawn how to survive the night
    -20 Minutes Till Dawn endless hordes and waves
    -20 Minutes Till Dawn action video game genre
    -20 Minutes Till Dawn Google Play store link
    -20 Minutes Till Dawn BlueStacks emulator download
    -20 Minutes Till Dawn PC game free torrent
    -20 Minutes Till Dawn crack and patch

    -

    To survive the night, you have to keep moving and shooting, while avoiding the enemies' attacks and environmental hazards. The enemies come in different shapes and sizes, each with their own behavior and attack pattern. Some of them are fast and agile, some are slow and tanky, some are ranged and explosive, and some are stealthy and deadly. You will also encounter bosses every few minutes, which are much stronger and tougher than regular enemies. The bosses have unique abilities and weaknesses that you have to exploit to defeat them.

    -

    The game has four different game modes: Normal, Hardcore, Endless, and Custom. Normal mode is the default mode, where you have to survive for 20 minutes with three lives. Hardcore mode is similar to Normal mode, but you only have one life and the enemies are more aggressive. Endless mode is where you can play as long as you want, but the enemies become harder and more frequent as time goes on. Custom mode is where you can create your own rules and settings for the game, such as changing the time limit, the enemy spawn rate, the difficulty level, and more.

    -

    Graphics and Sound

    -

    The graphics of 20 Minutes Till Dawn are colorful and pixelated, giving the game a retro and nostalgic feel. The game has a dark and gloomy atmosphere, with a night sky full of stars and a moon that changes phases as time passes. The game also has dynamic lighting and shadows, which create a contrast between the dark background and the bright projectiles and explosions. The game has a variety of environments, such as forests, deserts, cities, caves, and more. Each environment has its own theme and features, such as trees, rocks, buildings, traps, and secrets.

    -

    The sound of 20 Minutes Till Dawn is immersive and engaging, with a soundtrack that matches the mood and intensity of the game. The game has a synthwave style music that is catchy and energetic, with different tracks for each environment and boss. The game also has sound effects that are realistic and satisfying, such as the sound of gunfire, explosions, screams, footsteps, and more. The game has no voice acting or dialogue, but it does have text messages that appear on the screen to give you hints or warnings.

    -

The game performs well on most devices and platforms, with smooth gameplay and minimal lag or glitches. The game has low system requirements for PC users, as well as options to adjust the graphics quality and resolution for mobile users. The game also supports cloud saving, controller support, leaderboards, achievements, and multiplayer co-op.

    -

    Pros and Cons

    -

    20 Minutes Till Dawn is a fun and addictive game that will keep you entertained for hours. However, like any other game, it also has its pros and cons. Here are some of them:

    - - - - - - - -
Pros | Cons
Fast-paced and challenging gameplay that requires skill and strategy | Permadeath can be frustrating and discouraging for some players
Variety of characters, weapons, upgrades, enemies, bosses, environments, and game modes that offer replay value | Randomization can be unfair or unbalanced at times
Retro-style graphics that are colorful and atmospheric | Pixelated graphics might not appeal to everyone
Synthwave-style music that is catchy and energetic | Music can get repetitive or annoying after a while
Low system requirements and cross-platform compatibility | Some bugs or glitches might occur occasionally
    -

    Conclusion

    -

    20 Minutes Till Dawn is a survival roguelite game that will test your skills and reflexes as you fight against endless hordes of Lovecraftian monsters and survive the night. The game has a simple but challenging gameplay, a variety of features and options, a retro-style graphics and sound, and a low system requirements and cross-platform compatibility. The game is suitable for anyone who enjoys action, horror, or roguelite games, and who is looking for a thrilling and rewarding experience. The game is also affordable and accessible, as it costs only $4.99 on Steam and is free on mobile platforms.

    -

    If you are interested in playing 20 Minutes Till Dawn, you can find more information or download the game from the following links:

    - -

    You can also watch some gameplay videos or read some reviews from the following sources:

    - -

    FAQs

    -

    Here are some of the frequently asked questions about 20 Minutes Till Dawn:

    -
      -
1. How do I unlock more characters and weapons?

You can unlock more characters and weapons by spending gems, which are earned by killing monsters or completing achievements. You can also find some weapons as loot drops from enemies or chests.

2. How do I save my progress?

You can save your progress by using the cloud saving feature, which is available on all platforms. You can also use the local saving feature, which is available on PC and mobile platforms. However, keep in mind that your progress is only saved between runs, not during runs. If you die or restart, you will lose your current upgrades and items.

3. How do I play with my friends?

You can play with your friends by using the multiplayer co-op feature, which is available on all platforms. You can join or host a game with up to four players online or locally. You can also chat with your friends using the voice or text chat feature.

4. How do I change the game settings?

You can change the game settings by using the options menu, which is available on all platforms. You can adjust the graphics quality, resolution, sound volume, language, controls, and more.

5. How do I contact the developers or report a bug?

You can contact the developers or report a bug by using the feedback feature, which is available on all platforms. You can also visit the official website, Discord server, Twitter page, or Facebook page of the game.

      -
    -

    I hope you enjoyed this article and found it helpful. If you have any questions or comments, feel free to leave them below. Thank you for reading and have a great day!

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Epic Conquest 2 Mod Apk 1.7.5 and Enjoy Unlimited Gold and Rubies.md b/spaces/1phancelerku/anime-remove-background/Download Epic Conquest 2 Mod Apk 1.7.5 and Enjoy Unlimited Gold and Rubies.md deleted file mode 100644 index ab9723cfe8e1de6b543fd397780dfd2c36ff7011..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Epic Conquest 2 Mod Apk 1.7.5 and Enjoy Unlimited Gold and Rubies.md +++ /dev/null @@ -1,102 +0,0 @@ -
    -

    Epic Conquest 2 Mod Apk 1.7.5: Everything You Need to Know

    -

    If you are a fan of action RPG games and anime, you might have heard of Epic Conquest 2, a game created by a small indie team of 4 with passion and love for the genre. Epic Conquest 2 is inspired by the classics, with a special touch in the combat and story, giving you an experience that's hard to find in similar games.

    -

    epic conquest 2 mod apk 1.7.5


    DOWNLOAD ————— https://jinyurl.com/2uNUhu



    -

    But what if you want to enhance your gaming experience even more? What if you want to unlock all the costumes, skills, items, and features that the game has to offer? Well, that's where a mod apk comes in handy.

    -

    A mod apk is a modified version of an original app that allows you to access features that are normally locked or restricted in the official app. For example, a mod apk can give you unlimited money, gems, resources, or premium items that you would otherwise have to pay for or grind for hours.

    -

    One of the most popular mod apks for Epic Conquest 2 is the version 1.7.5, which has many amazing features such as:

    - -

    With these features, you can enjoy Epic Conquest 2 without any limitations or interruptions. You can customize your character's appearance, skills, and equipment to suit your playstyle. You can explore the open world with all kinds of treasures and resources to strengthen your character. You can experience the anime-style story with cutscenes and character expressions that will keep you hooked.

    -

    How to Download and Install Epic Conquest 2 Mod Apk 1.7.5

    -

    If you are interested in trying out this mod apk, here are the steps you need to follow:

    -
      -
1. Download the mod apk file from a reliable source. You can use this link as an example.
2. Before installing the mod apk, make sure you have enough storage space on your device and that you have enabled the option to install apps from unknown sources in your settings.
3. Locate the downloaded file in your file manager and tap on it to start the installation process.
4. Follow the instructions on the screen and wait for the installation to finish.
5. Launch the game and enjoy!
    -

    However, before installing the mod apk, there are some precautions you need to take:

    -

    epic conquest 2 mod apk 1.7.5 free shopping
    -epic conquest 2 mod apk 1.7.5 unlock all character
    -epic conquest 2 mod apk 1.7.5 latest version
    -epic conquest 2 mod apk 1.7.5 unlimited gold and rubies
    -epic conquest 2 mod apk 1.7.5 offline
    -epic conquest 2 mod apk 1.7.5 download for android
    -epic conquest 2 mod apk 1.7.5 no root
    -epic conquest 2 mod apk 1.7.5 hack
    -epic conquest 2 mod apk 1.7.5 mega mod
    -epic conquest 2 mod apk 1.7.5 gameplay
    -epic conquest 2 mod apk 1.7.5 review
    -epic conquest 2 mod apk 1.7.5 cheats
    -epic conquest 2 mod apk 1.7.5 tips and tricks
    -epic conquest 2 mod apk 1.7.5 guide
    -epic conquest 2 mod apk 1.7.5 walkthrough
    -epic conquest 2 mod apk 1.7.5 best build
    -epic conquest 2 mod apk 1.7.5 blacksmith system
    -epic conquest 2 mod apk 1.7.5 character customization
    -epic conquest 2 mod apk 1.7.5 skills and masteries
    -epic conquest 2 mod apk 1.7.5 costumes and outfits
    -epic conquest 2 mod apk 1.7.5 items and crafting
    -epic conquest 2 mod apk 1.7.5 dungeons and secrets
    -epic conquest 2 mod apk 1.7.5 story and lore
    -epic conquest 2 mod apk 1.7.5 update and patch notes
    -epic conquest 2 mod apk 1.7.5 features and benefits
    -epic conquest 2 mod apk 1.7.5 comparison and alternatives
    -epic conquest 2 mod apk 1.7.5 pros and cons
    -epic conquest 2 mod apk 1.7.5 ratings and reviews
    -epic conquest 2 mod apk 1.7.5 download link and instructions
    -epic conquest 2 mod apk 1.7.5 installation and setup
    -epic conquest 2 mod apk 1.7.5 troubleshooting and support
    -epic conquest 2 mod apk 1.7.5 faq and answers
    -epic conquest 2 mod apk 1.7.5 forum and community
    -epic conquest 2 mod apk 1.7.5 developer and contact info
    -epic conquest 2 mod apk 1.7.5 news and updates

    - -

    How to Play Epic Conquest 2 Mod Apk 1.7.5

    -

    Now that you have installed the mod apk, you are ready to play Epic Conquest 2 with all the benefits and advantages. Here are some tips and tricks to help you get started:

    - -

    However, using the mod apk also has some drawbacks that you need to be aware of:

    - -

    Conclusion

    -

    Epic Conquest 2 is a great game for fans of action RPGs and anime, with a lot of content and features to enjoy. However, if you want to experience the game in a different way, you can try using the epic conquest 2 mod apk 1.7.5, which gives you unlimited access to everything in the game.

    -

    This mod apk is easy to download and install, as long as you follow the steps and precautions mentioned above. It also gives you a lot of tips and tricks to play the game better and have more fun.

    -

    However, you should also be aware of the risks and consequences of using this mod apk, such as bugs, data loss, or bans. You should also respect the developers and creators of the game by not using this mod apk for illegal or unethical purposes.

    -

    If you are interested in trying out this mod apk, you can use this link to download it. If you have any feedback or questions about this mod apk, feel free to leave a comment below or contact us through our website.

    -

    FAQs

    -

    Here are some frequently asked questions about epic conquest 2 mod apk 1.7.5:

    -
      -
1. Is this mod apk safe and virus-free?

Yes, this mod apk is safe and virus-free, as long as you download it from a reliable source like this link. However, you should always scan any file you download with an antivirus or malware detector before installing it on your device.

2. Does this mod apk work on any device or platform?

3. Does this mod apk require root access or any special permissions?

No, this mod apk does not require root access or any special permissions to run on your device. You just need to enable the option to install apps from unknown sources in your settings.

4. Can I update this mod apk to the latest version of the game?

No, this mod apk is based on version 1.7.5 of the game, which is not the latest version. If you update this mod apk, you might lose the mod features or encounter compatibility issues. You should wait for a new version of the mod apk to be released before updating.

5. Can I play online or multiplayer with this mod apk?

No, this mod apk is not compatible with the online or multiplayer features of the game. If you try to play online or multiplayer with this mod apk, you might get banned or suspended from the game servers. You should only play offline or solo with this mod apk.

        -

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Mortal Kombat X MOD APK Data for Android and iOS Devices.md b/spaces/1phancelerku/anime-remove-background/Download Mortal Kombat X MOD APK Data for Android and iOS Devices.md deleted file mode 100644 index 807d7b45d7cf5529e47f80d4e7688bf7103c388e..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Mortal Kombat X MOD APK Data for Android and iOS Devices.md +++ /dev/null @@ -1,98 +0,0 @@ -
      -

      Mortal Kombat X Mod APK + Data Download: How to Install and Play the Ultimate Fighting Game on Your Android Device

      -

      Mortal Kombat X is one of the most popular and acclaimed fighting games of all time. It is the tenth installment in the Mortal Kombat franchise and a sequel to Mortal Kombat (2011). It features stunning graphics, cinematic presentation, brutal gameplay, and a rich story mode that takes place 25 years after the events of its predecessor. It also introduces new characters, variations, modes, and features that make it a must-play for any fan of the genre.

      -

      mortal kombat x mod apk + data download


      Download Zip ⚹⚹⚹ https://jinyurl.com/2uNPXn



      -

      But what if you want to play Mortal Kombat X on your Android device? Unfortunately, the official version of the game is not available on Google Play Store due to its high requirements and compatibility issues. However, there is a way to enjoy this amazing game on your smartphone or tablet by using a mod apk + data download. A mod apk is a modified version of an application that allows you to access premium features, unlock content, bypass restrictions, and enhance performance. A data download is a file that contains additional information and resources for an application, such as graphics, sounds, levels, etc.

      -

      By using a mod apk + data download for Mortal Kombat X, you can experience the full game on your Android device without any limitations or problems. You can play as any character you want, use any variation you like, customize your fighters, access all game modes, complete all challenges, and enjoy all the updates and DLCs that have been released for the game. You can also play online with other players around the world, or offline with your friends using local multiplayer. All you need is a compatible device, enough storage space, and a reliable internet connection.

      -

      How to Download and Install Mortal Kombat X Mod APK + Data on Your Android Device

      -

      If you are interested in playing Mortal Kombat X with mod apk + data on your Android device, here are the steps you need to follow:

      -

      Step 1: Enable unknown sources on your device settings

      -

      Before you can install any mod apk or data file on your device, you need to enable unknown sources on your device settings. This will allow you to install applications from sources other than Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may also need to confirm this action by tapping OK or Allow.

      -

      Step 2: Download the mod apk + data file from a trusted source

      -

      Next, you need to download the mod apk + data file for Mortal Kombat X from a trusted source. There are many websites that offer this file, but not all of them are safe and reliable. Some may contain viruses, malware, or fake or outdated links. Therefore, you should always do some research and check the reviews and ratings of the source before downloading anything. One of the best sources for Mortal Kombat X mod apk + data download is [this website]. It offers a safe, fast, and easy download link that works for most Android devices. You can also find more information and instructions on how to use the mod apk + data file on the website.

      -

      mortal kombat x unlimited coins and souls apk + data
      -mortal kombat x mega mod apk + obb data
      -mortal kombat x hacked version apk + data offline
      -mortal kombat x latest mod apk + data for android
      -mortal kombat x mod apk + data highly compressed
      -mortal kombat x mod apk + data revdl
      -mortal kombat x mod apk + data android 1
      -mortal kombat x mod apk + data rexdl
      -mortal kombat x mod apk + data free download
      -mortal kombat x mod apk + data unlimited everything
      -mortal kombat x mod apk + data all characters unlocked
      -mortal kombat x mod apk + data no root
      -mortal kombat x mod apk + data offline mode
      -mortal kombat x mod apk + data anti ban
      -mortal kombat x mod apk + data all gpu
      -mortal kombat x mod apk + data mali
      -mortal kombat x mod apk + data adreno
      -mortal kombat x mod apk + data power vr
      -mortal kombat x mod apk + data 1.18.0
      -mortal kombat x mod apk + data 1.21.0
      -mortal kombat x mod apk + data 1.22.0
      -mortal kombat x mod apk + data 1.23.0
      -mortal kombat x mod apk + data 1.24.0
      -mortal kombat x mod apk + data 1.25.0
      -mortal kombat x mod apk + data 2.0.0
      -mortal kombat x mod apk + data 2.1.0
      -mortal kombat x mod apk + data 2.2.0
      -mortal kombat x mod apk + data 2.3.0
      -mortal kombat x mod apk + data 2.4.0
      -mortal kombat x mod apk + data 2.5.0
      -mortal kombat x god mode apk + data download
      -mortal kombat x unlimited health apk + data download
      -mortal kombat x one hit kill apk + data download
      -mortal kombat x unlocked skins apk + data download
      -mortal kombat x unlocked fatalities apk + data download
      -mortal kombat x unlocked brutalities apk + data download
      -mortal kombat x unlocked cards apk + data download
      -mortal kombat x unlocked levels apk + data download
      -mortal kombat x unlocked features apk + data download
      -mortal kombat x premium edition apk + data download
      -how to download and install mortal kombat x mod apk + data on android device
      -how to download and install mortal kombat x mod apk + data on pc using bluestacks
      -how to download and install mortal kombat x mod apk + data on ios device
      -how to download and install mortal kombat x mod apk + data on mac using nox player
      -how to fix error while downloading or installing mortal kombat x mod apk + data
      -how to update or uninstall mortal kombat x mod apk + data
      -how to play online or offline with friends using mortal kombat x mod apk + data
      -how to backup or restore your progress in mortal kombat x mod apk + data
      -how to get more coins and souls in mortal kombat x mod apk + data

      -

      Step 3: Extract the data file to the obb folder on your device storage

      -

      After you have downloaded the mod apk + data file, you need to extract the data file to the obb folder on your device storage. The obb folder is where the game stores its additional data and resources. To do this, you need a file manager app that can handle zip files, such as ES File Explorer, ZArchiver, or RAR. You can download any of these apps from Google Play Store for free. Once you have installed a file manager app, follow these steps:

- Locate the mod apk + data file that you have downloaded on your device storage. It should have a name like Mortal-Kombat-X-Mod-APK-Data.zip or something similar.
- Tap and hold on the file and select Extract or Unzip from the menu that appears.
- Choose a destination folder where you want to extract the file. You can create a new folder or use an existing one.
- Tap OK or Extract to start the extraction process. It may take a few minutes depending on the size of the file and your device speed.
- Once the extraction is done, you should see a folder named com.wb.goog.mkx or something similar inside the destination folder. This is the data folder for Mortal Kombat X.
- Move or copy this folder to the obb folder on your device storage. The obb folder is usually located in Android > obb. If you don't see it, you may need to create it manually.
- Make sure that the data folder is inside the obb folder and has the correct name.
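If you would rather do the extraction and copy on a computer and then push the folder to your phone, the same steps can be scripted. This is only a sketch under a few assumptions (the zip name from the list above, a working folder of your choice, and the usual /storage/emulated/0/Android/obb location, which can differ by device):

```python
import shutil
import zipfile
from pathlib import Path

# Assumed file and folder names; adjust them to your actual download and device paths.
archive = Path("Download/Mortal-Kombat-X-Mod-APK-Data.zip")
work_dir = Path("mkx_extracted")
obb_dir = Path("/storage/emulated/0/Android/obb")  # typical obb location on Android

# Extract the archive into a working folder.
work_dir.mkdir(exist_ok=True)
with zipfile.ZipFile(archive) as zf:
    zf.extractall(work_dir)

# The extracted data folder should carry the game's package name.
data_folder = work_dir / "com.wb.goog.mkx"
if not data_folder.is_dir():
    raise FileNotFoundError("Expected folder com.wb.goog.mkx not found in the archive")

# Copy it into Android/obb, creating the obb folder if it does not exist yet.
obb_dir.mkdir(parents=True, exist_ok=True)
shutil.copytree(data_folder, obb_dir / data_folder.name, dirs_exist_ok=True)
print("Data folder copied to", obb_dir / data_folder.name)
```

The end result is the same as the manual steps: the com.wb.goog.mkx folder sitting directly inside Android > obb with its name unchanged.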

      Step 4: Install the mod apk file and launch the game

      -

      The final step is to install the mod apk file and launch the game. To do this, follow these steps:

- Locate the mod apk file that you have downloaded on your device storage. It should have a name like Mortal-Kombat-X-Mod-APK.apk or something similar.
- Tap on the file and select Install from the menu that appears.
- Wait for the installation process to finish. It may take a few seconds or minutes depending on your device speed and compatibility.
- Once the installation is done, tap Open or Launch to start the game.
- Enjoy Mortal Kombat X with mod apk + data on your Android device!

      How to Play Mortal Kombat X with Mod APK + Data

      -

      Now that you have installed and launched Mortal Kombat X with mod apk + data on your Android device, you are ready to play and have fun. Here are some tips and tricks on how to play Mortal Kombat X with mod apk + data:

      -

      Choose your character and variation

      -

      Mortal Kombat X features a roster of over 30 characters, each with their own unique skills, abilities, and fatalities. You can choose from classic characters like Scorpion, Sub-Zero, Raiden, Liu Kang, Sonya Blade, and Johnny Cage, as well as new characters like Cassie Cage, D'Vorah, Kotal Kahn, Erron Black, and Jacqui Briggs. You can also unlock and play as guest characters like Jason Voorhees, Predator, Alien, Leatherface, and Kratos.

      -

      Each character has three variations that change their appearance, moveset, and strategy. For example, Scorpion has Ninjutsu, Hellfire, and Inferno variations; Sub-Zero has Cryomancer, Grandmaster, and Unbreakable variations; Raiden has Thunder God, Displacer, and Master of Storms variations; etc. You can choose your variation before each match or change it during gameplay by pressing L1 (or equivalent button) on your controller.

      -

      You can also customize your character's appearance by changing their costume, accessory, weapon, taunt, victory pose, etc. You can unlock new costumes by completing challenges, playing the story mode, or using the mod apk + data. You can also create your own custom character by using the Kreate a Fighter feature in the Extras menu.

      -

      Learn the basic and advanced techniques

      -

      Mortal Kombat X is a game that requires skill, timing, and strategy to master. You need to learn the basic and advanced techniques to survive and win against your opponents. Some of the basic techniques are:

- Punch: Press Square (or equivalent button) to perform a quick and weak attack.
- Kick: Press X (or equivalent button) to perform a fast and moderate attack.
- Block: Press R2 (or equivalent button) to defend yourself from incoming attacks. You can also use directional buttons to block high, mid, or low attacks.
- Throw: Press L1 (or equivalent button) or Square + X (or equivalent buttons) to grab and toss your opponent. You can also press directional buttons to change the direction of the throw.
- Run: Press R2 + Forward (or equivalent buttons) to sprint towards your opponent. You can use this to close the distance or surprise them with an attack.
- Jump: Press Up (or equivalent button) to leap into the air. You can also press directional buttons to jump forward, backward, or sideways.
- Crouch: Press Down (or equivalent button) to duck under high attacks or avoid projectiles. You can also press Square or X (or equivalent buttons) to perform a low punch or kick.

      Some of the advanced techniques are:

- Combo: Press a sequence of buttons to perform a series of attacks that deal more damage and stun your opponent. You can find the list of combos for each character in the Moves List menu or on the screen during gameplay.
- Special Move: Press a combination of buttons to perform a unique and powerful attack that uses some of your energy meter. You can find the list of special moves for each character in the Moves List menu or on the screen during gameplay.
- X-Ray Move: Press L2 + R2 (or equivalent buttons) when your energy meter is full to perform a devastating attack that shows the internal damage inflicted on your opponent. This move can deal up to 30% damage and break your opponent's bones and organs.
- Fatality: Press a specific sequence of buttons at the end of the match when your opponent is in a dizzy state to execute a gruesome finishing move that kills them in a brutal way. You can find the list of fatalities for each character in the Moves List menu or on the screen during gameplay. You can also use the mod apk + data to unlock all fatalities for all characters.
- Brutality: Meet a certain requirement during the match, such as using a specific move or variation, and end the match with a specific attack to trigger a violent finishing move that kills your opponent instantly. You can find the list of brutalities for each character in the Moves List menu or on the screen during gameplay. You can also use the mod apk + data to unlock all brutalities for all characters.

      Use the environment and special attacks

      -

      Mortal Kombat X features interactive environments that you can use to your advantage or disadvantage during gameplay. You can use objects, weapons, traps, animals, and even people in the background to damage, stun, or escape from your opponent. To use an environmental interaction, press R1 (or equivalent button) when you are near an object that has a white outline. Some examples of environmental interactions are:

- Throwing barrels, rocks, skulls, spears, etc. at your opponent
- Jumping off walls, pillars, statues, etc. to evade or attack your opponent
- Grabbing branches, chains, hooks, etc. to swing or pull yourself towards or away from your opponent
- Activating traps, such as spikes, flames, lasers, etc. to hurt your opponent
- Using animals, such as crocodiles, wolves, dragons, etc. to bite or claw your opponent
- Using people, such as monks, soldiers, civilians, etc. to hit or distract your opponent

      Mortal Kombat X also features special attacks that you can use once per match to turn the tide of battle. These are:

- Quitality: If you quit the match online before it ends, your character's head will explode and you will lose automatically.
- Faction Kill: If you belong to one of the five factions in Mortal Kombat X (Lin Kuei, Special Forces, Black Dragon, Brotherhood of Shadow, or White Lotus), you can perform a faction-specific finishing move that shows your allegiance and earns you faction points.
- Stage Brutality: If you end the match with an environmental interaction, you can trigger a stage-specific finishing move that kills your opponent in a creative way.

      Enjoy the different game modes and challenges

      -

      Mortal Kombat X offers a variety of game modes and challenges that you can enjoy with mod apk + data. These are:

- Story Mode: Follow the epic story of Mortal Kombat X that spans 25 years and features multiple characters and events. You can play as different characters in each chapter and make choices that affect the outcome of the story. You can also unlock rewards and secrets by completing the story mode.
- Tower Mode: Fight your way through different towers that have different rules, modifiers, and opponents. You can choose from traditional towers, such as Klassic, Test Your Luck, Test Your Might, etc., or dynamic towers, such as Living Towers, Faction Towers, Premier Towers, etc. You can also create your own custom tower by using the mod apk + data.
- Online Mode: Compete with other players online in various modes, such as Ranked, Player, King of the Hill, Survivor, etc. You can also join or create a room to chat and play with other players. You can also participate in online events and tournaments that have special rewards and prizes.
- Local Mode: Play with your friends offline in various modes, such as Versus, Tag Team, Co-op Arcade, etc. You can also use the mod apk + data to enable local multiplayer on one device by using a split-screen feature.
- Challenge Mode: Complete various challenges that test your skills and knowledge of Mortal Kombat X. You can choose from daily challenges, weekly challenges, character challenges, faction challenges, etc. You can also use the mod apk + data to unlock all challenges and rewards.
- Krypt Mode: Explore the mysterious and dangerous Krypt that contains secrets, puzzles, traps, and treasures. You can use koins, souls, and hearts to unlock items, such as costumes, fatalities, brutalities, concept art, music, etc. You can also use the mod apk + data to unlock all items and areas in the Krypt.

      Conclusion

      -

      Mortal Kombat X is a game that you should not miss if you are a fan of fighting games or Mortal Kombat franchise. It is a game that offers stunning graphics, cinematic presentation, brutal gameplay, and a rich story mode that will keep you entertained for hours. It is also a game that you can enjoy on your Android device by using a mod apk + data download that gives you access to all features, content, and updates of the game.

      -

      By following the steps above, you can download and install Mortal Kombat X mod apk + data on your Android device easily and safely. You can also learn how to play Mortal Kombat X with mod apk + data by using the tips and tricks above. You can also enjoy the different game modes and challenges that Mortal Kombat X offers with mod apk + data.

      -

      We hope that this article has helped you to play Mortal Kombat X with mod apk + data on your Android device. If you have any questions or feedback, please feel free to share them in the comments section below. We would love to hear from you and help you out.

      -

      FAQs

      -

      Here are some frequently asked questions about Mortal Kombat X with mod apk + data:

      -

      What are the system requirements for Mortal Kombat X on Android?

      -

      The minimum system requirements for Mortal Kombat X on Android are:

- Android 4.0 or higher
- 1 GB of RAM
- 1.5 GB of free storage space
- A stable internet connection

      The recommended system requirements for Mortal Kombat X on Android are:

- Android 5.0 or higher
- 2 GB of RAM
- 2 GB of free storage space
- A fast internet connection

      What are the differences between Mortal Kombat X and Mortal Kombat XL?

      -

      Mortal Kombat XL is an enhanced version of Mortal Kombat X that includes all the updates and DLCs that have been released for the game. It features new characters, costumes, stages, modes, and features that make it the ultimate Mortal Kombat experience. However, Mortal Kombat XL is only available for PlayStation 4, Xbox One, and PC platforms. It is not available for Android devices. Therefore, if you want to play Mortal Kombat XL on your Android device, you need to use a mod apk + data download that includes all the content and updates of Mortal Kombat XL.

      -

      How can I unlock all the characters and costumes in Mortal Kombat X with mod apk + data?

      -

      One of the benefits of using a mod apk + data download for Mortal Kombat X is that you can unlock all the characters and costumes in the game without spending any money or time. You can access all the characters and costumes from the character selection screen or the customization menu. You can also change your character and costume during gameplay by pressing L1 (or equivalent button) on your controller. You can also use the mod apk + data download to unlock new characters and costumes that are not available in the official version of the game, such as Kratos, Freddy Krueger, Michael Myers, etc.

      -

      How can I update Mortal Kombat X with mod apk + data?

      -

      Another benefit of using a mod apk + data download for Mortal Kombat X is that you can update the game with the latest patches and DLCs without any hassle or delay. You can update the game by downloading and installing the latest version of the mod apk + data file from the same source that you used before. You can also check for updates on the website or the app itself. You do not need to uninstall or delete the previous version of the game. You can simply overwrite it with the new version and enjoy the new features and content.

      -

      Is Mortal Kombat X with mod apk + data safe and legal?

      -

      The answer to this question depends on your perspective and preference. On one hand, using a mod apk + data download for Mortal Kombat X is safe and legal as long as you download it from a trusted source that does not contain any viruses, malware, or fake links. You also need to make sure that your device is compatible and has enough storage space and internet connection to run the game smoothly. On the other hand, using a mod apk + data download for Mortal Kombat X is unsafe and illegal as it violates the terms and conditions of the game developer and publisher. You also risk getting banned or suspended from online services and features if you use a mod apk + data download for Mortal Kombat X. Therefore, you should use a mod apk + data download for Mortal Kombat X at your own risk and discretion.

      -
      -
      \ No newline at end of file diff --git a/spaces/AIConsultant/MusicGen/audiocraft/models/lm.py b/spaces/AIConsultant/MusicGen/audiocraft/models/lm.py deleted file mode 100644 index 8cefd2c58c3a337378579d6cd6469fd038cbb1ee..0000000000000000000000000000000000000000 --- a/spaces/AIConsultant/MusicGen/audiocraft/models/lm.py +++ /dev/null @@ -1,531 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from dataclasses import dataclass -from functools import partial -import logging -import math -import typing as tp - -import torch -from torch import nn - -from ..utils import utils -from ..modules.streaming import StreamingModule, State -from ..modules.transformer import StreamingTransformer, create_norm_fn -from ..modules.conditioners import ( - ConditionFuser, - ClassifierFreeGuidanceDropout, - AttributeDropout, - ConditioningProvider, - ConditioningAttributes, - ConditionType, -) -from ..modules.codebooks_patterns import CodebooksPatternProvider -from ..modules.activations import get_activation_fn - - -logger = logging.getLogger(__name__) -ConditionTensors = tp.Dict[str, ConditionType] -CFGConditions = tp.Union[ConditionTensors, tp.Tuple[ConditionTensors, ConditionTensors]] - - -def get_init_fn(method: str, input_dim: int, init_depth: tp.Optional[int] = None): - """LM layer initialization. - Inspired from xlformers: https://github.com/fairinternal/xlformers - - Args: - method (str): Method name for init function. Valid options are: - 'gaussian', 'uniform'. - input_dim (int): Input dimension of the initialized module. - init_depth (int, optional): Optional init depth value used to rescale - the standard deviation if defined. - """ - # Compute std - std = 1 / math.sqrt(input_dim) - # Rescale with depth - if init_depth is not None: - std = std / math.sqrt(2 * init_depth) - - if method == 'gaussian': - return partial( - torch.nn.init.trunc_normal_, mean=0.0, std=std, a=-3 * std, b=3 * std - ) - elif method == 'uniform': - bound = math.sqrt(3) * std # ensure the standard deviation is `std` - return partial(torch.nn.init.uniform_, a=-bound, b=bound) - else: - raise ValueError("Unsupported layer initialization method") - - -def init_layer(m: nn.Module, - method: str, - init_depth: tp.Optional[int] = None, - zero_bias_init: bool = False): - """Wrapper around ``get_init_fn`` for proper initialization of LM modules. - - Args: - m (nn.Module): Module to initialize. - method (str): Method name for the init function. - init_depth (int, optional): Optional init depth value used to rescale - the standard deviation if defined. - zero_bias_init (bool): Whether to initialize the bias to 0 or not. - """ - if isinstance(m, nn.Linear): - init_fn = get_init_fn(method, m.in_features, init_depth=init_depth) - if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16: - weight = m.weight.float() - init_fn(weight) - m.weight.data[:] = weight.half() - else: - init_fn(m.weight) - if zero_bias_init and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.Embedding): - init_fn = get_init_fn(method, m.embedding_dim, init_depth=None) - if m.weight.device.type == 'cpu' and m.weight.dtype == torch.float16: - weight = m.weight.float() - init_fn(weight) - m.weight.data[:] = weight.half() - else: - init_fn(m.weight) - - -class ScaledEmbedding(nn.Embedding): - """Boost learning rate for embeddings (with `scale`). 
- """ - def __init__(self, *args, lr=None, **kwargs): - super().__init__(*args, **kwargs) - self.lr = lr - - def make_optim_group(self): - group = {"params": list(self.parameters())} - if self.lr is not None: - group["lr"] = self.lr - return group - - -@dataclass -class LMOutput: - # The logits are already re-aligned with the input codes - # hence no extra shift is required, e.g. when computing CE - logits: torch.Tensor # [B, K, T, card] - mask: torch.Tensor # [B, K, T] - - -class LMModel(StreamingModule): - """Transformer-based language model on multiple streams of codes. - - Args: - pattern_provider (CodebooksPatternProvider): Pattern provider for codebook interleaving. - condition_provider (MusicConditioningProvider): Conditioning provider from metadata. - fuser (ConditionFuser): Fuser handling the fusing of conditions with language model input. - n_q (int): Number of parallel streams to model. - card (int): Cardinality, vocabulary size. - dim (int): Dimension of the transformer encoder. - num_heads (int): Number of heads for the transformer encoder. - hidden_scale (int): Scale for hidden feed forward dimension of the transformer encoder. - norm (str): Normalization method. - norm_first (bool): Use pre-norm instead of post-norm. - emb_lr (float, optional): Embedding-specific learning rate. - bias_proj (bool): Use bias for output projections. - weight_init (str, optional): Method for weight initialization. - depthwise_init (str, optional): Method for depthwise weight initialization. - zero_bias_init (bool): If true and bias in Linears, initialize bias to zeros. - cfg_dropout (float): Classifier-free guidance dropout. - cfg_coef (float): Classifier-free guidance coefficient. - attribute_dropout (dict): Attribute dropout probabilities. - two_step_cfg (bool): Whether to run classifier free-guidance with 2 distinct steps. - **kwargs: Additional parameters for the transformer encoder. 
- """ - def __init__(self, pattern_provider: CodebooksPatternProvider, condition_provider: ConditioningProvider, - fuser: ConditionFuser, n_q: int = 8, card: int = 1024, dim: int = 128, num_heads: int = 8, - hidden_scale: int = 4, norm: str = 'layer_norm', norm_first: bool = False, - emb_lr: tp.Optional[float] = None, bias_proj: bool = True, - weight_init: tp.Optional[str] = None, depthwise_init: tp.Optional[str] = None, - zero_bias_init: bool = False, cfg_dropout: float = 0, cfg_coef: float = 1.0, - attribute_dropout: tp.Dict[str, tp.Dict[str, float]] = {}, two_step_cfg: bool = False, - **kwargs): - super().__init__() - self.cfg_coef = cfg_coef - self.cfg_dropout = ClassifierFreeGuidanceDropout(p=cfg_dropout) - self.att_dropout = AttributeDropout(p=attribute_dropout) - self.condition_provider = condition_provider - self.fuser = fuser - self.card = card - embed_dim = self.card + 1 - self.n_q = n_q - self.dim = dim - self.pattern_provider = pattern_provider - self.two_step_cfg = two_step_cfg - self.emb = nn.ModuleList([ScaledEmbedding(embed_dim, dim, lr=emb_lr) for _ in range(n_q)]) - if 'activation' in kwargs: - kwargs['activation'] = get_activation_fn(kwargs['activation']) - self.transformer = StreamingTransformer( - d_model=dim, num_heads=num_heads, dim_feedforward=int(hidden_scale * dim), - norm=norm, norm_first=norm_first, **kwargs) - self.out_norm: tp.Optional[nn.Module] = None - if norm_first: - self.out_norm = create_norm_fn(norm, dim) - self.linears = nn.ModuleList([nn.Linear(dim, self.card, bias=bias_proj) for _ in range(n_q)]) - self._init_weights(weight_init, depthwise_init, zero_bias_init) - self._fsdp: tp.Optional[nn.Module] - self.__dict__['_fsdp'] = None - - def _init_weights(self, weight_init: tp.Optional[str], depthwise_init: tp.Optional[str], zero_bias_init: bool): - """Initialization of the transformer module weights. - - Args: - weight_init (str, optional): Weight initialization strategy. See ``get_init_fn`` for valid options. - depthwise_init (str, optional): Depthwise initialization strategy. The following options are valid: - 'current' where the depth corresponds to the current layer index or 'global' where the total number - of layer is used as depth. If not set, no depthwise initialization strategy is used. - zero_bias_init (bool): Whether to initialize bias to zero or not. - """ - assert depthwise_init is None or depthwise_init in ['current', 'global'] - assert depthwise_init is None or weight_init is not None, \ - "If 'depthwise_init' is defined, a 'weight_init' method should be provided." 
- assert not zero_bias_init or weight_init is not None, \ - "If 'zero_bias_init', a 'weight_init' method should be provided" - - if weight_init is None: - return - - for emb_layer in self.emb: - init_layer(emb_layer, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init) - - for layer_idx, tr_layer in enumerate(self.transformer.layers): - depth = None - if depthwise_init == 'current': - depth = layer_idx + 1 - elif depthwise_init == 'global': - depth = len(self.transformer.layers) - init_fn = partial(init_layer, method=weight_init, init_depth=depth, zero_bias_init=zero_bias_init) - tr_layer.apply(init_fn) - - for linear in self.linears: - init_layer(linear, method=weight_init, init_depth=None, zero_bias_init=zero_bias_init) - - @property - def special_token_id(self) -> int: - return self.card - - @property - def num_codebooks(self) -> int: - return self.n_q - - def forward(self, sequence: torch.Tensor, - conditions: tp.List[ConditioningAttributes], - condition_tensors: tp.Optional[ConditionTensors] = None) -> torch.Tensor: - """Apply language model on sequence and conditions. - Given a tensor of sequence of shape [B, K, S] with K the number of codebooks and - S the sequence steps, return the logits with shape [B, card, K, S]. - - Args: - indices (torch.Tensor): Indices of the codes to model. - conditions (list of ConditioningAttributes): Conditions to use when modeling - the given codes. Note that when evaluating multiple time with the same conditioning - you should pre-compute those and pass them as `condition_tensors`. - condition_tensors (dict[str, ConditionType], optional): Pre-computed conditioning - tensors, see `conditions`. - Returns: - torch.Tensor: Logits. - """ - B, K, S = sequence.shape - assert K == self.num_codebooks, "Sequence shape must match the specified number of codebooks" - input_ = sum([self.emb[k](sequence[:, k]) for k in range(K)]) - if condition_tensors is None: - assert not self._is_streaming, "Conditions tensors should be precomputed when streaming." - # apply dropout modules - conditions = self.cfg_dropout(conditions) - conditions = self.att_dropout(conditions) - tokenized = self.condition_provider.tokenize(conditions) - # encode conditions and fuse, both have a streaming cache to not recompute when generating. - condition_tensors = self.condition_provider(tokenized) - else: - assert not conditions, "Shouldn't pass both conditions and condition_tensors." - - input_, cross_attention_input = self.fuser(input_, condition_tensors) - - out = self.transformer(input_, cross_attention_src=cross_attention_input) - if self.out_norm: - out = self.out_norm(out) - logits = torch.stack([self.linears[k](out) for k in range(K)], dim=1) # [B, K, S, card] - - # remove the prefix from the model outputs - if len(self.fuser.fuse2cond['prepend']) > 0: - logits = logits[:, :, -S:] - - return logits # [B, K, S, card] - - def compute_predictions( - self, codes: torch.Tensor, - conditions: tp.List[ConditioningAttributes], - condition_tensors: tp.Optional[ConditionTensors] = None) -> LMOutput: - """Given an input tensor of codes [B, K, T] and list of conditions, runs the model - forward using the specified codes interleaving pattern. - - Args: - codes (torch.Tensor): Input codes of shape [B, K, T] with B the batch size, - K the number of codebooks and T the number of timesteps. - conditions (list of ConditioningAttributes): conditionings to use when modeling - the given codes. 
Note that when evaluating multiple time with the same conditioning - you should pre-compute those and pass them as `condition_tensors`. - condition_tensors (dict[str, ConditionType], optional): pre-computed conditioning - tensors, see `conditions`. - Returns: - LMOutput: Language model outputs - logits (torch.Tensor) of shape [B, K, T, card] corresponding to the provided codes, - i.e. the first item corresponds to logits to predict the first code, meaning that - no additional shifting of codes and logits is required. - mask (torch.Tensor) of shape [B, K, T], mask over valid and invalid positions. - Given the specified interleaving strategies, parts of the logits and codes should - not be considered as valid predictions because of invalid context. - """ - B, K, T = codes.shape - codes = codes.contiguous() - # map codes [B, K, T] into pattern sequence [B, K, S] using special_token_id for masked tokens - pattern = self.pattern_provider.get_pattern(T) - sequence_codes, sequence_indexes, sequence_mask = pattern.build_pattern_sequence( - codes, self.special_token_id, keep_only_valid_steps=True - ) - # apply model on pattern sequence - model = self if self._fsdp is None else self._fsdp - logits = model(sequence_codes, conditions, condition_tensors) # [B, K, S, card] - # map back the logits on pattern sequence to logits on original codes: [B, K, S, card] -> [B, K, T, card] - # and provide the corresponding mask over invalid positions of tokens - logits = logits.permute(0, 3, 1, 2) # [B, card, K, S] - # note: we use nans as special token to make it obvious if we feed unexpected logits - logits, logits_indexes, logits_mask = pattern.revert_pattern_logits( - logits, float('nan'), keep_only_valid_steps=True - ) - logits = logits.permute(0, 2, 3, 1) # [B, K, T, card] - logits_mask = logits_mask[None, :, :].expand(B, -1, -1) # [K, T] -> [B, K, T] - return LMOutput(logits, logits_mask) - - def _sample_next_token(self, - sequence: torch.Tensor, - cfg_conditions: CFGConditions, - unconditional_state: State, - use_sampling: bool = False, - temp: float = 1.0, - top_k: int = 0, - top_p: float = 0.0, - cfg_coef: tp.Optional[float] = None) -> torch.Tensor: - """Sample next token from the model given a sequence and a set of conditions. The model supports - multiple sampling strategies (greedy sampling, softmax, top-k, top-p...). - - Args: - sequence (torch.Tensor): Current sequence of shape [B, K, S] - with K corresponding to the number of codebooks and S the number of sequence steps. - S = 1 in streaming mode, except for the first step that contains a bigger prompt. - condition_tensors (dict[str, ConditionType): Set of conditions. If CFG is used, - should be twice the batch size, being the concatenation of the conditions + null conditions. - use_sampling (bool): Whether to use a sampling strategy or not. - temp (float): Sampling temperature. - top_k (int): K for "top-k" sampling. - top_p (float): P for "top-p" sampling. - cfg_coef (float, optional): classifier free guidance coefficient - Returns: - next_token (torch.Tensor): Next token tensor of shape [B, K, 1]. 
- """ - B = sequence.shape[0] - cfg_coef = self.cfg_coef if cfg_coef is None else cfg_coef - model = self if self._fsdp is None else self._fsdp - if self.two_step_cfg and cfg_conditions != {}: - assert isinstance(cfg_conditions, tuple), type(cfg_conditions) - condition_tensors, null_condition_tensors = cfg_conditions - cond_logits = model(sequence, conditions=[], condition_tensors=condition_tensors) - state = self.get_streaming_state() - self.set_streaming_state(unconditional_state) - uncond_logits = model(sequence, conditions=[], condition_tensors=null_condition_tensors) - unconditional_state.update(self.get_streaming_state()) - self.set_streaming_state(state) - logits = uncond_logits + (cond_logits - uncond_logits) * self.cfg_coef - else: - assert isinstance(cfg_conditions, dict) - condition_tensors = cfg_conditions - if condition_tensors: - # Preparing for CFG, predicting both conditional and unconditional logits. - sequence = torch.cat([sequence, sequence], dim=0) - all_logits = model( - sequence, - conditions=[], condition_tensors=condition_tensors) - if condition_tensors: - cond_logits, uncond_logits = all_logits.split(B, dim=0) # [B, K, T, card] - logits = uncond_logits + (cond_logits - uncond_logits) * cfg_coef - else: - logits = all_logits - - logits = logits.permute(0, 1, 3, 2) # [B, K, card, T] - logits = logits[..., -1] # [B x K x card] - - # Apply softmax for sampling if temp > 0. Else, do greedy sampling to avoid zero division error. - if use_sampling and temp > 0.0: - probs = torch.softmax(logits / temp, dim=-1) - if top_p > 0.0: - next_token = utils.sample_top_p(probs, p=top_p) - elif top_k > 0: - next_token = utils.sample_top_k(probs, k=top_k) - else: - next_token = utils.multinomial(probs, num_samples=1) - else: - next_token = torch.argmax(logits, dim=-1, keepdim=True) - - return next_token - - @torch.no_grad() - def generate(self, - prompt: tp.Optional[torch.Tensor] = None, - conditions: tp.List[ConditioningAttributes] = [], - num_samples: tp.Optional[int] = None, - max_gen_len: int = 256, - use_sampling: bool = True, - temp: float = 1.0, - top_k: int = 250, - top_p: float = 0.0, - cfg_coef: tp.Optional[float] = None, - two_step_cfg: tp.Optional[bool] = None, - remove_prompts: bool = False, - check: bool = False, - callback: tp.Optional[tp.Callable[[int, int], None]] = None) -> torch.Tensor: - """Generate tokens sampling from the model given a prompt or unconditionally. Generation can - be perform in a greedy fashion or using sampling with top K and top P strategies. - - Args: - prompt (torch.Tensor, optional): Prompt tokens of shape [B, K, T]. - conditions_tensors (list of ConditioningAttributes, optional): List of conditions. - num_samples (int, optional): Number of samples to generate when no prompt and no conditions are given. - max_gen_len (int): Maximum generation length. - use_sampling (bool): Whether to use a sampling strategy or not. - temp (float): Sampling temperature. - top_k (int): K for "top-k" sampling. - top_p (float): P for "top-p" sampling. - cfg_coeff (float, optional): Classifier-free guidance coefficient. - two_step_cfg (bool, optional): Whether to perform classifier-free guidance with two steps generation. - remove_prompts (bool): Whether to remove prompts from generation or not. - check (bool): Whether to apply further checks on generated sequence. - callback (Callback, optional): Callback function to report generation progress. - Returns: - torch.Tensor: Generated tokens. 
- """ - assert not self.training, "generation shouldn't be used in training mode." - first_param = next(iter(self.parameters())) - device = first_param.device - - # Checking all input shapes are consistent. - possible_num_samples = [] - if num_samples is not None: - possible_num_samples.append(num_samples) - elif prompt is not None: - possible_num_samples.append(prompt.shape[0]) - elif conditions: - possible_num_samples.append(len(conditions)) - else: - possible_num_samples.append(1) - assert [x == possible_num_samples[0] for x in possible_num_samples], "Inconsistent inputs shapes" - num_samples = possible_num_samples[0] - - # below we create set of conditions: one conditional and one unconditional - # to do that we merge the regular condition together with the null condition - # we then do 1 forward pass instead of 2. - # the reason for that is two-fold: - # 1. it is about x2 faster than doing 2 forward passes - # 2. avoid the streaming API treating the 2 passes as part of different time steps - # We also support doing two different passes, in particular to ensure that - # the padding structure is exactly the same between train and test. - # With a batch size of 1, this can be slower though. - cfg_conditions: CFGConditions - two_step_cfg = self.two_step_cfg if two_step_cfg is None else two_step_cfg - if conditions: - null_conditions = ClassifierFreeGuidanceDropout(p=1.0)(conditions) - if two_step_cfg: - cfg_conditions = ( - self.condition_provider(self.condition_provider.tokenize(conditions)), - self.condition_provider(self.condition_provider.tokenize(null_conditions)), - ) - else: - conditions = conditions + null_conditions - tokenized = self.condition_provider.tokenize(conditions) - cfg_conditions = self.condition_provider(tokenized) - else: - cfg_conditions = {} - - if prompt is None: - assert num_samples > 0 - prompt = torch.zeros((num_samples, self.num_codebooks, 0), dtype=torch.long, device=device) - - B, K, T = prompt.shape - start_offset = T - assert start_offset < max_gen_len - - pattern = self.pattern_provider.get_pattern(max_gen_len) - # this token is used as default value for codes that are not generated yet - unknown_token = -1 - - # we generate codes up to the max_gen_len that will be mapped to the pattern sequence - gen_codes = torch.full((B, K, max_gen_len), unknown_token, dtype=torch.long, device=device) - # filling the gen_codes with the prompt if needed - gen_codes[..., :start_offset] = prompt - # create the gen_sequence with proper interleaving from the pattern: [B, K, S] - gen_sequence, indexes, mask = pattern.build_pattern_sequence(gen_codes, self.special_token_id) - # retrieve the start_offset in the sequence: - # it is the first sequence step that contains the `start_offset` timestep - start_offset_sequence = pattern.get_first_step_with_timesteps(start_offset) - assert start_offset_sequence is not None - - with self.streaming(): - unconditional_state = self.get_streaming_state() - prev_offset = 0 - gen_sequence_len = gen_sequence.shape[-1] # gen_sequence shape is [B, K, S] - for offset in range(start_offset_sequence, gen_sequence_len): - # get current sequence (note that the streaming API is providing the caching over previous offsets) - curr_sequence = gen_sequence[..., prev_offset:offset] - curr_mask = mask[None, ..., prev_offset:offset].expand(B, -1, -1) - if check: - # check coherence between mask and sequence - assert (curr_sequence == torch.where(curr_mask, curr_sequence, self.special_token_id)).all() - # should never happen as gen_sequence is filled 
progressively - assert not (curr_sequence == unknown_token).any() - # sample next token from the model, next token shape is [B, K, 1] - next_token = self._sample_next_token( - curr_sequence, cfg_conditions, unconditional_state, use_sampling, temp, top_k, top_p, - cfg_coef=cfg_coef) - # ensure the tokens that should be masked are properly set to special_token_id - # as the model never output special_token_id - valid_mask = mask[..., offset:offset+1].expand(B, -1, -1) - next_token[~valid_mask] = self.special_token_id - # ensure we don't overwrite prompt tokens, we only write over unknown tokens - # (then mask tokens should be left as is as well, which is correct) - gen_sequence[..., offset:offset+1] = torch.where( - gen_sequence[..., offset:offset+1] == unknown_token, - next_token, gen_sequence[..., offset:offset+1] - ) - prev_offset = offset - if callback is not None: - callback(1 + offset - start_offset_sequence, gen_sequence_len - start_offset_sequence) - unconditional_state.clear() - - # ensure sequence has been entirely filled - assert not (gen_sequence == unknown_token).any() - # ensure gen_sequence pattern and mask are matching - # which means the gen_sequence is valid according to the pattern - assert ( - gen_sequence == torch.where(mask[None, ...].expand(B, -1, -1), gen_sequence, self.special_token_id) - ).all() - # get back the codes, trimming the prompt if needed and cutting potentially incomplete timesteps - out_codes, out_indexes, out_mask = pattern.revert_pattern_sequence(gen_sequence, special_token=unknown_token) - - # sanity checks over the returned codes and corresponding masks - assert (out_codes[..., :max_gen_len] != unknown_token).all() - assert (out_mask[..., :max_gen_len] == 1).all() - - out_start_offset = start_offset if remove_prompts else 0 - out_codes = out_codes[..., out_start_offset:max_gen_len] - - # ensure the returned codes are all valid - assert (out_codes >= 0).all() and (out_codes <= self.card).all() - return out_codes diff --git a/spaces/AIConsultant/MusicGen/audiocraft/utils/export_legacy.py b/spaces/AIConsultant/MusicGen/audiocraft/utils/export_legacy.py deleted file mode 100644 index 52f145f3148c3e9fdba436273bc45480fbae6481..0000000000000000000000000000000000000000 --- a/spaces/AIConsultant/MusicGen/audiocraft/utils/export_legacy.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Legacy functions used at the time of the first release, kept for referencd. -""" - -from pathlib import Path -import typing as tp - -from omegaconf import OmegaConf, DictConfig -import torch - - -def _clean_lm_cfg(cfg: DictConfig): - OmegaConf.set_struct(cfg, False) - # This used to be set automatically in the LM solver, need a more robust solution - # for the future. - cfg['transformer_lm']['card'] = 2048 - cfg['transformer_lm']['n_q'] = 4 - # Experimental params no longer supported. 
- bad_params = ['spectral_norm_attn_iters', 'spectral_norm_ff_iters', - 'residual_balancer_attn', 'residual_balancer_ff', 'layer_drop'] - for name in bad_params: - del cfg['transformer_lm'][name] - OmegaConf.set_struct(cfg, True) - return cfg - - -def export_encodec(checkpoint_path: tp.Union[Path, str], out_folder: tp.Union[Path, str]): - sig = Path(checkpoint_path).parent.name - assert len(sig) == 8, "Not a valid Dora signature" - pkg = torch.load(checkpoint_path, 'cpu') - new_pkg = { - 'best_state': pkg['ema']['state']['model'], - 'xp.cfg': OmegaConf.to_yaml(pkg['xp.cfg']), - } - out_file = Path(out_folder) / f'{sig}.th' - torch.save(new_pkg, out_file) - return out_file - - -def export_lm(checkpoint_path: tp.Union[Path, str], out_folder: tp.Union[Path, str]): - sig = Path(checkpoint_path).parent.name - assert len(sig) == 8, "Not a valid Dora signature" - pkg = torch.load(checkpoint_path, 'cpu') - new_pkg = { - 'best_state': pkg['fsdp_best_state']['model'], - 'xp.cfg': OmegaConf.to_yaml(_clean_lm_cfg(pkg['xp.cfg'])) - } - out_file = Path(out_folder) / f'{sig}.th' - torch.save(new_pkg, out_file) - return out_file diff --git a/spaces/AIFILMS/generate_human_motion/pyrender/README.md b/spaces/AIFILMS/generate_human_motion/pyrender/README.md deleted file mode 100644 index ae88ed1c5e78f247e38291ed83cf4c81230bf976..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/generate_human_motion/pyrender/README.md +++ /dev/null @@ -1,92 +0,0 @@ -# Pyrender - -[![Build Status](https://travis-ci.org/mmatl/pyrender.svg?branch=master)](https://travis-ci.org/mmatl/pyrender) -[![Documentation Status](https://readthedocs.org/projects/pyrender/badge/?version=latest)](https://pyrender.readthedocs.io/en/latest/?badge=latest) -[![Coverage Status](https://coveralls.io/repos/github/mmatl/pyrender/badge.svg?branch=master)](https://coveralls.io/github/mmatl/pyrender?branch=master) -[![PyPI version](https://badge.fury.io/py/pyrender.svg)](https://badge.fury.io/py/pyrender) -[![Downloads](https://pepy.tech/badge/pyrender)](https://pepy.tech/project/pyrender) - -Pyrender is a pure Python (2.7, 3.4, 3.5, 3.6) library for physically-based -rendering and visualization. -It is designed to meet the [glTF 2.0 specification from Khronos](https://www.khronos.org/gltf/). - -Pyrender is lightweight, easy to install, and simple to use. -It comes packaged with both an intuitive scene viewer and a headache-free -offscreen renderer with support for GPU-accelerated rendering on headless -servers, which makes it perfect for machine learning applications. - -Extensive documentation, including a quickstart guide, is provided [here](https://pyrender.readthedocs.io/en/latest/). - -For a minimal working example of GPU-accelerated offscreen rendering using EGL, -check out the [EGL Google CoLab Notebook](https://colab.research.google.com/drive/1pcndwqeY8vker3bLKQNJKr3B-7-SYenE?usp=sharing). - - -

      - [Images: GIF of Viewer, Damaged Helmet]

      - -## Installation -You can install pyrender directly from pip. - -```bash -pip install pyrender -``` - -## Features - -Despite being lightweight, pyrender has lots of features, including: - -* Simple interoperation with the amazing [trimesh](https://github.com/mikedh/trimesh) project, -which enables out-of-the-box support for dozens of mesh types, including OBJ, -STL, DAE, OFF, PLY, and GLB. -* An easy-to-use scene viewer with support for animation, showing face and vertex -normals, toggling lighting conditions, and saving images and GIFs. -* An offscreen rendering module that supports OSMesa and EGL backends. -* Shadow mapping for directional and spot lights. -* Metallic-roughness materials for physically-based rendering, including several -types of texture and normal mapping. -* Transparency. -* Depth and color image generation. - -## Sample Usage - -For sample usage, check out the [quickstart -guide](https://pyrender.readthedocs.io/en/latest/examples/index.html) or one of -the Google CoLab Notebooks: - -* [EGL Google CoLab Notebook](https://colab.research.google.com/drive/1pcndwqeY8vker3bLKQNJKr3B-7-SYenE?usp=sharing) - -## Viewer Keyboard and Mouse Controls - -When using the viewer, the basic controls for moving about the scene are as follows: - -* To rotate the camera about the center of the scene, hold the left mouse button and drag the cursor. -* To rotate the camera about its viewing axis, hold `CTRL` left mouse button and drag the cursor. -* To pan the camera, do one of the following: - * Hold `SHIFT`, then hold the left mouse button and drag the cursor. - * Hold the middle mouse button and drag the cursor. -* To zoom the camera in or out, do one of the following: - * Scroll the mouse wheel. - * Hold the right mouse button and drag the cursor. - -The available keyboard commands are as follows: - -* `a`: Toggles rotational animation mode. -* `c`: Toggles backface culling. -* `f`: Toggles fullscreen mode. -* `h`: Toggles shadow rendering. -* `i`: Toggles axis display mode (no axes, world axis, mesh axes, all axes). -* `l`: Toggles lighting mode (scene lighting, Raymond lighting, or direct lighting). -* `m`: Toggles face normal visualization. -* `n`: Toggles vertex normal visualization. -* `o`: Toggles orthographic camera mode. -* `q`: Quits the viewer. -* `r`: Starts recording a GIF, and pressing again stops recording and opens a file dialog. -* `s`: Opens a file dialog to save the current view as an image. -* `w`: Toggles wireframe mode (scene default, flip wireframes, all wireframe, or all solid). -* `z`: Resets the camera to the default view. - -As a note, displaying shadows significantly slows down rendering, so if you're -experiencing low framerates, just kill shadows or reduce the number of lights in -your scene. 
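For quick reference alongside this removed README, here is a minimal offscreen-rendering sketch in the spirit of the quickstart it links to. It assumes the publicly documented pyrender API (`Scene`, `Mesh.from_trimesh`, `OffscreenRenderer`); the mesh path is a placeholder, and on a headless server the EGL or OSMesa backend is assumed to be selected via the `PYOPENGL_PLATFORM` environment variable, as described in the pyrender documentation.

```python
import numpy as np
import trimesh
import pyrender

# Load any trimesh-supported mesh (the path below is a placeholder).
tm = trimesh.load('models/damaged_helmet.glb', force='mesh')
mesh = pyrender.Mesh.from_trimesh(tm)

# Build a scene with the mesh, a perspective camera, and a directional light.
scene = pyrender.Scene(ambient_light=[0.1, 0.1, 0.1])
scene.add(mesh)

camera_pose = np.array([
    [1.0, 0.0, 0.0, 0.0],
    [0.0, 1.0, 0.0, 0.0],
    [0.0, 0.0, 1.0, 2.5],   # pull the camera back along +z
    [0.0, 0.0, 0.0, 1.0],
])
scene.add(pyrender.PerspectiveCamera(yfov=np.pi / 3.0), pose=camera_pose)
scene.add(pyrender.DirectionalLight(color=np.ones(3), intensity=3.0), pose=camera_pose)

# Render color and depth images without opening a viewer window.
renderer = pyrender.OffscreenRenderer(viewport_width=640, viewport_height=480)
color, depth = renderer.render(scene)
renderer.delete()
print(color.shape, depth.shape)  # (480, 640, 3) and (480, 640)
```

The interactive viewer follows the same scene setup; replacing the offscreen block with `pyrender.Viewer(scene, use_raymond_lighting=True)` opens the window whose keyboard and mouse controls are listed above.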
diff --git a/spaces/AIZero2Hero4Health/3-ChatbotBlenderbot-GR/README.md b/spaces/AIZero2Hero4Health/3-ChatbotBlenderbot-GR/README.md deleted file mode 100644 index 5d45a17e81e1a1616182dce44cebaa9bf13ca31a..0000000000000000000000000000000000000000 --- a/spaces/AIZero2Hero4Health/3-ChatbotBlenderbot-GR/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: 3 ChatbotBlenderbot GR -emoji: 🏢 -colorFrom: blue -colorTo: green -sdk: gradio -sdk_version: 3.8.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ASJMO/freegpt/g4f/Provider/Providers/Ails.py b/spaces/ASJMO/freegpt/g4f/Provider/Providers/Ails.py deleted file mode 100644 index 5feec9e987e3cd2590e2a72b623dc4b90e0cf53d..0000000000000000000000000000000000000000 --- a/spaces/ASJMO/freegpt/g4f/Provider/Providers/Ails.py +++ /dev/null @@ -1,87 +0,0 @@ -import os -import time -import json -import uuid -import hashlib -import requests - -from ...typing import sha256, Dict, get_type_hints -from datetime import datetime - -url: str = 'https://ai.ls' -model: str = 'gpt-3.5-turbo' -supports_stream = True -needs_auth = False -working = True - - -class Utils: - def hash(json_data: Dict[str, str]) -> sha256: - - base_string: str = '%s:%s:%s:%s' % ( - json_data['t'], - json_data['m'], - 'WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf', - len(json_data['m']) - ) - - return hashlib.sha256(base_string.encode()).hexdigest() - - def format_timestamp(timestamp: int) -> str: - - e = timestamp - n = e % 10 - r = n + 1 if n % 2 == 0 else n - return str(e - n + r) - - -def _create_completion(model: str, messages: list, temperature: float = 0.6, stream: bool = False, **kwargs): - - headers = { - 'authority': 'api.caipacity.com', - 'accept': '*/*', - 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'authorization': 'Bearer free', - 'client-id': str(uuid.uuid4()), - 'client-v': '0.1.249', - 'content-type': 'application/json', - 'origin': 'https://ai.ls', - 'referer': 'https://ai.ls/', - 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"Windows"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'cross-site', - 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36', - } - - timestamp = Utils.format_timestamp(int(time.time() * 1000)) - - sig = { - 'd': datetime.now().strftime('%Y-%m-%d'), - 't': timestamp, - 's': Utils.hash({ - 't': timestamp, - 'm': messages[-1]['content']})} - - json_data = json.dumps(separators=(',', ':'), obj={ - 'model': 'gpt-3.5-turbo', - 'temperature': 0.6, - 'stream': True, - 'messages': messages} | sig) - - response = requests.post('https://api.caipacity.com/v1/chat/completions', - headers=headers, data=json_data, stream=True) - - for token in response.iter_lines(): - if b'content' in token: - completion_chunk = json.loads(token.decode().replace('data: ', '')) - token = completion_chunk['choices'][0]['delta'].get('content') - if token != None: - yield token - - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join( - [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) diff --git a/spaces/AfrodreamsAI/afrodreams/CaffeLoader.py b/spaces/AfrodreamsAI/afrodreams/CaffeLoader.py 
deleted file mode 100644 index d09ccdd752f47e1d1e03f4560a8649e9cf921adc..0000000000000000000000000000000000000000 --- a/spaces/AfrodreamsAI/afrodreams/CaffeLoader.py +++ /dev/null @@ -1,254 +0,0 @@ -import torch -import torch.nn as nn - - -class VGG(nn.Module): - def __init__(self, features, num_classes=1000): - super(VGG, self).__init__() - self.features = features - self.classifier = nn.Sequential( - nn.Linear(512 * 7 * 7, 4096), - nn.ReLU(True), - nn.Dropout(), - nn.Linear(4096, 4096), - nn.ReLU(True), - nn.Dropout(), - nn.Linear(4096, num_classes), - ) - - -class VGG_SOD(nn.Module): - def __init__(self, features, num_classes=100): - super(VGG_SOD, self).__init__() - self.features = features - self.classifier = nn.Sequential( - nn.Linear(512 * 7 * 7, 4096), - nn.ReLU(True), - nn.Dropout(), - nn.Linear(4096, 4096), - nn.ReLU(True), - nn.Dropout(), - nn.Linear(4096, 100), - ) - - -class VGG_FCN32S(nn.Module): - def __init__(self, features, num_classes=1000): - super(VGG_FCN32S, self).__init__() - self.features = features - self.classifier = nn.Sequential( - nn.Conv2d(512,4096,(7, 7)), - nn.ReLU(True), - nn.Dropout(0.5), - nn.Conv2d(4096,4096,(1, 1)), - nn.ReLU(True), - nn.Dropout(0.5), - ) - - -class VGG_PRUNED(nn.Module): - def __init__(self, features, num_classes=1000): - super(VGG_PRUNED, self).__init__() - self.features = features - self.classifier = nn.Sequential( - nn.Linear(512 * 7 * 7, 4096), - nn.ReLU(True), - nn.Dropout(0.5), - nn.Linear(4096, 4096), - nn.ReLU(True), - nn.Dropout(0.5), - ) - - -class NIN(nn.Module): - def __init__(self, pooling): - super(NIN, self).__init__() - if pooling == 'max': - pool2d = nn.MaxPool2d((3, 3),(2, 2),(0, 0),ceil_mode=True) - elif pooling == 'avg': - pool2d = nn.AvgPool2d((3, 3),(2, 2),(0, 0),ceil_mode=True) - - self.features = nn.Sequential( - nn.Conv2d(3,96,(11, 11),(4, 4)), - nn.ReLU(inplace=True), - nn.Conv2d(96,96,(1, 1)), - nn.ReLU(inplace=True), - nn.Conv2d(96,96,(1, 1)), - nn.ReLU(inplace=True), - pool2d, - nn.Conv2d(96,256,(5, 5),(1, 1),(2, 2)), - nn.ReLU(inplace=True), - nn.Conv2d(256,256,(1, 1)), - nn.ReLU(inplace=True), - nn.Conv2d(256,256,(1, 1)), - nn.ReLU(inplace=True), - pool2d, - nn.Conv2d(256,384,(3, 3),(1, 1),(1, 1)), - nn.ReLU(inplace=True), - nn.Conv2d(384,384,(1, 1)), - nn.ReLU(inplace=True), - nn.Conv2d(384,384,(1, 1)), - nn.ReLU(inplace=True), - pool2d, - nn.Dropout(0.5), - nn.Conv2d(384,1024,(3, 3),(1, 1),(1, 1)), - nn.ReLU(inplace=True), - nn.Conv2d(1024,1024,(1, 1)), - nn.ReLU(inplace=True), - nn.Conv2d(1024,1000,(1, 1)), - nn.ReLU(inplace=True), - nn.AvgPool2d((6, 6),(1, 1),(0, 0),ceil_mode=True), - nn.Softmax(), - ) - - - -class ModelParallel(nn.Module): - def __init__(self, net, device_ids, device_splits): - super(ModelParallel, self).__init__() - self.device_list = self.name_devices(device_ids.split(',')) - self.chunks = self.chunks_to_devices(self.split_net(net, device_splits.split(','))) - - def name_devices(self, input_list): - device_list = [] - for i, device in enumerate(input_list): - if str(device).lower() != 'c': - device_list.append("cuda:" + str(device)) - else: - device_list.append("cpu") - return device_list - - def split_net(self, net, device_splits): - chunks, cur_chunk = [], nn.Sequential() - for i, l in enumerate(net): - cur_chunk.add_module(str(i), net[i]) - if str(i) in device_splits and device_splits != '': - del device_splits[0] - chunks.append(cur_chunk) - cur_chunk = nn.Sequential() - chunks.append(cur_chunk) - return chunks - - def chunks_to_devices(self, chunks): - for i, chunk in 
enumerate(chunks): - chunk.to(self.device_list[i]) - return chunks - - def c(self, input, i): - if input.type() == 'torch.FloatTensor' and 'cuda' in self.device_list[i]: - input = input.type('torch.cuda.FloatTensor') - elif input.type() == 'torch.cuda.FloatTensor' and 'cpu' in self.device_list[i]: - input = input.type('torch.FloatTensor') - return input - - def forward(self, input): - for i, chunk in enumerate(self.chunks): - if i < len(self.chunks) -1: - input = self.c(chunk(self.c(input, i).to(self.device_list[i])), i+1).to(self.device_list[i+1]) - else: - input = chunk(input) - return input - - - -def buildSequential(channel_list, pooling): - layers = [] - in_channels = 3 - if pooling == 'max': - pool2d = nn.MaxPool2d(kernel_size=2, stride=2) - elif pooling == 'avg': - pool2d = nn.AvgPool2d(kernel_size=2, stride=2) - else: - raise ValueError("Unrecognized pooling parameter") - for c in channel_list: - if c == 'P': - layers += [pool2d] - else: - conv2d = nn.Conv2d(in_channels, c, kernel_size=3, padding=1) - layers += [conv2d, nn.ReLU(inplace=True)] - in_channels = c - return nn.Sequential(*layers) - - -channel_list = { -'VGG-16p': [24, 22, 'P', 41, 51, 'P', 108, 89, 111, 'P', 184, 276, 228, 'P', 512, 512, 512, 'P'], -'VGG-16': [64, 64, 'P', 128, 128, 'P', 256, 256, 256, 'P', 512, 512, 512, 'P', 512, 512, 512, 'P'], -'VGG-19': [64, 64, 'P', 128, 128, 'P', 256, 256, 256, 256, 'P', 512, 512, 512, 512, 'P', 512, 512, 512, 512, 'P'], -} - -nin_dict = { -'C': ['conv1', 'cccp1', 'cccp2', 'conv2', 'cccp3', 'cccp4', 'conv3', 'cccp5', 'cccp6', 'conv4-1024', 'cccp7-1024', 'cccp8-1024'], -'R': ['relu0', 'relu1', 'relu2', 'relu3', 'relu5', 'relu6', 'relu7', 'relu8', 'relu9', 'relu10', 'relu11', 'relu12'], -'P': ['pool1', 'pool2', 'pool3', 'pool4'], -'D': ['drop'], -} -vgg16_dict = { -'C': ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2', 'conv3_3', 'conv4_1', 'conv4_2', 'conv4_3', 'conv5_1', 'conv5_2', 'conv5_3'], -'R': ['relu1_1', 'relu1_2', 'relu2_1', 'relu2_2', 'relu3_1', 'relu3_2', 'relu3_3', 'relu4_1', 'relu4_2', 'relu4_3', 'relu5_1', 'relu5_2', 'relu5_3'], -'P': ['pool1', 'pool2', 'pool3', 'pool4', 'pool5'], -} -vgg19_dict = { -'C': ['conv1_1', 'conv1_2', 'conv2_1', 'conv2_2', 'conv3_1', 'conv3_2', 'conv3_3', 'conv3_4', 'conv4_1', 'conv4_2', 'conv4_3', 'conv4_4', 'conv5_1', 'conv5_2', 'conv5_3', 'conv5_4'], -'R': ['relu1_1', 'relu1_2', 'relu2_1', 'relu2_2', 'relu3_1', 'relu3_2', 'relu3_3', 'relu3_4', 'relu4_1', 'relu4_2', 'relu4_3', 'relu4_4', 'relu5_1', 'relu5_2', 'relu5_3', 'relu5_4'], -'P': ['pool1', 'pool2', 'pool3', 'pool4', 'pool5'], -} - - -def modelSelector(model_file, pooling): - vgg_list = ["fcn32s", "pruning", "sod", "vgg"] - if any(name in model_file for name in vgg_list): - if "pruning" in model_file: - print("VGG-16 Architecture Detected") - print("Using The Channel Pruning Model") - cnn, layerList = VGG_PRUNED(buildSequential(channel_list['VGG-16p'], pooling)), vgg16_dict - elif "fcn32s" in model_file: - print("VGG-16 Architecture Detected") - print("Using the fcn32s-heavy-pascal Model") - cnn, layerList = VGG_FCN32S(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict - elif "sod" in model_file: - print("VGG-16 Architecture Detected") - print("Using The SOD Fintune Model") - cnn, layerList = VGG_SOD(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict - elif "19" in model_file: - print("VGG-19 Architecture Detected") - cnn, layerList = VGG(buildSequential(channel_list['VGG-19'], pooling)), vgg19_dict - elif "16" in model_file: - print("VGG-16 
Architecture Detected") - cnn, layerList = VGG(buildSequential(channel_list['VGG-16'], pooling)), vgg16_dict - else: - raise ValueError("VGG architecture not recognized.") - elif "nin" in model_file: - print("NIN Architecture Detected") - cnn, layerList = NIN(pooling), nin_dict - else: - raise ValueError("Model architecture not recognized.") - return cnn, layerList - - -# Print like Torch7/loadcaffe -def print_loadcaffe(cnn, layerList): - c = 0 - for l in list(cnn): - if "Conv2d" in str(l): - in_c, out_c, ks = str(l.in_channels), str(l.out_channels), str(l.kernel_size) - print(layerList['C'][c] +": " + (out_c + " " + in_c + " " + ks).replace(")",'').replace("(",'').replace(",",'') ) - c+=1 - if c == len(layerList['C']): - break - - -# Load the model, and configure pooling layer type -def loadCaffemodel(model_file, pooling, use_gpu, disable_check): - cnn, layerList = modelSelector(str(model_file).lower(), pooling) - - cnn.load_state_dict(torch.load(model_file), strict=(not disable_check)) - print("Successfully loaded " + str(model_file)) - - # Maybe convert the model to cuda now, to avoid later issues - if "c" not in str(use_gpu).lower() or "c" not in str(use_gpu[0]).lower(): - cnn = cnn.cuda() - cnn = cnn.features - - print_loadcaffe(cnn, layerList) - - return cnn, layerList diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/mousewheeltoupdown-plugin.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/mousewheeltoupdown-plugin.js deleted file mode 100644 index 18f250b6b4248c631375bfe01b20d1009a3a99cb..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/mousewheeltoupdown-plugin.js +++ /dev/null @@ -1,20 +0,0 @@ -import MouseWheelToUpDown from './mousewheeltoupdown.js'; - -class MouseWheelToUpDownPlugin extends Phaser.Plugins.BasePlugin { - - constructor(pluginManager) { - super(pluginManager); - } - - start() { - var eventEmitter = this.game.events; - eventEmitter.on('destroy', this.destroy, this); - } - - add(scene, config) { - return new MouseWheelToUpDown(scene, config); - } - -} - -export default MouseWheelToUpDownPlugin; \ No newline at end of file diff --git a/spaces/AlirezaSM/bear_classifier/app.py b/spaces/AlirezaSM/bear_classifier/app.py deleted file mode 100644 index b1251c65ba8bc20a57d2e4182a41b81fe5cefb8e..0000000000000000000000000000000000000000 --- a/spaces/AlirezaSM/bear_classifier/app.py +++ /dev/null @@ -1,17 +0,0 @@ -from fastai.vision.all import * -import gradio as gr - -learn = load_learner('export.pkl') - -categories = ('Black', 'Grizzly', 'Teddy') - -def classify_image(img): - pred, idx, probs = learn.predict(img) - return dict(zip(categories, map(float, probs))) - -image = gr.inputs.Image(shape=(192, 192)) -label = gr.outputs.Label() -examples = ['black.jpg', 'grizzly.jpg', 'teddy.jpg'] - -intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples) -intf.launch(inline=False) \ No newline at end of file diff --git a/spaces/Alycer/VITS-Umamusume-voice-synthesizer/attentions.py b/spaces/Alycer/VITS-Umamusume-voice-synthesizer/attentions.py deleted file mode 100644 index 86bc73b5fe98cc7b443e9078553920346c996707..0000000000000000000000000000000000000000 --- a/spaces/Alycer/VITS-Umamusume-voice-synthesizer/attentions.py +++ /dev/null @@ -1,300 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, 
hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert 
channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." 
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/encoders/psp_encoders.py b/spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/encoders/psp_encoders.py deleted file mode 100644 index 4bdfa8a5072770967f81ae1f8393b44368ffe42b..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/pti_models/e4e/encoders/psp_encoders.py +++ /dev/null @@ -1,208 +0,0 @@ -from enum import Enum -import math -import numpy as np -import torch -from torch import nn -from torch.nn import Conv2d, BatchNorm2d, PReLU, Sequential, Module - -from pti.pti_models.e4e.encoders.helpers import get_blocks, bottleneck_IR, bottleneck_IR_SE, _upsample_add -from pti.pti_models.e4e.stylegan2.model import EqualLinear - - -class ProgressiveStage(Enum): - WTraining = 0 - Delta1Training = 1 - Delta2Training = 2 - Delta3Training = 3 - Delta4Training = 4 - Delta5Training = 5 - Delta6Training = 6 - Delta7Training = 7 - Delta8Training = 8 - Delta9Training = 9 - Delta10Training = 10 - Delta11Training = 11 - Delta12Training = 12 - Delta13Training = 13 - Delta14Training = 14 - Delta15Training = 15 - Delta16Training = 16 - Delta17Training = 17 - Inference = 18 - - -class GradualStyleBlock(Module): - def __init__(self, in_c, out_c, spatial): - super(GradualStyleBlock, self).__init__() - self.out_c = out_c - self.spatial = spatial - num_pools = int(np.log2(spatial)) - modules = [] - modules += [Conv2d(in_c, out_c, kernel_size=3, stride=2, padding=1), - nn.LeakyReLU()] - for i in range(num_pools - 1): - modules += [ - Conv2d(out_c, out_c, kernel_size=3, stride=2, padding=1), - nn.LeakyReLU() - ] - self.convs = nn.Sequential(*modules) - self.linear = EqualLinear(out_c, out_c, lr_mul=1) - - def forward(self, x): - x = self.convs(x) - x = x.view(-1, self.out_c) - x = self.linear(x) - return x - - -class GradualStyleEncoder(Module): - def __init__(self, 
num_layers, mode='ir', opts=None): - super(GradualStyleEncoder, self).__init__() - assert num_layers in [ - 50, 100, 152], 'num_layers should be 50,100, or 152' - assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se' - blocks = get_blocks(num_layers) - if mode == 'ir': - unit_module = bottleneck_IR - elif mode == 'ir_se': - unit_module = bottleneck_IR_SE - self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False), - BatchNorm2d(64), - PReLU(64)) - modules = [] - for block in blocks: - for bottleneck in block: - modules.append(unit_module(bottleneck.in_channel, - bottleneck.depth, - bottleneck.stride)) - self.body = Sequential(*modules) - - self.styles = nn.ModuleList() - log_size = int(math.log(opts.stylegan_size, 2)) - self.style_count = 2 * log_size - 2 - self.coarse_ind = 3 - self.middle_ind = 7 - for i in range(self.style_count): - if i < self.coarse_ind: - style = GradualStyleBlock(512, 512, 16) - elif i < self.middle_ind: - style = GradualStyleBlock(512, 512, 32) - else: - style = GradualStyleBlock(512, 512, 64) - self.styles.append(style) - self.latlayer1 = nn.Conv2d( - 256, 512, kernel_size=1, stride=1, padding=0) - self.latlayer2 = nn.Conv2d( - 128, 512, kernel_size=1, stride=1, padding=0) - - def forward(self, x): - x = self.input_layer(x) - - latents = [] - modulelist = list(self.body._modules.values()) - for i, l in enumerate(modulelist): - x = l(x) - if i == 6: - c1 = x - elif i == 20: - c2 = x - elif i == 23: - c3 = x - - for j in range(self.coarse_ind): - latents.append(self.styles[j](c3)) - - p2 = _upsample_add(c3, self.latlayer1(c2)) - for j in range(self.coarse_ind, self.middle_ind): - latents.append(self.styles[j](p2)) - - p1 = _upsample_add(p2, self.latlayer2(c1)) - for j in range(self.middle_ind, self.style_count): - latents.append(self.styles[j](p1)) - - out = torch.stack(latents, dim=1) - return out - - -class Encoder4Editing(Module): - def __init__(self, num_layers, mode='ir', opts=None): - super(Encoder4Editing, self).__init__() - assert num_layers in [ - 50, 100, 152], 'num_layers should be 50,100, or 152' - assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se' - blocks = get_blocks(num_layers) - if mode == 'ir': - unit_module = bottleneck_IR - elif mode == 'ir_se': - unit_module = bottleneck_IR_SE - self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False), - BatchNorm2d(64), - PReLU(64)) - modules = [] - for block in blocks: - for bottleneck in block: - modules.append(unit_module(bottleneck.in_channel, - bottleneck.depth, - bottleneck.stride)) - self.body = Sequential(*modules) - - self.styles = nn.ModuleList() - log_size = int(math.log(opts.stylegan_size, 2)) - self.style_count = 2 * log_size - 2 - self.coarse_ind = 3 - self.middle_ind = 7 - - for i in range(self.style_count): - if i < self.coarse_ind: - style = GradualStyleBlock(512, 512, 16) - elif i < self.middle_ind: - style = GradualStyleBlock(512, 512, 32) - else: - style = GradualStyleBlock(512, 512, 64) - self.styles.append(style) - - self.latlayer1 = nn.Conv2d( - 256, 512, kernel_size=1, stride=1, padding=0) - self.latlayer2 = nn.Conv2d( - 128, 512, kernel_size=1, stride=1, padding=0) - - self.progressive_stage = ProgressiveStage.Inference - - def get_deltas_starting_dimensions(self): - ''' Get a list of the initial dimension of every delta from which it is applied ''' - return list(range(self.style_count)) # Each dimension has a delta applied to it - - def set_progressive_stage(self, new_stage: ProgressiveStage): - self.progressive_stage = new_stage - 
print('Changed progressive stage to: ', new_stage) - - def forward(self, x): - x = self.input_layer(x) - - modulelist = list(self.body._modules.values()) - for i, l in enumerate(modulelist): - x = l(x) - if i == 6: - c1 = x - elif i == 20: - c2 = x - elif i == 23: - c3 = x - - # Infer main W and duplicate it - w0 = self.styles[0](c3) - w = w0.repeat(self.style_count, 1, 1).permute(1, 0, 2) - stage = self.progressive_stage.value - features = c3 - for i in range(1, min(stage + 1, self.style_count)): # Infer additional deltas - if i == self.coarse_ind: - # FPN's middle features - p2 = _upsample_add(c3, self.latlayer1(c2)) - features = p2 - elif i == self.middle_ind: - # FPN's fine features - p1 = _upsample_add(p2, self.latlayer2(c1)) - features = p1 - delta_i = self.styles[i](features) - w[:, i] += delta_i - return w diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/clip_guided_stable_diffusion_img2img.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/clip_guided_stable_diffusion_img2img.py deleted file mode 100644 index a72a5a127c72785806d4bfe194fb990d4740060e..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/clip_guided_stable_diffusion_img2img.py +++ /dev/null @@ -1,496 +0,0 @@ -import inspect -from typing import List, Optional, Union - -import numpy as np -import PIL -import torch -from torch import nn -from torch.nn import functional as F -from torchvision import transforms -from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer - -from diffusers import ( - AutoencoderKL, - DDIMScheduler, - DiffusionPipeline, - DPMSolverMultistepScheduler, - LMSDiscreteScheduler, - PNDMScheduler, - UNet2DConditionModel, -) -from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput -from diffusers.utils import ( - PIL_INTERPOLATION, - deprecate, - randn_tensor, -) - - -EXAMPLE_DOC_STRING = """ - Examples: - ``` - from io import BytesIO - - import requests - import torch - from diffusers import DiffusionPipeline - from PIL import Image - from transformers import CLIPFeatureExtractor, CLIPModel - - feature_extractor = CLIPFeatureExtractor.from_pretrained( - "laion/CLIP-ViT-B-32-laion2B-s34B-b79K" - ) - clip_model = CLIPModel.from_pretrained( - "laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16 - ) - - - guided_pipeline = DiffusionPipeline.from_pretrained( - "CompVis/stable-diffusion-v1-4", - # custom_pipeline="clip_guided_stable_diffusion", - custom_pipeline="/home/njindal/diffusers/examples/community/clip_guided_stable_diffusion.py", - clip_model=clip_model, - feature_extractor=feature_extractor, - torch_dtype=torch.float16, - ) - guided_pipeline.enable_attention_slicing() - guided_pipeline = guided_pipeline.to("cuda") - - prompt = "fantasy book cover, full moon, fantasy forest landscape, golden vector elements, fantasy magic, dark light night, intricate, elegant, sharp focus, illustration, highly detailed, digital painting, concept art, matte, art by WLOP and Artgerm and Albert Bierstadt, masterpiece" - - url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" - - response = requests.get(url) - init_image = Image.open(BytesIO(response.content)).convert("RGB") - - image = guided_pipeline( - prompt=prompt, - num_inference_steps=30, - image=init_image, - strength=0.75, - guidance_scale=7.5, - 
clip_guidance_scale=100, - num_cutouts=4, - use_cutouts=False, - ).images[0] - display(image) - ``` -""" - - -def preprocess(image, w, h): - if isinstance(image, torch.Tensor): - return image - elif isinstance(image, PIL.Image.Image): - image = [image] - - if isinstance(image[0], PIL.Image.Image): - image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] - image = np.concatenate(image, axis=0) - image = np.array(image).astype(np.float32) / 255.0 - image = image.transpose(0, 3, 1, 2) - image = 2.0 * image - 1.0 - image = torch.from_numpy(image) - elif isinstance(image[0], torch.Tensor): - image = torch.cat(image, dim=0) - return image - - -class MakeCutouts(nn.Module): - def __init__(self, cut_size, cut_power=1.0): - super().__init__() - - self.cut_size = cut_size - self.cut_power = cut_power - - def forward(self, pixel_values, num_cutouts): - sideY, sideX = pixel_values.shape[2:4] - max_size = min(sideX, sideY) - min_size = min(sideX, sideY, self.cut_size) - cutouts = [] - for _ in range(num_cutouts): - size = int(torch.rand([]) ** self.cut_power * (max_size - min_size) + min_size) - offsetx = torch.randint(0, sideX - size + 1, ()) - offsety = torch.randint(0, sideY - size + 1, ()) - cutout = pixel_values[:, :, offsety : offsety + size, offsetx : offsetx + size] - cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size)) - return torch.cat(cutouts) - - -def spherical_dist_loss(x, y): - x = F.normalize(x, dim=-1) - y = F.normalize(y, dim=-1) - return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2) - - -def set_requires_grad(model, value): - for param in model.parameters(): - param.requires_grad = value - - -class CLIPGuidedStableDiffusion(DiffusionPipeline): - """CLIP guided stable diffusion based on the amazing repo by @crowsonkb and @Jack000 - - https://github.com/Jack000/glid-3-xl - - https://github.dev/crowsonkb/k-diffusion - """ - - def __init__( - self, - vae: AutoencoderKL, - text_encoder: CLIPTextModel, - clip_model: CLIPModel, - tokenizer: CLIPTokenizer, - unet: UNet2DConditionModel, - scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], - feature_extractor: CLIPFeatureExtractor, - ): - super().__init__() - self.register_modules( - vae=vae, - text_encoder=text_encoder, - clip_model=clip_model, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - feature_extractor=feature_extractor, - ) - - self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std) - self.cut_out_size = ( - feature_extractor.size - if isinstance(feature_extractor.size, int) - else feature_extractor.size["shortest_edge"] - ) - self.make_cutouts = MakeCutouts(self.cut_out_size) - - set_requires_grad(self.text_encoder, False) - set_requires_grad(self.clip_model, False) - - def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"): - if slice_size == "auto": - # half the attention head size is usually a good trade-off between - # speed and memory - slice_size = self.unet.config.attention_head_dim // 2 - self.unet.set_attention_slice(slice_size) - - def disable_attention_slicing(self): - self.enable_attention_slicing(None) - - def freeze_vae(self): - set_requires_grad(self.vae, False) - - def unfreeze_vae(self): - set_requires_grad(self.vae, True) - - def freeze_unet(self): - set_requires_grad(self.unet, False) - - def unfreeze_unet(self): - set_requires_grad(self.unet, True) - - def get_timesteps(self, num_inference_steps, strength, device): - # 
get the original timestep using init_timestep - init_timestep = min(int(num_inference_steps * strength), num_inference_steps) - - t_start = max(num_inference_steps - init_timestep, 0) - timesteps = self.scheduler.timesteps[t_start:] - - return timesteps, num_inference_steps - t_start - - def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): - if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): - raise ValueError( - f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" - ) - - image = image.to(device=device, dtype=dtype) - - batch_size = batch_size * num_images_per_prompt - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." - ) - - if isinstance(generator, list): - init_latents = [ - self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) - ] - init_latents = torch.cat(init_latents, dim=0) - else: - init_latents = self.vae.encode(image).latent_dist.sample(generator) - - init_latents = self.vae.config.scaling_factor * init_latents - - if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: - # expand init_latents for batch_size - deprecation_message = ( - f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" - " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" - " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" - " your script to pass as many initial images as text prompts to suppress this warning." - ) - deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) - additional_image_per_prompt = batch_size // init_latents.shape[0] - init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) - elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: - raise ValueError( - f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." 
- ) - else: - init_latents = torch.cat([init_latents], dim=0) - - shape = init_latents.shape - noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) - - # get latents - init_latents = self.scheduler.add_noise(init_latents, noise, timestep) - latents = init_latents - - return latents - - @torch.enable_grad() - def cond_fn( - self, - latents, - timestep, - index, - text_embeddings, - noise_pred_original, - text_embeddings_clip, - clip_guidance_scale, - num_cutouts, - use_cutouts=True, - ): - latents = latents.detach().requires_grad_() - - latent_model_input = self.scheduler.scale_model_input(latents, timestep) - - # predict the noise residual - noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample - - if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)): - alpha_prod_t = self.scheduler.alphas_cumprod[timestep] - beta_prod_t = 1 - alpha_prod_t - # compute predicted original sample from predicted noise also called - # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf - pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5) - - fac = torch.sqrt(beta_prod_t) - sample = pred_original_sample * (fac) + latents * (1 - fac) - elif isinstance(self.scheduler, LMSDiscreteScheduler): - sigma = self.scheduler.sigmas[index] - sample = latents - sigma * noise_pred - else: - raise ValueError(f"scheduler type {type(self.scheduler)} not supported") - - sample = 1 / self.vae.config.scaling_factor * sample - image = self.vae.decode(sample).sample - image = (image / 2 + 0.5).clamp(0, 1) - - if use_cutouts: - image = self.make_cutouts(image, num_cutouts) - else: - image = transforms.Resize(self.cut_out_size)(image) - image = self.normalize(image).to(latents.dtype) - - image_embeddings_clip = self.clip_model.get_image_features(image) - image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True) - - if use_cutouts: - dists = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip) - dists = dists.view([num_cutouts, sample.shape[0], -1]) - loss = dists.sum(2).mean(0).sum() * clip_guidance_scale - else: - loss = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip).mean() * clip_guidance_scale - - grads = -torch.autograd.grad(loss, latents)[0] - - if isinstance(self.scheduler, LMSDiscreteScheduler): - latents = latents.detach() + grads * (sigma**2) - noise_pred = noise_pred_original - else: - noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads - return noise_pred, latents - - @torch.no_grad() - def __call__( - self, - prompt: Union[str, List[str]], - height: Optional[int] = 512, - width: Optional[int] = 512, - image: Union[torch.FloatTensor, PIL.Image.Image] = None, - strength: float = 0.8, - num_inference_steps: Optional[int] = 50, - guidance_scale: Optional[float] = 7.5, - num_images_per_prompt: Optional[int] = 1, - eta: float = 0.0, - clip_guidance_scale: Optional[float] = 100, - clip_prompt: Optional[Union[str, List[str]]] = None, - num_cutouts: Optional[int] = 4, - use_cutouts: Optional[bool] = True, - generator: Optional[torch.Generator] = None, - latents: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - ): - if isinstance(prompt, str): - batch_size = 1 - elif isinstance(prompt, list): - batch_size = len(prompt) - else: - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if 
height % 8 != 0 or width % 8 != 0: - raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") - - # get prompt text embeddings - text_input = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0] - # duplicate text embeddings for each generation per prompt - text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0) - - # set timesteps - accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys()) - extra_set_kwargs = {} - if accepts_offset: - extra_set_kwargs["offset"] = 1 - - self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs) - # Some schedulers like PNDM have timesteps as arrays - # It's more optimized to move all timesteps to correct device beforehand - self.scheduler.timesteps.to(self.device) - - timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device) - latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) - - # Preprocess image - image = preprocess(image, width, height) - latents = self.prepare_latents( - image, latent_timestep, batch_size, num_images_per_prompt, text_embeddings.dtype, self.device, generator - ) - - if clip_guidance_scale > 0: - if clip_prompt is not None: - clip_text_input = self.tokenizer( - clip_prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ).input_ids.to(self.device) - else: - clip_text_input = text_input.input_ids.to(self.device) - text_embeddings_clip = self.clip_model.get_text_features(clip_text_input) - text_embeddings_clip = text_embeddings_clip / text_embeddings_clip.norm(p=2, dim=-1, keepdim=True) - # duplicate text embeddings clip for each generation per prompt - text_embeddings_clip = text_embeddings_clip.repeat_interleave(num_images_per_prompt, dim=0) - - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance: - max_length = text_input.input_ids.shape[-1] - uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt") - uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] - # duplicate unconditional embeddings for each generation per prompt - uncond_embeddings = uncond_embeddings.repeat_interleave(num_images_per_prompt, dim=0) - - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) - - # get the initial random noise unless the user supplied it - - # Unlike in other pipelines, latents need to be generated in the target device - # for 1-to-1 results reproducibility with the CompVis implementation. - # However this currently doesn't work in `mps`. 
- latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) - latents_dtype = text_embeddings.dtype - if latents is None: - if self.device.type == "mps": - # randn does not work reproducibly on mps - latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to( - self.device - ) - else: - latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype) - else: - if latents.shape != latents_shape: - raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") - latents = latents.to(self.device) - - # scale the initial noise by the standard deviation required by the scheduler - latents = latents * self.scheduler.init_noise_sigma - - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. - # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - - with self.progress_bar(total=num_inference_steps): - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # predict the noise residual - noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample - - # perform classifier free guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # perform clip guidance - if clip_guidance_scale > 0: - text_embeddings_for_guidance = ( - text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings - ) - noise_pred, latents = self.cond_fn( - latents, - t, - i, - text_embeddings_for_guidance, - noise_pred, - text_embeddings_clip, - clip_guidance_scale, - num_cutouts, - use_cutouts, - ) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample - - # scale and decode the image latents with vae - latents = 1 / self.vae.config.scaling_factor * latents - image = self.vae.decode(latents).sample - - image = (image / 2 + 0.5).clamp(0, 1) - image = image.cpu().permute(0, 2, 3, 1).numpy() - - if output_type == "pil": - image = self.numpy_to_pil(image) - - if not return_dict: - return (image, None) - - return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_original_t2i_adapter.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_original_t2i_adapter.py deleted file mode 100644 index 01a1fecf4e4b4a458cd1d866786cc7c975ed8ad2..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_original_t2i_adapter.py +++ /dev/null @@ -1,250 +0,0 @@ -# coding=utf-8 -# Copyright 
2023 The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Conversion script for the T2I-Adapter checkpoints. -""" - -import argparse - -import torch - -from diffusers import T2IAdapter - - -def convert_adapter(src_state, in_channels): - original_body_length = max([int(x.split(".")[1]) for x in src_state.keys() if "body." in x]) + 1 - - assert original_body_length == 8 - - # (0, 1) -> channels 1 - assert src_state["body.0.block1.weight"].shape == (320, 320, 3, 3) - - # (2, 3) -> channels 2 - assert src_state["body.2.in_conv.weight"].shape == (640, 320, 1, 1) - - # (4, 5) -> channels 3 - assert src_state["body.4.in_conv.weight"].shape == (1280, 640, 1, 1) - - # (6, 7) -> channels 4 - assert src_state["body.6.block1.weight"].shape == (1280, 1280, 3, 3) - - res_state = { - "adapter.conv_in.weight": src_state.pop("conv_in.weight"), - "adapter.conv_in.bias": src_state.pop("conv_in.bias"), - # 0.resnets.0 - "adapter.body.0.resnets.0.block1.weight": src_state.pop("body.0.block1.weight"), - "adapter.body.0.resnets.0.block1.bias": src_state.pop("body.0.block1.bias"), - "adapter.body.0.resnets.0.block2.weight": src_state.pop("body.0.block2.weight"), - "adapter.body.0.resnets.0.block2.bias": src_state.pop("body.0.block2.bias"), - # 0.resnets.1 - "adapter.body.0.resnets.1.block1.weight": src_state.pop("body.1.block1.weight"), - "adapter.body.0.resnets.1.block1.bias": src_state.pop("body.1.block1.bias"), - "adapter.body.0.resnets.1.block2.weight": src_state.pop("body.1.block2.weight"), - "adapter.body.0.resnets.1.block2.bias": src_state.pop("body.1.block2.bias"), - # 1 - "adapter.body.1.in_conv.weight": src_state.pop("body.2.in_conv.weight"), - "adapter.body.1.in_conv.bias": src_state.pop("body.2.in_conv.bias"), - # 1.resnets.0 - "adapter.body.1.resnets.0.block1.weight": src_state.pop("body.2.block1.weight"), - "adapter.body.1.resnets.0.block1.bias": src_state.pop("body.2.block1.bias"), - "adapter.body.1.resnets.0.block2.weight": src_state.pop("body.2.block2.weight"), - "adapter.body.1.resnets.0.block2.bias": src_state.pop("body.2.block2.bias"), - # 1.resnets.1 - "adapter.body.1.resnets.1.block1.weight": src_state.pop("body.3.block1.weight"), - "adapter.body.1.resnets.1.block1.bias": src_state.pop("body.3.block1.bias"), - "adapter.body.1.resnets.1.block2.weight": src_state.pop("body.3.block2.weight"), - "adapter.body.1.resnets.1.block2.bias": src_state.pop("body.3.block2.bias"), - # 2 - "adapter.body.2.in_conv.weight": src_state.pop("body.4.in_conv.weight"), - "adapter.body.2.in_conv.bias": src_state.pop("body.4.in_conv.bias"), - # 2.resnets.0 - "adapter.body.2.resnets.0.block1.weight": src_state.pop("body.4.block1.weight"), - "adapter.body.2.resnets.0.block1.bias": src_state.pop("body.4.block1.bias"), - "adapter.body.2.resnets.0.block2.weight": src_state.pop("body.4.block2.weight"), - "adapter.body.2.resnets.0.block2.bias": src_state.pop("body.4.block2.bias"), - # 2.resnets.1 - "adapter.body.2.resnets.1.block1.weight": src_state.pop("body.5.block1.weight"), - 
"adapter.body.2.resnets.1.block1.bias": src_state.pop("body.5.block1.bias"), - "adapter.body.2.resnets.1.block2.weight": src_state.pop("body.5.block2.weight"), - "adapter.body.2.resnets.1.block2.bias": src_state.pop("body.5.block2.bias"), - # 3.resnets.0 - "adapter.body.3.resnets.0.block1.weight": src_state.pop("body.6.block1.weight"), - "adapter.body.3.resnets.0.block1.bias": src_state.pop("body.6.block1.bias"), - "adapter.body.3.resnets.0.block2.weight": src_state.pop("body.6.block2.weight"), - "adapter.body.3.resnets.0.block2.bias": src_state.pop("body.6.block2.bias"), - # 3.resnets.1 - "adapter.body.3.resnets.1.block1.weight": src_state.pop("body.7.block1.weight"), - "adapter.body.3.resnets.1.block1.bias": src_state.pop("body.7.block1.bias"), - "adapter.body.3.resnets.1.block2.weight": src_state.pop("body.7.block2.weight"), - "adapter.body.3.resnets.1.block2.bias": src_state.pop("body.7.block2.bias"), - } - - assert len(src_state) == 0 - - adapter = T2IAdapter(in_channels=in_channels, adapter_type="full_adapter") - - adapter.load_state_dict(res_state) - - return adapter - - -def convert_light_adapter(src_state): - original_body_length = max([int(x.split(".")[1]) for x in src_state.keys() if "body." in x]) + 1 - - assert original_body_length == 4 - - res_state = { - # body.0.in_conv - "adapter.body.0.in_conv.weight": src_state.pop("body.0.in_conv.weight"), - "adapter.body.0.in_conv.bias": src_state.pop("body.0.in_conv.bias"), - # body.0.resnets.0 - "adapter.body.0.resnets.0.block1.weight": src_state.pop("body.0.body.0.block1.weight"), - "adapter.body.0.resnets.0.block1.bias": src_state.pop("body.0.body.0.block1.bias"), - "adapter.body.0.resnets.0.block2.weight": src_state.pop("body.0.body.0.block2.weight"), - "adapter.body.0.resnets.0.block2.bias": src_state.pop("body.0.body.0.block2.bias"), - # body.0.resnets.1 - "adapter.body.0.resnets.1.block1.weight": src_state.pop("body.0.body.1.block1.weight"), - "adapter.body.0.resnets.1.block1.bias": src_state.pop("body.0.body.1.block1.bias"), - "adapter.body.0.resnets.1.block2.weight": src_state.pop("body.0.body.1.block2.weight"), - "adapter.body.0.resnets.1.block2.bias": src_state.pop("body.0.body.1.block2.bias"), - # body.0.resnets.2 - "adapter.body.0.resnets.2.block1.weight": src_state.pop("body.0.body.2.block1.weight"), - "adapter.body.0.resnets.2.block1.bias": src_state.pop("body.0.body.2.block1.bias"), - "adapter.body.0.resnets.2.block2.weight": src_state.pop("body.0.body.2.block2.weight"), - "adapter.body.0.resnets.2.block2.bias": src_state.pop("body.0.body.2.block2.bias"), - # body.0.resnets.3 - "adapter.body.0.resnets.3.block1.weight": src_state.pop("body.0.body.3.block1.weight"), - "adapter.body.0.resnets.3.block1.bias": src_state.pop("body.0.body.3.block1.bias"), - "adapter.body.0.resnets.3.block2.weight": src_state.pop("body.0.body.3.block2.weight"), - "adapter.body.0.resnets.3.block2.bias": src_state.pop("body.0.body.3.block2.bias"), - # body.0.out_conv - "adapter.body.0.out_conv.weight": src_state.pop("body.0.out_conv.weight"), - "adapter.body.0.out_conv.bias": src_state.pop("body.0.out_conv.bias"), - # body.1.in_conv - "adapter.body.1.in_conv.weight": src_state.pop("body.1.in_conv.weight"), - "adapter.body.1.in_conv.bias": src_state.pop("body.1.in_conv.bias"), - # body.1.resnets.0 - "adapter.body.1.resnets.0.block1.weight": src_state.pop("body.1.body.0.block1.weight"), - "adapter.body.1.resnets.0.block1.bias": src_state.pop("body.1.body.0.block1.bias"), - "adapter.body.1.resnets.0.block2.weight": 
src_state.pop("body.1.body.0.block2.weight"), - "adapter.body.1.resnets.0.block2.bias": src_state.pop("body.1.body.0.block2.bias"), - # body.1.resnets.1 - "adapter.body.1.resnets.1.block1.weight": src_state.pop("body.1.body.1.block1.weight"), - "adapter.body.1.resnets.1.block1.bias": src_state.pop("body.1.body.1.block1.bias"), - "adapter.body.1.resnets.1.block2.weight": src_state.pop("body.1.body.1.block2.weight"), - "adapter.body.1.resnets.1.block2.bias": src_state.pop("body.1.body.1.block2.bias"), - # body.1.body.2 - "adapter.body.1.resnets.2.block1.weight": src_state.pop("body.1.body.2.block1.weight"), - "adapter.body.1.resnets.2.block1.bias": src_state.pop("body.1.body.2.block1.bias"), - "adapter.body.1.resnets.2.block2.weight": src_state.pop("body.1.body.2.block2.weight"), - "adapter.body.1.resnets.2.block2.bias": src_state.pop("body.1.body.2.block2.bias"), - # body.1.body.3 - "adapter.body.1.resnets.3.block1.weight": src_state.pop("body.1.body.3.block1.weight"), - "adapter.body.1.resnets.3.block1.bias": src_state.pop("body.1.body.3.block1.bias"), - "adapter.body.1.resnets.3.block2.weight": src_state.pop("body.1.body.3.block2.weight"), - "adapter.body.1.resnets.3.block2.bias": src_state.pop("body.1.body.3.block2.bias"), - # body.1.out_conv - "adapter.body.1.out_conv.weight": src_state.pop("body.1.out_conv.weight"), - "adapter.body.1.out_conv.bias": src_state.pop("body.1.out_conv.bias"), - # body.2.in_conv - "adapter.body.2.in_conv.weight": src_state.pop("body.2.in_conv.weight"), - "adapter.body.2.in_conv.bias": src_state.pop("body.2.in_conv.bias"), - # body.2.body.0 - "adapter.body.2.resnets.0.block1.weight": src_state.pop("body.2.body.0.block1.weight"), - "adapter.body.2.resnets.0.block1.bias": src_state.pop("body.2.body.0.block1.bias"), - "adapter.body.2.resnets.0.block2.weight": src_state.pop("body.2.body.0.block2.weight"), - "adapter.body.2.resnets.0.block2.bias": src_state.pop("body.2.body.0.block2.bias"), - # body.2.body.1 - "adapter.body.2.resnets.1.block1.weight": src_state.pop("body.2.body.1.block1.weight"), - "adapter.body.2.resnets.1.block1.bias": src_state.pop("body.2.body.1.block1.bias"), - "adapter.body.2.resnets.1.block2.weight": src_state.pop("body.2.body.1.block2.weight"), - "adapter.body.2.resnets.1.block2.bias": src_state.pop("body.2.body.1.block2.bias"), - # body.2.body.2 - "adapter.body.2.resnets.2.block1.weight": src_state.pop("body.2.body.2.block1.weight"), - "adapter.body.2.resnets.2.block1.bias": src_state.pop("body.2.body.2.block1.bias"), - "adapter.body.2.resnets.2.block2.weight": src_state.pop("body.2.body.2.block2.weight"), - "adapter.body.2.resnets.2.block2.bias": src_state.pop("body.2.body.2.block2.bias"), - # body.2.body.3 - "adapter.body.2.resnets.3.block1.weight": src_state.pop("body.2.body.3.block1.weight"), - "adapter.body.2.resnets.3.block1.bias": src_state.pop("body.2.body.3.block1.bias"), - "adapter.body.2.resnets.3.block2.weight": src_state.pop("body.2.body.3.block2.weight"), - "adapter.body.2.resnets.3.block2.bias": src_state.pop("body.2.body.3.block2.bias"), - # body.2.out_conv - "adapter.body.2.out_conv.weight": src_state.pop("body.2.out_conv.weight"), - "adapter.body.2.out_conv.bias": src_state.pop("body.2.out_conv.bias"), - # body.3.in_conv - "adapter.body.3.in_conv.weight": src_state.pop("body.3.in_conv.weight"), - "adapter.body.3.in_conv.bias": src_state.pop("body.3.in_conv.bias"), - # body.3.body.0 - "adapter.body.3.resnets.0.block1.weight": src_state.pop("body.3.body.0.block1.weight"), - "adapter.body.3.resnets.0.block1.bias": 
src_state.pop("body.3.body.0.block1.bias"), - "adapter.body.3.resnets.0.block2.weight": src_state.pop("body.3.body.0.block2.weight"), - "adapter.body.3.resnets.0.block2.bias": src_state.pop("body.3.body.0.block2.bias"), - # body.3.body.1 - "adapter.body.3.resnets.1.block1.weight": src_state.pop("body.3.body.1.block1.weight"), - "adapter.body.3.resnets.1.block1.bias": src_state.pop("body.3.body.1.block1.bias"), - "adapter.body.3.resnets.1.block2.weight": src_state.pop("body.3.body.1.block2.weight"), - "adapter.body.3.resnets.1.block2.bias": src_state.pop("body.3.body.1.block2.bias"), - # body.3.body.2 - "adapter.body.3.resnets.2.block1.weight": src_state.pop("body.3.body.2.block1.weight"), - "adapter.body.3.resnets.2.block1.bias": src_state.pop("body.3.body.2.block1.bias"), - "adapter.body.3.resnets.2.block2.weight": src_state.pop("body.3.body.2.block2.weight"), - "adapter.body.3.resnets.2.block2.bias": src_state.pop("body.3.body.2.block2.bias"), - # body.3.body.3 - "adapter.body.3.resnets.3.block1.weight": src_state.pop("body.3.body.3.block1.weight"), - "adapter.body.3.resnets.3.block1.bias": src_state.pop("body.3.body.3.block1.bias"), - "adapter.body.3.resnets.3.block2.weight": src_state.pop("body.3.body.3.block2.weight"), - "adapter.body.3.resnets.3.block2.bias": src_state.pop("body.3.body.3.block2.bias"), - # body.3.out_conv - "adapter.body.3.out_conv.weight": src_state.pop("body.3.out_conv.weight"), - "adapter.body.3.out_conv.bias": src_state.pop("body.3.out_conv.bias"), - } - - assert len(src_state) == 0 - - adapter = T2IAdapter(in_channels=3, channels=[320, 640, 1280], num_res_blocks=4, adapter_type="light_adapter") - - adapter.load_state_dict(res_state) - - return adapter - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." - ) - parser.add_argument( - "--output_path", default=None, type=str, required=True, help="Path to the store the result checkpoint." - ) - parser.add_argument( - "--is_adapter_light", - action="store_true", - help="Is checkpoint come from Adapter-Light architecture. ex: color-adapter", - ) - parser.add_argument("--in_channels", required=False, type=int, help="Input channels for non-light adapter") - - args = parser.parse_args() - src_state = torch.load(args.checkpoint_path) - - if args.is_adapter_light: - adapter = convert_light_adapter(src_state) - else: - if args.in_channels is None: - raise ValueError("set `--in_channels=`") - adapter = convert_adapter(src_state, args.in_channels) - - adapter.save_pretrained(args.output_path) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/dual_transformer_2d.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/dual_transformer_2d.py deleted file mode 100644 index 3db7e73ca6afc5fa7c67c1902d79e67c1aa728bc..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/dual_transformer_2d.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import Optional - -from torch import nn - -from .transformer_2d import Transformer2DModel, Transformer2DModelOutput - - -class DualTransformer2DModel(nn.Module): - """ - Dual transformer wrapper that combines two `Transformer2DModel`s for mixed inference. - - Parameters: - num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. - attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. - in_channels (`int`, *optional*): - Pass if the input is continuous. The number of channels in the input and output. - num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. - dropout (`float`, *optional*, defaults to 0.1): The dropout probability to use. - cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use. - sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images. - Note that this is fixed at training time as it is used for learning a number of position embeddings. See - `ImagePositionalEmbeddings`. - num_vector_embeds (`int`, *optional*): - Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels. - Includes the class for the masked latent pixel. - activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. - num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`. - The number of diffusion steps used during training. Note that this is fixed at training time as it is used - to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for - up to but not more than steps than `num_embeds_ada_norm`. - attention_bias (`bool`, *optional*): - Configure if the TransformerBlocks' attention should contain a bias parameter. 
- """ - - def __init__( - self, - num_attention_heads: int = 16, - attention_head_dim: int = 88, - in_channels: Optional[int] = None, - num_layers: int = 1, - dropout: float = 0.0, - norm_num_groups: int = 32, - cross_attention_dim: Optional[int] = None, - attention_bias: bool = False, - sample_size: Optional[int] = None, - num_vector_embeds: Optional[int] = None, - activation_fn: str = "geglu", - num_embeds_ada_norm: Optional[int] = None, - ): - super().__init__() - self.transformers = nn.ModuleList( - [ - Transformer2DModel( - num_attention_heads=num_attention_heads, - attention_head_dim=attention_head_dim, - in_channels=in_channels, - num_layers=num_layers, - dropout=dropout, - norm_num_groups=norm_num_groups, - cross_attention_dim=cross_attention_dim, - attention_bias=attention_bias, - sample_size=sample_size, - num_vector_embeds=num_vector_embeds, - activation_fn=activation_fn, - num_embeds_ada_norm=num_embeds_ada_norm, - ) - for _ in range(2) - ] - ) - - # Variables that can be set by a pipeline: - - # The ratio of transformer1 to transformer2's output states to be combined during inference - self.mix_ratio = 0.5 - - # The shape of `encoder_hidden_states` is expected to be - # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` - self.condition_lengths = [77, 257] - - # Which transformer to use to encode which condition. - # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` - self.transformer_index_for_condition = [1, 0] - - def forward( - self, - hidden_states, - encoder_hidden_states, - timestep=None, - attention_mask=None, - cross_attention_kwargs=None, - return_dict: bool = True, - ): - """ - Args: - hidden_states ( When discrete, `torch.LongTensor` of shape `(batch size, num latent pixels)`. - When continuous, `torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input - hidden_states - encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*): - Conditional embeddings for cross attention layer. If not given, cross-attention defaults to - self-attention. - timestep ( `torch.long`, *optional*): - Optional timestep to be applied as an embedding in AdaLayerNorm's. Used to indicate denoising step. - attention_mask (`torch.FloatTensor`, *optional*): - Optional attention mask to be applied in Attention - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. - - Returns: - [`~models.transformer_2d.Transformer2DModelOutput`] or `tuple`: - [`~models.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a `tuple`. When - returning a tuple, the first element is the sample tensor. 
- """ - input_states = hidden_states - - encoded_states = [] - tokens_start = 0 - # attention_mask is not used yet - for i in range(2): - # for each of the two transformers, pass the corresponding condition tokens - condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] - transformer_index = self.transformer_index_for_condition[i] - encoded_state = self.transformers[transformer_index]( - input_states, - encoder_hidden_states=condition_state, - timestep=timestep, - cross_attention_kwargs=cross_attention_kwargs, - return_dict=False, - )[0] - encoded_states.append(encoded_state - input_states) - tokens_start += self.condition_lengths[i] - - output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) - output_states = output_states + input_states - - if not return_dict: - return (output_states,) - - return Transformer2DModelOutput(sample=output_states) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_512x1024_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index d2feeef7e982550481365f8187cb1a50f0fafcc9..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r50-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' -] diff --git a/spaces/Anonumous/RuImageCaptioning/README.md b/spaces/Anonumous/RuImageCaptioning/README.md deleted file mode 100644 index 313354e38369e9ea72d746d8c641ff15d788ec3a..0000000000000000000000000000000000000000 --- a/spaces/Anonumous/RuImageCaptioning/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: RuImageCaptionong -emoji: 👁 -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/utils/change_place.py b/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/utils/change_place.py deleted file mode 100644 index 121ff17830addba408f304f21ef091b2f8d51e19..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/utils/change_place.py +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/python -#****************************************************************# -# ScriptName: change_place.py -# Author: Anonymous_123 -# Create Date: 2022-08-26 14:13 -# Modify Author: Anonymous_123 -# Modify Date: 2022-08-26 14:13 -# Function: -#***************************************************************# - -import os -import torch -import torch.nn as nn -from torchvision.transforms import functional as TF -import cv2 -from PIL import Image -import numpy as np -import random -# random.seed(0) -import pdb -import imutils -from tqdm import tqdm - -def change_place(img, mask, bbox, invert_mask): - ''' - img: N,C,H,W - ''' - if invert_mask: - mask = 1-mask - - device = img.device - x,y,new_x,new_y,w,h = bbox - - img_ori = img.clone() - mask_ori = mask.clone() - img_ori = img_ori.to(device) - mask_ori = mask_ori.to(device) - - img[:,:, new_y:new_y+h, new_x:new_x+w] = img_ori[:,:, y:y+h, x:x+w] - mask_new = torch.zeros(mask.shape).to(device) - mask_new[:,:, new_y:new_y+h, new_x:new_x+w] = 
mask_ori[:,:, y:y+h, x:x+w] - mask_ = mask_new > 0.5 - img = img*mask_ + (~mask_)*img_ori - - if invert_mask: - mask_new = 1 - mask_new - - return img, mask_new - -def find_bbox(mask): - mask_copy = mask.copy() - - contours, _ = cv2.findContours(mask[:,:,0],cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) - bbox = [] - max_area = 0 - for cnt in contours: - x, y, w, h = cv2.boundingRect(cnt) - cv2.rectangle(mask_copy, (x, y), (x + w, y + h), (0, 255, 0), 2) - if max_area < w*h: - max_area = w*h - bbox = [x,y,w,h] - - if bbox == []: - return None - else: - H,W,C = mask.shape - x,y,w,h = bbox - new_x = random.randint(0, W-w) - new_y = random.randint(0, H-h) - return [x, y, new_x, new_y, w,h] - - -if __name__ == '__main__': - mask_path = 'n01440764/ILSVRC2012_val_00000293.png' - - ori_img_path_root = 'ImageNet-S/ImageNetS919/validation/' - outpainting_root = 'TFill/results/imagenet_2/test_latest/img_ref_out/' - padding_root = 'ImageNet-S/ImageNetS919/validation-size-0.05-padding-4901/' - mask_root = 'ImageNet-S/ImageNetS919/validation-segmentation-label-mask/' - - - imgs = os.listdir(outpainting_root) - - shape = (256,256) - for cls in tqdm(os.listdir(mask_root)): - for img_name in os.listdir(os.path.join(mask_root, cls)): - if not img_name.split('.')[0]+'_0.png' in imgs: - continue - img_path = os.path.join(ori_img_path_root, cls, img_name.split('.')[0]+'.JPEG') - img_path_init = os.path.join(outpainting_root, img_name.split('.')[0]+'_0.png') - img_path_2 = os.path.join(padding_root, cls, img_name.split('.')[0]+'.JPEG') - mask_path = os.path.join(mask_root, cls, img_name) - if os.path.exists(img_path) and os.path.exists(img_path_init) and os.path.exists(img_path_2) and os.path.exists(mask_path): - img = Image.open(img_path_2).convert('RGB') - img = img.resize(shape, Image.LANCZOS) - img = TF.to_tensor(img).unsqueeze(0).mul(2).sub(1) - - mask = Image.open(mask_path).convert('RGB') - mask = mask.resize(shape, Image.NEAREST) - bbox = find_bbox(np.array(mask)) - - mask = ((np.array(mask) > 0.5) * 255).astype(np.uint8) - - mask = TF.to_tensor(Image.fromarray(mask)) - mask = mask[0, ...].unsqueeze(0).unsqueeze(0) - - if bbox is not None: - img, mask = change_place(img, mask, bbox) - - img_init = Image.open(img_path_init).convert('RGB') - img_init = img_init.resize(shape, Image.LANCZOS) - img_init = TF.to_tensor(img_init).unsqueeze(0).mul(2).sub(1) - img_new = img_init*(1-mask) + img*mask - - img_new = np.transpose(((img_new+1)/2*255)[0].numpy(), (1,2,0))[:,:,::-1] - img_init = cv2.imread(img_path) - img_init = cv2.resize(img_init, shape) - # cv2.imwrite('tmp/'+img_name, cv2.hconcat([img_init, img_new.astype('uint8')])) - cv2.imwrite('tmp/'+img_name, img_new.astype('uint8')) - - diff --git a/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/results/celeba/test_latest/index.html b/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/results/celeba/test_latest/index.html deleted file mode 100644 index 955c0989bb6f9902364f7e6d9419c8d58984834e..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/results/celeba/test_latest/index.html +++ /dev/null @@ -1,563 +0,0 @@ - - - - Experiment = celeba, Phase = test, Epoch = latest - - -

      [HTML results gallery: Experiment = celeba, Phase = test, Epoch = latest — 15 test images (ILSVRC2012_test_00007239, 00031325, 00038546, 00038608, 00051208, 00055197, 00057270, 00061469, 00068490, 00074872, 00076650, 00079136, 00081141, 00088244, 00098832), each shown with img_org, img_m, img_out, and img_ref_out panels; table markup not recoverable.]
      - - \ No newline at end of file diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/point_sample.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/point_sample.py deleted file mode 100644 index 267f4b3c56630acd85f9bdc630b7be09abab0aba..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/point_sample.py +++ /dev/null @@ -1,336 +0,0 @@ -# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend # noqa - -from os import path as osp - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.modules.utils import _pair -from torch.onnx.operators import shape_as_tensor - - -def bilinear_grid_sample(im, grid, align_corners=False): - """Given an input and a flow-field grid, computes the output using input - values and pixel locations from grid. Supported only bilinear interpolation - method to sample the input pixels. - - Args: - im (torch.Tensor): Input feature map, shape (N, C, H, W) - grid (torch.Tensor): Point coordinates, shape (N, Hg, Wg, 2) - align_corners {bool}: If set to True, the extrema (-1 and 1) are - considered as referring to the center points of the input’s - corner pixels. If set to False, they are instead considered as - referring to the corner points of the input’s corner pixels, - making the sampling more resolution agnostic. - Returns: - torch.Tensor: A tensor with sampled points, shape (N, C, Hg, Wg) - """ - n, c, h, w = im.shape - gn, gh, gw, _ = grid.shape - assert n == gn - - x = grid[:, :, :, 0] - y = grid[:, :, :, 1] - - if align_corners: - x = ((x + 1) / 2) * (w - 1) - y = ((y + 1) / 2) * (h - 1) - else: - x = ((x + 1) * w - 1) / 2 - y = ((y + 1) * h - 1) / 2 - - x = x.view(n, -1) - y = y.view(n, -1) - - x0 = torch.floor(x).long() - y0 = torch.floor(y).long() - x1 = x0 + 1 - y1 = y0 + 1 - - wa = ((x1 - x) * (y1 - y)).unsqueeze(1) - wb = ((x1 - x) * (y - y0)).unsqueeze(1) - wc = ((x - x0) * (y1 - y)).unsqueeze(1) - wd = ((x - x0) * (y - y0)).unsqueeze(1) - - # Apply default for grid_sample function zero padding - im_padded = F.pad(im, pad=[1, 1, 1, 1], mode='constant', value=0) - padded_h = h + 2 - padded_w = w + 2 - # save points positions after padding - x0, x1, y0, y1 = x0 + 1, x1 + 1, y0 + 1, y1 + 1 - - # Clip coordinates to padded image size - x0 = torch.where(x0 < 0, torch.tensor(0), x0) - x0 = torch.where(x0 > padded_w - 1, torch.tensor(padded_w - 1), x0) - x1 = torch.where(x1 < 0, torch.tensor(0), x1) - x1 = torch.where(x1 > padded_w - 1, torch.tensor(padded_w - 1), x1) - y0 = torch.where(y0 < 0, torch.tensor(0), y0) - y0 = torch.where(y0 > padded_h - 1, torch.tensor(padded_h - 1), y0) - y1 = torch.where(y1 < 0, torch.tensor(0), y1) - y1 = torch.where(y1 > padded_h - 1, torch.tensor(padded_h - 1), y1) - - im_padded = im_padded.view(n, c, -1) - - x0_y0 = (x0 + y0 * padded_w).unsqueeze(1).expand(-1, c, -1) - x0_y1 = (x0 + y1 * padded_w).unsqueeze(1).expand(-1, c, -1) - x1_y0 = (x1 + y0 * padded_w).unsqueeze(1).expand(-1, c, -1) - x1_y1 = (x1 + y1 * padded_w).unsqueeze(1).expand(-1, c, -1) - - Ia = torch.gather(im_padded, 2, x0_y0) - Ib = torch.gather(im_padded, 2, x0_y1) - Ic = torch.gather(im_padded, 2, x1_y0) - Id = torch.gather(im_padded, 2, x1_y1) - - return (Ia * wa + Ib * wb + Ic * wc + Id * wd).reshape(n, c, gh, gw) - - -def is_in_onnx_export_without_custom_ops(): - from annotator.uniformer.mmcv.ops import get_onnxruntime_op_path - ort_custom_op_path = 
get_onnxruntime_op_path() - return torch.onnx.is_in_onnx_export( - ) and not osp.exists(ort_custom_op_path) - - -def normalize(grid): - """Normalize input grid from [-1, 1] to [0, 1] - Args: - grid (Tensor): The grid to be normalize, range [-1, 1]. - Returns: - Tensor: Normalized grid, range [0, 1]. - """ - - return (grid + 1.0) / 2.0 - - -def denormalize(grid): - """Denormalize input grid from range [0, 1] to [-1, 1] - Args: - grid (Tensor): The grid to be denormalize, range [0, 1]. - Returns: - Tensor: Denormalized grid, range [-1, 1]. - """ - - return grid * 2.0 - 1.0 - - -def generate_grid(num_grid, size, device): - """Generate regular square grid of points in [0, 1] x [0, 1] coordinate - space. - - Args: - num_grid (int): The number of grids to sample, one for each region. - size (tuple(int, int)): The side size of the regular grid. - device (torch.device): Desired device of returned tensor. - - Returns: - (torch.Tensor): A tensor of shape (num_grid, size[0]*size[1], 2) that - contains coordinates for the regular grids. - """ - - affine_trans = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]], device=device) - grid = F.affine_grid( - affine_trans, torch.Size((1, 1, *size)), align_corners=False) - grid = normalize(grid) - return grid.view(1, -1, 2).expand(num_grid, -1, -1) - - -def rel_roi_point_to_abs_img_point(rois, rel_roi_points): - """Convert roi based relative point coordinates to image based absolute - point coordinates. - - Args: - rois (Tensor): RoIs or BBoxes, shape (N, 4) or (N, 5) - rel_roi_points (Tensor): Point coordinates inside RoI, relative to - RoI, location, range (0, 1), shape (N, P, 2) - Returns: - Tensor: Image based absolute point coordinates, shape (N, P, 2) - """ - - with torch.no_grad(): - assert rel_roi_points.size(0) == rois.size(0) - assert rois.dim() == 2 - assert rel_roi_points.dim() == 3 - assert rel_roi_points.size(2) == 2 - # remove batch idx - if rois.size(1) == 5: - rois = rois[:, 1:] - abs_img_points = rel_roi_points.clone() - # To avoid an error during exporting to onnx use independent - # variables instead inplace computation - xs = abs_img_points[:, :, 0] * (rois[:, None, 2] - rois[:, None, 0]) - ys = abs_img_points[:, :, 1] * (rois[:, None, 3] - rois[:, None, 1]) - xs += rois[:, None, 0] - ys += rois[:, None, 1] - abs_img_points = torch.stack([xs, ys], dim=2) - return abs_img_points - - -def get_shape_from_feature_map(x): - """Get spatial resolution of input feature map considering exporting to - onnx mode. - - Args: - x (torch.Tensor): Input tensor, shape (N, C, H, W) - Returns: - torch.Tensor: Spatial resolution (width, height), shape (1, 1, 2) - """ - if torch.onnx.is_in_onnx_export(): - img_shape = shape_as_tensor(x)[2:].flip(0).view(1, 1, 2).to( - x.device).float() - else: - img_shape = torch.tensor(x.shape[2:]).flip(0).view(1, 1, 2).to( - x.device).float() - return img_shape - - -def abs_img_point_to_rel_img_point(abs_img_points, img, spatial_scale=1.): - """Convert image based absolute point coordinates to image based relative - coordinates for sampling. - - Args: - abs_img_points (Tensor): Image based absolute point coordinates, - shape (N, P, 2) - img (tuple/Tensor): (height, width) of image or feature map. - spatial_scale (float): Scale points by this factor. Default: 1. 
- - Returns: - Tensor: Image based relative point coordinates for sampling, - shape (N, P, 2) - """ - - assert (isinstance(img, tuple) and len(img) == 2) or \ - (isinstance(img, torch.Tensor) and len(img.shape) == 4) - - if isinstance(img, tuple): - h, w = img - scale = torch.tensor([w, h], - dtype=torch.float, - device=abs_img_points.device) - scale = scale.view(1, 1, 2) - else: - scale = get_shape_from_feature_map(img) - - return abs_img_points / scale * spatial_scale - - -def rel_roi_point_to_rel_img_point(rois, - rel_roi_points, - img, - spatial_scale=1.): - """Convert roi based relative point coordinates to image based absolute - point coordinates. - - Args: - rois (Tensor): RoIs or BBoxes, shape (N, 4) or (N, 5) - rel_roi_points (Tensor): Point coordinates inside RoI, relative to - RoI, location, range (0, 1), shape (N, P, 2) - img (tuple/Tensor): (height, width) of image or feature map. - spatial_scale (float): Scale points by this factor. Default: 1. - - Returns: - Tensor: Image based relative point coordinates for sampling, - shape (N, P, 2) - """ - - abs_img_point = rel_roi_point_to_abs_img_point(rois, rel_roi_points) - rel_img_point = abs_img_point_to_rel_img_point(abs_img_point, img, - spatial_scale) - - return rel_img_point - - -def point_sample(input, points, align_corners=False, **kwargs): - """A wrapper around :func:`grid_sample` to support 3D point_coords tensors - Unlike :func:`torch.nn.functional.grid_sample` it assumes point_coords to - lie inside ``[0, 1] x [0, 1]`` square. - - Args: - input (Tensor): Feature map, shape (N, C, H, W). - points (Tensor): Image based absolute point coordinates (normalized), - range [0, 1] x [0, 1], shape (N, P, 2) or (N, Hgrid, Wgrid, 2). - align_corners (bool): Whether align_corners. Default: False - - Returns: - Tensor: Features of `point` on `input`, shape (N, C, P) or - (N, C, Hgrid, Wgrid). - """ - - add_dim = False - if points.dim() == 3: - add_dim = True - points = points.unsqueeze(2) - if is_in_onnx_export_without_custom_ops(): - # If custom ops for onnx runtime not compiled use python - # implementation of grid_sample function to make onnx graph - # with supported nodes - output = bilinear_grid_sample( - input, denormalize(points), align_corners=align_corners) - else: - output = F.grid_sample( - input, denormalize(points), align_corners=align_corners, **kwargs) - if add_dim: - output = output.squeeze(3) - return output - - -class SimpleRoIAlign(nn.Module): - - def __init__(self, output_size, spatial_scale, aligned=True): - """Simple RoI align in PointRend, faster than standard RoIAlign. - - Args: - output_size (tuple[int]): h, w - spatial_scale (float): scale the input boxes by this number - aligned (bool): if False, use the legacy implementation in - MMDetection, align_corners=True will be used in F.grid_sample. - If True, align the results more perfectly. 
- """ - - super(SimpleRoIAlign, self).__init__() - self.output_size = _pair(output_size) - self.spatial_scale = float(spatial_scale) - # to be consistent with other RoI ops - self.use_torchvision = False - self.aligned = aligned - - def forward(self, features, rois): - num_imgs = features.size(0) - num_rois = rois.size(0) - rel_roi_points = generate_grid( - num_rois, self.output_size, device=rois.device) - - if torch.onnx.is_in_onnx_export(): - rel_img_points = rel_roi_point_to_rel_img_point( - rois, rel_roi_points, features, self.spatial_scale) - rel_img_points = rel_img_points.reshape(num_imgs, -1, - *rel_img_points.shape[1:]) - point_feats = point_sample( - features, rel_img_points, align_corners=not self.aligned) - point_feats = point_feats.transpose(1, 2) - else: - point_feats = [] - for batch_ind in range(num_imgs): - # unravel batch dim - feat = features[batch_ind].unsqueeze(0) - inds = (rois[:, 0].long() == batch_ind) - if inds.any(): - rel_img_points = rel_roi_point_to_rel_img_point( - rois[inds], rel_roi_points[inds], feat, - self.spatial_scale).unsqueeze(0) - point_feat = point_sample( - feat, rel_img_points, align_corners=not self.aligned) - point_feat = point_feat.squeeze(0).transpose(0, 1) - point_feats.append(point_feat) - - point_feats = torch.cat(point_feats, dim=0) - - channels = features.size(1) - roi_feats = point_feats.reshape(num_rois, channels, *self.output_size) - - return roi_feats - - def __repr__(self): - format_str = self.__class__.__name__ - format_str += '(output_size={}, spatial_scale={}'.format( - self.output_size, self.spatial_scale) - return format_str diff --git a/spaces/Ariharasudhan/YoloV5/utils/segment/plots.py b/spaces/Ariharasudhan/YoloV5/utils/segment/plots.py deleted file mode 100644 index 9b90900b3772fe23dbd57deb64221f98e563b069..0000000000000000000000000000000000000000 --- a/spaces/Ariharasudhan/YoloV5/utils/segment/plots.py +++ /dev/null @@ -1,143 +0,0 @@ -import contextlib -import math -from pathlib import Path - -import cv2 -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -import torch - -from .. import threaded -from ..general import xywh2xyxy -from ..plots import Annotator, colors - - -@threaded -def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg', names=None): - # Plot image grid with labels - if isinstance(images, torch.Tensor): - images = images.cpu().float().numpy() - if isinstance(targets, torch.Tensor): - targets = targets.cpu().numpy() - if isinstance(masks, torch.Tensor): - masks = masks.cpu().numpy().astype(int) - - max_size = 1920 # max image size - max_subplots = 16 # max image subplots, i.e. 
4x4 - bs, _, h, w = images.shape # batch size, _, height, width - bs = min(bs, max_subplots) # limit plot images - ns = np.ceil(bs ** 0.5) # number of subplots (square) - if np.max(images[0]) <= 1: - images *= 255 # de-normalise (optional) - - # Build Image - mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init - for i, im in enumerate(images): - if i == max_subplots: # if last batch has fewer images than we expect - break - x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin - im = im.transpose(1, 2, 0) - mosaic[y:y + h, x:x + w, :] = im - - # Resize (optional) - scale = max_size / ns / max(h, w) - if scale < 1: - h = math.ceil(scale * h) - w = math.ceil(scale * w) - mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) - - # Annotate - fs = int((h + w) * ns * 0.01) # font size - annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) - for i in range(i + 1): - x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin - annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders - if paths: - annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames - if len(targets) > 0: - idx = targets[:, 0] == i - ti = targets[idx] # image targets - - boxes = xywh2xyxy(ti[:, 2:6]).T - classes = ti[:, 1].astype('int') - labels = ti.shape[1] == 6 # labels if no conf column - conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) - - if boxes.shape[1]: - if boxes.max() <= 1.01: # if normalized with tolerance 0.01 - boxes[[0, 2]] *= w # scale to pixels - boxes[[1, 3]] *= h - elif scale < 1: # absolute coords need scale if image scales - boxes *= scale - boxes[[0, 2]] += x - boxes[[1, 3]] += y - for j, box in enumerate(boxes.T.tolist()): - cls = classes[j] - color = colors(cls) - cls = names[cls] if names else cls - if labels or conf[j] > 0.25: # 0.25 conf thresh - label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' - annotator.box_label(box, label, color=color) - - # Plot masks - if len(masks): - if masks.max() > 1.0: # mean that masks are overlap - image_masks = masks[[i]] # (1, 640, 640) - nl = len(ti) - index = np.arange(nl).reshape(nl, 1, 1) + 1 - image_masks = np.repeat(image_masks, nl, axis=0) - image_masks = np.where(image_masks == index, 1.0, 0.0) - else: - image_masks = masks[idx] - - im = np.asarray(annotator.im).copy() - for j, box in enumerate(boxes.T.tolist()): - if labels or conf[j] > 0.25: # 0.25 conf thresh - color = colors(classes[j]) - mh, mw = image_masks[j].shape - if mh != h or mw != w: - mask = image_masks[j].astype(np.uint8) - mask = cv2.resize(mask, (w, h)) - mask = mask.astype(bool) - else: - mask = image_masks[j].astype(bool) - with contextlib.suppress(Exception): - im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6 - annotator.fromarray(im) - annotator.im.save(fname) # save - - -def plot_results_with_masks(file="path/to/results.csv", dir="", best=True): - # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') - save_dir = Path(file).parent if file else Path(dir) - fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True) - ax = ax.ravel() - files = list(save_dir.glob("results*.csv")) - assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot." 
- for f in files: - try: - data = pd.read_csv(f) - index = np.argmax(0.9 * data.values[:, 8] + 0.1 * data.values[:, 7] + 0.9 * data.values[:, 12] + - 0.1 * data.values[:, 11]) - s = [x.strip() for x in data.columns] - x = data.values[:, 0] - for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]): - y = data.values[:, j] - # y[y == 0] = np.nan # don't show zero values - ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=2) - if best: - # best - ax[i].scatter(index, y[index], color="r", label=f"best:{index}", marker="*", linewidth=3) - ax[i].set_title(s[j] + f"\n{round(y[index], 5)}") - else: - # last - ax[i].scatter(x[-1], y[-1], color="r", label="last", marker="*", linewidth=3) - ax[i].set_title(s[j] + f"\n{round(y[-1], 5)}") - # if j in [8, 9, 10]: # share train and val loss y axes - # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) - except Exception as e: - print(f"Warning: Plotting error for {f}: {e}") - ax[1].legend() - fig.savefig(save_dir / "results.png", dpi=200) - plt.close() diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/manifest.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/manifest.py deleted file mode 100644 index ca0fe442d9ca499466df9438df16eca405c5f102..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/manifest.py +++ /dev/null @@ -1,393 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012-2013 Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. -# -""" -Class representing the list of files in a distribution. - -Equivalent to distutils.filelist, but fixes some problems. -""" -import fnmatch -import logging -import os -import re -import sys - -from . import DistlibException -from .compat import fsdecode -from .util import convert_path - - -__all__ = ['Manifest'] - -logger = logging.getLogger(__name__) - -# a \ followed by some spaces + EOL -_COLLAPSE_PATTERN = re.compile('\\\\w*\n', re.M) -_COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S) - -# -# Due to the different results returned by fnmatch.translate, we need -# to do slightly different processing for Python 2.7 and 3.2 ... this needed -# to be brought in for Python 3.6 onwards. -# -_PYTHON_VERSION = sys.version_info[:2] - -class Manifest(object): - """A list of files built by on exploring the filesystem and filtered by - applying various patterns to what we find there. - """ - - def __init__(self, base=None): - """ - Initialise an instance. - - :param base: The base directory to explore under. - """ - self.base = os.path.abspath(os.path.normpath(base or os.getcwd())) - self.prefix = self.base + os.sep - self.allfiles = None - self.files = set() - - # - # Public API - # - - def findall(self): - """Find all files under the base and set ``allfiles`` to the absolute - pathnames of files found. - """ - from stat import S_ISREG, S_ISDIR, S_ISLNK - - self.allfiles = allfiles = [] - root = self.base - stack = [root] - pop = stack.pop - push = stack.append - - while stack: - root = pop() - names = os.listdir(root) - - for name in names: - fullname = os.path.join(root, name) - - # Avoid excess stat calls -- just one will do, thank you! 
- stat = os.stat(fullname) - mode = stat.st_mode - if S_ISREG(mode): - allfiles.append(fsdecode(fullname)) - elif S_ISDIR(mode) and not S_ISLNK(mode): - push(fullname) - - def add(self, item): - """ - Add a file to the manifest. - - :param item: The pathname to add. This can be relative to the base. - """ - if not item.startswith(self.prefix): - item = os.path.join(self.base, item) - self.files.add(os.path.normpath(item)) - - def add_many(self, items): - """ - Add a list of files to the manifest. - - :param items: The pathnames to add. These can be relative to the base. - """ - for item in items: - self.add(item) - - def sorted(self, wantdirs=False): - """ - Return sorted files in directory order - """ - - def add_dir(dirs, d): - dirs.add(d) - logger.debug('add_dir added %s', d) - if d != self.base: - parent, _ = os.path.split(d) - assert parent not in ('', '/') - add_dir(dirs, parent) - - result = set(self.files) # make a copy! - if wantdirs: - dirs = set() - for f in result: - add_dir(dirs, os.path.dirname(f)) - result |= dirs - return [os.path.join(*path_tuple) for path_tuple in - sorted(os.path.split(path) for path in result)] - - def clear(self): - """Clear all collected files.""" - self.files = set() - self.allfiles = [] - - def process_directive(self, directive): - """ - Process a directive which either adds some files from ``allfiles`` to - ``files``, or removes some files from ``files``. - - :param directive: The directive to process. This should be in a format - compatible with distutils ``MANIFEST.in`` files: - - http://docs.python.org/distutils/sourcedist.html#commands - """ - # Parse the line: split it up, make sure the right number of words - # is there, and return the relevant words. 'action' is always - # defined: it's the first word of the line. Which of the other - # three are defined depends on the action; it'll be either - # patterns, (dir and patterns), or (dirpattern). - action, patterns, thedir, dirpattern = self._parse_directive(directive) - - # OK, now we know that the action is valid and we have the - # right number of words on the line for that action -- so we - # can proceed with minimal error-checking. 
- if action == 'include': - for pattern in patterns: - if not self._include_pattern(pattern, anchor=True): - logger.warning('no files found matching %r', pattern) - - elif action == 'exclude': - for pattern in patterns: - found = self._exclude_pattern(pattern, anchor=True) - #if not found: - # logger.warning('no previously-included files ' - # 'found matching %r', pattern) - - elif action == 'global-include': - for pattern in patterns: - if not self._include_pattern(pattern, anchor=False): - logger.warning('no files found matching %r ' - 'anywhere in distribution', pattern) - - elif action == 'global-exclude': - for pattern in patterns: - found = self._exclude_pattern(pattern, anchor=False) - #if not found: - # logger.warning('no previously-included files ' - # 'matching %r found anywhere in ' - # 'distribution', pattern) - - elif action == 'recursive-include': - for pattern in patterns: - if not self._include_pattern(pattern, prefix=thedir): - logger.warning('no files found matching %r ' - 'under directory %r', pattern, thedir) - - elif action == 'recursive-exclude': - for pattern in patterns: - found = self._exclude_pattern(pattern, prefix=thedir) - #if not found: - # logger.warning('no previously-included files ' - # 'matching %r found under directory %r', - # pattern, thedir) - - elif action == 'graft': - if not self._include_pattern(None, prefix=dirpattern): - logger.warning('no directories found matching %r', - dirpattern) - - elif action == 'prune': - if not self._exclude_pattern(None, prefix=dirpattern): - logger.warning('no previously-included directories found ' - 'matching %r', dirpattern) - else: # pragma: no cover - # This should never happen, as it should be caught in - # _parse_template_line - raise DistlibException( - 'invalid action %r' % action) - - # - # Private API - # - - def _parse_directive(self, directive): - """ - Validate a directive. - :param directive: The directive to validate. - :return: A tuple of action, patterns, thedir, dir_patterns - """ - words = directive.split() - if len(words) == 1 and words[0] not in ('include', 'exclude', - 'global-include', - 'global-exclude', - 'recursive-include', - 'recursive-exclude', - 'graft', 'prune'): - # no action given, let's use the default 'include' - words.insert(0, 'include') - - action = words[0] - patterns = thedir = dir_pattern = None - - if action in ('include', 'exclude', - 'global-include', 'global-exclude'): - if len(words) < 2: - raise DistlibException( - '%r expects ...' % action) - - patterns = [convert_path(word) for word in words[1:]] - - elif action in ('recursive-include', 'recursive-exclude'): - if len(words) < 3: - raise DistlibException( - '%r expects ...' % action) - - thedir = convert_path(words[1]) - patterns = [convert_path(word) for word in words[2:]] - - elif action in ('graft', 'prune'): - if len(words) != 2: - raise DistlibException( - '%r expects a single ' % action) - - dir_pattern = convert_path(words[1]) - - else: - raise DistlibException('unknown action %r' % action) - - return action, patterns, thedir, dir_pattern - - def _include_pattern(self, pattern, anchor=True, prefix=None, - is_regex=False): - """Select strings (presumably filenames) from 'self.files' that - match 'pattern', a Unix-style wildcard (glob) pattern. - - Patterns are not quite the same as implemented by the 'fnmatch' - module: '*' and '?' match non-special characters, where "special" - is platform-dependent: slash on Unix; colon, slash, and backslash on - DOS/Windows; and colon on Mac OS. 
- - If 'anchor' is true (the default), then the pattern match is more - stringent: "*.py" will match "foo.py" but not "foo/bar.py". If - 'anchor' is false, both of these will match. - - If 'prefix' is supplied, then only filenames starting with 'prefix' - (itself a pattern) and ending with 'pattern', with anything in between - them, will match. 'anchor' is ignored in this case. - - If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and - 'pattern' is assumed to be either a string containing a regex or a - regex object -- no translation is done, the regex is just compiled - and used as-is. - - Selected strings will be added to self.files. - - Return True if files are found. - """ - # XXX docstring lying about what the special chars are? - found = False - pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) - - # delayed loading of allfiles list - if self.allfiles is None: - self.findall() - - for name in self.allfiles: - if pattern_re.search(name): - self.files.add(name) - found = True - return found - - def _exclude_pattern(self, pattern, anchor=True, prefix=None, - is_regex=False): - """Remove strings (presumably filenames) from 'files' that match - 'pattern'. - - Other parameters are the same as for 'include_pattern()', above. - The list 'self.files' is modified in place. Return True if files are - found. - - This API is public to allow e.g. exclusion of SCM subdirs, e.g. when - packaging source distributions - """ - found = False - pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) - for f in list(self.files): - if pattern_re.search(f): - self.files.remove(f) - found = True - return found - - def _translate_pattern(self, pattern, anchor=True, prefix=None, - is_regex=False): - """Translate a shell-like wildcard pattern to a compiled regular - expression. - - Return the compiled regex. If 'is_regex' true, - then 'pattern' is directly compiled to a regex (if it's a string) - or just returned as-is (assumes it's a regex object). - """ - if is_regex: - if isinstance(pattern, str): - return re.compile(pattern) - else: - return pattern - - if _PYTHON_VERSION > (3, 2): - # ditch start and end characters - start, _, end = self._glob_to_re('_').partition('_') - - if pattern: - pattern_re = self._glob_to_re(pattern) - if _PYTHON_VERSION > (3, 2): - assert pattern_re.startswith(start) and pattern_re.endswith(end) - else: - pattern_re = '' - - base = re.escape(os.path.join(self.base, '')) - if prefix is not None: - # ditch end of pattern character - if _PYTHON_VERSION <= (3, 2): - empty_pattern = self._glob_to_re('') - prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)] - else: - prefix_re = self._glob_to_re(prefix) - assert prefix_re.startswith(start) and prefix_re.endswith(end) - prefix_re = prefix_re[len(start): len(prefix_re) - len(end)] - sep = os.sep - if os.sep == '\\': - sep = r'\\' - if _PYTHON_VERSION <= (3, 2): - pattern_re = '^' + base + sep.join((prefix_re, - '.*' + pattern_re)) - else: - pattern_re = pattern_re[len(start): len(pattern_re) - len(end)] - pattern_re = r'%s%s%s%s.*%s%s' % (start, base, prefix_re, sep, - pattern_re, end) - else: # no prefix -- respect anchor flag - if anchor: - if _PYTHON_VERSION <= (3, 2): - pattern_re = '^' + base + pattern_re - else: - pattern_re = r'%s%s%s' % (start, base, pattern_re[len(start):]) - - return re.compile(pattern_re) - - def _glob_to_re(self, pattern): - """Translate a shell-like glob pattern to a regular expression. - - Return a string containing the regex. 
Differs from - 'fnmatch.translate()' in that '*' does not match "special characters" - (which are platform-specific). - """ - pattern_re = fnmatch.translate(pattern) - - # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which - # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix, - # and by extension they shouldn't match such "special characters" under - # any OS. So change all non-escaped dots in the RE to match any - # character except the special characters (currently: just os.sep). - sep = os.sep - if os.sep == '\\': - # we're using a regex to manipulate a regex, so we need - # to escape the backslash twice - sep = r'\\\\' - escaped = r'\1[^%s]' % sep - pattern_re = re.sub(r'((? None: - self.layout = layout - self.style = style - - def __rich_console__( - self, console: Console, options: ConsoleOptions - ) -> RenderResult: - width = options.max_width - height = options.height or options.size.height - layout = self.layout - title = ( - f"{layout.name!r} ({width} x {height})" - if layout.name - else f"({width} x {height})" - ) - yield Panel( - Align.center(Pretty(layout), vertical="middle"), - style=self.style, - title=self.highlighter(title), - border_style="blue", - height=height, - ) - - -class Splitter(ABC): - """Base class for a splitter.""" - - name: str = "" - - @abstractmethod - def get_tree_icon(self) -> str: - """Get the icon (emoji) used in layout.tree""" - - @abstractmethod - def divide( - self, children: Sequence["Layout"], region: Region - ) -> Iterable[Tuple["Layout", Region]]: - """Divide a region amongst several child layouts. - - Args: - children (Sequence(Layout)): A number of child layouts. - region (Region): A rectangular region to divide. - """ - - -class RowSplitter(Splitter): - """Split a layout region in to rows.""" - - name = "row" - - def get_tree_icon(self) -> str: - return "[layout.tree.row]⬌" - - def divide( - self, children: Sequence["Layout"], region: Region - ) -> Iterable[Tuple["Layout", Region]]: - x, y, width, height = region - render_widths = ratio_resolve(width, children) - offset = 0 - _Region = Region - for child, child_width in zip(children, render_widths): - yield child, _Region(x + offset, y, child_width, height) - offset += child_width - - -class ColumnSplitter(Splitter): - """Split a layout region in to columns.""" - - name = "column" - - def get_tree_icon(self) -> str: - return "[layout.tree.column]⬍" - - def divide( - self, children: Sequence["Layout"], region: Region - ) -> Iterable[Tuple["Layout", Region]]: - x, y, width, height = region - render_heights = ratio_resolve(height, children) - offset = 0 - _Region = Region - for child, child_height in zip(children, render_heights): - yield child, _Region(x, y + offset, width, child_height) - offset += child_height - - -@rich_repr -class Layout: - """A renderable to divide a fixed height in to rows or columns. - - Args: - renderable (RenderableType, optional): Renderable content, or None for placeholder. Defaults to None. - name (str, optional): Optional identifier for Layout. Defaults to None. - size (int, optional): Optional fixed size of layout. Defaults to None. - minimum_size (int, optional): Minimum size of layout. Defaults to 1. - ratio (int, optional): Optional ratio for flexible layout. Defaults to 1. - visible (bool, optional): Visibility of layout. Defaults to True. 
- """ - - splitters = {"row": RowSplitter, "column": ColumnSplitter} - - def __init__( - self, - renderable: Optional[RenderableType] = None, - *, - name: Optional[str] = None, - size: Optional[int] = None, - minimum_size: int = 1, - ratio: int = 1, - visible: bool = True, - ) -> None: - self._renderable = renderable or _Placeholder(self) - self.size = size - self.minimum_size = minimum_size - self.ratio = ratio - self.name = name - self.visible = visible - self.splitter: Splitter = self.splitters["column"]() - self._children: List[Layout] = [] - self._render_map: RenderMap = {} - self._lock = RLock() - - def __rich_repr__(self) -> Result: - yield "name", self.name, None - yield "size", self.size, None - yield "minimum_size", self.minimum_size, 1 - yield "ratio", self.ratio, 1 - - @property - def renderable(self) -> RenderableType: - """Layout renderable.""" - return self if self._children else self._renderable - - @property - def children(self) -> List["Layout"]: - """Gets (visible) layout children.""" - return [child for child in self._children if child.visible] - - @property - def map(self) -> RenderMap: - """Get a map of the last render.""" - return self._render_map - - def get(self, name: str) -> Optional["Layout"]: - """Get a named layout, or None if it doesn't exist. - - Args: - name (str): Name of layout. - - Returns: - Optional[Layout]: Layout instance or None if no layout was found. - """ - if self.name == name: - return self - else: - for child in self._children: - named_layout = child.get(name) - if named_layout is not None: - return named_layout - return None - - def __getitem__(self, name: str) -> "Layout": - layout = self.get(name) - if layout is None: - raise KeyError(f"No layout with name {name!r}") - return layout - - @property - def tree(self) -> "Tree": - """Get a tree renderable to show layout structure.""" - from pip._vendor.rich.styled import Styled - from pip._vendor.rich.table import Table - from pip._vendor.rich.tree import Tree - - def summary(layout: "Layout") -> Table: - - icon = layout.splitter.get_tree_icon() - - table = Table.grid(padding=(0, 1, 0, 0)) - - text: RenderableType = ( - Pretty(layout) if layout.visible else Styled(Pretty(layout), "dim") - ) - table.add_row(icon, text) - _summary = table - return _summary - - layout = self - tree = Tree( - summary(layout), - guide_style=f"layout.tree.{layout.splitter.name}", - highlight=True, - ) - - def recurse(tree: "Tree", layout: "Layout") -> None: - for child in layout._children: - recurse( - tree.add( - summary(child), - guide_style=f"layout.tree.{child.splitter.name}", - ), - child, - ) - - recurse(tree, self) - return tree - - def split( - self, - *layouts: Union["Layout", RenderableType], - splitter: Union[Splitter, str] = "column", - ) -> None: - """Split the layout in to multiple sub-layouts. - - Args: - *layouts (Layout): Positional arguments should be (sub) Layout instances. - splitter (Union[Splitter, str]): Splitter instance or name of splitter. - """ - _layouts = [ - layout if isinstance(layout, Layout) else Layout(layout) - for layout in layouts - ] - try: - self.splitter = ( - splitter - if isinstance(splitter, Splitter) - else self.splitters[splitter]() - ) - except KeyError: - raise NoSplitter(f"No splitter called {splitter!r}") - self._children[:] = _layouts - - def add_split(self, *layouts: Union["Layout", RenderableType]) -> None: - """Add a new layout(s) to existing split. 
- - Args: - *layouts (Union[Layout, RenderableType]): Positional arguments should be renderables or (sub) Layout instances. - - """ - _layouts = ( - layout if isinstance(layout, Layout) else Layout(layout) - for layout in layouts - ) - self._children.extend(_layouts) - - def split_row(self, *layouts: Union["Layout", RenderableType]) -> None: - """Split the layout in to a row (layouts side by side). - - Args: - *layouts (Layout): Positional arguments should be (sub) Layout instances. - """ - self.split(*layouts, splitter="row") - - def split_column(self, *layouts: Union["Layout", RenderableType]) -> None: - """Split the layout in to a column (layouts stacked on top of each other). - - Args: - *layouts (Layout): Positional arguments should be (sub) Layout instances. - """ - self.split(*layouts, splitter="column") - - def unsplit(self) -> None: - """Reset splits to initial state.""" - del self._children[:] - - def update(self, renderable: RenderableType) -> None: - """Update renderable. - - Args: - renderable (RenderableType): New renderable object. - """ - with self._lock: - self._renderable = renderable - - def refresh_screen(self, console: "Console", layout_name: str) -> None: - """Refresh a sub-layout. - - Args: - console (Console): Console instance where Layout is to be rendered. - layout_name (str): Name of layout. - """ - with self._lock: - layout = self[layout_name] - region, _lines = self._render_map[layout] - (x, y, width, height) = region - lines = console.render_lines( - layout, console.options.update_dimensions(width, height) - ) - self._render_map[layout] = LayoutRender(region, lines) - console.update_screen_lines(lines, x, y) - - def _make_region_map(self, width: int, height: int) -> RegionMap: - """Create a dict that maps layout on to Region.""" - stack: List[Tuple[Layout, Region]] = [(self, Region(0, 0, width, height))] - push = stack.append - pop = stack.pop - layout_regions: List[Tuple[Layout, Region]] = [] - append_layout_region = layout_regions.append - while stack: - append_layout_region(pop()) - layout, region = layout_regions[-1] - children = layout.children - if children: - for child_and_region in layout.splitter.divide(children, region): - push(child_and_region) - - region_map = { - layout: region - for layout, region in sorted(layout_regions, key=itemgetter(1)) - } - return region_map - - def render(self, console: Console, options: ConsoleOptions) -> RenderMap: - """Render the sub_layouts. - - Args: - console (Console): Console instance. - options (ConsoleOptions): Console options. 
- - Returns: - RenderMap: A dict that maps Layout on to a tuple of Region, lines - """ - render_width = options.max_width - render_height = options.height or console.height - region_map = self._make_region_map(render_width, render_height) - layout_regions = [ - (layout, region) - for layout, region in region_map.items() - if not layout.children - ] - render_map: Dict["Layout", "LayoutRender"] = {} - render_lines = console.render_lines - update_dimensions = options.update_dimensions - - for layout, region in layout_regions: - lines = render_lines( - layout.renderable, update_dimensions(region.width, region.height) - ) - render_map[layout] = LayoutRender(region, lines) - return render_map - - def __rich_console__( - self, console: Console, options: ConsoleOptions - ) -> RenderResult: - with self._lock: - width = options.max_width or console.width - height = options.height or console.height - render_map = self.render(console, options.update_dimensions(width, height)) - self._render_map = render_map - layout_lines: List[List[Segment]] = [[] for _ in range(height)] - _islice = islice - for (region, lines) in render_map.values(): - _x, y, _layout_width, layout_height = region - for row, line in zip( - _islice(layout_lines, y, y + layout_height), lines - ): - row.extend(line) - - new_line = Segment.line() - for layout_row in layout_lines: - yield from layout_row - yield new_line - - -if __name__ == "__main__": - from pip._vendor.rich.console import Console - - console = Console() - layout = Layout() - - layout.split_column( - Layout(name="header", size=3), - Layout(ratio=1, name="main"), - Layout(size=10, name="footer"), - ) - - layout["main"].split_row(Layout(name="side"), Layout(name="body", ratio=2)) - - layout["body"].split_row(Layout(name="content", ratio=2), Layout(name="s2")) - - layout["s2"].split_column( - Layout(name="top"), Layout(name="middle"), Layout(name="bottom") - ) - - layout["side"].split_column(Layout(layout.tree, name="left1"), Layout(name="left2")) - - layout["content"].update("foo") - - console.print(layout) diff --git a/spaces/Benson/text-generation/Examples/918kiss Descarga Apk Kaya 2021.md b/spaces/Benson/text-generation/Examples/918kiss Descarga Apk Kaya 2021.md deleted file mode 100644 index 38930fcbf99c074442ed601b46424bcbafb5fc59..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/918kiss Descarga Apk Kaya 2021.md +++ /dev/null @@ -1,73 +0,0 @@ -
      -

918kiss Kaya APK Download 2021: How to Play and Win Online Casino Games

-

If you are looking for a fun and exciting way to enjoy online casino games on your mobile device, you should try 918kiss Kaya APK. It is one of the most popular and trusted online casino platforms in Malaysia, offering a variety of games such as slots, table games, live casino, fishing games, and more. In this article, we will show you how to download and install 918kiss Kaya APK on your Android or iOS device, how to register and log in to your account, and how to play and win online casino games on this platform.

-

What is 918kiss Kaya APK?

-

918kiss Kaya APK is an online casino application that gives you access to hundreds of games from different providers, such as 918Kiss, Mega888, Pussy888, XE88, Joker123, and more. You can play these games anytime and anywhere you want, as long as you have a stable Internet connection and a compatible device.

-

918kiss download apk kaya 2021


      Download Zip 🔗 https://bltlly.com/2v6KXl



      -

Features of 918kiss Kaya APK

-

Some of the features that make 918kiss Kaya APK stand out from other online casino platforms are:

-
• High-quality graphics and sound effects that create an immersive gaming experience.
• A smooth, user-friendly interface that makes it easy to navigate and play.
• A secure, encrypted system that protects your personal and financial information.
• Fair and random game results that give you a fair chance of winning.
• Generous bonuses and promotions that reward your loyalty and activity.
• 24/7 customer service that provides fast, professional assistance.
-

Benefits of 918kiss Kaya APK

-

Some of the benefits you can enjoy when playing on 918kiss Kaya APK are:

-
• You can play a wide range of games across different genres and themes, such as classic slots, video slots, progressive slots, blackjack, roulette, baccarat, poker, sic bo, dragon tiger, fish hunter, and more.
• You can win big prizes and jackpots, especially if you play the progressive slots or the live casino games.
• You can have fun and relax while improving your skills and strategies.
-

How to download and install 918kiss Kaya APK?

-

Downloading and installing 918kiss Kaya APK is quick and easy. Just follow these simple steps:

-

For Android devices

-
1. Go to the official 918kiss Kaya APK website and click the download button for Android devices.
2. Allow downloads from unknown sources in your device settings.
3. Open the downloaded file and install the app.
4. Launch the app and enjoy playing.
-

For iOS devices

-
1. Go to the official 918kiss Kaya APK website and click the download button for iOS devices.
2. Trust the developer in your device settings.
3. Open the downloaded file and install the app.
4. Launch the app and enjoy playing.
-

How to register and log in to 918kiss Kaya APK?

-

To play on 918kiss Kaya APK, you need to register an account and log in with your username and password. Here is how you can do that:

-

Register with an official agent

-

The best way to register an account on 918kiss Kaya APK is to contact an official agent. You can find them on the official website, social media platforms, or online forums. They will guide you through the registration process and provide you with a username and password. You will also need to make a deposit to activate your account.

-

Log in with your username and password

-

Once you have your username and password, you can log in to your account on 918kiss Kaya APK. Simply enter your credentials on the login page and click the login button. You will then be able to access all the games and features of the platform.

-

How to play and win online casino games on 918kiss Kaya APK?

Playing and winning online casino games on 918kiss Kaya APK is not only fun but also rewarding. Here are some tips that can help you improve your chances of winning:

-

Choose your favorite game

-

The first thing you need to do is choose a game that suits your taste and skill level. You can browse the different categories and genres of games on 918kiss Kaya APK and try them for free or for real money. You can also check the reviews and ratings from other players to see which games are popular and profitable.

-

Learn the rules and strategies

-

The next thing you need to do is learn the rules and strategies of the game you have chosen. You can read the instructions and tips on the game screen or on the official 918kiss Kaya APK website. You can also watch online videos or tutorials that explain how to play and win the game. The more you know about the game, the better you will play it.

-

Manage your bankroll and bet wisely

-

The last thing you need to do is manage your bankroll and bet wisely. You should set a budget for your gaming session and stick to it. You should also avoid chasing your losses or betting more than you can afford. Make use of the bonuses and promotions that 918kiss Kaya APK offers to grow your bankroll and increase your chances of winning.

      -

Conclusion

-

In conclusion, 918kiss Kaya APK is one of the best online casino platforms in Malaysia, offering a variety of games, features, benefits, and bonuses. You can download and install it on your Android or iOS device, register and log in to your account, and play and win online casino games on the platform. If you are looking for a fun and exciting way to enjoy online casino games on your mobile device, give 918kiss Kaya APK a try today!

      -

Frequently asked questions

-

Here are some frequently asked questions about 918kiss Kaya APK:

-
• Is 918kiss Kaya APK safe and legal?
• Yes, 918kiss Kaya APK is safe and legal. It uses a secure, encrypted system that protects your personal and financial information, complies with the laws and regulations of the online gaming industry in Malaysia, and is licensed and regulated by the relevant authorities.
• What are the minimum requirements to play on 918kiss Kaya APK?
• The minimum requirements to play on 918kiss Kaya APK are a compatible Android or iOS device, a stable Internet connection, and a registered account. You also need to be at least 18 years old to play on this platform.
• How can I withdraw my winnings from 918kiss Kaya APK?
• You can withdraw your winnings from 918kiss Kaya APK by contacting your official agent. They will process your withdrawal request and transfer the money to your bank account within 24 hours. The minimum withdrawal amount is RM50 and the maximum withdrawal amount is RM50,000 per day.
• Can I play on 918kiss Kaya APK with other players?
• Yes, you can play on 918kiss Kaya APK with other players. You can join the live casino games and interact with live dealers and other players. You can also chat with other players on online forums or social media platforms, make new friends, and share your gaming experiences with them.
• Can I get help if I have problems or questions about 918kiss Kaya APK?
• Yes, you can get help if you have any problems or questions about 918kiss Kaya APK. You can contact the 24/7 customer service team by phone, email, WhatsApp, WeChat, Telegram, or live chat. They will provide you with fast, professional assistance.

      64aa2da5cf
      -
      -
      \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Api-ms-win-core-localization-l1-2-0.dll Download.md b/spaces/Benson/text-generation/Examples/Api-ms-win-core-localization-l1-2-0.dll Download.md deleted file mode 100644 index 6188b86283cb2a1c08c3acae14de33445623fd2c..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Api-ms-win-core-localization-l1-2-0.dll Download.md +++ /dev/null @@ -1,77 +0,0 @@ -
      -

What is api-ms-win-core-localization-l1-2-0.dll and why do you need it?

-

If you are a Windows user, you may have run into an error message saying that api-ms-win-core-localization-l1-2-0.dll is missing or was not found. This can be frustrating and can stop you from running certain programs or applications. But what is this mysterious file and why is it so important?

-

Api-ms-win-core-localization-l1-2-0.dll is a dynamic link library (DLL) file that contains functions and resources related to localization, such as displaying text in the correct language for a particular region. The DLL file is part of the Microsoft Windows operating system and is used by many different programs.

      -

      api-ms-win-core-localization-l1-2-0.dll download


      Download File ····· https://bltlly.com/2v6Kun



      -

When a program needs to use a function or resource from the DLL file, it calls the DLL and loads it into memory. This way, several programs can share the same DLL file and save space and resources. However, if the DLL file is missing or corrupted, the program cannot access it and will display an error message.

      -
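As a rough illustration of how a program loads a DLL and resolves one of its exported functions at run time, here is a minimal Python sketch using ctypes. It is only an example: kernel32.dll and GetTickCount are stand-ins chosen for illustration, not part of api-ms-win-core-localization-l1-2-0.dll, and the missing-library name is hypothetical; the script only runs on Windows.

```python
# Minimal sketch (Windows only): load a DLL and call an exported function.
# kernel32.dll / GetTickCount are illustrative stand-ins, not the
# localization DLL discussed in this article.
import ctypes

kernel32 = ctypes.WinDLL("kernel32")   # load the DLL into memory
print("Milliseconds since boot:", kernel32.GetTickCount())

# When a DLL cannot be found, loading fails with an error much like the
# "missing DLL" message described above.
try:
    ctypes.WinDLL("some-missing-library.dll")  # hypothetical missing DLL
except OSError as err:
    print("Could not load the DLL:", err)
```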

What causes api-ms-win-core-localization-l1-2-0.dll errors?

-

There are several possible reasons why api-ms-win-core-localization-l1-2-0.dll errors can occur on your Windows PC. Some of them are:

-
• Your antivirus software may have deleted or quarantined the DLL file as a false positive.
• You may have accidentally deleted the DLL file or moved it to another location.
• You may have installed or uninstalled a program that modified or replaced the DLL file.
• You may have a faulty or outdated version of Windows that corrupted or overwrote the DLL file.
• You may have a virus or malware infection that damaged or hijacked the DLL file.
-

How to fix api-ms-win-core-localization-l1-2-0.dll errors?

-

Method 1: Run System File Checker

-

System File Checker (SFC) is a built-in Windows tool that can scan for and repair corrupted or missing system files, including DLL files. To run SFC, follow these steps (a small scripted sketch follows the steps):

-
1. Press the Windows key + R to open the Run dialog.
2. Type cmd and press Ctrl + Shift + Enter to run Command Prompt as administrator.
3. Type sfc /scannow and press Enter to start the scan.
4. Wait for the scan to complete. It can take a while, so be patient.
5. If SFC finds and fixes any errors, restart your PC and check whether the DLL error is resolved.
-
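If you prefer to launch the same scan from a script rather than typing the command, here is a minimal Python sketch. It assumes the script is started from an elevated (administrator) Python process on Windows; the sfc /scannow command itself comes straight from the steps above.

```python
# Minimal sketch: run the System File Checker from Python (Windows only).
# Must be executed from an elevated (administrator) process.
import subprocess

completed = subprocess.run(["sfc", "/scannow"])  # streams progress to the console
if completed.returncode == 0:
    print("Scan finished; restart the PC and re-check the DLL error.")
else:
    print(f"sfc exited with code {completed.returncode}; review its output above.")
```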

      SFC scan

      -

Method 2: Download and install the DLL file from a trusted source

-

If SFC does not fix the DLL error, you can try downloading and installing the DLL file from a trusted source. Be careful when downloading DLL files from the Internet, though, as some websites may host malicious or outdated files that can harm your PC. Only download DLL files from verified websites, such as or . To download and install the DLL file, follow these steps (a short copy-script sketch follows the steps):

-
1. Go to or and search for api-ms-win-core-localization-l1-2-0.dll.
2. Select the appropriate version of the DLL file for your Windows system (32-bit or 64-bit).
3. Click Download and save the ZIP file to your PC.
4. Extract the ZIP file and copy the DLL file to the folder where the program that requires it is installed. For example, if you get the error when trying to run Skype, copy the DLL file to C:\Program Files\Skype.
5. If that does not work, copy the DLL file to the Windows system folder. For 32-bit Windows, copy it to C:\Windows\System32. For 64-bit Windows, copy it to C:\Windows\SysWOW64.
6. Restart your PC and check whether the DLL error is resolved.
-
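For steps 4 and 5, here is a minimal, hedged Python sketch of the copy operation. The download path is hypothetical (adjust it to wherever you extracted the ZIP), the Skype folder is just the article's example, and copying into the Windows system folder requires administrator rights.

```python
# Minimal sketch: copy the extracted DLL into the program folder (step 4),
# falling back to the 64-bit Windows system folder (step 5).
# The source path below is hypothetical; run as administrator for system folders.
import shutil
from pathlib import Path

dll = Path(r"C:\Users\you\Downloads\api-ms-win-core-localization-l1-2-0.dll")

for folder in [Path(r"C:\Program Files\Skype"),   # program folder (article's example)
               Path(r"C:\Windows\SysWOW64")]:     # 64-bit Windows system folder
    if folder.exists():
        shutil.copy2(dll, folder / dll.name)
        print(f"Copied {dll.name} to {folder}")
```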

Method 3: Reinstall the program that requires the DLL file

-

If none of the methods above works, you can try reinstalling the program that is generating the error message. This can help restore any missing or corrupted files associated with the program, including the DLL file. To reinstall the program, follow these steps:

-
1. Press the Windows key + R to open the Run dialog.
2. Type appwiz.cpl and press Enter to open Programs and Features.
3. Find and select the program that requires the DLL file, such as Skype, WordPress, or a game.
4. Click Uninstall and follow the instructions to remove the program from your PC.
5. Restart your PC, then download and install the latest version of the program from its official website or source.
6. Check whether the DLL error is resolved.
-

Uninstall a program

      -

How to prevent api-ms-win-core-localization-l1-2-0.dll errors in the future?

-

To avoid api-ms-win-core-localization-l1-2-0.dll errors in the future, you should take some preventive measures to keep your PC in good shape. Here are some tips:

-
• Keep Windows up to date with the latest security patches and bug fixes. This helps close any vulnerabilities or issues that could affect your system files.
• Run antivirus software regularly and scan your PC for viruses and malware. This helps remove any malicious programs that could damage or hijack your DLL files.
• Back up your important files regularly and create a system restore point. This lets you recover your data and restore your system in case of a failure.
• Clean up your disk space and registry regularly. This removes junk files and invalid entries that can clutter or corrupt your system.
-

Conclusion

-

In this article, you learned what api-ms-win-core-localization-l1-2-0.dll is and why you need it, what causes api-ms-win-core-localization-l1-2-0.dll errors, and how to fix them. You also picked up some tips on how to prevent these errors in the future. We hope you found this article useful and informative. If you did, please share it with your friends and leave a comment below. If you have any questions or suggestions, feel free to contact us. Thanks for reading!

      -

Frequently asked questions

-

Here are some frequently asked questions about api-ms-win-core-localization-l1-2-0.dll:

-
1. What is a DLL file?

A DLL file is a dynamic link library file that contains functions and resources that can be used by several programs. DLL files are part of the Windows operating system and help save space and resources.

2. How do I know which DLL file is missing?

If a DLL file is missing, you will usually see an error message that tells you the name of the DLL file and the program that requires it. For example, "api-ms-win-core-localization-l1-2-0.dll is missing from your computer. Try reinstalling the program to fix this problem."

3. Can I delete unused DLL files?

No, you should not delete any DLL file unless you are sure it is not needed by any program or system function. Deleting DLL files can cause errors or instability on your PC.

4. Where can I find more information about DLL files?

You can find more information about DLL files on websites such as or . These websites provide detailed information about each DLL file, such as its description, size, version, and location.

5. How can I contact you for more help?

You can contact us by visiting our website or sending us an email. We will be happy to help you with any problem or question related to api-ms-win-core-localization-l1-2-0.dll or other DLL files.

        -

      64aa2da5cf
      -
      -
      \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Apkpro.md b/spaces/Benson/text-generation/Examples/Apkpro.md deleted file mode 100644 index 43de4a869df2d4708061aa5aaa64bf207b3f064d..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Apkpro.md +++ /dev/null @@ -1,69 +0,0 @@ -
      -

APKPRO: What is it and how do you use it?

-

If you are a fan of mobile gaming and want to enjoy the best, newest, and most popular games and apps on your Android device, you may have heard of APKPRO. But what exactly is it, and how can you use it to improve your gaming experience? In this article, we will answer these questions and more. We will explain what APKPRO is, why you might use it, how to download and install it, and how to use it to download and play games and apps. So, let's get started!

-

Introduction

-

What is APKPRO?

-

APKPRO is a website that provides free downloads of games and apps for Android devices. It offers a wide range of categories, such as action, adventure, arcade, puzzle, racing, simulation, sports, strategy, and more. You can find both new and old games and apps on APKPRO, as well as modded versions that come with unlimited money, coins, gems, lives, or other features. APKPRO also updates its content regularly, so you can always find something new and exciting to play.

      -

      apkpro


      DOWNLOAD 🆗 https://bltlly.com/2v6JSC



      -

Why use APKPRO?

-

There are many reasons why you might want to use APKPRO to download games and apps for your Android device. Here are some of them:

-
• You can access games and apps that are not available in your region or on the Google Play Store.
• You can enjoy modded versions of games and apps that have extra features or benefits.
• You can save money by downloading games and apps for free instead of paying for them.
• You can have fun exploring different genres and categories of games and apps.
• You can discover new games and apps you may not have heard of before.
-

How to download and install APKPRO

Step 1: Enable unknown sources

Because APKPRO is not distributed through the Google Play Store, you first need to allow your device to install apps from unknown sources. To do this, follow these steps:

1. Go to your device's settings.
2. Tap on security or privacy.
3. Find the option that says unknown sources or allow installation from unknown sources.
4. Turn it on or tick the box next to it.
      -

Step 2: Download APKPRO from the official website

Now that you have enabled unknown sources, you can download APKPRO from its official website. To do this, follow these steps:

1. Open your browser and go to apkpro.me.
2. Scroll down until you see the download button.
3. Tap the download button and wait for the file to download.
      -

Step 3: Install APKPRO on your device

Once you have downloaded the file, you can install APKPRO on your device. To do this, follow these steps (a scripted alternative is sketched after the list):

1. Open your file manager or downloads folder.
2. Find the file named apkpro.apk or something similar.
3. Tap the file and follow the on-screen instructions.
4. Wait for the installation to finish.
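If you prefer to install the downloaded APK from a computer instead of tapping through the file manager, the Android Debug Bridge (adb) can push it to a connected device. This is a hedged illustration rather than part of APKPRO itself: it assumes the Android platform tools are installed, USB debugging is enabled on the device, and the file name apkpro.apk matches the steps above.

```python
# Illustrative sketch: install a downloaded APK onto a USB-connected Android device with adb.
# Assumes the Android platform tools (adb) are installed and USB debugging is enabled.
import subprocess

apk_path = "apkpro.apk"  # file name from the steps above; adjust to your download location

# "adb install -r" installs the package, replacing an existing installation if present.
result = subprocess.run(["adb", "install", "-r", apk_path], capture_output=True, text=True)

print(result.stdout)
if result.returncode != 0:
    print("Install failed:", result.stderr)
```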
How to use APKPRO to download and play games and apps

Now that you have installed APKPRO on your device, you can use it to download and play games and apps. To do this, follow these steps:

Step 1: Browse the categories or search for the game or app you want

Open APKPRO and you will see a home screen with different categories of games and apps, such as popular, trending, featured, and so on. You can swipe left or right to see more categories, or tap the menu icon in the top left corner to see the full list of categories. You can also use the search bar in the top right corner to type the name of the game or app you are looking for.

      -

Step 2: Tap the download button and wait for the file to download

      - -

Step 3: Open the file and install the game or app on your device

After the file has downloaded, you can open it and install the game or app on your device. To do this, follow these steps:

1. Swipe down from the top of the screen and tap the notification that says APKPRO downloaded.
2. Tap the file and follow the on-screen instructions.
3. Wait for the installation to finish.
4. Tap open or launch to start playing or using the app.
      -

Conclusion

Summary of the main points

In this article, we have covered what APKPRO is, why you might use it, how to download and install it, and how to use it to download and play games and apps. We have seen that APKPRO is a website that provides free downloads of games and apps for Android devices, including modded versions with extra features or benefits. We have also seen that APKPRO is easy to use and offers a wide range of categories and genres of games and apps to choose from.

      -

      -

Call to action

If you are a fan of mobile gaming and want to enjoy the best, latest, and most popular games and apps on your Android device, you should definitely give APKPRO a try. You will not regret it. Just remember to enable unknown sources in your settings before downloading and installing APKPRO, and always be careful with what you download from unknown sources. Happy gaming!

      -

Frequently asked questions

• What is APKPRO?
APKPRO is a website that provides free downloads of games and apps for Android devices.
• Is APKPRO safe?
APKPRO is generally safe, but you should always be careful with what you download from unknown sources. Make sure you have a good antivirus app on your device and scan every file before installing it.
• How do I update APKPRO?
You can update APKPRO by visiting its official website and downloading the latest version of the app. You can also check for updates inside the app by tapping the menu icon in the top left corner and selecting check for updates.
• How do I uninstall APKPRO?
You can uninstall APKPRO by going to your device's settings, tapping apps or applications, finding APKPRO in the list, tapping it, and selecting uninstall.
      • -

      64aa2da5cf
      -
      -
      \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descarga De La Saga 3d Apk.md b/spaces/Benson/text-generation/Examples/Descarga De La Saga 3d Apk.md deleted file mode 100644 index 103416757b41c019293bced0711bcc4cf7ce5a64..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descarga De La Saga 3d Apk.md +++ /dev/null @@ -1,74 +0,0 @@ - -

How to Download the AetherSX2 APK for PC

If you are a fan of PlayStation 2 games, you may have heard of AetherSX2, an emulator that lets you play PS2 games on your Android device. But what if you want to enjoy those games on a bigger screen, with better graphics and controls? In this article, we will show you how to download the AetherSX2 APK for PC and run it using different methods.

      -

      descarga de la saga 3d apk


Download File: https://bltlly.com/2v6JjC



      -

What an APK file is and how to run it on a PC

An APK file is an Android application package that contains all the files and data needed to install and run an app on an Android device. However, you can also run APK files on your PC using tools that emulate the Android environment or convert the APK file into a compatible format.

There are two main ways to run APK files on a PC: using an Android emulator or using a browser extension. Let's look at each method in detail.

      -

Method 1: Using an Android emulator

An Android emulator is software that creates a virtual Android device on your PC, where you can install and run any Android app or game. There are many Android emulators available for free, such as BlueStacks, Nox, and LDPlayer. Here are the steps to use an Android emulator to run the AetherSX2 APK on a PC (a quick file-integrity check is sketched after this list):

1. Download and install an Android emulator of your choice from its official website.
2. Launch the emulator and sign in with your Google account.
3. Download the AetherSX2 APK file from a reliable source (we will discuss this later).
4. Drag and drop the APK file into the emulator window, or click the Install APK button in the emulator menu.
5. Wait for the installation to complete, then launch the AetherSX2 app from the emulator's home screen.
6. Enjoy playing PS2 games on your PC.
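Before dragging the file into the emulator, it can be worth confirming that the download is complete and not corrupted. Since an APK is just a ZIP archive, Python's standard library can do a quick structural check. This is only a sketch; the file name is a placeholder for wherever you saved the AetherSX2 APK.

```python
# Sketch: sanity-check a downloaded APK before loading it into an emulator.
# An APK is a ZIP archive, so a structural check catches truncated or corrupted downloads.
import zipfile

apk_path = "AetherSX2.apk"  # placeholder: path to the APK you downloaded

try:
    with zipfile.ZipFile(apk_path) as apk:
        names = apk.namelist()
        print("Entries in archive:", len(names))
        print("Contains AndroidManifest.xml:", "AndroidManifest.xml" in names)
        print("First corrupted entry:", apk.testzip())  # None means all CRC checks passed
except zipfile.BadZipFile:
    print("Not a valid APK/ZIP archive - the download is probably incomplete or corrupted.")
```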

Pros and cons of using an emulator

Using an emulator has some advantages and disadvantages that you should weigh before choosing this method. Here are some of them:

Pros:
• You can access the Google Play Store and other Android features.
• You can customize the emulator's settings, resolution, controls, and so on.
• You can play several games at once using multiple instances of the emulator.

Cons:
• You need a powerful PC to run the emulator smoothly.
• You need enough storage space to install the emulator and the apps.
• You may run into compatibility issues with some apps or games.

Method 2: Using a browser extension

A browser extension is software that adds extra features or functionality to your web browser. Some browser extensions can help you run APK files on a PC by converting them into a web app that can be opened in a new tab. One of the most popular browser extensions for this purpose is ARC Welder, which works with Google Chrome. Here are the steps to use ARC Welder to run the AetherSX2 APK on a PC:

      -

      -
        -
1. Download and install Google Chrome from its official website if you do not already have it.
2. Download and install ARC Welder from the Chrome Web Store.
3. Download the AetherSX2 APK file from a reliable source (we will discuss this later).
4. Launch Google Chrome and click the ARC Welder icon in the top right corner.
5. Choose a directory where you want to store the converted APK files.
6. Click Add your APK and select the AetherSX2 APK file from your computer.
7. Configure the settings, such as orientation and form factor, to your preference.
8. Click Test to launch the AetherSX2 app in a new tab.
9. Enjoy playing PS2 games on your PC.
      18. -
      -

Pros and cons of using a browser extension

Using a browser extension has some advantages and disadvantages that you should weigh before choosing this method. Here are some of them:

Pros:
• You can easily switch between different APK files without uninstalling or reinstalling them.
• You can save the converted APK files for offline use.

Cons:
• You may not be able to access all the features or functions of the app or game.
• You may compromise your security or privacy by allowing the extension to access your data.

How to download the AetherSX2 APK from a reliable source

Now that you know how to run the AetherSX2 APK on a PC, you may be wondering where to download it. There are many websites that offer APK files for free, but not all of them are safe or trustworthy. Some of them may contain viruses, malware, or unwanted ads that can harm your device or steal your data. Therefore, you should always download APK files from reputable sources that have positive reviews and ratings from users and experts. Here are some tips on how to find and download the AetherSX2 APK from a reliable source:

      -
        -
• Search for the AetherSX2 APK on Google or any other search engine and look for websites with high rank and authority, such as Uptodown, APKCombo, or APKPure.
• Check the website's domain name and make sure it matches the name of the site. Avoid websites with suspicious or misleading domain names, such as .ru, .cn, or .tk.
• Read the description and details of the AetherSX2 APK file and make sure it is compatible with your device and your emulator or browser extension. Look for information such as version, size, developer, and update date.
• Read the user reviews and comments on the website and see what other people have to say about the AetherSX2 APK file. Look for positive reviews and ratings, as well as any complaints or problems reported by users.
• Download the AetherSX2 APK file from the website by clicking the download button or link. Avoid clicking any pop-ups or ads that may appear on the site.
      • -
      - -

Before installing and running the AetherSX2 APK file on your PC, you should also check it for viruses or malware that may have been hidden in or attached to it by malicious actors. That way, you can protect your PC from potential damage or infection. Here are some ways to check the APK file for viruses or malware (a hash-check sketch follows the list):

      -
        -
• Use an online tool such as VirusTotal or Malwarebytes to scan the APK file for threats. These tools analyze the APK file with multiple antivirus engines and give you a report on its safety.
• Use antivirus software on your PC to scan the APK file before installing it. Make sure your antivirus software is up to date and has real-time protection enabled.
• Use common sense and avoid installing any APK file that looks suspicious or has a poor reputation or rating.
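A practical way to use a scanner such as VirusTotal without uploading the whole file is to look up its hash. The snippet below is a sketch that uses only Python's standard library; the file name is a placeholder for wherever you saved the AetherSX2 APK.

```python
# Sketch: compute the SHA-256 hash of a downloaded APK so it can be looked up on a
# scanning service such as VirusTotal. Standard library only; the path is a placeholder.
import hashlib

apk_path = "AetherSX2.apk"  # placeholder: use the actual path of your download

sha256 = hashlib.sha256()
with open(apk_path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read the file in 1 MiB chunks
        sha256.update(chunk)

print("SHA-256:", sha256.hexdigest())
```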

Conclusion

In conclusion, downloading the AetherSX2 APK for PC is not a difficult task if you follow the steps and tips we have provided in this article. You can choose between using an Android emulator or a browser extension to run the APK file on your PC, depending on your preference and convenience. You can also find and download the APK file from a reliable source and check it for viruses or malware before installing it. By doing so, you can enjoy playing PS2 games on your PC with AetherSX2, an emulator that offers high performance, compatibility, and features.

      -

Frequently asked questions

Here are some frequently asked questions and answers about downloading the AetherSX2 APK for PC:

      -
        -
1. What is AetherSX2?
AetherSX2 is an emulator that lets you play PlayStation 2 games on your Android device. It is compatible with a wide range of PS2 games and offers features such as fast loading, high resolution, save states, and cheats.
2. Is AetherSX2 legal?
AetherSX2 is legal as long as you own the original PS2 games and use them as ROMs or ISOs for the emulator. However, downloading or distributing pirated or copyrighted PS2 games is illegal and can have legal consequences.
3. What are the system requirements for AetherSX2?
AetherSX2 requires an Android device with at least 4 GB of RAM, a 64-bit CPU, and Android 5.0 or higher. For PC, you need a Windows or Mac computer with at least 4 GB of RAM, a 64-bit CPU, and the Google Chrome browser.
4. Where can I get more information or support for AetherSX2?
You can visit the official AetherSX2 website at https://aethersx2.com/ or join its Discord server at https://discord.gg/aethersx2. You can also follow its social media accounts on Facebook, Twitter, Instagram, and so on.
      8. -

      64aa2da5cf
      -
      -
      \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar 69 Yoruba Parte De La Pelcula 2.md b/spaces/Benson/text-generation/Examples/Descargar 69 Yoruba Parte De La Pelcula 2.md deleted file mode 100644 index 93765bea113edeea4fed4d458d3808181898c438..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar 69 Yoruba Parte De La Pelcula 2.md +++ /dev/null @@ -1,93 +0,0 @@ - -

Download 69 Yoruba Movie Part 2: A Guide for Nollywood Lovers

If you are a fan of Nollywood, especially the Yoruba genre, you may have heard of the movie 69, a bold and controversial film that has caused quite a stir in the industry. The movie, released in 2021, is a sequel to the original 69, which came out in 2019. In this article, we will tell you everything you need to know about 69 Yoruba Movie Part 2, including what it is about, who the actors are, how it was received, and, most importantly, how you can download it for free. So sit back and enjoy this guide for Nollywood lovers.

      -

      descargar 69 yoruba parte de la película 2


      Download Zip >>>>> https://bltlly.com/2v6LSP



      -

What is 69 Yoruba Movie Part 2?

69 Yoruba Movie Part 2 is a Nigerian film produced by Shola Subair, a young actress and filmmaker. It stars veteran actor Ibrahim Chatta in the lead male role, alongside other stars such as Tope Adebayo, Peter Ijagbemi, and Akin Olaiya. The movie is directed by Tope Adebayo, who is also the son of the legendary actor Adebayo Salami.

      -

The plot of the movie

The movie tells the story of a young and beautiful lady named Lola (Shola Subair), who is loyal and committed to her boyfriend Lugard (Ibrahim Chatta), a notorious drug lord. However, her life takes a dramatic turn when she meets Gbade (Peter Ijagbemi), a gentle and handsome man who offers her true love and happiness. Lola is torn between staying with Lugard, who provides her with luxury and security, and leaving him for Gbade, who gives her respect and romance. What will she choose? And what will the consequences of her choice be?

      -

The cast and crew of the movie

Here are some of the main cast and crew members of 69 Yoruba Movie Part 2:

• Ibrahim Chatta: He plays Lugard, Lola's boyfriend. He is one of the most popular and versatile actors in Nollywood and has starred in movies such as Sango, Omo Ghetto, Omo Ekun, Alani Pamolekun, and many more.
• Peter Ijagbemi: He plays Gbade, Lola's lover. He is a rising star in the industry and has appeared in movies such as Sixty Nine (the original), Tango With Me, Living Funeral, and more.
• Tope Adebayo: He is the director of the movie. He is also an actor and filmmaker, and the son of Adebayo Salami, a veteran actor and producer. He has directed movies such as Sixty Nine (the original), Omo Iya Osun, and more.
• Akin Olaiya: He plays Lugard's boss. He is an experienced actor and comedian who has appeared in movies such as Jenifa, Omo Ghetto, Alakada, and more.
      • -
      -

The reception and reviews of the movie

69 Yoruba Movie Part 2 was released on YouTube on February 14, 2021, as a Valentine's Day special. Since then, the movie has gathered more than 1.5 million views and thousands of likes and comments. It has also received mixed reviews from critics and viewers. Some praised the movie for its bold and daring theme, its captivating story, its excellent acting, and its production quality. Others criticized it for its explicit and vulgar scenes, its poor editing, its unrealistic plot twists, and its moral implications. Here are some of the comments from YouTube:

"This is one of the best Yoruba movies I have ever seen. The story is very realistic and relatable. The actors did a great job. Kudos to the producer and director."

"This movie is rubbish. It is promoting immorality and adultery. It is not suitable for children or decent people. It should be banned."

"This movie is a waste of time and data. It is full of nonsense and trash. It has no message or value. It is just a cheap way to make money."

      -
      -

Why should you watch 69 Yoruba Movie Part 2?

If you are still wondering whether you should watch 69 Yoruba Movie Part 2 or not, here are some reasons to give it a try:

It is a rare example of an 18+ rated Yoruba movie

Most Yoruba movies are family-friendly and suitable for a general audience. They usually avoid themes or scenes that are considered taboo or offensive in Yoruba culture. 69 Yoruba Movie Part 2, however, breaks this norm and explores the dark and sensual side of human relationships. The movie contains scenes that are graphic, erotic, violent, and shocking, and it is rated 18+ for mature audiences only.

It is a thrilling and sensual story of love and betrayal

The movie is not only about sex and violence; it is also about love and betrayal. It portrays the complex and emotional journey of Lola, who has to choose between two men who offer her different things. It also shows the consequences of her choice and how it affects her life and the lives of those around her. The movie keeps you on the edge of your seat as the drama unfolds.

It showcases the talent and diversity of the Yoruba film industry

The movie also showcases the talent and diversity of the Yoruba film industry. It features some of the best actors and actresses in Nollywood, who deliver outstanding performances in their roles. It also demonstrates the creativity and innovation of the producer and director, who dared to do something different from the usual Yoruba movies. The movie likewise reflects the rich culture and language of the Yoruba people, one of the largest ethnic groups in Nigeria.

      -

How can you download 69 Yoruba Movie Part 2 for free?

The best websites to download Yoruba movies for free

There are many websites that offer free downloads of Yoruba movies, but not all of them are reliable or safe. Some may contain viruses or malware that can damage your device or steal your data. Some may also have low-quality or incomplete downloads that can ruin your viewing experience.

To avoid these problems, we recommend using these three websites, which are trusted and tested by many Nollywood fans:

Netnaija

Netnaija is one of the most popular and reliable websites for downloading Yoruba movies for free. It has a large and regularly updated collection of Yoruba movies in various genres and categories, as well as a user-friendly interface and fast download speeds. To download 69 Yoruba Movie Part 2 from Netnaija, follow these steps (a scripted download example follows the list):

      -
        -
1. Go to Netnaija and search for 69 Yoruba Movie Part 2 in the search box.
2. Select the movie from the search results and click on it.
3. Scroll to the bottom of the page and click the green button that says "Download".
4. Choose a download link from the list and click on it.
5. Wait for the download to start and finish.
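Once you have copied a direct download link from a site like Netnaija, the actual transfer can also be scripted. The sketch below is a generic illustration, not a feature of the website: it assumes the third-party requests package is installed, and the URL shown is a hypothetical placeholder for the link you copied.

```python
# Sketch: save a movie file from a direct download link to disk in streaming mode.
# Requires the third-party "requests" package; the URL below is a hypothetical placeholder.
import requests

url = "https://example.com/downloads/69-yoruba-movie-part-2.mp4"  # placeholder link
output_path = "69-yoruba-movie-part-2.mp4"

with requests.get(url, stream=True, timeout=30) as response:
    response.raise_for_status()  # stop early on HTTP errors
    with open(output_path, "wb") as f:
        for chunk in response.iter_content(chunk_size=1 << 20):  # 1 MiB chunks
            f.write(chunk)

print("Saved to", output_path)
```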

9jarocks

9jarocks is another website that offers free downloads of Yoruba movies. It has a huge and diverse library of Yoruba movies in different formats and qualities, along with a simple, easy-to-use interface and high download speeds. To download 69 Yoruba Movie Part 2 from 9jarocks, follow these steps:

1. Go to 9jarocks and search for 69 Yoruba Movie Part 2 in the search box.
2. Select the movie from the search results and click on it.
3. Select a download option from the list and click on it.
4. Wait for the download to start and finish.
      8. -
      -

YouTube

YouTube is not only a website for watching videos online but also a way to save videos for offline viewing. You can find many Yoruba movies on YouTube, including 69 Yoruba Movie Part 2. However, you cannot download videos directly from YouTube unless you have a YouTube Premium subscription, so you will need to use a third-party downloader to save YouTube videos. We will show you how to do that in the next section.

      -

The best downloader for saving Yoruba movies from online streaming sites

If you want to download Yoruba movies from online streaming sites such as YouTube, you will need a downloader that can capture and convert videos from these sites. There are many downloaders available online, but not all of them are safe or effective. Some may contain viruses or malware that can damage your device or steal your data, and some may produce low-quality or incomplete downloads that ruin your viewing experience.

To avoid these problems, we recommend this downloader, which is trusted and tested by many Nollywood fans:

WonderFox Free HD Video Converter Factory

WonderFox Free HD Video Converter Factory is a powerful and versatile downloader that can save videos from more than 300 online streaming sites, including YouTube, Vimeo, Dailymotion, Facebook, Instagram, Twitter, and more. It can also convert videos to more than 500 formats and device profiles, including MP4, AVI, MKV, MOV, iPhone, Android, and TV, and it can edit videos by trimming, cropping, rotating, adding subtitles, and so on. It is free, safe, fast, and easy to use. To use it to download 69 Yoruba Movie Part 2, follow these steps (a command-line alternative is sketched after the list):

      -
        - -
1. Launch the program and click "Downloader" in the main interface.
2. Click "+ New Download" in the top left corner.
3. Go to YouTube and search for 69 Yoruba Movie Part 2, then copy the video's URL.
4. Paste the URL into the downloader and click "Analyze", then wait for the analysis to finish.
5. Select your preferred resolution and format from the list and click "OK". You can also queue several videos to download at once.
6. Click "Download All" in the bottom right corner, choose a destination folder for your downloads, and click "OK". Wait for the download to start and finish.
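If you are comfortable with the command line, a scriptable downloader such as yt-dlp can handle the same job as the GUI tool described above. This alternative is not covered by the article and is shown only as a hedged sketch: the video URL is a placeholder, and you should only save videos you are permitted to download.

```python
# Sketch: download a video for offline viewing with the yt-dlp Python package,
# as an alternative to the GUI downloader described above. The URL is a placeholder.
from yt_dlp import YoutubeDL

video_url = "https://www.youtube.com/watch?v=VIDEO_ID"  # placeholder: paste the movie's URL

options = {
    "format": "best[ext=mp4]/best",   # prefer a single mp4 file when one is available
    "outtmpl": "%(title)s.%(ext)s",   # name the saved file after the video title
}

with YoutubeDL(options) as ydl:
    ydl.download([video_url])
```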

Conclusion

In conclusion, 69 Yoruba Movie Part 2 is a movie you should not miss if you are a Nollywood lover, especially of the Yoruba genre. It is a movie that will keep you entertained, excited, and intrigued from beginning to end, and one that will challenge you to think about the choices and consequences of love and betrayal. It is also a movie that showcases the talent and diversity of the Yoruba film industry.

If you want to watch 69 Yoruba Movie Part 2, you can download it for free from the websites or the downloader we have recommended in this article. You can also watch it online on YouTube or other streaming platforms. However you choose to watch it, we hope you enjoy it and share your thoughts with us in the comments section below.

Thank you for reading this article, and happy watching!

      -

Frequently asked questions

Here are some of the most frequently asked questions about 69 Yoruba Movie Part 2:

• Q: Is 69 Yoruba Movie Part 2 a true story?
• A: No, 69 Yoruba Movie Part 2 is not a true story. It is a fictional story created by the movie's producer and director.
• Q: Where can I watch 69 Yoruba Movie Part 2 online?
• A: You can watch 69 Yoruba Movie Part 2 online on YouTube or other streaming platforms. However, you may have to pay a subscription fee or watch ads to access some of these platforms.
• Q: Who is the producer of 69 Yoruba Movie Part 2?
• A: The producer of 69 Yoruba Movie Part 2 is Shola Subair, who also plays the lead female role in the movie.
• Q: What is the rating of 69 Yoruba Movie Part 2?
• A: 69 Yoruba Movie Part 2 is rated 18+. It contains scenes that are graphic, erotic, violent, and shocking, and it is not suitable for children or sensitive viewers.
• Q: How long is 69 Yoruba Movie Part 2?
• A: 69 Yoruba Movie Part 2 is approximately one hour and thirty minutes long.
      • -

      64aa2da5cf
      -
      -
      \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/cachecontrol/controller.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/cachecontrol/controller.py deleted file mode 100644 index 7f23529f1155cd3bbfde335ccdb7fc483b9d2d19..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/cachecontrol/controller.py +++ /dev/null @@ -1,439 +0,0 @@ -# SPDX-FileCopyrightText: 2015 Eric Larson -# -# SPDX-License-Identifier: Apache-2.0 - -""" -The httplib2 algorithms ported for use with requests. -""" -import logging -import re -import calendar -import time -from email.utils import parsedate_tz - -from pip._vendor.requests.structures import CaseInsensitiveDict - -from .cache import DictCache, SeparateBodyBaseCache -from .serialize import Serializer - - -logger = logging.getLogger(__name__) - -URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?") - -PERMANENT_REDIRECT_STATUSES = (301, 308) - - -def parse_uri(uri): - """Parses a URI using the regex given in Appendix B of RFC 3986. - - (scheme, authority, path, query, fragment) = parse_uri(uri) - """ - groups = URI.match(uri).groups() - return (groups[1], groups[3], groups[4], groups[6], groups[8]) - - -class CacheController(object): - """An interface to see if request should cached or not.""" - - def __init__( - self, cache=None, cache_etags=True, serializer=None, status_codes=None - ): - self.cache = DictCache() if cache is None else cache - self.cache_etags = cache_etags - self.serializer = serializer or Serializer() - self.cacheable_status_codes = status_codes or (200, 203, 300, 301, 308) - - @classmethod - def _urlnorm(cls, uri): - """Normalize the URL to create a safe key for the cache""" - (scheme, authority, path, query, fragment) = parse_uri(uri) - if not scheme or not authority: - raise Exception("Only absolute URIs are allowed. uri = %s" % uri) - - scheme = scheme.lower() - authority = authority.lower() - - if not path: - path = "/" - - # Could do syntax based normalization of the URI before - # computing the digest. See Section 6.2.2 of Std 66. 
- request_uri = query and "?".join([path, query]) or path - defrag_uri = scheme + "://" + authority + request_uri - - return defrag_uri - - @classmethod - def cache_url(cls, uri): - return cls._urlnorm(uri) - - def parse_cache_control(self, headers): - known_directives = { - # https://tools.ietf.org/html/rfc7234#section-5.2 - "max-age": (int, True), - "max-stale": (int, False), - "min-fresh": (int, True), - "no-cache": (None, False), - "no-store": (None, False), - "no-transform": (None, False), - "only-if-cached": (None, False), - "must-revalidate": (None, False), - "public": (None, False), - "private": (None, False), - "proxy-revalidate": (None, False), - "s-maxage": (int, True), - } - - cc_headers = headers.get("cache-control", headers.get("Cache-Control", "")) - - retval = {} - - for cc_directive in cc_headers.split(","): - if not cc_directive.strip(): - continue - - parts = cc_directive.split("=", 1) - directive = parts[0].strip() - - try: - typ, required = known_directives[directive] - except KeyError: - logger.debug("Ignoring unknown cache-control directive: %s", directive) - continue - - if not typ or not required: - retval[directive] = None - if typ: - try: - retval[directive] = typ(parts[1].strip()) - except IndexError: - if required: - logger.debug( - "Missing value for cache-control " "directive: %s", - directive, - ) - except ValueError: - logger.debug( - "Invalid value for cache-control directive " "%s, must be %s", - directive, - typ.__name__, - ) - - return retval - - def cached_request(self, request): - """ - Return a cached response if it exists in the cache, otherwise - return False. - """ - cache_url = self.cache_url(request.url) - logger.debug('Looking up "%s" in the cache', cache_url) - cc = self.parse_cache_control(request.headers) - - # Bail out if the request insists on fresh data - if "no-cache" in cc: - logger.debug('Request header has "no-cache", cache bypassed') - return False - - if "max-age" in cc and cc["max-age"] == 0: - logger.debug('Request header has "max_age" as 0, cache bypassed') - return False - - # Request allows serving from the cache, let's see if we find something - cache_data = self.cache.get(cache_url) - if cache_data is None: - logger.debug("No cache entry available") - return False - - if isinstance(self.cache, SeparateBodyBaseCache): - body_file = self.cache.get_body(cache_url) - else: - body_file = None - - # Check whether it can be deserialized - resp = self.serializer.loads(request, cache_data, body_file) - if not resp: - logger.warning("Cache entry deserialization failed, entry ignored") - return False - - # If we have a cached permanent redirect, return it immediately. We - # don't need to test our response for other headers b/c it is - # intrinsically "cacheable" as it is Permanent. - # - # See: - # https://tools.ietf.org/html/rfc7231#section-6.4.2 - # - # Client can try to refresh the value by repeating the request - # with cache busting headers as usual (ie no-cache). - if int(resp.status) in PERMANENT_REDIRECT_STATUSES: - msg = ( - "Returning cached permanent redirect response " - "(ignoring date and etag information)" - ) - logger.debug(msg) - return resp - - headers = CaseInsensitiveDict(resp.headers) - if not headers or "date" not in headers: - if "etag" not in headers: - # Without date or etag, the cached response can never be used - # and should be deleted. 
- logger.debug("Purging cached response: no date or etag") - self.cache.delete(cache_url) - logger.debug("Ignoring cached response: no date") - return False - - now = time.time() - date = calendar.timegm(parsedate_tz(headers["date"])) - current_age = max(0, now - date) - logger.debug("Current age based on date: %i", current_age) - - # TODO: There is an assumption that the result will be a - # urllib3 response object. This may not be best since we - # could probably avoid instantiating or constructing the - # response until we know we need it. - resp_cc = self.parse_cache_control(headers) - - # determine freshness - freshness_lifetime = 0 - - # Check the max-age pragma in the cache control header - if "max-age" in resp_cc: - freshness_lifetime = resp_cc["max-age"] - logger.debug("Freshness lifetime from max-age: %i", freshness_lifetime) - - # If there isn't a max-age, check for an expires header - elif "expires" in headers: - expires = parsedate_tz(headers["expires"]) - if expires is not None: - expire_time = calendar.timegm(expires) - date - freshness_lifetime = max(0, expire_time) - logger.debug("Freshness lifetime from expires: %i", freshness_lifetime) - - # Determine if we are setting freshness limit in the - # request. Note, this overrides what was in the response. - if "max-age" in cc: - freshness_lifetime = cc["max-age"] - logger.debug( - "Freshness lifetime from request max-age: %i", freshness_lifetime - ) - - if "min-fresh" in cc: - min_fresh = cc["min-fresh"] - # adjust our current age by our min fresh - current_age += min_fresh - logger.debug("Adjusted current age from min-fresh: %i", current_age) - - # Return entry if it is fresh enough - if freshness_lifetime > current_age: - logger.debug('The response is "fresh", returning cached response') - logger.debug("%i > %i", freshness_lifetime, current_age) - return resp - - # we're not fresh. If we don't have an Etag, clear it out - if "etag" not in headers: - logger.debug('The cached response is "stale" with no etag, purging') - self.cache.delete(cache_url) - - # return the original handler - return False - - def conditional_headers(self, request): - cache_url = self.cache_url(request.url) - resp = self.serializer.loads(request, self.cache.get(cache_url)) - new_headers = {} - - if resp: - headers = CaseInsensitiveDict(resp.headers) - - if "etag" in headers: - new_headers["If-None-Match"] = headers["ETag"] - - if "last-modified" in headers: - new_headers["If-Modified-Since"] = headers["Last-Modified"] - - return new_headers - - def _cache_set(self, cache_url, request, response, body=None, expires_time=None): - """ - Store the data in the cache. - """ - if isinstance(self.cache, SeparateBodyBaseCache): - # We pass in the body separately; just put a placeholder empty - # string in the metadata. - self.cache.set( - cache_url, - self.serializer.dumps(request, response, b""), - expires=expires_time, - ) - self.cache.set_body(cache_url, body) - else: - self.cache.set( - cache_url, - self.serializer.dumps(request, response, body), - expires=expires_time, - ) - - def cache_response(self, request, response, body=None, status_codes=None): - """ - Algorithm for caching requests. - - This assumes a requests Response object. 
- """ - # From httplib2: Don't cache 206's since we aren't going to - # handle byte range requests - cacheable_status_codes = status_codes or self.cacheable_status_codes - if response.status not in cacheable_status_codes: - logger.debug( - "Status code %s not in %s", response.status, cacheable_status_codes - ) - return - - response_headers = CaseInsensitiveDict(response.headers) - - if "date" in response_headers: - date = calendar.timegm(parsedate_tz(response_headers["date"])) - else: - date = 0 - - # If we've been given a body, our response has a Content-Length, that - # Content-Length is valid then we can check to see if the body we've - # been given matches the expected size, and if it doesn't we'll just - # skip trying to cache it. - if ( - body is not None - and "content-length" in response_headers - and response_headers["content-length"].isdigit() - and int(response_headers["content-length"]) != len(body) - ): - return - - cc_req = self.parse_cache_control(request.headers) - cc = self.parse_cache_control(response_headers) - - cache_url = self.cache_url(request.url) - logger.debug('Updating cache with response from "%s"', cache_url) - - # Delete it from the cache if we happen to have it stored there - no_store = False - if "no-store" in cc: - no_store = True - logger.debug('Response header has "no-store"') - if "no-store" in cc_req: - no_store = True - logger.debug('Request header has "no-store"') - if no_store and self.cache.get(cache_url): - logger.debug('Purging existing cache entry to honor "no-store"') - self.cache.delete(cache_url) - if no_store: - return - - # https://tools.ietf.org/html/rfc7234#section-4.1: - # A Vary header field-value of "*" always fails to match. - # Storing such a response leads to a deserialization warning - # during cache lookup and is not allowed to ever be served, - # so storing it can be avoided. - if "*" in response_headers.get("vary", ""): - logger.debug('Response header has "Vary: *"') - return - - # If we've been given an etag, then keep the response - if self.cache_etags and "etag" in response_headers: - expires_time = 0 - if response_headers.get("expires"): - expires = parsedate_tz(response_headers["expires"]) - if expires is not None: - expires_time = calendar.timegm(expires) - date - - expires_time = max(expires_time, 14 * 86400) - - logger.debug("etag object cached for {0} seconds".format(expires_time)) - logger.debug("Caching due to etag") - self._cache_set(cache_url, request, response, body, expires_time) - - # Add to the cache any permanent redirects. We do this before looking - # that the Date headers. - elif int(response.status) in PERMANENT_REDIRECT_STATUSES: - logger.debug("Caching permanent redirect") - self._cache_set(cache_url, request, response, b"") - - # Add to the cache if the response headers demand it. If there - # is no date header then we can't do anything about expiring - # the cache. - elif "date" in response_headers: - date = calendar.timegm(parsedate_tz(response_headers["date"])) - # cache when there is a max-age > 0 - if "max-age" in cc and cc["max-age"] > 0: - logger.debug("Caching b/c date exists and max-age > 0") - expires_time = cc["max-age"] - self._cache_set( - cache_url, - request, - response, - body, - expires_time, - ) - - # If the request can expire, it means we should cache it - # in the meantime. 
- elif "expires" in response_headers: - if response_headers["expires"]: - expires = parsedate_tz(response_headers["expires"]) - if expires is not None: - expires_time = calendar.timegm(expires) - date - else: - expires_time = None - - logger.debug( - "Caching b/c of expires header. expires in {0} seconds".format( - expires_time - ) - ) - self._cache_set( - cache_url, - request, - response, - body, - expires_time, - ) - - def update_cached_response(self, request, response): - """On a 304 we will get a new set of headers that we want to - update our cached value with, assuming we have one. - - This should only ever be called when we've sent an ETag and - gotten a 304 as the response. - """ - cache_url = self.cache_url(request.url) - - cached_response = self.serializer.loads(request, self.cache.get(cache_url)) - - if not cached_response: - # we didn't have a cached response - return response - - # Lets update our headers with the headers from the new request: - # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1 - # - # The server isn't supposed to send headers that would make - # the cached body invalid. But... just in case, we'll be sure - # to strip out ones we know that might be problmatic due to - # typical assumptions. - excluded_headers = ["content-length"] - - cached_response.headers.update( - dict( - (k, v) - for k, v in response.headers.items() - if k.lower() not in excluded_headers - ) - ) - - # we want a 200 b/c we have content via the cache - cached_response.status = 200 - - # update our cache - self._cache_set(cache_url, request, cached_response) - - return cached_response diff --git a/spaces/Biswa13/Examples-Of-AI-2023/README.md b/spaces/Biswa13/Examples-Of-AI-2023/README.md deleted file mode 100644 index 72faee6cd99d38c523aa1b1449be709a7e7edc41..0000000000000000000000000000000000000000 --- a/spaces/Biswa13/Examples-Of-AI-2023/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Examples Of AI 2023 -emoji: 📚 -colorFrom: purple -colorTo: indigo -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/CVH-vn1210/make_hair/minigpt4/processors/randaugment.py b/spaces/CVH-vn1210/make_hair/minigpt4/processors/randaugment.py deleted file mode 100644 index 5c6a9e6d62f74358f490d19546c9829b3ac6aaef..0000000000000000000000000000000000000000 --- a/spaces/CVH-vn1210/make_hair/minigpt4/processors/randaugment.py +++ /dev/null @@ -1,398 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. 
- SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -import cv2 -import numpy as np - -import torch - - -## aug functions -def identity_func(img): - return img - - -def autocontrast_func(img, cutoff=0): - """ - same output as PIL.ImageOps.autocontrast - """ - n_bins = 256 - - def tune_channel(ch): - n = ch.size - cut = cutoff * n // 100 - if cut == 0: - high, low = ch.max(), ch.min() - else: - hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins]) - low = np.argwhere(np.cumsum(hist) > cut) - low = 0 if low.shape[0] == 0 else low[0] - high = np.argwhere(np.cumsum(hist[::-1]) > cut) - high = n_bins - 1 if high.shape[0] == 0 else n_bins - 1 - high[0] - if high <= low: - table = np.arange(n_bins) - else: - scale = (n_bins - 1) / (high - low) - offset = -low * scale - table = np.arange(n_bins) * scale + offset - table[table < 0] = 0 - table[table > n_bins - 1] = n_bins - 1 - table = table.clip(0, 255).astype(np.uint8) - return table[ch] - - channels = [tune_channel(ch) for ch in cv2.split(img)] - out = cv2.merge(channels) - return out - - -def equalize_func(img): - """ - same output as PIL.ImageOps.equalize - PIL's implementation is different from cv2.equalize - """ - n_bins = 256 - - def tune_channel(ch): - hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins]) - non_zero_hist = hist[hist != 0].reshape(-1) - step = np.sum(non_zero_hist[:-1]) // (n_bins - 1) - if step == 0: - return ch - n = np.empty_like(hist) - n[0] = step // 2 - n[1:] = hist[:-1] - table = (np.cumsum(n) // step).clip(0, 255).astype(np.uint8) - return table[ch] - - channels = [tune_channel(ch) for ch in cv2.split(img)] - out = cv2.merge(channels) - return out - - -def rotate_func(img, degree, fill=(0, 0, 0)): - """ - like PIL, rotate by degree, not radians - """ - H, W = img.shape[0], img.shape[1] - center = W / 2, H / 2 - M = cv2.getRotationMatrix2D(center, degree, 1) - out = cv2.warpAffine(img, M, (W, H), borderValue=fill) - return out - - -def solarize_func(img, thresh=128): - """ - same output as PIL.ImageOps.posterize - """ - table = np.array([el if el < thresh else 255 - el for el in range(256)]) - table = table.clip(0, 255).astype(np.uint8) - out = table[img] - return out - - -def color_func(img, factor): - """ - same output as PIL.ImageEnhance.Color - """ - ## implementation according to PIL definition, quite slow - # degenerate = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[:, :, np.newaxis] - # out = blend(degenerate, img, factor) - # M = ( - # np.eye(3) * factor - # + np.float32([0.114, 0.587, 0.299]).reshape(3, 1) * (1. 
- factor) - # )[np.newaxis, np.newaxis, :] - M = np.float32( - [[0.886, -0.114, -0.114], [-0.587, 0.413, -0.587], [-0.299, -0.299, 0.701]] - ) * factor + np.float32([[0.114], [0.587], [0.299]]) - out = np.matmul(img, M).clip(0, 255).astype(np.uint8) - return out - - -def contrast_func(img, factor): - """ - same output as PIL.ImageEnhance.Contrast - """ - mean = np.sum(np.mean(img, axis=(0, 1)) * np.array([0.114, 0.587, 0.299])) - table = ( - np.array([(el - mean) * factor + mean for el in range(256)]) - .clip(0, 255) - .astype(np.uint8) - ) - out = table[img] - return out - - -def brightness_func(img, factor): - """ - same output as PIL.ImageEnhance.Contrast - """ - table = (np.arange(256, dtype=np.float32) * factor).clip(0, 255).astype(np.uint8) - out = table[img] - return out - - -def sharpness_func(img, factor): - """ - The differences the this result and PIL are all on the 4 boundaries, the center - areas are same - """ - kernel = np.ones((3, 3), dtype=np.float32) - kernel[1][1] = 5 - kernel /= 13 - degenerate = cv2.filter2D(img, -1, kernel) - if factor == 0.0: - out = degenerate - elif factor == 1.0: - out = img - else: - out = img.astype(np.float32) - degenerate = degenerate.astype(np.float32)[1:-1, 1:-1, :] - out[1:-1, 1:-1, :] = degenerate + factor * (out[1:-1, 1:-1, :] - degenerate) - out = out.astype(np.uint8) - return out - - -def shear_x_func(img, factor, fill=(0, 0, 0)): - H, W = img.shape[0], img.shape[1] - M = np.float32([[1, factor, 0], [0, 1, 0]]) - out = cv2.warpAffine( - img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR - ).astype(np.uint8) - return out - - -def translate_x_func(img, offset, fill=(0, 0, 0)): - """ - same output as PIL.Image.transform - """ - H, W = img.shape[0], img.shape[1] - M = np.float32([[1, 0, -offset], [0, 1, 0]]) - out = cv2.warpAffine( - img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR - ).astype(np.uint8) - return out - - -def translate_y_func(img, offset, fill=(0, 0, 0)): - """ - same output as PIL.Image.transform - """ - H, W = img.shape[0], img.shape[1] - M = np.float32([[1, 0, 0], [0, 1, -offset]]) - out = cv2.warpAffine( - img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR - ).astype(np.uint8) - return out - - -def posterize_func(img, bits): - """ - same output as PIL.ImageOps.posterize - """ - out = np.bitwise_and(img, np.uint8(255 << (8 - bits))) - return out - - -def shear_y_func(img, factor, fill=(0, 0, 0)): - H, W = img.shape[0], img.shape[1] - M = np.float32([[1, 0, 0], [factor, 1, 0]]) - out = cv2.warpAffine( - img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR - ).astype(np.uint8) - return out - - -def cutout_func(img, pad_size, replace=(0, 0, 0)): - replace = np.array(replace, dtype=np.uint8) - H, W = img.shape[0], img.shape[1] - rh, rw = np.random.random(2) - pad_size = pad_size // 2 - ch, cw = int(rh * H), int(rw * W) - x1, x2 = max(ch - pad_size, 0), min(ch + pad_size, H) - y1, y2 = max(cw - pad_size, 0), min(cw + pad_size, W) - out = img.copy() - out[x1:x2, y1:y2, :] = replace - return out - - -### level to args -def enhance_level_to_args(MAX_LEVEL): - def level_to_args(level): - return ((level / MAX_LEVEL) * 1.8 + 0.1,) - - return level_to_args - - -def shear_level_to_args(MAX_LEVEL, replace_value): - def level_to_args(level): - level = (level / MAX_LEVEL) * 0.3 - if np.random.random() > 0.5: - level = -level - return (level, replace_value) - - return level_to_args - - -def translate_level_to_args(translate_const, MAX_LEVEL, replace_value): - def level_to_args(level): - level = (level / MAX_LEVEL) * 
float(translate_const) - if np.random.random() > 0.5: - level = -level - return (level, replace_value) - - return level_to_args - - -def cutout_level_to_args(cutout_const, MAX_LEVEL, replace_value): - def level_to_args(level): - level = int((level / MAX_LEVEL) * cutout_const) - return (level, replace_value) - - return level_to_args - - -def solarize_level_to_args(MAX_LEVEL): - def level_to_args(level): - level = int((level / MAX_LEVEL) * 256) - return (level,) - - return level_to_args - - -def none_level_to_args(level): - return () - - -def posterize_level_to_args(MAX_LEVEL): - def level_to_args(level): - level = int((level / MAX_LEVEL) * 4) - return (level,) - - return level_to_args - - -def rotate_level_to_args(MAX_LEVEL, replace_value): - def level_to_args(level): - level = (level / MAX_LEVEL) * 30 - if np.random.random() < 0.5: - level = -level - return (level, replace_value) - - return level_to_args - - -func_dict = { - "Identity": identity_func, - "AutoContrast": autocontrast_func, - "Equalize": equalize_func, - "Rotate": rotate_func, - "Solarize": solarize_func, - "Color": color_func, - "Contrast": contrast_func, - "Brightness": brightness_func, - "Sharpness": sharpness_func, - "ShearX": shear_x_func, - "TranslateX": translate_x_func, - "TranslateY": translate_y_func, - "Posterize": posterize_func, - "ShearY": shear_y_func, -} - -translate_const = 10 -MAX_LEVEL = 10 -replace_value = (128, 128, 128) -arg_dict = { - "Identity": none_level_to_args, - "AutoContrast": none_level_to_args, - "Equalize": none_level_to_args, - "Rotate": rotate_level_to_args(MAX_LEVEL, replace_value), - "Solarize": solarize_level_to_args(MAX_LEVEL), - "Color": enhance_level_to_args(MAX_LEVEL), - "Contrast": enhance_level_to_args(MAX_LEVEL), - "Brightness": enhance_level_to_args(MAX_LEVEL), - "Sharpness": enhance_level_to_args(MAX_LEVEL), - "ShearX": shear_level_to_args(MAX_LEVEL, replace_value), - "TranslateX": translate_level_to_args(translate_const, MAX_LEVEL, replace_value), - "TranslateY": translate_level_to_args(translate_const, MAX_LEVEL, replace_value), - "Posterize": posterize_level_to_args(MAX_LEVEL), - "ShearY": shear_level_to_args(MAX_LEVEL, replace_value), -} - - -class RandomAugment(object): - def __init__(self, N=2, M=10, isPIL=False, augs=[]): - self.N = N - self.M = M - self.isPIL = isPIL - if augs: - self.augs = augs - else: - self.augs = list(arg_dict.keys()) - - def get_random_ops(self): - sampled_ops = np.random.choice(self.augs, self.N) - return [(op, 0.5, self.M) for op in sampled_ops] - - def __call__(self, img): - if self.isPIL: - img = np.array(img) - ops = self.get_random_ops() - for name, prob, level in ops: - if np.random.random() > prob: - continue - args = arg_dict[name](level) - img = func_dict[name](img, *args) - return img - - -class VideoRandomAugment(object): - def __init__(self, N=2, M=10, p=0.0, tensor_in_tensor_out=True, augs=[]): - self.N = N - self.M = M - self.p = p - self.tensor_in_tensor_out = tensor_in_tensor_out - if augs: - self.augs = augs - else: - self.augs = list(arg_dict.keys()) - - def get_random_ops(self): - sampled_ops = np.random.choice(self.augs, self.N, replace=False) - return [(op, self.M) for op in sampled_ops] - - def __call__(self, frames): - assert ( - frames.shape[-1] == 3 - ), "Expecting last dimension for 3-channels RGB (b, h, w, c)." 
- - if self.tensor_in_tensor_out: - frames = frames.numpy().astype(np.uint8) - - num_frames = frames.shape[0] - - ops = num_frames * [self.get_random_ops()] - apply_or_not = num_frames * [np.random.random(size=self.N) > self.p] - - frames = torch.stack( - list(map(self._aug, frames, ops, apply_or_not)), dim=0 - ).float() - - return frames - - def _aug(self, img, ops, apply_or_not): - for i, (name, level) in enumerate(ops): - if not apply_or_not[i]: - continue - args = arg_dict[name](level) - img = func_dict[name](img, *args) - return torch.from_numpy(img) - - -if __name__ == "__main__": - a = RandomAugment() - img = np.random.randn(32, 32, 3) - a(img) diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/structures/masks.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/structures/masks.py deleted file mode 100644 index b3732a5f4619b91183b5bf4f307256eea7b8fb3d..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/structures/masks.py +++ /dev/null @@ -1,424 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import copy -import itertools -import numpy as np -from typing import Any, Iterator, List, Union -import pycocotools.mask as mask_utils -import torch - -from detectron2.layers import cat -from detectron2.layers.roi_align import ROIAlign - -from .boxes import Boxes - - -def polygon_area(x, y): - # Using the shoelace formula - # https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates - return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) - - -def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray: - """ - Args: - polygons (list[ndarray]): each array has shape (Nx2,) - height, width (int) - - Returns: - ndarray: a bool mask of shape (height, width) - """ - assert len(polygons) > 0, "COCOAPI does not support empty polygons" - rles = mask_utils.frPyObjects(polygons, height, width) - rle = mask_utils.merge(rles) - return mask_utils.decode(rle).astype(np.bool) - - -def rasterize_polygons_within_box( - polygons: List[np.ndarray], box: np.ndarray, mask_size: int -) -> torch.Tensor: - """ - Rasterize the polygons into a mask image and - crop the mask content in the given box. - The cropped mask is resized to (mask_size, mask_size). - - This function is used when generating training targets for mask head in Mask R-CNN. - Given original ground-truth masks for an image, new ground-truth mask - training targets in the size of `mask_size x mask_size` - must be provided for each predicted box. This function will be called to - produce such targets. - - Args: - polygons (list[ndarray[float]]): a list of polygons, which represents an instance. - box: 4-element numpy array - mask_size (int): - - Returns: - Tensor: BoolTensor of shape (mask_size, mask_size) - """ - # 1. Shift the polygons w.r.t the boxes - w, h = box[2] - box[0], box[3] - box[1] - - polygons = copy.deepcopy(polygons) - for p in polygons: - p[0::2] = p[0::2] - box[0] - p[1::2] = p[1::2] - box[1] - - # 2. Rescale the polygons to the new box size - ratio_h = mask_size / max(h, 0.1) - ratio_w = mask_size / max(w, 0.1) - - if ratio_h == ratio_w: - for p in polygons: - p *= ratio_h - else: - for p in polygons: - p[0::2] *= ratio_w - p[1::2] *= ratio_h - - # 3. 
Rasterize the polygons with coco api - mask = polygons_to_bitmask(polygons, mask_size, mask_size) - mask = torch.from_numpy(mask) - return mask - - -class BitMasks: - """ - This class stores the segmentation masks for all objects in one image, in - the form of bitmaps. - - Attributes: - tensor: bool Tensor of N,H,W, representing N instances in the image. - """ - - def __init__(self, tensor: Union[torch.Tensor, np.ndarray]): - """ - Args: - tensor: bool Tensor of N,H,W, representing N instances in the image. - """ - device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") - tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device) - assert tensor.dim() == 3, tensor.size() - self.image_size = tensor.shape[1:] - self.tensor = tensor - - def to(self, device: str) -> "BitMasks": - return BitMasks(self.tensor.to(device)) - - @property - def device(self) -> torch.device: - return self.tensor.device - - def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks": - """ - Returns: - BitMasks: Create a new :class:`BitMasks` by indexing. - - The following usage are allowed: - - 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask. - 2. `new_masks = masks[2:10]`: return a slice of masks. - 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor - with `length = len(masks)`. Nonzero elements in the vector will be selected. - - Note that the returned object might share storage with this object, - subject to Pytorch's indexing semantics. - """ - if isinstance(item, int): - return BitMasks(self.tensor[item].view(1, -1)) - m = self.tensor[item] - assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format( - item, m.shape - ) - return BitMasks(m) - - def __iter__(self) -> torch.Tensor: - yield from self.tensor - - def __repr__(self) -> str: - s = self.__class__.__name__ + "(" - s += "num_instances={})".format(len(self.tensor)) - return s - - def __len__(self) -> int: - return self.tensor.shape[0] - - def nonempty(self) -> torch.Tensor: - """ - Find masks that are non-empty. - - Returns: - Tensor: a BoolTensor which represents - whether each mask is empty (False) or non-empty (True). - """ - return self.tensor.flatten(1).any(dim=1) - - @staticmethod - def from_polygon_masks( - polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int - ) -> "BitMasks": - """ - Args: - polygon_masks (list[list[ndarray]] or PolygonMasks) - height, width (int) - """ - if isinstance(polygon_masks, PolygonMasks): - polygon_masks = polygon_masks.polygons - masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks] - return BitMasks(torch.stack([torch.from_numpy(x) for x in masks])) - - def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: - """ - Crop each bitmask by the given box, and resize results to (mask_size, mask_size). - This can be used to prepare training targets for Mask R-CNN. - It has less reconstruction error compared to rasterization with polygons. - However we observe no difference in accuracy, - but BitMasks requires more memory to store all the masks. - - Args: - boxes (Tensor): Nx4 tensor storing the boxes for each mask - mask_size (int): the size of the rasterized mask. - - Returns: - Tensor: - A bool tensor of shape (N, mask_size, mask_size), where - N is the number of predicted boxes for this image. 
- """ - assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) - device = self.tensor.device - - batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None] - rois = torch.cat([batch_inds, boxes], dim=1) # Nx5 - - bit_masks = self.tensor.to(dtype=torch.float32) - rois = rois.to(device=device) - output = ( - ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True) - .forward(bit_masks[:, None, :, :], rois) - .squeeze(1) - ) - output = output >= 0.5 - return output - - def get_bounding_boxes(self) -> None: - # not needed now - raise NotImplementedError - - @staticmethod - def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks": - """ - Concatenates a list of BitMasks into a single BitMasks - - Arguments: - bitmasks_list (list[BitMasks]) - - Returns: - BitMasks: the concatenated BitMasks - """ - assert isinstance(bitmasks_list, (list, tuple)) - assert len(bitmasks_list) > 0 - assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list) - - cat_bitmasks = type(bitmasks_list[0])(cat([bm.tensor for bm in bitmasks_list], dim=0)) - return cat_bitmasks - - -class PolygonMasks: - """ - This class stores the segmentation masks for all objects in one image, in the form of polygons. - - Attributes: - polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon. - """ - - def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]): - """ - Arguments: - polygons (list[list[np.ndarray]]): The first - level of the list correspond to individual instances, - the second level to all the polygons that compose the - instance, and the third level to the polygon coordinates. - The third level array should have the format of - [x0, y0, x1, y1, ..., xn, yn] (n >= 3). - """ - assert isinstance(polygons, list), ( - "Cannot create PolygonMasks: Expect a list of list of polygons per image. " - "Got '{}' instead.".format(type(polygons)) - ) - - def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray: - # Use float64 for higher precision, because why not? - # Always put polygons on CPU (self.to is a no-op) since they - # are supposed to be small tensors. - # May need to change this assumption if GPU placement becomes useful - if isinstance(t, torch.Tensor): - t = t.cpu().numpy() - return np.asarray(t).astype("float64") - - def process_polygons( - polygons_per_instance: List[Union[torch.Tensor, np.ndarray]] - ) -> List[np.ndarray]: - assert isinstance(polygons_per_instance, list), ( - "Cannot create polygons: Expect a list of polygons per instance. " - "Got '{}' instead.".format(type(polygons_per_instance)) - ) - # transform the polygon to a tensor - polygons_per_instance = [_make_array(p) for p in polygons_per_instance] - for polygon in polygons_per_instance: - assert len(polygon) % 2 == 0 and len(polygon) >= 6 - return polygons_per_instance - - self.polygons: List[List[np.ndarray]] = [ - process_polygons(polygons_per_instance) for polygons_per_instance in polygons - ] - - def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks": - return self - - @property - def device(self) -> torch.device: - return torch.device("cpu") - - def get_bounding_boxes(self) -> Boxes: - """ - Returns: - Boxes: tight bounding boxes around polygon masks. 
- """ - boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32) - for idx, polygons_per_instance in enumerate(self.polygons): - minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32) - maxxy = torch.zeros(2, dtype=torch.float32) - for polygon in polygons_per_instance: - coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32) - minxy = torch.min(minxy, torch.min(coords, dim=0).values) - maxxy = torch.max(maxxy, torch.max(coords, dim=0).values) - boxes[idx, :2] = minxy - boxes[idx, 2:] = maxxy - return Boxes(boxes) - - def nonempty(self) -> torch.Tensor: - """ - Find masks that are non-empty. - - Returns: - Tensor: - a BoolTensor which represents whether each mask is empty (False) or not (True). - """ - keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons] - return torch.from_numpy(np.asarray(keep, dtype=np.bool)) - - def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks": - """ - Support indexing over the instances and return a `PolygonMasks` object. - `item` can be: - - 1. An integer. It will return an object with only one instance. - 2. A slice. It will return an object with the selected instances. - 3. A list[int]. It will return an object with the selected instances, - correpsonding to the indices in the list. - 4. A vector mask of type BoolTensor, whose length is num_instances. - It will return an object with the instances whose mask is nonzero. - """ - if isinstance(item, int): - selected_polygons = [self.polygons[item]] - elif isinstance(item, slice): - selected_polygons = self.polygons[item] - elif isinstance(item, list): - selected_polygons = [self.polygons[i] for i in item] - elif isinstance(item, torch.Tensor): - # Polygons is a list, so we have to move the indices back to CPU. - if item.dtype == torch.bool: - assert item.dim() == 1, item.shape - item = item.nonzero().squeeze(1).cpu().numpy().tolist() - elif item.dtype in [torch.int32, torch.int64]: - item = item.cpu().numpy().tolist() - else: - raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype)) - selected_polygons = [self.polygons[i] for i in item] - return PolygonMasks(selected_polygons) - - def __iter__(self) -> Iterator[List[np.ndarray]]: - """ - Yields: - list[ndarray]: the polygons for one instance. - Each Tensor is a float64 vector representing a polygon. - """ - return iter(self.polygons) - - def __repr__(self) -> str: - s = self.__class__.__name__ + "(" - s += "num_instances={})".format(len(self.polygons)) - return s - - def __len__(self) -> int: - return len(self.polygons) - - def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: - """ - Crop each mask by the given box, and resize results to (mask_size, mask_size). - This can be used to prepare training targets for Mask R-CNN. - - Args: - boxes (Tensor): Nx4 tensor storing the boxes for each mask - mask_size (int): the size of the rasterized mask. - - Returns: - Tensor: A bool tensor of shape (N, mask_size, mask_size), where - N is the number of predicted boxes for this image. 
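        Example (a minimal sketch added for illustration; the triangle
        coordinates and boxes are invented, using the numpy/torch imports at
        the top of this file):

            polys = PolygonMasks([[np.array([0.0, 0.0, 10.0, 0.0, 10.0, 10.0])],
                                  [np.array([5.0, 5.0, 20.0, 5.0, 20.0, 20.0])]])
            boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 20.0, 20.0]])
            targets = polys.crop_and_resize(boxes, mask_size=28)
            # targets: bool tensor of shape (2, 28, 28), each mask rasterized inside its box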
- """ - assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) - - device = boxes.device - # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise - # (several small tensors for representing a single instance mask) - boxes = boxes.to(torch.device("cpu")) - - results = [ - rasterize_polygons_within_box(poly, box.numpy(), mask_size) - for poly, box in zip(self.polygons, boxes) - ] - """ - poly: list[list[float]], the polygons for one instance - box: a tensor of shape (4,) - """ - if len(results) == 0: - return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device) - return torch.stack(results, dim=0).to(device=device) - - def area(self): - """ - Computes area of the mask. - Only works with Polygons, using the shoelace formula: - https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates - - Returns: - Tensor: a vector, area for each instance - """ - - area = [] - for polygons_per_instance in self.polygons: - area_per_instance = 0 - for p in polygons_per_instance: - area_per_instance += polygon_area(p[0::2], p[1::2]) - area.append(area_per_instance) - - return torch.tensor(area) - - @staticmethod - def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks": - """ - Concatenates a list of PolygonMasks into a single PolygonMasks - - Arguments: - polymasks_list (list[PolygonMasks]) - - Returns: - PolygonMasks: the concatenated PolygonMasks - """ - assert isinstance(polymasks_list, (list, tuple)) - assert len(polymasks_list) > 0 - assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list) - - cat_polymasks = type(polymasks_list[0])( - list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list)) - ) - return cat_polymasks diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/README.md b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/README.md deleted file mode 100644 index 30a41f008776a8755ec4dc19f4be07c514cd3794..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/README.md +++ /dev/null @@ -1,31 +0,0 @@ - -Here are a few projects that are built on detectron2. -They are examples of how to use detectron2 as a library, to make your projects more -maintainable. - -## Projects by Facebook - -Note that these are research projects, and therefore may not have the same level -of support or stability of detectron2. - -+ [DensePose: Dense Human Pose Estimation In The Wild](DensePose) -+ [Scale-Aware Trident Networks for Object Detection](TridentNet) -+ [TensorMask: A Foundation for Dense Object Segmentation](TensorMask) -+ [Mesh R-CNN](https://github.com/facebookresearch/meshrcnn) -+ [PointRend: Image Segmentation as Rendering](PointRend) -+ [Momentum Contrast for Unsupervised Visual Representation Learning](https://github.com/facebookresearch/moco/tree/master/detection) - - -## External Projects - -External projects in the community that use detectron2: - - - -+ [VoVNet backbones](https://github.com/youngwanLEE/vovnet-detectron2). -+ [AdelaiDet](https://github.com/aim-uofa/adet), a detection toolbox from the Universtiy of Adelaide. 
-+ [CenterMask : Real-Time Anchor-Free Instance Segmentation](https://github.com/youngwanLEE/centermask2) diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/ops/layer_norm.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/ops/layer_norm.py deleted file mode 100644 index 2e20c7475f9fc71e69cf0e2a44cac0508ae7fa8f..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/ops/layer_norm.py +++ /dev/null @@ -1,21 +0,0 @@ -# -------------------------------------------------------- -# OpenVQA -# Written by Yuhao Cui https://github.com/cuiyuhao1996 -# -------------------------------------------------------- - -import torch.nn as nn -import torch - -class LayerNorm(nn.Module): - def __init__(self, size, eps=1e-6): - super(LayerNorm, self).__init__() - self.eps = eps - - self.a_2 = nn.Parameter(torch.ones(size)) - self.b_2 = nn.Parameter(torch.zeros(size)) - - def forward(self, x): - mean = x.mean(-1, keepdim=True) - std = x.std(-1, keepdim=True) - - return self.a_2 * (x - mean) / (std + self.eps) + self.b_2 diff --git a/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/permutation_iterator_base.h b/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/permutation_iterator_base.h deleted file mode 100644 index 2610cfdfaffdeb50ad085f90d4ff9b85920ede4f..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/permutation_iterator_base.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include -#include -#include -#include - -namespace thrust -{ - -template class permutation_iterator; - - -namespace detail -{ - -template - struct permutation_iterator_base -{ - typedef typename thrust::iterator_system::type System1; - typedef typename thrust::iterator_system::type System2; - - typedef thrust::iterator_adaptor< - permutation_iterator, - IndexIterator, - typename thrust::iterator_value::type, - typename detail::minimum_system::type, - thrust::use_default, - typename thrust::iterator_reference::type - > type; -}; // end permutation_iterator_base - -} // end detail - -} // end thrust - diff --git a/spaces/CVPR/transfiner/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py b/spaces/CVPR/transfiner/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py deleted file mode 100644 index df7a2aedf480ed8dc4aa3645e37420e9b893fae4..0000000000000000000000000000000000000000 --- a/spaces/CVPR/transfiner/configs/new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ.py +++ /dev/null @@ -1,72 +0,0 @@ -import detectron2.data.transforms as T -from detectron2.config.lazy import LazyCall as L -from detectron2.layers.batch_norm import NaiveSyncBatchNorm -from detectron2.solver import WarmupParamScheduler -from fvcore.common.param_scheduler import MultiStepParamScheduler - -from ..common.data.coco import dataloader -from ..common.models.mask_rcnn_fpn import model -from ..common.optim import SGD as optimizer -from ..common.train import train - -# train from scratch -train.init_checkpoint = "" -train.amp.enabled = True -train.ddp.fp16_compression = True -model.backbone.bottom_up.freeze_at = 0 - -# SyncBN -# fmt: off -model.backbone.bottom_up.stem.norm = \ - model.backbone.bottom_up.stages.norm = \ - model.backbone.norm = "SyncBN" - -# Using NaiveSyncBatchNorm becase heads may have empty input. That is not supported by -# torch.nn.SyncBatchNorm. We can remove this after -# https://github.com/pytorch/pytorch/issues/36530 is fixed. -model.roi_heads.box_head.conv_norm = \ - model.roi_heads.mask_head.conv_norm = lambda c: NaiveSyncBatchNorm(c, - stats_mode="N") -# fmt: on - -# 2conv in RPN: -# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/modeling/architecture/heads.py#L95-L97 # noqa: E501, B950 -model.proposal_generator.head.conv_dims = [-1, -1] - -# 4conv1fc box head -model.roi_heads.box_head.conv_dims = [256, 256, 256, 256] -model.roi_heads.box_head.fc_dims = [1024] - -# resize_and_crop_image in: -# https://github.com/tensorflow/tpu/blob/b24729de804fdb751b06467d3dce0637fa652060/models/official/detection/utils/input_utils.py#L127 # noqa: E501, B950 -image_size = 1024 -dataloader.train.mapper.augmentations = [ - L(T.ResizeScale)( - min_scale=0.1, max_scale=2.0, target_height=image_size, target_width=image_size - ), - L(T.FixedSizeCrop)(crop_size=(image_size, image_size)), - L(T.RandomFlip)(horizontal=True), -] - -# recompute boxes due to cropping -dataloader.train.mapper.recompute_boxes = True - -# larger batch-size. -dataloader.train.total_batch_size = 64 - -# Equivalent to 100 epochs. 
-# 100 ep = 184375 iters * 64 images/iter / 118000 images/ep -train.max_iter = 184375 - -lr_multiplier = L(WarmupParamScheduler)( - scheduler=L(MultiStepParamScheduler)( - values=[1.0, 0.1, 0.01], - milestones=[163889, 177546], - num_updates=train.max_iter, - ), - warmup_length=500 / train.max_iter, - warmup_factor=0.067, -) - -optimizer.lr = 0.1 -optimizer.weight_decay = 4e-5 diff --git a/spaces/ChandraMohanNayal/AutoGPT/tests/unit/test_browse_scrape_links.py b/spaces/ChandraMohanNayal/AutoGPT/tests/unit/test_browse_scrape_links.py deleted file mode 100644 index 0a3340e7397a997da96b8ab9828954230e1a3c20..0000000000000000000000000000000000000000 --- a/spaces/ChandraMohanNayal/AutoGPT/tests/unit/test_browse_scrape_links.py +++ /dev/null @@ -1,118 +0,0 @@ -# Generated by CodiumAI - -# Dependencies: -# pip install pytest-mock -import pytest - -from autogpt.commands.web_requests import scrape_links - -""" -Code Analysis - -Objective: -The objective of the 'scrape_links' function is to scrape hyperlinks from a -given URL and return them in a formatted way. - -Inputs: -- url: a string representing the URL to be scraped. - -Flow: -1. Send a GET request to the given URL using the requests library and the user agent header from the config file. -2. Check if the response contains an HTTP error. If it does, return "error". -3. Parse the HTML content of the response using the BeautifulSoup library. -4. Remove any script and style tags from the parsed HTML. -5. Extract all hyperlinks from the parsed HTML using the 'extract_hyperlinks' function. -6. Format the extracted hyperlinks using the 'format_hyperlinks' function. -7. Return the formatted hyperlinks. - -Outputs: -- A list of formatted hyperlinks. - -Additional aspects: -- The function uses the 'requests' and 'BeautifulSoup' libraries to send HTTP -requests and parse HTML content, respectively. -- The 'extract_hyperlinks' function is called to extract hyperlinks from the parsed HTML. -- The 'format_hyperlinks' function is called to format the extracted hyperlinks. -- The function checks for HTTP errors and returns "error" if any are found. -""" - - -class TestScrapeLinks: - # Tests that the function returns a list of formatted hyperlinks when - # provided with a valid url that returns a webpage with hyperlinks. - def test_valid_url_with_hyperlinks(self): - url = "https://www.google.com" - result = scrape_links(url) - assert len(result) > 0 - assert isinstance(result, list) - assert isinstance(result[0], str) - - # Tests that the function returns correctly formatted hyperlinks when given a valid url. - def test_valid_url(self, mocker): - # Mock the requests.get() function to return a response with sample HTML containing hyperlinks - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = ( - "Google" - ) - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with a valid URL - result = scrape_links("https://www.example.com") - - # Assert that the function returns correctly formatted hyperlinks - assert result == ["Google (https://www.google.com)"] - - # Tests that the function returns "error" when given an invalid url. 
- def test_invalid_url(self, mocker): - # Mock the requests.get() function to return an HTTP error response - mock_response = mocker.Mock() - mock_response.status_code = 404 - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with an invalid URL - result = scrape_links("https://www.invalidurl.com") - - # Assert that the function returns "error" - assert "Error:" in result - - # Tests that the function returns an empty list when the html contains no hyperlinks. - def test_no_hyperlinks(self, mocker): - # Mock the requests.get() function to return a response with sample HTML containing no hyperlinks - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = "

<html><body><p>No hyperlinks here</p></body></html>
      " - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with a URL containing no hyperlinks - result = scrape_links("https://www.example.com") - - # Assert that the function returns an empty list - assert result == [] - - # Tests that scrape_links() correctly extracts and formats hyperlinks from - # a sample HTML containing a few hyperlinks. - def test_scrape_links_with_few_hyperlinks(self, mocker): - # Mock the requests.get() function to return a response with a sample HTML containing hyperlinks - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = """ - - - - - - - - """ - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function being tested - result = scrape_links("https://www.example.com") - - # Assert that the function returns a list of formatted hyperlinks - assert isinstance(result, list) - assert len(result) == 3 - assert result[0] == "Google (https://www.google.com)" - assert result[1] == "GitHub (https://github.com)" - assert result[2] == "CodiumAI (https://www.codium.ai)" diff --git a/spaces/ClassCat/mnist-classification/README.md b/spaces/ClassCat/mnist-classification/README.md deleted file mode 100644 index 0b9c32de71c5623878cda296715cca5f3196a90d..0000000000000000000000000000000000000000 --- a/spaces/ClassCat/mnist-classification/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Mnist Classification -emoji: 📊 -colorFrom: yellow -colorTo: red -sdk: gradio -sdk_version: 3.16.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/CyberPeace-Institute/SecureBERT-NER-Space/README.md b/spaces/CyberPeace-Institute/SecureBERT-NER-Space/README.md deleted file mode 100644 index dd893d3d77d03db70dda557ba58506d635940589..0000000000000000000000000000000000000000 --- a/spaces/CyberPeace-Institute/SecureBERT-NER-Space/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: SecureBERT NER Space -emoji: 🏢 -colorFrom: gray -colorTo: gray -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Cyril666/my_abi/modules/model_abinet_iter.py b/spaces/Cyril666/my_abi/modules/model_abinet_iter.py deleted file mode 100644 index 6588890570a1180ea32f7969cefd4b59c25409a7..0000000000000000000000000000000000000000 --- a/spaces/Cyril666/my_abi/modules/model_abinet_iter.py +++ /dev/null @@ -1,34 +0,0 @@ -import torch -import torch.nn as nn -from fastai.vision import * - -from .model_vision import BaseVision -from .model_language import BCNLanguage -from .model_alignment import BaseAlignment - - -class ABINetIterModel(nn.Module): - def __init__(self, config): - super().__init__() - self.iter_size = ifnone(config.model_iter_size, 1) - self.max_length = config.dataset_max_length + 1 # additional stop token - self.vision = BaseVision(config) - self.language = BCNLanguage(config) - self.alignment = BaseAlignment(config) - - def forward(self, images, *args): - v_res = self.vision(images) - a_res = v_res - all_l_res, all_a_res = [], [] - for _ in range(self.iter_size): - tokens = torch.softmax(a_res['logits'], dim=-1) - lengths = a_res['pt_lengths'] - lengths.clamp_(2, self.max_length) # TODO:move to langauge model - l_res = self.language(tokens, lengths) - all_l_res.append(l_res) - a_res = self.alignment(l_res['feature'], v_res['feature']) - all_a_res.append(a_res) 
- if self.training: - return all_a_res, all_l_res, v_res - else: - return a_res, all_l_res[-1], v_res diff --git a/spaces/DESUCLUB/BLLAMA/generate.py b/spaces/DESUCLUB/BLLAMA/generate.py deleted file mode 100644 index ea3a9318b3d2901d6e08fdf23f242263c4782848..0000000000000000000000000000000000000000 --- a/spaces/DESUCLUB/BLLAMA/generate.py +++ /dev/null @@ -1,200 +0,0 @@ -import torch -from peft import PeftModel -import transformers -import gradio as gr -import BLIPIntepret -assert ( - "LlamaTokenizer" in transformers._import_structure["models.llama"] -), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git" -from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig - -tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf") - -BASE_MODEL = "decapoda-research/llama-7b-hf" -LORA_WEIGHTS = "tloen/alpaca-lora-7b" - -if torch.cuda.is_available(): - device = "cuda" - print('Using GPU') -else: - device = "cpu" - -try: - if torch.backends.mps.is_available(): - device = "mps" -except: - pass - -if device == "cuda": - model = LlamaForCausalLM.from_pretrained( - BASE_MODEL, - load_in_8bit=True, - torch_dtype=torch.float16, - device_map="auto", - ) - model = PeftModel.from_pretrained(model, LORA_WEIGHTS, torch_dtype=torch.float16) -elif device == "mps": - model = LlamaForCausalLM.from_pretrained( - BASE_MODEL, - device_map={"": device}, - torch_dtype=torch.float16, - ) - model = PeftModel.from_pretrained( - model, - LORA_WEIGHTS, - device_map={"": device}, - torch_dtype=torch.float16, - ) -else: - model = LlamaForCausalLM.from_pretrained( - BASE_MODEL, device_map={"": device}, low_cpu_mem_usage=True - ) - model = PeftModel.from_pretrained( - model, - LORA_WEIGHTS, - device_map={"": device}, - ) - -BLIPmodel,BLIPprocessor = BLIPIntepret.init_BLIP(device) -def generate_prompt(instruction, input=None, context = None): - if context and input: - print('Context and Input combined') - return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. - -### Instruction: -{context} -{instruction} - -### Input: -{input} - - ### Response:""" - - elif input: - print('Input only mode') - return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. - -### Instruction: -{instruction} - -### Input: -{input} - -### Response:""" - elif context: - print('Context only mode') - return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. - -### Instruction: -{context} -{instruction} - -### Response:""" - - else: - print('Instruction Mode') - return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request. - -### Instruction: -{instruction} - -### Response:""" - - -model.eval() -if torch.__version__ >= "2": - model = torch.compile(model) - - - - -def evaluate( - instruction, - input=None, - image = None, - temperature=0.1, - top_p=0.75, - top_k=40, - num_beams=4, - max_new_tokens=128, - **kwargs, -): - if image is None: - context = None - else: - context = BLIPIntepret.infer_BLIP2(BLIPmodel,BLIPprocessor, image, device) - context+= '\nThe above are the context of an image that you will use alongside the response.' 
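        # The BLIP-2 caption (when an image is given) is folded into the instruction
        # as extra context before the Alpaca-style prompt is assembled below.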
- prompt = generate_prompt(instruction, input, context) - inputs = tokenizer(prompt, return_tensors="pt") - input_ids = inputs["input_ids"].to(device) - generation_config = GenerationConfig( - temperature=temperature, - top_p=top_p, - top_k=top_k, - num_beams=num_beams, - **kwargs, - ) - with torch.no_grad(): - generation_output = model.generate( - input_ids=input_ids, - generation_config=generation_config, - return_dict_in_generate=True, - output_scores=True, - max_new_tokens=max_new_tokens, - ) - s = generation_output.sequences[0] - output = tokenizer.decode(s) - return output.split("### Response:")[1].strip() - - -gr.Interface( - fn=evaluate, - inputs=[ - gr.components.Textbox( - lines=2, label="Instruction", placeholder="Tell me about alpacas." - ), - gr.components.Textbox(lines=2, label="Input", placeholder="none"), - gr.components.Image(shape = (200,200), placeholder = "Image"), - gr.components.Slider(minimum=0, maximum=1, value=0.1, label="Temperature"), - gr.components.Slider(minimum=0, maximum=1, value=0.75, label="Top p"), - gr.components.Slider(minimum=0, maximum=100, step=1, value=40, label="Top k"), - gr.components.Slider(minimum=1, maximum=4, step=1, value=4, label="Beams"), - gr.components.Slider( - minimum=1, maximum=2000, step=1, value=128, label="Max tokens" - ), - ], - outputs=[ - gr.inputs.Textbox( - lines=5, - label="Output", - ) - ], - title="🦙🌲 BLLAMA", - description="BLLAMA is a pipeline that uses both ALPACA-LORA as well as BLIP-2 to allow LLAMA to generate text in the context of simple images. You can visit the Github repo [here](https://github.com/DESU-CLUB/BLLAMA)\n\n\ -The original ALPACA-LORA can be found [here](https://github.com/tloen/alpaca-lora) and the BLIP-2 model can be found on huggingface.\ -\n## Credits\n\ -I would like to credit tloen, the creator of ALPACA-LORA, as well as huggingface for their own implementation of LLAMA and BLIP-2. \ -\nI would also like to credit the original creators of [LLAMA](https://github.com/facebookresearch/llama), Meta AI, as well as Stanford University, who created [ALPACA](https://github.com/tatsu-lab/stanford_alpaca)\ - ", -).launch() - -# Old testing code follows. - -""" -if __name__ == "__main__": - # testing code for readme - for instruction in [ - "Tell me about alpacas.", - "Tell me about the president of Mexico in 2019.", - "Tell me about the king of France in 2019.", - "List all Canadian provinces in alphabetical order.", - "Write a Python program that prints the first 10 Fibonacci numbers.", - "Write a program that prints the numbers from 1 to 100. But for multiples of three print 'Fizz' instead of the number and for the multiples of five print 'Buzz'. 
For numbers which are multiples of both three and five print 'FizzBuzz'.", - "Tell me five words that rhyme with 'shock'.", - "Translate the sentence 'I have no mouth but I must scream' into Spanish.", - "Count up from 1 to 500.", - ]: - print("Instruction:", instruction) - print("Response:", evaluate(instruction)) - print() -""" diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dotenv/variables.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dotenv/variables.py deleted file mode 100644 index 667f2f26ff2182ecdfc5b809ba97a6cf1d1be13a..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dotenv/variables.py +++ /dev/null @@ -1,86 +0,0 @@ -import re -from abc import ABCMeta, abstractmethod -from typing import Iterator, Mapping, Optional, Pattern - -_posix_variable: Pattern[str] = re.compile( - r""" - \$\{ - (?P[^\}:]*) - (?::- - (?P[^\}]*) - )? - \} - """, - re.VERBOSE, -) - - -class Atom(metaclass=ABCMeta): - def __ne__(self, other: object) -> bool: - result = self.__eq__(other) - if result is NotImplemented: - return NotImplemented - return not result - - @abstractmethod - def resolve(self, env: Mapping[str, Optional[str]]) -> str: ... - - -class Literal(Atom): - def __init__(self, value: str) -> None: - self.value = value - - def __repr__(self) -> str: - return f"Literal(value={self.value})" - - def __eq__(self, other: object) -> bool: - if not isinstance(other, self.__class__): - return NotImplemented - return self.value == other.value - - def __hash__(self) -> int: - return hash((self.__class__, self.value)) - - def resolve(self, env: Mapping[str, Optional[str]]) -> str: - return self.value - - -class Variable(Atom): - def __init__(self, name: str, default: Optional[str]) -> None: - self.name = name - self.default = default - - def __repr__(self) -> str: - return f"Variable(name={self.name}, default={self.default})" - - def __eq__(self, other: object) -> bool: - if not isinstance(other, self.__class__): - return NotImplemented - return (self.name, self.default) == (other.name, other.default) - - def __hash__(self) -> int: - return hash((self.__class__, self.name, self.default)) - - def resolve(self, env: Mapping[str, Optional[str]]) -> str: - default = self.default if self.default is not None else "" - result = env.get(self.name, default) - return result if result is not None else "" - - -def parse_variables(value: str) -> Iterator[Atom]: - cursor = 0 - - for match in _posix_variable.finditer(value): - (start, end) = match.span() - name = match["name"] - default = match["default"] - - if start > cursor: - yield Literal(value=value[cursor:start]) - - yield Variable(name=name, default=default) - cursor = end - - length = len(value) - if cursor < length: - yield Literal(value=value[cursor:length]) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/misc/classifyTools.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/misc/classifyTools.py deleted file mode 100644 index e46386230e5c826486963cf47640ae0a920377cb..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/misc/classifyTools.py +++ /dev/null @@ -1,172 +0,0 @@ -""" fontTools.misc.classifyTools.py -- tools for classifying things. -""" - - -class Classifier(object): - - """ - Main Classifier object, used to classify things into similar sets. 
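    A minimal usage sketch (illustrative; the integer items are arbitrary):

        c = Classifier()
        c.add({1, 2, 3})
        c.add({3, 4})
        assert c.getClasses() == [{1, 2}, {3}, {4}]   # 1 and 2 always co-occur
        assert c.getMapping()[3] == {3}               # 3 now forms its own class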
- """ - - def __init__(self, sort=True): - - self._things = set() # set of all things known so far - self._sets = [] # list of class sets produced so far - self._mapping = {} # map from things to their class set - self._dirty = False - self._sort = sort - - def add(self, set_of_things): - """ - Add a set to the classifier. Any iterable is accepted. - """ - if not set_of_things: - return - - self._dirty = True - - things, sets, mapping = self._things, self._sets, self._mapping - - s = set(set_of_things) - intersection = s.intersection(things) # existing things - s.difference_update(intersection) # new things - difference = s - del s - - # Add new class for new things - if difference: - things.update(difference) - sets.append(difference) - for thing in difference: - mapping[thing] = difference - del difference - - while intersection: - # Take one item and process the old class it belongs to - old_class = mapping[next(iter(intersection))] - old_class_intersection = old_class.intersection(intersection) - - # Update old class to remove items from new set - old_class.difference_update(old_class_intersection) - - # Remove processed items from todo list - intersection.difference_update(old_class_intersection) - - # Add new class for the intersection with old class - sets.append(old_class_intersection) - for thing in old_class_intersection: - mapping[thing] = old_class_intersection - del old_class_intersection - - def update(self, list_of_sets): - """ - Add a a list of sets to the classifier. Any iterable of iterables is accepted. - """ - for s in list_of_sets: - self.add(s) - - def _process(self): - if not self._dirty: - return - - # Do any deferred processing - sets = self._sets - self._sets = [s for s in sets if s] - - if self._sort: - self._sets = sorted(self._sets, key=lambda s: (-len(s), sorted(s))) - - self._dirty = False - - # Output methods - - def getThings(self): - """Returns the set of all things known so far. - - The return value belongs to the Classifier object and should NOT - be modified while the classifier is still in use. - """ - self._process() - return self._things - - def getMapping(self): - """Returns the mapping from things to their class set. - - The return value belongs to the Classifier object and should NOT - be modified while the classifier is still in use. - """ - self._process() - return self._mapping - - def getClasses(self): - """Returns the list of class sets. - - The return value belongs to the Classifier object and should NOT - be modified while the classifier is still in use. - """ - self._process() - return self._sets - - -def classify(list_of_sets, sort=True): - """ - Takes a iterable of iterables (list of sets from here on; but any - iterable works.), and returns the smallest list of sets such that - each set, is either a subset, or is disjoint from, each of the input - sets. - - In other words, this function classifies all the things present in - any of the input sets, into similar classes, based on which sets - things are a member of. - - If sort=True, return class sets are sorted by decreasing size and - their natural sort order within each class size. Otherwise, class - sets are returned in the order that they were identified, which is - generally not significant. 
- - >>> classify([]) == ([], {}) - True - >>> classify([[]]) == ([], {}) - True - >>> classify([[], []]) == ([], {}) - True - >>> classify([[1]]) == ([{1}], {1: {1}}) - True - >>> classify([[1,2]]) == ([{1, 2}], {1: {1, 2}, 2: {1, 2}}) - True - >>> classify([[1],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}}) - True - >>> classify([[1,2],[2]]) == ([{1}, {2}], {1: {1}, 2: {2}}) - True - >>> classify([[1,2],[2,4]]) == ([{1}, {2}, {4}], {1: {1}, 2: {2}, 4: {4}}) - True - >>> classify([[1,2],[2,4,5]]) == ( - ... [{4, 5}, {1}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}}) - True - >>> classify([[1,2],[2,4,5]], sort=False) == ( - ... [{1}, {4, 5}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}}) - True - >>> classify([[1,2,9],[2,4,5]], sort=False) == ( - ... [{1, 9}, {4, 5}, {2}], {1: {1, 9}, 2: {2}, 4: {4, 5}, 5: {4, 5}, - ... 9: {1, 9}}) - True - >>> classify([[1,2,9,15],[2,4,5]], sort=False) == ( - ... [{1, 9, 15}, {4, 5}, {2}], {1: {1, 9, 15}, 2: {2}, 4: {4, 5}, - ... 5: {4, 5}, 9: {1, 9, 15}, 15: {1, 9, 15}}) - True - >>> classes, mapping = classify([[1,2,9,15],[2,4,5],[15,5]], sort=False) - >>> set([frozenset(c) for c in classes]) == set( - ... [frozenset(s) for s in ({1, 9}, {4}, {2}, {5}, {15})]) - True - >>> mapping == {1: {1, 9}, 2: {2}, 4: {4}, 5: {5}, 9: {1, 9}, 15: {15}} - True - """ - classifier = Classifier(sort=sort) - classifier.update(list_of_sets) - return classifier.getClasses(), classifier.getMapping() - - -if __name__ == "__main__": - import sys, doctest - - sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/misc/transform.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/misc/transform.py deleted file mode 100644 index f85b54b73121589cb8de284a5e8efe9d20fefa17..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/misc/transform.py +++ /dev/null @@ -1,495 +0,0 @@ -"""Affine 2D transformation matrix class. - -The Transform class implements various transformation matrix operations, -both on the matrix itself, as well as on 2D coordinates. - -Transform instances are effectively immutable: all methods that operate on the -transformation itself always return a new instance. This has as the -interesting side effect that Transform instances are hashable, ie. they can be -used as dictionary keys. - -This module exports the following symbols: - -Transform - this is the main class -Identity - Transform instance set to the identity transformation -Offset - Convenience function that returns a translating transformation -Scale - Convenience function that returns a scaling transformation - -The DecomposedTransform class implements a transformation with separate -translate, rotation, scale, skew, and transformation-center components. 
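A short, illustrative round-trip between the two representations (the values
are chosen so the decomposition happens to be exact):

    >>> d = DecomposedTransform.fromTransform(Transform(2, 0, 0, 3, 4, 5))
    >>> (d.translateX, d.translateY, d.rotation, d.scaleX, d.scaleY)
    (4, 5, 0.0, 2.0, 3.0)
    >>> d.toTransform()
    <Transform [2 0 0 3 4 5]>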
- -:Example: - - >>> t = Transform(2, 0, 0, 3, 0, 0) - >>> t.transformPoint((100, 100)) - (200, 300) - >>> t = Scale(2, 3) - >>> t.transformPoint((100, 100)) - (200, 300) - >>> t.transformPoint((0, 0)) - (0, 0) - >>> t = Offset(2, 3) - >>> t.transformPoint((100, 100)) - (102, 103) - >>> t.transformPoint((0, 0)) - (2, 3) - >>> t2 = t.scale(0.5) - >>> t2.transformPoint((100, 100)) - (52.0, 53.0) - >>> import math - >>> t3 = t2.rotate(math.pi / 2) - >>> t3.transformPoint((0, 0)) - (2.0, 3.0) - >>> t3.transformPoint((100, 100)) - (-48.0, 53.0) - >>> t = Identity.scale(0.5).translate(100, 200).skew(0.1, 0.2) - >>> t.transformPoints([(0, 0), (1, 1), (100, 100)]) - [(50.0, 100.0), (50.550167336042726, 100.60135501775433), (105.01673360427253, 160.13550177543362)] - >>> -""" - -import math -from typing import NamedTuple -from dataclasses import dataclass - - -__all__ = ["Transform", "Identity", "Offset", "Scale", "DecomposedTransform"] - - -_EPSILON = 1e-15 -_ONE_EPSILON = 1 - _EPSILON -_MINUS_ONE_EPSILON = -1 + _EPSILON - - -def _normSinCos(v): - if abs(v) < _EPSILON: - v = 0 - elif v > _ONE_EPSILON: - v = 1 - elif v < _MINUS_ONE_EPSILON: - v = -1 - return v - - -class Transform(NamedTuple): - - """2x2 transformation matrix plus offset, a.k.a. Affine transform. - Transform instances are immutable: all transforming methods, eg. - rotate(), return a new Transform instance. - - :Example: - - >>> t = Transform() - >>> t - - >>> t.scale(2) - - >>> t.scale(2.5, 5.5) - - >>> - >>> t.scale(2, 3).transformPoint((100, 100)) - (200, 300) - - Transform's constructor takes six arguments, all of which are - optional, and can be used as keyword arguments:: - - >>> Transform(12) - - >>> Transform(dx=12) - - >>> Transform(yx=12) - - - Transform instances also behave like sequences of length 6:: - - >>> len(Identity) - 6 - >>> list(Identity) - [1, 0, 0, 1, 0, 0] - >>> tuple(Identity) - (1, 0, 0, 1, 0, 0) - - Transform instances are comparable:: - - >>> t1 = Identity.scale(2, 3).translate(4, 6) - >>> t2 = Identity.translate(8, 18).scale(2, 3) - >>> t1 == t2 - 1 - - But beware of floating point rounding errors:: - - >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6) - >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3) - >>> t1 - - >>> t2 - - >>> t1 == t2 - 0 - - Transform instances are hashable, meaning you can use them as - keys in dictionaries:: - - >>> d = {Scale(12, 13): None} - >>> d - {: None} - - But again, beware of floating point rounding errors:: - - >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6) - >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3) - >>> t1 - - >>> t2 - - >>> d = {t1: None} - >>> d - {: None} - >>> d[t2] - Traceback (most recent call last): - File "", line 1, in ? - KeyError: - """ - - xx: float = 1 - xy: float = 0 - yx: float = 0 - yy: float = 1 - dx: float = 0 - dy: float = 0 - - def transformPoint(self, p): - """Transform a point. - - :Example: - - >>> t = Transform() - >>> t = t.scale(2.5, 5.5) - >>> t.transformPoint((100, 100)) - (250.0, 550.0) - """ - (x, y) = p - xx, xy, yx, yy, dx, dy = self - return (xx * x + yx * y + dx, xy * x + yy * y + dy) - - def transformPoints(self, points): - """Transform a list of points. 
- - :Example: - - >>> t = Scale(2, 3) - >>> t.transformPoints([(0, 0), (0, 100), (100, 100), (100, 0)]) - [(0, 0), (0, 300), (200, 300), (200, 0)] - >>> - """ - xx, xy, yx, yy, dx, dy = self - return [(xx * x + yx * y + dx, xy * x + yy * y + dy) for x, y in points] - - def transformVector(self, v): - """Transform an (dx, dy) vector, treating translation as zero. - - :Example: - - >>> t = Transform(2, 0, 0, 2, 10, 20) - >>> t.transformVector((3, -4)) - (6, -8) - >>> - """ - (dx, dy) = v - xx, xy, yx, yy = self[:4] - return (xx * dx + yx * dy, xy * dx + yy * dy) - - def transformVectors(self, vectors): - """Transform a list of (dx, dy) vector, treating translation as zero. - - :Example: - >>> t = Transform(2, 0, 0, 2, 10, 20) - >>> t.transformVectors([(3, -4), (5, -6)]) - [(6, -8), (10, -12)] - >>> - """ - xx, xy, yx, yy = self[:4] - return [(xx * dx + yx * dy, xy * dx + yy * dy) for dx, dy in vectors] - - def translate(self, x=0, y=0): - """Return a new transformation, translated (offset) by x, y. - - :Example: - >>> t = Transform() - >>> t.translate(20, 30) - - >>> - """ - return self.transform((1, 0, 0, 1, x, y)) - - def scale(self, x=1, y=None): - """Return a new transformation, scaled by x, y. The 'y' argument - may be None, which implies to use the x value for y as well. - - :Example: - >>> t = Transform() - >>> t.scale(5) - - >>> t.scale(5, 6) - - >>> - """ - if y is None: - y = x - return self.transform((x, 0, 0, y, 0, 0)) - - def rotate(self, angle): - """Return a new transformation, rotated by 'angle' (radians). - - :Example: - >>> import math - >>> t = Transform() - >>> t.rotate(math.pi / 2) - - >>> - """ - import math - - c = _normSinCos(math.cos(angle)) - s = _normSinCos(math.sin(angle)) - return self.transform((c, s, -s, c, 0, 0)) - - def skew(self, x=0, y=0): - """Return a new transformation, skewed by x and y. - - :Example: - >>> import math - >>> t = Transform() - >>> t.skew(math.pi / 4) - - >>> - """ - import math - - return self.transform((1, math.tan(y), math.tan(x), 1, 0, 0)) - - def transform(self, other): - """Return a new transformation, transformed by another - transformation. - - :Example: - >>> t = Transform(2, 0, 0, 3, 1, 6) - >>> t.transform((4, 3, 2, 1, 5, 6)) - - >>> - """ - xx1, xy1, yx1, yy1, dx1, dy1 = other - xx2, xy2, yx2, yy2, dx2, dy2 = self - return self.__class__( - xx1 * xx2 + xy1 * yx2, - xx1 * xy2 + xy1 * yy2, - yx1 * xx2 + yy1 * yx2, - yx1 * xy2 + yy1 * yy2, - xx2 * dx1 + yx2 * dy1 + dx2, - xy2 * dx1 + yy2 * dy1 + dy2, - ) - - def reverseTransform(self, other): - """Return a new transformation, which is the other transformation - transformed by self. self.reverseTransform(other) is equivalent to - other.transform(self). - - :Example: - >>> t = Transform(2, 0, 0, 3, 1, 6) - >>> t.reverseTransform((4, 3, 2, 1, 5, 6)) - - >>> Transform(4, 3, 2, 1, 5, 6).transform((2, 0, 0, 3, 1, 6)) - - >>> - """ - xx1, xy1, yx1, yy1, dx1, dy1 = self - xx2, xy2, yx2, yy2, dx2, dy2 = other - return self.__class__( - xx1 * xx2 + xy1 * yx2, - xx1 * xy2 + xy1 * yy2, - yx1 * xx2 + yy1 * yx2, - yx1 * xy2 + yy1 * yy2, - xx2 * dx1 + yx2 * dy1 + dx2, - xy2 * dx1 + yy2 * dy1 + dy2, - ) - - def inverse(self): - """Return the inverse transformation. 
- - :Example: - >>> t = Identity.translate(2, 3).scale(4, 5) - >>> t.transformPoint((10, 20)) - (42, 103) - >>> it = t.inverse() - >>> it.transformPoint((42, 103)) - (10.0, 20.0) - >>> - """ - if self == Identity: - return self - xx, xy, yx, yy, dx, dy = self - det = xx * yy - yx * xy - xx, xy, yx, yy = yy / det, -xy / det, -yx / det, xx / det - dx, dy = -xx * dx - yx * dy, -xy * dx - yy * dy - return self.__class__(xx, xy, yx, yy, dx, dy) - - def toPS(self): - """Return a PostScript representation - - :Example: - - >>> t = Identity.scale(2, 3).translate(4, 5) - >>> t.toPS() - '[2 0 0 3 8 15]' - >>> - """ - return "[%s %s %s %s %s %s]" % self - - def toDecomposed(self) -> "DecomposedTransform": - """Decompose into a DecomposedTransform.""" - return DecomposedTransform.fromTransform(self) - - def __bool__(self): - """Returns True if transform is not identity, False otherwise. - - :Example: - - >>> bool(Identity) - False - >>> bool(Transform()) - False - >>> bool(Scale(1.)) - False - >>> bool(Scale(2)) - True - >>> bool(Offset()) - False - >>> bool(Offset(0)) - False - >>> bool(Offset(2)) - True - """ - return self != Identity - - def __repr__(self): - return "<%s [%g %g %g %g %g %g]>" % ((self.__class__.__name__,) + self) - - -Identity = Transform() - - -def Offset(x=0, y=0): - """Return the identity transformation offset by x, y. - - :Example: - >>> Offset(2, 3) - - >>> - """ - return Transform(1, 0, 0, 1, x, y) - - -def Scale(x, y=None): - """Return the identity transformation scaled by x, y. The 'y' argument - may be None, which implies to use the x value for y as well. - - :Example: - >>> Scale(2, 3) - - >>> - """ - if y is None: - y = x - return Transform(x, 0, 0, y, 0, 0) - - -@dataclass -class DecomposedTransform: - """The DecomposedTransform class implements a transformation with separate - translate, rotation, scale, skew, and transformation-center components. - """ - - translateX: float = 0 - translateY: float = 0 - rotation: float = 0 # in degrees, counter-clockwise - scaleX: float = 1 - scaleY: float = 1 - skewX: float = 0 # in degrees, clockwise - skewY: float = 0 # in degrees, counter-clockwise - tCenterX: float = 0 - tCenterY: float = 0 - - @classmethod - def fromTransform(self, transform): - # Adapted from an answer on - # https://math.stackexchange.com/questions/13150/extracting-rotation-scale-values-from-2d-transformation-matrix - a, b, c, d, x, y = transform - - sx = math.copysign(1, a) - if sx < 0: - a *= sx - b *= sx - - delta = a * d - b * c - - rotation = 0 - scaleX = scaleY = 0 - skewX = skewY = 0 - - # Apply the QR-like decomposition. - if a != 0 or b != 0: - r = math.sqrt(a * a + b * b) - rotation = math.acos(a / r) if b >= 0 else -math.acos(a / r) - scaleX, scaleY = (r, delta / r) - skewX, skewY = (math.atan((a * c + b * d) / (r * r)), 0) - elif c != 0 or d != 0: - s = math.sqrt(c * c + d * d) - rotation = math.pi / 2 - ( - math.acos(-c / s) if d >= 0 else -math.acos(c / s) - ) - scaleX, scaleY = (delta / s, s) - skewX, skewY = (0, math.atan((a * c + b * d) / (s * s))) - else: - # a = b = c = d = 0 - pass - - return DecomposedTransform( - x, - y, - math.degrees(rotation), - scaleX * sx, - scaleY, - math.degrees(skewX) * sx, - math.degrees(skewY), - 0, - 0, - ) - - def toTransform(self): - """Return the Transform() equivalent of this transformation. 
- - :Example: - >>> DecomposedTransform(scaleX=2, scaleY=2).toTransform() - - >>> - """ - t = Transform() - t = t.translate( - self.translateX + self.tCenterX, self.translateY + self.tCenterY - ) - t = t.rotate(math.radians(self.rotation)) - t = t.scale(self.scaleX, self.scaleY) - t = t.skew(math.radians(self.skewX), math.radians(self.skewY)) - t = t.translate(-self.tCenterX, -self.tCenterY) - return t - - -if __name__ == "__main__": - import sys - import doctest - - sys.exit(doctest.testmod().failed) diff --git a/spaces/DataScienceEngineering/4-Seq2SeqQAT5/qasrl_model_pipeline.py b/spaces/DataScienceEngineering/4-Seq2SeqQAT5/qasrl_model_pipeline.py deleted file mode 100644 index 50135f76849bc8537fcae83b72532da661487da6..0000000000000000000000000000000000000000 --- a/spaces/DataScienceEngineering/4-Seq2SeqQAT5/qasrl_model_pipeline.py +++ /dev/null @@ -1,183 +0,0 @@ -from typing import Optional -import json -from argparse import Namespace -from pathlib import Path -from transformers import Text2TextGenerationPipeline, AutoModelForSeq2SeqLM, AutoTokenizer - -def get_markers_for_model(is_t5_model: bool) -> Namespace: - special_tokens_constants = Namespace() - if is_t5_model: - # T5 model have 100 special tokens by default - special_tokens_constants.separator_input_question_predicate = "" - special_tokens_constants.separator_output_answers = "" - special_tokens_constants.separator_output_questions = "" # if using only questions - special_tokens_constants.separator_output_question_answer = "" - special_tokens_constants.separator_output_pairs = "" - special_tokens_constants.predicate_generic_marker = "" - special_tokens_constants.predicate_verb_marker = "" - special_tokens_constants.predicate_nominalization_marker = "" - - else: - special_tokens_constants.separator_input_question_predicate = "" - special_tokens_constants.separator_output_answers = "" - special_tokens_constants.separator_output_questions = "" # if using only questions - special_tokens_constants.separator_output_question_answer = "" - special_tokens_constants.separator_output_pairs = "" - special_tokens_constants.predicate_generic_marker = "" - special_tokens_constants.predicate_verb_marker = "" - special_tokens_constants.predicate_nominalization_marker = "" - return special_tokens_constants - -def load_trained_model(name_or_path): - import huggingface_hub as HFhub - tokenizer = AutoTokenizer.from_pretrained(name_or_path) - model = AutoModelForSeq2SeqLM.from_pretrained(name_or_path) - # load preprocessing_kwargs from the model repo on HF hub, or from the local model directory - kwargs_filename = None - if name_or_path.startswith("kleinay/"): # and 'preprocessing_kwargs.json' in HFhub.list_repo_files(name_or_path): # the supported version of HFhub doesn't support list_repo_files - kwargs_filename = HFhub.hf_hub_download(repo_id=name_or_path, filename="preprocessing_kwargs.json") - elif Path(name_or_path).is_dir() and (Path(name_or_path) / "experiment_kwargs.json").exists(): - kwargs_filename = Path(name_or_path) / "experiment_kwargs.json" - - if kwargs_filename: - preprocessing_kwargs = json.load(open(kwargs_filename)) - # integrate into model.config (for decoding args, e.g. 
"num_beams"), and save also as standalone object for preprocessing - model.config.preprocessing_kwargs = Namespace(**preprocessing_kwargs) - model.config.update(preprocessing_kwargs) - return model, tokenizer - - -class QASRL_Pipeline(Text2TextGenerationPipeline): - def __init__(self, model_repo: str, **kwargs): - model, tokenizer = load_trained_model(model_repo) - super().__init__(model, tokenizer, framework="pt") - self.is_t5_model = "t5" in model.config.model_type - self.special_tokens = get_markers_for_model(self.is_t5_model) - self.data_args = model.config.preprocessing_kwargs - # backward compatibility - default keyword values implemeted in `run_summarization`, thus not saved in `preprocessing_kwargs` - if "predicate_marker_type" not in vars(self.data_args): - self.data_args.predicate_marker_type = "generic" - if "use_bilateral_predicate_marker" not in vars(self.data_args): - self.data_args.use_bilateral_predicate_marker = True - if "append_verb_form" not in vars(self.data_args): - self.data_args.append_verb_form = True - self._update_config(**kwargs) - - def _update_config(self, **kwargs): - " Update self.model.config with initialization parameters and necessary defaults. " - # set default values that will always override model.config, but can overriden by __init__ kwargs - kwargs["max_length"] = kwargs.get("max_length", 80) - # override model.config with kwargs - for k,v in kwargs.items(): - self.model.config.__dict__[k] = v - - def _sanitize_parameters(self, **kwargs): - preprocess_kwargs, forward_kwargs, postprocess_kwargs = {}, {}, {} - if "predicate_marker" in kwargs: - preprocess_kwargs["predicate_marker"] = kwargs["predicate_marker"] - if "predicate_type" in kwargs: - preprocess_kwargs["predicate_type"] = kwargs["predicate_type"] - if "verb_form" in kwargs: - preprocess_kwargs["verb_form"] = kwargs["verb_form"] - return preprocess_kwargs, forward_kwargs, postprocess_kwargs - - def preprocess(self, inputs, predicate_marker="", predicate_type=None, verb_form=None): - # Here, inputs is string or list of strings; apply string postprocessing - if isinstance(inputs, str): - processed_inputs = self._preprocess_string(inputs, predicate_marker, predicate_type, verb_form) - elif hasattr(inputs, "__iter__"): - processed_inputs = [self._preprocess_string(s, predicate_marker, predicate_type, verb_form) for s in inputs] - else: - raise ValueError("inputs must be str or Iterable[str]") - # Now pass to super.preprocess for tokenization - return super().preprocess(processed_inputs) - - def _preprocess_string(self, seq: str, predicate_marker: str, predicate_type: Optional[str], verb_form: Optional[str]) -> str: - sent_tokens = seq.split(" ") - assert predicate_marker in sent_tokens, f"Input sentence must include a predicate-marker token ('{predicate_marker}') before the target predicate word" - predicate_idx = sent_tokens.index(predicate_marker) - sent_tokens.remove(predicate_marker) - sentence_before_predicate = " ".join([sent_tokens[i] for i in range(predicate_idx)]) - predicate = sent_tokens[predicate_idx] - sentence_after_predicate = " ".join([sent_tokens[i] for i in range(predicate_idx+1, len(sent_tokens))]) - - if self.data_args.predicate_marker_type == "generic": - predicate_marker = self.special_tokens.predicate_generic_marker - # In case we want special marker for each predicate type: """ - elif self.data_args.predicate_marker_type == "pred_type": - assert predicate_type is not None, "For this model, you must provide the `predicate_type` either when initializing QASRL_Pipeline(...) 
or when applying __call__(...) on it" - assert predicate_type in ("verbal", "nominal"), f"`predicate_type` must be either 'verbal' or 'nominal'; got '{predicate_type}'" - predicate_marker = {"verbal": self.special_tokens.predicate_verb_marker , - "nominal": self.special_tokens.predicate_nominalization_marker - }[predicate_type] - - if self.data_args.use_bilateral_predicate_marker: - seq = f"{sentence_before_predicate} {predicate_marker} {predicate} {predicate_marker} {sentence_after_predicate}" - else: - seq = f"{sentence_before_predicate} {predicate_marker} {predicate} {sentence_after_predicate}" - - # embed also verb_form - if self.data_args.append_verb_form and verb_form is None: - raise ValueError(f"For this model, you must provide the `verb_form` of the predicate when applying __call__(...)") - elif self.data_args.append_verb_form: - seq = f"{seq} {self.special_tokens.separator_input_question_predicate} {verb_form} " - else: - seq = f"{seq} " - - # append source prefix (for t5 models) - prefix = self._get_source_prefix(predicate_type) - - return prefix + seq - - def _get_source_prefix(self, predicate_type: Optional[str]): - if not self.is_t5_model or self.data_args.source_prefix is None: - return '' - if not self.data_args.source_prefix.startswith("<"): # Regular prefix - not dependent on input row x - return self.data_args.source_prefix - if self.data_args.source_prefix == "": - if predicate_type is None: - raise ValueError("source_prefix is '' but input no `predicate_type`.") - else: - return f"Generate QAs for {predicate_type} QASRL: " - - def _forward(self, *args, **kwargs): - outputs = super()._forward(*args, **kwargs) - return outputs - - - def postprocess(self, model_outputs): - output_seq = self.tokenizer.decode( - model_outputs["output_ids"].squeeze(), - skip_special_tokens=False, - clean_up_tokenization_spaces=False, - ) - output_seq = output_seq.strip(self.tokenizer.pad_token).strip(self.tokenizer.eos_token).strip() - qa_subseqs = output_seq.split(self.special_tokens.separator_output_pairs) - qas = [self._postrocess_qa(qa_subseq) for qa_subseq in qa_subseqs] - return {"generated_text": output_seq, - "QAs": qas} - - def _postrocess_qa(self, seq: str) -> str: - # split question and answers - if self.special_tokens.separator_output_question_answer in seq: - question, answer = seq.split(self.special_tokens.separator_output_question_answer)[:2] - else: - print("invalid format: no separator between question and answer found...") - return None - # question, answer = seq, '' # Or: backoff to only question - # skip "_" slots in questions - question = ' '.join(t for t in question.split(' ') if t != '_') - answers = [a.strip() for a in answer.split(self.special_tokens.separator_output_answers)] - return {"question": question, "answers": answers} - - -if __name__ == "__main__": - pipe = QASRL_Pipeline("kleinay/qanom-seq2seq-model-baseline") - res1 = pipe("The student was interested in Luke 's research about sea animals .", verb_form="research", predicate_type="nominal") - res2 = pipe(["The doctor was interested in Luke 's treatment .", - "The Veterinary student was interested in Luke 's treatment of sea animals ."], verb_form="treat", predicate_type="nominal", num_beams=10) - res3 = pipe("A number of professions have developed that specialize in the treatment of mental disorders .", verb_form="develop", predicate_type="verbal") - print(res1) - print(res2) - print(res3) - \ No newline at end of file diff --git a/spaces/Detomo/naomi-app-api/README.md 
b/spaces/Detomo/naomi-app-api/README.md deleted file mode 100644 index 86983648b566326acfcc02366adafe3ca972db1f..0000000000000000000000000000000000000000 --- a/spaces/Detomo/naomi-app-api/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Naomi App Api -emoji: 🐨 -colorFrom: green -colorTo: red -sdk: docker -pinned: false -license: creativeml-openrail-m ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/training/training_loop.py b/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/training/training_loop.py deleted file mode 100644 index d9ccb45b1a0321f1d938efa6a62229ffe396dcfe..0000000000000000000000000000000000000000 --- a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/training/training_loop.py +++ /dev/null @@ -1,278 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. - -"""Main training script.""" - -import os -import numpy as np -import tensorflow as tf -import dnnlib -import dnnlib.tflib as tflib -from dnnlib.tflib.autosummary import autosummary - -import config -import train -from training import dataset -from training import misc -from metrics import metric_base - -#---------------------------------------------------------------------------- -# Just-in-time processing of training images before feeding them to the networks. - -def process_reals(x, lod, mirror_augment, drange_data, drange_net): - with tf.name_scope('ProcessReals'): - with tf.name_scope('DynamicRange'): - x = tf.cast(x, tf.float32) - x = misc.adjust_dynamic_range(x, drange_data, drange_net) - if mirror_augment: - with tf.name_scope('MirrorAugment'): - s = tf.shape(x) - mask = tf.random_uniform([s[0], 1, 1, 1], 0.0, 1.0) - mask = tf.tile(mask, [1, s[1], s[2], s[3]]) - x = tf.where(mask < 0.5, x, tf.reverse(x, axis=[3])) - with tf.name_scope('FadeLOD'): # Smooth crossfade between consecutive levels-of-detail. - s = tf.shape(x) - y = tf.reshape(x, [-1, s[1], s[2]//2, 2, s[3]//2, 2]) - y = tf.reduce_mean(y, axis=[3, 5], keepdims=True) - y = tf.tile(y, [1, 1, 1, 2, 1, 2]) - y = tf.reshape(y, [-1, s[1], s[2], s[3]]) - x = tflib.lerp(x, y, lod - tf.floor(lod)) - with tf.name_scope('UpscaleLOD'): # Upscale to match the expected input/output size of the networks. - s = tf.shape(x) - factor = tf.cast(2 ** tf.floor(lod), tf.int32) - x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1]) - x = tf.tile(x, [1, 1, 1, factor, 1, factor]) - x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor]) - return x - -#---------------------------------------------------------------------------- -# Evaluate time-varying training parameters. - -def training_schedule( - cur_nimg, - training_set, - num_gpus, - lod_initial_resolution = 4, # Image resolution used at the beginning. - lod_training_kimg = 600, # Thousands of real images to show before doubling the resolution. - lod_transition_kimg = 600, # Thousands of real images to show when fading in new layers. - minibatch_base = 16, # Maximum minibatch size, divided evenly among GPUs. - minibatch_dict = {}, # Resolution-specific overrides. - max_minibatch_per_gpu = {}, # Resolution-specific maximum minibatch size per GPU. 
- G_lrate_base = 0.001, # Learning rate for the generator. - G_lrate_dict = {}, # Resolution-specific overrides. - D_lrate_base = 0.001, # Learning rate for the discriminator. - D_lrate_dict = {}, # Resolution-specific overrides. - lrate_rampup_kimg = 0, # Duration of learning rate ramp-up. - tick_kimg_base = 160, # Default interval of progress snapshots. - tick_kimg_dict = {4: 160, 8:140, 16:120, 32:100, 64:80, 128:60, 256:40, 512:30, 1024:20}): # Resolution-specific overrides. - - # Initialize result dict. - s = dnnlib.EasyDict() - s.kimg = cur_nimg / 1000.0 - - # Training phase. - phase_dur = lod_training_kimg + lod_transition_kimg - phase_idx = int(np.floor(s.kimg / phase_dur)) if phase_dur > 0 else 0 - phase_kimg = s.kimg - phase_idx * phase_dur - - # Level-of-detail and resolution. - s.lod = training_set.resolution_log2 - s.lod -= np.floor(np.log2(lod_initial_resolution)) - s.lod -= phase_idx - if lod_transition_kimg > 0: - s.lod -= max(phase_kimg - lod_training_kimg, 0.0) / lod_transition_kimg - s.lod = max(s.lod, 0.0) - s.resolution = 2 ** (training_set.resolution_log2 - int(np.floor(s.lod))) - - # Minibatch size. - s.minibatch = minibatch_dict.get(s.resolution, minibatch_base) - s.minibatch -= s.minibatch % num_gpus - if s.resolution in max_minibatch_per_gpu: - s.minibatch = min(s.minibatch, max_minibatch_per_gpu[s.resolution] * num_gpus) - - # Learning rate. - s.G_lrate = G_lrate_dict.get(s.resolution, G_lrate_base) - s.D_lrate = D_lrate_dict.get(s.resolution, D_lrate_base) - if lrate_rampup_kimg > 0: - rampup = min(s.kimg / lrate_rampup_kimg, 1.0) - s.G_lrate *= rampup - s.D_lrate *= rampup - - # Other parameters. - s.tick_kimg = tick_kimg_dict.get(s.resolution, tick_kimg_base) - return s - -#---------------------------------------------------------------------------- -# Main training script. - -def training_loop( - submit_config, - G_args = {}, # Options for generator network. - D_args = {}, # Options for discriminator network. - G_opt_args = {}, # Options for generator optimizer. - D_opt_args = {}, # Options for discriminator optimizer. - G_loss_args = {}, # Options for generator loss. - D_loss_args = {}, # Options for discriminator loss. - dataset_args = {}, # Options for dataset.load_dataset(). - sched_args = {}, # Options for train.TrainingSchedule. - grid_args = {}, # Options for train.setup_snapshot_image_grid(). - metric_arg_list = [], # Options for MetricGroup. - tf_config = {}, # Options for tflib.init_tf(). - G_smoothing_kimg = 10.0, # Half-life of the running average of generator weights. - D_repeats = 1, # How many times the discriminator is trained per G iteration. - minibatch_repeats = 4, # Number of minibatches to run before adjusting training parameters. - reset_opt_for_new_lod = True, # Reset optimizer internal state (e.g. Adam moments) when new layers are introduced? - total_kimg = 15000, # Total length of the training, measured in thousands of real images. - mirror_augment = False, # Enable mirror augment? - drange_net = [-1,1], # Dynamic range used when feeding image data to the networks. - image_snapshot_ticks = 1, # How often to export image snapshots? - network_snapshot_ticks = 10, # How often to export network snapshots? - save_tf_graph = False, # Include full TensorFlow computation graph in the tfevents file? - save_weight_histograms = False, # Include weight histograms in the tfevents file? - resume_run_id = None, # Run ID or network pkl to resume training from, None = start from scratch. 
- resume_snapshot = None, # Snapshot index to resume training from, None = autodetect. - resume_kimg = 0.0, # Assumed training progress at the beginning. Affects reporting and training schedule. - resume_time = 0.0): # Assumed wallclock time at the beginning. Affects reporting. - - # Initialize dnnlib and TensorFlow. - ctx = dnnlib.RunContext(submit_config, train) - tflib.init_tf(tf_config) - - # Load training set. - training_set = dataset.load_dataset(data_dir=config.data_dir, verbose=True, **dataset_args) - - # Construct networks. - with tf.device('/gpu:0'): - if resume_run_id is not None: - network_pkl = misc.locate_network_pkl(resume_run_id, resume_snapshot) - print('Loading networks from "%s"...' % network_pkl) - G, D, Gs = misc.load_pkl(network_pkl) - else: - print('Constructing networks...') - G = tflib.Network('G', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **G_args) - D = tflib.Network('D', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **D_args) - Gs = G.clone('Gs') - G.print_layers(); D.print_layers() - - print('Building TensorFlow graph...') - with tf.name_scope('Inputs'), tf.device('/cpu:0'): - lod_in = tf.placeholder(tf.float32, name='lod_in', shape=[]) - lrate_in = tf.placeholder(tf.float32, name='lrate_in', shape=[]) - minibatch_in = tf.placeholder(tf.int32, name='minibatch_in', shape=[]) - minibatch_split = minibatch_in // submit_config.num_gpus - Gs_beta = 0.5 ** tf.div(tf.cast(minibatch_in, tf.float32), G_smoothing_kimg * 1000.0) if G_smoothing_kimg > 0.0 else 0.0 - - G_opt = tflib.Optimizer(name='TrainG', learning_rate=lrate_in, **G_opt_args) - D_opt = tflib.Optimizer(name='TrainD', learning_rate=lrate_in, **D_opt_args) - for gpu in range(submit_config.num_gpus): - with tf.name_scope('GPU%d' % gpu), tf.device('/gpu:%d' % gpu): - G_gpu = G if gpu == 0 else G.clone(G.name + '_shadow') - D_gpu = D if gpu == 0 else D.clone(D.name + '_shadow') - lod_assign_ops = [tf.assign(G_gpu.find_var('lod'), lod_in), tf.assign(D_gpu.find_var('lod'), lod_in)] - reals, labels = training_set.get_minibatch_tf() - reals = process_reals(reals, lod_in, mirror_augment, training_set.dynamic_range, drange_net) - with tf.name_scope('G_loss'), tf.control_dependencies(lod_assign_ops): - G_loss = dnnlib.util.call_func_by_name(G=G_gpu, D=D_gpu, opt=G_opt, training_set=training_set, minibatch_size=minibatch_split, **G_loss_args) - with tf.name_scope('D_loss'), tf.control_dependencies(lod_assign_ops): - D_loss = dnnlib.util.call_func_by_name(G=G_gpu, D=D_gpu, opt=D_opt, training_set=training_set, minibatch_size=minibatch_split, reals=reals, labels=labels, **D_loss_args) - G_opt.register_gradients(tf.reduce_mean(G_loss), G_gpu.trainables) - D_opt.register_gradients(tf.reduce_mean(D_loss), D_gpu.trainables) - G_train_op = G_opt.apply_updates() - D_train_op = D_opt.apply_updates() - - Gs_update_op = Gs.setup_as_moving_average_of(G, beta=Gs_beta) - with tf.device('/gpu:0'): - try: - peak_gpu_mem_op = tf.contrib.memory_stats.MaxBytesInUse() - except tf.errors.NotFoundError: - peak_gpu_mem_op = tf.constant(0) - - print('Setting up snapshot image grid...') - grid_size, grid_reals, grid_labels, grid_latents = misc.setup_snapshot_image_grid(G, training_set, **grid_args) - sched = training_schedule(cur_nimg=total_kimg*1000, training_set=training_set, num_gpus=submit_config.num_gpus, **sched_args) - grid_fakes = Gs.run(grid_latents, grid_labels, is_validation=True, 
minibatch_size=sched.minibatch//submit_config.num_gpus) - - print('Setting up run dir...') - misc.save_image_grid(grid_reals, os.path.join(submit_config.run_dir, 'reals.png'), drange=training_set.dynamic_range, grid_size=grid_size) - misc.save_image_grid(grid_fakes, os.path.join(submit_config.run_dir, 'fakes%06d.png' % resume_kimg), drange=drange_net, grid_size=grid_size) - summary_log = tf.summary.FileWriter(submit_config.run_dir) - if save_tf_graph: - summary_log.add_graph(tf.get_default_graph()) - if save_weight_histograms: - G.setup_weight_histograms(); D.setup_weight_histograms() - metrics = metric_base.MetricGroup(metric_arg_list) - - print('Training...\n') - ctx.update('', cur_epoch=resume_kimg, max_epoch=total_kimg) - maintenance_time = ctx.get_last_update_interval() - cur_nimg = int(resume_kimg * 1000) - cur_tick = 0 - tick_start_nimg = cur_nimg - prev_lod = -1.0 - while cur_nimg < total_kimg * 1000: - if ctx.should_stop(): break - - # Choose training parameters and configure training ops. - sched = training_schedule(cur_nimg=cur_nimg, training_set=training_set, num_gpus=submit_config.num_gpus, **sched_args) - training_set.configure(sched.minibatch // submit_config.num_gpus, sched.lod) - if reset_opt_for_new_lod: - if np.floor(sched.lod) != np.floor(prev_lod) or np.ceil(sched.lod) != np.ceil(prev_lod): - G_opt.reset_optimizer_state(); D_opt.reset_optimizer_state() - prev_lod = sched.lod - - # Run training ops. - for _mb_repeat in range(minibatch_repeats): - for _D_repeat in range(D_repeats): - tflib.run([D_train_op, Gs_update_op], {lod_in: sched.lod, lrate_in: sched.D_lrate, minibatch_in: sched.minibatch}) - cur_nimg += sched.minibatch - tflib.run([G_train_op], {lod_in: sched.lod, lrate_in: sched.G_lrate, minibatch_in: sched.minibatch}) - - # Perform maintenance tasks once per tick. - done = (cur_nimg >= total_kimg * 1000) - if cur_nimg >= tick_start_nimg + sched.tick_kimg * 1000 or done: - cur_tick += 1 - tick_kimg = (cur_nimg - tick_start_nimg) / 1000.0 - tick_start_nimg = cur_nimg - tick_time = ctx.get_time_since_last_update() - total_time = ctx.get_time_since_start() + resume_time - - # Report progress. - print('tick %-5d kimg %-8.1f lod %-5.2f minibatch %-4d time %-12s sec/tick %-7.1f sec/kimg %-7.2f maintenance %-6.1f gpumem %-4.1f' % ( - autosummary('Progress/tick', cur_tick), - autosummary('Progress/kimg', cur_nimg / 1000.0), - autosummary('Progress/lod', sched.lod), - autosummary('Progress/minibatch', sched.minibatch), - dnnlib.util.format_time(autosummary('Timing/total_sec', total_time)), - autosummary('Timing/sec_per_tick', tick_time), - autosummary('Timing/sec_per_kimg', tick_time / tick_kimg), - autosummary('Timing/maintenance_sec', maintenance_time), - autosummary('Resources/peak_gpu_mem_gb', peak_gpu_mem_op.eval() / 2**30))) - autosummary('Timing/total_hours', total_time / (60.0 * 60.0)) - autosummary('Timing/total_days', total_time / (24.0 * 60.0 * 60.0)) - - # Save snapshots. 
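For intuition, the level-of-detail arithmetic inside `training_schedule` can be reproduced in a few lines. The sketch below only illustrates that schedule under stated assumptions — a 1024×1024 dataset (`resolution_log2 = 10`) and the default `lod_training_kimg = lod_transition_kimg = 600`:

```python
import numpy as np

def lod_and_resolution(cur_nimg, resolution_log2=10, lod_initial_resolution=4,
                       lod_training_kimg=600, lod_transition_kimg=600):
    # Mirrors the lod/resolution arithmetic of training_schedule() above.
    kimg = cur_nimg / 1000.0
    phase_dur = lod_training_kimg + lod_transition_kimg
    phase_idx = int(np.floor(kimg / phase_dur)) if phase_dur > 0 else 0
    phase_kimg = kimg - phase_idx * phase_dur
    lod = resolution_log2 - np.floor(np.log2(lod_initial_resolution)) - phase_idx
    if lod_transition_kimg > 0:
        lod -= max(phase_kimg - lod_training_kimg, 0.0) / lod_transition_kimg
    lod = max(lod, 0.0)
    return lod, 2 ** (resolution_log2 - int(np.floor(lod)))

print(lod_and_resolution(0))          # lod 8.0 -> training starts at 4x4
print(lod_and_resolution(900_000))    # lod 7.5 -> halfway through fading in the 8x8 layers
print(lod_and_resolution(9_600_000))  # lod 0.0 -> full 1024x1024 resolution
```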
- if cur_tick % image_snapshot_ticks == 0 or done: - grid_fakes = Gs.run(grid_latents, grid_labels, is_validation=True, minibatch_size=sched.minibatch//submit_config.num_gpus) - misc.save_image_grid(grid_fakes, os.path.join(submit_config.run_dir, 'fakes%06d.png' % (cur_nimg // 1000)), drange=drange_net, grid_size=grid_size) - if cur_tick % network_snapshot_ticks == 0 or done or cur_tick == 1: - pkl = os.path.join(submit_config.run_dir, 'network-snapshot-%06d.pkl' % (cur_nimg // 1000)) - misc.save_pkl((G, D, Gs), pkl) - metrics.run(pkl, run_dir=submit_config.run_dir, num_gpus=submit_config.num_gpus, tf_config=tf_config) - - # Update summaries and RunContext. - metrics.update_autosummaries() - tflib.autosummary.save_summaries(summary_log, cur_nimg) - ctx.update('%.2f' % sched.lod, cur_epoch=cur_nimg // 1000, max_epoch=total_kimg) - maintenance_time = ctx.get_last_update_interval() - tick_time - - # Write final results. - misc.save_pkl((G, D, Gs), os.path.join(submit_config.run_dir, 'network-final.pkl')) - summary_log.close() - - ctx.close() - -#---------------------------------------------------------------------------- diff --git a/spaces/DragGan/DragGan-Inversion/stylegan_human/style_mixing.py b/spaces/DragGan/DragGan-Inversion/stylegan_human/style_mixing.py deleted file mode 100644 index 022912df133bd977364786f90d6ae635292dc135..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/stylegan_human/style_mixing.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. - -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. -# - - -import os -import re -from typing import List -import legacy - -import click -import dnnlib -import numpy as np -import PIL.Image -import torch - -""" -Style mixing using pretrained network pickle. - -Examples: - -\b -python style_mixing.py --network=pretrained_models/stylegan_human_v2_1024.pkl --rows=85,100,75,458,1500 \\ - --cols=55,821,1789,293 --styles=0-3 --outdir=outputs/stylemixing -""" - - -@click.command() -@click.option('--network', 'network_pkl', help='Network pickle filename', required=True) -@click.option('--rows', 'row_seeds', type=legacy.num_range, help='Random seeds to use for image rows', required=True) -@click.option('--cols', 'col_seeds', type=legacy.num_range, help='Random seeds to use for image columns', required=True) -@click.option('--styles', 'col_styles', type=legacy.num_range, help='Style layer range', default='0-6', show_default=True) -@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=0.8, show_default=True) -@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True) -@click.option('--outdir', type=str, required=True, default='outputs/stylemixing') -def generate_style_mix( - network_pkl: str, - row_seeds: List[int], - col_seeds: List[int], - col_styles: List[int], - truncation_psi: float, - noise_mode: str, - outdir: str -): - - print('Loading networks from "%s"...' 
% network_pkl) - device = torch.device('cuda') - with dnnlib.util.open_url(network_pkl) as f: - G = legacy.load_network_pkl(f)['G_ema'].to(device) - - os.makedirs(outdir, exist_ok=True) - - print('Generating W vectors...') - all_seeds = list(set(row_seeds + col_seeds)) - all_z = np.stack([np.random.RandomState(seed).randn(G.z_dim) - for seed in all_seeds]) - all_w = G.mapping(torch.from_numpy(all_z).to(device), None) - w_avg = G.mapping.w_avg - all_w = w_avg + (all_w - w_avg) * truncation_psi - w_dict = {seed: w for seed, w in zip(all_seeds, list(all_w))} - - print('Generating images...') - all_images = G.synthesis(all_w, noise_mode=noise_mode) - all_images = (all_images.permute(0, 2, 3, 1) * 127.5 + - 128).clamp(0, 255).to(torch.uint8).cpu().numpy() - image_dict = {(seed, seed): image for seed, - image in zip(all_seeds, list(all_images))} - - print('Generating style-mixed images...') - for row_seed in row_seeds: - for col_seed in col_seeds: - w = w_dict[row_seed].clone() - w[col_styles] = w_dict[col_seed][col_styles] - image = G.synthesis(w[np.newaxis], noise_mode=noise_mode) - image = (image.permute(0, 2, 3, 1) * 127.5 + - 128).clamp(0, 255).to(torch.uint8) - image_dict[(row_seed, col_seed)] = image[0].cpu().numpy() - - os.makedirs(outdir, exist_ok=True) - # print('Saving images...') - # for (row_seed, col_seed), image in image_dict.items(): - # PIL.Image.fromarray(image, 'RGB').save(f'{outdir}/{row_seed}-{col_seed}.png') - - print('Saving image grid...') - W = G.img_resolution // 2 - H = G.img_resolution - canvas = PIL.Image.new( - 'RGB', (W * (len(col_seeds) + 1), H * (len(row_seeds) + 1)), 'black') - for row_idx, row_seed in enumerate([0] + row_seeds): - for col_idx, col_seed in enumerate([0] + col_seeds): - if row_idx == 0 and col_idx == 0: - continue - key = (row_seed, col_seed) - if row_idx == 0: - key = (col_seed, col_seed) - if col_idx == 0: - key = (row_seed, row_seed) - canvas.paste(PIL.Image.fromarray( - image_dict[key], 'RGB'), (W * col_idx, H * row_idx)) - canvas.save(f'{outdir}/grid.png') - - -# ---------------------------------------------------------------------------- - -if __name__ == "__main__": - generate_style_mix() # pylint: disable=no-value-for-parameter - -# ---------------------------------------------------------------------------- diff --git a/spaces/EDGAhab/VITS-Aatrox-AI/app.py b/spaces/EDGAhab/VITS-Aatrox-AI/app.py deleted file mode 100644 index 7108d8d0a42b83759c9d08325ed703777f2fee38..0000000000000000000000000000000000000000 --- a/spaces/EDGAhab/VITS-Aatrox-AI/app.py +++ /dev/null @@ -1,54 +0,0 @@ -import gradio as gr -import os -os.system('cd monotonic_align && python setup.py build_ext --inplace && cd ..') -import torch - -import commons -import utils -from models import SynthesizerTrn -from text.symbols import symbols -from text import text_to_sequence - -import IPython.display as ipd - -import json -import math - -def get_text(text, hps): - text_norm = text_to_sequence(text, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = torch.LongTensor(text_norm) - return text_norm - -hps = utils.get_hparams_from_file("configs/biaobei_base.json") - -net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model) -_ = net_g.eval() - -_ = utils.load_checkpoint("G_aatrox.pth", net_g, None) - -def vc_fn(input): - stn_tst = get_text(input, hps) - with torch.no_grad(): - x_tst = stn_tst.unsqueeze(0) - x_tst_lengths = 
torch.LongTensor([stn_tst.size(0)]) - audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=.667, noise_scale_w=0.8, length_scale=1)[0][0,0].data.cpu().float().numpy() - sampling_rate = 22050 - return (sampling_rate, audio) - -app = gr.Blocks() -with app: - with gr.Tabs(): - with gr.TabItem("Basic"): - vc_input = gr.Textbox(label="Input Message") - vc_submit = gr.Button("Convert", variant="primary") - vc_output = gr.Audio(label="Output Audio") - #vc_output = ipd.display(ipd.Audio(vc_fn(get_text(vc_input, hps)), rate=hps.data.sampling_rate)) - vc_submit.click(vc_fn, [vc_input], [vc_output]) - - app.launch() \ No newline at end of file diff --git a/spaces/EronSamez/RVC_HFmeu/julius/resample.py b/spaces/EronSamez/RVC_HFmeu/julius/resample.py deleted file mode 100644 index fd3b9b547d4c33ec7136d32e5f086420d0a72e14..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/julius/resample.py +++ /dev/null @@ -1,216 +0,0 @@ -# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details. -# Author: adefossez, 2020 -""" -Differentiable, Pytorch based resampling. -Implementation of Julius O. Smith algorithm for resampling. -See https://ccrma.stanford.edu/~jos/resample/ for details. -This implementation is specially optimized for when new_sr / old_sr is a fraction -with a small numerator and denominator when removing the gcd (e.g. new_sr = 700, old_sr = 500). - -Very similar to [bmcfee/resampy](https://github.com/bmcfee/resampy) except this implementation -is optimized for the case mentioned before, while resampy is slower but more general. - -""" - -import math -from typing import Optional - -import torch -from torch.nn import functional as F - -from .core import sinc -from .utils import simple_repr - - -class ResampleFrac(torch.nn.Module): - """ - Resampling from the sample rate `old_sr` to `new_sr`. - """ - def __init__(self, old_sr: int, new_sr: int, zeros: int = 24, rolloff: float = 0.945): - """ - Args: - old_sr (int): sample rate of the input signal x. - new_sr (int): sample rate of the output. - zeros (int): number of zero crossing to keep in the sinc filter. - rolloff (float): use a lowpass filter that is `rolloff * new_sr / 2`, - to ensure sufficient margin due to the imperfection of the FIR filter used. - Lowering this value will reduce anti-aliasing, but will reduce some of the - highest frequencies. - - Shape: - - - Input: `[*, T]` - - Output: `[*, T']` with `T' = int(new_sr * T / old_sr) - - - .. caution:: - After dividing `old_sr` and `new_sr` by their GCD, both should be small - for this implementation to be fast. - - >>> import torch - >>> resample = ResampleFrac(4, 5) - >>> x = torch.randn(1000) - >>> print(len(resample(x))) - 1250 - """ - super().__init__() - if not isinstance(old_sr, int) or not isinstance(new_sr, int): - raise ValueError("old_sr and new_sr should be integers") - gcd = math.gcd(old_sr, new_sr) - self.old_sr = old_sr // gcd - self.new_sr = new_sr // gcd - self.zeros = zeros - self.rolloff = rolloff - - self._init_kernels() - - def _init_kernels(self): - if self.old_sr == self.new_sr: - return - - kernels = [] - sr = min(self.new_sr, self.old_sr) - # rolloff will perform antialiasing filtering by removing the highest frequencies. - # At first I thought I only needed this when downsampling, but when upsampling - # you will get edge artifacts without this, the edge is equivalent to zero padding, - # which will add high freq artifacts. 
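As a quick check of what the rolloff applied just below does to the cutoff — using the `ResampleFrac(4, 5)` example from the class docstring and the default `rolloff=0.945`:

```python
old_sr, new_sr, rolloff = 4, 5, 0.945   # docstring example + default rolloff
sr = min(new_sr, old_sr) * rolloff      # 3.78
print("lowpass cutoff:", sr / 2)        # ~1.89, just below the input Nyquist of old_sr / 2 = 2.0
```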
- sr *= self.rolloff - - # The key idea of the algorithm is that x(t) can be exactly reconstructed from x[i] (tensor) - # using the sinc interpolation formula: - # x(t) = sum_i x[i] sinc(pi * old_sr * (i / old_sr - t)) - # We can then sample the function x(t) with a different sample rate: - # y[j] = x(j / new_sr) - # or, - # y[j] = sum_i x[i] sinc(pi * old_sr * (i / old_sr - j / new_sr)) - - # We see here that y[j] is the convolution of x[i] with a specific filter, for which - # we take an FIR approximation, stopping when we see at least `zeros` zeros crossing. - # But y[j+1] is going to have a different set of weights and so on, until y[j + new_sr]. - # Indeed: - # y[j + new_sr] = sum_i x[i] sinc(pi * old_sr * ((i / old_sr - (j + new_sr) / new_sr)) - # = sum_i x[i] sinc(pi * old_sr * ((i - old_sr) / old_sr - j / new_sr)) - # = sum_i x[i + old_sr] sinc(pi * old_sr * (i / old_sr - j / new_sr)) - # so y[j+new_sr] uses the same filter as y[j], but on a shifted version of x by `old_sr`. - # This will explain the F.conv1d after, with a stride of old_sr. - self._width = math.ceil(self.zeros * self.old_sr / sr) - # If old_sr is still big after GCD reduction, most filters will be very unbalanced, i.e., - # they will have a lot of almost zero values to the left or to the right... - # There is probably a way to evaluate those filters more efficiently, but this is kept for - # future work. - idx = torch.arange(-self._width, self._width + self.old_sr).float() - for i in range(self.new_sr): - t = (-i/self.new_sr + idx/self.old_sr) * sr - t = t.clamp_(-self.zeros, self.zeros) - t *= math.pi - window = torch.cos(t/self.zeros/2)**2 - kernel = sinc(t) * window - # Renormalize kernel to ensure a constant signal is preserved. - kernel.div_(kernel.sum()) - kernels.append(kernel) - - self.register_buffer("kernel", torch.stack(kernels).view(self.new_sr, 1, -1)) - - def forward(self, x: torch.Tensor, output_length: Optional[int] = None, full: bool = False): - """ - Resample x. - Args: - x (Tensor): signal to resample, time should be the last dimension - output_length (None or int): This can be set to the desired output length - (last dimension). Allowed values are between 0 and - ceil(length * new_sr / old_sr). When None (default) is specified, the - floored output length will be used. In order to select the largest possible - size, use the `full` argument. - full (bool): return the longest possible output from the input. This can be useful - if you chain resampling operations, and want to give the `output_length` only - for the last one, while passing `full=True` to all the other ones. 
- """ - if self.old_sr == self.new_sr: - return x - shape = x.shape - length = x.shape[-1] - x = x.reshape(-1, length) - x = F.pad(x[:, None], (self._width, self._width + self.old_sr), mode='replicate') - ys = F.conv1d(x, self.kernel, stride=self.old_sr) # type: ignore - y = ys.transpose(1, 2).reshape(list(shape[:-1]) + [-1]) - - float_output_length = self.new_sr * length / self.old_sr - max_output_length = int(math.ceil(float_output_length)) - default_output_length = int(float_output_length) - if output_length is None: - output_length = max_output_length if full else default_output_length - elif output_length < 0 or output_length > max_output_length: - raise ValueError(f"output_length must be between 0 and {max_output_length}") - else: - if full: - raise ValueError("You cannot pass both full=True and output_length") - return y[..., :output_length] - - def __repr__(self): - return simple_repr(self) - - -def resample_frac(x: torch.Tensor, old_sr: int, new_sr: int, - zeros: int = 24, rolloff: float = 0.945, - output_length: Optional[int] = None, full: bool = False): - """ - Functional version of `ResampleFrac`, refer to its documentation for more information. - - ..warning:: - If you call repeatidly this functions with the same sample rates, then the - resampling kernel will be recomputed everytime. For best performance, you should use - and cache an instance of `ResampleFrac`. - """ - return ResampleFrac(old_sr, new_sr, zeros, rolloff).to(x)(x, output_length, full) - - -# Easier implementations for downsampling and upsampling by a factor of 2 -# Kept for testing and reference - -def _kernel_upsample2_downsample2(zeros): - # Kernel for upsampling and downsampling by a factor of 2. Interestingly, - # it is the same kernel used for both. - win = torch.hann_window(4 * zeros + 1, periodic=False) - winodd = win[1::2] - t = torch.linspace(-zeros + 0.5, zeros - 0.5, 2 * zeros) - t *= math.pi - kernel = (sinc(t) * winodd).view(1, 1, -1) - return kernel - - -def _upsample2(x, zeros=24): - """ - Upsample x by a factor of two. The output will be exactly twice as long as the input. - Args: - x (Tensor): signal to upsample, time should be the last dimension - zeros (int): number of zero crossing to keep in the sinc filter. - - This function is kept only for reference, you should use the more generic `resample_frac` - one. This function does not perform anti-aliasing filtering. - """ - *other, time = x.shape - kernel = _kernel_upsample2_downsample2(zeros).to(x) - out = F.conv1d(x.view(-1, 1, time), kernel, padding=zeros)[..., 1:].view(*other, time) - y = torch.stack([x, out], dim=-1) - return y.view(*other, -1) - - -def _downsample2(x, zeros=24): - """ - Downsample x by a factor of two. The output length is half of the input, ceiled. - Args: - x (Tensor): signal to downsample, time should be the last dimension - zeros (int): number of zero crossing to keep in the sinc filter. - - This function is kept only for reference, you should use the more generic `resample_frac` - one. This function does not perform anti-aliasing filtering. 
- """ - if x.shape[-1] % 2 != 0: - x = F.pad(x, (0, 1)) - xeven = x[..., ::2] - xodd = x[..., 1::2] - *other, time = xodd.shape - kernel = _kernel_upsample2_downsample2(zeros).to(x) - out = xeven + F.conv1d(xodd.view(-1, 1, time), kernel, padding=zeros)[..., :-1].view( - *other, time) - return out.view(*other, -1).mul(0.5) diff --git a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/data/transforms.py b/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/data/transforms.py deleted file mode 100644 index aead9dc73ed063e1c5865040eaa2652b26aa3ad3..0000000000000000000000000000000000000000 --- a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/data/transforms.py +++ /dev/null @@ -1,165 +0,0 @@ -import cv2 -import random - - -def mod_crop(img, scale): - """Mod crop images, used during testing. - - Args: - img (ndarray): Input image. - scale (int): Scale factor. - - Returns: - ndarray: Result image. - """ - img = img.copy() - if img.ndim in (2, 3): - h, w = img.shape[0], img.shape[1] - h_remainder, w_remainder = h % scale, w % scale - img = img[:h - h_remainder, :w - w_remainder, ...] - else: - raise ValueError(f'Wrong img ndim: {img.ndim}.') - return img - - -def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path): - """Paired random crop. - - It crops lists of lq and gt images with corresponding locations. - - Args: - img_gts (list[ndarray] | ndarray): GT images. Note that all images - should have the same shape. If the input is an ndarray, it will - be transformed to a list containing itself. - img_lqs (list[ndarray] | ndarray): LQ images. Note that all images - should have the same shape. If the input is an ndarray, it will - be transformed to a list containing itself. - gt_patch_size (int): GT patch size. - scale (int): Scale factor. - gt_path (str): Path to ground-truth. - - Returns: - list[ndarray] | ndarray: GT images and LQ images. If returned results - only have one element, just return ndarray. - """ - - if not isinstance(img_gts, list): - img_gts = [img_gts] - if not isinstance(img_lqs, list): - img_lqs = [img_lqs] - - h_lq, w_lq, _ = img_lqs[0].shape - h_gt, w_gt, _ = img_gts[0].shape - lq_patch_size = gt_patch_size // scale - - if h_gt != h_lq * scale or w_gt != w_lq * scale: - raise ValueError(f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ', - f'multiplication of LQ ({h_lq}, {w_lq}).') - if h_lq < lq_patch_size or w_lq < lq_patch_size: - raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size ' - f'({lq_patch_size}, {lq_patch_size}). ' - f'Please remove {gt_path}.') - - # randomly choose top and left coordinates for lq patch - top = random.randint(0, h_lq - lq_patch_size) - left = random.randint(0, w_lq - lq_patch_size) - - # crop lq patch - img_lqs = [v[top:top + lq_patch_size, left:left + lq_patch_size, ...] for v in img_lqs] - - # crop corresponding gt patch - top_gt, left_gt = int(top * scale), int(left * scale) - img_gts = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] for v in img_gts] - if len(img_gts) == 1: - img_gts = img_gts[0] - if len(img_lqs) == 1: - img_lqs = img_lqs[0] - return img_gts, img_lqs - - -def augment(imgs, hflip=True, rotation=True, flows=None, return_status=False): - """Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees). - - We use vertical flip and transpose for rotation implementation. - All the images in the list use the same augmentation. - - Args: - imgs (list[ndarray] | ndarray): Images to be augmented. If the input - is an ndarray, it will be transformed to a list. 
- hflip (bool): Horizontal flip. Default: True. - rotation (bool): Ratotation. Default: True. - flows (list[ndarray]: Flows to be augmented. If the input is an - ndarray, it will be transformed to a list. - Dimension is (h, w, 2). Default: None. - return_status (bool): Return the status of flip and rotation. - Default: False. - - Returns: - list[ndarray] | ndarray: Augmented images and flows. If returned - results only have one element, just return ndarray. - - """ - hflip = hflip and random.random() < 0.5 - vflip = rotation and random.random() < 0.5 - rot90 = rotation and random.random() < 0.5 - - def _augment(img): - if hflip: # horizontal - cv2.flip(img, 1, img) - if vflip: # vertical - cv2.flip(img, 0, img) - if rot90: - img = img.transpose(1, 0, 2) - return img - - def _augment_flow(flow): - if hflip: # horizontal - cv2.flip(flow, 1, flow) - flow[:, :, 0] *= -1 - if vflip: # vertical - cv2.flip(flow, 0, flow) - flow[:, :, 1] *= -1 - if rot90: - flow = flow.transpose(1, 0, 2) - flow = flow[:, :, [1, 0]] - return flow - - if not isinstance(imgs, list): - imgs = [imgs] - imgs = [_augment(img) for img in imgs] - if len(imgs) == 1: - imgs = imgs[0] - - if flows is not None: - if not isinstance(flows, list): - flows = [flows] - flows = [_augment_flow(flow) for flow in flows] - if len(flows) == 1: - flows = flows[0] - return imgs, flows - else: - if return_status: - return imgs, (hflip, vflip, rot90) - else: - return imgs - - -def img_rotate(img, angle, center=None, scale=1.0): - """Rotate image. - - Args: - img (ndarray): Image to be rotated. - angle (float): Rotation angle in degrees. Positive values mean - counter-clockwise rotation. - center (tuple[int]): Rotation center. If the center is None, - initialize it as the center of the image. Default: None. - scale (float): Isotropic scale factor. Default: 1.0. - """ - (h, w) = img.shape[:2] - - if center is None: - center = (w // 2, h // 2) - - matrix = cv2.getRotationMatrix2D(center, angle, scale) - rotated_img = cv2.warpAffine(img, matrix, (w, h)) - return rotated_img diff --git a/spaces/Flux9665/PoeticTTS/reference_audios/__init__.py b/spaces/Flux9665/PoeticTTS/reference_audios/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/FridaZuley/RVC_HFKawaii/run.sh b/spaces/FridaZuley/RVC_HFKawaii/run.sh deleted file mode 100644 index 704c9fff20b42b8659f7b4c797cd2928af9dec7a..0000000000000000000000000000000000000000 --- a/spaces/FridaZuley/RVC_HFKawaii/run.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash - -if [[ "$(uname)" == "Darwin" ]]; then - # macOS specific env: - export PYTORCH_ENABLE_MPS_FALLBACK=1 - export PYTORCH_MPS_HIGH_WATERMARK_RATIO=0.0 -elif [[ "$(uname)" != "Linux" ]]; then - echo "Unsupported operating system." - exit 1 -fi - -if [ -d ".venv" ]; then - echo "Activate venv..." - source .venv/bin/activate -else - echo "Create venv..." - requirements_file="requirements.txt" - - # Check if Python 3.8 is installed - if ! command -v python3 &> /dev/null; then - echo "Python 3 not found. Attempting to install 3.8..." - if [[ "$(uname)" == "Darwin" ]] && command -v brew &> /dev/null; then - brew install python@3.8 - elif [[ "$(uname)" == "Linux" ]] && command -v apt-get &> /dev/null; then - sudo apt-get update - sudo apt-get install python3.8 - else - echo "Please install Python 3.8 manually." 
- exit 1 - fi - fi - - python3 -m venv .venv - source .venv/bin/activate - - # Check if required packages are installed and install them if not - if [ -f "${requirements_file}" ]; then - installed_packages=$(python3 -m pip freeze) - while IFS= read -r package; do - [[ "${package}" =~ ^#.* ]] && continue - package_name=$(echo "${package}" | sed 's/[<>=!].*//') - if ! echo "${installed_packages}" | grep -q "${package_name}"; then - echo "${package_name} not found. Attempting to install..." - python3 -m pip install --upgrade "${package}" - fi - done < "${requirements_file}" - else - echo "${requirements_file} not found. Please ensure the requirements file with required packages exists." - exit 1 - fi -fi - -# Download models -./tools/dlmodels.sh - -if [[ $? -ne 0 ]]; then - exit 1 -fi - -# Run the main script -python3 infer-web.py --pycmd python3 diff --git a/spaces/Frorozcol/mariposas/app.py b/spaces/Frorozcol/mariposas/app.py deleted file mode 100644 index 5dfb3f285469b5172288f0d19de0b8ba453855ec..0000000000000000000000000000000000000000 --- a/spaces/Frorozcol/mariposas/app.py +++ /dev/null @@ -1,42 +0,0 @@ -import streamlit as st -from utils import cargar_mdoel, general - -## Pagina principal - -st.title("Generaedor de mariposas") -st.write("Este es un modelo Linght GAN entrenado y utilizado con platzi!!!") - -##barra lateral -st.sidebar.subheader("!Esta mariposa no existe, ¿Puedes creerlo?") -st.sidebar.image("assets/logo.png", width=200) -st.sidebar.caption("Demo creado en vivo.") - -## Cargamos el modelo -repo_id = "ceyda/butterfly_cropped_uniq1K_512" -modelo_gan = cargar_mdoel(repo_id) - -#Generamos 4 mariposas -n_mariposas = 4 - -def corre(): - with st.spinner("Generando, espera un poco..."): - ims = general( modelo_gan, n_mariposas) - st.session_state["ims"] = ims - -if "ims" not in st.session_state: - st.session_state["ims"] = None - corre() - -ims = st.session_state["ims"] - -corre_boton = st.button( - "Genera mariposas porfa", - on_click=corre, - help="Estamos en vuelo, abrocha tu cinturón" -) - -if ims is not None: - cols = st.columns(n_mariposas) - for j, im in enumerate(ims): - i = j % n_mariposas - cols[i].image(ims, use_column_width=True) \ No newline at end of file diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/mix_piles.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/mix_piles.py deleted file mode 100644 index 0e1c26f121dfbdc83ca1d74dce50562431ef1c83..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/mix_piles.py +++ /dev/null @@ -1,42 +0,0 @@ -import numpy as np -from cliport.tasks.task import Task -from cliport.utils import utils -from cliport.tasks import primitives -from cliport.tasks.grippers import Spatula - -class MixPiles(Task): - """Create two separate piles of ten blocks with different colors. Then, push them into a zone.""" - - def __init__(self): - super().__init__() - self.max_steps = 20 - self.num_blocks = 10 - self.lang_template = "create two separate piles of ten blocks with different colors. Then, push them into a zone." - self.task_completed_desc = "done mixing piles." - self.ee = Spatula - self.primitive = primitives.push - self.additional_reset() - - def reset(self, env): - super().reset(env) - - # Add goal zone. - zone_size = (0.12, 0.12, 0) - zone_pose = self.get_random_pose(env, zone_size) - env.add_object('zone/zone.urdf', zone_pose, 'fixed') - - # Get two random colors of piles - sample_colors, _ = utils.get_colors(self.mode, n_colors=2) - - # Add piles 1. 
- piles1 = self.make_piles(env, block_color=sample_colors[0]) - - # Add piles 2. - piles2 = self.make_piles(env, block_color=sample_colors[1]) - - # Goal: each block is in the goal zone, alternating between red and blue. - blocks = piles1 + piles2 - matches = np.ones((len(blocks), 1)) - self.add_goal(objs=blocks, matches=matches, targ_poses=[zone_pose], replace=True, - rotations=False, metric='zone', params=[(zone_pose, zone_size)], step_max_reward=1, - language_goal=self.lang_template) diff --git a/spaces/GeorgeOrville/bingo/src/components/learn-more.tsx b/spaces/GeorgeOrville/bingo/src/components/learn-more.tsx deleted file mode 100644 index a64459ee7900a612292e117a6bda96ee9260990f..0000000000000000000000000000000000000000 --- a/spaces/GeorgeOrville/bingo/src/components/learn-more.tsx +++ /dev/null @@ -1,39 +0,0 @@ -import React from 'react' -import { SourceAttribution } from '@/lib/bots/bing/types' - -export interface LearnMoreProps { - sourceAttributions?: SourceAttribution[] -} - -export function LearnMore({ sourceAttributions }: LearnMoreProps) { - if (!sourceAttributions?.length) { - return null - } - - return ( -
    <div>
-      了解详细信息:
-      <div>
-        {sourceAttributions.map((attribution, index) => {
-          const { providerDisplayName, seeMoreUrl } = attribution
-          const { host } = new URL(seeMoreUrl)
-          return (
-            <a key={index} href={seeMoreUrl} title={providerDisplayName} target="_blank" rel="noreferrer">
-              {index + 1}. {host}
-            </a>
-          )
-        })}
-      </div>
-    </div>
      - ) -} diff --git a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/README.md b/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/README.md deleted file mode 100644 index 3804ddb93ff8dc141f3180c7d6af8e105b949c24..0000000000000000000000000000000000000000 --- a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/README.md +++ /dev/null @@ -1,135 +0,0 @@ ---- -title: Anything V3.0 -emoji: 🏃 -colorFrom: gray -colorTo: yellow -sdk: gradio -sdk_version: 3.10.1 -app_file: app.py -pinned: false -duplicated_from: yangheng/Super-Resolution-Anime-Diffusion ---- - -# If you have a GPU, try the [Stable Diffusion WebUI](https://github.com/yangheng95/stable-diffusion-webui) - - -# [Online Web Demo](https://huggingface.co/spaces/yangheng/Super-Resolution-Anime-Diffusion) - -This is demo forked from https://huggingface.co/Linaqruf/anything-v3.0. - -## Super Resolution Anime Diffusion -At this moment, many diffusion models can only generate <1024 width and length pictures. -I integrated the Super Resolution with [Anything diffusion model](https://huggingface.co/Linaqruf/anything-v3.0) to produce high resolution pictures. -Thanks to the open-source project: https://github.com/yu45020/Waifu2x - - -## Modifications -1. Disable the safety checker to save time and memory. You need to abide the original rules of the model. -2. Add the Super Resolution function to the model. -3. Add batch generation function to the model (see inference.py). - -## Install -1. Install [Anaconda](https://www.anaconda.com/products/distribution) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html) -2. create a conda environment: -```bash -conda create -n diffusion python=3.9 -conda activate diffusion -``` -3. install requirements: -```ash -conda install pytorch pytorch-cuda=11.7 -c pytorch -c nvidia -pip install -r requirements.txt -``` -4. Run web demo: -``` -python app.py -``` -5. or run batch anime-generation -``` -python inference.py -``` -see the source code for details, you can set scale factor to magnify pictures - -## Random Examples (512*768) x4 scale factor -![Anime Girl](./random_examples/1.png) -![Anime Girl](./random_examples/2.png) -# Origin README ---- -language: -- en -license: creativeml-openrail-m -tags: -- stable-diffusion -- stable-diffusion-diffusers -- text-to-image -- diffusers -inference: true ---- - -# Anything V3 - -Welcome to Anything V3 - a latent diffusion model for weebs. This model is intended to produce high-quality, highly detailed anime style with just a few prompts. Like other anime-style Stable Diffusion models, it also supports danbooru tags to generate images. - -e.g. **_1girl, white hair, golden eyes, beautiful eyes, detail, flower meadow, cumulonimbus clouds, lighting, detailed sky, garden_** - -## Gradio - -We support a [Gradio](https://github.com/gradio-app/gradio) Web UI to run Anything-V3.0: - -[Open in Spaces](https://huggingface.co/spaces/akhaliq/anything-v3.0) - - - -## 🧨 Diffusers - -This model can be used just like any other Stable Diffusion model. For more information, -please have a look at the [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion). - -You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or [FLAX/JAX](). 
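The stock Diffusers usage snippet follows below. As a rough sketch of where this Space's super-resolution step (point 2 of the Modifications list above) plugs in, the post-processing is conceptually one extra call on the generated image; `waifu2x_upscale` here is a hypothetical stand-in for the bundled Waifu2x port, not a diffusers API:

```python
from PIL import Image

def waifu2x_upscale(image: Image.Image, scale: int = 4) -> Image.Image:
    # Placeholder for the repo's Waifu2x-based CNN upscaler; a plain Lanczos
    # resize keeps this sketch runnable without the actual model weights.
    return image.resize((image.width * scale, image.height * scale), Image.LANCZOS)

low_res = Image.open("./pikachu.png")          # e.g. a generated 512x768 image
high_res = waifu2x_upscale(low_res, scale=4)   # -> 2048x3072
high_res.save("./pikachu_x4.png")
```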
- -```python -from diffusers import StableDiffusionPipeline -import torch - -model_id = "Linaqruf/anything-v3.0" -pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16) -pipe = pipe.to("cuda") - -prompt = "pikachu" -image = pipe(prompt).images[0] - -image.save("./pikachu.png") -``` - -## Examples - -Below are some examples of images generated using this model: - -**Anime Girl:** -![Anime Girl](https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/1girl.png) -``` -1girl, brown hair, green eyes, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden -Steps: 50, Sampler: DDIM, CFG scale: 12 -``` -**Anime Boy:** -![Anime Boy](https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/1boy.png) -``` -1boy, medium hair, blonde hair, blue eyes, bishounen, colorful, autumn, cumulonimbus clouds, lighting, blue sky, falling leaves, garden -Steps: 50, Sampler: DDIM, CFG scale: 12 -``` -**Scenery:** -![Scenery](https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/scenery.png) -``` -scenery, shibuya tokyo, post-apocalypse, ruins, rust, sky, skyscraper, abandoned, blue sky, broken window, building, cloud, crane machine, outdoors, overgrown, pillar, sunset -Steps: 50, Sampler: DDIM, CFG scale: 12 -``` - -## License - -This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. -The CreativeML OpenRAIL License specifies: - -1. You can't use the model to deliberately produce nor share illegal or harmful outputs or content -2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license -3. You may re-distribute the weights and use the model commercially and/or as a service. 
If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) -[Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license) diff --git a/spaces/Gradio-Blocks/pubmed-abstract-retriever/README.md b/spaces/Gradio-Blocks/pubmed-abstract-retriever/README.md deleted file mode 100644 index 05a7522491204f2401025f3a863d6d0e0436ff53..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/pubmed-abstract-retriever/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: PubMed Abstract Retriever -emoji: 📚​🔎​📄​ -colorFrom: red -colorTo: green -sdk: gradio -app_file: app.py -pinned: false ---- - - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py deleted file mode 100644 index 1afeeef1212db831dd1f097d30b0354e459daa97..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = './cascade_rcnn_r50_fpn_20e_coco.py' -model = dict( - pretrained='open-mmlab://resnext101_32x4d', - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch')) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/da_head.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/da_head.py deleted file mode 100644 index 8ee0e08c3d69ee4392aa550072a043548c377571..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/decode_heads/da_head.py +++ /dev/null @@ -1,178 +0,0 @@ -import torch -import torch.nn.functional as F -from mmcv.cnn import ConvModule, Scale -from torch import nn - -from mmseg.core import add_prefix -from ..builder import HEADS -from ..utils import SelfAttentionBlock as _SelfAttentionBlock -from .decode_head import BaseDecodeHead - - -class PAM(_SelfAttentionBlock): - """Position Attention Module (PAM) - - Args: - in_channels (int): Input channels of key/query feature. - channels (int): Output channels of key/query transform. 
- """ - - def __init__(self, in_channels, channels): - super(PAM, self).__init__( - key_in_channels=in_channels, - query_in_channels=in_channels, - channels=channels, - out_channels=in_channels, - share_key_query=False, - query_downsample=None, - key_downsample=None, - key_query_num_convs=1, - key_query_norm=False, - value_out_num_convs=1, - value_out_norm=False, - matmul_norm=False, - with_out=False, - conv_cfg=None, - norm_cfg=None, - act_cfg=None) - - self.gamma = Scale(0) - - def forward(self, x): - """Forward function.""" - out = super(PAM, self).forward(x, x) - - out = self.gamma(out) + x - return out - - -class CAM(nn.Module): - """Channel Attention Module (CAM)""" - - def __init__(self): - super(CAM, self).__init__() - self.gamma = Scale(0) - - def forward(self, x): - """Forward function.""" - batch_size, channels, height, width = x.size() - proj_query = x.view(batch_size, channels, -1) - proj_key = x.view(batch_size, channels, -1).permute(0, 2, 1) - energy = torch.bmm(proj_query, proj_key) - energy_new = torch.max( - energy, -1, keepdim=True)[0].expand_as(energy) - energy - attention = F.softmax(energy_new, dim=-1) - proj_value = x.view(batch_size, channels, -1) - - out = torch.bmm(attention, proj_value) - out = out.view(batch_size, channels, height, width) - - out = self.gamma(out) + x - return out - - -@HEADS.register_module() -class DAHead(BaseDecodeHead): - """Dual Attention Network for Scene Segmentation. - - This head is the implementation of `DANet - `_. - - Args: - pam_channels (int): The channels of Position Attention Module(PAM). - """ - - def __init__(self, pam_channels, **kwargs): - super(DAHead, self).__init__(**kwargs) - self.pam_channels = pam_channels - self.pam_in_conv = ConvModule( - self.in_channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.pam = PAM(self.channels, pam_channels) - self.pam_out_conv = ConvModule( - self.channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.pam_conv_seg = nn.Conv2d( - self.channels, self.num_classes, kernel_size=1) - - self.cam_in_conv = ConvModule( - self.in_channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.cam = CAM() - self.cam_out_conv = ConvModule( - self.channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - self.cam_conv_seg = nn.Conv2d( - self.channels, self.num_classes, kernel_size=1) - - def pam_cls_seg(self, feat): - """PAM feature classification.""" - if self.dropout is not None: - feat = self.dropout(feat) - output = self.pam_conv_seg(feat) - return output - - def cam_cls_seg(self, feat): - """CAM feature classification.""" - if self.dropout is not None: - feat = self.dropout(feat) - output = self.cam_conv_seg(feat) - return output - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - pam_feat = self.pam_in_conv(x) - pam_feat = self.pam(pam_feat) - pam_feat = self.pam_out_conv(pam_feat) - pam_out = self.pam_cls_seg(pam_feat) - - cam_feat = self.cam_in_conv(x) - cam_feat = self.cam(cam_feat) - cam_feat = self.cam_out_conv(cam_feat) - cam_out = self.cam_cls_seg(cam_feat) - - feat_sum = pam_feat + cam_feat - pam_cam_out = self.cls_seg(feat_sum) - - return pam_cam_out, pam_out, cam_out - - def forward_test(self, inputs, img_metas, test_cfg): - """Forward function for testing, only 
``pam_cam`` is used.""" - return self.forward(inputs)[0] - - def losses(self, seg_logit, seg_label): - """Compute ``pam_cam``, ``pam``, ``cam`` loss.""" - pam_cam_seg_logit, pam_seg_logit, cam_seg_logit = seg_logit - loss = dict() - loss.update( - add_prefix( - super(DAHead, self).losses(pam_cam_seg_logit, seg_label), - 'pam_cam')) - loss.update( - add_prefix( - super(DAHead, self).losses(pam_seg_logit, seg_label), 'pam')) - loss.update( - add_prefix( - super(DAHead, self).losses(cam_seg_logit, seg_label), 'cam')) - return loss diff --git a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/CODE_OF_CONDUCT.md b/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/CODE_OF_CONDUCT.md deleted file mode 100644 index 83f431e8feeb7e80d571f39c9f6c1b96857b5f85..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,80 +0,0 @@ -# Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to make participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, sex characteristics, gender identity and expression, -level of experience, education, socio-economic status, nationality, personal -appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic -address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a -professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies within all project spaces, and it also applies when -an individual is representing the project or its community in public spaces. -Examples of representing a project or community include using an official -project e-mail address, posting via an official social media account, or acting -as an appointed representative at an online or offline event. Representation of -a project may be further defined and clarified by project maintainers. - -This Code of Conduct also applies outside the project spaces when there is a -reasonable belief that an individual's behavior may have a negative impact on -the project or its community. 
- -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at . All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see -https://www.contributor-covenant.org/faq diff --git a/spaces/HaHaBill/LandShapes-Antarctica/models/stylegan2/stylegan2-pytorch/projector.py b/spaces/HaHaBill/LandShapes-Antarctica/models/stylegan2/stylegan2-pytorch/projector.py deleted file mode 100644 index d63ad3573696cc22640cbeddc197d8cb15c52977..0000000000000000000000000000000000000000 --- a/spaces/HaHaBill/LandShapes-Antarctica/models/stylegan2/stylegan2-pytorch/projector.py +++ /dev/null @@ -1,203 +0,0 @@ -import argparse -import math -import os - -import torch -from torch import optim -from torch.nn import functional as F -from torchvision import transforms -from PIL import Image -from tqdm import tqdm - -import lpips -from model import Generator - - -def noise_regularize(noises): - loss = 0 - - for noise in noises: - size = noise.shape[2] - - while True: - loss = ( - loss - + (noise * torch.roll(noise, shifts=1, dims=3)).mean().pow(2) - + (noise * torch.roll(noise, shifts=1, dims=2)).mean().pow(2) - ) - - if size <= 8: - break - - noise = noise.reshape([1, 1, size // 2, 2, size // 2, 2]) - noise = noise.mean([3, 5]) - size //= 2 - - return loss - - -def noise_normalize_(noises): - for noise in noises: - mean = noise.mean() - std = noise.std() - - noise.data.add_(-mean).div_(std) - - -def get_lr(t, initial_lr, rampdown=0.25, rampup=0.05): - lr_ramp = min(1, (1 - t) / rampdown) - lr_ramp = 0.5 - 0.5 * math.cos(lr_ramp * math.pi) - lr_ramp = lr_ramp * min(1, t / rampup) - - return initial_lr * lr_ramp - - -def latent_noise(latent, strength): - noise = torch.randn_like(latent) * strength - - return latent + noise - - -def make_image(tensor): - return ( - tensor.detach() - .clamp_(min=-1, max=1) - .add(1) - .div_(2) - .mul(255) - .type(torch.uint8) - .permute(0, 2, 3, 1) - .to('cpu') - .numpy() - ) - - -if __name__ == '__main__': - device = 'cuda' - - parser = argparse.ArgumentParser() - parser.add_argument('--ckpt', type=str, required=True) - parser.add_argument('--size', type=int, default=256) - parser.add_argument('--lr_rampup', type=float, default=0.05) - parser.add_argument('--lr_rampdown', type=float, default=0.25) - parser.add_argument('--lr', type=float, default=0.1) - parser.add_argument('--noise', type=float, default=0.05) - parser.add_argument('--noise_ramp', type=float, default=0.75) - parser.add_argument('--step', type=int, default=1000) - parser.add_argument('--noise_regularize', type=float, default=1e5) - parser.add_argument('--mse', type=float, default=0) - parser.add_argument('--w_plus', action='store_true') - parser.add_argument('files', metavar='FILES', nargs='+') 
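Before the arguments are parsed and the optimization loop starts, it may help to see what the `get_lr` ramp defined above produces: a linear warm-up over the first `rampup` fraction of steps and a cosine decay over the last `rampdown` fraction. A quick check with the default `--lr 0.1`:

```python
import math

def get_lr(t, initial_lr, rampdown=0.25, rampup=0.05):
    # Same schedule as defined above.
    lr_ramp = min(1, (1 - t) / rampdown)
    lr_ramp = 0.5 - 0.5 * math.cos(lr_ramp * math.pi)
    lr_ramp = lr_ramp * min(1, t / rampup)
    return initial_lr * lr_ramp

for t in (0.0, 0.05, 0.5, 0.875, 1.0):
    print(t, round(get_lr(t, 0.1), 4))
# 0.0   -> 0.0   (start of warm-up)
# 0.05  -> 0.1   (warm-up done, full learning rate)
# 0.5   -> 0.1   (plateau)
# 0.875 -> 0.05  (halfway down the cosine ramp)
# 1.0   -> 0.0   (end of projection)
```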
- - args = parser.parse_args() - - n_mean_latent = 10000 - - resize = min(args.size, 256) - - transform = transforms.Compose( - [ - transforms.Resize(resize), - transforms.CenterCrop(resize), - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]), - ] - ) - - imgs = [] - - for imgfile in args.files: - img = transform(Image.open(imgfile).convert('RGB')) - imgs.append(img) - - imgs = torch.stack(imgs, 0).to(device) - - g_ema = Generator(args.size, 512, 8) - g_ema.load_state_dict(torch.load(args.ckpt)['g_ema'], strict=False) - g_ema.eval() - g_ema = g_ema.to(device) - - with torch.no_grad(): - noise_sample = torch.randn(n_mean_latent, 512, device=device) - latent_out = g_ema.style(noise_sample) - - latent_mean = latent_out.mean(0) - latent_std = ((latent_out - latent_mean).pow(2).sum() / n_mean_latent) ** 0.5 - - percept = lpips.PerceptualLoss( - model='net-lin', net='vgg', use_gpu=device.startswith('cuda') - ) - - noises = g_ema.make_noise() - - latent_in = latent_mean.detach().clone().unsqueeze(0).repeat(2, 1) - - if args.w_plus: - latent_in = latent_in.unsqueeze(1).repeat(1, g_ema.n_latent, 1) - - latent_in.requires_grad = True - - for noise in noises: - noise.requires_grad = True - - optimizer = optim.Adam([latent_in] + noises, lr=args.lr) - - pbar = tqdm(range(args.step)) - latent_path = [] - - for i in pbar: - t = i / args.step - lr = get_lr(t, args.lr) - optimizer.param_groups[0]['lr'] = lr - noise_strength = latent_std * args.noise * max(0, 1 - t / args.noise_ramp) ** 2 - latent_n = latent_noise(latent_in, noise_strength.item()) - - img_gen, _ = g_ema([latent_n], input_is_latent=True, noise=noises) - - batch, channel, height, width = img_gen.shape - - if height > 256: - factor = height // 256 - - img_gen = img_gen.reshape( - batch, channel, height // factor, factor, width // factor, factor - ) - img_gen = img_gen.mean([3, 5]) - - p_loss = percept(img_gen, imgs).sum() - n_loss = noise_regularize(noises) - mse_loss = F.mse_loss(img_gen, imgs) - - loss = p_loss + args.noise_regularize * n_loss + args.mse * mse_loss - - optimizer.zero_grad() - loss.backward() - optimizer.step() - - noise_normalize_(noises) - - if (i + 1) % 100 == 0: - latent_path.append(latent_in.detach().clone()) - - pbar.set_description( - ( - f'perceptual: {p_loss.item():.4f}; noise regularize: {n_loss.item():.4f};' - f' mse: {mse_loss.item():.4f}; lr: {lr:.4f}' - ) - ) - - result_file = {'noises': noises} - - img_gen, _ = g_ema([latent_path[-1]], input_is_latent=True, noise=noises) - - filename = os.path.splitext(os.path.basename(args.files[0]))[0] + '.pt' - - img_ar = make_image(img_gen) - - for i, input_name in enumerate(args.files): - result_file[input_name] = {'img': img_gen[i], 'latent': latent_in[i]} - img_name = os.path.splitext(os.path.basename(input_name))[0] + '-project.png' - pil_img = Image.fromarray(img_ar[i]) - pil_img.save(img_name) - - torch.save(result_file, filename) diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/index.3517cbba.css b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/index.3517cbba.css deleted file mode 100644 index 8a986feb0fd82c371bc0dfbbbced8f43167c38aa..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/index.3517cbba.css +++ /dev/null @@ -1 +0,0 @@ -span.svelte-1cgkd5k{text-shadow:0 0 8px 
rgba(0,0,0,.5)}progress.svelte-1cgkd5k::-webkit-progress-bar{border-radius:2px;background-color:#fff3;overflow:hidden}progress.svelte-1cgkd5k::-webkit-progress-value{background-color:#ffffffe6}.mirror.svelte-1cgkd5k{transform:scaleX(-1)}.wrap.svelte-1cgkd5k{width:calc(100% - .75rem)} diff --git a/spaces/Hoodady/3DFuse/ldm/modules/midas/midas/base_model.py b/spaces/Hoodady/3DFuse/ldm/modules/midas/midas/base_model.py deleted file mode 100644 index 5cf430239b47ec5ec07531263f26f5c24a2311cd..0000000000000000000000000000000000000000 --- a/spaces/Hoodady/3DFuse/ldm/modules/midas/midas/base_model.py +++ /dev/null @@ -1,16 +0,0 @@ -import torch - - -class BaseModel(torch.nn.Module): - def load(self, path): - """Load model from file. - - Args: - path (str): file path - """ - parameters = torch.load(path, map_location=torch.device('cpu')) - - if "optimizer" in parameters: - parameters = parameters["model"] - - self.load_state_dict(parameters) diff --git a/spaces/HuggingFaceH4/open_llm_leaderboard/src/load_from_hub.py b/spaces/HuggingFaceH4/open_llm_leaderboard/src/load_from_hub.py deleted file mode 100644 index e512e529eed70156cc91d76b02296a8d55f86224..0000000000000000000000000000000000000000 --- a/spaces/HuggingFaceH4/open_llm_leaderboard/src/load_from_hub.py +++ /dev/null @@ -1,91 +0,0 @@ -import json -import os -from collections import defaultdict - -import pandas as pd - -from src.assets.hardcoded_evals import baseline, gpt4_values, gpt35_values -from src.get_model_info.apply_metadata_to_df import apply_metadata -from src.plots.read_results import get_eval_results_dicts, make_clickable_model -from src.get_model_info.utils import AutoEvalColumn, EvalQueueColumn, has_no_nan_values - -IS_PUBLIC = bool(os.environ.get("IS_PUBLIC", True)) - - -def get_all_requested_models(requested_models_dir: str) -> set[str]: - depth = 1 - file_names = [] - users_to_submission_dates = defaultdict(list) - - for root, _, files in os.walk(requested_models_dir): - current_depth = root.count(os.sep) - requested_models_dir.count(os.sep) - if current_depth == depth: - for file in files: - if not file.endswith(".json"): - continue - with open(os.path.join(root, file), "r") as f: - info = json.load(f) - file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}") - - # Select organisation - if info["model"].count("/") == 0 or "submitted_time" not in info: - continue - organisation, _ = info["model"].split("/") - users_to_submission_dates[organisation].append(info["submitted_time"]) - - return set(file_names), users_to_submission_dates - - -def get_leaderboard_df(results_path: str, cols: list, benchmark_cols: list) -> pd.DataFrame: - all_data = get_eval_results_dicts(results_path) - - if not IS_PUBLIC: - all_data.append(gpt4_values) - all_data.append(gpt35_values) - - all_data.append(baseline) - apply_metadata(all_data) # Populate model type based on known hardcoded values in `metadata.py` - - df = pd.DataFrame.from_records(all_data) - df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False) - df = df[cols].round(decimals=2) - - # filter out if any of the benchmarks have not been produced - df = df[has_no_nan_values(df, benchmark_cols)] - return df - - -def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]: - entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")] - all_evals = [] - - for entry in entries: - if ".json" in entry: - file_path = os.path.join(save_path, entry) - with open(file_path) as fp: - data = json.load(fp) - - 
data[EvalQueueColumn.model.name] = make_clickable_model(data["model"]) - data[EvalQueueColumn.revision.name] = data.get("revision", "main") - - all_evals.append(data) - elif ".md" not in entry: - # this is a folder - sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if not e.startswith(".")] - for sub_entry in sub_entries: - file_path = os.path.join(save_path, entry, sub_entry) - with open(file_path) as fp: - data = json.load(fp) - - data[EvalQueueColumn.model.name] = make_clickable_model(data["model"]) - data[EvalQueueColumn.revision.name] = data.get("revision", "main") - all_evals.append(data) - - pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]] - running_list = [e for e in all_evals if e["status"] == "RUNNING"] - finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"] - df_pending = pd.DataFrame.from_records(pending_list, columns=cols) - df_running = pd.DataFrame.from_records(running_list, columns=cols) - df_finished = pd.DataFrame.from_records(finished_list, columns=cols) - return df_finished[cols], df_running[cols], df_pending[cols] - diff --git a/spaces/HuseynG/ECS7022P-WGAN-GP/app.py b/spaces/HuseynG/ECS7022P-WGAN-GP/app.py deleted file mode 100644 index 8f024b78f1a7a4800ddbb4975d6792b7dd666627..0000000000000000000000000000000000000000 --- a/spaces/HuseynG/ECS7022P-WGAN-GP/app.py +++ /dev/null @@ -1,28 +0,0 @@ -import gradio as gr -import torch -from utils import load_model, generate_random_img, schedule_function -import time -import random -import threading -from gradio_client import Client - -def generate_image(): - with torch.no_grad(): - model = load_model('generator', 'generator_model_epoch_94.pth') - generated_image = generate_random_img(model) - return generated_image - -iface = gr.Interface( - fn=generate_image, - inputs=[], - outputs=gr.outputs.Image(type='numpy'), - allow_screenshot=True, - title='Random Landscape Image Generator By Huseyn Gorbani', - description='This app generates random images, using DCFAN inspired WGAN-GP model. Special Thanks to Aladdin Persson and Emilien Dupont for their insightful repos on GitHub. Aladdin Persson (repo: https://github.com/aladdinpersson/Machine-Learning-Collection/tree/master/ML/Pytorch/GANs/4.%20WGAN-GP) Emilien Dupont (repo: https://github.com/EmilienDupont/wgan-gp/blob/master/training.py)', - css='img_styles.css', -) - -if __name__ == '__main__': - scheduler_thread = threading.Thread(target=schedule_function) # avoiding sleep, again this project is for academic purposes only - # scheduler_thread.start() - iface.launch() \ No newline at end of file diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/data/offset_tokens_dataset.py b/spaces/ICML2022/OFA/fairseq/fairseq/data/offset_tokens_dataset.py deleted file mode 100644 index 6fabbdcdaa1a8f70d8d8c07db4cd53754503c194..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/data/offset_tokens_dataset.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from . 
import BaseWrapperDataset - - -class OffsetTokensDataset(BaseWrapperDataset): - def __init__(self, dataset, offset): - super().__init__(dataset) - self.offset = offset - - def __getitem__(self, idx): - return self.dataset[idx] + self.offset diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/models/nat/nat_crf_transformer.py b/spaces/ICML2022/OFA/fairseq/fairseq/models/nat/nat_crf_transformer.py deleted file mode 100644 index d4b3cd931ceb077eb30db73df1d5d6cd714a86c2..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/models/nat/nat_crf_transformer.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - - -from fairseq.models import register_model, register_model_architecture -from fairseq.models.nat import NATransformerModel, base_architecture -from fairseq.modules import DynamicCRF - - -@register_model("nacrf_transformer") -class NACRFTransformerModel(NATransformerModel): - def __init__(self, args, encoder, decoder): - super().__init__(args, encoder, decoder) - self.crf_layer = DynamicCRF( - num_embedding=len(self.tgt_dict), - low_rank=args.crf_lowrank_approx, - beam_size=args.crf_beam_approx, - ) - - @property - def allow_ensemble(self): - return False - - @staticmethod - def add_args(parser): - NATransformerModel.add_args(parser) - parser.add_argument( - "--crf-lowrank-approx", - type=int, - help="the dimension of low-rank approximation of transition", - ) - parser.add_argument( - "--crf-beam-approx", - type=int, - help="the beam size for apporixmating the normalizing factor", - ) - parser.add_argument( - "--word-ins-loss-factor", - type=float, - help="weights on NAT loss used to co-training with CRF loss.", - ) - - def forward( - self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs - ): - # encoding - encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) - - # length prediction - length_out = self.decoder.forward_length( - normalize=False, encoder_out=encoder_out - ) - length_tgt = self.decoder.forward_length_prediction( - length_out, encoder_out, tgt_tokens - ) - - # decoding - word_ins_out = self.decoder( - normalize=False, - prev_output_tokens=prev_output_tokens, - encoder_out=encoder_out, - ) - word_ins_tgt, word_ins_mask = tgt_tokens, tgt_tokens.ne(self.pad) - - # compute the log-likelihood of CRF - crf_nll = -self.crf_layer(word_ins_out, word_ins_tgt, word_ins_mask) - crf_nll = (crf_nll / word_ins_mask.type_as(crf_nll).sum(-1)).mean() - - return { - "word_ins": { - "out": word_ins_out, - "tgt": word_ins_tgt, - "mask": word_ins_mask, - "ls": self.args.label_smoothing, - "nll_loss": True, - "factor": self.args.word_ins_loss_factor, - }, - "word_crf": {"loss": crf_nll}, - "length": { - "out": length_out, - "tgt": length_tgt, - "factor": self.decoder.length_loss_factor, - }, - } - - def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs): - output_tokens = decoder_out.output_tokens - output_scores = decoder_out.output_scores - history = decoder_out.history - - # execute the decoder and get emission scores - output_masks = output_tokens.ne(self.pad) - word_ins_out = self.decoder( - normalize=False, prev_output_tokens=output_tokens, encoder_out=encoder_out - ) - - # run viterbi decoding through CRF - _scores, _tokens = self.crf_layer.forward_decoder(word_ins_out, output_masks) - output_tokens.masked_scatter_(output_masks, 
_tokens[output_masks]) - output_scores.masked_scatter_(output_masks, _scores[output_masks]) - if history is not None: - history.append(output_tokens.clone()) - - return decoder_out._replace( - output_tokens=output_tokens, - output_scores=output_scores, - attn=None, - history=history, - ) - - -@register_model_architecture("nacrf_transformer", "nacrf_transformer") -def nacrf_base_architecture(args): - args.crf_lowrank_approx = getattr(args, "crf_lowrank_approx", 32) - args.crf_beam_approx = getattr(args, "crf_beam_approx", 64) - args.word_ins_loss_factor = getattr(args, "word_ins_loss_factor", 0.5) - args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) - args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) - base_architecture(args) diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/pq/__init__.py b/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/pq/__init__.py deleted file mode 100644 index c142a802e05ec7ecfa5dba7d9a98c26a60ac75d2..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/pq/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .utils import SizeTracker, get_param, attrsetter, quantize_model_ # NOQA diff --git a/spaces/Ibtehaj10/cheating-detection-FYP/pages/signup.py b/spaces/Ibtehaj10/cheating-detection-FYP/pages/signup.py deleted file mode 100644 index d7753e687fd683bd2ec45cd58b9a72013dc23689..0000000000000000000000000000000000000000 --- a/spaces/Ibtehaj10/cheating-detection-FYP/pages/signup.py +++ /dev/null @@ -1,82 +0,0 @@ -import pickle -from pathlib import Path -import streamlit as st -import os -import pandas as pd -import csv -data = ['Id','Password'] - -# with open('LoginStatus.csv', 'w') as file: -# writer = csv.writer(file) -# writer.writerow(data) -db = {} - -l1 = [] -l2 = [] -ids = st.text_input("Email Address") -password = st.text_input("Password",type="password",key="password") -# l1.append(ids) -# l2.append(password) - -# l1.append(ids) -# l2.append(password) -key1 = "Id" -db.setdefault(key1, []) -db[key1].append(ids) - -key2 = "password" -db.setdefault(key2, []) -db[key2].append(password) - -# print(db) -# db['Id'] = l1 -# db['Password'] = l2 -# for i in db: -df = pd.DataFrame(db) -# st.write(db) -# df -if st.button('Register'): - df.to_csv('LoginStatus.csv', mode='a', header=False, index=False) - st.success('User Successfully Registered!') - - - -# import streamlit as st -# def check_password(): -# """Returns `True` if the user had a correct password.""" - -# def password_entered(): -# """Checks whether a password entered by the user is correct.""" -# if ( -# st.session_state["username"] in st.secrets["passwords"] -# and st.session_state["password"] -# == st.secrets["passwords"][st.session_state["username"]] -# ): -# st.session_state["password_correct"] = True -# del st.session_state["password"] # don't store username + password -# del st.session_state["username"] -# else: -# st.session_state["password_correct"] = False - -# if "password_correct" not in st.session_state: -# # First run, show inputs for username + password. 
-# st.text_input("Username", on_change=password_entered, key="username") -# st.text_input( -# "Password", type="password", on_change=password_entered, key="password" -# ) -# return False -# elif not st.session_state["password_correct"]: -# # Password not correct, show input + error. -# st.text_input("Username", on_change=password_entered, key="username") -# st.text_input( -# "Password", type="password", on_change=password_entered, key="password" -# ) -# st.error("😕 User not known or password incorrect") -# return False -# else: -# # Password correct. -# return True - -# if check_password(): -# st.write("Here goes your normal Streamlit app...") -# st.button("Click me") \ No newline at end of file diff --git a/spaces/ItsJayQz/Marvel_WhatIf_Diffusion/app.py b/spaces/ItsJayQz/Marvel_WhatIf_Diffusion/app.py deleted file mode 100644 index eac6841a968aba1cbc4cbb5d05136afd271eb7d5..0000000000000000000000000000000000000000 --- a/spaces/ItsJayQz/Marvel_WhatIf_Diffusion/app.py +++ /dev/null @@ -1,137 +0,0 @@ -from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler -import gradio as gr -import torch -from PIL import Image - -model_id = 'ItsJayQz/Marvel_WhatIf_Diffusion' -prefix = 'whatif style' - -scheduler = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler") - -pipe = StableDiffusionPipeline.from_pretrained( - model_id, - torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, - scheduler=scheduler) - -pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained( - model_id, - torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, - scheduler=scheduler) - -if torch.cuda.is_available(): - pipe = pipe.to("cuda") - pipe_i2i = pipe_i2i.to("cuda") - -def error_str(error, title="Error"): - return f"""#### {title} - {error}""" if error else "" - -def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=False): - - generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None - prompt = f"{prefix} {prompt}" if auto_prefix else prompt - - try: - if img is not None: - return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None - else: - return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None - except Exception as e: - return None, error_str(e) - -def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator): - - result = pipe( - prompt, - negative_prompt = neg_prompt, - num_inference_steps = int(steps), - guidance_scale = guidance, - width = width, - height = height, - generator = generator) - - return result.images[0] - -def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator): - - ratio = min(height / img.height, width / img.width) - img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS) - result = pipe_i2i( - prompt, - negative_prompt = neg_prompt, - init_image = img, - num_inference_steps = int(steps), - strength = strength, - guidance_scale = guidance, - width = width, - height = height, - generator = generator) - - return result.images[0] - -css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem} -""" -with gr.Blocks(css=css) as demo: - gr.HTML( - 
f""" -
      -
      -

      Marvel Whatif Diffusion

      -
      -

      - Demo for Marvel Whatif Diffusion Stable Diffusion model.
      - {"Add the following tokens to your prompts for the model to work properly: prefix" if prefix else ""} -

      - Running on {"GPU 🔥" if torch.cuda.is_available() else f"CPU 🥶. For faster inference it is recommended to upgrade to GPU in Settings"} after duplicating the space

      - Duplicate Space -
      - """ - ) - with gr.Row(): - - with gr.Column(scale=55): - with gr.Group(): - with gr.Row(): - prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder=f"{prefix} [your prompt]").style(container=False) - generate = gr.Button(value="Generate").style(rounded=(False, True, True, False)) - - image_out = gr.Image(height=512) - error_output = gr.Markdown() - - with gr.Column(scale=45): - with gr.Tab("Options"): - with gr.Group(): - neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image") - auto_prefix = gr.Checkbox(label="Prefix styling tokens automatically (whatif style)", value=prefix, visible=prefix) - - with gr.Row(): - guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15) - steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1) - - with gr.Row(): - width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8) - height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8) - - seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1) - - with gr.Tab("Image to image"): - with gr.Group(): - image = gr.Image(label="Image", height=256, tool="editor", type="pil") - strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5) - - auto_prefix.change(lambda x: gr.update(placeholder=f"{prefix} [your prompt]" if x else "[Your prompt]"), inputs=auto_prefix, outputs=prompt, queue=False) - - inputs = [prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, auto_prefix] - outputs = [image_out, error_output] - prompt.submit(inference, inputs=inputs, outputs=outputs) - generate.click(inference, inputs=inputs, outputs=outputs) - - gr.HTML(""" -
      <div> - <p>This space was created using SD Space Creator.</p> - </div>
      - """) - -demo.queue(concurrency_count=1) -demo.launch() diff --git a/spaces/Jamel887/Rvc-tio887/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py b/spaces/Jamel887/Rvc-tio887/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py deleted file mode 100644 index b2c592527a5966e6f8e79e8c52dc5b414246dcc6..0000000000000000000000000000000000000000 --- a/spaces/Jamel887/Rvc-tio887/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py +++ /dev/null @@ -1,97 +0,0 @@ -from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import parselmouth -import numpy as np - - -class PMF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - 对F0进行插值处理 - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def compute_f0(self, wav, p_len=None): - x = wav - if p_len is None: - p_len = x.shape[0] // self.hop_length - else: - assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error" - time_step = self.hop_length / self.sampling_rate * 1000 - f0 = ( - parselmouth.Sound(x, self.sampling_rate) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=self.f0_min, - pitch_ceiling=self.f0_max, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant") - f0, uv = self.interpolate_f0(f0) - return f0 - - def compute_f0_uv(self, wav, p_len=None): - x = wav - if p_len is None: - p_len = x.shape[0] // self.hop_length - else: - assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error" - time_step = self.hop_length / self.sampling_rate * 1000 - f0 = ( - parselmouth.Sound(x, self.sampling_rate) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=self.f0_min, - pitch_ceiling=self.f0_max, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant") - f0, uv = self.interpolate_f0(f0) - return f0, uv diff --git a/spaces/Jeffsun/LSP-LearningandStrivePartner-Demo/modules/prompt_parser.py b/spaces/Jeffsun/LSP-LearningandStrivePartner-Demo/modules/prompt_parser.py deleted file mode 100644 index 42cbbb3038612a44571765905e8526553f462663..0000000000000000000000000000000000000000 --- a/spaces/Jeffsun/LSP-LearningandStrivePartner-Demo/modules/prompt_parser.py +++ /dev/null @@ -1,391 +0,0 @@ - -import re -import math -import numpy as np -import torch - -# Code from 
https://github.com/AUTOMATIC1111/stable-diffusion-webui/commit/8e2aeee4a127b295bfc880800e4a312e0f049b85, modified. - -class PromptChunk: - """ - This object contains token ids, weight (multipliers:1.4) and textual inversion embedding info for a chunk of prompt. - If a prompt is short, it is represented by one PromptChunk, otherwise, multiple are necessary. - Each PromptChunk contains an exact amount of tokens - 77, which includes one for start and end token, - so just 75 tokens from prompt. - """ - - def __init__(self): - self.tokens = [] - self.multipliers = [] - self.fixes = [] - - -class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): - """A pytorch module that is a wrapper for FrozenCLIPEmbedder module. it enhances FrozenCLIPEmbedder, making it possible to - have unlimited prompt length and assign weights to tokens in prompt. - """ - - def __init__(self, text_encoder, enable_emphasis=True): - super().__init__() - - self.device = lambda: text_encoder.device - self.enable_emphasis = enable_emphasis - """Original FrozenCLIPEmbedder module; can also be FrozenOpenCLIPEmbedder or xlmr.BertSeriesModelWithTransformation, - depending on model.""" - - self.chunk_length = 75 - - def empty_chunk(self): - """creates an empty PromptChunk and returns it""" - - chunk = PromptChunk() - chunk.tokens = [self.id_start] + [self.id_end] * (self.chunk_length + 1) - chunk.multipliers = [1.0] * (self.chunk_length + 2) - return chunk - - def get_target_prompt_token_count(self, token_count): - """returns the maximum number of tokens a prompt of a known length can have before it requires one more PromptChunk to be represented""" - - return math.ceil(max(token_count, 1) / self.chunk_length) * self.chunk_length - - def tokenize_line(self, line): - """ - this transforms a single prompt into a list of PromptChunk objects - as many as needed to - represent the prompt. - Returns the list and the total number of tokens in the prompt. - """ - - if self.enable_emphasis: - parsed = parse_prompt_attention(line) - else: - parsed = [[line, 1.0]] - - tokenized = self.tokenize([text for text, _ in parsed]) - - chunks = [] - chunk = PromptChunk() - token_count = 0 - last_comma = -1 - - def next_chunk(is_last=False): - """puts current chunk into the list of results and produces the next one - empty; - if is_last is true, tokens tokens at the end won't add to token_count""" - nonlocal token_count - nonlocal last_comma - nonlocal chunk - - if is_last: - token_count += len(chunk.tokens) - else: - token_count += self.chunk_length - - to_add = self.chunk_length - len(chunk.tokens) - if to_add > 0: - chunk.tokens += [self.id_end] * to_add - chunk.multipliers += [1.0] * to_add - - chunk.tokens = [self.id_start] + chunk.tokens + [self.id_end] - chunk.multipliers = [1.0] + chunk.multipliers + [1.0] - - last_comma = -1 - chunks.append(chunk) - chunk = PromptChunk() - - comma_padding_backtrack = 20 # default value in https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/6cff4401824299a983c8e13424018efc347b4a2b/modules/shared.py#L410 - for tokens, (text, weight) in zip(tokenized, parsed): - if text == "BREAK" and weight == -1: - next_chunk() - continue - - position = 0 - while position < len(tokens): - token = tokens[position] - - if token == self.comma_token: - last_comma = len(chunk.tokens) - - # this is when we are at the end of alloted 75 tokens for the current chunk, and the current token is not a comma. 
opts.comma_padding_backtrack - # is a setting that specifies that if there is a comma nearby, the text after the comma should be moved out of this chunk and into the next. - elif ( - comma_padding_backtrack != 0 - and len(chunk.tokens) == self.chunk_length - and last_comma != -1 - and len(chunk.tokens) - last_comma <= comma_padding_backtrack - ): - break_location = last_comma + 1 - - reloc_tokens = chunk.tokens[break_location:] - reloc_mults = chunk.multipliers[break_location:] - - chunk.tokens = chunk.tokens[:break_location] - chunk.multipliers = chunk.multipliers[:break_location] - - next_chunk() - chunk.tokens = reloc_tokens - chunk.multipliers = reloc_mults - - if len(chunk.tokens) == self.chunk_length: - next_chunk() - - chunk.tokens.append(token) - chunk.multipliers.append(weight) - position += 1 - - if len(chunk.tokens) > 0 or len(chunks) == 0: - next_chunk(is_last=True) - - return chunks, token_count - - def process_texts(self, texts): - """ - Accepts a list of texts and calls tokenize_line() on each, with cache. Returns the list of results and maximum - length, in tokens, of all texts. - """ - - token_count = 0 - - cache = {} - batch_chunks = [] - for line in texts: - if line in cache: - chunks = cache[line] - else: - chunks, current_token_count = self.tokenize_line(line) - token_count = max(current_token_count, token_count) - - cache[line] = chunks - - batch_chunks.append(chunks) - - return batch_chunks, token_count - - def forward(self, texts): - """ - Accepts an array of texts; Passes texts through transformers network to create a tensor with numerical representation of those texts. - Returns a tensor with shape of (B, T, C), where B is length of the array; T is length, in tokens, of texts (including padding) - T will - be a multiple of 77; and C is dimensionality of each token - for SD1 it's 768, and for SD2 it's 1024. - An example shape returned by this function can be: (2, 77, 768). - Webui usually sends just one text at a time through this function - the only time when texts is an array with more than one elemenet - is when you do prompt editing: "a picture of a [cat:dog:0.4] eating ice cream" - """ - - batch_chunks, token_count = self.process_texts(texts) - chunk_count = max([len(x) for x in batch_chunks]) - - zs = [] - ts = [] - for i in range(chunk_count): - batch_chunk = [ - chunks[i] if i < len(chunks) else self.empty_chunk() - for chunks in batch_chunks - ] - - tokens = [x.tokens for x in batch_chunk] - multipliers = [x.multipliers for x in batch_chunk] - # self.embeddings.fixes = [x.fixes for x in batch_chunk] - - # for fixes in self.embeddings.fixes: - # for position, embedding in fixes: - # used_embeddings[embedding.name] = embedding - - z = self.process_tokens(tokens, multipliers) - zs.append(z) - ts.append(tokens) - - return np.hstack(ts), torch.hstack(zs) - - def process_tokens(self, remade_batch_tokens, batch_multipliers): - """ - sends one single prompt chunk to be encoded by transformers neural network. - remade_batch_tokens is a batch of tokens - a list, where every element is a list of tokens; usually - there are exactly 77 tokens in the list. batch_multipliers is the same but for multipliers instead of tokens. - Multipliers are used to give more or less weight to the outputs of transformers network. Each multiplier - corresponds to one token. - """ - tokens = torch.asarray(remade_batch_tokens).to(self.device()) - - # this is for SD2: SD1 uses the same token for padding and end of text, while SD2 uses different ones. 
- if self.id_end != self.id_pad: - for batch_pos in range(len(remade_batch_tokens)): - index = remade_batch_tokens[batch_pos].index(self.id_end) - tokens[batch_pos, index + 1 : tokens.shape[1]] = self.id_pad - - z = self.encode_with_transformers(tokens) - - # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise - batch_multipliers = torch.asarray(batch_multipliers).to(self.device()) - original_mean = z.mean() - z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape) - new_mean = z.mean() - z = z * (original_mean / new_mean) - - return z - - -class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase): - def __init__(self, tokenizer, text_encoder): - super().__init__(text_encoder) - self.tokenizer = tokenizer - self.text_encoder = text_encoder - - vocab = self.tokenizer.get_vocab() - - self.comma_token = vocab.get(",", None) - - self.token_mults = {} - tokens_with_parens = [ - (k, v) - for k, v in vocab.items() - if "(" in k or ")" in k or "[" in k or "]" in k - ] - for text, ident in tokens_with_parens: - mult = 1.0 - for c in text: - if c == "[": - mult /= 1.1 - if c == "]": - mult *= 1.1 - if c == "(": - mult *= 1.1 - if c == ")": - mult /= 1.1 - - if mult != 1.0: - self.token_mults[ident] = mult - - self.id_start = self.tokenizer.bos_token_id - self.id_end = self.tokenizer.eos_token_id - self.id_pad = self.id_end - - def tokenize(self, texts): - tokenized = self.tokenizer( - texts, truncation=False, add_special_tokens=False - )["input_ids"] - - return tokenized - - def encode_with_transformers(self, tokens): - CLIP_stop_at_last_layers = 1 - tokens = tokens.to(self.text_encoder.device) - outputs = self.text_encoder(tokens, output_hidden_states=True) - - if CLIP_stop_at_last_layers > 1: - z = outputs.hidden_states[-CLIP_stop_at_last_layers] - z = self.text_encoder.text_model.final_layer_norm(z) - else: - z = outputs.last_hidden_state - - return z - - -re_attention = re.compile( - r""" -\\\(| -\\\)| -\\\[| -\\]| -\\\\| -\\| -\(| -\[| -:([+-]?[.\d]+)\)| -\)| -]| -[^\\()\[\]:]+| -: -""", - re.X, -) - -re_break = re.compile(r"\s*\bBREAK\b\s*", re.S) - - -def parse_prompt_attention(text): - """ - Parses a string with attention tokens and returns a list of pairs: text and its associated weight. 
- Accepted tokens are: - (abc) - increases attention to abc by a multiplier of 1.1 - (abc:3.12) - increases attention to abc by a multiplier of 3.12 - [abc] - decreases attention to abc by a multiplier of 1.1 - \( - literal character '(' - \[ - literal character '[' - \) - literal character ')' - \] - literal character ']' - \\ - literal character '\' - anything else - just text - - >>> parse_prompt_attention('normal text') - [['normal text', 1.0]] - >>> parse_prompt_attention('an (important) word') - [['an ', 1.0], ['important', 1.1], [' word', 1.0]] - >>> parse_prompt_attention('(unbalanced') - [['unbalanced', 1.1]] - >>> parse_prompt_attention('\(literal\]') - [['(literal]', 1.0]] - >>> parse_prompt_attention('(unnecessary)(parens)') - [['unnecessaryparens', 1.1]] - >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).') - [['a ', 1.0], - ['house', 1.5730000000000004], - [' ', 1.1], - ['on', 1.0], - [' a ', 1.1], - ['hill', 0.55], - [', sun, ', 1.1], - ['sky', 1.4641000000000006], - ['.', 1.1]] - """ - - res = [] - round_brackets = [] - square_brackets = [] - - round_bracket_multiplier = 1.1 - square_bracket_multiplier = 1 / 1.1 - - def multiply_range(start_position, multiplier): - for p in range(start_position, len(res)): - res[p][1] *= multiplier - - for m in re_attention.finditer(text): - text = m.group(0) - weight = m.group(1) - - if text.startswith("\\"): - res.append([text[1:], 1.0]) - elif text == "(": - round_brackets.append(len(res)) - elif text == "[": - square_brackets.append(len(res)) - elif weight is not None and len(round_brackets) > 0: - multiply_range(round_brackets.pop(), float(weight)) - elif text == ")" and len(round_brackets) > 0: - multiply_range(round_brackets.pop(), round_bracket_multiplier) - elif text == "]" and len(square_brackets) > 0: - multiply_range(square_brackets.pop(), square_bracket_multiplier) - else: - parts = re.split(re_break, text) - for i, part in enumerate(parts): - if i > 0: - res.append(["BREAK", -1]) - res.append([part, 1.0]) - - for pos in round_brackets: - multiply_range(pos, round_bracket_multiplier) - - for pos in square_brackets: - multiply_range(pos, square_bracket_multiplier) - - if len(res) == 0: - res = [["", 1.0]] - - # merge runs of identical weights - i = 0 - while i + 1 < len(res): - if res[i][1] == res[i + 1][1]: - res[i][0] += res[i + 1][0] - res.pop(i + 1) - else: - i += 1 - - return res diff --git a/spaces/Jikiwi/sovits-models/inference/infer_tool_grad.py b/spaces/Jikiwi/sovits-models/inference/infer_tool_grad.py deleted file mode 100644 index b75af49c08e2e724839828bc419792ed580809bb..0000000000000000000000000000000000000000 --- a/spaces/Jikiwi/sovits-models/inference/infer_tool_grad.py +++ /dev/null @@ -1,160 +0,0 @@ -import hashlib -import json -import logging -import os -import time -from pathlib import Path -import io -import librosa -import maad -import numpy as np -from inference import slicer -import parselmouth -import soundfile -import torch -import torchaudio - -from hubert import hubert_model -import utils -from models import SynthesizerTrn -logging.getLogger('numba').setLevel(logging.WARNING) -logging.getLogger('matplotlib').setLevel(logging.WARNING) - -def resize2d_f0(x, target_len): - source = np.array(x) - source[source < 0.001] = np.nan - target = np.interp(np.arange(0, len(source) * target_len, len(source)) / target_len, np.arange(0, len(source)), - source) - res = np.nan_to_num(target) - return res - -def get_f0(x, p_len,f0_up_key=0): - - time_step = 160 / 16000 * 1000 - f0_min = 50 
- f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - - f0 = parselmouth.Sound(x, 16000).to_pitch_ac( - time_step=time_step / 1000, voicing_threshold=0.6, - pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency'] - - pad_size=(p_len - len(f0) + 1) // 2 - if(pad_size>0 or p_len - len(f0) - pad_size>0): - f0 = np.pad(f0,[[pad_size,p_len - len(f0) - pad_size]], mode='constant') - - f0 *= pow(2, f0_up_key / 12) - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0 - -def clean_pitch(input_pitch): - num_nan = np.sum(input_pitch == 1) - if num_nan / len(input_pitch) > 0.9: - input_pitch[input_pitch != 1] = 1 - return input_pitch - - -def plt_pitch(input_pitch): - input_pitch = input_pitch.astype(float) - input_pitch[input_pitch == 1] = np.nan - return input_pitch - - -def f0_to_pitch(ff): - f0_pitch = 69 + 12 * np.log2(ff / 440) - return f0_pitch - - -def fill_a_to_b(a, b): - if len(a) < len(b): - for _ in range(0, len(b) - len(a)): - a.append(a[0]) - - -def mkdir(paths: list): - for path in paths: - if not os.path.exists(path): - os.mkdir(path) - - -class VitsSvc(object): - def __init__(self): - self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - self.SVCVITS = None - self.hps = None - self.speakers = None - self.hubert_soft = utils.get_hubert_model() - - def set_device(self, device): - self.device = torch.device(device) - self.hubert_soft.to(self.device) - if self.SVCVITS != None: - self.SVCVITS.to(self.device) - - def loadCheckpoint(self, path): - self.hps = utils.get_hparams_from_file(f"checkpoints/{path}/config.json") - self.SVCVITS = SynthesizerTrn( - self.hps.data.filter_length // 2 + 1, - self.hps.train.segment_size // self.hps.data.hop_length, - **self.hps.model) - _ = utils.load_checkpoint(f"checkpoints/{path}/model.pth", self.SVCVITS, None) - _ = self.SVCVITS.eval().to(self.device) - self.speakers = self.hps.spk - - def get_units(self, source, sr): - source = source.unsqueeze(0).to(self.device) - with torch.inference_mode(): - units = self.hubert_soft.units(source) - return units - - - def get_unit_pitch(self, in_path, tran): - source, sr = torchaudio.load(in_path) - source = torchaudio.functional.resample(source, sr, 16000) - if len(source.shape) == 2 and source.shape[1] >= 2: - source = torch.mean(source, dim=0).unsqueeze(0) - soft = self.get_units(source, sr).squeeze(0).cpu().numpy() - f0_coarse, f0 = get_f0(source.cpu().numpy()[0], soft.shape[0]*2, tran) - return soft, f0 - - def infer(self, speaker_id, tran, raw_path): - speaker_id = self.speakers[speaker_id] - sid = torch.LongTensor([int(speaker_id)]).to(self.device).unsqueeze(0) - soft, pitch = self.get_unit_pitch(raw_path, tran) - f0 = torch.FloatTensor(clean_pitch(pitch)).unsqueeze(0).to(self.device) - stn_tst = torch.FloatTensor(soft) - with torch.no_grad(): - x_tst = stn_tst.unsqueeze(0).to(self.device) - x_tst = torch.repeat_interleave(x_tst, repeats=2, dim=1).transpose(1, 2) - audio = self.SVCVITS.infer(x_tst, f0=f0, g=sid)[0,0].data.float() - return audio, audio.shape[-1] - - def inference(self,srcaudio,chara,tran,slice_db): - sampling_rate, audio = srcaudio - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != 16000: - 
audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) - soundfile.write("tmpwav.wav", audio, 16000, format="wav") - chunks = slicer.cut("tmpwav.wav", db_thresh=slice_db) - audio_data, audio_sr = slicer.chunks2audio("tmpwav.wav", chunks) - audio = [] - for (slice_tag, data) in audio_data: - length = int(np.ceil(len(data) / audio_sr * self.hps.data.sampling_rate)) - raw_path = io.BytesIO() - soundfile.write(raw_path, data, audio_sr, format="wav") - raw_path.seek(0) - if slice_tag: - _audio = np.zeros(length) - else: - out_audio, out_sr = self.infer(chara, tran, raw_path) - _audio = out_audio.cpu().numpy() - audio.extend(list(_audio)) - audio = (np.array(audio) * 32768.0).astype('int16') - return (self.hps.data.sampling_rate,audio) diff --git a/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Guineapig/con_guineapig_SVM.py b/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Guineapig/con_guineapig_SVM.py deleted file mode 100644 index 980c4b1404ba312ae0a447fd02eaecb42bc9f780..0000000000000000000000000000000000000000 --- a/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Guineapig/con_guineapig_SVM.py +++ /dev/null @@ -1,37 +0,0 @@ -import cv2 -import numpy as np -from PIL import Image -import pickle -import tensorflow as tf -import io - -class gpSVM: - def __init__(self,url) -> None: - self.image = url - - def predict_image(self): - # Load the model - load_extractor = tf.keras.models.load_model("././Model/Guineapig/resnetxSVM/resnet_EXTRACTOR.h5") - - modelpath = "././Model/Guineapig/resnetxSVM/dataSaved.pkl" - - with open(modelpath, 'rb') as file: - saved_data = pickle.load(file) - animal_breed = saved_data['class_name'] - model = saved_data['svm_model'] - - im = Image.open(self.image) - img = im.convert("RGB") - img= np.asarray(img) - image_resized= cv2.resize(img, (224,224)) - features = load_extractor.predict(np.expand_dims(image_resized, axis=0)) - - reshaped_features = features.reshape(features.shape[0],-1) - predicted_class = model.predict(reshaped_features) - pred_prob = model.predict_proba(reshaped_features) - prediction_probability = pred_prob[0][predicted_class[0]] - predicted_class - - output_class= animal_breed[predicted_class[0]] - - return [output_class, prediction_probability] diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg2mel/train/optim.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg2mel/train/optim.py deleted file mode 100644 index 62533b95864019df1eca855287cc0bcdb53745d4..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg2mel/train/optim.py +++ /dev/null @@ -1,45 +0,0 @@ -import torch -import numpy as np - - -class Optimizer(): - def __init__(self, parameters, optimizer, lr, eps, lr_scheduler, - **kwargs): - - # Setup torch optimizer - self.opt_type = optimizer - self.init_lr = lr - self.sch_type = lr_scheduler - opt = getattr(torch.optim, optimizer) - if lr_scheduler == 'warmup': - warmup_step = 4000.0 - init_lr = lr - self.lr_scheduler = lambda step: init_lr * warmup_step ** 0.5 * \ - np.minimum((step+1)*warmup_step**-1.5, (step+1)**-0.5) - self.opt = opt(parameters, lr=1.0) - else: - self.lr_scheduler = None - self.opt = opt(parameters, lr=lr, eps=eps) # ToDo: 1e-8 better? 
- - def get_opt_state_dict(self): - return self.opt.state_dict() - - def load_opt_state_dict(self, state_dict): - self.opt.load_state_dict(state_dict) - - def pre_step(self, step): - if self.lr_scheduler is not None: - cur_lr = self.lr_scheduler(step) - for param_group in self.opt.param_groups: - param_group['lr'] = cur_lr - else: - cur_lr = self.init_lr - self.opt.zero_grad() - return cur_lr - - def step(self): - self.opt.step() - - def create_msg(self): - return ['Optim.Info.| Algo. = {}\t| Lr = {}\t (schedule = {})' - .format(self.opt_type, self.init_lr, self.sch_type)] diff --git a/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/transliterate/script_unifier.py b/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/transliterate/script_unifier.py deleted file mode 100644 index 20f39339ffb3178ea17785aba09eb620d108f330..0000000000000000000000000000000000000000 --- a/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/transliterate/script_unifier.py +++ /dev/null @@ -1,157 +0,0 @@ -# -# Copyright (c) 2013-present, Anoop Kunchukuttan -# All rights reserved. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -# - -#Program for normalization of text written in Unicode. This is mainly geared towards Indic scripts -# -# @author Anoop Kunchukuttan -# - -import sys -from indicnlp.normalize import indic_normalize -from indicnlp.transliterate import unicode_transliterate -from indicnlp import loader - -class AggressiveScriptUnifier(): - - def __init__(self,common_lang='hi',nasals_mode='to_nasal_consonants'): - self.common_lang=common_lang - self.nasals_mode=nasals_mode - self.do_normalize_chandras=True - self.do_normalize_vowel_ending=True - self.remove_nuktas=True - self.normalizer_map={} - self._init_normalizers() - - def _init_normalizers(self): - normalizer_factory=indic_normalize.IndicNormalizerFactory() - - ## for languages with common parameters - for lang in ['hi','mr','sa','kK','ne','sd','bn','gu','ta','te','kn']: - self.normalizer_map[lang]=normalizer_factory.get_normalizer(lang, nasals_mode=self.nasals_mode, - do_normalize_chandras=self.do_normalize_chandras, remove_nuktas=self.remove_nuktas, - do_normalize_vowel_ending=self.do_normalize_vowel_ending) - - ## for languages with language specific parameters - self.normalizer_map['pa']=normalizer_factory.get_normalizer('pa', nasals_mode=self.nasals_mode, - do_normalize_chandras=self.do_normalize_chandras, remove_nuktas=self.remove_nuktas, - do_normalize_vowel_ending=self.do_normalize_vowel_ending, - do_canonicalize_addak=True, do_canonicalize_tippi=True, - do_replace_vowel_bases=True) - self.normalizer_map['or']=normalizer_factory.get_normalizer('or', nasals_mode=self.nasals_mode, - do_normalize_chandras=self.do_normalize_chandras, remove_nuktas=self.remove_nuktas, - do_normalize_vowel_ending=self.do_normalize_vowel_ending, - do_remap_wa=True) - self.normalizer_map['as']=normalizer_factory.get_normalizer('as', nasals_mode=self.nasals_mode, - do_normalize_chandras=self.do_normalize_chandras, remove_nuktas=self.remove_nuktas, - do_normalize_vowel_ending=self.do_normalize_vowel_ending, - do_remap_assamese_chars=True) - self.normalizer_map['ml']=normalizer_factory.get_normalizer('ml', nasals_mode=self.nasals_mode, - do_normalize_chandras=self.do_normalize_chandras, remove_nuktas=self.remove_nuktas, - do_normalize_vowel_ending=self.do_normalize_vowel_ending, - do_canonicalize_chillus=True, do_correct_geminated_T=True) - - def transform(self,text,lang): - 
text=self.normalizer_map[lang].normalize(text) - text=unicode_transliterate.UnicodeIndicTransliterator.transliterate(text, lang, self.common_lang) - return text - -class BasicScriptUnifier(): - - def __init__(self,common_lang='hi',nasals_mode='do_nothing'): - self.common_lang=common_lang - self.nasals_mode=nasals_mode - self.normalizer_map={} - self._init_normalizers() - - def _init_normalizers(self): - normalizer_factory=indic_normalize.IndicNormalizerFactory() - - for lang in ['hi','mr','sa','kK','ne','sd','bn','gu','ta','te','kn','pa','or','as','ml']: - self.normalizer_map[lang]=normalizer_factory.get_normalizer(lang, nasals_mode=self.nasals_mode) - - def transform(self,text,lang): - - if lang in self.normalizer_map: - text=self.normalizer_map[lang].normalize(text) - - text=unicode_transliterate.UnicodeIndicTransliterator.transliterate(text, lang, self.common_lang) - return text - -class NaiveScriptUnifier(): - - def __init__(self,common_lang='hi'): - self.common_lang=common_lang - - def transform(self,text,lang): - - text=unicode_transliterate.UnicodeIndicTransliterator.transliterate(text, lang, self.common_lang) - return text - -if __name__ == '__main__': - - loader.load() - - if len(sys.argv)<=4: - print("Usage: python script_unifier ") - sys.exit(1) - - if sys.argv[1]=='aggressive': - - language=sys.argv[4] - - unifier=AggressiveScriptUnifier(nasals_mode='to_nasal_consonants') - - with open(sys.argv[2],'r',encoding='utf-8') as ifile: - with open(sys.argv[3],'w',encoding='utf-8') as ofile: - for i, line in enumerate(ifile.readlines()): - - line=line.strip() - transliterated_line=unifier.transform(line,language) - ofile.write(transliterated_line+'\n') - - elif sys.argv[1]=='moderate': - - language=sys.argv[4] - - unifier=AggressiveScriptUnifier(nasals_mode='do_nothing') - - with open(sys.argv[2],'r',encoding='utf-8') as ifile: - with open(sys.argv[3],'w',encoding='utf-8') as ofile: - for i, line in enumerate(ifile.readlines()): - - line=line.strip() - transliterated_line=unifier.transform(line,language) - ofile.write(transliterated_line+'\n') - - elif sys.argv[1]=='basic': - - language=sys.argv[4] - - unifier=BasicScriptUnifier() - - with open(sys.argv[2],'r',encoding='utf-8') as ifile: - with open(sys.argv[3],'w',encoding='utf-8') as ofile: - for i, line in enumerate(ifile.readlines()): - - line=line.strip() - transliterated_line=unifier.transform(line,language) - ofile.write(transliterated_line+'\n') - - elif sys.argv[1]=='naive': - - language=sys.argv[4] - - unifier=NaiveScriptUnifier() - - with open(sys.argv[2],'r',encoding='utf-8') as ifile: - with open(sys.argv[3],'w',encoding='utf-8') as ofile: - for i, line in enumerate(ifile.readlines()): - - line=line.strip() - transliterated_line=unifier.transform(line,language) - ofile.write(transliterated_line+'\n') diff --git a/spaces/KyanChen/RSPrompter/mmdet/engine/hooks/set_epoch_info_hook.py b/spaces/KyanChen/RSPrompter/mmdet/engine/hooks/set_epoch_info_hook.py deleted file mode 100644 index 183f3167445dc0818e4fa37bdd2049d3876ed031..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/engine/hooks/set_epoch_info_hook.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from mmengine.hooks import Hook -from mmengine.model.wrappers import is_model_wrapper - -from mmdet.registry import HOOKS - - -@HOOKS.register_module() -class SetEpochInfoHook(Hook): - """Set runner's epoch information to the model.""" - - def before_train_epoch(self, runner): - epoch = runner.epoch - model = runner.model - if is_model_wrapper(model): - model = model.module - model.set_epoch(epoch) diff --git a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/infer_pack/models_onnx.py b/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/infer_pack/models_onnx.py deleted file mode 100644 index 2b06555fafd8e64826844ecf4ee9e15b94fcec6a..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/infer_pack/models_onnx.py +++ /dev/null @@ -1,824 +0,0 @@ -import math -import logging - -logger = logging.getLogger(__name__) - -import numpy as np -import torch -from torch import nn -from torch.nn import Conv1d, Conv2d, ConvTranspose1d -from torch.nn import functional as F -from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm - -from lib.infer.infer_libs.infer_pack import attentions, commons, modules -from lib.infer.infer_libs.infer_pack.commons import get_padding, init_weights - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + 
self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - 
zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) 
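The statement above is the heart of the NSF-style sine excitation in SineGen: per-sample frequencies are normalised by the sampling rate, integrated with a cumulative sum into a phase track, and passed through sin. A minimal standalone sketch of the same idea, using illustrative names and a fixed sample rate that are not part of this module:

```python
import numpy as np

def sine_from_f0(f0, sample_rate=16000, amp=0.1):
    # f0: 1-D array of per-sample fundamental frequencies in Hz (0 where unvoiced).
    rad_per_sample = f0 / sample_rate               # fraction of a cycle advanced per sample
    phase = 2 * np.pi * np.cumsum(rad_per_sample)   # integrate frequency into phase
    voiced = (f0 > 0).astype(f0.dtype)
    return amp * np.sin(phase) * voiced             # zero out unvoiced samples

# Example: 0.5 s of a 220 Hz tone at 16 kHz
wave = sine_from_f0(np.full(8000, 220.0))
```

The full SineGen.forward additionally stacks harmonic overtones, adds a random initial phase per harmonic, and interpolates frame-level f0 up to sample rate; the sketch keeps only the phase-accumulation step.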
- sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, 
bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMsNSFsidM(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - version, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - if version == "v1": - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - else: - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - self.speaker_map = None - logger.debug( - "gin_channels: " - + gin_channels - + ", self.spk_embed_dim: " - + self.spk_embed_dim - ) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def construct_spkmixmap(self, n_speaker): - self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels)) - for i in range(n_speaker): - self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]])) 
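construct_spkmixmap above caches one global-conditioning embedding per speaker so that, at inference time, a weight vector over speakers can be blended into a single conditioning vector (see the speaker_map branch of forward below). A hedged sketch of that mixing idea, with made-up sizes and independent of this class:

```python
import torch

n_speakers, gin_channels = 4, 8
emb_g = torch.nn.Embedding(n_speakers, gin_channels)

# Cache every speaker embedding once: shape (n_speakers, gin_channels)
speaker_map = emb_g(torch.arange(n_speakers))

# Mixing weights over speakers, e.g. 70% speaker 0 and 30% speaker 2
weights = torch.tensor([0.7, 0.0, 0.3, 0.0])

# Weighted sum over the speaker axis gives one blended vector of shape (gin_channels,)
g = (weights[:, None] * speaker_map).sum(dim=0)
```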
- self.speaker_map = self.speaker_map.unsqueeze(0) - - def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None): - if self.speaker_map is not None: # [N, S] * [S, B, 1, H] - g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1] - g = g * self.speaker_map # [N, S, B, 1, H] - g = torch.sum(g, dim=1) # [N, 1, B, 1, H] - g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N] - else: - g = g.unsqueeze(0) - g = self.emb_g(g).transpose(1, 2) - - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if 
use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/Marshalls/testmtd/models/ddc_model.py b/spaces/Marshalls/testmtd/models/ddc_model.py deleted file mode 100644 index abe5992b40d42ffc0a1b21cbe8fd2237593ab373..0000000000000000000000000000000000000000 --- a/spaces/Marshalls/testmtd/models/ddc_model.py +++ /dev/null @@ -1,212 +0,0 @@ -from .base_model import BaseModel -import torch.nn.functional as F -from torch import nn -import torch -#from models import constants -import numpy as np -import os - -class DDCModel(nn.Module): -#class DDCModel(BaseModel): - - def __init__(self, opt): - super().__init__() - #super().__init__(opt) - self.opt = opt - self.loss_names = ['ce', 'humaneness_reg', 'total'] - self.metric_names = ['accuracy'] - self.module_names = [''] # changed from 'model_names' - self.schedulers = [] - self.net = DDCNet(opt) - self.optimizers = [torch.optim.Adam([ - {'params': [param for name, param in self.net.named_parameters() if name[-4:] == 'bias'], - 'lr': 2 * opt.learning_rate}, # bias parameters change quicker - no weight decay is applied - {'params': [param for name, param in self.net.named_parameters() if name[-4:] != 'bias'], - 'lr': opt.learning_rate, 'weight_decay': opt.weight_decay} # filter parameters have weight decay - ])] - self.loss_ce = None - self.humaneness_reg = None - self.save_dir=opt.checkpoints_dir+"/block_placement_ddc2" - self.device="cpu" - - def name(self): - return "DDCNet" - - def load_networks(self, epoch): - for name in self.module_names: - if isinstance(name, str): - load_filename = '%s_net_%s.pth' % (epoch, name) - load_path = os.path.join(self.save_dir, load_filename) - net = getattr(self, 'net' + name) - if isinstance(net, torch.nn.DataParallel): - net = net.module - print('loading the model from %s' % load_path) - # if you are using PyTorch newer than 0.4 (e.g., built from - # GitHub source), you can remove str() on self.device - state_dict = torch.load(load_path, map_location=str(self.device)) - if hasattr(state_dict, '_metadata'): - del state_dict._metadata - - # patch InstanceNorm checkpoints prior to 0.4 - #for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop - # self.__patch_instance_norm_state_dict(state_dict, net, key.split('.')) - # if not self.opt.gpu_ids: - # state_dict = {key[6:]: value for key, value in - # state_dict.items()} # remove data_parallel's 
"module." - net.load_state_dict(state_dict) - - @staticmethod - def modify_commandline_options(parser, is_train): - # parser.add_argument('--num_classes', type=int, default=20) - # parser.add_argument('--output_channels', type=int, default=(4*3)) - # parser.add_argument('--kernel_size', type=int, default=2) - # parser.add_argument('--bias', action='store_false') - parser.add_argument('--entropy_loss_coeff', type=float, default=0.0) - parser.add_argument('--humaneness_reg_coeff', type=float, default=0.0) - parser.add_argument('--hidden_dim', type=int, default=512) - parser.add_argument('--num_classes', type=int, default=2) - parser.add_argument('--dropout', type=float, default=0.0) - return parser - - def set_input(self, data): - # move multiple samples of the same song to the second dimension and the reshape to batch dimension - input_ = data['input'] - target_ = data['target'] - input_shape = input_.shape - target_shape = target_.shape - # 0 batch dimension, 1 window dimension, 2 context time dimension, 3 frequency dimension, 4 mel_window_size dimension, 5 time dimension - self.input = input_.reshape((input_shape[0]*input_shape[1], input_shape[2], input_shape[3], input_shape[4], input_shape[5])).to(self.device) - self.input = self.input.permute(0,4,1,2,3) # batch/window x time x temporal_context x frequency_features x mel_window_sizes - #we collapse all the dimensions of target_ because that is the same way the output of the network is being processed for the cross entropy calculation (see self.forward) - # here, 0 is the batch dimension, 1 is the window index, 2 is the time dimension, 3 is the output channel dimension - self.target = target_.reshape((target_shape[0]*target_shape[1]*target_shape[2]*target_shape[3])).to(self.device) - - def forward(self): - self.output = self.net.forward(self.input) - x = self.output - [n, l , classes] = x.size() - x = x.view(n * l, classes) - - # print(x) - self.loss_ce = F.cross_entropy(x, self.target) - if self.opt.entropy_loss_coeff > 0: - S = F.softmax(x, dim=1) * F.log_softmax(x, dim=1) - S = -1.0 * S.mean() - self.loss_ce += self.opt.entropy_loss_coeff * S - self.metric_accuracy = (torch.argmax(x,1) == self.target).sum().float()/len(self.target) - - #TODO: implement humaneness_reg maybe - # problem is we don't have past notes available in input, so need to do that differently - # just use output I guess :P - # step_size = self.opt.step_size - # humaneness_delta = constants.HUMAN_DELTA - # window_size = int(humaneness_delta/step_size) - # - # receptive_field = self.net.module.receptive_field - # notes = (torch.argmax(input[:,-5:,receptive_field//2-(window_size):receptive_field//2],1)==4).float() - # distance_factor = torch.tensor(np.exp(-2*np.arange(window_size,0,-1)/window_size)).float().cuda() - # if self.opt.entropy_loss_coeff > 0: - # weights = torch.tensordot(notes,distance_factor,dims=1) - # humaneness_reg = F.cross_entropy(x,torch.zeros(weights.shape).long().cuda(), reduction='none') - # humaneness_reg = torch.dot(humaneness_reg, weights) - # self.loss_humaneness_reg = humaneness_reg - # # self.loss_humaneness_reg = 0 - # self.loss_total = self.loss_ce + self.opt.humaneness_reg_coeff * self.loss_humaneness_reg - # else: - # self.loss_humaneness_reg = 0 - # self.loss_total = self.loss_ce - self.loss_humaneness_reg = 0 - self.loss_total = self.loss_ce - - def backward(self): - self.optimizers[0].zero_grad() - self.loss_total.backward() - self.optimizers[0].step() - - def optimize_parameters(self): - self.set_requires_grad(self.net, 
requires_grad=True) - self.forward() - self.backward() - for scheduler in self.schedulers: - # step for schedulers that update after each iteration - try: - scheduler.batch_step() - except AttributeError: - pass - - def prepare_input(self,y): - # dimensions of y are: features x window_sizes x time - receptive_field = 1 - input_length = y.shape[-1] - y = np.concatenate((np.zeros((y.shape[0],y.shape[1],receptive_field+self.opt.time_shifts//2)),y),2) - # we also pad at the end to allow generation to be of the same length of song, by padding an amount corresponding to time_shifts - y = np.concatenate((y,np.zeros((y.shape[0],y.shape[1],self.opt.time_shifts//2))),2) - input_windowss = [] - time_shifts = self.opt.time_shifts - 1 - # loop that gets the input features for each of the windows, shifted by `ii`, and saves them in `input_windowss` - for ii in range(-time_shifts//2, time_shifts//2+1): - input_windows = [y[:,:,self.opt.time_shifts//2+ii:self.opt.time_shifts//2+ii+input_length]] - input_windows = torch.tensor(input_windows) - input_windows = (input_windows - input_windows.mean())/torch.abs(input_windows).max() - # input_windows = (input_windows.permute(3,0,1,2) - input_windows.mean(-1)).permute(1,2,3,0) - input_windowss.append(input_windows.float()) - input = torch.stack(input_windowss,dim=1).float() - input_shape = input.shape - input = input.to(self.device) - input = input.permute(0,4,1,2,3) # batch/window x time x temporal_context x frequency_features x mel_window_sizes - return input - - def generate(self,y): - input = self.prepare_input(y) - if self.opt.cuda: - with torch.no_grad(): - self.net.module.eval() - return F.softmax(self.net.module.forward(input)[0],2) - else: - with torch.no_grad(): - self.net.eval() - return F.softmax(self.net.forward(input)[0],2) - - def generate_features(self,y): - input = self.prepare_input(y) - if self.opt.cuda: - with torch.no_grad(): - self.net.module.eval() - logits, h = self.net.module.forward(input) - return h, F.softmax(logits,2) - else: - with torch.no_grad(): - self.net.eval() - logits, h = self.net.forward(input) - return h, F.softmax(logits,2) - - -class DDCNet(nn.Module): - def __init__(self,opt): - super(DDCNet, self).__init__() - self.conv1 = nn.Conv2d(3, 20, (7,3)) #assumes CHW format - # self.pool = nn.MaxPool1d(3, 3) - self.pool = nn.MaxPool2d((1,3), (1,3)) - self.conv2 = nn.Conv2d(20, 20, 3) - # self.fc1 = nn.Linear(20 * 9, 256) - # self.fc2 = nn.Linear(256, 128) - self.lstm = nn.LSTM(input_size=20*7*8, hidden_size=opt.hidden_dim, num_layers=2, batch_first=True) # Define the LSTM - self.hidden_to_state = nn.Linear(opt.hidden_dim, - opt.num_classes) - - def forward(self, x): - # batch/window x time x temporal_context x frequency_features x mel_window_sizes - # print(x.shape) - [N,L,deltaT,dim,winsizes] = x.shape - x = x.reshape(N*L,deltaT,dim,winsizes) - x = x.permute(0,3,1,2) - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - # print(x.shape) - x = x.reshape(N,L,20*7*8) # batch x time x CNN_features - # x = F.relu(self.fc1(x)) - # x = F.relu(self.fc2(x)) - lstm_out, _ = self.lstm(x) - logits = self.hidden_to_state(lstm_out) - # print(logits.shape) - return logits, lstm_out diff --git a/spaces/MetaWabbit/Auto-GPT/run_continuous.sh b/spaces/MetaWabbit/Auto-GPT/run_continuous.sh deleted file mode 100644 index 1f4436c88503172c0578b15a8447ed8268502578..0000000000000000000000000000000000000000 --- a/spaces/MetaWabbit/Auto-GPT/run_continuous.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -./run.sh --continuous 
$@ diff --git a/spaces/MirageML/sjc/guided_diffusion/nn.py b/spaces/MirageML/sjc/guided_diffusion/nn.py deleted file mode 100644 index a4cd59c2324b003626b8cf4c7581effd334908d3..0000000000000000000000000000000000000000 --- a/spaces/MirageML/sjc/guided_diffusion/nn.py +++ /dev/null @@ -1,170 +0,0 @@ -""" -Various utilities for neural networks. -""" - -import math - -import torch as th -import torch.nn as nn - - -# PyTorch 1.7 has SiLU, but we support PyTorch 1.5. -class SiLU(nn.Module): - def forward(self, x): - return x * th.sigmoid(x) - - -class GroupNorm32(nn.GroupNorm): - def forward(self, x): - return super().forward(x.float()).type(x.dtype) - - -def conv_nd(dims, *args, **kwargs): - """ - Create a 1D, 2D, or 3D convolution module. - """ - if dims == 1: - return nn.Conv1d(*args, **kwargs) - elif dims == 2: - return nn.Conv2d(*args, **kwargs) - elif dims == 3: - return nn.Conv3d(*args, **kwargs) - raise ValueError(f"unsupported dimensions: {dims}") - - -def linear(*args, **kwargs): - """ - Create a linear module. - """ - return nn.Linear(*args, **kwargs) - - -def avg_pool_nd(dims, *args, **kwargs): - """ - Create a 1D, 2D, or 3D average pooling module. - """ - if dims == 1: - return nn.AvgPool1d(*args, **kwargs) - elif dims == 2: - return nn.AvgPool2d(*args, **kwargs) - elif dims == 3: - return nn.AvgPool3d(*args, **kwargs) - raise ValueError(f"unsupported dimensions: {dims}") - - -def update_ema(target_params, source_params, rate=0.99): - """ - Update target parameters to be closer to those of source parameters using - an exponential moving average. - - :param target_params: the target parameter sequence. - :param source_params: the source parameter sequence. - :param rate: the EMA rate (closer to 1 means slower). - """ - for targ, src in zip(target_params, source_params): - targ.detach().mul_(rate).add_(src, alpha=1 - rate) - - -def zero_module(module): - """ - Zero out the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().zero_() - return module - - -def scale_module(module, scale): - """ - Scale the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().mul_(scale) - return module - - -def mean_flat(tensor): - """ - Take the mean over all non-batch dimensions. - """ - return tensor.mean(dim=list(range(1, len(tensor.shape)))) - - -def normalization(channels): - """ - Make a standard normalization layer. - - :param channels: number of input channels. - :return: an nn.Module for normalization. - """ - return GroupNorm32(32, channels) - - -def timestep_embedding(timesteps, dim, max_period=10000): - """ - Create sinusoidal timestep embeddings. - - :param timesteps: a 1-D Tensor of N indices, one per batch element. - These may be fractional. - :param dim: the dimension of the output. - :param max_period: controls the minimum frequency of the embeddings. - :return: an [N x dim] Tensor of positional embeddings. - """ - half = dim // 2 - freqs = th.exp( - -math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half - ).to(device=timesteps.device) - args = timesteps[:, None].float() * freqs[None] - embedding = th.cat([th.cos(args), th.sin(args)], dim=-1) - if dim % 2: - embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1) - return embedding - - -def checkpoint(func, inputs, params, flag): - """ - Evaluate a function without caching intermediate activations, allowing for - reduced memory at the expense of extra compute in the backward pass. - - :param func: the function to evaluate. 
- :param inputs: the argument sequence to pass to `func`. - :param params: a sequence of parameters `func` depends on but does not - explicitly take as arguments. - :param flag: if False, disable gradient checkpointing. - """ - if flag: - args = tuple(inputs) + tuple(params) - return CheckpointFunction.apply(func, len(inputs), *args) - else: - return func(*inputs) - - -class CheckpointFunction(th.autograd.Function): - @staticmethod - def forward(ctx, run_function, length, *args): - ctx.run_function = run_function - ctx.input_tensors = list(args[:length]) - ctx.input_params = list(args[length:]) - with th.no_grad(): - output_tensors = ctx.run_function(*ctx.input_tensors) - return output_tensors - - @staticmethod - def backward(ctx, *output_grads): - ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors] - with th.enable_grad(): - # Fixes a bug where the first op in run_function modifies the - # Tensor storage in place, which is not allowed for detach()'d - # Tensors. - shallow_copies = [x.view_as(x) for x in ctx.input_tensors] - output_tensors = ctx.run_function(*shallow_copies) - input_grads = th.autograd.grad( - output_tensors, - ctx.input_tensors + ctx.input_params, - output_grads, - allow_unused=True, - ) - del ctx.input_tensors - del ctx.input_params - del output_tensors - return (None, None) + input_grads diff --git a/spaces/MirageML/sjc/sd1/ldm/modules/image_degradation/bsrgan_light.py b/spaces/MirageML/sjc/sd1/ldm/modules/image_degradation/bsrgan_light.py deleted file mode 100644 index 9e1f823996bf559e9b015ea9aa2b3cd38dd13af1..0000000000000000000000000000000000000000 --- a/spaces/MirageML/sjc/sd1/ldm/modules/image_degradation/bsrgan_light.py +++ /dev/null @@ -1,650 +0,0 @@ -# -*- coding: utf-8 -*- -import numpy as np -import cv2 -import torch - -from functools import partial -import random -from scipy import ndimage -import scipy -import scipy.stats as ss -from scipy.interpolate import interp2d -from scipy.linalg import orth -import albumentations - -import ldm.modules.image_degradation.utils_image as util - -""" -# -------------------------------------------- -# Super-Resolution -# -------------------------------------------- -# -# Kai Zhang (cskaizhang@gmail.com) -# https://github.com/cszn -# From 2019/03--2021/08 -# -------------------------------------------- -""" - - -def modcrop_np(img, sf): - ''' - Args: - img: numpy image, WxH or WxHxC - sf: scale factor - Return: - cropped image - ''' - w, h = img.shape[:2] - im = np.copy(img) - return im[:w - w % sf, :h - h % sf, ...] 
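modcrop_np trims an image so that both spatial dimensions are divisible by the scale factor, which keeps the subsequent ×sf downsampling exact. A short usage sketch with arbitrary shapes chosen only for illustration:

```python
import numpy as np

sf = 4
img = np.random.rand(257, 130, 3)         # H x W x C, deliberately not divisible by 4
h, w = img.shape[:2]
cropped = img[:h - h % sf, :w - w % sf, ...]
print(cropped.shape)                       # (256, 128, 3)
```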
- - -""" -# -------------------------------------------- -# anisotropic Gaussian kernels -# -------------------------------------------- -""" - - -def analytic_kernel(k): - """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" - k_size = k.shape[0] - # Calculate the big kernels size - big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) - # Loop over the small kernel to fill the big one - for r in range(k_size): - for c in range(k_size): - big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k - # Crop the edges of the big kernel to ignore very small values and increase run time of SR - crop = k_size // 2 - cropped_big_k = big_k[crop:-crop, crop:-crop] - # Normalize to 1 - return cropped_big_k / cropped_big_k.sum() - - -def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): - """ generate an anisotropic Gaussian kernel - Args: - ksize : e.g., 15, kernel size - theta : [0, pi], rotation angle range - l1 : [0.1,50], scaling of eigenvalues - l2 : [0.1,l1], scaling of eigenvalues - If l1 = l2, will get an isotropic Gaussian kernel. - Returns: - k : kernel - """ - - v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.])) - V = np.array([[v[0], v[1]], [v[1], -v[0]]]) - D = np.array([[l1, 0], [0, l2]]) - Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) - k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) - - return k - - -def gm_blur_kernel(mean, cov, size=15): - center = size / 2.0 + 0.5 - k = np.zeros([size, size]) - for y in range(size): - for x in range(size): - cy = y - center + 1 - cx = x - center + 1 - k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) - - k = k / np.sum(k) - return k - - -def shift_pixel(x, sf, upper_left=True): - """shift pixel for super-resolution with different scale factors - Args: - x: WxHxC or WxH - sf: scale factor - upper_left: shift direction - """ - h, w = x.shape[:2] - shift = (sf - 1) * 0.5 - xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) - if upper_left: - x1 = xv + shift - y1 = yv + shift - else: - x1 = xv - shift - y1 = yv - shift - - x1 = np.clip(x1, 0, w - 1) - y1 = np.clip(y1, 0, h - 1) - - if x.ndim == 2: - x = interp2d(xv, yv, x)(x1, y1) - if x.ndim == 3: - for i in range(x.shape[-1]): - x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) - - return x - - -def blur(x, k): - ''' - x: image, NxcxHxW - k: kernel, Nx1xhxw - ''' - n, c = x.shape[:2] - p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 - x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') - k = k.repeat(1, c, 1, 1) - k = k.view(-1, 1, k.shape[2], k.shape[3]) - x = x.view(1, -1, x.shape[2], x.shape[3]) - x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) - x = x.view(n, c, x.shape[2], x.shape[3]) - - return x - - -def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): - """" - # modified version of https://github.com/assafshocher/BlindSR_dataset_generator - # Kai Zhang - # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var - # max_var = 2.5 * sf - """ - # Set random eigen-vals (lambdas) and angle (theta) for COV matrix - lambda_1 = min_var + np.random.rand() * (max_var - min_var) - lambda_2 = min_var + np.random.rand() * (max_var - min_var) - theta = np.random.rand() * np.pi # random theta - noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 - - # Set COV matrix using Lambdas and Theta - LAMBDA = 
np.diag([lambda_1, lambda_2]) - Q = np.array([[np.cos(theta), -np.sin(theta)], - [np.sin(theta), np.cos(theta)]]) - SIGMA = Q @ LAMBDA @ Q.T - INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] - - # Set expectation position (shifting kernel for aligned image) - MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) - MU = MU[None, None, :, None] - - # Create meshgrid for Gaussian - [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) - Z = np.stack([X, Y], 2)[:, :, :, None] - - # Calcualte Gaussian for every pixel of the kernel - ZZ = Z - MU - ZZ_t = ZZ.transpose(0, 1, 3, 2) - raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) - - # shift the kernel so it will be centered - # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) - - # Normalize the kernel and return - # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) - kernel = raw_kernel / np.sum(raw_kernel) - return kernel - - -def fspecial_gaussian(hsize, sigma): - hsize = [hsize, hsize] - siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] - std = sigma - [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) - arg = -(x * x + y * y) / (2 * std * std) - h = np.exp(arg) - h[h < scipy.finfo(float).eps * h.max()] = 0 - sumh = h.sum() - if sumh != 0: - h = h / sumh - return h - - -def fspecial_laplacian(alpha): - alpha = max([0, min([alpha, 1])]) - h1 = alpha / (alpha + 1) - h2 = (1 - alpha) / (alpha + 1) - h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] - h = np.array(h) - return h - - -def fspecial(filter_type, *args, **kwargs): - ''' - python code from: - https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py - ''' - if filter_type == 'gaussian': - return fspecial_gaussian(*args, **kwargs) - if filter_type == 'laplacian': - return fspecial_laplacian(*args, **kwargs) - - -""" -# -------------------------------------------- -# degradation models -# -------------------------------------------- -""" - - -def bicubic_degradation(x, sf=3): - ''' - Args: - x: HxWxC image, [0, 1] - sf: down-scale factor - Return: - bicubicly downsampled LR image - ''' - x = util.imresize_np(x, scale=1 / sf) - return x - - -def srmd_degradation(x, k, sf=3): - ''' blur + bicubic downsampling - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2018learning, - title={Learning a single convolutional super-resolution network for multiple degradations}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={3262--3271}, - year={2018} - } - ''' - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror' - x = bicubic_degradation(x, sf=sf) - return x - - -def dpsr_degradation(x, k, sf=3): - ''' bicubic downsampling + blur - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2019deep, - title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={1671--1681}, - year={2019} - } - ''' - x = bicubic_degradation(x, sf=sf) - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - return x - - -def 
classical_degradation(x, k, sf=3): - ''' blur + downsampling - Args: - x: HxWxC image, [0, 1]/[0, 255] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - ''' - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) - st = 0 - return x[st::sf, st::sf, ...] - - -def add_sharpening(img, weight=0.5, radius=50, threshold=10): - """USM sharpening. borrowed from real-ESRGAN - Input image: I; Blurry image: B. - 1. K = I + weight * (I - B) - 2. Mask = 1 if abs(I - B) > threshold, else: 0 - 3. Blur mask: - 4. Out = Mask * K + (1 - Mask) * I - Args: - img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. - weight (float): Sharp weight. Default: 1. - radius (float): Kernel size of Gaussian blur. Default: 50. - threshold (int): - """ - if radius % 2 == 0: - radius += 1 - blur = cv2.GaussianBlur(img, (radius, radius), 0) - residual = img - blur - mask = np.abs(residual) * 255 > threshold - mask = mask.astype('float32') - soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) - - K = img + weight * residual - K = np.clip(K, 0, 1) - return soft_mask * K + (1 - soft_mask) * img - - -def add_blur(img, sf=4): - wd2 = 4.0 + sf - wd = 2.0 + 0.2 * sf - - wd2 = wd2/4 - wd = wd/4 - - if random.random() < 0.5: - l1 = wd2 * random.random() - l2 = wd2 * random.random() - k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2) - else: - k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random()) - img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror') - - return img - - -def add_resize(img, sf=4): - rnum = np.random.rand() - if rnum > 0.8: # up - sf1 = random.uniform(1, 2) - elif rnum < 0.7: # down - sf1 = random.uniform(0.5 / sf, 1) - else: - sf1 = 1.0 - img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - return img - - -# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): -# noise_level = random.randint(noise_level1, noise_level2) -# rnum = np.random.rand() -# if rnum > 0.6: # add color Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) -# elif rnum < 0.4: # add grayscale Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) -# else: # add noise -# L = noise_level2 / 255. -# D = np.diag(np.random.rand(3)) -# U = orth(np.random.rand(3, 3)) -# conv = np.dot(np.dot(np.transpose(U), D), U) -# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) -# img = np.clip(img, 0.0, 1.0) -# return img - -def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - rnum = np.random.rand() - if rnum > 0.6: # add color Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: # add grayscale Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: # add noise - L = noise_level2 / 255. 
- D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_speckle_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - img = np.clip(img, 0.0, 1.0) - rnum = random.random() - if rnum > 0.6: - img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: - img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_Poisson_noise(img): - img = np.clip((img * 255.0).round(), 0, 255) / 255. - vals = 10 ** (2 * random.random() + 2.0) # [2, 4] - if random.random() < 0.5: - img = np.random.poisson(img * vals).astype(np.float32) / vals - else: - img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) - img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255. - noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray - img += noise_gray[:, :, np.newaxis] - img = np.clip(img, 0.0, 1.0) - return img - - -def add_JPEG_noise(img): - quality_factor = random.randint(80, 95) - img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) - result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]) - img = cv2.imdecode(encimg, 1) - img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) - return img - - -def random_crop(lq, hq, sf=4, lq_patchsize=64): - h, w = lq.shape[:2] - rnd_h = random.randint(0, h - lq_patchsize) - rnd_w = random.randint(0, w - lq_patchsize) - lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :] - - rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) - hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :] - return lq, hq - - -def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = img.shape[:2] - img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] 
# mod crop - h, w = img.shape[:2] - - if h < lq_patchsize * sf or w < lq_patchsize * sf: - raise ValueError(f'img size ({h1}X{w1}) is too small!') - - hq = img.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - img = util.imresize_np(img, 1 / 2, True) - img = np.clip(img, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - img = add_blur(img, sf=sf) - - elif i == 1: - img = add_blur(img, sf=sf) - - elif i == 2: - a, b = img.shape[1], img.shape[0] - # downsample2 - if random.random() < 0.75: - sf1 = random.uniform(1, 2 * sf) - img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') - img = img[0::sf, 0::sf, ...] # nearest downsampling - img = np.clip(img, 0.0, 1.0) - - elif i == 3: - # downsample3 - img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - img = add_JPEG_noise(img) - - elif i == 6: - # add processed camera sensor noise - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - img = add_JPEG_noise(img) - - # random crop - img, hq = random_crop(img, hq, sf_ori, lq_patchsize) - - return img, hq - - -# todo no isp_model? -def degradation_bsrgan_variant(image, sf=4, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - image = util.uint2single(image) - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = image.shape[:2] - image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] 
# mod crop - h, w = image.shape[:2] - - hq = image.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - image = util.imresize_np(image, 1 / 2, True) - image = np.clip(image, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - image = add_blur(image, sf=sf) - - # elif i == 1: - # image = add_blur(image, sf=sf) - - if i == 0: - pass - - elif i == 2: - a, b = image.shape[1], image.shape[0] - # downsample2 - if random.random() < 0.8: - sf1 = random.uniform(1, 2 * sf) - image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') - image = image[0::sf, 0::sf, ...] # nearest downsampling - - image = np.clip(image, 0.0, 1.0) - - elif i == 3: - # downsample3 - image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - image = np.clip(image, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - image = add_JPEG_noise(image) - # - # elif i == 6: - # # add processed camera sensor noise - # if random.random() < isp_prob and isp_model is not None: - # with torch.no_grad(): - # img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - image = add_JPEG_noise(image) - image = util.single2uint(image) - example = {"image": image} - return example - - - - -if __name__ == '__main__': - print("hey") - img = util.imread_uint('utils/test.png', 3) - img = img[:448, :448] - h = img.shape[0] // 4 - print("resizing to", h) - sf = 4 - deg_fn = partial(degradation_bsrgan_variant, sf=sf) - for i in range(20): - print(i) - img_hq = img - img_lq = deg_fn(img)["image"] - img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq) - print(img_lq) - img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"] - print(img_lq.shape) - print("bicubic", img_lq_bicubic.shape) - print(img_hq.shape) - lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), - (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1) - util.imsave(img_concat, str(i) + '.png') diff --git a/spaces/Miuzarte/SUI-svc-4.0/inference/slicer.py b/spaces/Miuzarte/SUI-svc-4.0/inference/slicer.py deleted file mode 100644 index b05840bcf6bdced0b6e2adbecb1a1dd5b3dee462..0000000000000000000000000000000000000000 --- a/spaces/Miuzarte/SUI-svc-4.0/inference/slicer.py +++ /dev/null @@ -1,142 +0,0 @@ -import librosa -import torch -import torchaudio - - -class Slicer: - def __init__(self, - sr: int, - 
threshold: float = -40., - min_length: int = 5000, - min_interval: int = 300, - hop_size: int = 20, - max_sil_kept: int = 5000): - if not min_length >= min_interval >= hop_size: - raise ValueError('The following condition must be satisfied: min_length >= min_interval >= hop_size') - if not max_sil_kept >= hop_size: - raise ValueError('The following condition must be satisfied: max_sil_kept >= hop_size') - min_interval = sr * min_interval / 1000 - self.threshold = 10 ** (threshold / 20.) - self.hop_size = round(sr * hop_size / 1000) - self.win_size = min(round(min_interval), 4 * self.hop_size) - self.min_length = round(sr * min_length / 1000 / self.hop_size) - self.min_interval = round(min_interval / self.hop_size) - self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size) - - def _apply_slice(self, waveform, begin, end): - if len(waveform.shape) > 1: - return waveform[:, begin * self.hop_size: min(waveform.shape[1], end * self.hop_size)] - else: - return waveform[begin * self.hop_size: min(waveform.shape[0], end * self.hop_size)] - - # @timeit - def slice(self, waveform): - if len(waveform.shape) > 1: - samples = librosa.to_mono(waveform) - else: - samples = waveform - if samples.shape[0] <= self.min_length: - return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}} - rms_list = librosa.feature.rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0) - sil_tags = [] - silence_start = None - clip_start = 0 - for i, rms in enumerate(rms_list): - # Keep looping while frame is silent. - if rms < self.threshold: - # Record start of silent frames. - if silence_start is None: - silence_start = i - continue - # Keep looping while frame is not silent and silence start has not been recorded. - if silence_start is None: - continue - # Clear recorded silence start if interval is not enough or clip is too short - is_leading_silence = silence_start == 0 and i > self.max_sil_kept - need_slice_middle = i - silence_start >= self.min_interval and i - clip_start >= self.min_length - if not is_leading_silence and not need_slice_middle: - silence_start = None - continue - # Need slicing. Record the range of silent frames to be removed. - if i - silence_start <= self.max_sil_kept: - pos = rms_list[silence_start: i + 1].argmin() + silence_start - if silence_start == 0: - sil_tags.append((0, pos)) - else: - sil_tags.append((pos, pos)) - clip_start = pos - elif i - silence_start <= self.max_sil_kept * 2: - pos = rms_list[i - self.max_sil_kept: silence_start + self.max_sil_kept + 1].argmin() - pos += i - self.max_sil_kept - pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start - pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept - if silence_start == 0: - sil_tags.append((0, pos_r)) - clip_start = pos_r - else: - sil_tags.append((min(pos_l, pos), max(pos_r, pos))) - clip_start = max(pos_r, pos) - else: - pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start - pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept - if silence_start == 0: - sil_tags.append((0, pos_r)) - else: - sil_tags.append((pos_l, pos_r)) - clip_start = pos_r - silence_start = None - # Deal with trailing silence. 
- total_frames = rms_list.shape[0] - if silence_start is not None and total_frames - silence_start >= self.min_interval: - silence_end = min(total_frames, silence_start + self.max_sil_kept) - pos = rms_list[silence_start: silence_end + 1].argmin() + silence_start - sil_tags.append((pos, total_frames + 1)) - # Apply and return slices. - if len(sil_tags) == 0: - return {"0": {"slice": False, "split_time": f"0,{len(waveform)}"}} - else: - chunks = [] - # 第一段静音并非从头开始,补上有声片段 - if sil_tags[0][0]: - chunks.append( - {"slice": False, "split_time": f"0,{min(waveform.shape[0], sil_tags[0][0] * self.hop_size)}"}) - for i in range(0, len(sil_tags)): - # 标识有声片段(跳过第一段) - if i: - chunks.append({"slice": False, - "split_time": f"{sil_tags[i - 1][1] * self.hop_size},{min(waveform.shape[0], sil_tags[i][0] * self.hop_size)}"}) - # 标识所有静音片段 - chunks.append({"slice": True, - "split_time": f"{sil_tags[i][0] * self.hop_size},{min(waveform.shape[0], sil_tags[i][1] * self.hop_size)}"}) - # 最后一段静音并非结尾,补上结尾片段 - if sil_tags[-1][1] * self.hop_size < len(waveform): - chunks.append({"slice": False, "split_time": f"{sil_tags[-1][1] * self.hop_size},{len(waveform)}"}) - chunk_dict = {} - for i in range(len(chunks)): - chunk_dict[str(i)] = chunks[i] - return chunk_dict - - -def cut(audio_path, db_thresh=-30, min_len=5000): - audio, sr = librosa.load(audio_path, sr=None) - slicer = Slicer( - sr=sr, - threshold=db_thresh, - min_length=min_len - ) - chunks = slicer.slice(audio) - return chunks - - -def chunks2audio(audio_path, chunks): - chunks = dict(chunks) - audio, sr = torchaudio.load(audio_path) - if len(audio.shape) == 2 and audio.shape[1] >= 2: - audio = torch.mean(audio, dim=0).unsqueeze(0) - audio = audio.cpu().numpy()[0] - result = [] - for k, v in chunks.items(): - tag = v["split_time"].split(",") - if tag[0] != tag[1]: - result.append((v["slice"], audio[int(tag[0]):int(tag[1])])) - return result, sr diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/decoders/aster_decoder.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/decoders/aster_decoder.py deleted file mode 100644 index 83e249b08c00acc06a7a31a5b5e44ba70ff3b712..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/decoders/aster_decoder.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Dict, Optional, Sequence, Tuple, Union - -import torch -import torch.nn as nn - -from mmocr.models.common.dictionary import Dictionary -from mmocr.registry import MODELS -from mmocr.structures import TextRecogDataSample -from .base import BaseDecoder - - -@MODELS.register_module() -class ASTERDecoder(BaseDecoder): - """Implement attention decoder. - - Args: - in_channels (int): Number of input channels. - emb_dims (int): Dims of char embedding. Defaults to 512. - attn_dims (int): Dims of attention. Both hidden states and features - will be projected to this dims. Defaults to 512. - hidden_size (int): Dims of hidden state for GRU. Defaults to 512. - dictionary (dict or :obj:`Dictionary`): The config for `Dictionary` or - the instance of `Dictionary`. Defaults to None. - max_seq_len (int): Maximum output sequence length :math:`T`. Defaults - to 25. - module_loss (dict, optional): Config to build loss. Defaults to None. - postprocessor (dict, optional): Config to build postprocessor. - Defaults to None. - init_cfg (dict or list[dict], optional): Initialization configs. - Defaults to None. 
- """ - - def __init__(self, - in_channels: int, - emb_dims: int = 512, - attn_dims: int = 512, - hidden_size: int = 512, - dictionary: Union[Dictionary, Dict] = None, - max_seq_len: int = 25, - module_loss: Dict = None, - postprocessor: Dict = None, - init_cfg=dict(type='Xavier', layer='Conv2d')): - super().__init__( - init_cfg=init_cfg, - dictionary=dictionary, - module_loss=module_loss, - postprocessor=postprocessor, - max_seq_len=max_seq_len) - - self.start_idx = self.dictionary.start_idx - self.num_classes = self.dictionary.num_classes - self.in_channels = in_channels - self.embedding_dim = emb_dims - self.att_dims = attn_dims - self.hidden_size = hidden_size - - # Projection layers - self.proj_feat = nn.Linear(in_channels, attn_dims) - self.proj_hidden = nn.Linear(hidden_size, attn_dims) - self.proj_sum = nn.Linear(attn_dims, 1) - - # Decoder input embedding - self.embedding = nn.Embedding(self.num_classes, self.att_dims) - - # GRU - self.gru = nn.GRU( - input_size=self.in_channels + self.embedding_dim, - hidden_size=self.hidden_size, - batch_first=True) - - # Prediction layer - self.fc = nn.Linear(hidden_size, self.dictionary.num_classes) - self.softmax = nn.Softmax(dim=-1) - - def _attention(self, feat: torch.Tensor, prev_hidden: torch.Tensor, - prev_char: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor]: - """Implement the attention mechanism. - - Args: - feat (Tensor): Feature map from encoder of shape :math:`(N, T, C)`. - prev_hidden (Tensor): Previous hidden state from GRU of shape - :math:`(1, N, self.hidden_size)`. - prev_char (Tensor): Previous predicted character of shape - :math:`(N, )`. - - Returns: - tuple(Tensor, Tensor): - - output (Tensor): Predicted character of current time step of - shape :math:`(N, 1)`. - - state (Tensor): Hidden state from GRU of current time step of - shape :math:`(N, self.hidden_size)`. - """ - # Calculate the attention weights - B, T, _ = feat.size() - feat_proj = self.proj_feat(feat) # [N, T, attn_dims] - hidden_proj = self.proj_hidden(prev_hidden) # [1, N, attn_dims] - hidden_proj = hidden_proj.squeeze(0).unsqueeze(1) # [N, 1, attn_dims] - hidden_proj = hidden_proj.expand(B, T, - self.att_dims) # [N, T, attn_dims] - - sum_tanh = torch.tanh(feat_proj + hidden_proj) # [N, T, attn_dims] - sum_proj = self.proj_sum(sum_tanh).squeeze(-1) # [N, T] - attn_weights = torch.softmax(sum_proj, dim=1) # [N, T] - - # GRU forward - context = torch.bmm(attn_weights.unsqueeze(1), feat).squeeze(1) - char_embed = self.embedding(prev_char.long()) # [N, emb_dims] - output, state = self.gru( - torch.cat([char_embed, context], 1).unsqueeze(1), prev_hidden) - output = output.squeeze(1) - output = self.fc(output) - return output, state - - def forward_train( - self, - feat: torch.Tensor = None, - out_enc: Optional[torch.Tensor] = None, - data_samples: Optional[Sequence[TextRecogDataSample]] = None - ) -> torch.Tensor: - """ - Args: - feat (Tensor): Feature from backbone. Unused in this decoder. - out_enc (torch.Tensor, optional): Encoder output. Defaults to None. - data_samples (list[TextRecogDataSample], optional): Batch of - TextRecogDataSample, containing gt_text information. Defaults - to None. - - Returns: - Tensor: The raw logit tensor. Shape :math:`(N, T, C)` where - :math:`C` is ``num_classes``. 
- """ - B = out_enc.shape[0] - state = torch.zeros(1, B, self.hidden_size).to(out_enc.device) - padded_targets = [ - data_sample.gt_text.padded_indexes for data_sample in data_samples - ] - padded_targets = torch.stack(padded_targets, dim=0).to(out_enc.device) - outputs = [] - for i in range(self.max_seq_len): - prev_char = padded_targets[:, i].to(out_enc.device) - output, state = self._attention(out_enc, state, prev_char) - outputs.append(output) - outputs = torch.cat([_.unsqueeze(1) for _ in outputs], 1) - return outputs - - def forward_test( - self, - feat: Optional[torch.Tensor] = None, - out_enc: Optional[torch.Tensor] = None, - data_samples: Optional[Sequence[TextRecogDataSample]] = None - ) -> torch.Tensor: - """ - Args: - feat (Tensor): Feature from backbone. Unused in this decoder. - out_enc (torch.Tensor, optional): Encoder output. Defaults to None. - data_samples (list[TextRecogDataSample], optional): Batch of - TextRecogDataSample, containing gt_text information. Defaults - to None. Unused in this decoder. - - Returns: - Tensor: The raw logit tensor. Shape :math:`(N, T, C)` where - :math:`C` is ``num_classes``. - """ - B = out_enc.shape[0] - predicted = [] - state = torch.zeros(1, B, self.hidden_size).to(out_enc.device) - outputs = [] - for i in range(self.max_seq_len): - if i == 0: - prev_char = torch.zeros(B).fill_(self.start_idx).to( - out_enc.device) - else: - prev_char = predicted - - output, state = self._attention(out_enc, state, prev_char) - outputs.append(output) - _, predicted = output.max(-1) - outputs = torch.cat([_.unsqueeze(1) for _ in outputs], 1) - return self.softmax(outputs) diff --git a/spaces/NATSpeech/PortaSpeech/inference/tts/gradio/infer.py b/spaces/NATSpeech/PortaSpeech/inference/tts/gradio/infer.py deleted file mode 100644 index faae06f1b6fb17167e698d07518b7b52d821d6c4..0000000000000000000000000000000000000000 --- a/spaces/NATSpeech/PortaSpeech/inference/tts/gradio/infer.py +++ /dev/null @@ -1,69 +0,0 @@ -import importlib -import re - -import gradio as gr -import yaml -from gradio.inputs import Textbox - -from inference.tts.base_tts_infer import BaseTTSInfer -from utils.commons.hparams import set_hparams -from utils.commons.hparams import hparams as hp -import numpy as np - -from utils.text.text_encoder import PUNCS - - -class GradioInfer: - def __init__(self, exp_name, inference_cls, title, description, article, example_inputs): - self.exp_name = exp_name - self.title = title - self.description = description - self.article = article - self.example_inputs = example_inputs - pkg = ".".join(inference_cls.split(".")[:-1]) - cls_name = inference_cls.split(".")[-1] - self.inference_cls = getattr(importlib.import_module(pkg), cls_name) - - def greet(self, text): - sents = re.split(rf'([{PUNCS}])', text.replace('\n', ',')) - if sents[-1] not in list(PUNCS): - sents = sents + ['.'] - audio_outs = [] - s = "" - for i in range(0, len(sents), 2): - if len(sents[i]) > 0: - s += sents[i] + sents[i + 1] - if len(s) >= 400 or (i >= len(sents) - 2 and len(s) > 0): - audio_out = self.infer_ins.infer_once({ - 'text': s - }) - audio_out = audio_out * 32767 - audio_out = audio_out.astype(np.int16) - audio_outs.append(audio_out) - audio_outs.append(np.zeros(int(hp['audio_sample_rate'] * 0.3)).astype(np.int16)) - s = "" - audio_outs = np.concatenate(audio_outs) - return hp['audio_sample_rate'], audio_outs - - def run(self): - set_hparams(exp_name=self.exp_name) - infer_cls = self.inference_cls - self.infer_ins: BaseTTSInfer = infer_cls(hp) - example_inputs = 
self.example_inputs - iface = gr.Interface(fn=self.greet, - inputs=Textbox( - lines=10, placeholder=None, default=example_inputs[0], label="input text"), - outputs="audio", - allow_flagging="never", - title=self.title, - description=self.description, - article=self.article, - examples=example_inputs, - enable_queue=True) - iface.launch(cache_examples=True) - - -if __name__ == '__main__': - gradio_config = yaml.safe_load(open('inference/tts/gradio/gradio_settings.yaml')) - g = GradioInfer(**gradio_config) - g.run() diff --git a/spaces/NN520/AI/src/components/providers.tsx b/spaces/NN520/AI/src/components/providers.tsx deleted file mode 100644 index 892226412d80fe0b05211911b9e245cd22876460..0000000000000000000000000000000000000000 --- a/spaces/NN520/AI/src/components/providers.tsx +++ /dev/null @@ -1,15 +0,0 @@ -'use client' - -import * as React from 'react' -import { ThemeProvider as NextThemesProvider } from 'next-themes' -import { ThemeProviderProps } from 'next-themes/dist/types' - -import { TooltipProvider } from '@/components/ui/tooltip' - -export function Providers({ children, ...props }: ThemeProviderProps) { - return ( - <NextThemesProvider {...props}> - <TooltipProvider>{children}</TooltipProvider> - </NextThemesProvider> - ) -} diff --git a/spaces/NemesisAlm/GeolocationCountryClassification/README.md b/spaces/NemesisAlm/GeolocationCountryClassification/README.md deleted file mode 100644 index 8c02424b89dd28db83102a4a060b308ebec626f7..0000000000000000000000000000000000000000 --- a/spaces/NemesisAlm/GeolocationCountryClassification/README.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: GeolocationCountryClassification -app_file: app.py -sdk: gradio -sdk_version: 3.41.2 ---- diff --git a/spaces/Niansuh/DALL-E/README.md b/spaces/Niansuh/DALL-E/README.md deleted file mode 100644 index fbbdcfaa627363412e2da593ab0546bfb9ae874c..0000000000000000000000000000000000000000 --- a/spaces/Niansuh/DALL-E/README.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -title: DALL·E -emoji: 🥑 -colorFrom: yellow -colorTo: green -sdk: static -pinned: false -license: apache-2.0 ---- \ No newline at end of file diff --git a/spaces/NicolasVana/image-captioning/app.py b/spaces/NicolasVana/image-captioning/app.py deleted file mode 100644 index efa955ad4b2499e449c2ca3be733a5a708d524a1..0000000000000000000000000000000000000000 --- a/spaces/NicolasVana/image-captioning/app.py +++ /dev/null @@ -1,92 +0,0 @@ -import streamlit as st -import requests -import io - - -# Designing the interface -st.title("Medical Image Captioning") - -st.sidebar.markdown( - """ - This project features 3 different Medical image captioning models. - Two of them use the InceptionV3 architecture to do feature extraction and then generate the captions using an LSTM model. - The difference between these two is that the first one uses InceptionV3 trained on ImageNet data and outputs 2048 features. - The second one is based on a retrained version of InceptionV3 that uses the CUI data from the ROCO dataset to extract 745 features from the images. - The final model is transformer based on...
- """ -) - -with st.spinner('Loading objects ...'): - from model import * - -random_image_id = get_random_image_id() - -st.sidebar.title("Select a sample image") -sample_image_id = st.sidebar.selectbox( - "Please choose a sample image", - sample_image_ids -) - -st.sidebar.title("Select a model Type") -model_type = st.sidebar.selectbox( - "Please choose a model", - ['Pretrained Inception', 'Retrained Inception', 'Transformer'] -) - -inception, lstm = fetch_model(model_type) -word2Index, index2Word, variable_params = fetch_auxiliary_files(model_type) -max_len = variable_params['max_caption_len'] - -if st.sidebar.button("Random ROCO (test) images"): - random_image_id = get_random_image_id() - sample_image_id = "None" - -bytes_data = None -with st.sidebar.form("file-uploader-form", clear_on_submit=True): - uploaded_file = st.file_uploader("Choose a file") - submitted = st.form_submit_button("Upload") - if submitted and uploaded_file is not None: - bytes_data = io.BytesIO(uploaded_file.getvalue()) - -if (bytes_data is None) and submitted: - - st.write("No file is selected to upload") - -else: - - image_id = random_image_id - if sample_image_id != "None": - assert type(sample_image_id) == int - image_id = sample_image_id - - sample_name = f"ROCO_{str(image_id).zfill(5)}.jpg" - sample_path = os.path.join(sample_dir, sample_name) - - if bytes_data is not None: - image = Image.open(bytes_data) - elif os.path.isfile(sample_path): - image = Image.open(sample_path) - - width, height = 299, 299 - resized = image.resize(size=(width, height)) - - if bytes_data is None: - st.markdown(f"ROCO_{str(image_id).zfill(5)}.jpg") - show = st.image(resized) - show.image(resized, '\n\nSelected Image') - - # For newline - st.sidebar.write('\n') - - with st.spinner('Generating image caption ...'): - st.header(f'Predicted caption:\n\n') - - preprocessed_img = preprocess_image_inception(resized) - features = extract_features(inception, preprocessed_img) - caption = generate_caption(lstm, features, max_len, word2Index, index2Word) - st.subheader(caption) - - st.sidebar.header("Model predicts: ") - st.sidebar.write(f"{caption}") - - image.close() diff --git a/spaces/OFA-Sys/FAST-CPU-small-stable-diffusion-v0/pipeline_openvino_stable_diffusion.py b/spaces/OFA-Sys/FAST-CPU-small-stable-diffusion-v0/pipeline_openvino_stable_diffusion.py deleted file mode 100644 index 2c934742760f46b0bbeb4676a3cdc66178eac8d1..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/FAST-CPU-small-stable-diffusion-v0/pipeline_openvino_stable_diffusion.py +++ /dev/null @@ -1,405 +0,0 @@ -# Copyright 2022 The OFA-Sys Team. -# This source code is licensed under the Apache 2.0 license -# found in the LICENSE file in the root directory. -# Copyright 2022 The HuggingFace Inc. team. -# All rights reserved. -# This source code is licensed under the Apache 2.0 license -# found in the LICENSE file in the root directory. 
- -import inspect -from typing import Callable, List, Optional, Union - -import numpy as np -import torch -import os - -from transformers import CLIPFeatureExtractor, CLIPTokenizer - -from diffusers.configuration_utils import FrozenDict -from diffusers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler -from diffusers.utils import deprecate, logging -from diffusers import OnnxRuntimeModel - -from diffusers import OnnxStableDiffusionPipeline, DiffusionPipeline -from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput -from openvino.runtime import Core -ORT_TO_NP_TYPE = { - "tensor(bool)": np.bool_, - "tensor(int8)": np.int8, - "tensor(uint8)": np.uint8, - "tensor(int16)": np.int16, - "tensor(uint16)": np.uint16, - "tensor(int32)": np.int32, - "tensor(uint32)": np.uint32, - "tensor(int64)": np.int64, - "tensor(uint64)": np.uint64, - "tensor(float16)": np.float16, - "tensor(float)": np.float32, - "tensor(double)": np.float64, -} - -logger = logging.get_logger(__name__) - - -class OpenVINOStableDiffusionPipeline(DiffusionPipeline): - vae_encoder: OnnxRuntimeModel - vae_decoder: OnnxRuntimeModel - text_encoder: OnnxRuntimeModel - tokenizer: CLIPTokenizer - unet: OnnxRuntimeModel - scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] - safety_checker: OnnxRuntimeModel - feature_extractor: CLIPFeatureExtractor - - _optional_components = ["safety_checker", "feature_extractor"] - - def __init__( - self, - vae_encoder: OnnxRuntimeModel, - vae_decoder: OnnxRuntimeModel, - text_encoder: OnnxRuntimeModel, - tokenizer: CLIPTokenizer, - unet: OnnxRuntimeModel, - scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], - safety_checker: OnnxRuntimeModel, - feature_extractor: CLIPFeatureExtractor, - requires_safety_checker: bool = True, - ): - super().__init__() - - if hasattr(scheduler.config, - "steps_offset") and scheduler.config.steps_offset != 1: - deprecation_message = ( - f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" - f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " - "to update the config accordingly as leaving `steps_offset` might led to incorrect results" - " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," - " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" - " file") - deprecate("steps_offset!=1", - "1.0.0", - deprecation_message, - standard_warn=False) - new_config = dict(scheduler.config) - new_config["steps_offset"] = 1 - scheduler._internal_dict = FrozenDict(new_config) - - if hasattr(scheduler.config, - "clip_sample") and scheduler.config.clip_sample is True: - deprecation_message = ( - f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." - " `clip_sample` should be set to False in the configuration file. Please make sure to update the" - " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" - " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" - " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" - ) - deprecate("clip_sample not set", - "1.0.0", - deprecation_message, - standard_warn=False) - new_config = dict(scheduler.config) - new_config["clip_sample"] = False - scheduler._internal_dict = FrozenDict(new_config) - - if safety_checker is None and requires_safety_checker: - logger.warning( - f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" - " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" - " results in services or applications open to the public. Both the diffusers team and Hugging Face" - " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" - " it only for use-cases that involve analyzing network behavior or auditing its results. For more" - " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." - ) - - if safety_checker is not None and feature_extractor is None: - raise ValueError( - "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" - " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." - ) - - self.register_modules( - vae_encoder=vae_encoder, - vae_decoder=vae_decoder, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - self.convert_to_openvino() - self.register_to_config( - requires_safety_checker=requires_safety_checker) - - @classmethod - def from_onnx_pipeline(cls, onnx_pipe: OnnxStableDiffusionPipeline): - r""" - Create OpenVINOStableDiffusionPipeline from a onnx stable pipeline. - Parameters: - onnx_pipe (OnnxStableDiffusionPipeline) - """ - return cls(onnx_pipe.vae_encoder, onnx_pipe.vae_decoder, - onnx_pipe.text_encoder, onnx_pipe.tokenizer, onnx_pipe.unet, - onnx_pipe.scheduler, onnx_pipe.safety_checker, - onnx_pipe.feature_extractor, True) - - def convert_to_openvino(self): - ie = Core() - - # VAE decoder - vae_decoder_onnx = ie.read_model( - model=os.path.join(self.vae_decoder.model_save_dir, "model.onnx")) - vae_decoder = ie.compile_model(model=vae_decoder_onnx, - device_name="CPU") - - # Text encoder - text_encoder_onnx = ie.read_model( - model=os.path.join(self.text_encoder.model_save_dir, "model.onnx")) - text_encoder = ie.compile_model(model=text_encoder_onnx, - device_name="CPU") - - # Unet - unet_onnx = ie.read_model( - model=os.path.join(self.unet.model_save_dir, "model.onnx")) - unet = ie.compile_model(model=unet_onnx, device_name="CPU") - - self.register_modules(vae_decoder=vae_decoder, - text_encoder=text_encoder, - unet=unet) - - def _encode_prompt(self, prompt, num_images_per_prompt, - do_classifier_free_guidance, negative_prompt): - r""" - Encodes the prompt into text encoder hidden states. - Args: - prompt (`str` or `List[str]`): - prompt to be encoded - num_images_per_prompt (`int`): - number of images that should be generated per prompt - do_classifier_free_guidance (`bool`): - whether to use classifier free guidance or not - negative_prompt (`str` or `List[str]`): - The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored - if `guidance_scale` is less than `1`). 
- """ - batch_size = len(prompt) if isinstance(prompt, list) else 1 - - # get prompt text embeddings - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="np", - ) - text_input_ids = text_inputs.input_ids - untruncated_ids = self.tokenizer(prompt, - padding="max_length", - return_tensors="np").input_ids - - if not np.array_equal(text_input_ids, untruncated_ids): - removed_text = self.tokenizer.batch_decode( - untruncated_ids[:, self.tokenizer.model_max_length - 1:-1]) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}") - - prompt_embeds = self.text_encoder( - {"input_ids": - text_input_ids.astype(np.int32)})[self.text_encoder.outputs[0]] - prompt_embeds = np.repeat(prompt_embeds, num_images_per_prompt, axis=0) - - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance: - uncond_tokens: List[str] - if negative_prompt is None: - uncond_tokens = [""] * batch_size - elif type(prompt) is not type(negative_prompt): - raise TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}.") - elif isinstance(negative_prompt, str): - uncond_tokens = [negative_prompt] * batch_size - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`.") - else: - uncond_tokens = negative_prompt - - max_length = text_input_ids.shape[-1] - uncond_input = self.tokenizer( - uncond_tokens, - padding="max_length", - max_length=max_length, - truncation=True, - return_tensors="np", - ) - negative_prompt_embeds = self.text_encoder({ - "input_ids": - uncond_input.input_ids.astype(np.int32) - })[self.text_encoder.outputs[0]] - negative_prompt_embeds = np.repeat(negative_prompt_embeds, - num_images_per_prompt, - axis=0) - - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - prompt_embeds = np.concatenate( - [negative_prompt_embeds, prompt_embeds]) - - return prompt_embeds - - def __call__( - self, - prompt: Union[str, List[str]], - height: Optional[int] = 512, - width: Optional[int] = 512, - num_inference_steps: Optional[int] = 50, - guidance_scale: Optional[float] = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_images_per_prompt: Optional[int] = 1, - eta: Optional[float] = 0.0, - generator: Optional[np.random.RandomState] = None, - latents: Optional[np.ndarray] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, np.ndarray], None]] = None, - callback_steps: Optional[int] = 1, - ): - if isinstance(prompt, str): - batch_size = 1 - elif isinstance(prompt, list): - batch_size = len(prompt) - else: - raise ValueError( - f"`prompt` has to be of type `str` or `list` but is {type(prompt)}" - ) - - if height % 8 != 0 or width % 8 != 0: - raise ValueError( - f"`height` and `width` have to be divisible by 8 but are {height} and {width}." 
- ) - - if (callback_steps is None) or (callback_steps is not None and - (not isinstance(callback_steps, int) - or callback_steps <= 0)): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}.") - - if generator is None: - generator = np.random - - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt) - - # get the initial random noise unless the user supplied it - latents_dtype = prompt_embeds.dtype - latents_shape = (batch_size * num_images_per_prompt, 4, height // 8, - width // 8) - if latents is None: - latents = generator.randn(*latents_shape).astype(latents_dtype) - elif latents.shape != latents_shape: - raise ValueError( - f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" - ) - - # set timesteps - self.scheduler.set_timesteps(num_inference_steps) - - latents = latents * np.float64(self.scheduler.init_noise_sigma) - - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. - # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - accepts_eta = "eta" in set( - inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # timestep_dtype = next( - # (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)" - # ) - timestep_dtype = 'tensor(int64)' - timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] - - for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): - # expand the latents if we are doing classifier free guidance - latent_model_input = np.concatenate( - [latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input( - torch.from_numpy(latent_model_input), t) - latent_model_input = latent_model_input.cpu().numpy() - - # predict the noise residual - timestep = np.array([t], dtype=timestep_dtype) - unet_input = { - "sample": latent_model_input, - "timestep": timestep, - "encoder_hidden_states": prompt_embeds - } - noise_pred = self.unet(unet_input)[self.unet.outputs[0]] - # noise_pred = noise_pred[0] - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2) - noise_pred = noise_pred_uncond + guidance_scale * ( - noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - scheduler_output = self.scheduler.step( - torch.from_numpy(noise_pred), t, torch.from_numpy(latents), - **extra_step_kwargs) - latents = scheduler_output.prev_sample.numpy() - - # call the callback, if provided - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - latents = 1 / 0.18215 * latents - image = self.vae_decoder({"latent_sample": - latents})[self.vae_decoder.outputs[0]] - - image = np.clip(image / 2 + 0.5, 0, 1) - image = image.transpose((0, 2, 3, 1)) - - if self.safety_checker is not None: - safety_checker_input = self.feature_extractor( - self.numpy_to_pil(image), - 
return_tensors="np").pixel_values.astype(image.dtype) - - image, has_nsfw_concepts = self.safety_checker( - clip_input=safety_checker_input, images=image) - - # There will throw an error if use safety_checker batchsize>1 - images, has_nsfw_concept = [], [] - for i in range(image.shape[0]): - image_i, has_nsfw_concept_i = self.safety_checker( - clip_input=safety_checker_input[i:i + 1], - images=image[i:i + 1]) - images.append(image_i) - has_nsfw_concept.append(has_nsfw_concept_i[0]) - image = np.concatenate(images) - else: - has_nsfw_concept = None - - if output_type == "pil": - image = self.numpy_to_pil(image) - - if not return_dict: - return (image, has_nsfw_concept) - - return StableDiffusionPipelineOutput( - images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/multilingual/sampled_multi_epoch_dataset.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/multilingual/sampled_multi_epoch_dataset.py deleted file mode 100644 index 17387b2f85c0ee76db1a003091331b46de8d8def..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/data/multilingual/sampled_multi_epoch_dataset.py +++ /dev/null @@ -1,199 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import hashlib -import logging -import math - -import numpy as np -from fairseq.data import SampledMultiDataset - -from .sampled_multi_dataset import CollateFormat, default_virtual_size_func - - -logger = logging.getLogger(__name__) - - -class SampledMultiEpochDataset(SampledMultiDataset): - """Samples from multiple sub-datasets according to sampling ratios - using virtual epoch sizes to speed up dataloading. - Args: - datasets ( - List[~torch.utils.data.Dataset] - or OrderedDict[str, ~torch.utils.data.Dataset] - ): datasets - sampling_ratios (List[float]): list of probability of each dataset to be sampled - (default: None, which corresponds to concating all dataset together). - seed (int): RNG seed to use (default: 2). - epoch (int): starting epoch number (default: 1). - eval_key (str, optional): a key used at evaluation time that causes - this instance to pass-through batches from *datasets[eval_key]*. - collate_format (CollateFormat): collater output format, either CollateFormat.ordered_dict or - CollateFormat.single (default: CollateFormat.single) where CollateFormat.single configures - the collater to output batches of data mixed from all sub-datasets, - and CollateFormat.ordered_dict configures the collater to output a dictionary of batches indexed by keys - of sub-datasets. - Note that not all sub-datasets will present in a single batch in both formats. - virtual_size (int, or callable): the expected virtual size of the dataset (default: default_virtual_size_func). - split (str): the split of the data, e.g. 'train', 'valid' or 'test'. - virtual_epoch_size (int): virtual epoch size, the dataset will go through the data by - this virtual epoch size one by one to speed up data loading, e.g. indicing and filtering - can be performed whenever a virtual epoch is loaded without waiting for the whole dataset to be loaded. - shared_collater (bool): whether or not to all sub-datasets have the same collater. - shard_epoch (int): the real epoch number for shard selection. - shuffle (bool): whether or not to shuffle data (default: True). 
- """ - - def __init__( - self, - datasets, - sampling_ratios=None, - seed=2, - epoch=1, - eval_key=None, - collate_format=CollateFormat.single, - virtual_size=default_virtual_size_func, - split="", - virtual_epoch_size=None, - shared_collater=False, - shard_epoch=1, - shuffle=True, - ): - self.virtual_epoch_size = virtual_epoch_size - self._current_epoch_start_index = None - self._random_global_indices = None - self.shard_epoch = shard_epoch if shard_epoch is not None else 1 - self.load_next_shard = None - self._epoch_sizes = None - super().__init__( - datasets=datasets, - sampling_ratios=sampling_ratios, - seed=seed, - epoch=epoch, - eval_key=eval_key, - collate_format=collate_format, - virtual_size=virtual_size, - split=split, - shared_collater=shared_collater, - shuffle=shuffle, - ) - - def _setup(self, epoch): - self.virtual_epoch_size = ( - self.virtual_epoch_size - if self.virtual_epoch_size is not None - else self.virtual_size - ) - if self.virtual_epoch_size > self.virtual_size: - logger.warning( - f"virtual epoch size {self.virtual_epoch_size} " - f"is greater than virtual dataset size {self.virtual_size}" - ) - self.virtual_epoch_size = self.virtual_size - self.num_virtual_epochs = math.ceil(self.virtual_size / self.virtual_epoch_size) - self._current_epoch_start_index = self._get_epoch_start_index(epoch) - logger.info( - f"virtual epoch size {self.virtual_epoch_size}; virtual dataset size {self.virtual_size}" - ) - - def _map_epoch_index_to_global(self, index): - index = self._current_epoch_start_index + index - # add randomness - return self._random_global_indices[index] - - @property - def sizes(self): - if self._epoch_sizes is not None: - return self._epoch_sizes - _sizes = super().sizes - indices = self._random_global_indices[ - self._current_epoch_start_index : self._current_epoch_start_index - + len(self) - ] - self._epoch_sizes = _sizes[indices] - # del super()._sizes to save memory - del self._sizes - self._sizes = None - return self._epoch_sizes - - def _get_dataset_and_index(self, index): - i = self._map_epoch_index_to_global(index) - return super()._get_dataset_and_index(i) - - def __len__(self): - return ( - self.virtual_epoch_size - if self._current_epoch_start_index + self.virtual_epoch_size - < self.virtual_size - else self.virtual_size - self._current_epoch_start_index - ) - - def set_epoch(self, epoch): - if self._current_epoch_start_index is None: - # initializing epoch idnices of a virtual dataset - self._setup(epoch) - self._next_virtual_epoch(epoch) - else: - # working on already intialized epoch indices - if epoch == self._cur_epoch: - # re-enter so return - return - self._next_virtual_epoch(epoch) - - def _get_epoch_start_index(self, epoch): - assert epoch >= 1 # fairseq is using 1-based epoch everywhere - return ((epoch - 1) % self.num_virtual_epochs) * self.virtual_epoch_size - - def _next_global_indices(self, epoch): - rng = np.random.RandomState( - [ - int( - hashlib.sha1( - str(self.__class__.__name__).encode("utf-8") - ).hexdigest(), - 16, - ) - % (2 ** 32), - self.seed % (2 ** 32), # global seed - epoch, # epoch index, - ] - ) - del self._random_global_indices - self._random_global_indices = rng.choice( - self.virtual_size, self.virtual_size, replace=False - ) - if self.load_next_shard is None: - self.load_next_shard = False - else: - # increase shard epoch for next loading - self.shard_epoch += 1 - self.load_next_shard = True - logger.info( - "to load next epoch/shard in next load_dataset: " - f"epoch={epoch}/shard_epoch={self.shard_epoch}" - ) - 
- def _next_virtual_epoch(self, epoch): - index = self._get_epoch_start_index(epoch) - if index == 0 or self._random_global_indices is None: - # need to start from the beginning, - # so call super().set_epoch(epoch) to establish the global virtual indices - logger.info( - "establishing a new set of global virtual indices for " - f"epoch={epoch}/shard_epoch={self.shard_epoch}" - ) - super().set_epoch(epoch) - self._next_global_indices(epoch) - else: - self._cur_epoch = epoch - - # reset cache sizes and ordered_indices for the epoch after moving to a new epoch - self._clean_if_not_none( - [ - self._epoch_sizes, - ] - ) - self._epoch_sizes = None - self._current_epoch_start_index = index diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/scripts/compare_namespaces.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/scripts/compare_namespaces.py deleted file mode 100644 index bc24db624f8db36f546c263ba3a806dae6d466bf..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/scripts/compare_namespaces.py +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env python -"""Helper script to compare two argparse.Namespace objects.""" - -from argparse import Namespace # noqa - - -def main(): - - ns1 = eval(input("Namespace 1: ")) - ns2 = eval(input("Namespace 2: ")) - - def keys(ns): - ks = set() - for k in dir(ns): - if not k.startswith("_"): - ks.add(k) - return ks - - k1 = keys(ns1) - k2 = keys(ns2) - - def print_keys(ks, ns1, ns2=None): - for k in ks: - if ns2 is None: - print("{}\t{}".format(k, getattr(ns1, k, None))) - else: - print( - "{}\t{}\t{}".format(k, getattr(ns1, k, None), getattr(ns2, k, None)) - ) - - print("Keys unique to namespace 1:") - print_keys(k1 - k2, ns1) - print() - - print("Keys unique to namespace 2:") - print_keys(k2 - k1, ns2) - print() - - print("Overlapping keys with different values:") - ks = [k for k in k1 & k2 if getattr(ns1, k, "None") != getattr(ns2, k, "None")] - print_keys(ks, ns1, ns2) - print() - - -if __name__ == "__main__": - main() diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/noisychannel/__init__.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/noisychannel/__init__.py deleted file mode 100644 index 89f1aef4f6328d25425e0bcabb42dfffd2ed35f0..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/noisychannel/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .rerank_options import * # noqa diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_recognition/tasks/__init__.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_recognition/tasks/__init__.py deleted file mode 100644 index 7ac3b8dc69639c92cc129294356e9012745e3fb2..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_recognition/tasks/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -import importlib -import os - - -for file in sorted(os.listdir(os.path.dirname(__file__))): - if file.endswith(".py") and not file.startswith("_"): - task_name = file[: file.find(".py")] - importlib.import_module("examples.speech_recognition.tasks." 
+ task_name) diff --git a/spaces/Omnibus/Video-Diffusion-WebUI/video_diffusion/stable_diffusion_video/stable_video_text2video.py b/spaces/Omnibus/Video-Diffusion-WebUI/video_diffusion/stable_diffusion_video/stable_video_text2video.py deleted file mode 100644 index 8235f010644f025c202574a38fa703f9399f7851..0000000000000000000000000000000000000000 --- a/spaces/Omnibus/Video-Diffusion-WebUI/video_diffusion/stable_diffusion_video/stable_video_text2video.py +++ /dev/null @@ -1,158 +0,0 @@ -import gradio as gr -import numpy as np -import torch - -from video_diffusion.stable_diffusion_video.stable_diffusion_pipeline import StableDiffusionWalkPipeline -from video_diffusion.utils.model_list import stable_model_list - - -class StableDiffusionText2VideoGenerator: - def __init__(self): - self.pipe = None - - def load_model( - self, - model_path, - ): - if self.pipe is None: - self.pipe = StableDiffusionWalkPipeline.from_pretrained( - model_path, - torch_dtype=torch.float16, - revision="fp16", - ) - - self.pipe.to("cuda") - self.pipe.enable_xformers_memory_efficient_attention() - self.pipe.enable_attention_slicing() - - return self.pipe - - def generate_video( - self, - model_path: str, - first_prompts: str, - second_prompts: str, - negative_prompt: str, - num_interpolation_steps: int, - guidance_scale: int, - num_inference_step: int, - height: int, - width: int, - upsample: bool, - fps=int, - ): - first_seed = np.random.randint(0, 100000) - second_seed = np.random.randint(0, 100000) - seeds = [first_seed, second_seed] - prompts = [first_prompts, second_prompts] - pipe = self.load_model(model_path=model_path) - - output_video = pipe.walk( - prompts=prompts, - num_interpolation_steps=int(num_interpolation_steps), - height=height, - width=width, - guidance_scale=guidance_scale, - num_inference_steps=num_inference_step, - negative_prompt=negative_prompt, - seeds=seeds, - upsample=upsample, - fps=fps, - ) - - return output_video - - def app(): - with gr.Blocks(): - with gr.Row(): - with gr.Column(): - stable_text2video_first_prompt = gr.Textbox( - lines=1, - placeholder="First Prompt", - show_label=False, - ) - stable_text2video_second_prompt = gr.Textbox( - lines=1, - placeholder="Second Prompt", - show_label=False, - ) - stable_text2video_negative_prompt = gr.Textbox( - lines=1, - placeholder="Negative Prompt ", - show_label=False, - ) - with gr.Row(): - with gr.Column(): - stable_text2video_model_path = gr.Dropdown( - choices=stable_model_list, - label="Stable Model List", - value=stable_model_list[0], - ) - stable_text2video_guidance_scale = gr.Slider( - minimum=0, - maximum=15, - step=1, - value=8.5, - label="Guidance Scale", - ) - stable_text2video_num_inference_steps = gr.Slider( - minimum=1, - maximum=100, - step=1, - value=30, - label="Number of Inference Steps", - ) - stable_text2video_fps = gr.Slider( - minimum=1, - maximum=60, - step=1, - value=10, - label="Fps", - ) - with gr.Row(): - with gr.Column(): - stable_text2video_num_interpolation_steps = gr.Number( - value=10, - label="Number of Interpolation Steps", - ) - stable_text2video_height = gr.Slider( - minimum=1, - maximum=1000, - step=1, - value=512, - label="Height", - ) - stable_text2video_width = gr.Slider( - minimum=1, - maximum=1000, - step=1, - value=512, - label="Width", - ) - stable_text2video_upsample = gr.Checkbox( - label="Upsample", - default=False, - ) - - text2video_generate = gr.Button(value="Generator") - - with gr.Column(): - text2video_output = gr.Video(label="Output") - - text2video_generate.click( - 
fn=StableDiffusionText2VideoGenerator().generate_video, - inputs=[ - stable_text2video_model_path, - stable_text2video_first_prompt, - stable_text2video_second_prompt, - stable_text2video_negative_prompt, - stable_text2video_num_interpolation_steps, - stable_text2video_guidance_scale, - stable_text2video_num_inference_steps, - stable_text2video_height, - stable_text2video_width, - stable_text2video_upsample, - stable_text2video_fps, - ], - outputs=text2video_output, - ) diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/setup.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/setup.py deleted file mode 100644 index 50a5e23e9ba6fab32775ebd16fb2746c7bf6660c..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/setup.py +++ /dev/null @@ -1,206 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) Facebook, Inc. and its affiliates. - -import glob -import os -import shutil -from os import path -from setuptools import find_packages, setup -from typing import List -import torch -from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension - -torch_ver = [int(x) for x in torch.__version__.split(".")[:2]] -assert torch_ver >= [1, 8], "Requires PyTorch >= 1.8" - - -def get_version(): - init_py_path = path.join(path.abspath(path.dirname(__file__)), "detectron2", "__init__.py") - init_py = open(init_py_path, "r").readlines() - version_line = [l.strip() for l in init_py if l.startswith("__version__")][0] - version = version_line.split("=")[-1].strip().strip("'\"") - - # The following is used to build release packages. - # Users should never use it. - suffix = os.getenv("D2_VERSION_SUFFIX", "") - version = version + suffix - if os.getenv("BUILD_NIGHTLY", "0") == "1": - from datetime import datetime - - date_str = datetime.today().strftime("%y%m%d") - version = version + ".dev" + date_str - - new_init_py = [l for l in init_py if not l.startswith("__version__")] - new_init_py.append('__version__ = "{}"\n'.format(version)) - with open(init_py_path, "w") as f: - f.write("".join(new_init_py)) - return version - - -def get_extensions(): - this_dir = path.dirname(path.abspath(__file__)) - extensions_dir = path.join(this_dir, "detectron2", "layers", "csrc") - - main_source = path.join(extensions_dir, "vision.cpp") - sources = glob.glob(path.join(extensions_dir, "**", "*.cpp")) - - from torch.utils.cpp_extension import ROCM_HOME - - is_rocm_pytorch = ( - True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False - ) - if is_rocm_pytorch: - assert torch_ver >= [1, 8], "ROCM support requires PyTorch >= 1.8!" - - # common code between cuda and rocm platforms, for hipify version [1,0,0] and later. 
- source_cuda = glob.glob(path.join(extensions_dir, "**", "*.cu")) + glob.glob( - path.join(extensions_dir, "*.cu") - ) - sources = [main_source] + sources - - extension = CppExtension - - extra_compile_args = {"cxx": []} - define_macros = [] - - if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) or os.getenv( - "FORCE_CUDA", "0" - ) == "1": - extension = CUDAExtension - sources += source_cuda - - if not is_rocm_pytorch: - define_macros += [("WITH_CUDA", None)] - extra_compile_args["nvcc"] = [ - "-O3", - "-DCUDA_HAS_FP16=1", - "-D__CUDA_NO_HALF_OPERATORS__", - "-D__CUDA_NO_HALF_CONVERSIONS__", - "-D__CUDA_NO_HALF2_OPERATORS__", - ] - else: - define_macros += [("WITH_HIP", None)] - extra_compile_args["nvcc"] = [] - - if torch_ver < [1, 7]: - # supported by https://github.com/pytorch/pytorch/pull/43931 - CC = os.environ.get("CC", None) - if CC is not None: - extra_compile_args["nvcc"].append("-ccbin={}".format(CC)) - - include_dirs = [extensions_dir] - - ext_modules = [ - extension( - "detectron2._C", - sources, - include_dirs=include_dirs, - define_macros=define_macros, - extra_compile_args=extra_compile_args, - ) - ] - - return ext_modules - - -def get_model_zoo_configs() -> List[str]: - """ - Return a list of configs to include in package for model zoo. Copy over these configs inside - detectron2/model_zoo. - """ - - # Use absolute paths while symlinking. - source_configs_dir = path.join(path.dirname(path.realpath(__file__)), "configs") - destination = path.join( - path.dirname(path.realpath(__file__)), "detectron2", "model_zoo", "configs" - ) - # Symlink the config directory inside package to have a cleaner pip install. - - # Remove stale symlink/directory from a previous build. - if path.exists(source_configs_dir): - if path.islink(destination): - os.unlink(destination) - elif path.isdir(destination): - shutil.rmtree(destination) - - if not path.exists(destination): - try: - os.symlink(source_configs_dir, destination) - except OSError: - # Fall back to copying if symlink fails: ex. on Windows. - shutil.copytree(source_configs_dir, destination) - - config_paths = glob.glob("configs/**/*.yaml", recursive=True) + glob.glob( - "configs/**/*.py", recursive=True - ) - return config_paths - - -# For projects that are relative small and provide features that are very close -# to detectron2's core functionalities, we install them under detectron2.projects -PROJECTS = { - -} - -setup( - name="detectron2", - version=get_version(), - author="FAIR", - url="https://github.com/facebookresearch/detectron2", - description="Detectron2 is FAIR's next-generation research " - "platform for object detection and segmentation.", - packages=find_packages(exclude=("configs", "tests*")) + list(PROJECTS.keys()), - package_dir=PROJECTS, - package_data={"detectron2.model_zoo": get_model_zoo_configs()}, - python_requires=">=3.6", - install_requires=[ - # These dependencies are not pure-python. - # In general, avoid adding more dependencies like them because they are not - # guaranteed to be installable by `pip install` on all platforms. - # To tell if a package is pure-python, go to https://pypi.org/project/{name}/#files - "Pillow>=7.1", # or use pillow-simd for better performance - "matplotlib", # TODO move it to optional after we add opencv visualization - "pycocotools>=2.0.2", # corresponds to https://github.com/ppwwyyxx/cocoapi - # Do not add opencv here. 
Just like pytorch, user should install - # opencv themselves, preferrably by OS's package manager, or by - # choosing the proper pypi package name at https://github.com/skvark/opencv-python - # The following are pure-python dependencies that should be easily installable - "termcolor>=1.1", - "yacs>=0.1.8", - "tabulate", - "cloudpickle", - "tqdm>4.29.0", - "tensorboard", - # Lock version of fvcore/iopath because they may have breaking changes - # NOTE: when updating fvcore/iopath version, make sure fvcore depends - # on compatible version of iopath. - "fvcore>=0.1.5,<0.1.6", # required like this to make it pip installable - "iopath>=0.1.7,<0.1.10", - "future", # used by caffe2 - "pydot", # used to save caffe2 SVGs - "dataclasses; python_version<'3.7'", - "omegaconf>=2.1", - "hydra-core>=1.1", - "black==21.4b2", - # If a new dependency is required at import time (in addition to runtime), it - # probably needs to exist in docs/requirements.txt, or as a mock in docs/conf.py - ], - extras_require={ - # optional dependencies, required by some features - "all": [ - "shapely", - "pygments>=2.2", - "psutil", - "panopticapi @ https://github.com/cocodataset/panopticapi/archive/master.zip", - ], - # dev dependencies. Install them by `pip install 'detectron2[dev]'` - "dev": [ - "flake8==3.8.1", - "isort==4.3.21", - "flake8-bugbear", - "flake8-comprehensions", - ], - }, - ext_modules=get_extensions(), - cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension}, -) diff --git a/spaces/PSLD/PSLD/stable-diffusion/ldm/modules/x_transformer.py b/spaces/PSLD/PSLD/stable-diffusion/ldm/modules/x_transformer.py deleted file mode 100644 index 5fc15bf9cfe0111a910e7de33d04ffdec3877576..0000000000000000000000000000000000000000 --- a/spaces/PSLD/PSLD/stable-diffusion/ldm/modules/x_transformer.py +++ /dev/null @@ -1,641 +0,0 @@ -"""shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers""" -import torch -from torch import nn, einsum -import torch.nn.functional as F -from functools import partial -from inspect import isfunction -from collections import namedtuple -from einops import rearrange, repeat, reduce - -# constants - -DEFAULT_DIM_HEAD = 64 - -Intermediates = namedtuple('Intermediates', [ - 'pre_softmax_attn', - 'post_softmax_attn' -]) - -LayerIntermediates = namedtuple('Intermediates', [ - 'hiddens', - 'attn_intermediates' -]) - - -class AbsolutePositionalEmbedding(nn.Module): - def __init__(self, dim, max_seq_len): - super().__init__() - self.emb = nn.Embedding(max_seq_len, dim) - self.init_() - - def init_(self): - nn.init.normal_(self.emb.weight, std=0.02) - - def forward(self, x): - n = torch.arange(x.shape[1], device=x.device) - return self.emb(n)[None, :, :] - - -class FixedPositionalEmbedding(nn.Module): - def __init__(self, dim): - super().__init__() - inv_freq = 1. 
/ (10000 ** (torch.arange(0, dim, 2).float() / dim)) - self.register_buffer('inv_freq', inv_freq) - - def forward(self, x, seq_dim=1, offset=0): - t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset - sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq) - emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1) - return emb[None, :, :] - - -# helpers - -def exists(val): - return val is not None - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def always(val): - def inner(*args, **kwargs): - return val - return inner - - -def not_equals(val): - def inner(x): - return x != val - return inner - - -def equals(val): - def inner(x): - return x == val - return inner - - -def max_neg_value(tensor): - return -torch.finfo(tensor.dtype).max - - -# keyword argument helpers - -def pick_and_pop(keys, d): - values = list(map(lambda key: d.pop(key), keys)) - return dict(zip(keys, values)) - - -def group_dict_by_key(cond, d): - return_val = [dict(), dict()] - for key in d.keys(): - match = bool(cond(key)) - ind = int(not match) - return_val[ind][key] = d[key] - return (*return_val,) - - -def string_begins_with(prefix, str): - return str.startswith(prefix) - - -def group_by_key_prefix(prefix, d): - return group_dict_by_key(partial(string_begins_with, prefix), d) - - -def groupby_prefix_and_trim(prefix, d): - kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d) - kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) - return kwargs_without_prefix, kwargs - - -# classes -class Scale(nn.Module): - def __init__(self, value, fn): - super().__init__() - self.value = value - self.fn = fn - - def forward(self, x, **kwargs): - x, *rest = self.fn(x, **kwargs) - return (x * self.value, *rest) - - -class Rezero(nn.Module): - def __init__(self, fn): - super().__init__() - self.fn = fn - self.g = nn.Parameter(torch.zeros(1)) - - def forward(self, x, **kwargs): - x, *rest = self.fn(x, **kwargs) - return (x * self.g, *rest) - - -class ScaleNorm(nn.Module): - def __init__(self, dim, eps=1e-5): - super().__init__() - self.scale = dim ** -0.5 - self.eps = eps - self.g = nn.Parameter(torch.ones(1)) - - def forward(self, x): - norm = torch.norm(x, dim=-1, keepdim=True) * self.scale - return x / norm.clamp(min=self.eps) * self.g - - -class RMSNorm(nn.Module): - def __init__(self, dim, eps=1e-8): - super().__init__() - self.scale = dim ** -0.5 - self.eps = eps - self.g = nn.Parameter(torch.ones(dim)) - - def forward(self, x): - norm = torch.norm(x, dim=-1, keepdim=True) * self.scale - return x / norm.clamp(min=self.eps) * self.g - - -class Residual(nn.Module): - def forward(self, x, residual): - return x + residual - - -class GRUGating(nn.Module): - def __init__(self, dim): - super().__init__() - self.gru = nn.GRUCell(dim, dim) - - def forward(self, x, residual): - gated_output = self.gru( - rearrange(x, 'b n d -> (b n) d'), - rearrange(residual, 'b n d -> (b n) d') - ) - - return gated_output.reshape_as(x) - - -# feedforward - -class GEGLU(nn.Module): - def __init__(self, dim_in, dim_out): - super().__init__() - self.proj = nn.Linear(dim_in, dim_out * 2) - - def forward(self, x): - x, gate = self.proj(x).chunk(2, dim=-1) - return x * F.gelu(gate) - - -class FeedForward(nn.Module): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): - super().__init__() - inner_dim = int(dim * mult) - dim_out = default(dim_out, dim) - 
project_in = nn.Sequential( - nn.Linear(dim, inner_dim), - nn.GELU() - ) if not glu else GEGLU(dim, inner_dim) - - self.net = nn.Sequential( - project_in, - nn.Dropout(dropout), - nn.Linear(inner_dim, dim_out) - ) - - def forward(self, x): - return self.net(x) - - -# attention. -class Attention(nn.Module): - def __init__( - self, - dim, - dim_head=DEFAULT_DIM_HEAD, - heads=8, - causal=False, - mask=None, - talking_heads=False, - sparse_topk=None, - use_entmax15=False, - num_mem_kv=0, - dropout=0., - on_attn=False - ): - super().__init__() - if use_entmax15: - raise NotImplementedError("Check out entmax activation instead of softmax activation!") - self.scale = dim_head ** -0.5 - self.heads = heads - self.causal = causal - self.mask = mask - - inner_dim = dim_head * heads - - self.to_q = nn.Linear(dim, inner_dim, bias=False) - self.to_k = nn.Linear(dim, inner_dim, bias=False) - self.to_v = nn.Linear(dim, inner_dim, bias=False) - self.dropout = nn.Dropout(dropout) - - # talking heads - self.talking_heads = talking_heads - if talking_heads: - self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads)) - self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads)) - - # explicit topk sparse attention - self.sparse_topk = sparse_topk - - # entmax - #self.attn_fn = entmax15 if use_entmax15 else F.softmax - self.attn_fn = F.softmax - - # add memory key / values - self.num_mem_kv = num_mem_kv - if num_mem_kv > 0: - self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) - self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) - - # attention on attention - self.attn_on_attn = on_attn - self.to_out = nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim) - - def forward( - self, - x, - context=None, - mask=None, - context_mask=None, - rel_pos=None, - sinusoidal_emb=None, - prev_attn=None, - mem=None - ): - b, n, _, h, talking_heads, device = *x.shape, self.heads, self.talking_heads, x.device - kv_input = default(context, x) - - q_input = x - k_input = kv_input - v_input = kv_input - - if exists(mem): - k_input = torch.cat((mem, k_input), dim=-2) - v_input = torch.cat((mem, v_input), dim=-2) - - if exists(sinusoidal_emb): - # in shortformer, the query would start at a position offset depending on the past cached memory - offset = k_input.shape[-2] - q_input.shape[-2] - q_input = q_input + sinusoidal_emb(q_input, offset=offset) - k_input = k_input + sinusoidal_emb(k_input) - - q = self.to_q(q_input) - k = self.to_k(k_input) - v = self.to_v(v_input) - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v)) - - input_mask = None - if any(map(exists, (mask, context_mask))): - q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool()) - k_mask = q_mask if not exists(context) else context_mask - k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool()) - q_mask = rearrange(q_mask, 'b i -> b () i ()') - k_mask = rearrange(k_mask, 'b j -> b () () j') - input_mask = q_mask * k_mask - - if self.num_mem_kv > 0: - mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v)) - k = torch.cat((mem_k, k), dim=-2) - v = torch.cat((mem_v, v), dim=-2) - if exists(input_mask): - input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True) - - dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale - mask_value = max_neg_value(dots) - - if exists(prev_attn): - dots = dots + prev_attn - - pre_softmax_attn = dots - - if talking_heads: - dots = 
einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous() - - if exists(rel_pos): - dots = rel_pos(dots) - - if exists(input_mask): - dots.masked_fill_(~input_mask, mask_value) - del input_mask - - if self.causal: - i, j = dots.shape[-2:] - r = torch.arange(i, device=device) - mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j') - mask = F.pad(mask, (j - i, 0), value=False) - dots.masked_fill_(mask, mask_value) - del mask - - if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]: - top, _ = dots.topk(self.sparse_topk, dim=-1) - vk = top[..., -1].unsqueeze(-1).expand_as(dots) - mask = dots < vk - dots.masked_fill_(mask, mask_value) - del mask - - attn = self.attn_fn(dots, dim=-1) - post_softmax_attn = attn - - attn = self.dropout(attn) - - if talking_heads: - attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous() - - out = einsum('b h i j, b h j d -> b h i d', attn, v) - out = rearrange(out, 'b h n d -> b n (h d)') - - intermediates = Intermediates( - pre_softmax_attn=pre_softmax_attn, - post_softmax_attn=post_softmax_attn - ) - - return self.to_out(out), intermediates - - -class AttentionLayers(nn.Module): - def __init__( - self, - dim, - depth, - heads=8, - causal=False, - cross_attend=False, - only_cross=False, - use_scalenorm=False, - use_rmsnorm=False, - use_rezero=False, - rel_pos_num_buckets=32, - rel_pos_max_distance=128, - position_infused_attn=False, - custom_layers=None, - sandwich_coef=None, - par_ratio=None, - residual_attn=False, - cross_residual_attn=False, - macaron=False, - pre_norm=True, - gate_residual=False, - **kwargs - ): - super().__init__() - ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) - attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs) - - dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD) - - self.dim = dim - self.depth = depth - self.layers = nn.ModuleList([]) - - self.has_pos_emb = position_infused_attn - self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None - self.rotary_pos_emb = always(None) - - assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance' - self.rel_pos = None - - self.pre_norm = pre_norm - - self.residual_attn = residual_attn - self.cross_residual_attn = cross_residual_attn - - norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm - norm_class = RMSNorm if use_rmsnorm else norm_class - norm_fn = partial(norm_class, dim) - - norm_fn = nn.Identity if use_rezero else norm_fn - branch_fn = Rezero if use_rezero else None - - if cross_attend and not only_cross: - default_block = ('a', 'c', 'f') - elif cross_attend and only_cross: - default_block = ('c', 'f') - else: - default_block = ('a', 'f') - - if macaron: - default_block = ('f',) + default_block - - if exists(custom_layers): - layer_types = custom_layers - elif exists(par_ratio): - par_depth = depth * len(default_block) - assert 1 < par_ratio <= par_depth, 'par ratio out of range' - default_block = tuple(filter(not_equals('f'), default_block)) - par_attn = par_depth // par_ratio - depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper - par_width = (depth_cut + depth_cut // par_attn) // par_attn - assert len(default_block) <= par_width, 'default block is too large for par_ratio' - par_block = default_block + ('f',) * (par_width - len(default_block)) - par_head = par_block * par_attn - layer_types = par_head + ('f',) * (par_depth - 
len(par_head)) - elif exists(sandwich_coef): - assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth' - layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef - else: - layer_types = default_block * depth - - self.layer_types = layer_types - self.num_attn_layers = len(list(filter(equals('a'), layer_types))) - - for layer_type in self.layer_types: - if layer_type == 'a': - layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs) - elif layer_type == 'c': - layer = Attention(dim, heads=heads, **attn_kwargs) - elif layer_type == 'f': - layer = FeedForward(dim, **ff_kwargs) - layer = layer if not macaron else Scale(0.5, layer) - else: - raise Exception(f'invalid layer type {layer_type}') - - if isinstance(layer, Attention) and exists(branch_fn): - layer = branch_fn(layer) - - if gate_residual: - residual_fn = GRUGating(dim) - else: - residual_fn = Residual() - - self.layers.append(nn.ModuleList([ - norm_fn(), - layer, - residual_fn - ])) - - def forward( - self, - x, - context=None, - mask=None, - context_mask=None, - mems=None, - return_hiddens=False - ): - hiddens = [] - intermediates = [] - prev_attn = None - prev_cross_attn = None - - mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers - - for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)): - is_last = ind == (len(self.layers) - 1) - - if layer_type == 'a': - hiddens.append(x) - layer_mem = mems.pop(0) - - residual = x - - if self.pre_norm: - x = norm(x) - - if layer_type == 'a': - out, inter = block(x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos, - prev_attn=prev_attn, mem=layer_mem) - elif layer_type == 'c': - out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn) - elif layer_type == 'f': - out = block(x) - - x = residual_fn(out, residual) - - if layer_type in ('a', 'c'): - intermediates.append(inter) - - if layer_type == 'a' and self.residual_attn: - prev_attn = inter.pre_softmax_attn - elif layer_type == 'c' and self.cross_residual_attn: - prev_cross_attn = inter.pre_softmax_attn - - if not self.pre_norm and not is_last: - x = norm(x) - - if return_hiddens: - intermediates = LayerIntermediates( - hiddens=hiddens, - attn_intermediates=intermediates - ) - - return x, intermediates - - return x - - -class Encoder(AttentionLayers): - def __init__(self, **kwargs): - assert 'causal' not in kwargs, 'cannot set causality on encoder' - super().__init__(causal=False, **kwargs) - - - -class TransformerWrapper(nn.Module): - def __init__( - self, - *, - num_tokens, - max_seq_len, - attn_layers, - emb_dim=None, - max_mem_len=0., - emb_dropout=0., - num_memory_tokens=None, - tie_embedding=False, - use_pos_emb=True - ): - super().__init__() - assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' - - dim = attn_layers.dim - emb_dim = default(emb_dim, dim) - - self.max_seq_len = max_seq_len - self.max_mem_len = max_mem_len - self.num_tokens = num_tokens - - self.token_emb = nn.Embedding(num_tokens, emb_dim) - self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if ( - use_pos_emb and not attn_layers.has_pos_emb) else always(0) - self.emb_dropout = nn.Dropout(emb_dropout) - - self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() - self.attn_layers = attn_layers - self.norm = nn.LayerNorm(dim) - - self.init_() - - self.to_logits 
= nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t() - - # memory tokens (like [cls]) from Memory Transformers paper - num_memory_tokens = default(num_memory_tokens, 0) - self.num_memory_tokens = num_memory_tokens - if num_memory_tokens > 0: - self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) - - # let funnel encoder know number of memory tokens, if specified - if hasattr(attn_layers, 'num_memory_tokens'): - attn_layers.num_memory_tokens = num_memory_tokens - - def init_(self): - nn.init.normal_(self.token_emb.weight, std=0.02) - - def forward( - self, - x, - return_embeddings=False, - mask=None, - return_mems=False, - return_attn=False, - mems=None, - **kwargs - ): - b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens - x = self.token_emb(x) - x += self.pos_emb(x) - x = self.emb_dropout(x) - - x = self.project_emb(x) - - if num_mem > 0: - mem = repeat(self.memory_tokens, 'n d -> b n d', b=b) - x = torch.cat((mem, x), dim=1) - - # auto-handle masking after appending memory tokens - if exists(mask): - mask = F.pad(mask, (num_mem, 0), value=True) - - x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs) - x = self.norm(x) - - mem, x = x[:, :num_mem], x[:, num_mem:] - - out = self.to_logits(x) if not return_embeddings else x - - if return_mems: - hiddens = intermediates.hiddens - new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens - new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems)) - return out, new_mems - - if return_attn: - attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) - return out, attn_maps - - return out - diff --git a/spaces/PSLD/PSLD/stable-diffusion/scripts/download_first_stages.sh b/spaces/PSLD/PSLD/stable-diffusion/scripts/download_first_stages.sh deleted file mode 100644 index a8d79e99ccdff0a8d8762f23f3c0642401f32f6c..0000000000000000000000000000000000000000 --- a/spaces/PSLD/PSLD/stable-diffusion/scripts/download_first_stages.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -wget -O models/first_stage_models/kl-f4/model.zip https://ommer-lab.com/files/latent-diffusion/kl-f4.zip -wget -O models/first_stage_models/kl-f8/model.zip https://ommer-lab.com/files/latent-diffusion/kl-f8.zip -wget -O models/first_stage_models/kl-f16/model.zip https://ommer-lab.com/files/latent-diffusion/kl-f16.zip -wget -O models/first_stage_models/kl-f32/model.zip https://ommer-lab.com/files/latent-diffusion/kl-f32.zip -wget -O models/first_stage_models/vq-f4/model.zip https://ommer-lab.com/files/latent-diffusion/vq-f4.zip -wget -O models/first_stage_models/vq-f4-noattn/model.zip https://ommer-lab.com/files/latent-diffusion/vq-f4-noattn.zip -wget -O models/first_stage_models/vq-f8/model.zip https://ommer-lab.com/files/latent-diffusion/vq-f8.zip -wget -O models/first_stage_models/vq-f8-n256/model.zip https://ommer-lab.com/files/latent-diffusion/vq-f8-n256.zip -wget -O models/first_stage_models/vq-f16/model.zip https://ommer-lab.com/files/latent-diffusion/vq-f16.zip - - - -cd models/first_stage_models/kl-f4 -unzip -o model.zip - -cd ../kl-f8 -unzip -o model.zip - -cd ../kl-f16 -unzip -o model.zip - -cd ../kl-f32 -unzip -o model.zip - -cd ../vq-f4 -unzip -o model.zip - -cd ../vq-f4-noattn -unzip -o model.zip - -cd ../vq-f8 -unzip -o model.zip - -cd ../vq-f8-n256 -unzip -o model.zip - -cd ../vq-f16 -unzip -o model.zip - -cd ../.. 
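# ---- Illustrative usage sketch (not part of the repository; refers back to the
# x-transformers-style classes defined above, before the shell script) ----
# The hyperparameters below are assumptions chosen only to show how Encoder and
# TransformerWrapper compose; the snippet presumes those class definitions are in scope.
import torch

model = TransformerWrapper(
    num_tokens=256,                                    # vocabulary size (assumed)
    max_seq_len=1024,                                  # maximum sequence length (assumed)
    attn_layers=Encoder(dim=512, depth=6, heads=8),    # stack of self-attention + feed-forward blocks
)

token_ids = torch.randint(0, 256, (2, 128))            # batch of 2 sequences of 128 token ids
logits = model(token_ids)                              # (2, 128, 256): per-token logits over the vocabulary
hidden = model(token_ids, return_embeddings=True)      # (2, 128, 512): final hidden states before to_logits
# ---- end of sketch ----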
\ No newline at end of file diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/poll.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/poll.go deleted file mode 100644 index 930137f8af294233bbbe8239f58ddb211d1db2b7..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/poll.go and /dev/null differ diff --git a/spaces/PeepDaSlan9/AutoGPT/tests/unit/test_browse_scrape_links.py b/spaces/PeepDaSlan9/AutoGPT/tests/unit/test_browse_scrape_links.py deleted file mode 100644 index 0a3340e7397a997da96b8ab9828954230e1a3c20..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/AutoGPT/tests/unit/test_browse_scrape_links.py +++ /dev/null @@ -1,118 +0,0 @@ -# Generated by CodiumAI - -# Dependencies: -# pip install pytest-mock -import pytest - -from autogpt.commands.web_requests import scrape_links - -""" -Code Analysis - -Objective: -The objective of the 'scrape_links' function is to scrape hyperlinks from a -given URL and return them in a formatted way. - -Inputs: -- url: a string representing the URL to be scraped. - -Flow: -1. Send a GET request to the given URL using the requests library and the user agent header from the config file. -2. Check if the response contains an HTTP error. If it does, return "error". -3. Parse the HTML content of the response using the BeautifulSoup library. -4. Remove any script and style tags from the parsed HTML. -5. Extract all hyperlinks from the parsed HTML using the 'extract_hyperlinks' function. -6. Format the extracted hyperlinks using the 'format_hyperlinks' function. -7. Return the formatted hyperlinks. - -Outputs: -- A list of formatted hyperlinks. - -Additional aspects: -- The function uses the 'requests' and 'BeautifulSoup' libraries to send HTTP -requests and parse HTML content, respectively. -- The 'extract_hyperlinks' function is called to extract hyperlinks from the parsed HTML. -- The 'format_hyperlinks' function is called to format the extracted hyperlinks. -- The function checks for HTTP errors and returns "error" if any are found. -""" - - -class TestScrapeLinks: - # Tests that the function returns a list of formatted hyperlinks when - # provided with a valid url that returns a webpage with hyperlinks. - def test_valid_url_with_hyperlinks(self): - url = "https://www.google.com" - result = scrape_links(url) - assert len(result) > 0 - assert isinstance(result, list) - assert isinstance(result[0], str) - - # Tests that the function returns correctly formatted hyperlinks when given a valid url. - def test_valid_url(self, mocker): - # Mock the requests.get() function to return a response with sample HTML containing hyperlinks - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = ( - "Google" - ) - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with a valid URL - result = scrape_links("https://www.example.com") - - # Assert that the function returns correctly formatted hyperlinks - assert result == ["Google (https://www.google.com)"] - - # Tests that the function returns "error" when given an invalid url. 
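    # ---- Illustrative sketch (not from the original repository) ----
    # The "Flow" described in the module docstring above can be approximated with plain
    # requests + BeautifulSoup. Header handling, error text, and link formatting here are
    # assumptions, not the actual autogpt.commands.web_requests implementation.
    #
    #     import requests
    #     from bs4 import BeautifulSoup
    #
    #     def scrape_links_sketch(url):
    #         response = requests.get(url, headers={"User-Agent": "Mozilla/5.0"})
    #         if response.status_code >= 400:
    #             return "Error: HTTP %d" % response.status_code
    #         soup = BeautifulSoup(response.text, "html.parser")
    #         for tag in soup(["script", "style"]):
    #             tag.decompose()                      # drop script and style blocks
    #         # extract hyperlinks and format them as "text (href)"
    #         return ["%s (%s)" % (a.get_text(strip=True), a["href"])
    #                 for a in soup.find_all("a", href=True)]
    # ---- end of sketch; the invalid-URL test described in the comment above follows ----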
- def test_invalid_url(self, mocker): - # Mock the requests.get() function to return an HTTP error response - mock_response = mocker.Mock() - mock_response.status_code = 404 - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with an invalid URL - result = scrape_links("https://www.invalidurl.com") - - # Assert that the function returns "error" - assert "Error:" in result - - # Tests that the function returns an empty list when the html contains no hyperlinks. - def test_no_hyperlinks(self, mocker): - # Mock the requests.get() function to return a response with sample HTML containing no hyperlinks - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = "

        <html><body><p>No hyperlinks here</p></body></html>
      " - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with a URL containing no hyperlinks - result = scrape_links("https://www.example.com") - - # Assert that the function returns an empty list - assert result == [] - - # Tests that scrape_links() correctly extracts and formats hyperlinks from - # a sample HTML containing a few hyperlinks. - def test_scrape_links_with_few_hyperlinks(self, mocker): - # Mock the requests.get() function to return a response with a sample HTML containing hyperlinks - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = """ - - - - - - - - """ - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function being tested - result = scrape_links("https://www.example.com") - - # Assert that the function returns a list of formatted hyperlinks - assert isinstance(result, list) - assert len(result) == 3 - assert result[0] == "Google (https://www.google.com)" - assert result[1] == "GitHub (https://github.com)" - assert result[2] == "CodiumAI (https://www.codium.ai)" diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/parallel/scatter_gather.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/parallel/scatter_gather.py deleted file mode 100644 index 900ff88566f8f14830590459dc4fd16d4b382e47..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/parallel/scatter_gather.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from torch.nn.parallel._functions import Scatter as OrigScatter - -from ._functions import Scatter -from .data_container import DataContainer - - -def scatter(inputs, target_gpus, dim=0): - """Scatter inputs to target gpus. - - The only difference from original :func:`scatter` is to add support for - :type:`~mmcv.parallel.DataContainer`. - """ - - def scatter_map(obj): - if isinstance(obj, torch.Tensor): - if target_gpus != [-1]: - return OrigScatter.apply(target_gpus, None, dim, obj) - else: - # for CPU inference we use self-implemented scatter - return Scatter.forward(target_gpus, obj) - if isinstance(obj, DataContainer): - if obj.cpu_only: - return obj.data - else: - return Scatter.forward(target_gpus, obj.data) - if isinstance(obj, tuple) and len(obj) > 0: - return list(zip(*map(scatter_map, obj))) - if isinstance(obj, list) and len(obj) > 0: - out = list(map(list, zip(*map(scatter_map, obj)))) - return out - if isinstance(obj, dict) and len(obj) > 0: - out = list(map(type(obj), zip(*map(scatter_map, obj.items())))) - return out - return [obj for targets in target_gpus] - - # After scatter_map is called, a scatter_map cell will exist. This cell - # has a reference to the actual function scatter_map, which has references - # to a closure that has a reference to the scatter_map cell (because the - # fn is recursive). 
To avoid this reference cycle, we set the function to - # None, clearing the cell - try: - return scatter_map(inputs) - finally: - scatter_map = None - - -def scatter_kwargs(inputs, kwargs, target_gpus, dim=0): - """Scatter with support for kwargs dictionary.""" - inputs = scatter(inputs, target_gpus, dim) if inputs else [] - kwargs = scatter(kwargs, target_gpus, dim) if kwargs else [] - if len(inputs) < len(kwargs): - inputs.extend([() for _ in range(len(kwargs) - len(inputs))]) - elif len(kwargs) < len(inputs): - kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))]) - inputs = tuple(inputs) - kwargs = tuple(kwargs) - return inputs, kwargs diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/modeling/registry.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/modeling/registry.py deleted file mode 100644 index dca130c07ef96b6f4bcb649675ec4d405b7a4728..0000000000000000000000000000000000000000 --- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/modeling/registry.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. - -from maskrcnn_benchmark.utils.registry import Registry - -BACKBONES = Registry() - -LANGUAGE_BACKBONES = Registry() - -ROI_BOX_FEATURE_EXTRACTORS = Registry() -RPN_HEADS = Registry() diff --git a/spaces/Plachta/VITS-Umamusume-voice-synthesizer/commons.py b/spaces/Plachta/VITS-Umamusume-voice-synthesizer/commons.py deleted file mode 100644 index 2153153f527d94e2abb641ea00c80b518ff6c5bd..0000000000000000000000000000000000000000 --- a/spaces/Plachta/VITS-Umamusume-voice-synthesizer/commons.py +++ /dev/null @@ -1,97 +0,0 @@ -import math -import torch -from torch.nn import functional as F -import torch.jit - - -def script_method(fn, _rcb=None): - return fn - - -def script(obj, optimize=True, _frames_up=0, _rcb=None): - return obj - - -torch.jit.script_method = script_method -torch.jit.script = script - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = 
torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path diff --git a/spaces/RamAnanth1/T2I-Adapter/ldm/data/base.py b/spaces/RamAnanth1/T2I-Adapter/ldm/data/base.py deleted file mode 100644 index b196c2f7aa583a3e8bc4aad9f943df0c4dae0da7..0000000000000000000000000000000000000000 --- a/spaces/RamAnanth1/T2I-Adapter/ldm/data/base.py +++ /dev/null @@ -1,23 +0,0 @@ -from abc import abstractmethod -from torch.utils.data import Dataset, ConcatDataset, ChainDataset, IterableDataset - - -class Txt2ImgIterableBaseDataset(IterableDataset): - ''' - Define an interface to make the IterableDatasets for text2img data chainable - ''' - def __init__(self, num_records=0, valid_ids=None, size=256): - super().__init__() - self.num_records = num_records - self.valid_ids = valid_ids - self.sample_ids = valid_ids - self.size = size - - print(f'{self.__class__.__name__} dataset contains {self.__len__()} examples.') - - def __len__(self): - return self.num_records - - @abstractmethod - def __iter__(self): - pass \ No newline at end of file diff --git a/spaces/RamAnanth1/Youtube-to-HF-Dataset/interpreter/__init__.py b/spaces/RamAnanth1/Youtube-to-HF-Dataset/interpreter/__init__.py deleted file mode 100644 index d96b087245cc31d925fc49a0400b4b6513a86ce1..0000000000000000000000000000000000000000 --- a/spaces/RamAnanth1/Youtube-to-HF-Dataset/interpreter/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .interpreter import * -from .whisper_interpreter import * \ No newline at end of file diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/build_meta.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/build_meta.py deleted file mode 100644 index e8f1c72d598d6d5a03b75f68a6d567b1d6b1e9a2..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/build_meta.py +++ /dev/null @@ -1,511 +0,0 @@ -"""A PEP 517 interface to setuptools - -Previously, when a user or a command line tool (let's call it a "frontend") -needed to make a request of setuptools to take a certain action, for -example, generating a list of installation requirements, the frontend would -would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line. - -PEP 517 defines a different method of interfacing with setuptools. Rather -than calling "setup.py" directly, the frontend should: - - 1. Set the current directory to the directory with a setup.py file - 2. Import this module into a safe python interpreter (one in which - setuptools can potentially set global variables or crash hard). - 3. Call one of the functions defined in PEP 517. - -What each function does is defined in PEP 517. 
However, here is a "casual" -definition of the functions (this definition should not be relied on for -bug reports or API stability): - - - `build_wheel`: build a wheel in the folder and return the basename - - `get_requires_for_build_wheel`: get the `setup_requires` to build - - `prepare_metadata_for_build_wheel`: get the `install_requires` - - `build_sdist`: build an sdist in the folder and return the basename - - `get_requires_for_build_sdist`: get the `setup_requires` to build - -Again, this is not a formal definition! Just a "taste" of the module. -""" - -import io -import os -import shlex -import sys -import tokenize -import shutil -import contextlib -import tempfile -import warnings -from pathlib import Path -from typing import Dict, Iterator, List, Optional, Union - -import setuptools -import distutils -from . import errors -from ._path import same_path -from ._reqs import parse_strings -from ._deprecation_warning import SetuptoolsDeprecationWarning -from distutils.util import strtobool - - -__all__ = ['get_requires_for_build_sdist', - 'get_requires_for_build_wheel', - 'prepare_metadata_for_build_wheel', - 'build_wheel', - 'build_sdist', - 'get_requires_for_build_editable', - 'prepare_metadata_for_build_editable', - 'build_editable', - '__legacy__', - 'SetupRequirementsError'] - -SETUPTOOLS_ENABLE_FEATURES = os.getenv("SETUPTOOLS_ENABLE_FEATURES", "").lower() -LEGACY_EDITABLE = "legacy-editable" in SETUPTOOLS_ENABLE_FEATURES.replace("_", "-") - - -class SetupRequirementsError(BaseException): - def __init__(self, specifiers): - self.specifiers = specifiers - - -class Distribution(setuptools.dist.Distribution): - def fetch_build_eggs(self, specifiers): - specifier_list = list(parse_strings(specifiers)) - - raise SetupRequirementsError(specifier_list) - - @classmethod - @contextlib.contextmanager - def patch(cls): - """ - Replace - distutils.dist.Distribution with this class - for the duration of this context. - """ - orig = distutils.core.Distribution - distutils.core.Distribution = cls - try: - yield - finally: - distutils.core.Distribution = orig - - -@contextlib.contextmanager -def no_install_setup_requires(): - """Temporarily disable installing setup_requires - - Under PEP 517, the backend reports build dependencies to the frontend, - and the frontend is responsible for ensuring they're installed. - So setuptools (acting as a backend) should not try to install them. - """ - orig = setuptools._install_setup_requires - setuptools._install_setup_requires = lambda attrs: None - try: - yield - finally: - setuptools._install_setup_requires = orig - - -def _get_immediate_subdirectories(a_dir): - return [name for name in os.listdir(a_dir) - if os.path.isdir(os.path.join(a_dir, name))] - - -def _file_with_extension(directory, extension): - matching = ( - f for f in os.listdir(directory) - if f.endswith(extension) - ) - try: - file, = matching - except ValueError: - raise ValueError( - 'No distribution was found. 
Ensure that `setup.py` ' - 'is not empty and that it calls `setup()`.') - return file - - -def _open_setup_script(setup_script): - if not os.path.exists(setup_script): - # Supply a default setup.py - return io.StringIO(u"from setuptools import setup; setup()") - - return getattr(tokenize, 'open', open)(setup_script) - - -@contextlib.contextmanager -def suppress_known_deprecation(): - with warnings.catch_warnings(): - warnings.filterwarnings('ignore', 'setup.py install is deprecated') - yield - - -_ConfigSettings = Optional[Dict[str, Union[str, List[str], None]]] -""" -Currently the user can run:: - - pip install -e . --config-settings key=value - python -m build -C--key=value -C key=value - -- pip will pass both key and value as strings and overwriting repeated keys - (pypa/pip#11059). -- build will accumulate values associated with repeated keys in a list. - It will also accept keys with no associated value. - This means that an option passed by build can be ``str | list[str] | None``. -- PEP 517 specifies that ``config_settings`` is an optional dict. -""" - - -class _ConfigSettingsTranslator: - """Translate ``config_settings`` into distutils-style command arguments. - Only a limited number of options is currently supported. - """ - # See pypa/setuptools#1928 pypa/setuptools#2491 - - def _get_config(self, key: str, config_settings: _ConfigSettings) -> List[str]: - """ - Get the value of a specific key in ``config_settings`` as a list of strings. - - >>> fn = _ConfigSettingsTranslator()._get_config - >>> fn("--global-option", None) - [] - >>> fn("--global-option", {}) - [] - >>> fn("--global-option", {'--global-option': 'foo'}) - ['foo'] - >>> fn("--global-option", {'--global-option': ['foo']}) - ['foo'] - >>> fn("--global-option", {'--global-option': 'foo'}) - ['foo'] - >>> fn("--global-option", {'--global-option': 'foo bar'}) - ['foo', 'bar'] - """ - cfg = config_settings or {} - opts = cfg.get(key) or [] - return shlex.split(opts) if isinstance(opts, str) else opts - - def _valid_global_options(self): - """Global options accepted by setuptools (e.g. quiet or verbose).""" - options = (opt[:2] for opt in setuptools.dist.Distribution.global_options) - return {flag for long_and_short in options for flag in long_and_short if flag} - - def _global_args(self, config_settings: _ConfigSettings) -> Iterator[str]: - """ - Let the user specify ``verbose`` or ``quiet`` + escape hatch via - ``--global-option``. - Note: ``-v``, ``-vv``, ``-vvv`` have similar effects in setuptools, - so we just have to cover the basic scenario ``-v``. 
- - >>> fn = _ConfigSettingsTranslator()._global_args - >>> list(fn(None)) - [] - >>> list(fn({"verbose": "False"})) - ['-q'] - >>> list(fn({"verbose": "1"})) - ['-v'] - >>> list(fn({"--verbose": None})) - ['-v'] - >>> list(fn({"verbose": "true", "--global-option": "-q --no-user-cfg"})) - ['-v', '-q', '--no-user-cfg'] - >>> list(fn({"--quiet": None})) - ['-q'] - """ - cfg = config_settings or {} - falsey = {"false", "no", "0", "off"} - if "verbose" in cfg or "--verbose" in cfg: - level = str(cfg.get("verbose") or cfg.get("--verbose") or "1") - yield ("-q" if level.lower() in falsey else "-v") - if "quiet" in cfg or "--quiet" in cfg: - level = str(cfg.get("quiet") or cfg.get("--quiet") or "1") - yield ("-v" if level.lower() in falsey else "-q") - - valid = self._valid_global_options() - args = self._get_config("--global-option", config_settings) - yield from (arg for arg in args if arg.strip("-") in valid) - - def __dist_info_args(self, config_settings: _ConfigSettings) -> Iterator[str]: - """ - The ``dist_info`` command accepts ``tag-date`` and ``tag-build``. - - .. warning:: - We cannot use this yet as it requires the ``sdist`` and ``bdist_wheel`` - commands run in ``build_sdist`` and ``build_wheel`` to re-use the egg-info - directory created in ``prepare_metadata_for_build_wheel``. - - >>> fn = _ConfigSettingsTranslator()._ConfigSettingsTranslator__dist_info_args - >>> list(fn(None)) - [] - >>> list(fn({"tag-date": "False"})) - ['--no-date'] - >>> list(fn({"tag-date": None})) - ['--no-date'] - >>> list(fn({"tag-date": "true", "tag-build": ".a"})) - ['--tag-date', '--tag-build', '.a'] - """ - cfg = config_settings or {} - if "tag-date" in cfg: - val = strtobool(str(cfg["tag-date"] or "false")) - yield ("--tag-date" if val else "--no-date") - if "tag-build" in cfg: - yield from ["--tag-build", str(cfg["tag-build"])] - - def _editable_args(self, config_settings: _ConfigSettings) -> Iterator[str]: - """ - The ``editable_wheel`` command accepts ``editable-mode=strict``. - - >>> fn = _ConfigSettingsTranslator()._editable_args - >>> list(fn(None)) - [] - >>> list(fn({"editable-mode": "strict"})) - ['--mode', 'strict'] - """ - cfg = config_settings or {} - mode = cfg.get("editable-mode") or cfg.get("editable_mode") - if not mode: - return - yield from ["--mode", str(mode)] - - def _arbitrary_args(self, config_settings: _ConfigSettings) -> Iterator[str]: - """ - Users may expect to pass arbitrary lists of arguments to a command - via "--global-option" (example provided in PEP 517 of a "escape hatch"). - - >>> fn = _ConfigSettingsTranslator()._arbitrary_args - >>> list(fn(None)) - [] - >>> list(fn({})) - [] - >>> list(fn({'--build-option': 'foo'})) - ['foo'] - >>> list(fn({'--build-option': ['foo']})) - ['foo'] - >>> list(fn({'--build-option': 'foo'})) - ['foo'] - >>> list(fn({'--build-option': 'foo bar'})) - ['foo', 'bar'] - >>> warnings.simplefilter('error', SetuptoolsDeprecationWarning) - >>> list(fn({'--global-option': 'foo'})) # doctest: +IGNORE_EXCEPTION_DETAIL - Traceback (most recent call last): - SetuptoolsDeprecationWarning: ...arguments given via `--global-option`... - """ - args = self._get_config("--global-option", config_settings) - global_opts = self._valid_global_options() - bad_args = [] - - for arg in args: - if arg.strip("-") not in global_opts: - bad_args.append(arg) - yield arg - - yield from self._get_config("--build-option", config_settings) - - if bad_args: - msg = f""" - The arguments {bad_args!r} were given via `--global-option`. 
- Please use `--build-option` instead, - `--global-option` is reserved to flags like `--verbose` or `--quiet`. - """ - warnings.warn(msg, SetuptoolsDeprecationWarning) - - -class _BuildMetaBackend(_ConfigSettingsTranslator): - def _get_build_requires(self, config_settings, requirements): - sys.argv = [ - *sys.argv[:1], - *self._global_args(config_settings), - "egg_info", - *self._arbitrary_args(config_settings), - ] - try: - with Distribution.patch(): - self.run_setup() - except SetupRequirementsError as e: - requirements += e.specifiers - - return requirements - - def run_setup(self, setup_script='setup.py'): - # Note that we can reuse our build directory between calls - # Correctness comes first, then optimization later - __file__ = setup_script - __name__ = '__main__' - - with _open_setup_script(__file__) as f: - code = f.read().replace(r'\r\n', r'\n') - - exec(code, locals()) - - def get_requires_for_build_wheel(self, config_settings=None): - return self._get_build_requires(config_settings, requirements=['wheel']) - - def get_requires_for_build_sdist(self, config_settings=None): - return self._get_build_requires(config_settings, requirements=[]) - - def _bubble_up_info_directory(self, metadata_directory: str, suffix: str) -> str: - """ - PEP 517 requires that the .dist-info directory be placed in the - metadata_directory. To comply, we MUST copy the directory to the root. - - Returns the basename of the info directory, e.g. `proj-0.0.0.dist-info`. - """ - info_dir = self._find_info_directory(metadata_directory, suffix) - if not same_path(info_dir.parent, metadata_directory): - shutil.move(str(info_dir), metadata_directory) - # PEP 517 allow other files and dirs to exist in metadata_directory - return info_dir.name - - def _find_info_directory(self, metadata_directory: str, suffix: str) -> Path: - for parent, dirs, _ in os.walk(metadata_directory): - candidates = [f for f in dirs if f.endswith(suffix)] - - if len(candidates) != 0 or len(dirs) != 1: - assert len(candidates) == 1, f"Multiple {suffix} directories found" - return Path(parent, candidates[0]) - - msg = f"No {suffix} directory found in {metadata_directory}" - raise errors.InternalError(msg) - - def prepare_metadata_for_build_wheel(self, metadata_directory, - config_settings=None): - sys.argv = [ - *sys.argv[:1], - *self._global_args(config_settings), - "dist_info", - "--output-dir", metadata_directory, - "--keep-egg-info", - ] - with no_install_setup_requires(): - self.run_setup() - - self._bubble_up_info_directory(metadata_directory, ".egg-info") - return self._bubble_up_info_directory(metadata_directory, ".dist-info") - - def _build_with_temp_dir(self, setup_command, result_extension, - result_directory, config_settings): - result_directory = os.path.abspath(result_directory) - - # Build in a temporary directory, then copy to the target. - os.makedirs(result_directory, exist_ok=True) - with tempfile.TemporaryDirectory(dir=result_directory) as tmp_dist_dir: - sys.argv = [ - *sys.argv[:1], - *self._global_args(config_settings), - *setup_command, - "--dist-dir", tmp_dist_dir, - *self._arbitrary_args(config_settings), - ] - with no_install_setup_requires(): - self.run_setup() - - result_basename = _file_with_extension( - tmp_dist_dir, result_extension) - result_path = os.path.join(result_directory, result_basename) - if os.path.exists(result_path): - # os.rename will fail overwriting on non-Unix. 
- os.remove(result_path) - os.rename(os.path.join(tmp_dist_dir, result_basename), result_path) - - return result_basename - - def build_wheel(self, wheel_directory, config_settings=None, - metadata_directory=None): - with suppress_known_deprecation(): - return self._build_with_temp_dir(['bdist_wheel'], '.whl', - wheel_directory, config_settings) - - def build_sdist(self, sdist_directory, config_settings=None): - return self._build_with_temp_dir(['sdist', '--formats', 'gztar'], - '.tar.gz', sdist_directory, - config_settings) - - def _get_dist_info_dir(self, metadata_directory: Optional[str]) -> Optional[str]: - if not metadata_directory: - return None - dist_info_candidates = list(Path(metadata_directory).glob("*.dist-info")) - assert len(dist_info_candidates) <= 1 - return str(dist_info_candidates[0]) if dist_info_candidates else None - - if not LEGACY_EDITABLE: - - # PEP660 hooks: - # build_editable - # get_requires_for_build_editable - # prepare_metadata_for_build_editable - def build_editable( - self, wheel_directory, config_settings=None, metadata_directory=None - ): - # XXX can or should we hide our editable_wheel command normally? - info_dir = self._get_dist_info_dir(metadata_directory) - opts = ["--dist-info-dir", info_dir] if info_dir else [] - cmd = ["editable_wheel", *opts, *self._editable_args(config_settings)] - with suppress_known_deprecation(): - return self._build_with_temp_dir( - cmd, ".whl", wheel_directory, config_settings - ) - - def get_requires_for_build_editable(self, config_settings=None): - return self.get_requires_for_build_wheel(config_settings) - - def prepare_metadata_for_build_editable(self, metadata_directory, - config_settings=None): - return self.prepare_metadata_for_build_wheel( - metadata_directory, config_settings - ) - - -class _BuildMetaLegacyBackend(_BuildMetaBackend): - """Compatibility backend for setuptools - - This is a version of setuptools.build_meta that endeavors - to maintain backwards - compatibility with pre-PEP 517 modes of invocation. It - exists as a temporary - bridge between the old packaging mechanism and the new - packaging mechanism, - and will eventually be removed. - """ - def run_setup(self, setup_script='setup.py'): - # In order to maintain compatibility with scripts assuming that - # the setup.py script is in a directory on the PYTHONPATH, inject - # '' into sys.path. (pypa/setuptools#1642) - sys_path = list(sys.path) # Save the original path - - script_dir = os.path.dirname(os.path.abspath(setup_script)) - if script_dir not in sys.path: - sys.path.insert(0, script_dir) - - # Some setup.py scripts (e.g. in pygame and numpy) use sys.argv[0] to - # get the directory of the source code. They expect it to refer to the - # setup.py script. - sys_argv_0 = sys.argv[0] - sys.argv[0] = setup_script - - try: - super(_BuildMetaLegacyBackend, - self).run_setup(setup_script=setup_script) - finally: - # While PEP 517 frontends should be calling each hook in a fresh - # subprocess according to the standard (and thus it should not be - # strictly necessary to restore the old sys.path), we'll restore - # the original path so that the path manipulation does not persist - # within the hook after run_setup is called. 
- sys.path[:] = sys_path - sys.argv[0] = sys_argv_0 - - -# The primary backend -_BACKEND = _BuildMetaBackend() - -get_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel -get_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist -prepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel -build_wheel = _BACKEND.build_wheel -build_sdist = _BACKEND.build_sdist - -if not LEGACY_EDITABLE: - get_requires_for_build_editable = _BACKEND.get_requires_for_build_editable - prepare_metadata_for_build_editable = _BACKEND.prepare_metadata_for_build_editable - build_editable = _BACKEND.build_editable - - -# The legacy backend -__legacy__ = _BuildMetaLegacyBackend() diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/command/rotate.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/command/rotate.py deleted file mode 100644 index 74795ba922bb376e24858760e63dc9124ef22b9f..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/command/rotate.py +++ /dev/null @@ -1,64 +0,0 @@ -from distutils.util import convert_path -from distutils import log -from distutils.errors import DistutilsOptionError -import os -import shutil - -from setuptools import Command - - -class rotate(Command): - """Delete older distributions""" - - description = "delete older distributions, keeping N newest files" - user_options = [ - ('match=', 'm', "patterns to match (required)"), - ('dist-dir=', 'd', "directory where the distributions are"), - ('keep=', 'k', "number of matching distributions to keep"), - ] - - boolean_options = [] - - def initialize_options(self): - self.match = None - self.dist_dir = None - self.keep = None - - def finalize_options(self): - if self.match is None: - raise DistutilsOptionError( - "Must specify one or more (comma-separated) match patterns " - "(e.g. 
'.zip' or '.egg')" - ) - if self.keep is None: - raise DistutilsOptionError("Must specify number of files to keep") - try: - self.keep = int(self.keep) - except ValueError as e: - raise DistutilsOptionError("--keep must be an integer") from e - if isinstance(self.match, str): - self.match = [ - convert_path(p.strip()) for p in self.match.split(',') - ] - self.set_undefined_options('bdist', ('dist_dir', 'dist_dir')) - - def run(self): - self.run_command("egg_info") - from glob import glob - - for pattern in self.match: - pattern = self.distribution.get_name() + '*' + pattern - files = glob(os.path.join(self.dist_dir, pattern)) - files = [(os.path.getmtime(f), f) for f in files] - files.sort() - files.reverse() - - log.info("%d file(s) matching %s", len(files), pattern) - files = files[self.keep:] - for (t, f) in files: - log.info("Deleting %s", f) - if not self.dry_run: - if os.path.isdir(f): - shutil.rmtree(f) - else: - os.unlink(f) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/config/_validate_pyproject/fastjsonschema_exceptions.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/config/_validate_pyproject/fastjsonschema_exceptions.py deleted file mode 100644 index d2dddd6a106f021a4723c1e8f5953ccc09e55e1f..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/config/_validate_pyproject/fastjsonschema_exceptions.py +++ /dev/null @@ -1,51 +0,0 @@ -import re - - -SPLIT_RE = re.compile(r'[\.\[\]]+') - - -class JsonSchemaException(ValueError): - """ - Base exception of ``fastjsonschema`` library. - """ - - -class JsonSchemaValueException(JsonSchemaException): - """ - Exception raised by validation function. Available properties: - - * ``message`` containing human-readable information what is wrong (e.g. ``data.property[index] must be smaller than or equal to 42``), - * invalid ``value`` (e.g. ``60``), - * ``name`` of a path in the data structure (e.g. ``data.property[index]``), - * ``path`` as an array in the data structure (e.g. ``['data', 'property', 'index']``), - * the whole ``definition`` which the ``value`` has to fulfil (e.g. ``{'type': 'number', 'maximum': 42}``), - * ``rule`` which the ``value`` is breaking (e.g. ``maximum``) - * and ``rule_definition`` (e.g. ``42``). - - .. versionchanged:: 2.14.0 - Added all extra properties. - """ - - def __init__(self, message, value=None, name=None, definition=None, rule=None): - super().__init__(message) - self.message = message - self.value = value - self.name = name - self.definition = definition - self.rule = rule - - @property - def path(self): - return [item for item in SPLIT_RE.split(self.name) if item != ''] - - @property - def rule_definition(self): - if not self.rule or not self.definition: - return None - return self.definition.get(self.rule) - - -class JsonSchemaDefinitionException(JsonSchemaException): - """ - Exception raised by generator of validation function. 
- """ diff --git a/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/src/datasets/__init__.py b/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/src/datasets/__init__.py deleted file mode 100644 index 4feb648440e6c8db60de3aa475cd82ce460dcc1c..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/src/datasets/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .scannet import ScanNetDataset -from .megadepth import MegaDepthDataset diff --git a/spaces/Realcat/image-matching-webui/third_party/TopicFM/src/models/utils/coarse_matching.py b/spaces/Realcat/image-matching-webui/third_party/TopicFM/src/models/utils/coarse_matching.py deleted file mode 100644 index 0cd0ea3db496fe50f82bf7660696e96e26b23484..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/TopicFM/src/models/utils/coarse_matching.py +++ /dev/null @@ -1,235 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from einops.einops import rearrange - -INF = 1e9 - - -def mask_border(m, b: int, v): - """Mask borders with value - Args: - m (torch.Tensor): [N, H0, W0, H1, W1] - b (int) - v (m.dtype) - """ - if b <= 0: - return - - m[:, :b] = v - m[:, :, :b] = v - m[:, :, :, :b] = v - m[:, :, :, :, :b] = v - m[:, -b:] = v - m[:, :, -b:] = v - m[:, :, :, -b:] = v - m[:, :, :, :, -b:] = v - - -def mask_border_with_padding(m, bd, v, p_m0, p_m1): - if bd <= 0: - return - - m[:, :bd] = v - m[:, :, :bd] = v - m[:, :, :, :bd] = v - m[:, :, :, :, :bd] = v - - h0s, w0s = p_m0.sum(1).max(-1)[0].int(), p_m0.sum(-1).max(-1)[0].int() - h1s, w1s = p_m1.sum(1).max(-1)[0].int(), p_m1.sum(-1).max(-1)[0].int() - for b_idx, (h0, w0, h1, w1) in enumerate(zip(h0s, w0s, h1s, w1s)): - m[b_idx, h0 - bd :] = v - m[b_idx, :, w0 - bd :] = v - m[b_idx, :, :, h1 - bd :] = v - m[b_idx, :, :, :, w1 - bd :] = v - - -def compute_max_candidates(p_m0, p_m1): - """Compute the max candidates of all pairs within a batch - - Args: - p_m0, p_m1 (torch.Tensor): padded masks - """ - h0s, w0s = p_m0.sum(1).max(-1)[0], p_m0.sum(-1).max(-1)[0] - h1s, w1s = p_m1.sum(1).max(-1)[0], p_m1.sum(-1).max(-1)[0] - max_cand = torch.sum(torch.min(torch.stack([h0s * w0s, h1s * w1s], -1), -1)[0]) - return max_cand - - -class CoarseMatching(nn.Module): - def __init__(self, config): - super().__init__() - self.config = config - # general config - self.thr = config["thr"] - self.border_rm = config["border_rm"] - # -- # for trainig fine-level LoFTR - self.train_coarse_percent = config["train_coarse_percent"] - self.train_pad_num_gt_min = config["train_pad_num_gt_min"] - - # we provide 2 options for differentiable matching - self.match_type = config["match_type"] - if self.match_type == "dual_softmax": - self.temperature = config["dsmax_temperature"] - elif self.match_type == "sinkhorn": - try: - from .superglue import log_optimal_transport - except ImportError: - raise ImportError("download superglue.py first!") - self.log_optimal_transport = log_optimal_transport - self.bin_score = nn.Parameter( - torch.tensor(config["skh_init_bin_score"], requires_grad=True) - ) - self.skh_iters = config["skh_iters"] - self.skh_prefilter = config["skh_prefilter"] - else: - raise NotImplementedError() - - def forward(self, data): - """ - Args: - data (dict) - Update: - data (dict): { - 'b_ids' (torch.Tensor): [M'], - 'i_ids' (torch.Tensor): [M'], - 'j_ids' (torch.Tensor): [M'], - 'gt_mask' (torch.Tensor): [M'], - 'mkpts0_c' (torch.Tensor): [M, 2], - 'mkpts1_c' (torch.Tensor): [M, 
2], - 'mconf' (torch.Tensor): [M]} - NOTE: M' != M during training. - """ - conf_matrix = data["conf_matrix"] - # predict coarse matches from conf_matrix - data.update(**self.get_coarse_match(conf_matrix, data)) - - @torch.no_grad() - def get_coarse_match(self, conf_matrix, data): - """ - Args: - conf_matrix (torch.Tensor): [N, L, S] - data (dict): with keys ['hw0_i', 'hw1_i', 'hw0_c', 'hw1_c'] - Returns: - coarse_matches (dict): { - 'b_ids' (torch.Tensor): [M'], - 'i_ids' (torch.Tensor): [M'], - 'j_ids' (torch.Tensor): [M'], - 'gt_mask' (torch.Tensor): [M'], - 'm_bids' (torch.Tensor): [M], - 'mkpts0_c' (torch.Tensor): [M, 2], - 'mkpts1_c' (torch.Tensor): [M, 2], - 'mconf' (torch.Tensor): [M]} - """ - axes_lengths = { - "h0c": data["hw0_c"][0], - "w0c": data["hw0_c"][1], - "h1c": data["hw1_c"][0], - "w1c": data["hw1_c"][1], - } - _device = conf_matrix.device - # 1. confidence thresholding - mask = conf_matrix > self.thr - mask = rearrange( - mask, "b (h0c w0c) (h1c w1c) -> b h0c w0c h1c w1c", **axes_lengths - ) - if "mask0" not in data: - mask_border(mask, self.border_rm, False) - else: - mask_border_with_padding( - mask, self.border_rm, False, data["mask0"], data["mask1"] - ) - mask = rearrange( - mask, "b h0c w0c h1c w1c -> b (h0c w0c) (h1c w1c)", **axes_lengths - ) - - # 2. mutual nearest - mask = ( - mask - * (conf_matrix == conf_matrix.max(dim=2, keepdim=True)[0]) - * (conf_matrix == conf_matrix.max(dim=1, keepdim=True)[0]) - ) - - # 3. find all valid coarse matches - # this only works when at most one `True` in each row - mask_v, all_j_ids = mask.max(dim=2) - b_ids, i_ids = torch.where(mask_v) - j_ids = all_j_ids[b_ids, i_ids] - mconf = conf_matrix[b_ids, i_ids, j_ids] - - # 4. Random sampling of training samples for fine-level LoFTR - # (optional) pad samples with gt coarse-level matches - if self.training: - # NOTE: - # The sampling is performed across all pairs in a batch without manually balancing - # #samples for fine-level increases w.r.t. batch_size - if "mask0" not in data: - num_candidates_max = mask.size(0) * max(mask.size(1), mask.size(2)) - else: - num_candidates_max = compute_max_candidates( - data["mask0"], data["mask1"] - ) - num_matches_train = int(num_candidates_max * self.train_coarse_percent) - num_matches_pred = len(b_ids) - assert ( - self.train_pad_num_gt_min < num_matches_train - ), "min-num-gt-pad should be less than num-train-matches" - - # pred_indices is to select from prediction - if num_matches_pred <= num_matches_train - self.train_pad_num_gt_min: - pred_indices = torch.arange(num_matches_pred, device=_device) - else: - pred_indices = torch.randint( - num_matches_pred, - (num_matches_train - self.train_pad_num_gt_min,), - device=_device, - ) - - # gt_pad_indices is to select from gt padding. e.g. max(3787-4800, 200) - gt_pad_indices = torch.randint( - len(data["spv_b_ids"]), - (max(num_matches_train - num_matches_pred, self.train_pad_num_gt_min),), - device=_device, - ) - mconf_gt = torch.zeros( - len(data["spv_b_ids"]), device=_device - ) # set conf of gt paddings to all zero - - b_ids, i_ids, j_ids, mconf = map( - lambda x, y: torch.cat([x[pred_indices], y[gt_pad_indices]], dim=0), - *zip( - [b_ids, data["spv_b_ids"]], - [i_ids, data["spv_i_ids"]], - [j_ids, data["spv_j_ids"]], - [mconf, mconf_gt], - ) - ) - - # These matches select patches that feed into fine-level network - coarse_matches = {"b_ids": b_ids, "i_ids": i_ids, "j_ids": j_ids} - - # 4. 
Update with matches in original image resolution - scale = data["hw0_i"][0] / data["hw0_c"][0] - scale0 = scale * data["scale0"][b_ids] if "scale0" in data else scale - scale1 = scale * data["scale1"][b_ids] if "scale1" in data else scale - mkpts0_c = ( - torch.stack([i_ids % data["hw0_c"][1], i_ids // data["hw0_c"][1]], dim=1) - * scale0 - ) - mkpts1_c = ( - torch.stack([j_ids % data["hw1_c"][1], j_ids // data["hw1_c"][1]], dim=1) - * scale1 - ) - - # These matches is the current prediction (for visualization) - coarse_matches.update( - { - "gt_mask": mconf == 0, - "m_bids": b_ids[mconf != 0], # mconf == 0 => gt matches - "mkpts0_c": mkpts0_c[mconf != 0], - "mkpts1_c": mkpts1_c[mconf != 0], - "mconf": mconf[mconf != 0], - } - ) - - return coarse_matches diff --git a/spaces/RedBaron5/PatentSolver/App/bin/CorpusProcessor.py b/spaces/RedBaron5/PatentSolver/App/bin/CorpusProcessor.py deleted file mode 100644 index 4de678e6134b9c3dbae142472527528bdf5e25e9..0000000000000000000000000000000000000000 --- a/spaces/RedBaron5/PatentSolver/App/bin/CorpusProcessor.py +++ /dev/null @@ -1,460 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - - -import json -import os -import re -import matplotlib.pyplot as plt -import numpy as np -import Levenshtein -from io import StringIO -from App.bin import constants -import hashlib -from collections import OrderedDict -from App.bin.InformationExtractor import InformationExtractor -from App.bin.ParameterExtractor import ParameterExtractor -from App.bin.TechnologyFinder import TechnologyFinder -from App.bin.InformationExtractor_Claims import InformationExtractorClaims - -class CorpusProcessor(object): - - def __init__(self, patents,input_folder, file_extension): - self.patents = patents - self.input_folder = input_folder - self.file_extension = file_extension - print("Processing started") - - - def make_graphic (self, sizes, text, colors, labels): - - col = [[i / 255. 
for i in c] for c in colors] - - fig, ax = plt.subplots() - ax.axis('equal') - width = 0.35 - kwargs = dict(colors=col, startangle=180) - outside, _ = ax.pie(sizes, radius=1, pctdistance=1 - width / 2, labels=labels, **kwargs) - plt.setp(outside, width=width, edgecolor='white') - - kwargs = dict(size=20, fontweight='bold', va='center') - ax.text(0, 0, text, ha='center', **kwargs) - - plt.show() - - def change_keys(self, dictionnary, number): - number = number+'-' - if type(dictionnary) is dict: - return dict([(number+str(k) , self.change_keys(v, number)) for k, v in dictionnary.items()]) - else: - return dictionnary - - def process_corpus(self): - - count_abstract = 0 - count_claims = 0 - count_description = 0 - count_patent = 0 - total_sentences_number =0 - count_concepts_solupart = 0 - count_concepts_problem = 0 - patents = self.patents - input_folder = self.input_folder - file_extension = self.file_extension - project_folder = os.path.basename(os.path.normpath(input_folder)) - graph_folder = constants.GRAPH_FOLDER + project_folder+"/" - extracted_concepts = [] - output_result = [] - parameters_graph = [] - reduced_content = [] - patent_corpus = [] - source_list = [] - parameters_list =[] - technologies_graph =[] - - - for patent_file in patents: - output_json_claims ={} - total_sentences_number_claims =0 - - if type(patent_file) is dict: - patent_file = json.dumps(patent_file) - - read_patent = StringIO(patent_file) - patent = json.load(read_patent) - nNumber = patent['number'] - aAbstract = patent['abstract'] - cClaims = patent['claims'] - dDescription = patent['description'] - - root_img_url = 'https://worldwide.espacenet.com/espacenetImage.jpg?flavour=firstPageClipping&locale=en_EP&FT=D&' - root_pdf_url = 'https://worldwide.espacenet.com/publicationDetails/originalDocument?' 
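                # Illustration (hypothetical patent number, not taken from the corpus): for
                # nNumber = "EP1234567A1" the regex below yields CC = "EP",
                # NR = "1234567A1" (group 2 keeps the kind code; re.sub only strips spaces)
                # and KC = "A1", so urlImg ends with "&CC=EP&NR=1234567A1&KC=A1".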
- - if nNumber is not None: - match = re.search('(^[a-zA-Z]+)(([0-9]+)\s?([a-zA-Z0-9_]+$))', nNumber) - # CC for country code - CC = match.group(1) - # NR for Number - NR = match.group(2) - NR = re.sub(r'\s', '', NR) - # KC for Kind code - KC = match.group(4) - - urlImg = root_img_url + '&CC=' + CC + '&NR=' + NR + '&KC=' + KC - urlPDF = root_pdf_url + 'CC=' + CC + '&NR=' + NR + '&KC=' + KC + '&FT=D&ND=3&date=' + '&DB=&locale=en_EP#' - - - - #Find a more elegant way to do it - patent_content = aAbstract + cClaims + dDescription - patent_content = patent_content.splitlines() - # for line in patent_content: - # line = self.dataCleaner(line) - # reduced_content.append(line) - - for line in patent_content: - get_parameters = ParameterExtractor(line) - parameters = get_parameters.extract_parameters() - if parameters: - parameters_list.extend( parameters) - for i in parameters_list: - for j in parameters_list: - if i != j and len(i.split()) == 1: - if j.find(i) > -1 and i in parameters_list: - - parameters_list.remove(i) - - parameters_list=list(set(parameters_list)) - if len(parameters_list) > 50: - for i in parameters_list: - for j in parameters_list: - if i!=j: - comp = Levenshtein.ratio(i, j) - if comp >=.4 and i in parameters_list and j in parameters_list: - if len(i) > len(j): - # print('{} is near duplicate of {}'.format(i, j)) - parameters_list.remove(i) - - for el in parameters_list: - if len(el.split()) == 1: - parameters_list.remove(el) - - parameters = dict(enumerate(parameters_list, 1)) - - parameters = self.change_keys(parameters, nNumber.lower()) - - - - source = input_folder+"/"+nNumber+file_extension.strip("*") - - parameters_array = OrderedDict({ - "concept": { - "source": source, - "valeurs": parameters, - "image": urlImg, - "pdf": urlPDF - } - - }) - pParameters= json.dumps(parameters_array, sort_keys=OrderedDict, indent=4, separators=(',', ': ')) - - parameters_graph.append(pParameters) - - if dDescription !="" or cClaims!="": - count_description +=1 - extract_concepts = InformationExtractor(dDescription,input_folder, file_extension, nNumber ) - output_json, total_sentences_number = extract_concepts.get_from_description() - extract_concepts_claims = InformationExtractorClaims(cClaims,input_folder, file_extension, nNumber ) - output_json_claims_result= extract_concepts_claims.main() - if output_json_claims_result is not None: - output_json_claims, total_sentences_number_claims = output_json_claims_result - - count_claims += 1 - if output_json is not None: - if type(output_json) is dict: - output_json = json.dumps(output_json) - extracted_concepts.append(output_json) - total_sentences_number += total_sentences_number - if output_json_claims is not None : - if type(output_json_claims) is dict: - output_json_claims = json.dumps(output_json_claims) - extracted_concepts.append(output_json_claims) - total_sentences_number += total_sentences_number_claims - elif cClaims !="": - count_claims +=1 - print('Processing claims') - else: - count_abstract +=1 - print("processing abstract") - count_patent +=1 - - - #print(source) - source_list.append(source) - patent_corpus.append(reduced_content) - patent_corpus = dict(zip(source_list, patent_corpus)) - ''' - get_patent_technologies = TechnologyFinder(patent_corpus) - technologies = get_patent_technologies.get_technologies() - - - for source_file, technologies_list in technologies.items(): - - technologies_array = OrderedDict({ - "concept": { - "source": source_file, - "values": technologies_list - } - - }) - tTechnologies = 
json.dumps(technologies_array, sort_keys=OrderedDict, indent=4, separators=(',', ': ')) - - technologies_graph.append(tTechnologies) -''' - print(type(extracted_concepts)) - header = '{' - graph = '"problem_graph": [%s],' % ','.join(extracted_concepts) - parameters_output = '"parameters": [%s]' % ','.join(parameters_graph) - #technologies_output = '"technologies": [%s]' % ','.join(technologies_graph) - footer = '}' - #output_result.extend((header, graph, parameters_output,technologies_output, footer )) - output_result.extend((header, graph, parameters_output, footer)) - - output_result = "".join(output_result) - output_result = re.sub(r'\,{2,}', ',', output_result) - output_result = re.sub(r'\}\,\]', '}]', output_result) - - - # exit() - # print(output_result) - concepts_json = json.loads(output_result) - - # concepts_json = json.loads(concepts_json) - - - count_concepts = len(concepts_json['problem_graph']) - for item, value in concepts_json.items(): - #if cle == "type" and value =="partialSolution": - # print ("yes") - for element in value: - for cle, valeur in element.items(): - for k,v in valeur.items(): - if k == "type" and v =="partialSolution": - count_concepts_solupart += 1 - elif k == "type" and v =="problem": - count_concepts_problem += 1 - json_write_to_file = json.dumps(concepts_json, sort_keys=False, indent=4, separators=(',', ': ')) - #print(concepts_json.keys()) - - # original code - with open(graph_folder+"graph.json", 'w') as json_graph: - - # with open(graph_folder + 'graph.json', 'w') as json_graph: - json_graph.write(json_write_to_file) - number_neutre = count_concepts - count_concepts_problem - count_concepts_solupart - print("Le corpus contenait %s brevets dont %s abstract, %s revendications et %s descriptions" % (count_patent, count_abstract, count_claims, count_description)) - print("%s phrases ont été analysée(s)" % (total_sentences_number)) - print("%s concepts ont été trouvé(s) dont %s problèmes, %s solutions partielles et %s neutres" % (count_concepts, count_concepts_problem, count_concepts_solupart, number_neutre)) - - #Display graphics - first_color = (46, 204, 113) - second_color = (245, 176, 65) - #self.make_graphic([count_concepts_problem, count_concepts_solupart], "Ratio",[first_color,second_color],['Problems','Partial Solutions']) - return json_write_to_file - - def process_corpus_json(self): - - count_abstract = 0 - count_claims = 0 - count_description = 0 - count_patent = 0 - total_sentences_number = 0 - count_concepts_solupart = 0 - count_concepts_problem = 0 - patents = self.patents - input_folder = self.input_folder - file_extension = self.file_extension - project_folder = os.path.basename(os.path.normpath(input_folder)) - graph_folder = constants.GRAPH_FOLDER + project_folder + "/" - extracted_concepts = [] - output_result = [] - parameters_graph = [] - reduced_content = [] - patent_corpus = [] - source_list = [] - parameters_list = [] - technologies_graph = [] - for patent_file in patents: - # print(type(patent_file)) - - #if type(patent_file) is dict: - patent_file = json.dumps(patent_file) - - read_patent = StringIO(patent_file) - patent = json.load(read_patent) - # print(type(patent)) - filename = patent['filename'] - nNumber = patent['number'] - aAbstract = patent['abstract'] - cClaims = patent['claims'] - dDescription = patent['description'] - - # Find a more elegant way to do it - patent_content = aAbstract + cClaims + dDescription - patent_content = patent_content.splitlines() - # for line in patent_content: - # line = self.dataCleaner(line) 
- # reduced_content.append(line) - - for line in patent_content: - get_parameters = ParameterExtractor(line) - parameters = get_parameters.extract_parameters() - if parameters: - parameters_list.extend(parameters) - for i in parameters_list: - for j in parameters_list: - if i != j and len(i.split()) == 1: - if j.find(i) > -1 and i in parameters_list: - - parameters_list.remove(i) - - parameters_list = list(set(parameters_list)) - - if len(parameters_list) > 50: - for i in parameters_list: - for j in parameters_list: - if i!=j: - comp = Levenshtein.ratio(i, j) - if comp >=.4 and i in parameters_list and j in parameters_list: - if len(i) > len(j): - # print('{} is near duplicate of {}'.format(i, j)) - parameters_list.remove(i) - - for el in parameters_list: - if len(el.split()) == 1: - parameters_list.remove(el) - - - - - - print('{} {}'.format('Taille: ', len(parameters_list))) - - - parameters = dict(enumerate(parameters_list, 1)) - - parameters = self.change_keys(parameters, nNumber.lower()) - - source = input_folder + "/" + nNumber + file_extension.strip("*") - - parameters_array = OrderedDict({ - "concept": { - "source": source, - "valeurs": parameters - } - - }) - pParameters = json.dumps(parameters_array, sort_keys=OrderedDict, indent=4, separators=(',', ': ')) - - parameters_graph.append(pParameters) - - #if dDescription != "" and cClaims!="": - if dDescription != "": - count_description += 1 - extract_concepts = InformationExtractor(dDescription, input_folder, file_extension, filename) - output_json, total_sentences_number_d = extract_concepts.get_from_description() - if output_json != "": - extracted_concepts.append(output_json) - total_sentences_number += total_sentences_number_d - #count_claims += 1 - #extract_concepts = InformationExtractor(cClaims, input_folder, file_extension, nNumber) - #output_json, total_sentences_number_c = extract_concepts.get_from_claims() - #if output_json != "": - #extracted_concepts.append(output_json) - #total_sentences_number_c += total_sentences_number_c - #total_sentences_number = total_sentences_number_c+total_sentences_number_d - - elif cClaims != "": - count_claims += 1 - extract_concepts = InformationExtractor(cClaims, input_folder, file_extension, nNumber) - output_json, total_sentences_number = extract_concepts.get_from_claims() - if output_json != "": - extracted_concepts.append(output_json) - total_sentences_number += total_sentences_number - elif dDescription != "": - count_description += 1 - extract_concepts = InformationExtractor(dDescription, input_folder, file_extension, nNumber) - output_json, total_sentences_number = extract_concepts.get_from_description() - if output_json != "": - extracted_concepts.append(output_json) - total_sentences_number += total_sentences_number - count_claims += 1 - - else: - count_abstract += 1 - print("processing abstract") - count_patent += 1 - - # print(source) - # source_list.append(source) - # patent_corpus.append(reduced_content) - # patent_corpus = dict(zip(source_list, patent_corpus)) - ''' - get_patent_technologies = TechnologyFinder(patent_corpus) - technologies = get_patent_technologies.get_technologies() - - - for source_file, technologies_list in technologies.items(): - - technologies_array = OrderedDict({ - "concept": { - "source": source_file, - "values": technologies_list - } - - }) - tTechnologies = json.dumps(technologies_array, sort_keys=OrderedDict, indent=4, separators=(',', ': ')) - - technologies_graph.append(tTechnologies) -''' - - header = '{' - graph = '"problem_graph": [%s],' % 
','.join(extracted_concepts) - parameters_output = '"parameters": [%s]' % ','.join(parameters_graph) - # technologies_output = '"technologies": [%s]' % ','.join(technologies_graph) - footer = '}' - # output_result.extend((header, graph, parameters_output,technologies_output, footer )) - output_result.extend((header, graph, parameters_output, footer)) - - output_result = "".join(output_result) - output_result = re.sub(r'\,{2,}', ',', output_result) - output_result = re.sub(r'\}\,\]', '}]', output_result) - concepts_json = json.loads(output_result) - - count_concepts = len(concepts_json['problem_graph']) - for item, value in concepts_json.items(): - # if cle == "type" and value =="partialSolution": - # print ("yes") - for element in value: - for cle, valeur in element.items(): - for k, v in valeur.items(): - if k == "type" and v == "partialSolution": - count_concepts_solupart += 1 - elif k == "type" and v == "problem": - count_concepts_problem += 1 - json_write_to_file = json.dumps(concepts_json, sort_keys=False, indent=4, separators=(',', ': ')) - # print(concepts_json.keys()) - with open(graph_folder + "graph.json", 'w') as json_graph: - json_graph.write(json_write_to_file) - - print("Le corpus contenait %s brevets dont %s abstract, %s revendications et %s descriptions" % ( - count_patent, count_abstract, count_claims, count_description)) - print("%s phrases ont été analysée(s)" % (total_sentences_number)) - print("%s concepts ont été trouvé(s) dont %s problèmes et %s solutions partielles" % ( - count_concepts, count_concepts_problem, count_concepts_solupart)) - - # Display graphics - first_color = (46, 204, 113) - second_color = (245, 176, 65) - # self.make_graphic([count_concepts_problem, count_concepts_solupart], "Ratio",[first_color,second_color],['Problems','Partial Solutions']) - return json_write_to_file \ No newline at end of file diff --git a/spaces/Reeve/Ohayou_Face/datasets/gt_res_dataset.py b/spaces/Reeve/Ohayou_Face/datasets/gt_res_dataset.py deleted file mode 100644 index 8892efabcfad7b902c5d49e4b496001241e7ed99..0000000000000000000000000000000000000000 --- a/spaces/Reeve/Ohayou_Face/datasets/gt_res_dataset.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/python -# encoding: utf-8 -import os -from torch.utils.data import Dataset -from PIL import Image - - -class GTResDataset(Dataset): - - def __init__(self, root_path, gt_dir=None, transform=None, transform_train=None): - self.pairs = [] - for f in os.listdir(root_path): - image_path = os.path.join(root_path, f) - gt_path = os.path.join(gt_dir, f) - if f.endswith(".jpg") or f.endswith(".png"): - self.pairs.append([image_path, gt_path.replace('.png', '.jpg'), None]) - self.transform = transform - self.transform_train = transform_train - - def __len__(self): - return len(self.pairs) - - def __getitem__(self, index): - from_path, to_path, _ = self.pairs[index] - from_im = Image.open(from_path).convert('RGB') - to_im = Image.open(to_path).convert('RGB') - - if self.transform: - to_im = self.transform(to_im) - from_im = self.transform(from_im) - - return from_im, to_im diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/utils/ext_loader.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/utils/ext_loader.py deleted file mode 100644 index cd8044f71184fa1081566da0ab771caf5f5b39f8..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/utils/ext_loader.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
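Editorial aside: before the mmcv extension loader that starts below, here is a minimal usage sketch for the `GTResDataset` removed above, which pairs generated result images with ground-truth images by filename. Directory paths, image size, and batch size are placeholders, and the class is assumed to be importable from its original module.

```python
# Usage sketch for GTResDataset (paths and sizes are placeholders).
from torch.utils.data import DataLoader
from torchvision import transforms

transform = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor(),
])

dataset = GTResDataset(root_path='experiments/results',
                       gt_dir='experiments/gt',
                       transform=transform)
loader = DataLoader(dataset, batch_size=4, shuffle=False, num_workers=2)

for result_batch, gt_batch in loader:
    # Each batch is a float tensor of shape (4, 3, 256, 256) after the transform.
    break
```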
-import importlib -import os -import pkgutil -import warnings -from collections import namedtuple - -import torch - -if torch.__version__ != 'parrots': - - def load_ext(name, funcs): - ext = importlib.import_module('annotator.uniformer.mmcv.' + name) - for fun in funcs: - assert hasattr(ext, fun), f'{fun} miss in module {name}' - return ext -else: - from parrots import extension - from parrots.base import ParrotsException - - has_return_value_ops = [ - 'nms', - 'softnms', - 'nms_match', - 'nms_rotated', - 'top_pool_forward', - 'top_pool_backward', - 'bottom_pool_forward', - 'bottom_pool_backward', - 'left_pool_forward', - 'left_pool_backward', - 'right_pool_forward', - 'right_pool_backward', - 'fused_bias_leakyrelu', - 'upfirdn2d', - 'ms_deform_attn_forward', - 'pixel_group', - 'contour_expand', - ] - - def get_fake_func(name, e): - - def fake_func(*args, **kwargs): - warnings.warn(f'{name} is not supported in parrots now') - raise e - - return fake_func - - def load_ext(name, funcs): - ExtModule = namedtuple('ExtModule', funcs) - ext_list = [] - lib_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) - for fun in funcs: - try: - ext_fun = extension.load(fun, name, lib_dir=lib_root) - except ParrotsException as e: - if 'No element registered' not in e.message: - warnings.warn(e.message) - ext_fun = get_fake_func(fun, e) - ext_list.append(ext_fun) - else: - if fun in has_return_value_ops: - ext_list.append(ext_fun.op) - else: - ext_list.append(ext_fun.op_) - return ExtModule(*ext_list) - - -def check_ops_exist(): - ext_loader = pkgutil.find_loader('mmcv._ext') - return ext_loader is not None diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv_custom/__init__.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv_custom/__init__.py deleted file mode 100644 index 4b958738b9fd93bfcec239c550df1d9a44b8c536..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv_custom/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -*- coding: utf-8 -*- - -from .checkpoint import load_checkpoint - -__all__ = ['load_checkpoint'] \ No newline at end of file diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/datasets/__init__.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/datasets/__init__.py deleted file mode 100644 index be4ea28a86e3c165cc2556f860079305f316294e..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/datasets/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset -# from .cityscapes import CityscapesDataset -# from .coco import CocoDataset -# from .custom import CustomDataset -# from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset, -# RepeatDataset) -# from .deepfashion import DeepFashionDataset -# from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset -# from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler -# from .utils import (NumClassCheckHook, get_loading_pipeline, -# replace_ImageToTensor) -# from .voc import VOCDataset -# from .wider_face import WIDERFaceDataset -# from .xml_style import XMLDataset - -# __all__ = [ -# 'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset', -# 'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', -# 'LVISV1Dataset', 'GroupSampler', 'DistributedGroupSampler', -# 'DistributedSampler', 'build_dataloader', 'ConcatDataset', 
'RepeatDataset', -# 'ClassBalancedDataset', 'WIDERFaceDataset', 'DATASETS', 'PIPELINES', -# 'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline', -# 'NumClassCheckHook' -# ] -from .utils import replace_ImageToTensor -__all__ = ['replace_ImageToTensor'] \ No newline at end of file diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/ops/correlation.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/ops/correlation.py deleted file mode 100644 index 3d0b79c301b29915dfaf4d2b1846c59be73127d3..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/ops/correlation.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from torch import Tensor, nn -from torch.autograd import Function -from torch.autograd.function import once_differentiable -from torch.nn.modules.utils import _pair - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext( - '_ext', ['correlation_forward', 'correlation_backward']) - - -class CorrelationFunction(Function): - - @staticmethod - def forward(ctx, - input1, - input2, - kernel_size=1, - max_displacement=1, - stride=1, - padding=1, - dilation=1, - dilation_patch=1): - - ctx.save_for_backward(input1, input2) - - kH, kW = ctx.kernel_size = _pair(kernel_size) - patch_size = max_displacement * 2 + 1 - ctx.patch_size = patch_size - dH, dW = ctx.stride = _pair(stride) - padH, padW = ctx.padding = _pair(padding) - dilationH, dilationW = ctx.dilation = _pair(dilation) - dilation_patchH, dilation_patchW = ctx.dilation_patch = _pair( - dilation_patch) - - output_size = CorrelationFunction._output_size(ctx, input1) - - output = input1.new_zeros(output_size) - - ext_module.correlation_forward( - input1, - input2, - output, - kH=kH, - kW=kW, - patchH=patch_size, - patchW=patch_size, - padH=padH, - padW=padW, - dilationH=dilationH, - dilationW=dilationW, - dilation_patchH=dilation_patchH, - dilation_patchW=dilation_patchW, - dH=dH, - dW=dW) - - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - input1, input2 = ctx.saved_tensors - - kH, kW = ctx.kernel_size - patch_size = ctx.patch_size - padH, padW = ctx.padding - dilationH, dilationW = ctx.dilation - dilation_patchH, dilation_patchW = ctx.dilation_patch - dH, dW = ctx.stride - grad_input1 = torch.zeros_like(input1) - grad_input2 = torch.zeros_like(input2) - - ext_module.correlation_backward( - grad_output, - input1, - input2, - grad_input1, - grad_input2, - kH=kH, - kW=kW, - patchH=patch_size, - patchW=patch_size, - padH=padH, - padW=padW, - dilationH=dilationH, - dilationW=dilationW, - dilation_patchH=dilation_patchH, - dilation_patchW=dilation_patchW, - dH=dH, - dW=dW) - return grad_input1, grad_input2, None, None, None, None, None, None - - @staticmethod - def _output_size(ctx, input1): - iH, iW = input1.size(2), input1.size(3) - batch_size = input1.size(0) - kH, kW = ctx.kernel_size - patch_size = ctx.patch_size - dH, dW = ctx.stride - padH, padW = ctx.padding - dilationH, dilationW = ctx.dilation - dilatedKH = (kH - 1) * dilationH + 1 - dilatedKW = (kW - 1) * dilationW + 1 - - oH = int((iH + 2 * padH - dilatedKH) / dH + 1) - oW = int((iW + 2 * padW - dilatedKW) / dW + 1) - - output_size = (batch_size, patch_size, patch_size, oH, oW) - return output_size - - -class Correlation(nn.Module): - r"""Correlation operator - - This correlation operator works for optical flow correlation computation. 
- - There are two batched tensors with shape :math:`(N, C, H, W)`, - and the correlation output's shape is :math:`(N, max\_displacement \times - 2 + 1, max\_displacement * 2 + 1, H_{out}, W_{out})` - - where - - .. math:: - H_{out} = \left\lfloor\frac{H_{in} + 2 \times padding - - dilation \times (kernel\_size - 1) - 1} - {stride} + 1\right\rfloor - - .. math:: - W_{out} = \left\lfloor\frac{W_{in} + 2 \times padding - dilation - \times (kernel\_size - 1) - 1} - {stride} + 1\right\rfloor - - the correlation item :math:`(N_i, dy, dx)` is formed by taking the sliding - window convolution between input1 and shifted input2, - - .. math:: - Corr(N_i, dx, dy) = - \sum_{c=0}^{C-1} - input1(N_i, c) \star - \mathcal{S}(input2(N_i, c), dy, dx) - - where :math:`\star` is the valid 2d sliding window convolution operator, - and :math:`\mathcal{S}` means shifting the input features (auto-complete - zero marginal), and :math:`dx, dy` are shifting distance, :math:`dx, dy \in - [-max\_displacement \times dilation\_patch, max\_displacement \times - dilation\_patch]`. - - Args: - kernel_size (int): The size of sliding window i.e. local neighborhood - representing the center points and involved in correlation - computation. Defaults to 1. - max_displacement (int): The radius for computing correlation volume, - but the actual working space can be dilated by dilation_patch. - Defaults to 1. - stride (int): The stride of the sliding blocks in the input spatial - dimensions. Defaults to 1. - padding (int): Zero padding added to all four sides of the input1. - Defaults to 0. - dilation (int): The spacing of local neighborhood that will involved - in correlation. Defaults to 1. - dilation_patch (int): The spacing between position need to compute - correlation. Defaults to 1. - """ - - def __init__(self, - kernel_size: int = 1, - max_displacement: int = 1, - stride: int = 1, - padding: int = 0, - dilation: int = 1, - dilation_patch: int = 1) -> None: - super().__init__() - self.kernel_size = kernel_size - self.max_displacement = max_displacement - self.stride = stride - self.padding = padding - self.dilation = dilation - self.dilation_patch = dilation_patch - - def forward(self, input1: Tensor, input2: Tensor) -> Tensor: - return CorrelationFunction.apply(input1, input2, self.kernel_size, - self.max_displacement, self.stride, - self.padding, self.dilation, - self.dilation_patch) - - def __repr__(self) -> str: - s = self.__class__.__name__ - s += f'(kernel_size={self.kernel_size}, ' - s += f'max_displacement={self.max_displacement}, ' - s += f'stride={self.stride}, ' - s += f'padding={self.padding}, ' - s += f'dilation={self.dilation}, ' - s += f'dilation_patch={self.dilation_patch})' - return s diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/runner/checkpoint.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/runner/checkpoint.py deleted file mode 100644 index b29ca320679164432f446adad893e33fb2b4b29e..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/runner/checkpoint.py +++ /dev/null @@ -1,707 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
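Editorial aside: before the checkpoint utilities that follow, a usage sketch for the `Correlation` module documented above. It assumes the compiled mmcv `_ext` extension and a CUDA device are available (the op has no pure-Python fallback); tensor shapes are illustrative only.

```python
# Usage sketch for the Correlation op (requires the compiled mmcv extension).
import torch

corr = Correlation(kernel_size=1, max_displacement=4, stride=1,
                   padding=0, dilation=1, dilation_patch=1)

feat1 = torch.randn(2, 64, 32, 32).cuda()
feat2 = torch.randn(2, 64, 32, 32).cuda()

# Output shape: (N, 2*max_displacement + 1, 2*max_displacement + 1, H_out, W_out)
volume = corr(feat1, feat2)
print(volume.shape)  # torch.Size([2, 9, 9, 32, 32])
```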
-import io -import os -import os.path as osp -import pkgutil -import re -import time -import warnings -from collections import OrderedDict -from importlib import import_module -from tempfile import TemporaryDirectory - -import torch -import torchvision -from torch.optim import Optimizer -from torch.utils import model_zoo - -import annotator.uniformer.mmcv as mmcv -from ..fileio import FileClient -from ..fileio import load as load_file -from ..parallel import is_module_wrapper -from ..utils import mkdir_or_exist -from .dist_utils import get_dist_info - -ENV_MMCV_HOME = 'MMCV_HOME' -ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME' -DEFAULT_CACHE_DIR = '~/.cache' - - -def _get_mmcv_home(): - mmcv_home = os.path.expanduser( - os.getenv( - ENV_MMCV_HOME, - os.path.join( - os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv'))) - - mkdir_or_exist(mmcv_home) - return mmcv_home - - -def load_state_dict(module, state_dict, strict=False, logger=None): - """Load state_dict to a module. - - This method is modified from :meth:`torch.nn.Module.load_state_dict`. - Default value for ``strict`` is set to ``False`` and the message for - param mismatch will be shown even if strict is False. - - Args: - module (Module): Module that receives the state_dict. - state_dict (OrderedDict): Weights. - strict (bool): whether to strictly enforce that the keys - in :attr:`state_dict` match the keys returned by this module's - :meth:`~torch.nn.Module.state_dict` function. Default: ``False``. - logger (:obj:`logging.Logger`, optional): Logger to log the error - message. If not specified, print function will be used. - """ - unexpected_keys = [] - all_missing_keys = [] - err_msg = [] - - metadata = getattr(state_dict, '_metadata', None) - state_dict = state_dict.copy() - if metadata is not None: - state_dict._metadata = metadata - - # use _load_from_state_dict to enable checkpoint version control - def load(module, prefix=''): - # recursively check parallel module in case that the model has a - # complicated structure, e.g., nn.Module(nn.Module(DDP)) - if is_module_wrapper(module): - module = module.module - local_metadata = {} if metadata is None else metadata.get( - prefix[:-1], {}) - module._load_from_state_dict(state_dict, prefix, local_metadata, True, - all_missing_keys, unexpected_keys, - err_msg) - for name, child in module._modules.items(): - if child is not None: - load(child, prefix + name + '.') - - load(module) - load = None # break load->load reference cycle - - # ignore "num_batches_tracked" of BN layers - missing_keys = [ - key for key in all_missing_keys if 'num_batches_tracked' not in key - ] - - if unexpected_keys: - err_msg.append('unexpected key in source ' - f'state_dict: {", ".join(unexpected_keys)}\n') - if missing_keys: - err_msg.append( - f'missing keys in source state_dict: {", ".join(missing_keys)}\n') - - rank, _ = get_dist_info() - if len(err_msg) > 0 and rank == 0: - err_msg.insert( - 0, 'The model and loaded state dict do not match exactly\n') - err_msg = '\n'.join(err_msg) - if strict: - raise RuntimeError(err_msg) - elif logger is not None: - logger.warning(err_msg) - else: - print(err_msg) - - -def get_torchvision_models(): - model_urls = dict() - for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__): - if ispkg: - continue - _zoo = import_module(f'torchvision.models.{name}') - if hasattr(_zoo, 'model_urls'): - _urls = getattr(_zoo, 'model_urls') - model_urls.update(_urls) - return model_urls - - -def get_external_models(): - mmcv_home = _get_mmcv_home() - default_json_path = 
osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json') - default_urls = load_file(default_json_path) - assert isinstance(default_urls, dict) - external_json_path = osp.join(mmcv_home, 'open_mmlab.json') - if osp.exists(external_json_path): - external_urls = load_file(external_json_path) - assert isinstance(external_urls, dict) - default_urls.update(external_urls) - - return default_urls - - -def get_mmcls_models(): - mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json') - mmcls_urls = load_file(mmcls_json_path) - - return mmcls_urls - - -def get_deprecated_model_names(): - deprecate_json_path = osp.join(mmcv.__path__[0], - 'model_zoo/deprecated.json') - deprecate_urls = load_file(deprecate_json_path) - assert isinstance(deprecate_urls, dict) - - return deprecate_urls - - -def _process_mmcls_checkpoint(checkpoint): - state_dict = checkpoint['state_dict'] - new_state_dict = OrderedDict() - for k, v in state_dict.items(): - if k.startswith('backbone.'): - new_state_dict[k[9:]] = v - new_checkpoint = dict(state_dict=new_state_dict) - - return new_checkpoint - - -class CheckpointLoader: - """A general checkpoint loader to manage all schemes.""" - - _schemes = {} - - @classmethod - def _register_scheme(cls, prefixes, loader, force=False): - if isinstance(prefixes, str): - prefixes = [prefixes] - else: - assert isinstance(prefixes, (list, tuple)) - for prefix in prefixes: - if (prefix not in cls._schemes) or force: - cls._schemes[prefix] = loader - else: - raise KeyError( - f'{prefix} is already registered as a loader backend, ' - 'add "force=True" if you want to override it') - # sort, longer prefixes take priority - cls._schemes = OrderedDict( - sorted(cls._schemes.items(), key=lambda t: t[0], reverse=True)) - - @classmethod - def register_scheme(cls, prefixes, loader=None, force=False): - """Register a loader to CheckpointLoader. - - This method can be used as a normal class method or a decorator. - - Args: - prefixes (str or list[str] or tuple[str]): - The prefix of the registered loader. - loader (function, optional): The loader function to be registered. - When this method is used as a decorator, loader is None. - Defaults to None. - force (bool, optional): Whether to override the loader - if the prefix has already been registered. Defaults to False. - """ - - if loader is not None: - cls._register_scheme(prefixes, loader, force=force) - return - - def _register(loader_cls): - cls._register_scheme(prefixes, loader_cls, force=force) - return loader_cls - - return _register - - @classmethod - def _get_checkpoint_loader(cls, path): - """Finds a loader that supports the given path. Falls back to the local - loader if no other loader is found. - - Args: - path (str): checkpoint path - - Returns: - loader (function): checkpoint loader - """ - - for p in cls._schemes: - if path.startswith(p): - return cls._schemes[p] - - @classmethod - def load_checkpoint(cls, filename, map_location=None, logger=None): - """load checkpoint through URL scheme path. - - Args: - filename (str): checkpoint file name with given prefix - map_location (str, optional): Same as :func:`torch.load`. - Default: None - logger (:mod:`logging.Logger`, optional): The logger for message. - Default: None - - Returns: - dict or OrderedDict: The loaded checkpoint. 
- """ - - checkpoint_loader = cls._get_checkpoint_loader(filename) - class_name = checkpoint_loader.__name__ - mmcv.print_log( - f'load checkpoint from {class_name[10:]} path: {filename}', logger) - return checkpoint_loader(filename, map_location) - - -@CheckpointLoader.register_scheme(prefixes='') -def load_from_local(filename, map_location): - """load checkpoint by local file path. - - Args: - filename (str): local checkpoint file path - map_location (str, optional): Same as :func:`torch.load`. - - Returns: - dict or OrderedDict: The loaded checkpoint. - """ - - if not osp.isfile(filename): - raise IOError(f'{filename} is not a checkpoint file') - checkpoint = torch.load(filename, map_location=map_location) - return checkpoint - - -@CheckpointLoader.register_scheme(prefixes=('http://', 'https://')) -def load_from_http(filename, map_location=None, model_dir=None): - """load checkpoint through HTTP or HTTPS scheme path. In distributed - setting, this function only download checkpoint at local rank 0. - - Args: - filename (str): checkpoint file path with modelzoo or - torchvision prefix - map_location (str, optional): Same as :func:`torch.load`. - model_dir (string, optional): directory in which to save the object, - Default: None - - Returns: - dict or OrderedDict: The loaded checkpoint. - """ - rank, world_size = get_dist_info() - rank = int(os.environ.get('LOCAL_RANK', rank)) - if rank == 0: - checkpoint = model_zoo.load_url( - filename, model_dir=model_dir, map_location=map_location) - if world_size > 1: - torch.distributed.barrier() - if rank > 0: - checkpoint = model_zoo.load_url( - filename, model_dir=model_dir, map_location=map_location) - return checkpoint - - -@CheckpointLoader.register_scheme(prefixes='pavi://') -def load_from_pavi(filename, map_location=None): - """load checkpoint through the file path prefixed with pavi. In distributed - setting, this function download ckpt at all ranks to different temporary - directories. - - Args: - filename (str): checkpoint file path with pavi prefix - map_location (str, optional): Same as :func:`torch.load`. - Default: None - - Returns: - dict or OrderedDict: The loaded checkpoint. - """ - assert filename.startswith('pavi://'), \ - f'Expected filename startswith `pavi://`, but get {filename}' - model_path = filename[7:] - - try: - from pavi import modelcloud - except ImportError: - raise ImportError( - 'Please install pavi to load checkpoint from modelcloud.') - - model = modelcloud.get(model_path) - with TemporaryDirectory() as tmp_dir: - downloaded_file = osp.join(tmp_dir, model.name) - model.download(downloaded_file) - checkpoint = torch.load(downloaded_file, map_location=map_location) - return checkpoint - - -@CheckpointLoader.register_scheme(prefixes='s3://') -def load_from_ceph(filename, map_location=None, backend='petrel'): - """load checkpoint through the file path prefixed with s3. In distributed - setting, this function download ckpt at all ranks to different temporary - directories. - - Args: - filename (str): checkpoint file path with s3 prefix - map_location (str, optional): Same as :func:`torch.load`. - backend (str, optional): The storage backend type. Options are 'ceph', - 'petrel'. Default: 'petrel'. - - .. warning:: - :class:`mmcv.fileio.file_client.CephBackend` will be deprecated, - please use :class:`mmcv.fileio.file_client.PetrelBackend` instead. - - Returns: - dict or OrderedDict: The loaded checkpoint. 
- """ - allowed_backends = ['ceph', 'petrel'] - if backend not in allowed_backends: - raise ValueError(f'Load from Backend {backend} is not supported.') - - if backend == 'ceph': - warnings.warn( - 'CephBackend will be deprecated, please use PetrelBackend instead') - - # CephClient and PetrelBackend have the same prefix 's3://' and the latter - # will be chosen as default. If PetrelBackend can not be instantiated - # successfully, the CephClient will be chosen. - try: - file_client = FileClient(backend=backend) - except ImportError: - allowed_backends.remove(backend) - file_client = FileClient(backend=allowed_backends[0]) - - with io.BytesIO(file_client.get(filename)) as buffer: - checkpoint = torch.load(buffer, map_location=map_location) - return checkpoint - - -@CheckpointLoader.register_scheme(prefixes=('modelzoo://', 'torchvision://')) -def load_from_torchvision(filename, map_location=None): - """load checkpoint through the file path prefixed with modelzoo or - torchvision. - - Args: - filename (str): checkpoint file path with modelzoo or - torchvision prefix - map_location (str, optional): Same as :func:`torch.load`. - - Returns: - dict or OrderedDict: The loaded checkpoint. - """ - model_urls = get_torchvision_models() - if filename.startswith('modelzoo://'): - warnings.warn('The URL scheme of "modelzoo://" is deprecated, please ' - 'use "torchvision://" instead') - model_name = filename[11:] - else: - model_name = filename[14:] - return load_from_http(model_urls[model_name], map_location=map_location) - - -@CheckpointLoader.register_scheme(prefixes=('open-mmlab://', 'openmmlab://')) -def load_from_openmmlab(filename, map_location=None): - """load checkpoint through the file path prefixed with open-mmlab or - openmmlab. - - Args: - filename (str): checkpoint file path with open-mmlab or - openmmlab prefix - map_location (str, optional): Same as :func:`torch.load`. - Default: None - - Returns: - dict or OrderedDict: The loaded checkpoint. - """ - - model_urls = get_external_models() - prefix_str = 'open-mmlab://' - if filename.startswith(prefix_str): - model_name = filename[13:] - else: - model_name = filename[12:] - prefix_str = 'openmmlab://' - - deprecated_urls = get_deprecated_model_names() - if model_name in deprecated_urls: - warnings.warn(f'{prefix_str}{model_name} is deprecated in favor ' - f'of {prefix_str}{deprecated_urls[model_name]}') - model_name = deprecated_urls[model_name] - model_url = model_urls[model_name] - # check if is url - if model_url.startswith(('http://', 'https://')): - checkpoint = load_from_http(model_url, map_location=map_location) - else: - filename = osp.join(_get_mmcv_home(), model_url) - if not osp.isfile(filename): - raise IOError(f'{filename} is not a checkpoint file') - checkpoint = torch.load(filename, map_location=map_location) - return checkpoint - - -@CheckpointLoader.register_scheme(prefixes='mmcls://') -def load_from_mmcls(filename, map_location=None): - """load checkpoint through the file path prefixed with mmcls. - - Args: - filename (str): checkpoint file path with mmcls prefix - map_location (str, optional): Same as :func:`torch.load`. - - Returns: - dict or OrderedDict: The loaded checkpoint. 
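Editorial aside: the prefix loaders above (`http://`, `pavi://`, `s3://`, `torchvision://`, `open-mmlab://`, `mmcls://`) are all registered through `CheckpointLoader.register_scheme`. The same decorator can register project-specific schemes; the sketch below uses a hypothetical `myproto://` prefix purely as an illustration — the prefix, the cache path, and the helper name are assumptions, not part of mmcv.

```python
# Illustrative only: register a custom checkpoint scheme with CheckpointLoader.
import torch


@CheckpointLoader.register_scheme(prefixes='myproto://')
def load_from_myproto(filename, map_location=None):
    """Resolve a hypothetical myproto:// URI to a local cache path and load it."""
    local_path = '/tmp/ckpt_cache/' + filename[len('myproto://'):]
    return torch.load(local_path, map_location=map_location)


# Afterwards, load_checkpoint(model, 'myproto://resnet50.pth') would route here.
```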
- """ - - model_urls = get_mmcls_models() - model_name = filename[8:] - checkpoint = load_from_http( - model_urls[model_name], map_location=map_location) - checkpoint = _process_mmcls_checkpoint(checkpoint) - return checkpoint - - -def _load_checkpoint(filename, map_location=None, logger=None): - """Load checkpoint from somewhere (modelzoo, file, url). - - Args: - filename (str): Accept local filepath, URL, ``torchvision://xxx``, - ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for - details. - map_location (str, optional): Same as :func:`torch.load`. - Default: None. - logger (:mod:`logging.Logger`, optional): The logger for error message. - Default: None - - Returns: - dict or OrderedDict: The loaded checkpoint. It can be either an - OrderedDict storing model weights or a dict containing other - information, which depends on the checkpoint. - """ - return CheckpointLoader.load_checkpoint(filename, map_location, logger) - - -def _load_checkpoint_with_prefix(prefix, filename, map_location=None): - """Load partial pretrained model with specific prefix. - - Args: - prefix (str): The prefix of sub-module. - filename (str): Accept local filepath, URL, ``torchvision://xxx``, - ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for - details. - map_location (str | None): Same as :func:`torch.load`. Default: None. - - Returns: - dict or OrderedDict: The loaded checkpoint. - """ - - checkpoint = _load_checkpoint(filename, map_location=map_location) - - if 'state_dict' in checkpoint: - state_dict = checkpoint['state_dict'] - else: - state_dict = checkpoint - if not prefix.endswith('.'): - prefix += '.' - prefix_len = len(prefix) - - state_dict = { - k[prefix_len:]: v - for k, v in state_dict.items() if k.startswith(prefix) - } - - assert state_dict, f'{prefix} is not in the pretrained model' - return state_dict - - -def load_checkpoint(model, - filename, - map_location=None, - strict=False, - logger=None, - revise_keys=[(r'^module\.', '')]): - """Load checkpoint from a file or URI. - - Args: - model (Module): Module to load checkpoint. - filename (str): Accept local filepath, URL, ``torchvision://xxx``, - ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for - details. - map_location (str): Same as :func:`torch.load`. - strict (bool): Whether to allow different params for the model and - checkpoint. - logger (:mod:`logging.Logger` or None): The logger for error message. - revise_keys (list): A list of customized keywords to modify the - state_dict in checkpoint. Each item is a (pattern, replacement) - pair of the regular expression operations. Default: strip - the prefix 'module.' by [(r'^module\\.', '')]. - - Returns: - dict or OrderedDict: The loaded checkpoint. - """ - checkpoint = _load_checkpoint(filename, map_location, logger) - # OrderedDict is a subclass of dict - if not isinstance(checkpoint, dict): - raise RuntimeError( - f'No state_dict found in checkpoint file {filename}') - # get state_dict from checkpoint - if 'state_dict' in checkpoint: - state_dict = checkpoint['state_dict'] - else: - state_dict = checkpoint - - # strip prefix of state_dict - metadata = getattr(state_dict, '_metadata', OrderedDict()) - for p, r in revise_keys: - state_dict = OrderedDict( - {re.sub(p, r, k): v - for k, v in state_dict.items()}) - # Keep metadata in state_dict - state_dict._metadata = metadata - - # load state_dict - load_state_dict(model, state_dict, strict, logger) - return checkpoint - - -def weights_to_cpu(state_dict): - """Copy a model state_dict to cpu. 
- - Args: - state_dict (OrderedDict): Model weights on GPU. - - Returns: - OrderedDict: Model weights on GPU. - """ - state_dict_cpu = OrderedDict() - for key, val in state_dict.items(): - state_dict_cpu[key] = val.cpu() - # Keep metadata in state_dict - state_dict_cpu._metadata = getattr(state_dict, '_metadata', OrderedDict()) - return state_dict_cpu - - -def _save_to_state_dict(module, destination, prefix, keep_vars): - """Saves module state to `destination` dictionary. - - This method is modified from :meth:`torch.nn.Module._save_to_state_dict`. - - Args: - module (nn.Module): The module to generate state_dict. - destination (dict): A dict where state will be stored. - prefix (str): The prefix for parameters and buffers used in this - module. - """ - for name, param in module._parameters.items(): - if param is not None: - destination[prefix + name] = param if keep_vars else param.detach() - for name, buf in module._buffers.items(): - # remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d - if buf is not None: - destination[prefix + name] = buf if keep_vars else buf.detach() - - -def get_state_dict(module, destination=None, prefix='', keep_vars=False): - """Returns a dictionary containing a whole state of the module. - - Both parameters and persistent buffers (e.g. running averages) are - included. Keys are corresponding parameter and buffer names. - - This method is modified from :meth:`torch.nn.Module.state_dict` to - recursively check parallel module in case that the model has a complicated - structure, e.g., nn.Module(nn.Module(DDP)). - - Args: - module (nn.Module): The module to generate state_dict. - destination (OrderedDict): Returned dict for the state of the - module. - prefix (str): Prefix of the key. - keep_vars (bool): Whether to keep the variable property of the - parameters. Default: False. - - Returns: - dict: A dictionary containing a whole state of the module. - """ - # recursively check parallel module in case that the model has a - # complicated structure, e.g., nn.Module(nn.Module(DDP)) - if is_module_wrapper(module): - module = module.module - - # below is the same as torch.nn.Module.state_dict() - if destination is None: - destination = OrderedDict() - destination._metadata = OrderedDict() - destination._metadata[prefix[:-1]] = local_metadata = dict( - version=module._version) - _save_to_state_dict(module, destination, prefix, keep_vars) - for name, child in module._modules.items(): - if child is not None: - get_state_dict( - child, destination, prefix + name + '.', keep_vars=keep_vars) - for hook in module._state_dict_hooks.values(): - hook_result = hook(module, destination, prefix, local_metadata) - if hook_result is not None: - destination = hook_result - return destination - - -def save_checkpoint(model, - filename, - optimizer=None, - meta=None, - file_client_args=None): - """Save checkpoint to file. - - The checkpoint will have 3 fields: ``meta``, ``state_dict`` and - ``optimizer``. By default ``meta`` will contain version and time info. - - Args: - model (Module): Module whose params are to be saved. - filename (str): Checkpoint filename. - optimizer (:obj:`Optimizer`, optional): Optimizer to be saved. - meta (dict, optional): Metadata to be saved in checkpoint. - file_client_args (dict, optional): Arguments to instantiate a - FileClient. See :class:`mmcv.fileio.FileClient` for details. - Default: None. 
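Editorial aside: `load_checkpoint` above and `save_checkpoint`, whose definition continues below, cover the usual round trip, including stripping the `module.` prefix that `DataParallel` wrappers leave on parameter names. A minimal sketch, assuming both helpers are importable and using placeholder paths:

```python
# Minimal round-trip sketch for the checkpoint helpers (paths are placeholders).
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(16, 4)
optimizer = optim.SGD(model.parameters(), lr=0.01)

save_checkpoint(model, 'work_dirs/demo/latest.pth', optimizer=optimizer,
                meta=dict(epoch=1, iter=100))

# revise_keys strips a leading 'module.' so DataParallel checkpoints load cleanly.
checkpoint = load_checkpoint(model, 'work_dirs/demo/latest.pth',
                             map_location='cpu', strict=True,
                             revise_keys=[(r'^module\.', '')])
```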
- `New in version 1.3.16.` - """ - if meta is None: - meta = {} - elif not isinstance(meta, dict): - raise TypeError(f'meta must be a dict or None, but got {type(meta)}') - meta.update(mmcv_version=mmcv.__version__, time=time.asctime()) - - if is_module_wrapper(model): - model = model.module - - if hasattr(model, 'CLASSES') and model.CLASSES is not None: - # save class name to the meta - meta.update(CLASSES=model.CLASSES) - - checkpoint = { - 'meta': meta, - 'state_dict': weights_to_cpu(get_state_dict(model)) - } - # save optimizer state dict in the checkpoint - if isinstance(optimizer, Optimizer): - checkpoint['optimizer'] = optimizer.state_dict() - elif isinstance(optimizer, dict): - checkpoint['optimizer'] = {} - for name, optim in optimizer.items(): - checkpoint['optimizer'][name] = optim.state_dict() - - if filename.startswith('pavi://'): - if file_client_args is not None: - raise ValueError( - 'file_client_args should be "None" if filename starts with' - f'"pavi://", but got {file_client_args}') - try: - from pavi import modelcloud - from pavi import exception - except ImportError: - raise ImportError( - 'Please install pavi to load checkpoint from modelcloud.') - model_path = filename[7:] - root = modelcloud.Folder() - model_dir, model_name = osp.split(model_path) - try: - model = modelcloud.get(model_dir) - except exception.NodeNotFoundError: - model = root.create_training_model(model_dir) - with TemporaryDirectory() as tmp_dir: - checkpoint_file = osp.join(tmp_dir, model_name) - with open(checkpoint_file, 'wb') as f: - torch.save(checkpoint, f) - f.flush() - model.create_file(checkpoint_file, name=model_name) - else: - file_client = FileClient.infer_client(file_client_args, filename) - with io.BytesIO() as f: - torch.save(checkpoint, f) - file_client.put(f.getvalue(), filename) diff --git a/spaces/Rongjiehuang/GenerSpeech/data_gen/tts/wav_processors/common_processors.py b/spaces/Rongjiehuang/GenerSpeech/data_gen/tts/wav_processors/common_processors.py deleted file mode 100644 index de0b49f4a31cb6737f2cffc6c8d010d88d11c853..0000000000000000000000000000000000000000 --- a/spaces/Rongjiehuang/GenerSpeech/data_gen/tts/wav_processors/common_processors.py +++ /dev/null @@ -1,86 +0,0 @@ -import os -import subprocess -import librosa -import numpy as np -from data_gen.tts.wav_processors.base_processor import BaseWavProcessor, register_wav_processors -from data_gen.tts.data_gen_utils import trim_long_silences -from utils.audio import save_wav -from utils.rnnoise import rnnoise -from utils.hparams import hparams - - -@register_wav_processors(name='sox_to_wav') -class ConvertToWavProcessor(BaseWavProcessor): - @property - def name(self): - return 'ToWav' - - def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args): - if input_fn[-4:] == '.wav': - return input_fn, sr - else: - output_fn = self.output_fn(input_fn) - subprocess.check_call(f'sox -v 0.95 "{input_fn}" -t wav "{output_fn}"', shell=True) - return output_fn, sr - - -@register_wav_processors(name='sox_resample') -class ResampleProcessor(BaseWavProcessor): - @property - def name(self): - return 'Resample' - - def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args): - output_fn = self.output_fn(input_fn) - sr_file = librosa.core.get_samplerate(input_fn) - if sr != sr_file: - subprocess.check_call(f'sox -v 0.95 "{input_fn}" -r{sr} "{output_fn}"', shell=True) - y, _ = librosa.core.load(input_fn, sr=sr) - y, _ = librosa.effects.trim(y) - save_wav(y, output_fn, sr) - return output_fn, sr 
- else: - return input_fn, sr - - -@register_wav_processors(name='trim_sil') -class TrimSILProcessor(BaseWavProcessor): - @property - def name(self): - return 'TrimSIL' - - def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args): - output_fn = self.output_fn(input_fn) - y, _ = librosa.core.load(input_fn, sr=sr) - y, _ = librosa.effects.trim(y) - save_wav(y, output_fn, sr) - return output_fn - - -@register_wav_processors(name='trim_all_sil') -class TrimAllSILProcessor(BaseWavProcessor): - @property - def name(self): - return 'TrimSIL' - - def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args): - output_fn = self.output_fn(input_fn) - y, audio_mask, _ = trim_long_silences( - input_fn, vad_max_silence_length=preprocess_args.get('vad_max_silence_length', 12)) - save_wav(y, output_fn, sr) - if preprocess_args['save_sil_mask']: - os.makedirs(f'{processed_dir}/sil_mask', exist_ok=True) - np.save(f'{processed_dir}/sil_mask/{item_name}.npy', audio_mask) - return output_fn, sr - - -@register_wav_processors(name='denoise') -class DenoiseProcessor(BaseWavProcessor): - @property - def name(self): - return 'Denoise' - - def process(self, input_fn, sr, tmp_dir, processed_dir, item_name, preprocess_args): - output_fn = self.output_fn(input_fn) - rnnoise(input_fn, output_fn, out_sample_rate=sr) - return output_fn, sr diff --git a/spaces/RoversX/Stable-Platypus2-13B-GGML/tabbed.py b/spaces/RoversX/Stable-Platypus2-13B-GGML/tabbed.py deleted file mode 100644 index 2e6cb8337a48689eef48835bbc094696d4485c6e..0000000000000000000000000000000000000000 --- a/spaces/RoversX/Stable-Platypus2-13B-GGML/tabbed.py +++ /dev/null @@ -1,140 +0,0 @@ -import gradio as gr -import yaml -from huggingface_hub import hf_hub_download -from huggingface_hub.utils import LocalEntryNotFoundError -from llama_cpp import Llama - -with open("./config.yml", "r") as f: - config = yaml.load(f, Loader=yaml.Loader) -while True: - try: - load_config = config.copy() - hub_config = load_config["hub"].copy() - repo_id = hub_config.pop("repo_id") - filename = hub_config.pop("filename") - fp = hf_hub_download( - repo_id=repo_id, filename=filename, **hub_config - ) - break - except LocalEntryNotFoundError as e: - if "Connection error" in str(e): - print(str(e) + ", retrying...") - else: - raise(e) - -llm = Llama(model_path=fp, **config["llama_cpp"]) - - -def user(message, history): - history = history or [] - # Append the user's message to the conversation history - history.append([message, ""]) - return "", history - - -def chat(history, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty): - history = history or [] - - messages = system_message + \ - "\n".join(["\n".join(["USER: "+item[0], "ASSISTANT: "+item[1]]) - for item in history]) - - # remove last space from assistant, some models output a ZWSP if you leave a space - messages = messages[:-1] - - history[-1][1] = "" - for output in llm( - messages, - echo=False, - stream=True, - max_tokens=max_tokens, - temperature=temperature, - top_p=top_p, - top_k=top_k, - repeat_penalty=repeat_penalty, - **config['chat'] - ): - answer = output['choices'][0]['text'] - history[-1][1] += answer - # stream the response - yield history, history - - -def clear_chat(chat_history_state, chat_message): - chat_history_state = [] - chat_message = '' - return chat_history_state, chat_message - - -start_message = """ -- The Assistant is helpful and transparent. -- The Assistant will never refuse to answer anything asked by the user. 
-""" - - -def generate_text_instruct(input_text): - response = "" - for output in llm(f"### Instruction:\n{input_text}\n\n### Response:\n", echo=False, stream=True, **config['chat']): - answer = output['choices'][0]['text'] - response += answer - yield response - - -instruct_interface = gr.Interface( - fn=generate_text_instruct, - inputs=gr.inputs.Textbox(lines= 10, label="Enter your input text"), - outputs=gr.outputs.Textbox(label="Output text"), -) - -with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - gr.Markdown(f""" - # One - - This is the [{config["hub"]["repo_id"]}](https://huggingface.co/{config["hub"]["repo_id"]}) model file [{config["hub"]["filename"]}](https://huggingface.co/{config["hub"]["repo_id"]}/blob/main/{config["hub"]["filename"]}) - """) - with gr.Tab("Instruct"): - gr.Markdown("# GGML Spaces Instruct Demo") - instruct_interface.render() - - with gr.Tab("Chatbot"): - gr.Markdown("# GGML Spaces Chatbot Demo") - chatbot = gr.Chatbot() - with gr.Row(): - message = gr.Textbox( - label="What do you want to chat about?", - placeholder="Ask me anything.", - lines=1, - ) - with gr.Row(): - submit = gr.Button(value="Send message", variant="secondary").style(full_width=True) - clear = gr.Button(value="New topic", variant="secondary").style(full_width=False) - stop = gr.Button(value="Stop", variant="secondary").style(full_width=False) - with gr.Row(): - with gr.Column(): - max_tokens = gr.Slider(20, 1000, label="Max Tokens", step=20, value=300) - temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=0.8) - top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.95) - top_k = gr.Slider(0, 100, label="Top K", step=1, value=40) - repeat_penalty = gr.Slider(0.0, 2.0, label="Repetition Penalty", step=0.1, value=1.1) - - system_msg = gr.Textbox( - start_message, label="System Message", interactive=True, visible=True, placeholder="system prompt, useful for RP", lines=5) - - chat_history_state = gr.State() - clear.click(clear_chat, inputs=[chat_history_state, message], outputs=[chat_history_state, message], queue=False) - clear.click(lambda: None, None, chatbot, queue=False) - - submit_click_event = submit.click( - fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True - ).then( - fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True - ) - message_submit_event = message.submit( - fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True - ).then( - fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True - ) - stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_click_event, message_submit_event], queue=False) - -demo.queue(**config["queue"]).launch(debug=True, server_name="0.0.0.0", server_port=7860) diff --git a/spaces/Sandy0909/Finance_Sentiment/README.md b/spaces/Sandy0909/Finance_Sentiment/README.md deleted file mode 100644 index 38c6977e578c47470a71d53ccfd279f279ffdb35..0000000000000000000000000000000000000000 --- a/spaces/Sandy0909/Finance_Sentiment/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Finance Sentiment -emoji: 📚 -colorFrom: green -colorTo: pink -sdk: streamlit -sdk_version: 1.27.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff 
--git a/spaces/SeViLA/SeViLA/lavis/models/clip_models/tokenizer.py b/spaces/SeViLA/SeViLA/lavis/models/clip_models/tokenizer.py deleted file mode 100644 index 7e19124df29ace4b7e0599d1082e80d38aca0748..0000000000000000000000000000000000000000 --- a/spaces/SeViLA/SeViLA/lavis/models/clip_models/tokenizer.py +++ /dev/null @@ -1,203 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. - SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause - - Based on https://github.com/mlfoundations/open_clip -""" - -""" CLIP tokenizer -Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. -""" -import gzip -import html -import os -from functools import lru_cache -from typing import Union, List - -import ftfy -import regex as re -import torch - - -@lru_cache() -def default_bpe(): - return os.path.join( - os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz" - ) - - -@lru_cache() -def bytes_to_unicode(): - """ - Returns list of utf-8 byte and a corresponding list of unicode strings. - The reversible bpe codes work on unicode strings. - This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. - When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. - This is a signficant percentage of your normal, say, 32K bpe vocab. - To avoid that, we want lookup tables between utf-8 bytes and unicode strings. - And avoids mapping to whitespace/control characters the bpe code barfs on. - """ - bs = ( - list(range(ord("!"), ord("~") + 1)) - + list(range(ord("¡"), ord("¬") + 1)) - + list(range(ord("®"), ord("ÿ") + 1)) - ) - cs = bs[:] - n = 0 - for b in range(2**8): - if b not in bs: - bs.append(b) - cs.append(2**8 + n) - n += 1 - cs = [chr(n) for n in cs] - return dict(zip(bs, cs)) - - -def get_pairs(word): - """Return set of symbol pairs in a word. - Word is represented as tuple of symbols (symbols being variable-length strings). 
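Editorial aside on the byte-level mapping documented above: `bytes_to_unicode` gives every byte value a printable stand-in so the BPE never sees raw whitespace or control characters, and the mapping is lossless. A quick check of that property (the tokenizer definition continues below):

```python
# Quick check of the byte-to-unicode mapping described in the docstring above.
b2u = bytes_to_unicode()
u2b = {v: k for k, v in b2u.items()}

assert len(b2u) == 256           # every byte value maps to a printable character

raw = "café".encode("utf-8")
visible = "".join(b2u[b] for b in raw)
print(visible)                                    # 'cafÃ©' – the symbols BPE operates on
assert bytes(u2b[ch] for ch in visible) == raw    # and the mapping is reversible
```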
- """ - pairs = set() - prev_char = word[0] - for char in word[1:]: - pairs.add((prev_char, char)) - prev_char = char - return pairs - - -def basic_clean(text): - text = ftfy.fix_text(text) - text = html.unescape(html.unescape(text)) - return text.strip() - - -def whitespace_clean(text): - text = re.sub(r"\s+", " ", text) - text = text.strip() - return text - - -class SimpleTokenizer(object): - def __init__(self, bpe_path: str = default_bpe(), special_tokens=None): - self.byte_encoder = bytes_to_unicode() - self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} - merges = gzip.open(bpe_path).read().decode("utf-8").split("\n") - merges = merges[1 : 49152 - 256 - 2 + 1] - merges = [tuple(merge.split()) for merge in merges] - vocab = list(bytes_to_unicode().values()) - vocab = vocab + [v + "" for v in vocab] - for merge in merges: - vocab.append("".join(merge)) - if not special_tokens: - special_tokens = ["", ""] - else: - special_tokens = ["", ""] + special_tokens - vocab.extend(special_tokens) - self.encoder = dict(zip(vocab, range(len(vocab)))) - self.decoder = {v: k for k, v in self.encoder.items()} - self.bpe_ranks = dict(zip(merges, range(len(merges)))) - self.cache = {t: t for t in special_tokens} - special = "|".join(special_tokens) - self.pat = re.compile( - special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", - re.IGNORECASE, - ) - - self.vocab_size = len(self.encoder) - self.all_special_ids = [self.encoder[t] for t in special_tokens] - - def bpe(self, token): - if token in self.cache: - return self.cache[token] - word = tuple(token[:-1]) + (token[-1] + "",) - pairs = get_pairs(word) - - if not pairs: - return token + "" - - while True: - bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) - if bigram not in self.bpe_ranks: - break - first, second = bigram - new_word = [] - i = 0 - while i < len(word): - try: - j = word.index(first, i) - new_word.extend(word[i:j]) - i = j - except: - new_word.extend(word[i:]) - break - - if word[i] == first and i < len(word) - 1 and word[i + 1] == second: - new_word.append(first + second) - i += 2 - else: - new_word.append(word[i]) - i += 1 - new_word = tuple(new_word) - word = new_word - if len(word) == 1: - break - else: - pairs = get_pairs(word) - word = " ".join(word) - self.cache[token] = word - return word - - def encode(self, text): - bpe_tokens = [] - text = whitespace_clean(basic_clean(text)).lower() - for token in re.findall(self.pat, text): - token = "".join(self.byte_encoder[b] for b in token.encode("utf-8")) - bpe_tokens.extend( - self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ") - ) - return bpe_tokens - - def decode(self, tokens): - text = "".join([self.decoder[token] for token in tokens]) - text = ( - bytearray([self.byte_decoder[c] for c in text]) - .decode("utf-8", errors="replace") - .replace("", " ") - ) - return text - - -_tokenizer = SimpleTokenizer() - - -def tokenize( - texts: Union[str, List[str]], context_length: int = 77 -) -> torch.LongTensor: - """ - Returns the tokenized representation of given input string(s) - Parameters - ---------- - texts : Union[str, List[str]] - An input string or a list of input strings to tokenize - context_length : int - The context length to use; all CLIP models use 77 as the context length - Returns - ------- - A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length] - """ - if isinstance(texts, str): - texts = [texts] - - sot_token = _tokenizer.encoder[""] - 
eot_token = _tokenizer.encoder[""] - all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] - result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) - - for i, tokens in enumerate(all_tokens): - if len(tokens) > context_length: - tokens = tokens[:context_length] # Truncate - result[i, : len(tokens)] = torch.tensor(tokens) - - return result diff --git a/spaces/Shawn37/UTR_LM/esm/model/msa_transformer.py b/spaces/Shawn37/UTR_LM/esm/model/msa_transformer.py deleted file mode 100644 index 08c99cd24e1e13a80d59928b573ea892cf27bef0..0000000000000000000000000000000000000000 --- a/spaces/Shawn37/UTR_LM/esm/model/msa_transformer.py +++ /dev/null @@ -1,238 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import torch.nn as nn - -from ..modules import ( - AxialTransformerLayer, - LearnedPositionalEmbedding, - RobertaLMHead, - ESM1bLayerNorm, - ContactPredictionHead, -) - -from ..axial_attention import RowSelfAttention, ColumnSelfAttention - - - -class MSATransformer(nn.Module): - @classmethod - def add_args(cls, parser): - # fmt: off - parser.add_argument( - "--num_layers", - default=12, - type=int, - metavar="N", - help="number of layers" - ) - parser.add_argument( - "--embed_dim", - default=768, - type=int, - metavar="N", - help="embedding dimension" - ) - parser.add_argument( - "--logit_bias", - action="store_true", - help="whether to apply bias to logits" - ) - parser.add_argument( - "--ffn_embed_dim", - default=3072, - type=int, - metavar="N", - help="embedding dimension for FFN", - ) - parser.add_argument( - "--attention_heads", - default=12, - type=int, - metavar="N", - help="number of attention heads", - ) - parser.add_argument( - "--dropout", - default=0.1, - type=float, - help="Dropout to apply." - ) - parser.add_argument( - "--attention_dropout", - default=0.1, - type=float, - help="Dropout to apply." - ) - parser.add_argument( - "--activation_dropout", - default=0.1, - type=float, - help="Dropout to apply." - ) - parser.add_argument( - "--max_tokens_per_msa", - default=2 ** 14, - type=int, - help=( - "Used during inference to batch attention computations in a single " - "forward pass. This allows increased input sizes with less memory." 
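Editorial aside: a usage sketch for the `tokenize` helper defined above (the default context length of 77 matches CLIP's text encoder); the MSA Transformer's argument definitions continue below.

```python
# Usage sketch for the CLIP tokenize() helper above.
tokens = tokenize(["a photo of a cat", "a photo of a dog"], context_length=77)

print(tokens.shape)         # torch.Size([2, 77])
print(tokens[0, 0].item())  # ID of the start-of-text special token
# Rows are zero-padded after the end-of-text token; over-long inputs are truncated.
```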
- ), - ) - # fmt: on - - def __init__(self, args, alphabet): - super().__init__() - self.args = args - self.alphabet_size = len(alphabet) - self.padding_idx = alphabet.padding_idx - self.mask_idx = alphabet.mask_idx - self.cls_idx = alphabet.cls_idx - self.eos_idx = alphabet.eos_idx - self.prepend_bos = alphabet.prepend_bos - self.append_eos = alphabet.append_eos - - self.embed_tokens = nn.Embedding( - self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx - ) - - if getattr(self.args, "embed_positions_msa", False): - emb_dim = getattr(self.args, "embed_positions_msa_dim", self.args.embed_dim) - self.msa_position_embedding = nn.Parameter( - 0.01 * torch.randn(1, 1024, 1, emb_dim), - requires_grad=True, - ) - else: - self.register_parameter("msa_position_embedding", None) - - self.dropout_module = nn.Dropout(self.args.dropout) - self.layers = nn.ModuleList( - [ - AxialTransformerLayer( - self.args.embed_dim, - self.args.ffn_embed_dim, - self.args.attention_heads, - self.args.dropout, - self.args.attention_dropout, - self.args.activation_dropout, - getattr(self.args, "max_tokens_per_msa", self.args.max_tokens), - ) - for _ in range(self.args.layers) - ] - ) - - self.contact_head = ContactPredictionHead( - self.args.layers * self.args.attention_heads, - self.prepend_bos, - self.append_eos, - eos_idx=self.eos_idx, - ) - self.embed_positions = LearnedPositionalEmbedding( - self.args.max_positions, - self.args.embed_dim, - self.padding_idx, - ) - self.emb_layer_norm_before = ESM1bLayerNorm(self.args.embed_dim) - self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim) - self.lm_head = RobertaLMHead( - embed_dim=self.args.embed_dim, - output_dim=self.alphabet_size, - weight=self.embed_tokens.weight, - ) - - def forward(self, tokens, repr_layers=[], need_head_weights=False, return_contacts=False): - if return_contacts: - need_head_weights = True - - assert tokens.ndim == 3 - batch_size, num_alignments, seqlen = tokens.size() - padding_mask = tokens.eq(self.padding_idx) # B, R, C - if not padding_mask.any(): - padding_mask = None - - x = self.embed_tokens(tokens) - x += self.embed_positions(tokens.view(batch_size * num_alignments, seqlen)).view(x.size()) - if self.msa_position_embedding is not None: - if x.size(1) > 1024: - raise RuntimeError( - "Using model with MSA position embedding trained on maximum MSA " - f"depth of 1024, but received {x.size(1)} alignments." 
- ) - x += self.msa_position_embedding[:, :num_alignments] - - x = self.emb_layer_norm_before(x) - - x = self.dropout_module(x) - - if padding_mask is not None: - x = x * (1 - padding_mask.unsqueeze(-1).type_as(x)) - - repr_layers = set(repr_layers) - hidden_representations = {} - if 0 in repr_layers: - hidden_representations[0] = x - - if need_head_weights: - row_attn_weights = [] - col_attn_weights = [] - - # B x R x C x D -> R x C x B x D - x = x.permute(1, 2, 0, 3) - - for layer_idx, layer in enumerate(self.layers): - x = layer( - x, - self_attn_padding_mask=padding_mask, - need_head_weights=need_head_weights, - ) - if need_head_weights: - x, col_attn, row_attn = x - # H x C x B x R x R -> B x H x C x R x R - col_attn_weights.append(col_attn.permute(2, 0, 1, 3, 4)) - # H x B x C x C -> B x H x C x C - row_attn_weights.append(row_attn.permute(1, 0, 2, 3)) - if (layer_idx + 1) in repr_layers: - hidden_representations[layer_idx + 1] = x.permute(2, 0, 1, 3) - - x = self.emb_layer_norm_after(x) - x = x.permute(2, 0, 1, 3) # R x C x B x D -> B x R x C x D - - # last hidden representation should have layer norm applied - if (layer_idx + 1) in repr_layers: - hidden_representations[layer_idx + 1] = x - x = self.lm_head(x) - - result = {"logits": x, "representations": hidden_representations} - if need_head_weights: - # col_attentions: B x L x H x C x R x R - col_attentions = torch.stack(col_attn_weights, 1) - # row_attentions: B x L x H x C x C - row_attentions = torch.stack(row_attn_weights, 1) - result["col_attentions"] = col_attentions - result["row_attentions"] = row_attentions - if return_contacts: - contacts = self.contact_head(tokens, row_attentions) - result["contacts"] = contacts - - return result - - def predict_contacts(self, tokens): - return self(tokens, return_contacts=True)["contacts"] - - @property - def num_layers(self): - return self.args.layers - - def max_tokens_per_msa_(self, value: int) -> None: - """The MSA Transformer automatically batches attention computations when - gradients are disabled to allow you to pass in larger MSAs at test time than - you can fit in GPU memory. By default this occurs when more than 2^14 tokens - are passed in the input MSA. You can set this value to infinity to disable - this behavior. 
- """ - for module in self.modules(): - if isinstance(module, (RowSelfAttention, ColumnSelfAttention)): - module.max_tokens_per_msa = value \ No newline at end of file diff --git a/spaces/ShiwenNi/ChatReviewer/app.py b/spaces/ShiwenNi/ChatReviewer/app.py deleted file mode 100644 index 083a1fa40a17e4950c79a5c0934f3d74036eb445..0000000000000000000000000000000000000000 --- a/spaces/ShiwenNi/ChatReviewer/app.py +++ /dev/null @@ -1,218 +0,0 @@ -import numpy as np -import os -import re -import jieba -from io import BytesIO -import datetime -import time -import openai, tenacity -import argparse -import configparser -import json -import tiktoken -import PyPDF2 -import gradio - - -def contains_chinese(text): - for ch in text: - if u'\u4e00' <= ch <= u'\u9fff': - return True - return False - -def insert_sentence(text, sentence, interval): - lines = text.split('\n') - new_lines = [] - - for line in lines: - if contains_chinese(line): - words = list(jieba.cut(line)) - separator = '' - else: - words = line.split() - separator = ' ' - - new_words = [] - count = 0 - - for word in words: - new_words.append(word) - count += 1 - - if count % interval == 0: - new_words.append(sentence) - - new_lines.append(separator.join(new_words)) - - return '\n'.join(new_lines) - -# 定义Reviewer类 -class Reviewer: - # 初始化方法,设置属性 - def __init__(self, api, review_format, paper_pdf, language): - self.api = api - self.review_format = review_format - - self.language = language - self.paper_pdf = paper_pdf - self.max_token_num = 12000 - self.encoding = tiktoken.get_encoding("gpt2") - - - def review_by_chatgpt(self, paper_list): - text = self.extract_chapter(self.paper_pdf) - chat_review_text, total_token_used = self.chat_review(text=text) - return chat_review_text, total_token_used - - - - @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10), - stop=tenacity.stop_after_attempt(5), - reraise=True) - def chat_review(self, text): - openai.api_key = self.api # 读取api - review_prompt_token = 1000 - try: - text_token = len(self.encoding.encode(text)) - except: - text_token = 13000 - input_text_index = int(len(text)*(self.max_token_num-review_prompt_token)/(text_token+1)) - input_text = "This is the paper for your review:" + text[:input_text_index] - messages=[ - {"role": "system", "content": "You are a professional reviewer. Now I will give you a paper. You need to give a complete review opinion according to the following requirements and format:"+ self.review_format + "Be sure to use {} answers".format(self.language)} , - {"role": "user", "content": input_text + " Translate the output into {}.".format(self.language)}, - ] - try: - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo-16k", - messages=messages, - temperature=0.7 - ) - result = '' - for choice in response.choices: - result += choice.message.content - result = insert_sentence(result, '**Generated by ChatGPT, no copying allowed!**', 50) - result += "\n\n⚠伦理声明/Ethics statement:\n--禁止直接复制生成的评论用于任何论文审稿工作!\n--Direct copying of generated comments for any paper review work is prohibited!" 
- usage = response.usage.total_tokens - except Exception as e: - # 处理其他的异常 - result = "⚠:非常抱歉>_<,生了一个错误:"+ str(e) - usage = 'xxxxx' - print("********"*10) - print(result) - print("********"*10) - return result, usage - - - - - - def extract_chapter(self, pdf_path): - file_object = BytesIO(pdf_path) - pdf_reader = PyPDF2.PdfReader(file_object) - # 获取PDF的总页数 - num_pages = len(pdf_reader.pages) - # 初始化提取状态和提取文本 - extraction_started = False - extracted_text = "" - # 遍历PDF中的每一页 - for page_number in range(num_pages): - page = pdf_reader.pages[page_number] - page_text = page.extract_text() - - # 开始提取 - extraction_started = True - page_number_start = page_number - # 如果提取已开始,将页面文本添加到提取文本中 - if extraction_started: - extracted_text += page_text - # 停止提取 - if page_number_start + 1 < page_number: - break - return extracted_text - -def main(api, review_format, paper_pdf, language): - start_time = time.time() - comments = '' - output2 = '' - if not api or not review_format or not paper_pdf: - comments = "⚠:API-key或审稿要求或论文pdf未输入!请检测!" - output2 = "⚠:API-key或审稿要求或论文pdf未输入!请检测!" - # 判断PDF文件 - else: - # 创建一个Reader对象 - reviewer1 = Reviewer(api, review_format, paper_pdf, language) - # 开始判断是路径还是文件: - comments, total_token_used = reviewer1.review_by_chatgpt(paper_list=paper_pdf) - time_used = time.time() - start_time - output2 ="使用token数:"+ str(total_token_used)+"\n花费时间:"+ str(round(time_used, 2)) +"秒" - return comments, output2 - - - -######################################################################################################## -# 标题 -title = "🤖ChatReviewer🤖" -# 描述 - -description = '''
      - -ChatReviewer是一款基于ChatGPT-3.5的API开发的智能论文分析与建议助手。其用途如下: - -⭐️对论文的优缺点进行快速总结和分析,提高科研人员的文献阅读和理解的效率,紧跟研究前沿。 - -⭐️对自己的论文进行分析,根据ChatReviewer生成的改进建议进行查漏补缺,进一步提高自己的论文质量。 - -如果觉得很卡,可以点击右上角的Duplicate this Space,把ChatReviewer复制到你自己的Space中!(🈲:禁止直接复制生成的评论用于任何论文审稿工作!) - -本项目的[Github](https://github.com/nishiwen1214/ChatReviewer),欢迎Star和Fork,也欢迎大佬赞助让本项目快速成长!💗 - - - -
      -''' - -# 创建Gradio界面 -inp = [gradio.inputs.Textbox(label="请输入你的API-key(sk开头的字符串)", - default="", - type='password'), - gradio.inputs.Textbox(lines=5, - label="请输入特定的分析要求和格式(否则为默认格式)", - default="""* Overall Review -Please briefly summarize the main points and contributions of this paper. -xxx -* Paper Strength -Please provide a list of the strengths of this paper, including but not limited to: innovative and practical methodology, insightful empirical findings or in-depth theoretical analysis, -well-structured review of relevant literature, and any other factors that may make the paper valuable to readers. (Maximum length: 2,000 characters) -(1) xxx -(2) xxx -(3) xxx -* Paper Weakness -Please provide a numbered list of your main concerns regarding this paper (so authors could respond to the concerns individually). -These may include, but are not limited to: inadequate implementation details for reproducing the study, limited evaluation and ablation studies for the proposed method, -correctness of the theoretical analysis or experimental results, lack of comparisons or discussions with widely-known baselines in the field, lack of clarity in exposition, -or any other factors that may impede the reader's understanding or benefit from the paper. Please kindly refrain from providing a general assessment of the paper's novelty without providing detailed explanations. (Maximum length: 2,000 characters) -(1) xxx -(2) xxx -(3) xxx -* Questions To Authors And Suggestions For Rebuttal -Please provide a numbered list of specific and clear questions that pertain to the details of the proposed method, evaluation setting, or additional results that would aid in supporting the authors' claims. -The questions should be formulated in a manner that, after the authors have answered them during the rebuttal, it would enable a more thorough assessment of the paper's quality. (Maximum length: 2,000 characters) -*Overall score (1-10) -The paper is scored on a scale of 1-10, with 10 being the full mark, and 6 stands for borderline accept. Then give the reason for your rating. 
-xxx""" - ), - gradio.inputs.File(label="请上传论文PDF文件(请务必等pdf上传完成后再点击Submit!)",type="bytes"), - gradio.inputs.Radio(choices=["English", "Chinese", "French", "German","Japenese"], - default="English", - label="选择输出语言"), -] - -chat_reviewer_gui = gradio.Interface(fn=main, - inputs=inp, - outputs = [gradio.Textbox(lines=25, label="分析结果"), gradio.Textbox(lines=2, label="资源统计")], - title=title, - description=description) - -# Start server -chat_reviewer_gui .launch(quiet=True, show_api=False) \ No newline at end of file diff --git a/spaces/Shreeradha/GradioChatBotAI/README.md b/spaces/Shreeradha/GradioChatBotAI/README.md deleted file mode 100644 index 8360f792c493b559d35bada6056338e5e88516eb..0000000000000000000000000000000000000000 --- a/spaces/Shreeradha/GradioChatBotAI/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: GradioChatBotAI -emoji: 🏆 -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Shularp/marian_translation_test_th_ar_en/README.md b/spaces/Shularp/marian_translation_test_th_ar_en/README.md deleted file mode 100644 index d5a6a1e631caf3e9e147999c4f756256b40098ac..0000000000000000000000000000000000000000 --- a/spaces/Shularp/marian_translation_test_th_ar_en/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Marian Translation Test Th Ar En -emoji: 📉 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Sing11104/bingo-11104/README.md b/spaces/Sing11104/bingo-11104/README.md deleted file mode 100644 index 5d6936218874c647b5d22e13ad4be7edb8936f92..0000000000000000000000000000000000000000 --- a/spaces/Sing11104/bingo-11104/README.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: bingo -emoji: 😊 -colorFrom: red -colorTo: red -sdk: docker -license: mit -duplicated_from: hf4all/bingo ---- - -
      - -# Bingo - -Bingo,一个让你呼吸顺畅 New Bing。 - -高度还原 New Bing 网页版的主要操作,国内可用,兼容绝大多数微软 Bing AI 的功能,可自行部署使用。 - -![Github stars](https://badgen.net/github/stars/weaigc/bingo?icon=github&label=stars) -![Gthub issues](https://img.shields.io/github/issues/weaigc/bingo) -[![docker build](https://github.com/weaigc/bingo/actions/workflows/docker.yml/badge.svg)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![docker hub](https://badgen.net/docker/size/weaigc/bingo?icon=docker&label=image%20size)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![MIT License](https://img.shields.io/badge/license-MIT-97c50f)](https://github.com/weaigc/bingo/blob/main/license) - -问题反馈请前往 https://github.com/weaigc/bingo/issues -
      - - diff --git a/spaces/StealYourGhost/Joeythemonster-anything-midjourney-v-4-1/README.md b/spaces/StealYourGhost/Joeythemonster-anything-midjourney-v-4-1/README.md deleted file mode 100644 index 3d299291769219de0ea751e6bc335d3d65ba6703..0000000000000000000000000000000000000000 --- a/spaces/StealYourGhost/Joeythemonster-anything-midjourney-v-4-1/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Joeythemonster Anything Midjourney V 4 1 -emoji: 🚀 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/SujanMidatani/resume_details_extractor/app.py b/spaces/SujanMidatani/resume_details_extractor/app.py deleted file mode 100644 index 771365b2d68daa3cc4314f81e2a433aa83f22fc2..0000000000000000000000000000000000000000 --- a/spaces/SujanMidatani/resume_details_extractor/app.py +++ /dev/null @@ -1,127 +0,0 @@ -from PyPDF2 import PdfReader -import gradio as gr -from langchain.chat_models import ChatOpenAI -from kor import create_extraction_chain, Object, Text -from langchain.prompts.prompt import PromptTemplate -# from langchain.chains import LLMChain -# from langchain.llms import OpenAI - -from dotenv import load_dotenv - -load_dotenv() -def gen_text(pdf_file): - with open(pdf_file.name, "rb") as f: - reader = PdfReader(f) - num_pages = len(reader.pages) - text = "" - for page in reader.pages: - text += page.extract_text() - print(text) - constraints=context_extracter(text) - - return constraints - - - -# def generate_questions(resume,role='',experience=''): -# _PROMPT_TEMPLATE = """ -# this is the resume of user: -# {resume_details} - -# here is the role he want to join in : -# {role} - -# Based on the following experience: -# {experience} - -# What are your interview questions for the given user resume and role he want to join in with that experience? -# generate no of questions = {questions}! 
-# """ -# PROMPT = PromptTemplate(input_variables=["resume_details", "role", "experience",'questions'], template=_PROMPT_TEMPLATE) - -# llm1 = OpenAI(model_name="text-davinci-003", temperature=0) -# chain = LLMChain(llm=llm1, prompt=PROMPT) -# prompt = chain.predict_and_parse(resume_details= gen_text(resume), -# role= role, -# experience= experience, -# questions=10) -# return prompt.split('\n') -def context_extracter(text): - - llm = ChatOpenAI( - model_name="gpt-3.5-turbo-16k", - temperature=0, - max_tokens=2000, - frequency_penalty=0, - presence_penalty=0, - top_p=1.0, - ) - schema = Object( - id="interviewer", - description=( - "interviewer is examining resume text and should produce set of attributes which represents that person by his resume" - - ), - attributes=[ - Text( - id="summary_or_objective", - description="A brief overview of the candidate's professional background, skills, and career goals", - examples=[], - many=True, - ), - Text( - id="work_experience", - description="Details of previous employment positions, including job titles, company names, employment dates, and a description of responsibilities and achievements for each role ", - examples=[], - many=True, - ), - Text( - id="education", - description="Information about the candidate's educational qualifications, including degrees, certificates, and the names of institutions attended", - examples=[], - many=True, - ), - Text( - id="skills", - description="A section highlighting the candidate's relevant skills, such as technical skills, languages spoken, software proficiency, or specific tools used", - examples=[], - many=True, - ), - Text( - id="achievements_or_awards", - description="Any notable achievements, awards, or recognition received by the candidate during their education or career.", - examples=[], - many=True, - ), - Text( - id="certifications_or_licenses", - description="Information about any professional certifications or licenses held by the candidate that are relevant to the desired position", - examples=[], - many=True, - ), - Text( - id="projects", - description="Details of significant projects the candidate has worked on, including a brief description, their role, and any notable outcomes", - examples=[], - many=True, - ), - Text( - id="publications_or_presentations", - description=" If applicable, a list of publications or presentations the candidate has authored or delivered, including the titles, dates, and locations", - examples=[], - many=True, - ), - ], - many=True, - ) - # chain = LLMChain(llm=llm1, prompt=PROMPT) - chain = create_extraction_chain(llm, schema, encoder_or_encoder_class='json') - return chain.run(text=text)['data'] - -k=gr.Interface( - fn=gen_text, - inputs=['file'], - outputs=['json'] -) -k.launch() - \ No newline at end of file diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/consoleapp.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/consoleapp.py deleted file mode 100644 index c2bbe1888f543cb33c4c88796594bab1da297e62..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/consoleapp.py +++ /dev/null @@ -1,12 +0,0 @@ -""" -Shim to maintain backwards compatibility with old IPython.consoleapp imports. -""" -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -from warnings import warn - -warn("The `IPython.consoleapp` package has been deprecated since IPython 4.0." 
- "You should import from jupyter_client.consoleapp instead.", stacklevel=2) - -from jupyter_client.consoleapp import * diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/telemetry/posthog.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/telemetry/posthog.py deleted file mode 100644 index a20e20dd25767b2247d7d8aaa67603c212571c52..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/chromadb/telemetry/posthog.py +++ /dev/null @@ -1,35 +0,0 @@ -import posthog -import logging -import sys -from chromadb.config import System -from chromadb.telemetry import Telemetry, TelemetryEvent -from overrides import override - -logger = logging.getLogger(__name__) - - -class Posthog(Telemetry): - def __init__(self, system: System): - if not system.settings.anonymized_telemetry or "pytest" in sys.modules: - posthog.disabled = True - else: - logger.info( - "Anonymized telemetry enabled. See https://docs.trychroma.com/telemetry for more information." - ) - - posthog.project_api_key = "phc_YeUxaojbKk5KPi8hNlx1bBKHzuZ4FDtl67kH1blv8Bh" - posthog_logger = logging.getLogger("posthog") - # Silence posthog's logging - posthog_logger.disabled = True - super().__init__(system) - - @override - def capture(self, event: TelemetryEvent) -> None: - try: - posthog.capture( - self.user_id, - event.name, - {**(event.properties), "chroma_context": self.context}, - ) - except Exception as e: - logger.error(f"Failed to send telemetry event {event.name}: {e}") diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/driver/compression.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/driver/compression.py deleted file mode 100644 index db69ae3f040d424c9ee4811372b52b0a310a7575..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/driver/compression.py +++ /dev/null @@ -1,77 +0,0 @@ -import zlib -from abc import abstractmethod -from typing import Union - -import lz4 -import lz4.frame -import zstandard - -try: - import brotli -except ImportError: - brotli = None - - -available_compression = ['lz4', 'zstd'] - -if brotli: - available_compression.append('br') -available_compression.extend(['gzip', 'deflate']) - -comp_map = {} - - -class Compressor: - def __init_subclass__(cls, tag: str, thread_safe: bool = True): - comp_map[tag] = cls() if thread_safe else cls - - @abstractmethod - def compress_block(self, block) -> Union[bytes, bytearray]: - return block - - def flush(self): - pass - - -class GzipCompressor(Compressor, tag='gzip', thread_safe=False): - def __init__(self, level: int = 6, wbits: int = 31): - self.zlib_obj = zlib.compressobj(level=level, wbits=wbits) - - def compress_block(self, block): - return self.zlib_obj.compress(block) - - def flush(self): - return self.zlib_obj.flush() - - -class Lz4Compressor(Compressor, tag='lz4', thread_safe=False): - def __init__(self): - self.comp = lz4.frame.LZ4FrameCompressor() - - def compress_block(self, block): - output = self.comp.begin(len(block)) - output += self.comp.compress(block) - return output + self.comp.flush() - - -class ZstdCompressor(Compressor, tag='zstd'): - def compress_block(self, block): - return zstandard.compress(block) - - -class BrotliCompressor(Compressor, tag='br'): - def compress_block(self, block): - return brotli.compress(block) - - -null_compressor = Compressor() - - -def get_compressor(compression: str) -> Compressor: - if not 
compression: - return null_compressor - comp = comp_map[compression] - try: - return comp() - except TypeError: - return comp diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/computation/tensorflow_backend.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/computation/tensorflow_backend.py deleted file mode 100644 index fc963cdb48b3617406aec40ba58e3e301e358679..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/computation/tensorflow_backend.py +++ /dev/null @@ -1,287 +0,0 @@ -import typing -from typing import TYPE_CHECKING, Callable, List, Optional, Tuple - -import numpy as np - -from docarray.computation import AbstractComputationalBackend -from docarray.computation.abstract_numpy_based_backend import AbstractNumpyBasedBackend -from docarray.typing import TensorFlowTensor -from docarray.utils._internal.misc import import_library - -if TYPE_CHECKING: - import tensorflow as tf # type: ignore - import tensorflow._api.v2.experimental.numpy as tnp # type: ignore -else: - tf = import_library('tensorflow', raise_error=True) - tnp = tf._api.v2.experimental.numpy - - -def _unsqueeze_if_single_axis(*matrices: tf.Tensor) -> List[tf.Tensor]: - """ - Unsqueezes tensors that only have one axis, at dim 0. - This ensures that all outputs can be treated as matrices, not vectors. - - :param matrices: Matrices to be unsqueezed - :return: List of the input matrices, - where single axis matrices are unsqueezed at dim 0. - """ - unsqueezed = [] - for m in matrices: - if len(m.shape) == 1: - unsqueezed.append(tf.expand_dims(m, axis=0)) - else: - unsqueezed.append(m) - return unsqueezed - - -def _unsqueeze_if_scalar(t: tf.Tensor) -> tf.Tensor: - """ - Unsqueezes tensor of a scalar, from shape () to shape (1,). - - :param t: tensor to unsqueeze. - :return: unsqueezed tf.Tensor - """ - if len(t.shape) == 0: # avoid scalar output - t = tf.expand_dims(t, 0) - return t - - -def norm_left(t: tf.Tensor) -> TensorFlowTensor: - return TensorFlowTensor(tensor=t) - - -def norm_right(t: TensorFlowTensor) -> tf.Tensor: - return t.tensor - - -class TensorFlowCompBackend(AbstractNumpyBasedBackend[TensorFlowTensor]): - """ - Computational backend for TensorFlow. - """ - - _module = tnp - _cast_output: Callable = norm_left - _get_tensor: Callable = norm_right - - @classmethod - def to_numpy(cls, array: 'TensorFlowTensor') -> 'np.ndarray': - return cls._get_tensor(array).numpy() - - @classmethod - def none_value(cls) -> typing.Any: - """Provide a compatible value that represents None in numpy.""" - return tf.constant(float('nan')) - - @classmethod - def to_device(cls, tensor: 'TensorFlowTensor', device: str) -> 'TensorFlowTensor': - """Move the tensor to the specified device.""" - if cls.device(tensor) == device: - return tensor - else: - with tf.device(device): - return cls._cast_output(tf.identity(cls._get_tensor(tensor))) - - @classmethod - def device(cls, tensor: 'TensorFlowTensor') -> Optional[str]: - """Return device on which the tensor is allocated.""" - return cls._get_tensor(tensor).device - - @classmethod - def detach(cls, tensor: 'TensorFlowTensor') -> 'TensorFlowTensor': - """ - Returns the tensor detached from its current graph. - - :param tensor: tensor to be detached - :return: a detached tensor with the same data. 
- """ - return cls._cast_output(tf.stop_gradient(cls._get_tensor(tensor))) - - @classmethod - def dtype(cls, tensor: 'TensorFlowTensor') -> tf.dtypes: - """Get the data type of the tensor.""" - d_type = cls._get_tensor(tensor).dtype - return d_type.name - - @classmethod - def minmax_normalize( - cls, - tensor: 'TensorFlowTensor', - t_range: Tuple = (0.0, 1.0), - x_range: Optional[Tuple] = None, - eps: float = 1e-7, - ) -> 'TensorFlowTensor': - a, b = t_range - - t = tf.cast(cls._get_tensor(tensor), tf.float32) - min_d = x_range[0] if x_range else tnp.min(t, axis=-1, keepdims=True) - max_d = x_range[1] if x_range else tnp.max(t, axis=-1, keepdims=True) - - i = (b - a) * (t - min_d) / (max_d - min_d + tf.constant(eps) + a) - - normalized = tnp.clip(i, *((a, b) if a < b else (b, a))) - return cls._cast_output(tf.cast(normalized, tensor.tensor.dtype)) - - class Retrieval(AbstractComputationalBackend.Retrieval[TensorFlowTensor]): - """ - Abstract class for retrieval and ranking functionalities - """ - - @staticmethod - def top_k( - values: 'TensorFlowTensor', - k: int, - descending: bool = False, - device: Optional[str] = None, - ) -> Tuple['TensorFlowTensor', 'TensorFlowTensor']: - """ - Retrieves the top k smallest values in `values`, - and returns them alongside their indices in the input `values`. - Can also be used to retrieve the top k largest values, - by setting the `descending` flag. - - :param values: TensorFlowTensor of values to rank. - Should be of shape (n_queries, n_values_per_query). - Inputs of shape (n_values_per_query,) will be expanded - to (1, n_values_per_query). - :param k: number of values to retrieve - :param descending: retrieve largest values instead of smallest values - :param device: the computational device to use. - :return: Tuple of TensorFlowTensors containing the retrieved values, and - their indices. Both are of shape (n_queries, k) - """ - comp_be = TensorFlowCompBackend - if device is not None: - values = comp_be.to_device(values, device) - - tf_values: tf.Tensor = comp_be._get_tensor(values) - if len(tf_values.shape) <= 1: - tf_values = tf.expand_dims(tf_values, axis=0) - - len_tf_values = ( - tf_values.shape[-1] if len(tf_values.shape) > 1 else len(tf_values) - ) - k = min(k, len_tf_values) - - if not descending: - tf_values = -tf_values - - result = tf.math.top_k(input=tf_values, k=k, sorted=True) - res_values = result.values - res_indices = result.indices - - if not descending: - res_values = -result.values - - return comp_be._cast_output(res_values), comp_be._cast_output(res_indices) - - class Metrics(AbstractComputationalBackend.Metrics[TensorFlowTensor]): - """ - Abstract base class for metrics (distances and similarities). - """ - - @staticmethod - def cosine_sim( - x_mat: 'TensorFlowTensor', - y_mat: 'TensorFlowTensor', - eps: float = 1e-7, - device: Optional[str] = None, - ) -> 'TensorFlowTensor': - """Pairwise cosine similarities between all vectors in x_mat and y_mat. - - :param x_mat: tensor of shape (n_vectors, n_dim), where n_vectors is the - number of vectors and n_dim is the number of dimensions of each example. - :param y_mat: tensor of shape (n_vectors, n_dim), where n_vectors is the - number of vectors and n_dim is the number of dimensions of each example. - :param eps: a small jitter to avoid divde by zero - :param device: the device to use for computations. - If not provided, the devices of x_mat and y_mat are used. - :return: Tensor of shape (n_vectors, n_vectors) containing all pairwise - cosine distances. 
- The index [i_x, i_y] contains the cosine distance between - x_mat[i_x] and y_mat[i_y]. - """ - comp_be = TensorFlowCompBackend - x_mat_tf: tf.Tensor = comp_be._get_tensor(x_mat) - y_mat_tf: tf.Tensor = comp_be._get_tensor(y_mat) - - with tf.device(device): - x_mat_tf = tf.identity(x_mat_tf) - y_mat_tf = tf.identity(y_mat_tf) - - x_mat_tf, y_mat_tf = _unsqueeze_if_single_axis(x_mat_tf, y_mat_tf) - - a_n = tf.linalg.normalize(x_mat_tf, axis=1)[1] - b_n = tf.linalg.normalize(y_mat_tf, axis=1)[1] - a_norm = x_mat_tf / tf.clip_by_value( - a_n, clip_value_min=eps, clip_value_max=tf.float32.max - ) - b_norm = y_mat_tf / tf.clip_by_value( - b_n, clip_value_min=eps, clip_value_max=tf.float32.max - ) - sims = tf.squeeze(tf.linalg.matmul(a_norm, tf.transpose(b_norm))) - sims = _unsqueeze_if_scalar(sims) - - return comp_be._cast_output(sims) - - @staticmethod - def euclidean_dist( - x_mat: 'TensorFlowTensor', - y_mat: 'TensorFlowTensor', - device: Optional[str] = None, - ) -> 'TensorFlowTensor': - """Pairwise Euclidian distances between all vectors in x_mat and y_mat. - - :param x_mat: tensor of shape (n_vectors, n_dim), where n_vectors is the - number of vectors and n_dim is the number of dimensions of each example. - :param y_mat: tensor of shape (n_vectors, n_dim), where n_vectors is the - number of vectors and n_dim is the number of dimensions of each example. - :param device: the device to use for pytorch computations. - If not provided, the devices of x_mat and y_mat are used. - :return: Tensor of shape (n_vectors, n_vectors) containing all pairwise - euclidian distances. - The index [i_x, i_y] contains the euclidian distance between - x_mat[i_x] and y_mat[i_y]. - """ - comp_be = TensorFlowCompBackend - x_mat_tf: tf.Tensor = comp_be._get_tensor(x_mat) - y_mat_tf: tf.Tensor = comp_be._get_tensor(y_mat) - - with tf.device(device): - x_mat_tf = tf.identity(x_mat_tf) - y_mat_tf = tf.identity(y_mat_tf) - - x_mat_tf, y_mat_tf = _unsqueeze_if_single_axis(x_mat_tf, y_mat_tf) - - dists = tf.squeeze(tf.norm(tf.subtract(x_mat_tf, y_mat_tf), axis=-1)) - dists = _unsqueeze_if_scalar(dists) - - return comp_be._cast_output(dists) - - @staticmethod - def sqeuclidean_dist( - x_mat: 'TensorFlowTensor', - y_mat: 'TensorFlowTensor', - device: Optional[str] = None, - ) -> 'TensorFlowTensor': - """Pairwise Squared Euclidian distances between all vectors - in x_mat and y_mat. - - :param x_mat: tensor of shape (n_vectors, n_dim), where n_vectors is the - number of vectors and n_dim is the number of dimensions of each - example. - :param y_mat: tensor of shape (n_vectors, n_dim), where n_vectors is the - number of vectors and n_dim is the number of dimensions of each - example. - :param device: the device to use for pytorch computations. - If not provided, the devices of x_mat and y_mat are used. - :return: Tensor of shape (n_vectors, n_vectors) containing all pairwise - euclidian distances. - The index [i_x, i_y] contains the euclidian distance between - x_mat[i_x] and y_mat[i_y]. 
- """ - dists = TensorFlowCompBackend.Metrics.euclidean_dist(x_mat, y_mat) - squared: tf.Tensor = tf.math.square( - TensorFlowCompBackend._get_tensor(dists) - ) - - return TensorFlowCompBackend._cast_output(squared) diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/core/evaluation/class_names.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/core/evaluation/class_names.py deleted file mode 100644 index ffae816cf980ce4b03e491cc0c4298cb823797e6..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/core/evaluation/class_names.py +++ /dev/null @@ -1,152 +0,0 @@ -import annotator.uniformer.mmcv as mmcv - - -def cityscapes_classes(): - """Cityscapes class names for external use.""" - return [ - 'road', 'sidewalk', 'building', 'wall', 'fence', 'pole', - 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', - 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', - 'bicycle' - ] - - -def ade_classes(): - """ADE20K class names for external use.""" - return [ - 'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', - 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth', - 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', - 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', - 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe', - 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', - 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', - 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path', - 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', - 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table', - 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove', - 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', - 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', - 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver', - 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', - 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van', - 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything', - 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', - 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank', - 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', - 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce', - 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen', - 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', - 'clock', 'flag' - ] - - -def voc_classes(): - """Pascal VOC class names for external use.""" - return [ - 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', - 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', - 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', - 'tvmonitor' - ] - - -def cityscapes_palette(): - """Cityscapes palette for external use.""" - return [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], - [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], - [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], - [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], - [0, 0, 230], [119, 11, 32]] - - -def ade_palette(): - """ADE20K palette for external use.""" - return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], - [4, 200, 3], [120, 120, 80], [140, 
140, 140], [204, 5, 255], - [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], - [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], - [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], - [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], - [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], - [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], - [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], - [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], - [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], - [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], - [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], - [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], - [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], - [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], - [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], - [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], - [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], - [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], - [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], - [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], - [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], - [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], - [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], - [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], - [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], - [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], - [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], - [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], - [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], - [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], - [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], - [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], - [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], - [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], - [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], - [102, 255, 0], [92, 0, 255]] - - -def voc_palette(): - """Pascal VOC palette for external use.""" - return [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], - [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], - [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128], - [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0], - [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]] - - -dataset_aliases = { - 'cityscapes': ['cityscapes'], - 'ade': ['ade', 'ade20k'], - 'voc': ['voc', 'pascal_voc', 'voc12', 'voc12aug'] -} - - -def get_classes(dataset): - """Get class names of a dataset.""" - alias2name = {} - for name, aliases in dataset_aliases.items(): - for alias in aliases: - alias2name[alias] = name - - if mmcv.is_str(dataset): - if dataset in alias2name: - labels = eval(alias2name[dataset] + '_classes()') - else: - raise ValueError(f'Unrecognized dataset: {dataset}') - else: - raise TypeError(f'dataset must a str, but got {type(dataset)}') - return labels - - -def get_palette(dataset): - """Get class palette (RGB) of a dataset.""" - alias2name = {} - for name, aliases in dataset_aliases.items(): - for alias in aliases: - alias2name[alias] = name - - if mmcv.is_str(dataset): - if dataset in alias2name: - labels = eval(alias2name[dataset] + '_palette()') - else: - raise ValueError(f'Unrecognized dataset: {dataset}') - else: - raise 
TypeError(f'dataset must a str, but got {type(dataset)}') - return labels diff --git a/spaces/TH5314/newbing/src/lib/hooks/use-enter-submit.tsx b/spaces/TH5314/newbing/src/lib/hooks/use-enter-submit.tsx deleted file mode 100644 index d66b2d3253baff164235d4ca791aae6d84721835..0000000000000000000000000000000000000000 --- a/spaces/TH5314/newbing/src/lib/hooks/use-enter-submit.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import { useRef, type RefObject } from 'react' - -export function useEnterSubmit(): { - formRef: RefObject - onKeyDown: (event: React.KeyboardEvent) => void -} { - const formRef = useRef(null) - - const handleKeyDown = ( - event: React.KeyboardEvent - ): void => { - if ( - event.key === 'Enter' && - !event.shiftKey && - !event.nativeEvent.isComposing - ) { - formRef.current?.requestSubmit() - event.preventDefault() - } - } - - return { formRef, onKeyDown: handleKeyDown } -} diff --git a/spaces/TensoraCO/code-explainer/README.md b/spaces/TensoraCO/code-explainer/README.md deleted file mode 100644 index ffbbc7167bdd32eb776999693d5e322e88e1a9d9..0000000000000000000000000000000000000000 --- a/spaces/TensoraCO/code-explainer/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Code Explainer -emoji: 🪞 -colorFrom: red -colorTo: purple -sdk: gradio -sdk_version: 3.0.24 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: os1187/code-explainer ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Tetel/secondbing/EdgeGPT/locale.py b/spaces/Tetel/secondbing/EdgeGPT/locale.py deleted file mode 100644 index c57dc26e143e4331af2d456fefe16c71eca10e2c..0000000000000000000000000000000000000000 --- a/spaces/Tetel/secondbing/EdgeGPT/locale.py +++ /dev/null @@ -1,85 +0,0 @@ -from enum import Enum - -try: - from typing import Union, Literal -except ImportError: - from typing_extensions import Literal -from typing import Optional - - -class LocationHint(Enum): - USA = { - "locale": "en-US", - "LocationHint": [ - { - "country": "United States", - "state": "California", - "city": "Los Angeles", - "timezoneoffset": 8, - "countryConfidence": 8, - "Center": { - "Latitude": 34.0536909, - "Longitude": -118.242766, - }, - "RegionType": 2, - "SourceType": 1, - }, - ], - } - CHINA = { - "locale": "zh-CN", - "LocationHint": [ - { - "country": "China", - "state": "", - "city": "Beijing", - "timezoneoffset": 8, - "countryConfidence": 8, - "Center": { - "Latitude": 39.9042, - "Longitude": 116.4074, - }, - "RegionType": 2, - "SourceType": 1, - }, - ], - } - EU = { - "locale": "en-IE", - "LocationHint": [ - { - "country": "Norway", - "state": "", - "city": "Oslo", - "timezoneoffset": 1, - "countryConfidence": 8, - "Center": { - "Latitude": 59.9139, - "Longitude": 10.7522, - }, - "RegionType": 2, - "SourceType": 1, - }, - ], - } - UK = { - "locale": "en-GB", - "LocationHint": [ - { - "country": "United Kingdom", - "state": "", - "city": "London", - "timezoneoffset": 0, - "countryConfidence": 8, - "Center": { - "Latitude": 51.5074, - "Longitude": -0.1278, - }, - "RegionType": 2, - "SourceType": 1, - }, - ], - } - - -LOCATION_HINT_TYPES = Optional[Union[LocationHint, Literal["USA", "CHINA", "EU", "UK"]]] diff --git a/spaces/TheBritishLibrary/British-Library-books-genre-classifier-v2/app.py b/spaces/TheBritishLibrary/British-Library-books-genre-classifier-v2/app.py deleted file mode 100644 index 3d795b1c5f63e0495bb91408e361f5599396bbb6..0000000000000000000000000000000000000000 --- 
a/spaces/TheBritishLibrary/British-Library-books-genre-classifier-v2/app.py +++ /dev/null @@ -1,89 +0,0 @@ -import gradio as gr -from transformers import pipeline -from transformers import AutoTokenizer, AutoModelForSequenceClassification - - -sample_text = [ - [ - "Poems on various subjects. Whereto is prefixed a short essay on the structure of English verse" - ], - [ - "Journal of a Residence in China and the neighbouring countries from 1830 to 1833. With an introductory essay by the Hon. and Rev. Baptist Wriothesley Noel. [With a map.]" - ], - ["The Adventures of Oliver Twist. [With plates.]"], - ["['The Adventures of Sherlock Holmes', 'Single Works']"], - [ - "['Coal, Iron, and Oil; or, the Practical American miner. A plain and popular work on our mines and mineral resources ... With numerous maps and engravings, etc']" - ], - [ - "Summer Travelling in Iceland; being the narrative of two journeys across the island ... With a chapter on Askja by E. Delmar Morgan ... Containing also a literal translation of three sagas. Maps, etc'" - ], - [ - "History of the Monument. With a brief account of the Great Fire of London, which it commemorates. By Charles Welch. (With illustrations and a map of Old London.)", - ], - ["The history and antiquities of Newbury and its environs [By E. W. Gray.]"], - ["""A Christmas Carol"""], -] - -description = """ -V2 of a British Library Books genre detection model. The interpretation interface helps show what words the model is using to make its predictions. Words highlighted in red contributed to the model being more confident about a prediction. The intensity of colour corresponds to the importance of that part of the input. The words that decrease the label confidence are highlighted in blue.""" - -article = """ - -

-# British Library Books genre detection demo

      -This demo allows you to play with a 'genre' detection model which has been trained to predict, from the title of a book, whether it is 'fiction' or 'non-fiction'. - - -The [model](https://huggingface.co/TheBritishLibrary/bl-books-genre) was trained on training data drawn from [digitised books](https://www.bl.uk/collection-guides/digitised-printed-books) at the British Library. These Books are mainly from the 19th Century. -The demo also shows you which parts of the input the model is using most to make its prediction. The examples include titles from the BL books collection. You may notice that the model makes mistakes on short titles in particular, this can partly be explained by the title format in the original data. For example the novel *'Vanity Fair'* by William Makepeace Thackeray -is found in the training data as: - -``` -Vanity Fair. A novel without a hero ... With all the original illustrations by the author, etc -``` - -You can see that the model gets a bit of help with the genre here 😉. Since the model was trained for a very particular dataset and task it might not work well on titles that don't match this original corpus. - -## Background - -This model was developed as part of work by the [Living with Machines](https://livingwithmachines.ac.uk/). The process of training the model and working with the data is documented in a [tutorial](github.com/living-with-machines/genre-classification). - -## Model description - -This model is intended to predict, from the title of a book, whether it is 'fiction' or 'non-fiction'. This model was trained on data created from the [Digitised printed books (18th-19th Century)](https://www.bl.uk/collection-guides/digitised-printed-books) book collection. -This dataset is dominated by English language books though it includes books in several other languages in much smaller numbers. You can find more information about the model [here](https://huggingface.co/BritishLibraryLabs/bl-books-genre) - -## Training data - -The model is trained on a particular collection of books digitised by the British Library. As a result, the model may do less well on titles that look different to this data. In particular, the training data, was mostly English, and mostly from the 19th Century. The model is likely to do less well with non-English languages and book titles which fall outside of the 19th Century. Since the data was derived from books catalogued by the British Library it is also possible the model will perform less well for books held by other institutions if, for example, they catalogue book titles in different ways, or have different biases in the types of books they hold. Some of the data was generated using weak supervision. You can learn more about how this was done [here](https://living-with-machines.github.io/genre-classification/04_snorkel.html) - -### Credits - ->This work was partly supported by [Living with Machines](https://livingwithmachines.ac.uk/). This project, funded by the UK Research and Innovation (UKRI) Strategic Priority Fund, is a multidisciplinary collaboration delivered by the Arts and Humanities Research Council (AHRC), with The Alan Turing Institute, the British Library and the Universities of Cambridge, East Anglia, Exeter, and Queen Mary University of London. 
-""" - - -tokenizer = AutoTokenizer.from_pretrained("TheBritishLibrary/bl-books-genre") - -model = AutoModelForSequenceClassification.from_pretrained( - "TheBritishLibrary/bl-books-genre" -) -classifier = pipeline("text-classification", model=model, tokenizer=tokenizer, top_k=10) - - -def predict(text): - predictions = classifier(text) - return {pred["label"]: float(pred["score"]) for pred in predictions} - - -gr.Interface( - predict, - inputs=gr.Textbox(label="Book title"), - outputs="label", - interpretation="shap", - num_shap=10.0, - theme="huggingface", - examples=sample_text, - description=description, - article=article, -).launch(enable_queue=True) diff --git a/spaces/WUXIAOMO/stabilityai-stable-diffusion-2-1-test-space/app.py b/spaces/WUXIAOMO/stabilityai-stable-diffusion-2-1-test-space/app.py deleted file mode 100644 index 0160420876923d89f2ab5fccb9f4d13725e29972..0000000000000000000000000000000000000000 --- a/spaces/WUXIAOMO/stabilityai-stable-diffusion-2-1-test-space/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/stabilityai/stable-diffusion-2-1").launch() \ No newline at end of file diff --git a/spaces/Y-T-G/Blur-Anything/tracker/util/range_transform.py b/spaces/Y-T-G/Blur-Anything/tracker/util/range_transform.py deleted file mode 100644 index d5f678212d8fe0754c9250144ad1985ad32d0643..0000000000000000000000000000000000000000 --- a/spaces/Y-T-G/Blur-Anything/tracker/util/range_transform.py +++ /dev/null @@ -1,12 +0,0 @@ -import torchvision.transforms as transforms - -im_mean = (124, 116, 104) - -im_normalization = transforms.Normalize( - mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] -) - -inv_im_trans = transforms.Normalize( - mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225], - std=[1 / 0.229, 1 / 0.224, 1 / 0.225], -) diff --git a/spaces/YUANAI/DiffspeechResearch/tasks/vocoder/hifigan.py b/spaces/YUANAI/DiffspeechResearch/tasks/vocoder/hifigan.py deleted file mode 100644 index a07370ab84f2d5ba6b20cc37db9773c1c2879b73..0000000000000000000000000000000000000000 --- a/spaces/YUANAI/DiffspeechResearch/tasks/vocoder/hifigan.py +++ /dev/null @@ -1,63 +0,0 @@ -import torch.nn.functional as F -from torch import nn - -from modules.vocoder.hifigan.hifigan import HifiGanGenerator, MultiPeriodDiscriminator, MultiScaleDiscriminator, \ - generator_loss, feature_loss, discriminator_loss -from modules.vocoder.hifigan.mel_utils import mel_spectrogram -from modules.vocoder.hifigan.stft_loss import MultiResolutionSTFTLoss -from tasks.vocoder.vocoder_base import VocoderBaseTask -from utils.commons.hparams import hparams -from utils.nn.model_utils import print_arch - - -class HifiGanTask(VocoderBaseTask): - def build_model(self): - self.model_gen = HifiGanGenerator(hparams) - self.model_disc = nn.ModuleDict() - self.model_disc['mpd'] = MultiPeriodDiscriminator() - self.model_disc['msd'] = MultiScaleDiscriminator() - self.stft_loss = MultiResolutionSTFTLoss() - print_arch(self.model_gen) - if hparams['load_ckpt'] != '': - self.load_ckpt(hparams['load_ckpt'], 'model_gen', 'model_gen', force=True, strict=True) - self.load_ckpt(hparams['load_ckpt'], 'model_disc', 'model_disc', force=True, strict=True) - return self.model_gen - - def _training_step(self, sample, batch_idx, optimizer_idx): - mel = sample['mels'] - y = sample['wavs'] - f0 = sample['f0'] - loss_output = {} - if optimizer_idx == 0: - ####################### - # Generator # - ####################### - y_ = self.model_gen(mel, f0) - y_mel = mel_spectrogram(y.squeeze(1), hparams).transpose(1, 
2) - y_hat_mel = mel_spectrogram(y_.squeeze(1), hparams).transpose(1, 2) - loss_output['mel'] = F.l1_loss(y_hat_mel, y_mel) * hparams['lambda_mel'] - _, y_p_hat_g, fmap_f_r, fmap_f_g = self.model_disc['mpd'](y, y_, mel) - _, y_s_hat_g, fmap_s_r, fmap_s_g = self.model_disc['msd'](y, y_, mel) - loss_output['a_p'] = generator_loss(y_p_hat_g) * hparams['lambda_adv'] - loss_output['a_s'] = generator_loss(y_s_hat_g) * hparams['lambda_adv'] - if hparams['use_fm_loss']: - loss_output['fm_f'] = feature_loss(fmap_f_r, fmap_f_g) - loss_output['fm_s'] = feature_loss(fmap_s_r, fmap_s_g) - if hparams['use_ms_stft']: - loss_output['sc'], loss_output['mag'] = self.stft_loss(y.squeeze(1), y_.squeeze(1)) - self.y_ = y_.detach() - self.y_mel = y_mel.detach() - self.y_hat_mel = y_hat_mel.detach() - else: - ####################### - # Discriminator # - ####################### - y_ = self.y_ - # MPD - y_p_hat_r, y_p_hat_g, _, _ = self.model_disc['mpd'](y, y_.detach(), mel) - loss_output['r_p'], loss_output['f_p'] = discriminator_loss(y_p_hat_r, y_p_hat_g) - # MSD - y_s_hat_r, y_s_hat_g, _, _ = self.model_disc['msd'](y, y_.detach(), mel) - loss_output['r_s'], loss_output['f_s'] = discriminator_loss(y_s_hat_r, y_s_hat_g) - total_loss = sum(loss_output.values()) - return total_loss, loss_output diff --git a/spaces/Yiqin/ChatVID/model/fastchat/data/inspect.py b/spaces/Yiqin/ChatVID/model/fastchat/data/inspect.py deleted file mode 100644 index 7f63eb0bc3c29efa185306cef247b9cbd6afdfe2..0000000000000000000000000000000000000000 --- a/spaces/Yiqin/ChatVID/model/fastchat/data/inspect.py +++ /dev/null @@ -1,23 +0,0 @@ -""" -Usage: -python3 -m fastchat.data.inspect --in sharegpt_20230322_clean_lang_split.json -""" -import argparse -import json - -import tqdm - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--in-file", type=str, required=True) - parser.add_argument("--begin", type=int) - args = parser.parse_args() - - content = json.load(open(args.in_file, "r")) - for sample in tqdm.tqdm(content[args.begin :]): - print(f"id: {sample['id']}") - for conv in sample["conversations"]: - print(conv["from"] + ": ") - print(conv["value"]) - input() diff --git a/spaces/Yuliang/ICON/lib/pymaf/models/maf_extractor.py b/spaces/Yuliang/ICON/lib/pymaf/models/maf_extractor.py deleted file mode 100644 index b5ca2279b5ca470b5abc8b3c477951ffcac323a8..0000000000000000000000000000000000000000 --- a/spaces/Yuliang/ICON/lib/pymaf/models/maf_extractor.py +++ /dev/null @@ -1,137 +0,0 @@ -# This script is borrowed and extended from https://github.com/shunsukesaito/PIFu/blob/master/lib/model/SurfaceClassifier.py - -from packaging import version -import torch -import scipy -import numpy as np -import torch.nn as nn -import torch.nn.functional as F - -from lib.common.config import cfg -from lib.pymaf.utils.geometry import projection -from lib.pymaf.core.path_config import MESH_DOWNSAMPLEING - -import logging - -logger = logging.getLogger(__name__) - - -class MAF_Extractor(nn.Module): - ''' Mesh-aligned Feature Extrator - - As discussed in the paper, we extract mesh-aligned features based on 2D projection of the mesh vertices. - The features extrated from spatial feature maps will go through a MLP for dimension reduction. 
- ''' - - def __init__(self, device=torch.device('cuda')): - super().__init__() - - self.device = device - self.filters = [] - self.num_views = 1 - filter_channels = cfg.MODEL.PyMAF.MLP_DIM - self.last_op = nn.ReLU(True) - - for l in range(0, len(filter_channels) - 1): - if 0 != l: - self.filters.append( - nn.Conv1d(filter_channels[l] + filter_channels[0], - filter_channels[l + 1], 1)) - else: - self.filters.append( - nn.Conv1d(filter_channels[l], filter_channels[l + 1], 1)) - - self.add_module("conv%d" % l, self.filters[l]) - - self.im_feat = None - self.cam = None - - # downsample SMPL mesh and assign part labels - # from https://github.com/nkolot/GraphCMR/blob/master/data/mesh_downsampling.npz - smpl_mesh_graph = np.load(MESH_DOWNSAMPLEING, - allow_pickle=True, - encoding='latin1') - - A = smpl_mesh_graph['A'] - U = smpl_mesh_graph['U'] - D = smpl_mesh_graph['D'] # shape: (2,) - - # downsampling - ptD = [] - for i in range(len(D)): - d = scipy.sparse.coo_matrix(D[i]) - i = torch.LongTensor(np.array([d.row, d.col])) - v = torch.FloatTensor(d.data) - ptD.append(torch.sparse.FloatTensor(i, v, d.shape)) - - # downsampling mapping from 6890 points to 431 points - # ptD[0].to_dense() - Size: [1723, 6890] - # ptD[1].to_dense() - Size: [431. 1723] - Dmap = torch.matmul(ptD[1].to_dense(), - ptD[0].to_dense()) # 6890 -> 431 - self.register_buffer('Dmap', Dmap) - - def reduce_dim(self, feature): - ''' - Dimension reduction by multi-layer perceptrons - :param feature: list of [B, C_s, N] point-wise features before dimension reduction - :return: [B, C_p x N] concatantion of point-wise features after dimension reduction - ''' - y = feature - tmpy = feature - for i, f in enumerate(self.filters): - y = self._modules['conv' + - str(i)](y if i == 0 else torch.cat([y, tmpy], 1)) - if i != len(self.filters) - 1: - y = F.leaky_relu(y) - if self.num_views > 1 and i == len(self.filters) // 2: - y = y.view(-1, self.num_views, y.shape[1], - y.shape[2]).mean(dim=1) - tmpy = feature.view(-1, self.num_views, feature.shape[1], - feature.shape[2]).mean(dim=1) - - y = self.last_op(y) - - y = y.view(y.shape[0], -1) - return y - - def sampling(self, points, im_feat=None, z_feat=None): - ''' - Given 2D points, sample the point-wise features for each point, - the dimension of point-wise features will be reduced from C_s to C_p by MLP. - Image features should be pre-computed before this call. - :param points: [B, N, 2] image coordinates of points - :im_feat: [B, C_s, H_s, W_s] spatial feature maps - :return: [B, C_p x N] concatantion of point-wise features after dimension reduction - ''' - if im_feat is None: - im_feat = self.im_feat - - batch_size = im_feat.shape[0] - - if version.parse(torch.__version__) >= version.parse('1.3.0'): - # Default grid_sample behavior has changed to align_corners=False since 1.3.0. - point_feat = torch.nn.functional.grid_sample( - im_feat, points.unsqueeze(2), align_corners=True)[..., 0] - else: - point_feat = torch.nn.functional.grid_sample( - im_feat, points.unsqueeze(2))[..., 0] - - mesh_align_feat = self.reduce_dim(point_feat) - return mesh_align_feat - - def forward(self, p, s_feat=None, cam=None, **kwargs): - ''' Returns mesh-aligned features for the 3D mesh points. 
- - Args: - p (tensor): [B, N_m, 3] mesh vertices - s_feat (tensor): [B, C_s, H_s, W_s] spatial feature maps - cam (tensor): [B, 3] camera - Return: - mesh_align_feat (tensor): [B, C_p x N_m] mesh-aligned features - ''' - if cam is None: - cam = self.cam - p_proj_2d = projection(p, cam, retain_z=False) - mesh_align_feat = self.sampling(p_proj_2d, s_feat) - return mesh_align_feat diff --git a/spaces/abdvl/datahub_qa_bot/docs/what/relationship.md b/spaces/abdvl/datahub_qa_bot/docs/what/relationship.md deleted file mode 100644 index 1908bbd6ce75f0a76e65e92c84a8fa375d16b21c..0000000000000000000000000000000000000000 --- a/spaces/abdvl/datahub_qa_bot/docs/what/relationship.md +++ /dev/null @@ -1,106 +0,0 @@ -# What is a relationship? - -A relationship is a named association between exactly two [entities](entity.md), a source and a destination. - -![metadata-modeling](../imgs/metadata-modeling.png) - -From the above graph, a `Group` entity can be linked to a `User` entity via a `HasMember` relationship. -Note that the name of the relationship reflects the direction, i.e. pointing from `Group` to `User`. -This is because the actual metadata aspect holding this information is associated with `Group`, rather than `User`. -Had the direction been reversed, the relationship would have been named `IsMemberOf` instead. -See [Direction of Relationships](#direction-of-relationships) for more discussion on relationship directionality. -A specific instance of a relationship, e.g. `urn:li:corpGroup:group1` has a member `urn:li:corpuser:user1`, -corresponds to an edge in the metadata graph. - -Similar to an entity, a relationship can also be associated with optional attributes that are derived from the metadata. -For example, from the `Membership` metadata aspect shown below, we’re able to derive the `HasMember` relationship that links a specific `Group` to a specific `User`. We can also include additional attributes on the relationship, e.g. importance, which corresponds to the position of the specific member in the original membership array. This allows complex graph queries that traverse only relationships matching certain criteria, e.g. "return only the top-5 most important members of this group." -Similar to the entity attributes, relationship attributes should only be added based on the expected query patterns to reduce the indexing cost. - -``` -namespace: com.linkedin.group - -import com.linkedin.common.AuditStamp -import com.linkedin.common.CorpuserUrn - -/** - * The membership metadata for a group - */ -record Membership { - - /** Audit stamp for the last change */ - modified: AuditStamp - - /** Admin of the group */ - admin: CorpuserUrn - - /** Members of the group, ordered in descending importance */ - members: array[CorpuserUrn] -} -``` - -Relationships are meant to be "entity-neutral". In other words, one would expect to use the same `OwnedBy` relationship to link a `Dataset` to a `User` and to link a `Dashboard` to a `User`. As Pegasus doesn’t allow typing a field using multiple URNs (because they’re all essentially strings), we resort to using the generic URN type for the source and destination. -We also introduce a `@pairings` [annotation](https://linkedin.github.io/rest.li/pdl_migration#shorthand-for-custom-properties) to limit the allowed source and destination URN types.
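To make the aspect-to-relationship derivation described above concrete, the sketch below shows how `HasMember` edges, each carrying an `importance` attribute, could be produced from a `Membership` aspect. This is a minimal, hypothetical Python sketch: the `Membership`/`HasMember` dataclasses and the `derive_has_member` helper are illustrative stand-ins, not part of DataHub's actual API.

```
# Hypothetical sketch only: these names are illustrative, not DataHub APIs.
from dataclasses import dataclass
from typing import List


@dataclass
class Membership:
    """Simplified stand-in for the Membership aspect stored on a Group."""
    admin: str            # corpuser URN
    members: List[str]    # corpuser URNs, ordered by descending importance


@dataclass
class HasMember:
    """One edge in the metadata graph, plus a queryable attribute."""
    source: str           # group URN (the entity the aspect is attached to)
    destination: str      # member's corpuser URN
    importance: int       # position of the member in the original array


def derive_has_member(group_urn: str, aspect: Membership) -> List[HasMember]:
    """Emit one HasMember edge per member of the group."""
    return [
        HasMember(source=group_urn, destination=member_urn, importance=rank)
        for rank, member_urn in enumerate(aspect.members)
    ]


edges = derive_has_member(
    "urn:li:corpGroup:group1",
    Membership(
        admin="urn:li:corpuser:admin1",
        members=["urn:li:corpuser:user1", "urn:li:corpuser:user2"],
    ),
)
# A query such as "top-5 most important members" can then filter on importance < 5.
```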
- -While it’s possible to model relationships in rest.li as [association resources](https://linkedin.github.io/rest.li/modeling/modeling#association), which often get stored as mapping tables, it is far more common to model them as "foreign key" fields in a metadata aspect. For instance, the `Ownership` aspect is likely to contain an array of the owners’ corpuser URNs. - -Below is an example of how a relationship is modeled in PDL. Note that: -1. As the `source` and `destination` are of generic URN type, we’re able to factor them out to a common `BaseRelationship` model. -2. Each model is expected to have a `@pairings` annotation that is an array of all allowed source-destination URN pairs. -3. Unlike entity attributes, there’s no requirement to make all relationship attributes optional, since relationships do not support partial updates. - -``` -namespace com.linkedin.metadata.relationship - -import com.linkedin.common.Urn - -/** - * Common fields that apply to all relationships - */ -record BaseRelationship { - - /** - * Urn for the source of the relationship - */ - source: Urn - - /** - * Urn for the destination of the relationship - */ - destination: Urn -} -``` - -``` -namespace com.linkedin.metadata.relationship - -/** - * Data model for a has-member relationship - */ -@pairings = [ { - "destination" : "com.linkedin.common.urn.CorpGroupUrn", - "source" : "com.linkedin.common.urn.CorpUserUrn" -} ] -record HasMembership includes BaseRelationship -{ - /** - * The importance of the membership - */ - importance: int -} -``` - -## Direction of Relationships - -As relationships are modeled as directed edges between nodes, it’s natural to ask which way an edge should point, -or whether there should be edges going both ways. The answer is: it doesn’t really matter. It’s more an aesthetic choice than a technical one. - -For one, the actual direction doesn’t really impact the execution of graph queries. Most graph DBs are fully capable of traversing edges in the reverse direction efficiently. - -That being said, there’s generally a more "natural" way to specify the direction of a relationship, which closely relates to how the metadata is stored. For example, the membership information for an LDAP group is generally stored as a list in the group’s metadata. As a result, it’s more natural to model a `HasMember` relationship that points from a group to a member, instead of an `IsMemberOf` relationship pointing from member to group. - -Since all relationships are explicitly declared, it’s fairly easy for a user to discover what relationships are available and their directionality by inspecting -the [relationships directory](../../metadata-models/src/main/pegasus/com/linkedin/metadata/relationship). It’s also possible to provide a UI for the catalog of entities and relationships for analysts who are interested in building complex graph queries to gain insights into the metadata. - -## High Cardinality Relationships - -See [this doc](../advanced/high-cardinality.md) for suggestions on how to best model relationships with high cardinality. diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/visualization/color.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/visualization/color.py deleted file mode 100644 index 9041e0e6b7581c3356795d6a3c5e84667c88f025..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/visualization/color.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved.
-from enum import Enum - -import numpy as np - -from annotator.uniformer.mmcv.utils import is_str - - -class Color(Enum): - """An enum that defines common colors. - - Contains red, green, blue, cyan, yellow, magenta, white and black. - """ - red = (0, 0, 255) - green = (0, 255, 0) - blue = (255, 0, 0) - cyan = (255, 255, 0) - yellow = (0, 255, 255) - magenta = (255, 0, 255) - white = (255, 255, 255) - black = (0, 0, 0) - - -def color_val(color): - """Convert various input to color tuples. - - Args: - color (:obj:`Color`/str/tuple/int/ndarray): Color inputs - - Returns: - tuple[int]: A tuple of 3 integers indicating BGR channels. - """ - if is_str(color): - return Color[color].value - elif isinstance(color, Color): - return color.value - elif isinstance(color, tuple): - assert len(color) == 3 - for channel in color: - assert 0 <= channel <= 255 - return color - elif isinstance(color, int): - assert 0 <= color <= 255 - return color, color, color - elif isinstance(color, np.ndarray): - assert color.ndim == 1 and color.size == 3 - assert np.all((color >= 0) & (color <= 255)) - color = color.astype(np.uint8) - return tuple(color) - else: - raise TypeError(f'Invalid type for color: {type(color)}') diff --git a/spaces/abidlabs/docquery/app.py b/spaces/abidlabs/docquery/app.py deleted file mode 100644 index 4289ceac77757d87913e92789cba4ca85035b1af..0000000000000000000000000000000000000000 --- a/spaces/abidlabs/docquery/app.py +++ /dev/null @@ -1,16 +0,0 @@ -import gradio as gr -from transformers import pipeline - -nlp = pipeline( - "document-question-answering", - model="impira/layoutlm-document-qa", -) - -def predict(img, text): - prediction = nlp( - img, - text - ) - return prediction[0]['answer'] - -gr.Interface(predict, [gr.Image(type="filepath"), gr.Textbox(label="Question")], gr.Textbox()).launch() \ No newline at end of file diff --git a/spaces/adityapathakk/crop-health/README.md b/spaces/adityapathakk/crop-health/README.md deleted file mode 100644 index e61d25795ed80ffd9616d60dcc4a9040fcea1096..0000000000000000000000000000000000000000 --- a/spaces/adityapathakk/crop-health/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Crop Health -emoji: 🏢 -colorFrom: purple -colorTo: pink -sdk: gradio -sdk_version: 2.8.12 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/akhaliq/Real-ESRGAN/realesrgan/__init__.py b/spaces/akhaliq/Real-ESRGAN/realesrgan/__init__.py deleted file mode 100644 index bfea78f284116dee22510d4aa91f9e44afb7d472..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Real-ESRGAN/realesrgan/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# flake8: noqa -from .archs import * -from .data import * -from .models import * -from .utils import * -#from .version import * diff --git a/spaces/akhaliq/SummerTime/model/third_party/HMNet/Models/Networks/MeetingNet_Transformer.py b/spaces/akhaliq/SummerTime/model/third_party/HMNet/Models/Networks/MeetingNet_Transformer.py deleted file mode 100644 index f4e3e33c18b65e84a7b360aa1c5267051a586916..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/SummerTime/model/third_party/HMNet/Models/Networks/MeetingNet_Transformer.py +++ /dev/null @@ -1,1528 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT license. 
- -import copy -import math -import numpy as np -import random -import time -import torch -from torch.autograd import Variable -from torch.distributions import Categorical -import torch.nn as nn -import torch.nn.init as init -import torch.nn.functional as F -from torch.nn.parameter import Parameter -from model.third_party.HMNet.Models.Networks.Layers import dropout, set_seq_dropout -from model.third_party.HMNet.Models.Networks.Transformer import ( - EncoderBlock, - LayerNorm, - Embedder, - Splitter, - Attention, - MLP, -) -from model.third_party.HMNet.ThirdParty.Huggingface.Transformers.src.transformers import ( - tokenization_transfo_xl, -) -from model.third_party.HMNet.ThirdParty.Huggingface.Transformers.src.transformers.modeling_encoder_decoder import ( - calc_banned_ngram_tokens, - calc_banned_bad_words_ids, - top_k_top_p_filtering, - BeamHypotheses, -) -import sys -import os - -# These two dicts are adapted from SpaCy 2.3.1, since HMNet's embedding for POS and ENT is fixed -POS = { - "": 0, - "$": 1, - "''": 2, - ",": 3, - "-LRB-": 4, - "-RRB-": 5, - ".": 6, - ":": 7, - "ADD": 8, - "AFX": 9, - "CC": 10, - "CD": 11, - "DT": 12, - "EX": 13, - "FW": 14, - "HYPH": 15, - "IN": 16, - "JJ": 17, - "JJR": 18, - "JJS": 19, - "LS": 20, - "MD": 21, - "NFP": 22, - "NN": 23, - "NNP": 24, - "NNPS": 25, - "NNS": 26, - "PDT": 27, - "POS": 28, - "PRP": 29, - "PRP$": 30, - "RB": 31, - "RBR": 32, - "RBS": 33, - "RP": 34, - "SYM": 35, - "TO": 36, - "UH": 37, - "VB": 38, - "VBD": 39, - "VBG": 40, - "VBN": 41, - "VBP": 42, - "VBZ": 43, - "WDT": 44, - "WP": 45, - "WP$": 46, - "WRB": 47, - "XX": 48, - "_SP": 49, - "``": 50, -} -ENT = { - "": 0, - "B-ORG": 1, - "B-DATE": 2, - "B-PERSON": 3, - "B-GPE": 4, - "B-MONEY": 5, - "B-CARDINAL": 6, - "B-NORP": 7, - "B-PERCENT": 8, - "B-WORK_OF_ART": 9, - "B-LOC": 10, - "B-TIME": 11, - "B-QUANTITY": 12, - "B-FAC": 13, - "B-EVENT": 14, - "B-ORDINAL": 15, - "B-PRODUCT": 16, - "B-LAW": 17, - "B-LANGUAGE": 18, - "I-ORG": 19, - "I-DATE": 20, - "I-PERSON": 21, - "I-GPE": 22, - "I-MONEY": 23, - "I-CARDINAL": 24, - "I-NORP": 25, - "I-PERCENT": 26, - "I-WORK_OF_ART": 27, - "I-LOC": 28, - "I-TIME": 29, - "I-QUANTITY": 30, - "I-FAC": 31, - "I-EVENT": 32, - "I-ORDINAL": 33, - "I-PRODUCT": 34, - "I-LAW": 35, - "I-LANGUAGE": 36, - "L-ORG": 37, - "L-DATE": 38, - "L-PERSON": 39, - "L-GPE": 40, - "L-MONEY": 41, - "L-CARDINAL": 42, - "L-NORP": 43, - "L-PERCENT": 44, - "L-WORK_OF_ART": 45, - "L-LOC": 46, - "L-TIME": 47, - "L-QUANTITY": 48, - "L-FAC": 49, - "L-EVENT": 50, - "L-ORDINAL": 51, - "L-PRODUCT": 52, - "L-LAW": 53, - "L-LANGUAGE": 54, - "U-ORG": 55, - "U-DATE": 56, - "U-PERSON": 57, - "U-GPE": 58, - "U-MONEY": 59, - "U-CARDINAL": 60, - "U-NORP": 61, - "U-PERCENT": 62, - "U-WORK_OF_ART": 63, - "U-LOC": 64, - "U-TIME": 65, - "U-QUANTITY": 66, - "U-FAC": 67, - "U-EVENT": 68, - "U-ORDINAL": 69, - "U-PRODUCT": 70, - "U-LAW": 71, - "U-LANGUAGE": 72, - "O": 73, -} - - -class MeetingNet_Transformer(nn.Module): - def __init__(self, opt): - super(MeetingNet_Transformer, self).__init__() - - self.opt = opt - self.use_cuda = self.opt["cuda"] == True - self.config = {} - - # load tokenizer - self.tokenizer_class = getattr(tokenization_transfo_xl, opt["PRE_TOKENIZER"]) - self.pretrained_tokenizer_path = os.path.join( - opt["datadir"], opt["PRE_TOKENIZER_PATH"] - ) - if not os.path.isdir(self.pretrained_tokenizer_path): - """ - This if-else statement makes sure the pre-trained tokenizer exists - If it does not exist, it assumes the input string is the HuggingFace tokenizer name, - and downloads 
it from their website. - """ - self.pretrained_tokenizer_path = opt["PRE_TOKENIZER_PATH"] - else: - print("Loading Tokenizer from {}...".format(self.pretrained_tokenizer_path)) - - # here is a simple workaround to make sure all special tokens are not None - self.tokenizer = self.tokenizer_class.from_pretrained( - self.pretrained_tokenizer_path - ) - special_tokens_tuple_list = [ - ("eos_token", 128), - ("unk_token", 129), - ("pad_token", 130), - ("bos_token", 131), - ] - - for special_token_name, special_token_id_offset in special_tokens_tuple_list: - if getattr(self.tokenizer, special_token_name) == None: - setattr( - self.tokenizer, - special_token_name, - self.tokenizer.convert_ids_to_tokens( - len(self.tokenizer) - special_token_id_offset - ), - ) - self.config[special_token_name] = self.tokenizer.convert_ids_to_tokens( - len(self.tokenizer) - special_token_id_offset - ) - self.config[special_token_name + "_id"] = ( - len(self.tokenizer) - special_token_id_offset - ) - - self.vocab_size = self.tokenizer.vocab_size - opt["vocab_size"] = self.vocab_size - self.role_size = int(opt["ROLE_SIZE"]) - vocab_dim = int(opt["VOCAB_DIM"]) - role_dim = int(opt["ROLE_DIM"]) - opt["transformer_embed_dim"] = vocab_dim - embed = nn.Embedding( - self.vocab_size, vocab_dim, padding_idx=self.tokenizer.pad_token_id - ) - nn.init.normal_(embed.weight, std=0.02) - embedder = Embedder(opt, embed) - role_embed = nn.Embedding(self.role_size, role_dim, padding_idx=0) - - self.encoder = Encoder( - opt, self.vocab_size, vocab_dim, role_dim, embedder, role_embed - ) - self.decoder = Decoder( - opt, - vocab_dim, - self.vocab_size, - embedder, - self.encoder.token_transformer_dim, - self.encoder.sent_transformer_dim, - ) - - if "PYLEARN_MODEL" in self.opt: - self.from_pretrained(os.path.join(opt["datadir"], opt["PYLEARN_MODEL"])) - - def save_pretrained(self, save_dir): - network_state = dict([(k, v) for k, v in self.state_dict().items()]) - params = { - "state_dict": {"network": network_state}, - "config": self.opt, - } - torch.save(params, os.path.join(save_dir, "model.pt")) - - def from_pretrained(self, load_dir): - checkpoint = torch.load( - os.path.join(load_dir, "model.pt"), - map_location=torch.device("cuda", self.opt["local_rank"]) - if self.use_cuda - else "cpu", - ) - state_dict = checkpoint["state_dict"] - - self.load_state_dict(state_dict["network"]) - - return self - - def get_training_parameters(self): - return [p for p in self.parameters() if p.requires_grad] - - def forward(self, batch, beam_search=False, max_sent_len=None): - if beam_search: - # return self.beam_search(batch, max_sent_len) - return self.generate(batch, max_sent_len) - - outputs = self._forward(**batch) - vocab_logprob = outputs[0] - - # assume all encoder-decoder model input has BOS and EOS - # otherwise the loss will be ill-defined - return vocab_logprob - - """ - Input: - encoders_input_ids = 1 * num_turns * x_len (word_ids) - encoders_input_roles = 1 * num_turns (role_ids) - encoders_input_pos = 1 * num_turns * x_len (pos_ids) - encoders_input_ent = 1 * num_turns * x_len (ent_ids) - decoder_input_ids = 1 * y_len (word_ids) - Output: - vocab_logprob = 1 x y_len x vocab_size - """ - - def _forward(self, **kwargs): - - encoder_input_ids = kwargs.pop("encoder_input_ids") - encoder_input_roles = kwargs.pop("encoder_input_roles") - encoder_input_pos = kwargs.pop("encoder_input_pos") - encoder_input_ent = kwargs.pop("encoder_input_ent") - decoder_input_ids = kwargs.pop("decoder_input_ids") - - token_encoder_outputs, sent_encoder_outputs 
= self.encoder( - encoder_input_ids, encoder_input_roles, encoder_input_pos, encoder_input_ent - ) - vocab_logprob = self.decoder( - token_encoder_outputs, sent_encoder_outputs, decoder_input_ids - ) - return vocab_logprob, (token_encoder_outputs, sent_encoder_outputs) - - def generate(self, batch, max_sent_len): - self.eval() - self.beam_width = int(self.opt["BEAM_WIDTH"]) - - input_ids = batch["encoder_input_ids"] - input_roles = batch["encoder_input_roles"] - input_pos = batch["encoder_input_pos"] - input_ent = batch["encoder_input_ent"] - - batch_size = input_ids.shape[0] - - num_return_sequences = self.opt.get("NUM_RETURN_SEQUENCES", 1) - outputs = self._generate( - input_ids=input_ids, - input_roles=input_roles, - input_pos=input_pos, - input_ent=input_ent, - min_length=self.opt.get("MIN_GEN_LENGTH", None), - max_length=max_sent_len, - num_beams=self.beam_width, - bad_words_ids=None, - bos_token_id=self.tokenizer.bos_token_id, - decoder_start_token_id=self.tokenizer.bos_token_id, - eos_token_id=self.tokenizer.eos_token_id, - pad_token_id=self.tokenizer.pad_token_id, - do_sample=self.opt.get("DO_SAMPLE", False), - top_k=self.opt.get("TOP_K", 50), - top_p=self.opt.get("TOP_P", 1), - repetition_penalty=self.opt.get("REPETITION_PENALTY", 1.0), - length_penalty=self.opt.get("LENGTH_PENALTY", 1.0), - no_repeat_ngram_size=self.opt.get("NO_REPEAT_NGRAM_SIZE", 3), - num_return_sequences=num_return_sequences, - ) - - sents = [] - outputs = outputs.view(outputs.shape[0], num_return_sequences, -1) - - for idx in range(batch_size): - # TODO: use real inference scores - candidates = [ - (self.tokenizer.convert_ids_to_tokens(outputs[idx, i, :]), 0.0) - for i in range(num_return_sequences) - ] - sents.append(candidates) - - return sents - - def prepare_inputs_for_generation(self, input_ids, past, attention_mask, **kwargs): - assert past is not None, "past has to be defined for encoder_outputs" - - # first step - if type(past) is tuple: - encoder_outputs = past - else: - encoder_outputs = (past,) - - return { - "decoder_input_ids": input_ids, - "token_encoder_outputs": encoder_outputs[0], - "sent_encoder_outputs": encoder_outputs[1], - } - - def prepare_scores_for_generation(self, scores, **kwargs): - return scores - - def enforce_repetition_penalty_( - self, lprobs, batch_size, num_beams, prev_output_tokens, repetition_penalty - ): - """repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858).""" - for i in range(batch_size * num_beams): - for previous_token in set(prev_output_tokens[i].tolist()): - # if score < 0 then repetition penalty has to multiplied to reduce the previous token probability - if lprobs[i, previous_token] < 0: - lprobs[i, previous_token] *= repetition_penalty - else: - lprobs[i, previous_token] /= repetition_penalty - - @torch.no_grad() - def _generate( - self, - input_ids=None, - input_roles=None, - input_pos=None, - input_ent=None, - max_length=None, - min_length=None, - do_sample=None, - early_stopping=False, - num_beams=None, - temperature=1.0, - top_k=None, - top_p=None, - repetition_penalty=None, - bad_words_ids=None, - bos_token_id=None, - pad_token_id=None, - eos_token_id=None, - length_penalty=None, - no_repeat_ngram_size=None, - num_return_sequences=None, - attention_mask=None, - decoder_start_token_id=None, - ): - r"""Generates sequences for models with a LM head. The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling. 
- - Adapted in part from `Facebook's XLM beam search code`_. - - .. _`Facebook's XLM beam search code`: - https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529 - - - Parameters: - - input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)` - The sequence used as a prompt for the generation. If `None` the method initializes - it as an empty `torch.LongTensor` of shape `(1,)`. - - max_length: (`optional`) int - The max length of the sequence to be generated. Between `min_length` and infinity. Default to 20. - - min_length: (`optional`) int - The min length of the sequence to be generated. Between 0 and infinity. Default to 0. - - do_sample: (`optional`) bool - If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`. - - early_stopping: (`optional`) bool - if set to `True` beam search is stopped when at least `num_beams` sentences finished per batch. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`. - - num_beams: (`optional`) int - Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1. - - temperature: (`optional`) float - The value used to module the next token probabilities. Must be strictly positive. Default to 1.0. - - top_k: (`optional`) int - The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50. - - top_p: (`optional`) float - The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1. - - repetition_penalty: (`optional`) float - The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0. - - pad_token_id: (`optional`) int - Padding token. Default to specicic model pad_token_id or None if it does not exist. - - bos_token_id: (`optional`) int - BOS token. Defaults to `bos_token_id` as defined in the models config. - - eos_token_id: (`optional`) int - EOS token. Defaults to `eos_token_id` as defined in the models config. - - length_penalty: (`optional`) float - Exponential penalty to the length. Default to 1. - - no_repeat_ngram_size: (`optional`) int - If set to int > 0, all ngrams of size `no_repeat_ngram_size` can only occur once. - bad_words_ids: (`optional`) list of lists of int - `bad_words_ids` contains tokens that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use `tokenizer.encode(bad_word, add_prefix_space=True)`. - - num_return_sequences: (`optional`) int - The number of independently computed returned sequences for each element in the batch. Default to 1. - - attention_mask (`optional`) obj: `torch.LongTensor` of same shape as `input_ids` - Mask to avoid performing attention on padding token indices. - Mask values selected in ``[0, 1]``: - ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. - Defaults to `None`. - - `What are attention masks? <../glossary.html#attention-mask>`__ - - decoder_start_token_id=None: (`optional`) int - If an encoder-decoder model starts decoding with a different token than BOS. - Defaults to `None` and is changed to `BOS` later. 
- - Return: - - output: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)` - sequence_length is either equal to max_length or shorter if all batches finished early due to the `eos_token_id` - - Examples:: - - tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer - model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache. - outputs = model.generate(max_length=40) # do greedy decoding - print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True))) - - tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer - model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache. - input_context = 'The dog' - input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context - outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog' - for i in range(3): # 3 output sequences were generated - print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True))) - - tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer - model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache. - input_context = 'The dog' - input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context - outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3) # 3 generate sequences using by sampling - for i in range(3): # 3 output sequences were generated - print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True))) - - tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer - model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache. - input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl - input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context - outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences - print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True))) - - tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer - model = AutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from S3 and cache. 
- input_context = 'My cute dog' # "Legal" is one of the control codes for ctrl - bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']] - input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context - outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated - """ - - max_length = max_length if max_length is not None else self.config.max_length - min_length = min_length if min_length is not None else self.config.min_length - do_sample = do_sample if do_sample is not None else self.config.do_sample - early_stopping = ( - early_stopping if early_stopping is not None else self.config.early_stopping - ) - num_beams = num_beams if num_beams is not None else self.config.num_beams - temperature = ( - temperature if temperature is not None else self.config.temperature - ) - top_k = top_k if top_k is not None else self.config.top_k - top_p = top_p if top_p is not None else self.config.top_p - repetition_penalty = ( - repetition_penalty - if repetition_penalty is not None - else self.config.repetition_penalty - ) - bos_token_id = ( - bos_token_id if bos_token_id is not None else self.config.bos_token_id - ) - pad_token_id = ( - pad_token_id if pad_token_id is not None else self.config.pad_token_id - ) - eos_token_id = ( - eos_token_id if eos_token_id is not None else self.config.eos_token_id - ) - length_penalty = ( - length_penalty if length_penalty is not None else self.config.length_penalty - ) - no_repeat_ngram_size = ( - no_repeat_ngram_size - if no_repeat_ngram_size is not None - else self.config.no_repeat_ngram_size - ) - bad_words_ids = bad_words_ids - num_return_sequences = ( - num_return_sequences - if num_return_sequences is not None - else self.config.num_return_sequences - ) - decoder_start_token_id = ( - decoder_start_token_id - if decoder_start_token_id is not None - else self.config.decoder_start_token_id - ) - - if input_ids is not None: - batch_size = input_ids.shape[0] # overriden by the input batch_size - else: - batch_size = 1 - - assert ( - isinstance(max_length, int) and max_length > 0 - ), "`max_length` should be a strictly positive integer." - assert ( - isinstance(min_length, int) and min_length >= 0 - ), "`min_length` should be a positive integer." - assert isinstance(do_sample, bool), "`do_sample` should be a boolean." - assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean." - assert ( - isinstance(num_beams, int) and num_beams > 0 - ), "`num_beams` should be a strictly positive integer." - assert temperature > 0, "`temperature` should be strictly positive." - assert ( - isinstance(top_k, int) and top_k >= 0 - ), "`top_k` should be a positive integer." - assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1." - assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1." - assert input_ids is not None or ( - isinstance(bos_token_id, int) and bos_token_id >= 0 - ), "If input_ids is not defined, `bos_token_id` should be a positive integer." - assert pad_token_id is None or ( - isinstance(pad_token_id, int) and (pad_token_id >= 0) - ), "`pad_token_id` should be a positive integer." - assert (eos_token_id is None) or ( - isinstance(eos_token_id, int) and (eos_token_id >= 0) - ), "`eos_token_id` should be a positive integer." - assert length_penalty > 0, "`length_penalty` should be strictly positive." 
- assert ( - isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0 - ), "`no_repeat_ngram_size` should be a positive integer." - assert ( - isinstance(num_return_sequences, int) and num_return_sequences > 0 - ), "`num_return_sequences` should be a strictly positive integer." - assert ( - bad_words_ids is None - or isinstance(bad_words_ids, list) - and isinstance(bad_words_ids[0], list) - ), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated" - - if input_ids is None: - assert isinstance(bos_token_id, int) and bos_token_id >= 0, ( - "you should either supply a context to complete as `input_ids` input " - "or a `bos_token_id` (integer >= 0) as a first token to start the generation." - ) - input_ids = torch.full( - (batch_size, 1), - bos_token_id, - dtype=torch.long, - device=next(self.parameters()).device, - ) - else: - assert ( - input_ids.dim() == 3 - ), "Input prompt should be of shape (batch_size, sequence length)." - - # not allow to duplicate outputs when greedy decoding - if do_sample is False: - if num_beams == 1: - # no_beam_search greedy generation conditions - assert ( - num_return_sequences == 1 - ), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1" - - else: - # beam_search greedy generation conditions - assert ( - num_beams >= num_return_sequences - ), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences" - - # create attention mask if necessary - # TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140 - if ( - (attention_mask is None) - and (pad_token_id is not None) - and (pad_token_id in input_ids) - ): - attention_mask = input_ids.ne(pad_token_id).long() - elif attention_mask is None: - attention_mask = input_ids.new_ones(input_ids.shape) - - # set pad_token_id to eos_token_id if not set. 
Important that this is done after - # attention_mask is created - if pad_token_id is None and eos_token_id is not None: - logger.warning( - "Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format( - eos_token_id - ) - ) - pad_token_id = eos_token_id - - # current position and vocab size - vocab_size = self.vocab_size - - # set effective batch size and effective batch multiplier according to do_sample - if do_sample: - effective_batch_size = batch_size * num_return_sequences - effective_batch_mult = num_return_sequences - else: - effective_batch_size = batch_size - effective_batch_mult = 1 - - if decoder_start_token_id is None: - decoder_start_token_id = bos_token_id - - assert ( - decoder_start_token_id is not None - ), "decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation" - - encoder_outputs = self.encoder(input_ids, input_roles, input_pos, input_ent) - - # # Expand input ids if num_beams > 1 or num_return_sequences > 1 - # if num_return_sequences > 1 or num_beams > 1: - # input_sent_len = input_ids.shape[2] - # input_word_len = input_ids.shape[3] - # input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, input_sent_len, input_word_len) - # attention_mask = attention_mask.unsqueeze(1).expand( - # batch_size, effective_batch_mult * num_beams, input_sent_len, input_word_len - # ) - - # input_ids = input_ids.contiguous().view( - # effective_batch_size * num_beams, input_sent_len, input_word_len - # ) # shape: (batch_size * num_return_sequences * num_beams, input_sent_len, input_word_len) - # attention_mask = attention_mask.contiguous().view( - # effective_batch_size * num_beams, input_sent_len, input_word_len - # ) # shape: (batch_size * num_return_sequences * num_beams, input_sent_len, input_word_len) - - # create empty decoder_input_ids - input_ids = torch.full( - (effective_batch_size * num_beams, 1), - decoder_start_token_id, - dtype=torch.long, - device=next(self.parameters()).device, - ) - cur_len = 1 - - assert ( - batch_size == encoder_outputs[0].shape[0] - ), f"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} " - - # expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1) - expanded_batch_idxs = ( - torch.arange(batch_size) - .view(-1, 1) - .repeat(1, num_beams * effective_batch_mult) - .view(-1) - .to(input_ids.device) - ) - # expand encoder_outputs - encoder_outputs = ( - encoder_outputs[0].index_select(0, expanded_batch_idxs), - encoder_outputs[1].index_select(0, expanded_batch_idxs), - ) - - if num_beams > 1: - output = self._generate_beam_search( - input_ids, - cur_len=cur_len, - max_length=max_length, - min_length=min_length, - do_sample=do_sample, - early_stopping=early_stopping, - temperature=temperature, - top_k=top_k, - top_p=top_p, - repetition_penalty=repetition_penalty, - no_repeat_ngram_size=no_repeat_ngram_size, - bad_words_ids=bad_words_ids, - bos_token_id=bos_token_id, - pad_token_id=pad_token_id, - decoder_start_token_id=decoder_start_token_id, - eos_token_id=eos_token_id, - batch_size=effective_batch_size, - num_return_sequences=num_return_sequences, - length_penalty=length_penalty, - num_beams=num_beams, - vocab_size=vocab_size, - encoder_outputs=encoder_outputs, - attention_mask=attention_mask, - ) - else: - output = self._generate_no_beam_search( - input_ids, - cur_len=cur_len, - max_length=max_length, - min_length=min_length, - do_sample=do_sample, 
- temperature=temperature, - top_k=top_k, - top_p=top_p, - repetition_penalty=repetition_penalty, - no_repeat_ngram_size=no_repeat_ngram_size, - bad_words_ids=bad_words_ids, - bos_token_id=bos_token_id, - pad_token_id=pad_token_id, - decoder_start_token_id=decoder_start_token_id, - eos_token_id=eos_token_id, - batch_size=effective_batch_size, - encoder_outputs=encoder_outputs, - attention_mask=attention_mask, - ) - - return output - - def _generate_no_beam_search( - self, - input_ids, - cur_len, - max_length, - min_length, - do_sample, - temperature, - top_k, - top_p, - repetition_penalty, - no_repeat_ngram_size, - bad_words_ids, - bos_token_id, - pad_token_id, - eos_token_id, - decoder_start_token_id, - batch_size, - encoder_outputs, - attention_mask, - ): - """Generate sequences for each example without beam search (num_beams == 1). - All returned sequence are generated independantly. - """ - # length of generated sentences / unfinished sentences - unfinished_sents = input_ids.new(batch_size).fill_(1) - sent_lengths = input_ids.new(batch_size).fill_(max_length) - - past = encoder_outputs # defined for encoder-decoder models, None for decoder-only models - - while cur_len < max_length: - model_inputs = self.prepare_inputs_for_generation( - input_ids, past=past, attention_mask=attention_mask - ) - - outputs = self.decoder(**model_inputs) - next_token_logits = outputs[:, -1, :] - - # repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858) - if repetition_penalty != 1.0: - self.enforce_repetition_penalty_( - next_token_logits, batch_size, 1, input_ids, repetition_penalty - ) - - if no_repeat_ngram_size > 0: - # calculate a list of banned tokens to prevent repetitively generating the same ngrams - # from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345 - banned_tokens = calc_banned_ngram_tokens( - input_ids, batch_size, no_repeat_ngram_size, cur_len - ) - for batch_idx in range(batch_size): - next_token_logits[batch_idx, banned_tokens[batch_idx]] = -float( - "inf" - ) - - if bad_words_ids is not None: - # calculate a list of banned tokens according to bad words - banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids) - - for batch_idx in range(batch_size): - next_token_logits[batch_idx, banned_tokens[batch_idx]] = -float( - "inf" - ) - - # set eos token prob to zero if min_length is not reached - if eos_token_id is not None and cur_len < min_length: - next_token_logits[:, eos_token_id] = -float("inf") - - if do_sample: - # Temperature (higher temperature => more likely to sample low probability tokens) - if temperature != 1.0: - next_token_logits = next_token_logits / temperature - # Top-p/top-k filtering - next_token_logits = top_k_top_p_filtering( - next_token_logits, top_k=top_k, top_p=top_p - ) - # Sample - probs = F.softmax(next_token_logits, dim=-1) - next_token = torch.multinomial(probs, num_samples=1).squeeze(1) - else: - # Greedy decoding - next_token = torch.argmax(next_token_logits, dim=-1) - - # update generations and finished sentences - if eos_token_id is not None: - # pad finished sentences if eos_token_id exist - tokens_to_add = next_token * unfinished_sents + (pad_token_id) * ( - 1 - unfinished_sents - ) - else: - tokens_to_add = next_token - - input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1) - - if eos_token_id is not None: - eos_in_sents = tokens_to_add == eos_token_id - # if sentence is unfinished and the token to add is eos, sent_lengths is filled 
with current length - is_sents_unfinished_and_token_to_add_is_eos = unfinished_sents.mul( - eos_in_sents.long() - ).bool() - sent_lengths.masked_fill_( - is_sents_unfinished_and_token_to_add_is_eos, cur_len + 1 - ) - # unfinished_sents is set to zero if eos in sentence - unfinished_sents.mul_((~eos_in_sents).long()) - - # stop when there is a in each sentence, or if we exceed the maximul length - if unfinished_sents.max() == 0: - break - - cur_len = cur_len + 1 - - # if there are different sentences lengths in the batch, some batches have to be padded - if sent_lengths.min().item() != sent_lengths.max().item(): - assert ( - pad_token_id is not None - ), "`Pad_token_id` has to be defined if batches have different lengths" - # finished sents are filled with pad_token - decoded = input_ids.new(batch_size, sent_lengths.max().item()).fill_( - pad_token_id - ) - else: - decoded = input_ids - - for hypo_idx, hypo in enumerate(input_ids): - decoded[hypo_idx, : sent_lengths[hypo_idx]] = hypo[: sent_lengths[hypo_idx]] - - return decoded - - def _generate_beam_search( - self, - input_ids, - cur_len, - max_length, - min_length, - do_sample, - early_stopping, - temperature, - top_k, - top_p, - repetition_penalty, - no_repeat_ngram_size, - bad_words_ids, - bos_token_id, - pad_token_id, - eos_token_id, - decoder_start_token_id, - batch_size, - num_return_sequences, - length_penalty, - num_beams, - vocab_size, - encoder_outputs, - attention_mask, - ): - """Generate sequences for each example with beam search.""" - - # generated hypotheses - generated_hyps = [ - BeamHypotheses( - num_beams, max_length, length_penalty, early_stopping=early_stopping - ) - for _ in range(batch_size) - ] - - # scores for each sentence in the beam - beam_scores = torch.zeros( - (batch_size, num_beams), dtype=torch.float, device=input_ids.device - ) - - # for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times - if do_sample is False: - beam_scores[:, 1:] = -1e9 - beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,) - - # cache compute states - past = encoder_outputs # defined for encoder-decoder models, None for decoder-only models - - # done sentences - done = [False for _ in range(batch_size)] - - while cur_len < max_length: - model_inputs = self.prepare_inputs_for_generation( - input_ids, past=past, attention_mask=attention_mask - ) - outputs = self.decoder( - **model_inputs - ) # (batch_size * num_beams, cur_len, vocab_size) - next_token_logits = outputs[ - :, -1, : - ] # (batch_size * num_beams, vocab_size) - - # repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858) - if repetition_penalty != 1.0: - self.enforce_repetition_penalty_( - next_token_logits, - batch_size, - num_beams, - input_ids, - repetition_penalty, - ) - - if temperature != 1.0: - next_token_logits = next_token_logits / temperature - - scores = F.log_softmax( - next_token_logits, dim=-1 - ) # (batch_size * num_beams, vocab_size) - if do_sample is False: - # TODO (PVP) still a bit hacky here - there might be a better solution - scores = self.prepare_scores_for_generation( - scores, cur_len=cur_len, max_length=max_length - ) - - # set eos token prob to zero if min_length is not reached - if eos_token_id is not None and cur_len < min_length: - scores[:, eos_token_id] = -float("inf") - - if no_repeat_ngram_size > 0: - # calculate a list of banned tokens to prevent repetitively generating the same ngrams - num_batch_hypotheses = batch_size * 
num_beams - # from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345 - banned_batch_tokens = calc_banned_ngram_tokens( - input_ids, num_batch_hypotheses, no_repeat_ngram_size, cur_len - ) - for i, banned_tokens in enumerate(banned_batch_tokens): - scores[i, banned_tokens] = -float("inf") - - if bad_words_ids is not None: - # calculate a list of banned tokens according to bad words - banned_tokens = calc_banned_bad_words_ids(input_ids, bad_words_ids) - - for i, banned_tokens in enumerate(banned_tokens): - scores[i, banned_tokens] = -float("inf") - - assert scores.shape == ( - batch_size * num_beams, - vocab_size, - ), "Shapes of scores: {} != {}".format( - scores.shape, (batch_size * num_beams, vocab_size) - ) - - if do_sample: - _scores = scores + beam_scores[:, None].expand_as( - scores - ) # (batch_size * num_beams, vocab_size) - # Top-p/top-k filtering - _scores = top_k_top_p_filtering( - _scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2 - ) # (batch_size * num_beams, vocab_size) - # re-organize to group the beam together to sample from all beam_idxs - _scores = _scores.contiguous().view( - batch_size, num_beams * vocab_size - ) # (batch_size, num_beams * vocab_size) - - # Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search) - probs = F.softmax(_scores, dim=-1) - next_tokens = torch.multinomial( - probs, num_samples=2 * num_beams - ) # (batch_size, num_beams * 2) - # Compute next scores - next_scores = torch.gather( - _scores, -1, next_tokens - ) # (batch_size, num_beams * 2) - # sort the sampled vector to make sure that the first num_beams samples are the best - next_scores, next_scores_indices = torch.sort( - next_scores, descending=True, dim=1 - ) - next_tokens = torch.gather( - next_tokens, -1, next_scores_indices - ) # (batch_size, num_beams * 2) - - else: - next_scores = scores + beam_scores[:, None].expand_as( - scores - ) # (batch_size * num_beams, vocab_size) - - # re-organize to group the beam together (we are keeping top hypothesis accross beams) - next_scores = next_scores.view( - batch_size, num_beams * vocab_size - ) # (batch_size, num_beams * vocab_size) - - next_scores, next_tokens = torch.topk( - next_scores, 2 * num_beams, dim=1, largest=True, sorted=True - ) - - assert ( - next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams) - ) - - # next batch beam content - next_batch_beam = [] - - # for each sentence - for batch_idx in range(batch_size): - - # if we are done with this sentence - if done[batch_idx]: - assert ( - len(generated_hyps[batch_idx]) >= num_beams - ), "Batch can only be done if at least {} beams have been generated".format( - num_beams - ) - assert ( - eos_token_id is not None and pad_token_id is not None - ), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined" - next_batch_beam.extend( - [(0, pad_token_id, 0)] * num_beams - ) # pad the batch - continue - - # next sentence beam content - next_sent_beam = [] - - # next tokens for this sentence - for beam_token_rank, (beam_token_id, beam_token_score) in enumerate( - zip(next_tokens[batch_idx], next_scores[batch_idx]) - ): - # get beam and token IDs - beam_id = beam_token_id // vocab_size - token_id = beam_token_id % vocab_size - - effective_beam_id = batch_idx * num_beams + beam_id - # add to generated hypotheses if end of sentence or last iteration - if (eos_token_id is not None) and (token_id.item() == eos_token_id): - # if 
beam_token does not belong to top num_beams tokens, it should not be added - is_beam_token_worse_than_top_num_beams = ( - beam_token_rank >= num_beams - ) - if is_beam_token_worse_than_top_num_beams: - continue - generated_hyps[batch_idx].add( - input_ids[effective_beam_id].clone(), - beam_token_score.item(), - ) - else: - # add next predicted token if it is not eos_token - next_sent_beam.append( - (beam_token_score, token_id, effective_beam_id) - ) - - # the beam for next step is full - if len(next_sent_beam) == num_beams: - break - - # Check if were done so that we can save a pad step if all(done) - done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done( - next_scores[batch_idx].max().item(), cur_len=cur_len - ) - - # update next beam content - assert len(next_sent_beam) == num_beams, "Beam should always be full" - next_batch_beam.extend(next_sent_beam) - assert len(next_batch_beam) == num_beams * (batch_idx + 1) - - # stop when we are done with each sentence - if all(done): - break - - # sanity check / prepare next batch - assert len(next_batch_beam) == batch_size * num_beams - beam_scores = beam_scores.new([x[0] for x in next_batch_beam]) - beam_tokens = input_ids.new([x[1] for x in next_batch_beam]) - beam_idx = input_ids.new([x[2] for x in next_batch_beam]) - - # re-order batch - input_ids = input_ids[beam_idx, :] - input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1) - # re-order internal states - if past is not None: - past = self._reorder_cache(past, beam_idx) - - # update current length - cur_len = cur_len + 1 - - # finalize all open beam hypotheses and end to generated hypotheses - for batch_idx in range(batch_size): - if done[batch_idx]: - continue - - # test that beam scores match previously calculated scores if not eos and batch_idx not done - if eos_token_id is not None and all( - (token_id % vocab_size).item() is not eos_token_id - for token_id in next_tokens[batch_idx] - ): - assert torch.all( - next_scores[batch_idx, :num_beams] - == beam_scores.view(batch_size, num_beams)[batch_idx] - ), "If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}".format( - next_scores[:, :num_beams][batch_idx], - beam_scores.view(batch_size, num_beams)[batch_idx], - ) - - # need to add best num_beams hypotheses to generated hyps - for beam_id in range(num_beams): - effective_beam_id = batch_idx * num_beams + beam_id - final_score = beam_scores[effective_beam_id].item() - final_tokens = input_ids[effective_beam_id] - generated_hyps[batch_idx].add(final_tokens, final_score) - - # depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch - output_batch_size = ( - batch_size if do_sample else batch_size * num_return_sequences - ) - output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences - - # select the best hypotheses - sent_lengths = input_ids.new(output_batch_size) - best = [] - - # retrieve best hypotheses - for i, hypotheses in enumerate(generated_hyps): - sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0]) - for j in range(output_num_return_sequences_per_batch): - effective_batch_idx = output_num_return_sequences_per_batch * i + j - best_hyp = sorted_hyps.pop()[1] - sent_lengths[effective_batch_idx] = len(best_hyp) - best.append(best_hyp) - - # shorter batches are filled with pad_token - if sent_lengths.min().item() != sent_lengths.max().item(): - assert pad_token_id is not None, "`Pad_token_id` has to be defined" 
- sent_max_len = min(sent_lengths.max().item() + 1, max_length) - decoded = input_ids.new(output_batch_size, sent_max_len).fill_(pad_token_id) - - # fill with hypothesis and eos_token_id if necessary - for i, hypo in enumerate(best): - decoded[i, : sent_lengths[i]] = hypo - if sent_lengths[i] < max_length: - decoded[i, sent_lengths[i]] = eos_token_id - else: - # none of the hypotheses have an eos_token - assert (len(hypo) == max_length for hypo in best) - decoded = ( - torch.stack(best).type(torch.long).to(next(self.parameters()).device) - ) - - return decoded - - # force one of token_ids to be generated by setting prob of all other tokens to 0. - def _force_token_ids_generation(self, scores, token_ids): - if isinstance(token_ids, int): - token_ids = [token_ids] - all_but_token_ids_mask = torch.tensor( - [x for x in range(self.vocab_size) if x not in token_ids], - dtype=torch.long, - device=next(self.parameters()).device, - ) - assert ( - len(scores.shape) == 2 - ), "scores should be of rank 2 with shape: [batch_size, vocab_size]" - scores[:, all_but_token_ids_mask] = -float("inf") - - @staticmethod - def _reorder_cache(past, beam_idx): - reordered_past = [] - for layer_past in past: - # get the correct batch idx from layer past batch dim - # batch dim of `past` and `mems` is at 2nd position - reordered_layer_past = [ - layer_past[i, :].unsqueeze(0).clone().detach() for i in beam_idx - ] - reordered_layer_past = torch.cat(reordered_layer_past, dim=0) - # check that shape matches - assert reordered_layer_past.shape == layer_past.shape - reordered_past.append(reordered_layer_past) - past = tuple(reordered_past) - return past - - -""" - Transformer encoder -""" - - -class MeetingTransformerEncoder(nn.Module): - """ - Input: - transformer_embed_dim: transformer dimension - """ - - def __init__(self, opt, transformer_embed_dim): - super(MeetingTransformerEncoder, self).__init__() - vocab = int(opt["vocab_size"]) - n_layer = int(opt["TRANSFORMER_LAYER"]) - opt["transformer_embed_dim"] = transformer_embed_dim - block = EncoderBlock(opt) - self.blocks = nn.ModuleList([copy.deepcopy(block) for _ in range(n_layer)]) - - """ - Input: - x: batch x len x n_state - Output: - h: batch x len x n_state - """ - - def forward(self, x): - h = x - for block in self.blocks: - h = block(h, None) - return h - - -""" - One encoder block of transformer -""" - - -class MeetingDecoderBlock(nn.Module): - def __init__(self, opt, n_state): - super(MeetingDecoderBlock, self).__init__() - self.opt = opt - self.decoder_splitter = Splitter(n_state) - self.attn = Attention(n_state, opt) - self.token_attn = Attention(n_state, opt) - self.sent_attn = Attention(n_state, opt) - self.ln_1 = LayerNorm(n_state) - self.ln_2 = LayerNorm(n_state) - opt["transformer_embed_dim"] = n_state - self.mlp = MLP(4 * n_state, opt) - self.ln_3 = LayerNorm(n_state) - self.ln_4 = LayerNorm(n_state) - - """ - Input: - y: batch x len x n_state (decoder part) - token_enc_key: batch x encoder_len x n_state - token_enc_value: batch x encoder_len x n_state - sent_enc_key: batch x encoder_len x n_state - sent_enc_value: batch x encoder_len x n_state - Output: - h: batch x len x n_state - """ - - def forward(self, y, token_enc_key, token_enc_value, sent_enc_key, sent_enc_value): - query, key, value = self.decoder_splitter(y) - # batch x len x n_state - - # self-attention - a = self.attn(query, key, value, None, one_dir_visible=True) - # batch x len x n_state - - n = self.ln_1(y + a) # residual - - if "NO_HIERARCHY" in self.opt: - q = y - r = n - else: - # 
src-tgt attention on sentences - q = self.sent_attn(n, sent_enc_key, sent_enc_value, None) - r = self.ln_3(n + q) # residual - # batch x len x n_state - - # src-tgt attention on tokens - o = self.token_attn(r, token_enc_key, token_enc_value, None) - p = self.ln_2(r + o) # residual - # batch x len x n_state - - m = self.mlp(p) - h = self.ln_4(p + m) - return h - - -""" - Transformer decoder -""" - - -class MeetingTransformerDecoder(nn.Module): - """ - Input: - embed_size: decoder transformer dimension - token_dim: dimension of transformer from token encoder side - sent_dim: dimension of transformer from sent encoder side - """ - - def __init__(self, opt, embedder, embed_size, token_dim, sent_dim): - super(MeetingTransformerDecoder, self).__init__() - self.fp16 = "FP16" in opt - vocab_size = int(opt["vocab_size"]) - n_layer = int(opt["TRANSFORMER_LAYER"]) - self.encoder_splitter = Splitter(embed_size) - block = MeetingDecoderBlock(opt, embed_size) - self.token_linear = nn.Linear(token_dim, embed_size) - self.sent_linear = nn.Linear(sent_dim, embed_size) - self.blocks = nn.ModuleList([copy.deepcopy(block) for _ in range(n_layer)]) - self.linear = nn.Linear(embed_size, vocab_size, bias=False) - self.linear.weight = embedder.embed.weight # share weight - - """ - Input: - token_encoder_outputs: 1 x (encoder_len - sent_num) x token_transformer_dim - sent_encoder_outputs: 1 x sent_num x sent_transformer_dim - y: batch x len x n_state - Output: - prob: batch x len x vocab_size (probabilities after softmax) - """ - - def forward(self, token_encoder_inputs, sent_encoder_inputs, decoder_input_ids): - _, token_enc_key, token_enc_value = self.encoder_splitter( - self.token_linear(token_encoder_inputs) - ) - # token_enc_key: batch x encoder_len x n_state - # token_enc_value: batch x encoder_len x n_state - - _, sent_enc_key, sent_enc_value = self.encoder_splitter( - self.sent_linear(sent_encoder_inputs) - ) - # sent_enc_key: batch x encoder_len x n_state - # sent_enc_value: batch x encoder_len x n_state - - h = decoder_input_ids - for block in self.blocks: - h = block(h, token_enc_key, token_enc_value, sent_enc_key, sent_enc_value) - prob = F.softmax(self.linear(h), dim=-1) - return prob - - -class Encoder(nn.Module): - """ - vocab_size: size of input vocabulary - embed_size: word embedding dimension of dictionary - role_dim: role embedding dimension - embed: the nn.Embedding for vocab - role_embed: the nn.Embedding for role - """ - - def __init__(self, opt, vocab_size, embed_size, role_dim, embedder, role_embed): - super(Encoder, self).__init__() - self.opt = opt - self.vocab_size = vocab_size - - set_seq_dropout("VARIATIONAL_DROPOUT" in self.opt) - - self.embed_size = embed_size - self.embedder = embedder - self.role_embed = role_embed - - self.token_transformer_dim = embed_size - if "USE_POSENT" in opt: - print("Use POS and ENT") - pos_dim = opt["POS_DIM"] - ent_dim = opt["ENT_DIM"] - self.pos_embed = nn.Embedding(len(POS), pos_dim) - self.ent_embed = nn.Embedding(len(ENT), ent_dim) - self.token_transformer_dim += pos_dim + ent_dim - - self.sent_transformer_dim = self.token_transformer_dim - if "USE_ROLE" in opt: - print("USE_ROLE") - role_dim = opt["ROLE_DIM"] - self.sent_transformer_dim += role_dim - - self.token_encoder = MeetingTransformerEncoder(opt, self.token_transformer_dim) - self.sent_encoder = MeetingTransformerEncoder(opt, self.sent_transformer_dim) - - """ - x = bz * sent_num * x_len (word_ids) - x_role = bz * sent_num (role_ids) - x_pos = bz * sent_num * x_len (pos_ids) - x_ent = bz * 
sent_num * x_len (ent_ids) - outputs: - token_encoder_outputs: bz x x_len_total x token_transformer_dim - sent_encoder_outputs: bz x sent_num x sent_transformer_dim - """ - - def forward(self, x, x_role, x_pos, x_ent): - batch_size = x.size(0) - sent_num = x.size(1) - x_len = x.size(2) - - # x contains word id >= vocab_size - vocab_x = x.clone() - vocab_x[vocab_x >= self.vocab_size] = 1 # UNK - embedded = self.embedder(vocab_x.view(batch_size, -1)) - # embedded = 1 x sent_num * x_len x embed_size - embedded = embedded.view(batch_size, sent_num, x_len, -1) - # embedded = 1 x sent_num x x_len x embed_size - - if "USE_ROLE" in self.opt: - role_embed = self.role_embed(x_role) # 1 x sent_num x role_dim - - if "USE_POSENT" in self.opt: - embedded = torch.cat( - [embedded, self.pos_embed(x_pos), self.ent_embed(x_ent)], dim=3 - ) - # 1 x sent_num x x_len x (embed_size + pos_dim + ent_dim ) - - feat_dim = embedded.size(3) - - token_transformer_output = self.token_encoder( - embedded.view(-1, x_len, feat_dim) - ) - token_transformer_dim = token_transformer_output.size(2) - token_transformer_output = token_transformer_output.view( - batch_size, sent_num, x_len, token_transformer_dim - ) - # 1 x sent_num x x_len x token_transformer_dim - - sent_encoder_inputs = token_transformer_output[ - :, :, 0, : - ] # 1 x sent_num x token_transformer_dim - if "USE_ROLE" in self.opt: - sent_encoder_inputs = torch.cat([sent_encoder_inputs, role_embed], dim=2) - sent_encoder_outputs = self.sent_encoder( - sent_encoder_inputs - ) # 1 x sent_num x sent_transformer_dim - - token_transformer_output = token_transformer_output.view( - batch_size, -1, token_transformer_dim - ) - - return token_transformer_output, sent_encoder_outputs - - -class Decoder(nn.Module): - def __init__( - self, - opt, - embed_size, - vocab_size, - embedder, - token_transformer_dim, - sent_transformer_dim, - ): - super(Decoder, self).__init__() - self.opt = opt - self.embed_size = embed_size - self.vocab_size = vocab_size - self.embedder = embedder - self.sent_decoder = MeetingTransformerDecoder( - opt, embedder, embed_size, token_transformer_dim, sent_transformer_dim - ) - - def forward(self, token_encoder_outputs, sent_encoder_outputs, decoder_input_ids): - vocab_y = decoder_input_ids.clone() - vocab_y[vocab_y >= self.vocab_size] = 1 # UNK - embedded = self.embedder(vocab_y) - - vocab_prob = self.sent_decoder( - token_encoder_outputs, sent_encoder_outputs, embedded - ) - # vocab_prob: batch x y_len x vocab_size - - vocab_logprob = torch.log(vocab_prob + 1e-15) - return vocab_logprob diff --git a/spaces/akhaliq/VQMIVC/train.py b/spaces/akhaliq/VQMIVC/train.py deleted file mode 100644 index 5fab7eca4ccd2ea6e45352ee049d24cd68a50679..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/VQMIVC/train.py +++ /dev/null @@ -1,411 +0,0 @@ -import hydra -from hydra import utils -from itertools import chain -from pathlib import Path -import numpy as np - -import torch -import torch.optim as optim -from torch.utils.data import DataLoader - - -from dataset import CPCDataset_sameSeq as CPCDataset -from scheduler import WarmupScheduler -from model_encoder import Encoder, CPCLoss_sameSeq, Encoder_lf0 -from model_decoder import Decoder_ac -from model_encoder import SpeakerEncoder as Encoder_spk -from mi_estimators import CLUBSample_group, CLUBSample_reshape - -import apex.amp as amp -import os -import time - -torch.manual_seed(137) -np.random.seed(137) - -def save_checkpoint(encoder, encoder_lf0, cpc, encoder_spk, \ - cs_mi_net, ps_mi_net, cp_mi_net, 
decoder, \ - optimizer, optimizer_cs_mi_net, optimizer_ps_mi_net, optimizer_cp_mi_net, scheduler, amp, epoch, checkpoint_dir, cfg): - if cfg.use_amp: - amp_state_dict = amp.state_dict() - else: - amp_state_dict = None - checkpoint_state = { - "encoder": encoder.state_dict(), - "encoder_lf0": encoder_lf0.state_dict(), - "cpc": cpc.state_dict(), - "encoder_spk": encoder_spk.state_dict(), - "ps_mi_net": ps_mi_net.state_dict(), - "cp_mi_net": cp_mi_net.state_dict(), - "cs_mi_net": cs_mi_net.state_dict(), - "decoder": decoder.state_dict(), - "optimizer": optimizer.state_dict(), - "optimizer_cs_mi_net": optimizer_cs_mi_net.state_dict(), - "optimizer_ps_mi_net": optimizer_ps_mi_net.state_dict(), - "optimizer_cp_mi_net": optimizer_cp_mi_net.state_dict(), - "scheduler": scheduler.state_dict(), - "amp": amp_state_dict, - "epoch": epoch - } - checkpoint_dir.mkdir(exist_ok=True, parents=True) - checkpoint_path = checkpoint_dir / "model.ckpt-{}.pt".format(epoch) - torch.save(checkpoint_state, checkpoint_path) - print("Saved checkpoint: {}".format(checkpoint_path.stem)) - - - -def mi_first_forward(mels, lf0, encoder, encoder_lf0, encoder_spk, cs_mi_net, optimizer_cs_mi_net, - ps_mi_net, optimizer_ps_mi_net, cp_mi_net, optimizer_cp_mi_net, cfg): - optimizer_cs_mi_net.zero_grad() - optimizer_ps_mi_net.zero_grad() - optimizer_cp_mi_net.zero_grad() - z, _, _, _, _ = encoder(mels) - z = z.detach() - lf0_embs = encoder_lf0(lf0).detach() - spk_embs = encoder_spk(mels).detach() - if cfg.use_CSMI: - lld_cs_loss = -cs_mi_net.loglikeli(spk_embs, z) - if cfg.use_amp: - with amp.scale_loss(lld_cs_loss, optimizer_cs_mi_net) as sl: - sl.backward() - else: - lld_cs_loss.backward() - optimizer_cs_mi_net.step() - else: - lld_cs_loss = torch.tensor(0.) - - if cfg.use_CPMI: - lld_cp_loss = -cp_mi_net.loglikeli(lf0_embs.unsqueeze(1).reshape(lf0_embs.shape[0],-1,2,lf0_embs.shape[-1]).mean(2), z) - if cfg.use_amp: - with amp.scale_loss(lld_cp_loss, optimizer_cp_mi_net) as slll: - slll.backward() - else: - lld_cp_loss.backward() - torch.nn.utils.clip_grad_norm_(cp_mi_net.parameters(), 1) - optimizer_cp_mi_net.step() - else: - lld_cp_loss = torch.tensor(0.) - - if cfg.use_PSMI: - lld_ps_loss = -ps_mi_net.loglikeli(spk_embs, lf0_embs) - if cfg.use_amp: - with amp.scale_loss(lld_ps_loss, optimizer_ps_mi_net) as sll: - sll.backward() - else: - lld_ps_loss.backward() - optimizer_ps_mi_net.step() - else: - lld_ps_loss = torch.tensor(0.) 
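The cs/ps/cp networks trained above are CLUB-style mutual-information estimators imported from mi_estimators.py, which is not part of this diff. As a hedged sketch of what their loglikeli/mi_est interface is assumed to compute (a diagonal-Gaussian variational approximation q(y|x) with in-batch negative sampling; names, shapes and layer sizes here are illustrative only):

# Minimal sketch of a CLUB-style sampled MI upper bound (assumed interface of
# the CLUBSample_* classes; not the actual implementation in mi_estimators.py).
import torch
import torch.nn as nn

class CLUBSampleSketch(nn.Module):
    def __init__(self, x_dim, y_dim, hidden_size):
        super().__init__()
        # q(y|x) = N(mu(x), diag(exp(logvar(x))))
        self.p_mu = nn.Sequential(nn.Linear(x_dim, hidden_size), nn.ReLU(), nn.Linear(hidden_size, y_dim))
        self.p_logvar = nn.Sequential(nn.Linear(x_dim, hidden_size), nn.ReLU(), nn.Linear(hidden_size, y_dim), nn.Tanh())

    def loglikeli(self, x, y):
        # maximized when updating the estimator itself (hence the -loglikeli losses above)
        mu, logvar = self.p_mu(x), self.p_logvar(x)
        return (-(y - mu) ** 2 / logvar.exp() - logvar).sum(dim=-1).mean()

    def mi_est(self, x, y):
        # CLUB bound: score of matched pairs minus score of shuffled (negative) pairs
        mu, logvar = self.p_mu(x), self.p_logvar(x)
        rand_idx = torch.randperm(y.size(0), device=y.device)
        positive = -((y - mu) ** 2) / logvar.exp()
        negative = -((y[rand_idx] - mu) ** 2) / logvar.exp()
        return (positive.sum(dim=-1) - negative.sum(dim=-1)).mean() / 2.0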
- - return optimizer_cs_mi_net, lld_cs_loss, optimizer_ps_mi_net, lld_ps_loss, optimizer_cp_mi_net, lld_cp_loss - - -def mi_second_forward(mels, lf0, encoder, encoder_lf0, cpc, encoder_spk, cs_mi_net, ps_mi_net, cp_mi_net, decoder, cfg, optimizer, scheduler): - optimizer.zero_grad() - z, c, _, vq_loss, perplexity = encoder(mels) - cpc_loss, accuracy = cpc(z, c) - spk_embs = encoder_spk(mels) - lf0_embs = encoder_lf0(lf0) - recon_loss, pred_mels = decoder(z, lf0_embs, spk_embs, mels.transpose(1,2)) - - loss = recon_loss + cpc_loss + vq_loss - - if cfg.use_CSMI: - mi_cs_loss = cfg.mi_weight*cs_mi_net.mi_est(spk_embs, z) - else: - mi_cs_loss = torch.tensor(0.).to(loss.device) - - if cfg.use_CPMI: - mi_cp_loss = cfg.mi_weight*cp_mi_net.mi_est(lf0_embs.unsqueeze(1).reshape(lf0_embs.shape[0],-1,2,lf0_embs.shape[-1]).mean(2), z) - else: - mi_cp_loss = torch.tensor(0.).to(loss.device) - - if cfg.use_PSMI: - mi_ps_loss = cfg.mi_weight*ps_mi_net.mi_est(spk_embs, lf0_embs) - else: - mi_ps_loss = torch.tensor(0.).to(loss.device) - - loss = loss + mi_cs_loss + mi_ps_loss + mi_cp_loss - - if cfg.use_amp: - with amp.scale_loss(loss, optimizer) as scaled_loss: - scaled_loss.backward() - else: - loss.backward() - - optimizer.step() - return optimizer, recon_loss, vq_loss, cpc_loss, accuracy, perplexity, mi_cs_loss, mi_ps_loss, mi_cp_loss - - -def calculate_eval_loss(mels, lf0, \ - encoder, encoder_lf0, cpc, \ - encoder_spk, cs_mi_net, ps_mi_net, \ - cp_mi_net, decoder, cfg): - with torch.no_grad(): - z, c, z_beforeVQ, vq_loss, perplexity = encoder(mels) - c = c - lf0_embs = encoder_lf0(lf0) - spk_embs = encoder_spk(mels) - - if cfg.use_CSMI: - lld_cs_loss = -cs_mi_net.loglikeli(spk_embs, z) - mi_cs_loss = cfg.mi_weight*cs_mi_net.mi_est(spk_embs, z) - else: - lld_cs_loss = torch.tensor(0.) - mi_cs_loss = torch.tensor(0.) - - # z, c, z_beforeVQ, vq_loss, perplexity = encoder(mels) - cpc_loss, accuracy = cpc(z, c) - recon_loss, pred_mels = decoder(z, lf0_embs, spk_embs, mels.transpose(1,2)) - - if cfg.use_CPMI: - mi_cp_loss = cfg.mi_weight*cp_mi_net.mi_est(lf0_embs.unsqueeze(1).reshape(lf0_embs.shape[0],-1,2,lf0_embs.shape[-1]).mean(2), z) - lld_cp_loss = -cp_mi_net.loglikeli(lf0_embs.unsqueeze(1).reshape(lf0_embs.shape[0],-1,2,lf0_embs.shape[-1]).mean(2), z) - else: - mi_cp_loss = torch.tensor(0.) - lld_cp_loss = torch.tensor(0.) - - if cfg.use_PSMI: - mi_ps_loss = cfg.mi_weight*ps_mi_net.mi_est(spk_embs, lf0_embs) - lld_ps_loss = -ps_mi_net.loglikeli(spk_embs, lf0_embs) - else: - mi_ps_loss = torch.tensor(0.) - lld_ps_loss = torch.tensor(0.) 
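The mixed-precision handling in these forward passes relies on the apex.amp scale_loss API, which is deprecated. Purely as an illustrative sketch, the same scale, backward, clip, step pattern with the built-in torch.cuda.amp would look roughly like this (model, optimizer and compute_loss are placeholders, not names from this repository):

# Hedged sketch: the apex.amp pattern above expressed with torch.cuda.amp instead.
import torch

scaler = torch.cuda.amp.GradScaler()

def amp_step(model, optimizer, compute_loss, batch):
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():          # run the forward pass in mixed precision
        loss = compute_loss(model, batch)
    scaler.scale(loss).backward()            # scaled backward, like amp.scale_loss(...)
    scaler.unscale_(optimizer)               # so clip_grad_norm_ sees the true gradients
    torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
    scaler.step(optimizer)
    scaler.update()
    return loss.detach()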
- - return recon_loss, vq_loss, cpc_loss, accuracy, perplexity, mi_cs_loss, lld_cs_loss, mi_ps_loss, lld_ps_loss, mi_cp_loss, lld_cp_loss - - -def to_eval(all_models): - for m in all_models: - m.eval() - - -def to_train(all_models): - for m in all_models: - m.train() - - -def eval_model(epoch, checkpoint_dir, device, valid_dataloader, encoder, encoder_lf0, cpc, encoder_spk, cs_mi_net, ps_mi_net, cp_mi_net, decoder, cfg): - stime = time.time() - average_cpc_loss = average_vq_loss = average_perplexity = average_recon_loss = 0 - average_accuracies = np.zeros(cfg.training.n_prediction_steps) - average_lld_cs_loss = average_mi_cs_loss = average_lld_ps_loss = average_mi_ps_loss = average_lld_cp_loss = average_mi_cp_loss = 0 - all_models = [encoder, encoder_lf0, cpc, encoder_spk, cs_mi_net, ps_mi_net, cp_mi_net, decoder] - to_eval(all_models) - for i, (mels, lf0, speakers) in enumerate(valid_dataloader, 1): - lf0 = lf0.to(device) - mels = mels.to(device) # (bs, 80, 128) - recon_loss, vq_loss, cpc_loss, accuracy, perplexity, mi_cs_loss, lld_cs_loss, mi_ps_loss, lld_ps_loss, mi_cp_loss, lld_cp_loss = \ - calculate_eval_loss(mels, lf0, \ - encoder, encoder_lf0, cpc, \ - encoder_spk, cs_mi_net, ps_mi_net, \ - cp_mi_net, decoder, cfg) - - average_recon_loss += (recon_loss.item() - average_recon_loss) / i - average_cpc_loss += (cpc_loss.item() - average_cpc_loss) / i - average_vq_loss += (vq_loss.item() - average_vq_loss) / i - average_perplexity += (perplexity.item() - average_perplexity) / i - average_accuracies += (np.array(accuracy) - average_accuracies) / i - average_lld_cs_loss += (lld_cs_loss.item() - average_lld_cs_loss) / i - average_mi_cs_loss += (mi_cs_loss.item() - average_mi_cs_loss) / i - average_lld_ps_loss += (lld_ps_loss.item() - average_lld_ps_loss) / i - average_mi_ps_loss += (mi_ps_loss.item() - average_mi_ps_loss) / i - average_lld_cp_loss += (lld_cp_loss.item() - average_lld_cp_loss) / i - average_mi_cp_loss += (mi_cp_loss.item() - average_mi_cp_loss) / i - - - ctime = time.time() - print("Eval | epoch:{}, recon loss:{:.3f}, cpc loss:{:.3f}, vq loss:{:.3f}, perpexlity:{:.3f}, lld cs loss:{:.3f}, mi cs loss:{:.3E}, lld ps loss:{:.3f}, mi ps loss:{:.3f}, lld cp loss:{:.3f}, mi cp loss:{:.3f}, used time:{:.3f}s" - .format(epoch, average_recon_loss, average_cpc_loss, average_vq_loss, average_perplexity, average_lld_cs_loss, average_mi_cs_loss, average_lld_ps_loss, average_mi_ps_loss, average_lld_cp_loss, average_mi_cp_loss, ctime-stime)) - print(100 * average_accuracies) - results_txt = open(f'{str(checkpoint_dir)}/results.txt', 'a') - results_txt.write("Eval | epoch:{}, recon loss:{:.3f}, cpc loss:{:.3f}, vq loss:{:.3f}, perpexlity:{:.3f}, lld cs loss:{:.3f}, mi cs loss:{:.3E}, lld ps loss:{:.3f}, mi ps loss:{:.3f}, lld cp loss:{:.3f}, mi cp loss:{:.3f}" - .format(epoch, average_recon_loss, average_cpc_loss, average_vq_loss, average_perplexity, average_lld_cs_loss, average_mi_cs_loss, average_lld_ps_loss, average_mi_ps_loss, average_lld_cp_loss, average_mi_cp_loss)+'\n') - results_txt.write(' '.join([str(cpc_acc) for cpc_acc in average_accuracies])+'\n') - results_txt.close() - - to_train(all_models) - - -@hydra.main(config_path="config/train.yaml") -def train_model(cfg): - cfg.checkpoint_dir = f'{cfg.checkpoint_dir}/useCSMI{cfg.use_CSMI}_useCPMI{cfg.use_CPMI}_usePSMI{cfg.use_PSMI}_useAmp{cfg.use_amp}' - if cfg.encoder_lf0_type == 'no_emb': # default - dim_lf0 = 1 - else: - dim_lf0 = 64 - - checkpoint_dir = Path(utils.to_absolute_path(cfg.checkpoint_dir)) - 
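Both eval_model above and the training loop below accumulate their statistics with the update average += (value - average) / i. This is the standard incremental mean, equivalent to averaging at the end without storing every per-batch value. A tiny self-contained check, using made-up numbers:

# Sketch: the incremental mean behind the average_* statistics.
values = [3.0, 5.0, 10.0]          # e.g. per-batch losses (hypothetical)
running = 0.0
for i, v in enumerate(values, 1):  # i starts at 1, matching enumerate(dataloader, 1)
    running += (v - running) / i
assert abs(running - sum(values) / len(values)) < 1e-12
print(running)                     # 6.0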
checkpoint_dir.mkdir(exist_ok=True, parents=True) - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - # define model - encoder = Encoder(**cfg.model.encoder) - encoder_lf0 = Encoder_lf0(cfg.encoder_lf0_type) - cpc = CPCLoss_sameSeq(**cfg.model.cpc) - encoder_spk = Encoder_spk() - cs_mi_net = CLUBSample_group(256, cfg.model.encoder.z_dim, 512) - ps_mi_net = CLUBSample_group(256, dim_lf0, 512) - cp_mi_net = CLUBSample_reshape(dim_lf0, cfg.model.encoder.z_dim, 512) - decoder = Decoder_ac(dim_neck=cfg.model.encoder.z_dim, dim_lf0=dim_lf0, use_l1_loss=True) - - encoder.to(device) - cpc.to(device) - encoder_lf0.to(device) - encoder_spk.to(device) - cs_mi_net.to(device) - ps_mi_net.to(device) - cp_mi_net.to(device) - decoder.to(device) - - optimizer = optim.Adam( - chain(encoder.parameters(), encoder_lf0.parameters(), cpc.parameters(), encoder_spk.parameters(), decoder.parameters()), - lr=cfg.training.scheduler.initial_lr) - optimizer_cs_mi_net = optim.Adam(cs_mi_net.parameters(), lr=cfg.mi_lr) - optimizer_ps_mi_net = optim.Adam(ps_mi_net.parameters(), lr=cfg.mi_lr) - optimizer_cp_mi_net = optim.Adam(cp_mi_net.parameters(), lr=cfg.mi_lr) - # TODO: use_amp is set default to True to speed up training; no-amp -> more stable training? => need to be verified - if cfg.use_amp: - [encoder, encoder_lf0, cpc, encoder_spk, decoder], optimizer = amp.initialize([encoder, encoder_lf0, cpc, encoder_spk, decoder], optimizer, opt_level='O1') - [cs_mi_net], optimizer_cs_mi_net = amp.initialize([cs_mi_net], optimizer_cs_mi_net, opt_level='O1') - [ps_mi_net], optimizer_ps_mi_net = amp.initialize([ps_mi_net], optimizer_ps_mi_net, opt_level='O1') - [cp_mi_net], optimizer_cp_mi_net = amp.initialize([cp_mi_net], optimizer_cp_mi_net, opt_level='O1') - - root_path = Path(utils.to_absolute_path("data")) - dataset = CPCDataset( - root=root_path, - n_sample_frames=cfg.training.sample_frames, # 128 - mode='train') - valid_dataset = CPCDataset( - root=root_path, - n_sample_frames=cfg.training.sample_frames, # 128 - mode='valid') - - warmup_epochs = 2000 // (len(dataset)//cfg.training.batch_size) - print('warmup_epochs:', warmup_epochs) - scheduler = WarmupScheduler( - optimizer, - warmup_epochs=warmup_epochs, - initial_lr=cfg.training.scheduler.initial_lr, - max_lr=cfg.training.scheduler.max_lr, - milestones=cfg.training.scheduler.milestones, - gamma=cfg.training.scheduler.gamma) - - dataloader = DataLoader( - dataset, - batch_size=cfg.training.batch_size, # 256 - shuffle=True, - num_workers=cfg.training.n_workers, - pin_memory=True, - drop_last=False) - valid_dataloader = DataLoader( - valid_dataset, - batch_size=cfg.training.batch_size, # 256 - shuffle=False, - num_workers=cfg.training.n_workers, - pin_memory=True, - drop_last=False) - - if cfg.resume: - print("Resume checkpoint from: {}:".format(cfg.resume)) - resume_path = utils.to_absolute_path(cfg.resume) - checkpoint = torch.load(resume_path, map_location=lambda storage, loc: storage) - encoder.load_state_dict(checkpoint["encoder"]) - encoder_lf0.load_state_dict(checkpoint["encoder_lf0"]) - cpc.load_state_dict(checkpoint["cpc"]) - encoder_spk.load_state_dict(checkpoint["encoder_spk"]) - cs_mi_net.load_state_dict(checkpoint["cs_mi_net"]) - ps_mi_net.load_state_dict(checkpoint["ps_mi_net"]) - if cfg.use_CPMI: - cp_mi_net.load_state_dict(checkpoint["cp_mi_net"]) - decoder.load_state_dict(checkpoint["decoder"]) - optimizer.load_state_dict(checkpoint["optimizer"]) - optimizer_cs_mi_net.load_state_dict(checkpoint["optimizer_cs_mi_net"]) - 
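The WarmupScheduler configured above lives in scheduler.py, which is not included in this diff. A hedged approximation of that configuration (linear warmup from initial_lr to max_lr over warmup_epochs, then step decay by gamma at the milestones) can be built from torch's LambdaLR; the real class may differ in details:

# Illustrative only: warmup + milestone decay via torch.optim.lr_scheduler.LambdaLR.
import torch

def make_warmup_multistep(optimizer, warmup_epochs, initial_lr, max_lr, milestones, gamma):
    def lr_lambda(epoch):
        if epoch < warmup_epochs:
            frac = epoch / max(1, warmup_epochs)            # linear ramp
            return (initial_lr + frac * (max_lr - initial_lr)) / initial_lr
        decay = gamma ** sum(epoch >= m for m in milestones)
        return (max_lr / initial_lr) * decay
    return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)

# usage sketch (the numbers are placeholders, not the config defaults):
model = torch.nn.Linear(4, 4)
opt = torch.optim.Adam(model.parameters(), lr=1e-4)          # lr == initial_lr
sched = make_warmup_multistep(opt, warmup_epochs=8, initial_lr=1e-4,
                              max_lr=4e-4, milestones=[100, 200], gamma=0.5)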
optimizer_ps_mi_net.load_state_dict(checkpoint["optimizer_ps_mi_net"]) - optimizer_cp_mi_net.load_state_dict(checkpoint["optimizer_cp_mi_net"]) - if cfg.use_amp: - amp.load_state_dict(checkpoint["amp"]) - scheduler.load_state_dict(checkpoint["scheduler"]) - start_epoch = checkpoint["epoch"] - else: - start_epoch = 1 - - if os.path.exists(f'{str(checkpoint_dir)}/results.txt'): - wmode = 'a' - else: - wmode = 'w' - results_txt = open(f'{str(checkpoint_dir)}/results.txt', wmode) - results_txt.write('save training info...\n') - results_txt.close() - - global_step = 0 - stime = time.time() - for epoch in range(start_epoch, cfg.training.n_epochs + 1): - average_cpc_loss = average_vq_loss = average_perplexity = average_recon_loss = 0 - average_accuracies = np.zeros(cfg.training.n_prediction_steps) - average_lld_cs_loss = average_mi_cs_loss = average_lld_ps_loss = average_mi_ps_loss = average_lld_cp_loss = average_mi_cp_loss = 0 - - for i, (mels, lf0, speakers) in enumerate(dataloader, 1): - lf0 = lf0.to(device) - mels = mels.to(device) # (bs, 80, 128) - if cfg.use_CSMI or cfg.use_CPMI or cfg.use_PSMI: - for j in range(cfg.mi_iters): - optimizer_cs_mi_net, lld_cs_loss, optimizer_ps_mi_net, lld_ps_loss, optimizer_cp_mi_net, lld_cp_loss = mi_first_forward(mels, lf0, encoder, encoder_lf0, encoder_spk, cs_mi_net, optimizer_cs_mi_net, \ - ps_mi_net, optimizer_ps_mi_net, cp_mi_net, optimizer_cp_mi_net, cfg) - else: - lld_cs_loss = torch.tensor(0.) - lld_ps_loss = torch.tensor(0.) - lld_cp_loss = torch.tensor(0.) - - optimizer, recon_loss, vq_loss, cpc_loss, accuracy, perplexity, mi_cs_loss, mi_ps_loss, mi_cp_loss = mi_second_forward(mels, lf0, \ - encoder, encoder_lf0, cpc, \ - encoder_spk, cs_mi_net, ps_mi_net, \ - cp_mi_net, decoder, cfg, \ - optimizer, scheduler) - - average_recon_loss += (recon_loss.item() - average_recon_loss) / i - average_cpc_loss += (cpc_loss.item() - average_cpc_loss) / i - average_vq_loss += (vq_loss.item() - average_vq_loss) / i - average_perplexity += (perplexity.item() - average_perplexity) / i - average_accuracies += (np.array(accuracy) - average_accuracies) / i - average_lld_cs_loss += (lld_cs_loss.item() - average_lld_cs_loss) / i - average_mi_cs_loss += (mi_cs_loss.item() - average_mi_cs_loss) / i - average_lld_ps_loss += (lld_ps_loss.item() - average_lld_ps_loss) / i - average_mi_ps_loss += (mi_ps_loss.item() - average_mi_ps_loss) / i - average_lld_cp_loss += (lld_cp_loss.item() - average_lld_cp_loss) / i - average_mi_cp_loss += (mi_cp_loss.item() - average_mi_cp_loss) / i - - - ctime = time.time() - print("epoch:{}, global step:{}, recon loss:{:.3f}, cpc loss:{:.3f}, vq loss:{:.3f}, perpexlity:{:.3f}, lld cs loss:{:.3f}, mi cs loss:{:.3E}, lld ps loss:{:.3f}, mi ps loss:{:.3f}, lld cp loss:{:.3f}, mi cp loss:{:.3f}, used time:{:.3f}s" - .format(epoch, global_step, average_recon_loss, average_cpc_loss, average_vq_loss, average_perplexity, average_lld_cs_loss, average_mi_cs_loss, average_lld_ps_loss, average_mi_ps_loss, average_lld_cp_loss, average_mi_cp_loss, ctime-stime)) - print(100 * average_accuracies) - stime = time.time() - global_step += 1 - # scheduler.step() - - results_txt = open(f'{str(checkpoint_dir)}/results.txt', 'a') - results_txt.write("epoch:{}, global step:{}, recon loss:{:.3f}, cpc loss:{:.3f}, vq loss:{:.3f}, perpexlity:{:.3f}, lld cs loss:{:.3f}, mi cs loss:{:.3E}, lld ps loss:{:.3f}, mi ps loss:{:.3f}, lld cp loss:{:.3f}, mi cp loss:{:.3f}" - .format(epoch, global_step, average_recon_loss, average_cpc_loss, average_vq_loss, average_perplexity, 
average_lld_cs_loss, average_mi_cs_loss, average_lld_ps_loss, average_mi_ps_loss, average_lld_cp_loss, average_mi_cp_loss)+'\n') - results_txt.write(' '.join([str(cpc_acc) for cpc_acc in average_accuracies])+'\n') - results_txt.close() - scheduler.step() - - - if epoch % cfg.training.log_interval == 0 and epoch != start_epoch: - eval_model(epoch, checkpoint_dir, device, valid_dataloader, encoder, encoder_lf0, cpc, encoder_spk, cs_mi_net, ps_mi_net, cp_mi_net, decoder, cfg) - - ctime = time.time() - print("epoch:{}, global step:{}, recon loss:{:.3f}, cpc loss:{:.3f}, vq loss:{:.3f}, perpexlity:{:.3f}, lld cs loss:{:.3f}, mi cs loss:{:.3E}, lld ps loss:{:.3f}, mi ps loss:{:.3f}, lld cp loss:{:.3f}, mi cp loss:{:.3f}, used time:{:.3f}s" - .format(epoch, global_step, average_recon_loss, average_cpc_loss, average_vq_loss, average_perplexity, average_lld_cs_loss, average_mi_cs_loss, average_lld_ps_loss, average_mi_ps_loss, average_lld_cp_loss, average_mi_cp_loss, ctime-stime)) - print(100 * average_accuracies) - stime = time.time() - - if epoch % cfg.training.checkpoint_interval == 0 and epoch != start_epoch: - save_checkpoint(encoder, encoder_lf0, cpc, encoder_spk, \ - cs_mi_net, ps_mi_net, cp_mi_net, decoder, \ - optimizer, optimizer_cs_mi_net, optimizer_ps_mi_net, optimizer_cp_mi_net, scheduler, amp, epoch, checkpoint_dir, cfg) - - -if __name__ == "__main__": - train_model() diff --git a/spaces/akhaliq/dreambooth-training/train_dreambooth.py b/spaces/akhaliq/dreambooth-training/train_dreambooth.py deleted file mode 100644 index c18edc83b6a5850b86ee75c8ef2f36bb91691b95..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/dreambooth-training/train_dreambooth.py +++ /dev/null @@ -1,818 +0,0 @@ -import argparse -import itertools -import math -import os -from pathlib import Path -from typing import Optional -import subprocess -import sys - -import torch -import torch.nn.functional as F -import torch.utils.checkpoint -from torch.utils.data import Dataset - -from accelerate import Accelerator -from accelerate.logging import get_logger -from accelerate.utils import set_seed -from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel -from diffusers.optimization import get_scheduler -from huggingface_hub import HfFolder, Repository, whoami -from PIL import Image -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import CLIPTextModel, CLIPTokenizer - - -logger = get_logger(__name__) - - -def parse_args(): - parser = argparse.ArgumentParser(description="Simple example of a training script.") - parser.add_argument( - "--pretrained_model_name_or_path", - type=str, - default=None, - #required=True, - help="Path to pretrained model or model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--tokenizer_name", - type=str, - default=None, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--instance_data_dir", - type=str, - default=None, - #required=True, - help="A folder containing the training data of instance images.", - ) - parser.add_argument( - "--class_data_dir", - type=str, - default=None, - required=False, - help="A folder containing the training data of class images.", - ) - parser.add_argument( - "--instance_prompt", - type=str, - default=None, - help="The prompt with identifier specifying the instance", - ) - parser.add_argument( - "--class_prompt", - type=str, - default="", - help="The prompt to specify images in the same class as provided 
instance images.", - ) - parser.add_argument( - "--with_prior_preservation", - default=False, - action="store_true", - help="Flag to add prior preservation loss.", - ) - parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") - parser.add_argument( - "--num_class_images", - type=int, - default=100, - help=( - "Minimal class images for prior preservation loss. If not have enough images, additional images will be" - " sampled with class_prompt." - ), - ) - parser.add_argument( - "--output_dir", - type=str, - default="", - help="The output directory where the model predictions and checkpoints will be written.", - ) - parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") - parser.add_argument( - "--resolution", - type=int, - default=512, - help=( - "The resolution for input images, all the images in the train/validation dataset will be resized to this" - " resolution" - ), - ) - parser.add_argument( - "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution" - ) - parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") - parser.add_argument( - "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." - ) - parser.add_argument( - "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." - ) - parser.add_argument("--num_train_epochs", type=int, default=1) - parser.add_argument( - "--max_train_steps", - type=int, - default=None, - help="Total number of training steps to perform. If provided, overrides num_train_epochs.", - ) - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument( - "--gradient_checkpointing", - action="store_true", - help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=5e-6, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument( - "--scale_lr", - action="store_true", - default=False, - help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", - ) - parser.add_argument( - "--lr_scheduler", - type=str, - default="constant", - help=( - 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' - ' "constant", "constant_with_warmup"]' - ), - ) - parser.add_argument( - "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." - ) - parser.add_argument( - "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." 
- ) - parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") - parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") - parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") - parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") - parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") - parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") - parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") - parser.add_argument( - "--hub_model_id", - type=str, - default=None, - help="The name of the repository to keep in sync with the local `output_dir`.", - ) - parser.add_argument( - "--logging_dir", - type=str, - default="logs", - help=( - "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" - " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." - ), - ) - parser.add_argument( - "--mixed_precision", - type=str, - default="no", - choices=["no", "fp16", "bf16"], - help=( - "Whether to use mixed precision. Choose" - "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." - "and an Nvidia Ampere GPU." - ), - ) - - parser.add_argument( - "--save_n_steps", - type=int, - default=1, - help=("Save the model every n global_steps"), - ) - - - parser.add_argument( - "--save_starting_step", - type=int, - default=1, - help=("The step from which it starts saving intermediary checkpoints"), - ) - - parser.add_argument( - "--stop_text_encoder_training", - type=int, - default=1000000, - help=("The step at which the text_encoder is no longer trained"), - ) - - - parser.add_argument( - "--image_captions_filename", - action="store_true", - help="Get captions from filename", - ) - - - parser.add_argument( - "--dump_only_text_encoder", - action="store_true", - default=False, - help="Dump only text encoder", - ) - - parser.add_argument( - "--train_only_unet", - action="store_true", - default=False, - help="Train only the unet", - ) - - parser.add_argument( - "--Session_dir", - type=str, - default="", - help="Current session directory", - ) - - - - - parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") - - args = parser.parse_args() - env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) - if env_local_rank != -1 and env_local_rank != args.local_rank: - args.local_rank = env_local_rank - - #if args.instance_data_dir is None: - # raise ValueError("You must specify a train data directory.") - - #if args.with_prior_preservation: - # if args.class_data_dir is None: - # raise ValueError("You must specify a data directory for class images.") - # if args.class_prompt is None: - # raise ValueError("You must specify prompt for class images.") - - return args - - -class DreamBoothDataset(Dataset): - """ - A dataset to prepare the instance and class images with the prompts for fine-tuning the model. - It pre-processes the images and the tokenizes prompts. 
- """ - - def __init__( - self, - instance_data_root, - instance_prompt, - tokenizer, - args, - class_data_root=None, - class_prompt=None, - size=512, - center_crop=False, - ): - self.size = size - self.center_crop = center_crop - self.tokenizer = tokenizer - self.image_captions_filename = None - - self.instance_data_root = Path(instance_data_root) - if not self.instance_data_root.exists(): - raise ValueError("Instance images root doesn't exists.") - - self.instance_images_path = list(Path(instance_data_root).iterdir()) - self.num_instance_images = len(self.instance_images_path) - self.instance_prompt = instance_prompt - self._length = self.num_instance_images - - if args.image_captions_filename: - self.image_captions_filename = True - - if class_data_root is not None: - self.class_data_root = Path(class_data_root) - self.class_data_root.mkdir(parents=True, exist_ok=True) - self.class_images_path = list(self.class_data_root.iterdir()) - self.num_class_images = len(self.class_images_path) - self._length = max(self.num_class_images, self.num_instance_images) - self.class_prompt = class_prompt - else: - self.class_data_root = None - - self.image_transforms = transforms.Compose( - [ - transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), - transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), - transforms.ToTensor(), - transforms.Normalize([0.5], [0.5]), - ] - ) - - def __len__(self): - return self._length - - def __getitem__(self, index): - example = {} - path = self.instance_images_path[index % self.num_instance_images] - instance_image = Image.open(path) - if not instance_image.mode == "RGB": - instance_image = instance_image.convert("RGB") - - instance_prompt = self.instance_prompt - - if self.image_captions_filename: - filename = Path(path).stem - pt=''.join([i for i in filename if not i.isdigit()]) - pt=pt.replace("_"," ") - pt=pt.replace("(","") - pt=pt.replace(")","") - instance_prompt = pt - sys.stdout.write(" " +instance_prompt+" ") - sys.stdout.flush() - - - example["instance_images"] = self.image_transforms(instance_image) - example["instance_prompt_ids"] = self.tokenizer( - instance_prompt, - padding="do_not_pad", - truncation=True, - max_length=self.tokenizer.model_max_length, - ).input_ids - - if self.class_data_root: - class_image = Image.open(self.class_images_path[index % self.num_class_images]) - if not class_image.mode == "RGB": - class_image = class_image.convert("RGB") - example["class_images"] = self.image_transforms(class_image) - example["class_prompt_ids"] = self.tokenizer( - self.class_prompt, - padding="do_not_pad", - truncation=True, - max_length=self.tokenizer.model_max_length, - ).input_ids - - return example - - - -class PromptDataset(Dataset): - "A simple dataset to prepare the prompts to generate class images on multiple GPUs." 
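When --image_captions_filename is set, DreamBoothDataset.__getitem__ above derives the prompt from the image filename by stripping digits and parentheses and turning underscores into spaces. A small stand-alone illustration of that transformation (the filename is made up):

# Sketch of the filename-to-caption logic used in DreamBoothDataset.__getitem__.
from pathlib import Path

def caption_from_filename(path_str):
    stem = Path(path_str).stem                      # drop directory and extension
    text = ''.join(ch for ch in stem if not ch.isdigit())
    return text.replace("_", " ").replace("(", "").replace(")", "")

print(caption_from_filename("instance_images/photo_of_sks_dog_(3).jpg"))
# -> "photo of sks dog "   (the trailing space is kept, as in the original code)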
- - def __init__(self, prompt, num_samples): - self.prompt = prompt - self.num_samples = num_samples - - def __len__(self): - return self.num_samples - - def __getitem__(self, index): - example = {} - example["prompt"] = self.prompt - example["index"] = index - return example - - -def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None): - if token is None: - token = HfFolder.get_token() - if organization is None: - username = whoami(token)["name"] - return f"{username}/{model_id}" - else: - return f"{organization}/{model_id}" - -def merge_two_dicts(starting_dict: dict, updater_dict: dict) -> dict: - """ - Starts from base starting dict and then adds the remaining key values from updater replacing the values from - the first starting/base dict with the second updater dict. - - For later: how does d = {**d1, **d2} replace collision? - - :param starting_dict: - :param updater_dict: - :return: - """ - new_dict: dict = starting_dict.copy() # start with keys and values of starting_dict - new_dict.update(updater_dict) # modifies starting_dict with keys and values of updater_dict - return new_dict - -def merge_args(args1: argparse.Namespace, args2: argparse.Namespace) -> argparse.Namespace: - """ - - ref: https://stackoverflow.com/questions/56136549/how-can-i-merge-two-argparse-namespaces-in-python-2-x - :param args1: - :param args2: - :return: - """ - # - the merged args - # The vars() function returns the __dict__ attribute to values of the given object e.g {field:value}. - merged_key_values_for_namespace: dict = merge_two_dicts(vars(args1), vars(args2)) - args = argparse.Namespace(**merged_key_values_for_namespace) - return args - -def run_training(args_imported): - args_default = parse_args() - args = merge_args(args_default, args_imported) - print(args) - logging_dir = Path(args.output_dir, args.logging_dir) - i=args.save_starting_step - accelerator = Accelerator( - gradient_accumulation_steps=args.gradient_accumulation_steps, - mixed_precision=args.mixed_precision, - log_with="tensorboard", - logging_dir=logging_dir, - ) - - # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate - # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. - # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. - if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: - raise ValueError( - "Gradient accumulation is not supported when training the text encoder in distributed training. " - "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." 
- ) - - if args.seed is not None: - set_seed(args.seed) - - if args.with_prior_preservation: - class_images_dir = Path(args.class_data_dir) - if not class_images_dir.exists(): - class_images_dir.mkdir(parents=True) - cur_class_images = len(list(class_images_dir.iterdir())) - - if cur_class_images < args.num_class_images: - torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32 - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, torch_dtype=torch_dtype - ) - pipeline.set_progress_bar_config(disable=True) - - num_new_images = args.num_class_images - cur_class_images - logger.info(f"Number of class images to sample: {num_new_images}.") - - sample_dataset = PromptDataset(args.class_prompt, num_new_images) - sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) - - sample_dataloader = accelerator.prepare(sample_dataloader) - pipeline.to(accelerator.device) - - for example in tqdm( - sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process - ): - with torch.autocast("cuda"): - images = pipeline(example["prompt"]).images - - for i, image in enumerate(images): - image.save(class_images_dir / f"{example['index'][i] + cur_class_images}.jpg") - - del pipeline - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - # Handle the repository creation - if accelerator.is_main_process: - if args.push_to_hub: - if args.hub_model_id is None: - repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) - else: - repo_name = args.hub_model_id - repo = Repository(args.output_dir, clone_from=repo_name) - - with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: - if "step_*" not in gitignore: - gitignore.write("step_*\n") - if "epoch_*" not in gitignore: - gitignore.write("epoch_*\n") - elif args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - - # Load the tokenizer - if args.tokenizer_name: - tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) - elif args.pretrained_model_name_or_path: - tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") - - # Load models and create wrapper for stable diffusion - if args.train_only_unet: - if os.path.exists(str(args.output_dir+"/text_encoder_trained")): - text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder_trained") - elif os.path.exists(str(args.output_dir+"/text_encoder")): - text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder") - else: - text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") - else: - text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") - vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae") - unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet") - - vae.requires_grad_(False) - if not args.train_text_encoder: - text_encoder.requires_grad_(False) - - if args.gradient_checkpointing: - unet.enable_gradient_checkpointing() - if args.train_text_encoder: - text_encoder.gradient_checkpointing_enable() - - if args.scale_lr: - args.learning_rate = ( - args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes - ) - - # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB 
GPUs - if args.use_8bit_adam: - try: - import bitsandbytes as bnb - except ImportError: - raise ImportError( - "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." - ) - - optimizer_class = bnb.optim.AdamW8bit - else: - optimizer_class = torch.optim.AdamW - - params_to_optimize = ( - itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() - ) - optimizer = optimizer_class( - params_to_optimize, - lr=args.learning_rate, - betas=(args.adam_beta1, args.adam_beta2), - weight_decay=args.adam_weight_decay, - eps=args.adam_epsilon, - ) - - noise_scheduler = DDPMScheduler( - beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000 - ) - - train_dataset = DreamBoothDataset( - instance_data_root=args.instance_data_dir, - instance_prompt=args.instance_prompt, - class_data_root=args.class_data_dir if args.with_prior_preservation else None, - class_prompt=args.class_prompt, - tokenizer=tokenizer, - size=args.resolution, - center_crop=args.center_crop, - args=args, - ) - - def collate_fn(examples): - input_ids = [example["instance_prompt_ids"] for example in examples] - pixel_values = [example["instance_images"] for example in examples] - - # Concat class and instance examples for prior preservation. - # We do this to avoid doing two forward passes. - if args.with_prior_preservation: - input_ids += [example["class_prompt_ids"] for example in examples] - pixel_values += [example["class_images"] for example in examples] - - pixel_values = torch.stack(pixel_values) - pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() - - input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids - - batch = { - "input_ids": input_ids, - "pixel_values": pixel_values, - } - return batch - - train_dataloader = torch.utils.data.DataLoader( - train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn - ) - - # Scheduler and math around the number of training steps. - overrode_max_train_steps = False - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if args.max_train_steps is None: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - overrode_max_train_steps = True - - lr_scheduler = get_scheduler( - args.lr_scheduler, - optimizer=optimizer, - num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, - num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, - ) - - if args.train_text_encoder: - unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - unet, text_encoder, optimizer, train_dataloader, lr_scheduler - ) - else: - unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - unet, optimizer, train_dataloader, lr_scheduler - ) - - weight_dtype = torch.float32 - if args.mixed_precision == "fp16": - weight_dtype = torch.float16 - elif args.mixed_precision == "bf16": - weight_dtype = torch.bfloat16 - - # Move text_encode and vae to gpu. - # For mixed precision training we cast the text_encoder and vae weights to half-precision - # as these models are only used for inference, keeping weights in full precision is not required. 
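With prior preservation enabled, collate_fn above stacks the instance examples first and the class examples second, so a single UNet forward covers both halves; the loss computation further down then splits the prediction back with torch.chunk. A shape-only sketch of that round trip (all sizes are arbitrary):

# Sketch: how the doubled prior-preservation batch is split again for the loss.
import torch

bsz, c, h, w = 2, 4, 64, 64                       # latent-space shapes, made up
instance_noise_pred = torch.randn(bsz, c, h, w)
class_noise_pred = torch.randn(bsz, c, h, w)
noise_pred = torch.cat([instance_noise_pred, class_noise_pred], dim=0)   # what the UNet returns

pred_instance, pred_prior = torch.chunk(noise_pred, 2, dim=0)
assert pred_instance.shape == (bsz, c, h, w) and pred_prior.shape == (bsz, c, h, w)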
- vae.to(accelerator.device, dtype=weight_dtype) - if not args.train_text_encoder: - text_encoder.to(accelerator.device, dtype=weight_dtype) - - # We need to recalculate our total training steps as the size of the training dataloader may have changed. - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if overrode_max_train_steps: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - # Afterwards we recalculate our number of training epochs - args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) - - # We need to initialize the trackers we use, and also store our configuration. - # The trackers initializes automatically on the main process. - if accelerator.is_main_process: - accelerator.init_trackers("dreambooth", config=vars(args)) - - def bar(prg): - br='|'+'█' * prg + ' ' * (25-prg)+'|' - return br - - # Train! - total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps - - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num batches each epoch = {len(train_dataloader)}") - logger.info(f" Num Epochs = {args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") - logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") - logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - logger.info(f" Total optimization steps = {args.max_train_steps}") - # Only show the progress bar once on each machine. - progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) - global_step = 0 - - for epoch in range(args.num_train_epochs): - unet.train() - if args.train_text_encoder: - text_encoder.train() - for step, batch in enumerate(train_dataloader): - with accelerator.accumulate(unet): - # Convert images to latent space - latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() - latents = latents * 0.18215 - - # Sample noise that we'll add to the latents - noise = torch.randn_like(latents) - bsz = latents.shape[0] - # Sample a random timestep for each image - timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) - timesteps = timesteps.long() - - # Add noise to the latents according to the noise magnitude at each timestep - # (this is the forward diffusion process) - noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) - - # Get the text embedding for conditioning - encoder_hidden_states = text_encoder(batch["input_ids"])[0] - - # Predict the noise residual - noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample - - if args.with_prior_preservation: - # Chunk the noise and noise_pred into two parts and compute the loss on each part separately. - noise_pred, noise_pred_prior = torch.chunk(noise_pred, 2, dim=0) - noise, noise_prior = torch.chunk(noise, 2, dim=0) - - # Compute instance loss - loss = F.mse_loss(noise_pred.float(), noise.float(), reduction="none").mean([1, 2, 3]).mean() - - # Compute prior loss - prior_loss = F.mse_loss(noise_pred_prior.float(), noise_prior.float(), reduction="mean") - - # Add the prior loss to the instance loss. 
- loss = loss + args.prior_loss_weight * prior_loss - else: - loss = F.mse_loss(noise_pred.float(), noise.float(), reduction="mean") - - accelerator.backward(loss) - if accelerator.sync_gradients: - params_to_clip = ( - itertools.chain(unet.parameters(), text_encoder.parameters()) - if args.train_text_encoder - else unet.parameters() - ) - accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) - optimizer.step() - lr_scheduler.step() - optimizer.zero_grad() - - # Checks if the accelerator has performed an optimization step behind the scenes - if accelerator.sync_gradients: - progress_bar.update(1) - global_step += 1 - - fll=round((global_step*100)/args.max_train_steps) - fll=round(fll/4) - pr=bar(fll) - - logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} - progress_bar.set_postfix(**logs) - progress_bar.set_description_str("Progress:"+pr) - accelerator.log(logs, step=global_step) - - if global_step >= args.max_train_steps: - break - - if args.train_text_encoder and global_step == args.stop_text_encoder_training and global_step >= 30: - if accelerator.is_main_process: - print(" " +" Freezing the text_encoder ..."+" ") - frz_dir=args.output_dir + "/text_encoder_frozen" - if os.path.exists(frz_dir): - subprocess.call('rm -r '+ frz_dir, shell=True) - os.mkdir(frz_dir) - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.text_encoder.save_pretrained(frz_dir) - - if args.save_n_steps >= 200: - if global_step < args.max_train_steps-100 and global_step+1==i: - ckpt_name = "_step_" + str(global_step+1) - save_dir = Path(args.output_dir+ckpt_name) - save_dir=str(save_dir) - save_dir=save_dir.replace(" ", "_") - if not os.path.exists(save_dir): - os.mkdir(save_dir) - inst=save_dir[16:] - inst=inst.replace(" ", "_") - print(" SAVING CHECKPOINT: "+args.Session_dir+"/"+inst+".ckpt") - # Create the pipeline using the trained modules and save it. - if accelerator.is_main_process: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.save_pretrained(save_dir) - frz_dir=args.output_dir + "/text_encoder_frozen" - if args.train_text_encoder and os.path.exists(frz_dir): - subprocess.call('rm -r '+save_dir+'/text_encoder/*.*', shell=True) - subprocess.call('cp -f '+frz_dir +'/*.* '+ save_dir+'/text_encoder', shell=True) - chkpth=args.Session_dir+"/"+inst+".ckpt" - subprocess.call('python /content/diffusers/scripts/convert_diffusers_to_original_stable_diffusion.py --model_path ' + save_dir + ' --checkpoint_path ' + chkpth + ' --half', shell=True) - i=i+args.save_n_steps - - accelerator.wait_for_everyone() - - # Create the pipeline using using the trained modules and save it. 
- if accelerator.is_main_process: - if args.dump_only_text_encoder: - txt_dir=args.output_dir + "/text_encoder_trained" - if not os.path.exists(txt_dir): - os.mkdir(txt_dir) - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.text_encoder.save_pretrained(txt_dir) - - elif args.train_only_unet: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.save_pretrained(args.output_dir) - txt_dir=args.output_dir + "/text_encoder_trained" - subprocess.call('rm -r '+txt_dir, shell=True) - - else: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - frz_dir=args.output_dir + "/text_encoder_frozen" - pipeline.save_pretrained(args.output_dir) - if args.train_text_encoder and os.path.exists(frz_dir): - subprocess.call('mv -f '+frz_dir +'/*.* '+ args.output_dir+'/text_encoder', shell=True) - subprocess.call('rm -r '+ frz_dir, shell=True) - - if args.push_to_hub: - repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True) - - accelerator.end_training() - -if __name__ == "__main__": - pass - #main() diff --git a/spaces/alexrame/rewardedsoups/streamlit_app/data/locomotion/trajectories/19.html b/spaces/alexrame/rewardedsoups/streamlit_app/data/locomotion/trajectories/19.html deleted file mode 100644 index 1083a2d57d16d3dd5558368160b5dd13fd8f1021..0000000000000000000000000000000000000000 --- a/spaces/alexrame/rewardedsoups/streamlit_app/data/locomotion/trajectories/19.html +++ /dev/null @@ -1,48 +0,0 @@ - - - - brax visualizer - - - - -
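For completeness, a pipeline directory written by the DreamBooth training script above can be loaded back for inference with the standard diffusers API; a hedged usage sketch in which the path and the prompt are placeholders:

# Usage sketch: loading the saved DreamBooth pipeline for inference.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("path/to/output_dir", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
image = pipe("a photo of sks dog in a bucket", num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("sks_dog.png")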
      - - - diff --git a/spaces/allknowingroger/Image-Models-Test14/app.py b/spaces/allknowingroger/Image-Models-Test14/app.py deleted file mode 100644 index c45ef6020c60ab162e782d6d93068eb23dcad25e..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test14/app.py +++ /dev/null @@ -1,144 +0,0 @@ -import gradio as gr -# import os -# import sys -# from pathlib import Path -import time - -models =[ - "AaAsr/weight", - "badmonk/renaxoi", - "digiplay/K-main2.1", - "digiplay/realmixUnrealjourney_v1", - "digiplay/AIGEN_v1.4_diffusers", - "digiplay/CamelliaMix_NSFW_diffusers_v1.1", - "digiplay/GhostMixV1.2VAE", - "digiplay/CamelliaMIx_2.5D_diffusers", - "digiplay/LemonTea2.5D", -] - - -model_functions = {} -model_idx = 1 -for model_path in models: - try: - model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False) - except Exception as error: - def the_fn(txt): - return None - model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"]) - model_idx+=1 - - -def send_it_idx(idx): - def send_it_fn(prompt): - output = (model_functions.get(str(idx)) or model_functions.get(str(1)))(prompt) - return output - return send_it_fn - -def get_prompts(prompt_text): - return prompt_text - -def clear_it(val): - if int(val) != 0: - val = 0 - else: - val = 0 - pass - return val - -def all_task_end(cnt,t_stamp): - to = t_stamp + 60 - et = time.time() - if et > to and t_stamp != 0: - d = gr.update(value=0) - tog = gr.update(value=1) - #print(f'to: {to} et: {et}') - else: - if cnt != 0: - d = gr.update(value=et) - else: - d = gr.update(value=0) - tog = gr.update(value=0) - #print (f'passing: to: {to} et: {et}') - pass - return d, tog - -def all_task_start(): - print("\n\n\n\n\n\n\n") - t = time.gmtime() - t_stamp = time.time() - current_time = time.strftime("%H:%M:%S", t) - return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0) - -def clear_fn(): - nn = len(models) - return tuple([None, *[None for _ in range(nn)]]) - - - -with gr.Blocks(title="SD Models") as my_interface: - with gr.Column(scale=12): - # with gr.Row(): - # gr.Markdown("""- Primary prompt: 你想画的内容(英文单词,如 a cat, 加英文逗号效果更好;点 Improve 按钮进行完善)\n- Real prompt: 完善后的提示词,出现后再点右边的 Run 按钮开始运行""") - with gr.Row(): - with gr.Row(scale=6): - primary_prompt=gr.Textbox(label="Prompt", value="") - # real_prompt=gr.Textbox(label="Real prompt") - with gr.Row(scale=6): - # improve_prompts_btn=gr.Button("Improve") - with gr.Row(): - run=gr.Button("Run",variant="primary") - clear_btn=gr.Button("Clear") - with gr.Row(): - sd_outputs = {} - model_idx = 1 - for model_path in models: - with gr.Column(scale=3, min_width=320): - with gr.Box(): - sd_outputs[model_idx] = gr.Image(label=model_path) - pass - model_idx += 1 - pass - pass - - with gr.Row(visible=False): - start_box=gr.Number(interactive=False) - end_box=gr.Number(interactive=False) - tog_box=gr.Textbox(value=0,interactive=False) - - start_box.change( - all_task_end, - [start_box, end_box], - [start_box, tog_box], - every=1, - show_progress=False) - - primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box]) - run.click(all_task_start, None, [start_box, end_box, tog_box]) - runs_dict = {} - model_idx = 1 - for model_path in models: - runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]]) - model_idx += 1 - pass - pass - - # improve_prompts_btn_clicked=improve_prompts_btn.click( - # get_prompts, - # 
inputs=[primary_prompt], - # outputs=[primary_prompt], - # cancels=list(runs_dict.values())) - clear_btn.click( - clear_fn, - None, - [primary_prompt, *list(sd_outputs.values())], - cancels=[*list(runs_dict.values())]) - tog_box.change( - clear_it, - tog_box, - tog_box, - cancels=[*list(runs_dict.values())]) - -my_interface.queue(concurrency_count=600, status_update_rate=1) -my_interface.launch(inline=True, show_api=False) - \ No newline at end of file diff --git a/spaces/alphunt/diffdock-alphunt-demo/models/all_atom_score_model.py b/spaces/alphunt/diffdock-alphunt-demo/models/all_atom_score_model.py deleted file mode 100644 index 6c08aefdb972589303525275a948a8b21da1d346..0000000000000000000000000000000000000000 --- a/spaces/alphunt/diffdock-alphunt-demo/models/all_atom_score_model.py +++ /dev/null @@ -1,415 +0,0 @@ -from e3nn import o3 -import torch -from torch import nn -from torch.nn import functional as F -from torch_cluster import radius, radius_graph -from torch_scatter import scatter_mean -import numpy as np - -from models.score_model import AtomEncoder, TensorProductConvLayer, GaussianSmearing -from utils import so3, torus -from datasets.process_mols import lig_feature_dims, rec_residue_feature_dims, rec_atom_feature_dims - - -class TensorProductScoreModel(torch.nn.Module): - def __init__(self, t_to_sigma, device, timestep_emb_func, in_lig_edge_features=4, sigma_embed_dim=32, sh_lmax=2, - ns=16, nv=4, num_conv_layers=2, lig_max_radius=5, rec_max_radius=30, cross_max_distance=250, - center_max_distance=30, distance_embed_dim=32, cross_distance_embed_dim=32, no_torsion=False, - scale_by_sigma=True, use_second_order_repr=False, batch_norm=True, - dynamic_max_cross=False, dropout=0.0, lm_embedding_type=False, confidence_mode=False, - confidence_dropout=0, confidence_no_batchnorm=False, num_confidence_outputs=1): - super(TensorProductScoreModel, self).__init__() - self.t_to_sigma = t_to_sigma - self.in_lig_edge_features = in_lig_edge_features - self.sigma_embed_dim = sigma_embed_dim - self.lig_max_radius = lig_max_radius - self.rec_max_radius = rec_max_radius - self.cross_max_distance = cross_max_distance - self.dynamic_max_cross = dynamic_max_cross - self.center_max_distance = center_max_distance - self.distance_embed_dim = distance_embed_dim - self.cross_distance_embed_dim = cross_distance_embed_dim - self.sh_irreps = o3.Irreps.spherical_harmonics(lmax=sh_lmax) - self.ns, self.nv = ns, nv - self.scale_by_sigma = scale_by_sigma - self.device = device - self.no_torsion = no_torsion - self.num_conv_layers = num_conv_layers - self.timestep_emb_func = timestep_emb_func - self.confidence_mode = confidence_mode - self.num_conv_layers = num_conv_layers - - # embedding layers - self.lig_node_embedding = AtomEncoder(emb_dim=ns, feature_dims=lig_feature_dims, sigma_embed_dim=sigma_embed_dim) - self.lig_edge_embedding = nn.Sequential(nn.Linear(in_lig_edge_features + sigma_embed_dim + distance_embed_dim, ns),nn.ReLU(),nn.Dropout(dropout),nn.Linear(ns, ns)) - - self.rec_node_embedding = AtomEncoder(emb_dim=ns, feature_dims=rec_residue_feature_dims, sigma_embed_dim=sigma_embed_dim, lm_embedding_type=lm_embedding_type) - self.rec_edge_embedding = nn.Sequential(nn.Linear(sigma_embed_dim + distance_embed_dim, ns), nn.ReLU(), nn.Dropout(dropout),nn.Linear(ns, ns)) - - self.atom_node_embedding = AtomEncoder(emb_dim=ns, feature_dims=rec_atom_feature_dims, sigma_embed_dim=sigma_embed_dim) - self.atom_edge_embedding = nn.Sequential(nn.Linear(sigma_embed_dim + distance_embed_dim, ns), nn.ReLU(), 
nn.Dropout(dropout),nn.Linear(ns, ns)) - - self.lr_edge_embedding = nn.Sequential(nn.Linear(sigma_embed_dim + cross_distance_embed_dim, ns), nn.ReLU(), nn.Dropout(dropout),nn.Linear(ns, ns)) - self.ar_edge_embedding = nn.Sequential(nn.Linear(sigma_embed_dim + distance_embed_dim, ns), nn.ReLU(), nn.Dropout(dropout),nn.Linear(ns, ns)) - self.la_edge_embedding = nn.Sequential(nn.Linear(sigma_embed_dim + cross_distance_embed_dim, ns), nn.ReLU(), nn.Dropout(dropout),nn.Linear(ns, ns)) - - self.lig_distance_expansion = GaussianSmearing(0.0, lig_max_radius, distance_embed_dim) - self.rec_distance_expansion = GaussianSmearing(0.0, rec_max_radius, distance_embed_dim) - self.cross_distance_expansion = GaussianSmearing(0.0, cross_max_distance, cross_distance_embed_dim) - - if use_second_order_repr: - irrep_seq = [ - f'{ns}x0e', - f'{ns}x0e + {nv}x1o + {nv}x2e', - f'{ns}x0e + {nv}x1o + {nv}x2e + {nv}x1e + {nv}x2o', - f'{ns}x0e + {nv}x1o + {nv}x2e + {nv}x1e + {nv}x2o + {ns}x0o' - ] - else: - irrep_seq = [ - f'{ns}x0e', - f'{ns}x0e + {nv}x1o', - f'{ns}x0e + {nv}x1o + {nv}x1e', - f'{ns}x0e + {nv}x1o + {nv}x1e + {ns}x0o' - ] - - # convolutional layers - conv_layers = [] - for i in range(num_conv_layers): - in_irreps = irrep_seq[min(i, len(irrep_seq) - 1)] - out_irreps = irrep_seq[min(i + 1, len(irrep_seq) - 1)] - parameters = { - 'in_irreps': in_irreps, - 'sh_irreps': self.sh_irreps, - 'out_irreps': out_irreps, - 'n_edge_features': 3 * ns, - 'residual': False, - 'batch_norm': batch_norm, - 'dropout': dropout - } - - for _ in range(9): # 3 intra & 6 inter per each layer - conv_layers.append(TensorProductConvLayer(**parameters)) - - self.conv_layers = nn.ModuleList(conv_layers) - - # confidence and affinity prediction layers - if self.confidence_mode: - output_confidence_dim = num_confidence_outputs - - self.confidence_predictor = nn.Sequential( - nn.Linear(2 * self.ns if num_conv_layers >= 3 else self.ns, ns), - nn.BatchNorm1d(ns) if not confidence_no_batchnorm else nn.Identity(), - nn.ReLU(), - nn.Dropout(confidence_dropout), - nn.Linear(ns, ns), - nn.BatchNorm1d(ns) if not confidence_no_batchnorm else nn.Identity(), - nn.ReLU(), - nn.Dropout(confidence_dropout), - nn.Linear(ns, output_confidence_dim) - ) - - else: - # convolution for translational and rotational scores - self.center_distance_expansion = GaussianSmearing(0.0, center_max_distance, distance_embed_dim) - self.center_edge_embedding = nn.Sequential( - nn.Linear(distance_embed_dim + sigma_embed_dim, ns), - nn.ReLU(), - nn.Dropout(dropout), - nn.Linear(ns, ns) - ) - - self.final_conv = TensorProductConvLayer( - in_irreps=self.conv_layers[-1].out_irreps, - sh_irreps=self.sh_irreps, - out_irreps=f'2x1o + 2x1e', - n_edge_features=2 * ns, - residual=False, - dropout=dropout, - batch_norm=batch_norm - ) - - self.tr_final_layer = nn.Sequential(nn.Linear(1 + sigma_embed_dim, ns), nn.Dropout(dropout), nn.ReLU(), nn.Linear(ns, 1)) - self.rot_final_layer = nn.Sequential(nn.Linear(1 + sigma_embed_dim, ns), nn.Dropout(dropout), nn.ReLU(), nn.Linear(ns, 1)) - - if not no_torsion: - # convolution for torsional score - self.final_edge_embedding = nn.Sequential( - nn.Linear(distance_embed_dim, ns), - nn.ReLU(), - nn.Dropout(dropout), - nn.Linear(ns, ns) - ) - self.final_tp_tor = o3.FullTensorProduct(self.sh_irreps, "2e") - self.tor_bond_conv = TensorProductConvLayer( - in_irreps=self.conv_layers[-1].out_irreps, - sh_irreps=self.final_tp_tor.irreps_out, - out_irreps=f'{ns}x0o + {ns}x0e', - n_edge_features=3 * ns, - residual=False, - dropout=dropout, - 
batch_norm=batch_norm - ) - self.tor_final_layer = nn.Sequential( - nn.Linear(2 * ns if not self.odd_parity else ns, ns, bias=False), - nn.Tanh(), - nn.Dropout(dropout), - nn.Linear(ns, 1, bias=False) - ) - - def forward(self, data): - if not self.confidence_mode: - tr_sigma, rot_sigma, tor_sigma = self.t_to_sigma(*[data.complex_t[noise_type] for noise_type in ['tr', 'rot', 'tor']]) - else: - tr_sigma, rot_sigma, tor_sigma = [data.complex_t[noise_type] for noise_type in ['tr', 'rot', 'tor']] - - # build ligand graph - lig_node_attr, lig_edge_index, lig_edge_attr, lig_edge_sh = self.build_lig_conv_graph(data) - lig_node_attr = self.lig_node_embedding(lig_node_attr) - lig_edge_attr = self.lig_edge_embedding(lig_edge_attr) - - # build receptor graph - rec_node_attr, rec_edge_index, rec_edge_attr, rec_edge_sh = self.build_rec_conv_graph(data) - rec_node_attr = self.rec_node_embedding(rec_node_attr) - rec_edge_attr = self.rec_edge_embedding(rec_edge_attr) - - # build atom graph - atom_node_attr, atom_edge_index, atom_edge_attr, atom_edge_sh = self.build_atom_conv_graph(data) - atom_node_attr = self.atom_node_embedding(atom_node_attr) - atom_edge_attr = self.atom_edge_embedding(atom_edge_attr) - - # build cross graph - cross_cutoff = (tr_sigma * 3 + 20).unsqueeze(1) if self.dynamic_max_cross else self.cross_max_distance - lr_edge_index, lr_edge_attr, lr_edge_sh, la_edge_index, la_edge_attr, \ - la_edge_sh, ar_edge_index, ar_edge_attr, ar_edge_sh = self.build_cross_conv_graph(data, cross_cutoff) - lr_edge_attr= self.lr_edge_embedding(lr_edge_attr) - la_edge_attr = self.la_edge_embedding(la_edge_attr) - ar_edge_attr = self.ar_edge_embedding(ar_edge_attr) - - for l in range(self.num_conv_layers): - # LIGAND updates - lig_edge_attr_ = torch.cat([lig_edge_attr, lig_node_attr[lig_edge_index[0], :self.ns], lig_node_attr[lig_edge_index[1], :self.ns]], -1) - lig_update = self.conv_layers[9*l](lig_node_attr, lig_edge_index, lig_edge_attr_, lig_edge_sh) - - lr_edge_attr_ = torch.cat([lr_edge_attr, lig_node_attr[lr_edge_index[0], :self.ns], rec_node_attr[lr_edge_index[1], :self.ns]], -1) - lr_update = self.conv_layers[9*l+1](rec_node_attr, lr_edge_index, lr_edge_attr_, lr_edge_sh, - out_nodes=lig_node_attr.shape[0]) - - la_edge_attr_ = torch.cat([la_edge_attr, lig_node_attr[la_edge_index[0], :self.ns], atom_node_attr[la_edge_index[1], :self.ns]], -1) - la_update = self.conv_layers[9*l+2](atom_node_attr, la_edge_index, la_edge_attr_, la_edge_sh, - out_nodes=lig_node_attr.shape[0]) - - if l != self.num_conv_layers-1: # last layer optimisation - - # ATOM UPDATES - atom_edge_attr_ = torch.cat([atom_edge_attr, atom_node_attr[atom_edge_index[0], :self.ns], atom_node_attr[atom_edge_index[1], :self.ns]], -1) - atom_update = self.conv_layers[9*l+3](atom_node_attr, atom_edge_index, atom_edge_attr_, atom_edge_sh) - - al_edge_attr_ = torch.cat([la_edge_attr, atom_node_attr[la_edge_index[1], :self.ns], lig_node_attr[la_edge_index[0], :self.ns]], -1) - al_update = self.conv_layers[9*l+4](lig_node_attr, torch.flip(la_edge_index, dims=[0]), al_edge_attr_, - la_edge_sh, out_nodes=atom_node_attr.shape[0]) - - ar_edge_attr_ = torch.cat([ar_edge_attr, atom_node_attr[ar_edge_index[0], :self.ns], rec_node_attr[ar_edge_index[1], :self.ns]],-1) - ar_update = self.conv_layers[9*l+5](rec_node_attr, ar_edge_index, ar_edge_attr_, ar_edge_sh, out_nodes=atom_node_attr.shape[0]) - - # RECEPTOR updates - rec_edge_attr_ = torch.cat([rec_edge_attr, rec_node_attr[rec_edge_index[0], :self.ns], rec_node_attr[rec_edge_index[1], :self.ns]], -1) 
- rec_update = self.conv_layers[9*l+6](rec_node_attr, rec_edge_index, rec_edge_attr_, rec_edge_sh) - - rl_edge_attr_ = torch.cat([lr_edge_attr, rec_node_attr[lr_edge_index[1], :self.ns], lig_node_attr[lr_edge_index[0], :self.ns]], -1) - rl_update = self.conv_layers[9*l+7](lig_node_attr, torch.flip(lr_edge_index, dims=[0]), rl_edge_attr_, - lr_edge_sh, out_nodes=rec_node_attr.shape[0]) - - ra_edge_attr_ = torch.cat([ar_edge_attr, rec_node_attr[ar_edge_index[1], :self.ns], atom_node_attr[ar_edge_index[0], :self.ns]], -1) - ra_update = self.conv_layers[9*l+8](atom_node_attr, torch.flip(ar_edge_index, dims=[0]), ra_edge_attr_, - ar_edge_sh, out_nodes=rec_node_attr.shape[0]) - - # padding original features and update features with residual updates - lig_node_attr = F.pad(lig_node_attr, (0, lig_update.shape[-1] - lig_node_attr.shape[-1])) - lig_node_attr = lig_node_attr + lig_update + la_update + lr_update - - if l != self.num_conv_layers - 1: # last layer optimisation - atom_node_attr = F.pad(atom_node_attr, (0, atom_update.shape[-1] - rec_node_attr.shape[-1])) - atom_node_attr = atom_node_attr + atom_update + al_update + ar_update - rec_node_attr = F.pad(rec_node_attr, (0, rec_update.shape[-1] - rec_node_attr.shape[-1])) - rec_node_attr = rec_node_attr + rec_update + ra_update + rl_update - - # confidence and affinity prediction - if self.confidence_mode: - scalar_lig_attr = torch.cat([lig_node_attr[:,:self.ns],lig_node_attr[:,-self.ns:]], dim=1) if self.num_conv_layers >= 3 else lig_node_attr[:,:self.ns] - confidence = self.confidence_predictor(scatter_mean(scalar_lig_attr, data['ligand'].batch, dim=0)).squeeze(dim=-1) - return confidence - - # compute translational and rotational score vectors - center_edge_index, center_edge_attr, center_edge_sh = self.build_center_conv_graph(data) - center_edge_attr = self.center_edge_embedding(center_edge_attr) - center_edge_attr = torch.cat([center_edge_attr, lig_node_attr[center_edge_index[0], :self.ns]], -1) - global_pred = self.final_conv(lig_node_attr, center_edge_index, center_edge_attr, center_edge_sh, out_nodes=data.num_graphs) - - tr_pred = global_pred[:, :3] + global_pred[:, 6:9] - rot_pred = global_pred[:, 3:6] + global_pred[:, 9:] - data.graph_sigma_emb = self.timestep_emb_func(data.complex_t['tr']) - - # adjust the magniture of the score vectors - tr_norm = torch.linalg.vector_norm(tr_pred, dim=1).unsqueeze(1) - tr_pred = tr_pred / tr_norm * self.tr_final_layer(torch.cat([tr_norm, data.graph_sigma_emb], dim=1)) - - rot_norm = torch.linalg.vector_norm(rot_pred, dim=1).unsqueeze(1) - rot_pred = rot_pred / rot_norm * self.rot_final_layer(torch.cat([rot_norm, data.graph_sigma_emb], dim=1)) - - if self.scale_by_sigma: - tr_pred = tr_pred / tr_sigma.unsqueeze(1) - rot_pred = rot_pred * so3.score_norm(rot_sigma.cpu()).unsqueeze(1).to(data['ligand'].x.device) - - if self.no_torsion or data['ligand'].edge_mask.sum() == 0: return tr_pred, rot_pred, torch.empty(0,device=self.device) - - # torsional components - tor_bonds, tor_edge_index, tor_edge_attr, tor_edge_sh = self.build_bond_conv_graph(data) - tor_bond_vec = data['ligand'].pos[tor_bonds[1]] - data['ligand'].pos[tor_bonds[0]] - tor_bond_attr = lig_node_attr[tor_bonds[0]] + lig_node_attr[tor_bonds[1]] - - tor_bonds_sh = o3.spherical_harmonics("2e", tor_bond_vec, normalize=True, normalization='component') - tor_edge_sh = self.final_tp_tor(tor_edge_sh, tor_bonds_sh[tor_edge_index[0]]) - - tor_edge_attr = torch.cat([tor_edge_attr, lig_node_attr[tor_edge_index[1], :self.ns], - 
tor_bond_attr[tor_edge_index[0], :self.ns]], -1) - tor_pred = self.tor_bond_conv(lig_node_attr, tor_edge_index, tor_edge_attr, tor_edge_sh, - out_nodes=data['ligand'].edge_mask.sum(), reduce='mean') - tor_pred = self.tor_final_layer(tor_pred).squeeze(1) - edge_sigma = tor_sigma[data['ligand'].batch][data['ligand', 'ligand'].edge_index[0]][data['ligand'].edge_mask] - - if self.scale_by_sigma: - tor_pred = tor_pred * torch.sqrt(torch.tensor(torus.score_norm(edge_sigma.cpu().numpy())).float() - .to(data['ligand'].x.device)) - return tr_pred, rot_pred, tor_pred - - def build_lig_conv_graph(self, data): - # build the graph between ligand atoms - data['ligand'].node_sigma_emb = self.timestep_emb_func(data['ligand'].node_t['tr']) - - radius_edges = radius_graph(data['ligand'].pos, self.lig_max_radius, data['ligand'].batch) - edge_index = torch.cat([data['ligand', 'ligand'].edge_index, radius_edges], 1).long() - edge_attr = torch.cat([ - data['ligand', 'ligand'].edge_attr, - torch.zeros(radius_edges.shape[-1], self.in_lig_edge_features, device=data['ligand'].x.device) - ], 0) - - edge_sigma_emb = data['ligand'].node_sigma_emb[edge_index[0].long()] - edge_attr = torch.cat([edge_attr, edge_sigma_emb], 1) - node_attr = torch.cat([data['ligand'].x, data['ligand'].node_sigma_emb], 1) - - src, dst = edge_index - edge_vec = data['ligand'].pos[dst.long()] - data['ligand'].pos[src.long()] - edge_length_emb = self.lig_distance_expansion(edge_vec.norm(dim=-1)) - - edge_attr = torch.cat([edge_attr, edge_length_emb], 1) - edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component') - - return node_attr, edge_index, edge_attr, edge_sh - - def build_rec_conv_graph(self, data): - # build the graph between receptor residues - data['receptor'].node_sigma_emb = self.timestep_emb_func(data['receptor'].node_t['tr']) - node_attr = torch.cat([data['receptor'].x, data['receptor'].node_sigma_emb], 1) - - # this assumes the edges were already created in preprocessing since protein's structure is fixed - edge_index = data['receptor', 'receptor'].edge_index - src, dst = edge_index - edge_vec = data['receptor'].pos[dst.long()] - data['receptor'].pos[src.long()] - - edge_length_emb = self.rec_distance_expansion(edge_vec.norm(dim=-1)) - edge_sigma_emb = data['receptor'].node_sigma_emb[edge_index[0].long()] - edge_attr = torch.cat([edge_sigma_emb, edge_length_emb], 1) - edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component') - - return node_attr, edge_index, edge_attr, edge_sh - - def build_atom_conv_graph(self, data): - # build the graph between receptor atoms - data['atom'].node_sigma_emb = self.timestep_emb_func(data['atom'].node_t['tr']) - node_attr = torch.cat([data['atom'].x, data['atom'].node_sigma_emb], 1) - - # this assumes the edges were already created in preprocessing since protein's structure is fixed - edge_index = data['atom', 'atom'].edge_index - src, dst = edge_index - edge_vec = data['atom'].pos[dst.long()] - data['atom'].pos[src.long()] - - edge_length_emb = self.lig_distance_expansion(edge_vec.norm(dim=-1)) - edge_sigma_emb = data['atom'].node_sigma_emb[edge_index[0].long()] - edge_attr = torch.cat([edge_sigma_emb, edge_length_emb], 1) - edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component') - - return node_attr, edge_index, edge_attr, edge_sh - - def build_cross_conv_graph(self, data, lr_cross_distance_cutoff): - # build the cross edges between ligan atoms, receptor residues 
and receptor atoms - - # LIGAND to RECEPTOR - if torch.is_tensor(lr_cross_distance_cutoff): - # different cutoff for every graph - lr_edge_index = radius(data['receptor'].pos / lr_cross_distance_cutoff[data['receptor'].batch], - data['ligand'].pos / lr_cross_distance_cutoff[data['ligand'].batch], 1, - data['receptor'].batch, data['ligand'].batch, max_num_neighbors=10000) - else: - lr_edge_index = radius(data['receptor'].pos, data['ligand'].pos, lr_cross_distance_cutoff, - data['receptor'].batch, data['ligand'].batch, max_num_neighbors=10000) - - lr_edge_vec = data['receptor'].pos[lr_edge_index[1].long()] - data['ligand'].pos[lr_edge_index[0].long()] - lr_edge_length_emb = self.cross_distance_expansion(lr_edge_vec.norm(dim=-1)) - lr_edge_sigma_emb = data['ligand'].node_sigma_emb[lr_edge_index[0].long()] - lr_edge_attr = torch.cat([lr_edge_sigma_emb, lr_edge_length_emb], 1) - lr_edge_sh = o3.spherical_harmonics(self.sh_irreps, lr_edge_vec, normalize=True, normalization='component') - - cutoff_d = lr_cross_distance_cutoff[data['ligand'].batch[lr_edge_index[0]]].squeeze() \ - if torch.is_tensor(lr_cross_distance_cutoff) else lr_cross_distance_cutoff - - # LIGAND to ATOM - la_edge_index = radius(data['atom'].pos, data['ligand'].pos, self.lig_max_radius, - data['atom'].batch, data['ligand'].batch, max_num_neighbors=10000) - - la_edge_vec = data['atom'].pos[la_edge_index[1].long()] - data['ligand'].pos[la_edge_index[0].long()] - la_edge_length_emb = self.cross_distance_expansion(la_edge_vec.norm(dim=-1)) - la_edge_sigma_emb = data['ligand'].node_sigma_emb[la_edge_index[0].long()] - la_edge_attr = torch.cat([la_edge_sigma_emb, la_edge_length_emb], 1) - la_edge_sh = o3.spherical_harmonics(self.sh_irreps, la_edge_vec, normalize=True, normalization='component') - - # ATOM to RECEPTOR - ar_edge_index = data['atom', 'receptor'].edge_index - ar_edge_vec = data['receptor'].pos[ar_edge_index[1].long()] - data['atom'].pos[ar_edge_index[0].long()] - ar_edge_length_emb = self.rec_distance_expansion(ar_edge_vec.norm(dim=-1)) - ar_edge_sigma_emb = data['atom'].node_sigma_emb[ar_edge_index[0].long()] - ar_edge_attr = torch.cat([ar_edge_sigma_emb, ar_edge_length_emb], 1) - ar_edge_sh = o3.spherical_harmonics(self.sh_irreps, ar_edge_vec, normalize=True, normalization='component') - - return lr_edge_index, lr_edge_attr, lr_edge_sh, la_edge_index, la_edge_attr, \ - la_edge_sh, ar_edge_index, ar_edge_attr, ar_edge_sh - - def build_center_conv_graph(self, data): - # build the filter for the convolution of the center with the ligand atoms - # for translational and rotational score - edge_index = torch.cat([data['ligand'].batch.unsqueeze(0), torch.arange(len(data['ligand'].batch)).to(data['ligand'].x.device).unsqueeze(0)], dim=0) - - center_pos, count = torch.zeros((data.num_graphs, 3)).to(data['ligand'].x.device), torch.zeros((data.num_graphs, 3)).to(data['ligand'].x.device) - center_pos.index_add_(0, index=data['ligand'].batch, source=data['ligand'].pos) - center_pos = center_pos / torch.bincount(data['ligand'].batch).unsqueeze(1) - - edge_vec = data['ligand'].pos[edge_index[1]] - center_pos[edge_index[0]] - edge_attr = self.center_distance_expansion(edge_vec.norm(dim=-1)) - edge_sigma_emb = data['ligand'].node_sigma_emb[edge_index[1].long()] - edge_attr = torch.cat([edge_attr, edge_sigma_emb], 1) - edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component') - return edge_index, edge_attr, edge_sh - - def build_bond_conv_graph(self, data): - # build graph for the pseudotorque 
layer - bonds = data['ligand', 'ligand'].edge_index[:, data['ligand'].edge_mask].long() - bond_pos = (data['ligand'].pos[bonds[0]] + data['ligand'].pos[bonds[1]]) / 2 - bond_batch = data['ligand'].batch[bonds[0]] - edge_index = radius(data['ligand'].pos, bond_pos, self.lig_max_radius, batch_x=data['ligand'].batch, batch_y=bond_batch) - - edge_vec = data['ligand'].pos[edge_index[1]] - bond_pos[edge_index[0]] - edge_attr = self.lig_distance_expansion(edge_vec.norm(dim=-1)) - - edge_attr = self.final_edge_embedding(edge_attr) - edge_sh = o3.spherical_harmonics(self.sh_irreps, edge_vec, normalize=True, normalization='component') - - return bonds, edge_index, edge_attr, edge_sh diff --git a/spaces/anaclaudia13ct/insect_detection/data/scripts/get_coco128.sh b/spaces/anaclaudia13ct/insect_detection/data/scripts/get_coco128.sh deleted file mode 100644 index ee05a867e5644be8cc7549b89cad89d5e84573d0..0000000000000000000000000000000000000000 --- a/spaces/anaclaudia13ct/insect_detection/data/scripts/get_coco128.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) -# Example usage: bash data/scripts/get_coco128.sh -# parent -# ├── yolov5 -# └── datasets -# └── coco128 ← downloads here - -# Download/unzip images and labels -d='../datasets' # unzip directory -url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ -f='coco128.zip' # or 'coco128-segments.zip', 68 MB -echo 'Downloading' $url$f ' ...' -curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & - -wait # finish background tasks diff --git a/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/src/midas/midas_net_custom.py b/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/src/midas/midas_net_custom.py deleted file mode 100644 index 50e4acb5e53d5fabefe3dde16ab49c33c2b7797c..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/src/midas/midas_net_custom.py +++ /dev/null @@ -1,128 +0,0 @@ -"""MidashNet: Network for monocular depth estimation trained by mixing several datasets. -This file contains code that is adapted from -https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py -""" -import torch -import torch.nn as nn - -from .base_model import BaseModel -from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder - - -class MidasNet_small(BaseModel): - """Network for monocular depth estimation. - """ - - def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True, - blocks={'expand': True}): - """Init. - - Args: - path (str, optional): Path to saved model. Defaults to None. - features (int, optional): Number of features. Defaults to 256. - backbone (str, optional): Backbone network for encoder. 
Defaults to resnet50 - """ - print("Loading weights: ", path) - - super(MidasNet_small, self).__init__() - - use_pretrained = False if path else True - - self.channels_last = channels_last - self.blocks = blocks - self.backbone = backbone - - self.groups = 1 - - features1=features - features2=features - features3=features - features4=features - self.expand = False - if "expand" in self.blocks and self.blocks['expand'] == True: - self.expand = True - features1=features - features2=features*2 - features3=features*4 - features4=features*8 - - self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable) - - self.scratch.activation = nn.ReLU(False) - - self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) - self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) - self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners) - self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners) - - - self.scratch.output_conv = nn.Sequential( - nn.Conv2d(features, features//2, kernel_size=3, stride=1, padding=1, groups=self.groups), - Interpolate(scale_factor=2, mode="bilinear"), - nn.Conv2d(features//2, 32, kernel_size=3, stride=1, padding=1), - self.scratch.activation, - nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0), - nn.ReLU(True) if non_negative else nn.Identity(), - nn.Identity(), - ) - - if path: - self.load(path) - - - def forward(self, x): - """Forward pass. 
- - Args: - x (tensor): input data (image) - - Returns: - tensor: depth - """ - if self.channels_last==True: - print("self.channels_last = ", self.channels_last) - x.contiguous(memory_format=torch.channels_last) - - - layer_1 = self.pretrained.layer1(x) - layer_2 = self.pretrained.layer2(layer_1) - layer_3 = self.pretrained.layer3(layer_2) - layer_4 = self.pretrained.layer4(layer_3) - - layer_1_rn = self.scratch.layer1_rn(layer_1) - layer_2_rn = self.scratch.layer2_rn(layer_2) - layer_3_rn = self.scratch.layer3_rn(layer_3) - layer_4_rn = self.scratch.layer4_rn(layer_4) - - - path_4 = self.scratch.refinenet4(layer_4_rn) - path_3 = self.scratch.refinenet3(path_4, layer_3_rn) - path_2 = self.scratch.refinenet2(path_3, layer_2_rn) - path_1 = self.scratch.refinenet1(path_2, layer_1_rn) - - out = self.scratch.output_conv(path_1) - - return torch.squeeze(out, dim=1) - - - -def fuse_model(m): - prev_previous_type = nn.Identity() - prev_previous_name = '' - previous_type = nn.Identity() - previous_name = '' - for name, module in m.named_modules(): - if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU: - # print("FUSED ", prev_previous_name, previous_name, name) - torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True) - elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d: - # print("FUSED ", prev_previous_name, previous_name) - torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True) - # elif previous_type == nn.Conv2d and type(module) == nn.ReLU: - # print("FUSED ", previous_name, name) - # torch.quantization.fuse_modules(m, [previous_name, name], inplace=True) - - prev_previous_type = previous_type - prev_previous_name = previous_name - previous_type = type(module) - previous_name = name \ No newline at end of file diff --git a/spaces/artificialguybr/video-dubbing/Wav2Lip/evaluation/scores_LSE/calculate_scores_real_videos.sh b/spaces/artificialguybr/video-dubbing/Wav2Lip/evaluation/scores_LSE/calculate_scores_real_videos.sh deleted file mode 100644 index 4a45cd568d10bfeea9fc31255fcdf121d3f4e0e9..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/Wav2Lip/evaluation/scores_LSE/calculate_scores_real_videos.sh +++ /dev/null @@ -1,8 +0,0 @@ -rm all_scores.txt -yourfilenames=`ls $1` - -for eachfile in $yourfilenames -do - python run_pipeline.py --videofile $1/$eachfile --reference wav2lip --data_dir tmp_dir - python calculate_scores_real_videos.py --videofile $1/$eachfile --reference wav2lip --data_dir tmp_dir >> all_scores.txt -done diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/multiline_highlight.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/multiline_highlight.py deleted file mode 100644 index 231ff7372e310992258a98ccd9f534902bf10253..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/multiline_highlight.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -Multi-Line Highlight -==================== -This multi-line chart uses an invisible Voronoi tessellation to handle mouseover to -identify the nearest point and then highlight the line on which the point falls. 
-It is adapted from the Vega-Lite example found at -https://bl.ocks.org/amitkaps/fe4238e716db53930b2f1a70d3401701 -""" -# category: interactive charts -import altair as alt -from vega_datasets import data - -source = data.stocks() - -highlight = alt.selection(type='single', on='mouseover', - fields=['symbol'], nearest=True) - -base = alt.Chart(source).encode( - x='date:T', - y='price:Q', - color='symbol:N' -) - -points = base.mark_circle().encode( - opacity=alt.value(0) -).add_selection( - highlight -).properties( - width=600 -) - -lines = base.mark_line().encode( - size=alt.condition(~highlight, alt.value(1), alt.value(3)) -) - -points + lines diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/data/multi_corpus_dataset.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/data/multi_corpus_dataset.py deleted file mode 100644 index a3f47c720d0215323ffea7eb5cf5fd7766fbefa6..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/data/multi_corpus_dataset.py +++ /dev/null @@ -1,256 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -import time -from collections import OrderedDict -from typing import Dict, List, Optional - -import numpy as np -from fairseq.data import data_utils - -from . import FairseqDataset - -logger = logging.getLogger(__name__) - - -class MultiCorpusDataset(FairseqDataset): - """ - Stores multiple instances of FairseqDataset together. - Unless batch_sample=True, requires each instance - to be the same dataset, as the collate method needs to work on batches with - samples from each dataset. - - Allows specifying a distribution over the datasets to use. Note that unlike - MultiCorpusSampledDataset, this distribution allows sampling for each item, - rather than on a batch level. Note that datasets with sampling probabilty - of 0 will be skipped. - - Each time ordered_indices() is called, a new sample is generated with - the specified distribution. - - Args: - datasets: a OrderedDict of FairseqDataset instances. 
- distribution: a List containing the probability of getting an utterance from - corresponding dataset - seed: random seed for sampling the datsets - sort_indices: if true, will sort the ordered indices by size - batch_sample: if true, will ensure each batch is from a single dataset - """ - - def __init__( - self, - datasets: Dict[str, FairseqDataset], - distribution: List[float], - seed: int, - sort_indices: bool = False, - batch_sample: bool = False, - distributed_rank: Optional[int] = None, - ): - super().__init__() - assert isinstance(datasets, OrderedDict) - assert len(datasets) == len(distribution) - assert sum(distribution) == 1 - self.datasets = datasets - self.distribution = distribution - self.seed = seed - self.sort_indices = sort_indices - self.batch_sample = batch_sample - self.distributed_rank = distributed_rank - - # Avoid repeated conversions to list later - self.dataset_list = list(datasets.values()) - self.total_num_instances = 0 - - first_dataset = self.dataset_list[0] - - self.num_instances_per_dataset = [] - self.dataset_offsets = [] - for i, dataset in enumerate(self.dataset_list): - assert isinstance(dataset, FairseqDataset) - assert type(dataset) is type(first_dataset) - self.num_instances_per_dataset.append( - 0 if self.distribution[i] == 0 else len(dataset) - ) - self.dataset_offsets.append(self.total_num_instances) - self.total_num_instances += self.num_instances_per_dataset[i] - - def ordered_indices(self): - start = time.time() - with data_utils.numpy_seed(self.seed, self.epoch): - logger.info( - f"sampling new dataset with seed {self.seed} epoch {self.epoch}" - ) - sampled_indices = [] - num_selected_instances = 0 - - # For each dataset i, sample self.distribution[i] * self.total_num_instances - for i, key in enumerate(self.datasets): - if self.distribution[i] == 0: - # skip dataset if sampling probability is 0 - continue - - if i < len(self.datasets) - 1: - num_instances = int(self.distribution[i] * self.total_num_instances) - high = self.dataset_offsets[i + 1] - else: - num_instances = self.total_num_instances - num_selected_instances - high = self.total_num_instances - - logger.info(f"sampling {num_instances} from {key} dataset") - num_selected_instances += num_instances - - # First, add k copies of the dataset where k = num_instances // len(dataset). - # This ensures an equal distribution of the data points as much as possible. - # For the remaining entries randomly sample them - dataset_size = len(self.datasets[key]) - num_copies = num_instances // dataset_size - dataset_indices = ( - np.random.permutation(high - self.dataset_offsets[i]) - + self.dataset_offsets[i] - )[: num_instances - num_copies * dataset_size] - if num_copies > 0: - sampled_indices += list( - np.concatenate( - ( - np.repeat( - np.arange(self.dataset_offsets[i], high), num_copies - ), - dataset_indices, - ) - ) - ) - else: - sampled_indices += list(dataset_indices) - - assert ( - len(sampled_indices) == self.total_num_instances - ), f"{len(sampled_indices)} vs {self.total_num_instances}" - - np.random.shuffle(sampled_indices) - if self.sort_indices: - sampled_indices.sort(key=lambda i: self.num_tokens(i)) - - logger.info( - "multi_corpus_dataset ordered_indices took {}s".format( - time.time() - start - ) - ) - return np.array(sampled_indices, dtype=np.int64) - - def _map_index(self, index: int): - """ - If dataset A has length N and dataset B has length M - then index 1 maps to index 1 of dataset A, and index N + 1 - maps to index 1 of B. 
- """ - counter = 0 - for num_instances, key in zip(self.num_instances_per_dataset, self.datasets): - if index < counter + num_instances: - return index - counter, key - counter += num_instances - raise ValueError( - "Invalid index: {}, max: {}".format(index, self.total_num_instances) - ) - - def __len__(self): - """ - Length of this dataset is the sum of individual datasets - """ - return self.total_num_instances - - def __getitem__(self, index): - new_index, key = self._map_index(index) - try: - item = self.datasets[key][new_index] - item["full_id"] = index - return item - except Exception as e: - e.args = (f"Error from {key} dataset", *e.args) - raise - - def collater(self, samples): - """ - If we are doing batch sampling, then pick the right collater to use. - - Otherwise we assume all collaters are the same. - """ - if len(samples) == 0: - return None - if "full_id" in samples[0]: - _, key = self._map_index(samples[0]["full_id"]) - try: - batch = self.datasets[key].collater(samples) - except Exception: - print(f"Collating failed for key {key}", flush=True) - raise - return batch - else: - # Subclasses may override __getitem__ to not specify full_id - return list(self.datasets.values())[0].collater(samples) - - def num_tokens(self, index: int): - index, key = self._map_index(index) - return self.datasets[key].num_tokens(index) - - def size(self, index: int): - index, key = self._map_index(index) - return self.datasets[key].size(index) - - @property - def can_reuse_epoch_itr_across_epochs(self): - return False - - def set_epoch(self, epoch, **unused): - super().set_epoch(epoch) - logger.info(f"setting epoch of multi_corpus_dataset to {epoch}") - self.epoch = epoch - - @property - def supports_prefetch(self): - return False - - @property - def supports_fetch_outside_dataloader(self): - return all( - self.datasets[key].supports_fetch_outside_dataloader - for key in self.datasets - ) - - def batch_by_size( - self, - indices, - max_tokens=None, - max_sentences=None, - required_batch_size_multiple=1, - ): - if not self.batch_sample: - return super().batch_by_size( - indices, max_tokens, max_sentences, required_batch_size_multiple - ) - - dataset_indices = {key: [] for key in self.datasets} - for i in indices: - _, key = self._map_index(i) - dataset_indices[key].append(i) - - batches = [] - for key in dataset_indices: - cur_batches = super().batch_by_size( - np.array(dataset_indices[key], dtype=np.int64), - max_tokens, - max_sentences, - required_batch_size_multiple, - ) - logger.info(f"Created {len(cur_batches)} batches for dataset {key}") - batches += cur_batches - - # If this dataset is used in a distributed training setup, - # then shuffle such that the order is seeded by the distributed rank - # as well - if self.distributed_rank is not None: - with data_utils.numpy_seed(self.seed, self.epoch, self.distributed_rank): - np.random.shuffle(batches) - return batches diff --git a/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Hossein Kalbasi.html b/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Hossein Kalbasi.html deleted file mode 100644 index 629f96277ec57e3493172ad4ee3fe55249176c59..0000000000000000000000000000000000000000 --- a/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Hossein Kalbasi.html +++ /dev/null @@ -1,134 +0,0 @@ - - - - Hossein Kalbasi - - - - -
      -

      Hossein Kalbasi

      - -
      -

      Application

      I am currently one of the top Data Science mentors at GreatLearning. I enjoy helping others and being part of the long-term career growth of mentees. 

      Interview

      • 5-6 years exp in SE and DS
      • BS in Industrial engineering / manufacturing
• MS: AI in manufacturing
      • worked at several different startups
• Content intelligence / recommendation systems

      How did you hear about SM?
      • Saw you on LinkedIn (Reza)
      • Talked on the phone with him once. Saw a shout-out on LinkedIn for Reza and talked to him about it.
      • I can connect with and help more people
      • ISA is interesting
      • Find people who are interested enough

      Mentorship experience?
• Helps friends and peers find jobs
• Held Python workshops
• Mentoring at GreatLearning - half lecture, half programming
  • Helping people throughout their jobs

      What are beginners lacking?
      • Don't know what industry is actually like. Don't know what to learn/expect
        • Sometimes, overestimate (need to learn everything)
  • Sometimes, underestimate
      • Sometimes missing programming skills, or data science skills

      And how can you help?
• Find out what their passion is. Why do you want this job? How do you see your career?
• Be picky with the details.
• I am very self-taught and I know how to learn things on my own. Can direct folks in this direction
      • Use my teaching experience to help them when they are stuck. Don't do it for them. Give them tips and tricks
• Then help them with their CV, networking tactics, LinkedIn, etc.
      -
      -


      Questions about SM?
      • History of SM?
      • What's the day-to-day of a mentor?
      -
      - -
      - - - \ No newline at end of file diff --git a/spaces/augmentedimaginationhackathon/paperstocode/frontend/src/app/app.module.ts b/spaces/augmentedimaginationhackathon/paperstocode/frontend/src/app/app.module.ts deleted file mode 100644 index 389ad33f96a6ff2bdbf5b69c314707f11fd3b9f9..0000000000000000000000000000000000000000 --- a/spaces/augmentedimaginationhackathon/paperstocode/frontend/src/app/app.module.ts +++ /dev/null @@ -1,20 +0,0 @@ -import { NgModule } from '@angular/core'; -import { BrowserModule } from '@angular/platform-browser'; - -import { AppRoutingModule } from './app-routing.module'; -import { AppComponent } from './app.component'; -import { UploaderComponent } from './uploader/uploader.component'; - -@NgModule({ - declarations: [ - AppComponent, - UploaderComponent - ], - imports: [ - BrowserModule, - AppRoutingModule - ], - providers: [], - bootstrap: [AppComponent] -}) -export class AppModule { } diff --git a/spaces/aurora10/GPT4ALL_CHATBOT/README.md b/spaces/aurora10/GPT4ALL_CHATBOT/README.md deleted file mode 100644 index 3f056cf96093e6102dadfec7ee05eb8d853b8d35..0000000000000000000000000000000000000000 --- a/spaces/aurora10/GPT4ALL_CHATBOT/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: GPT4ALL CHATBOT -emoji: 📚 -colorFrom: pink -colorTo: green -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false -duplicated_from: aurora10/GPT4ALL_CHATBOT ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/autosummproject/autosumm/utils/__init__.py b/spaces/autosummproject/autosumm/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/awaawawawa/iurf7irfuyytruyyugb/static/style.css b/spaces/awaawawawa/iurf7irfuyytruyyugb/static/style.css deleted file mode 100644 index 6a3c98f8fab848caaaf7b844b24ce23c8c5c8dde..0000000000000000000000000000000000000000 --- a/spaces/awaawawawa/iurf7irfuyytruyyugb/static/style.css +++ /dev/null @@ -1,79 +0,0 @@ -body { - --text: hsl(0 0% 15%); - padding: 2.5rem; - font-family: sans-serif; - color: var(--text); -} -body.dark-theme { - --text: hsl(0 0% 90%); - background-color: hsl(223 39% 7%); -} - -main { - max-width: 80rem; - text-align: center; -} - -section { - display: flex; - flex-direction: column; - align-items: center; -} - -a { - color: var(--text); -} - -select, input, button, .text-gen-output { - padding: 0.5rem 1rem; -} - -select, img, input { - margin: 0.5rem auto 1rem; -} - -form { - width: 25rem; - margin: 0 auto; -} - -input { - width: 70%; -} - -button { - cursor: pointer; -} - -.text-gen-output { - min-height: 1.2rem; - margin: 1rem; - border: 0.5px solid grey; -} - -#dataset button { - width: 6rem; - margin: 0.5rem; -} - -#dataset button.hidden { - visibility: hidden; -} - -table { - max-width: 40rem; - text-align: left; - border-collapse: collapse; -} - -thead { - font-weight: bold; -} - -td { - padding: 0.5rem; -} - -td:not(thead td) { - border: 0.5px solid grey; -} diff --git a/spaces/awacke1/runwayml-stable-diffusion-v1-5/app.py b/spaces/awacke1/runwayml-stable-diffusion-v1-5/app.py deleted file mode 100644 index a82df332731f067826d3e1ef79fabceffb74d07e..0000000000000000000000000000000000000000 --- a/spaces/awacke1/runwayml-stable-diffusion-v1-5/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/runwayml/stable-diffusion-v1-5").launch() \ No newline at end of file diff --git 
a/spaces/badayvedat/LLaVA/docs/LLaVA_from_LLaMA2.md b/spaces/badayvedat/LLaVA/docs/LLaVA_from_LLaMA2.md deleted file mode 100644 index b4163668a33ff705c28f5b103b727514161e5652..0000000000000000000000000000000000000000 --- a/spaces/badayvedat/LLaVA/docs/LLaVA_from_LLaMA2.md +++ /dev/null @@ -1,29 +0,0 @@ -# LLaVA (based on Llama 2 LLM, Preview) - -*NOTE: This is a technical preview. We are still running hyperparameter search, and will release the final model soon. If you'd like to contribute to this, please contact us.* - -:llama: **-Introduction-** [Llama 2 is an open-source LLM released by Meta AI](https://about.fb.com/news/2023/07/llama-2/) today (July 18, 2023). Compared with its early version [Llama 1](https://ai.meta.com/blog/large-language-model-llama-meta-ai/), Llama 2 is more favored in ***stronger language performance***, ***longer context window***, and importantly ***commercially usable***! While Llama 2 is changing the LLM market landscape in the language space, its multimodal ability remains unknown. We quickly develop the LLaVA variant based on the latest Llama 2 checkpoints, and release it to the community for the public use. - -You need to apply for and download the lastest Llama 2 checkpoints to start your own training (apply [here](https://ai.meta.com/resources/models-and-libraries/llama-downloads/)) - - -## Training - -Please checkout [`pretrain.sh`](https://github.com/haotian-liu/LLaVA/blob/main/scripts/pretrain.sh), [`finetune.sh`](https://github.com/haotian-liu/LLaVA/blob/main/scripts/finetune.sh), [`finetune_lora.sh`](https://github.com/haotian-liu/LLaVA/blob/main/scripts/finetune_lora.sh). - -## LLaVA (based on Llama 2), What is different? - -:volcano: How is the new LLaVA based on Llama 2 different from Llama 1? The comparisons of the training process are described: -- **Pre-training**. The pre-trained base LLM is changed from Llama 1 to Llama 2 -- **Language instruction-tuning**. The previous LLaVA model starts with Vicuna, which is instruct tuned on ShareGPT data from Llama 1; The new LLaVA model starts with Llama 2 Chat, which is an instruct tuned checkpoint on dialogue data from Llama 2. -- **Multimodal instruction-tuning**. The same LLaVA-Lighting process is applied. - - -### Results - -- Llama 2 is better at following the instructions of role playing; Llama 2 fails in following the instructions of translation -- The quantitative evaluation on [LLaVA-Bench](https://github.com/haotian-liu/LLaVA/blob/main/docs/LLaVA_Bench.md) demonstrates on-par performance between Llama 2 and Llama 1 in LLaVA's multimodal chat ability. - - - - diff --git a/spaces/banana-projects/web3d/node_modules/@tweenjs/tween.js/README.md b/spaces/banana-projects/web3d/node_modules/@tweenjs/tween.js/README.md deleted file mode 100644 index 5e8c8df4fd2c29e39a7343dcb3661b1e174e721c..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/@tweenjs/tween.js/README.md +++ /dev/null @@ -1,295 +0,0 @@ -# tween.js - -JavaScript tweening engine for easy animations, incorporating optimised Robert Penner's equations. 
- -[![NPM Version][npm-image]][npm-url] -[![NPM Downloads][downloads-image]][downloads-url] -[![Travis tests][travis-image]][travis-url] -[![Flattr this][flattr-image]][flattr-url] -[![CDNJS][cdnjs-image]][cdnjs-url] - -```javascript -var box = document.createElement('div'); -box.style.setProperty('background-color', '#008800'); -box.style.setProperty('width', '100px'); -box.style.setProperty('height', '100px'); -document.body.appendChild(box); - -// Setup the animation loop. -function animate(time) { - requestAnimationFrame(animate); - TWEEN.update(time); -} -requestAnimationFrame(animate); - -var coords = { x: 0, y: 0 }; // Start at (0, 0) -var tween = new TWEEN.Tween(coords) // Create a new tween that modifies 'coords'. - .to({ x: 300, y: 200 }, 1000) // Move to (300, 200) in 1 second. - .easing(TWEEN.Easing.Quadratic.Out) // Use an easing function to make the animation smooth. - .onUpdate(function() { // Called after tween.js updates 'coords'. - // Move 'box' to the position described by 'coords' with a CSS translation. - box.style.setProperty('transform', 'translate(' + coords.x + 'px, ' + coords.y + 'px)'); - }) - .start(); // Start the tween immediately. -``` - -[Test it with CodePen](https://codepen.io/mikebolt/pen/zzzvZg) - -## Installation - -Download the [library](https://raw.githubusercontent.com/tweenjs/tween.js/master/src/Tween.js) and include it in your code: - -```html - -``` - -You can also reference a CDN-hosted version in your code, thanks to cdnjs. For example: - -```html - -``` - -See [tween.js](https://cdnjs.com/libraries/tween.js/) for more versions. - -### More advanced users might want to... - -#### Use `npm` - -```bash -npm install @tweenjs/tween.js -``` - -Then include the Tween.js module with the standard node.js `require`: - -```javascript -var TWEEN = require('@tweenjs/tween.js'); -``` - -And you can use Tween.js as in all other examples--for example: - -```javascript -var t = new TWEEN.Tween( /* etc */ ); -t.start(); -``` - -You will need to use a tool such as `browserify` to convert code using this style into something that can be run in the browser (browsers don't know about `require`). - -#### Use `bower` - -```bash -bower install @tweenjs/tweenjs --save -``` - -or install an specific tag. They are git tags, and you can run `git tag` in the command line for a list if you have cloned the repository locally, or you can also check out the list in the [tween.js tags page](https://github.com/tweenjs/tween.js/tags). For example, to install `v16.3.0`: - -```bash -bower install @tweenjs/tweenjs#v16.3.0 -``` - -Then reference the library source: - -```html - -``` - -## Features - -* Does one thing and one thing only: tween properties -* Doesn't take care of CSS units (e.g. appending `px`) -* Doesn't interpolate colours -* Easing functions are reusable outside of Tween -* Can also use custom easing functions - -## Documentation - -* [User guide](./docs/user_guide.md) -* [Contributor guide](./docs/contributor_guide.md) -* [Tutorial](http://learningthreejs.com/blog/2011/08/17/tweenjs-for-smooth-animation/) using tween.js with three.js -* Also: [libtween](https://github.com/jsm174/libtween), a port of tween.js to C by [jsm174](https://github.com/jsm174) -* Also: [es6-tween](https://github.com/tweenjs/es6-tween), a port of tween.js to ES6/Harmony by [dalisoft](https://github.com/dalisoft) -* [Understanding tween.js](https://mikebolt.me/article/understanding-tweenjs.html) - -## Examples - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
* Custom functions (source)
* Stop all chained tweens (source)
* Yoyo (source)
* Relative values (source)
* Repeat (source)
* Dynamic to (source)
* Array interpolation (source)
* Video and time (source)
* Simplest possible example (source)
* Graphs (source)
* Black and red (source)
* Bars (source)
* hello world (source)
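The "Custom functions" example above relies on the fact that `.easing()` accepts any function mapping progress `k` in [0, 1] to a number, not only the built-in `TWEEN.Easing` curves. Below is a minimal sketch of that idea; the `backOut` curve and the `state` object are illustrative assumptions, while the `.to()` / `.easing()` / `.onUpdate()` / `.start()` chain is the same API used in the snippet at the top of this README.

```javascript
// A custom easing is just a function from progress k in [0, 1] to a number.
// This one overshoots slightly before settling at 1 (a "back out" style curve).
function backOut(k) {
  var s = 1.70158;
  return --k * k * ((s + 1) * k + s) + 1;
}

var state = { x: 0 };

new TWEEN.Tween(state)      // tween the properties of 'state'
  .to({ x: 100 }, 1000)     // towards x = 100 over one second
  .easing(backOut)          // plug the custom easing function in here
  .onUpdate(function () {
    console.log(state.x);   // eased value; briefly overshoots 100, then settles
  })
  .start();

// As in the first example of this README, TWEEN.update(time) must still be
// called from a requestAnimationFrame loop for the tween to advance.
```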
      - -## Tests - -You need to install `npm` first--this comes with node.js, so install that one first. Then, cd to `tween.js`'s directory and run: - -```bash -npm install -``` - -if running the tests for the first time, to install additional dependencies for running tests, and then run - -```bash -npm test -``` - -every time you want to run the tests. - -If you want to add any feature or change existing features, you *must* run the tests to make sure you didn't break anything else. If you send a pull request (PR) to add something new and it doesn't have tests, or the tests don't pass, the PR won't be accepted. See [contributing](CONTRIBUTING.md) for more information. - -## People - -Maintainers: [mikebolt](https://github.com/mikebolt), [sole](https://github.com/sole). - -[All contributors](http://github.com/tweenjs/tween.js/contributors). - -## Projects using tween.js - -[![A-Frame VR](http://tweenjs.github.io/tween.js/assets/projects/10_aframe.png)](https://aframe.io) -[![MOMA Inventing Abstraction 1910-1925](http://tweenjs.github.io/tween.js/assets/projects/09_moma.png)](http://www.moma.org/interactives/exhibitions/2012/inventingabstraction/) -[![Web Lab](http://tweenjs.github.io/tween.js/assets/projects/08_web_lab.png)](http://www.chromeweblab.com/) -[![MACCHINA I](http://tweenjs.github.io/tween.js/assets/projects/07_macchina.png)](http://5013.es/toys/macchina) -[![Minesweeper 3D](http://tweenjs.github.io/tween.js/assets/projects/06_minesweeper3d.png)](http://egraether.com/mine3d/) -[![ROME](http://tweenjs.github.io/tween.js/assets/projects/05_rome.png)](http://ro.me) -[![WebGL Globe](http://tweenjs.github.io/tween.js/assets/projects/04_webgl_globe.png)](http://data-arts.appspot.com/globe) -[![Androidify](http://tweenjs.github.io/tween.js/assets/projects/03_androidify.png)](http://www.androidify.com/) -[![The Wilderness Downtown](http://tweenjs.github.io/tween.js/assets/projects/01_wilderness.png)](http://thewildernessdowntown.com/) -[![Linechart](http://tweenjs.github.io/tween.js/assets/projects/00_linechart.png)](http://dejavis.org/linechart) - -[npm-image]: https://img.shields.io/npm/v/@tweenjs/tween.js.svg -[npm-url]: https://npmjs.org/package/@tweenjs/tween.js -[downloads-image]: https://img.shields.io/npm/dm/@tweenjs/tween.js.svg -[downloads-url]: https://npmjs.org/package/@tweenjs/tween.js -[travis-image]: https://travis-ci.org/tweenjs/tween.js.svg?branch=master -[travis-url]: https://travis-ci.org/tweenjs/tween.js -[flattr-image]: https://api.flattr.com/button/flattr-badge-large.png -[flattr-url]: https://flattr.com/thing/45014/tween-js -[cdnjs-image]: https://img.shields.io/cdnjs/v/tween.js.svg -[cdnjs-url]: https://cdnjs.com/libraries/tween.js - diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/TDSLoader.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/TDSLoader.js deleted file mode 100644 index 3ea517af122737c1b0959f7348b5855b7a0d89a9..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/loaders/TDSLoader.js +++ /dev/null @@ -1,1127 +0,0 @@ -/* - * Autodesk 3DS three.js file loader, based on lib3ds. - * - * Loads geometry with uv and materials basic properties with texture support. - * - * @author @tentone - * @author @timknip - * @class TDSLoader - * @constructor - */ - -'use strict'; - -THREE.TDSLoader = function ( manager ) { - - this.manager = ( manager !== undefined ) ? 
manager : THREE.DefaultLoadingManager; - this.debug = false; - - this.group = null; - this.position = 0; - - this.materials = []; - this.meshes = []; - -}; - -THREE.TDSLoader.prototype = { - - constructor: THREE.TDSLoader, - - crossOrigin: 'anonymous', - - /** - * Load 3ds file from url. - * - * @method load - * @param {[type]} url URL for the file. - * @param {Function} onLoad onLoad callback, receives group Object3D as argument. - * @param {Function} onProgress onProgress callback. - * @param {Function} onError onError callback. - */ - load: function ( url, onLoad, onProgress, onError ) { - - var scope = this; - - var path = this.path !== undefined ? this.path : THREE.LoaderUtils.extractUrlBase( url ); - - var loader = new THREE.FileLoader( this.manager ); - loader.setPath( this.path ); - loader.setResponseType( 'arraybuffer' ); - - loader.load( url, function ( data ) { - - onLoad( scope.parse( data, path ) ); - - }, onProgress, onError ); - - }, - - /** - * Parse arraybuffer data and load 3ds file. - * - * @method parse - * @param {ArrayBuffer} arraybuffer Arraybuffer data to be loaded. - * @param {String} path Path for external resources. - * @return {Object3D} Group loaded from 3ds file. - */ - parse: function ( arraybuffer, path ) { - - this.group = new THREE.Group(); - this.position = 0; - this.materials = []; - this.meshes = []; - - this.readFile( arraybuffer, path ); - - for ( var i = 0; i < this.meshes.length; i ++ ) { - - this.group.add( this.meshes[ i ] ); - - } - - return this.group; - - }, - - /** - * Decode file content to read 3ds data. - * - * @method readFile - * @param {ArrayBuffer} arraybuffer Arraybuffer data to be loaded. - */ - readFile: function ( arraybuffer, path ) { - - var data = new DataView( arraybuffer ); - var chunk = this.readChunk( data ); - - if ( chunk.id === MLIBMAGIC || chunk.id === CMAGIC || chunk.id === M3DMAGIC ) { - - var next = this.nextChunk( data, chunk ); - - while ( next !== 0 ) { - - if ( next === M3D_VERSION ) { - - var version = this.readDWord( data ); - this.debugMessage( '3DS file version: ' + version ); - - } else if ( next === MDATA ) { - - this.resetPosition( data ); - this.readMeshData( data, path ); - - } else { - - this.debugMessage( 'Unknown main chunk: ' + next.toString( 16 ) ); - - } - - next = this.nextChunk( data, chunk ); - - } - - } - - this.debugMessage( 'Parsed ' + this.meshes.length + ' meshes' ); - - }, - - /** - * Read mesh data chunk. - * - * @method readMeshData - * @param {Dataview} data Dataview in use. - */ - readMeshData: function ( data, path ) { - - var chunk = this.readChunk( data ); - var next = this.nextChunk( data, chunk ); - - while ( next !== 0 ) { - - if ( next === MESH_VERSION ) { - - var version = + this.readDWord( data ); - this.debugMessage( 'Mesh Version: ' + version ); - - } else if ( next === MASTER_SCALE ) { - - var scale = this.readFloat( data ); - this.debugMessage( 'Master scale: ' + scale ); - this.group.scale.set( scale, scale, scale ); - - } else if ( next === NAMED_OBJECT ) { - - this.debugMessage( 'Named Object' ); - this.resetPosition( data ); - this.readNamedObject( data ); - - } else if ( next === MAT_ENTRY ) { - - this.debugMessage( 'Material' ); - this.resetPosition( data ); - this.readMaterialEntry( data, path ); - - } else { - - this.debugMessage( 'Unknown MDATA chunk: ' + next.toString( 16 ) ); - - } - - next = this.nextChunk( data, chunk ); - - } - - }, - - /** - * Read named object chunk. - * - * @method readNamedObject - * @param {Dataview} data Dataview in use. 
- */ - readNamedObject: function ( data ) { - - var chunk = this.readChunk( data ); - var name = this.readString( data, 64 ); - chunk.cur = this.position; - - var next = this.nextChunk( data, chunk ); - while ( next !== 0 ) { - - if ( next === N_TRI_OBJECT ) { - - this.resetPosition( data ); - var mesh = this.readMesh( data ); - mesh.name = name; - this.meshes.push( mesh ); - - } else { - - this.debugMessage( 'Unknown named object chunk: ' + next.toString( 16 ) ); - - } - - next = this.nextChunk( data, chunk ); - - } - - this.endChunk( chunk ); - - }, - - /** - * Read material data chunk and add it to the material list. - * - * @method readMaterialEntry - * @param {Dataview} data Dataview in use. - */ - readMaterialEntry: function ( data, path ) { - - var chunk = this.readChunk( data ); - var next = this.nextChunk( data, chunk ); - var material = new THREE.MeshPhongMaterial(); - - while ( next !== 0 ) { - - if ( next === MAT_NAME ) { - - material.name = this.readString( data, 64 ); - this.debugMessage( ' Name: ' + material.name ); - - } else if ( next === MAT_WIRE ) { - - this.debugMessage( ' Wireframe' ); - material.wireframe = true; - - } else if ( next === MAT_WIRE_SIZE ) { - - var value = this.readByte( data ); - material.wireframeLinewidth = value; - this.debugMessage( ' Wireframe Thickness: ' + value ); - - } else if ( next === MAT_TWO_SIDE ) { - - material.side = THREE.DoubleSide; - this.debugMessage( ' DoubleSided' ); - - } else if ( next === MAT_ADDITIVE ) { - - this.debugMessage( ' Additive Blending' ); - material.blending = THREE.AdditiveBlending; - - } else if ( next === MAT_DIFFUSE ) { - - this.debugMessage( ' Diffuse Color' ); - material.color = this.readColor( data ); - - } else if ( next === MAT_SPECULAR ) { - - this.debugMessage( ' Specular Color' ); - material.specular = this.readColor( data ); - - } else if ( next === MAT_AMBIENT ) { - - this.debugMessage( ' Ambient color' ); - material.color = this.readColor( data ); - - } else if ( next === MAT_SHININESS ) { - - var shininess = this.readWord( data ); - material.shininess = shininess; - this.debugMessage( ' Shininess : ' + shininess ); - - } else if ( next === MAT_TEXMAP ) { - - this.debugMessage( ' ColorMap' ); - this.resetPosition( data ); - material.map = this.readMap( data, path ); - - } else if ( next === MAT_BUMPMAP ) { - - this.debugMessage( ' BumpMap' ); - this.resetPosition( data ); - material.bumpMap = this.readMap( data, path ); - - } else if ( next === MAT_OPACMAP ) { - - this.debugMessage( ' OpacityMap' ); - this.resetPosition( data ); - material.alphaMap = this.readMap( data, path ); - - } else if ( next === MAT_SPECMAP ) { - - this.debugMessage( ' SpecularMap' ); - this.resetPosition( data ); - material.specularMap = this.readMap( data, path ); - - } else { - - this.debugMessage( ' Unknown material chunk: ' + next.toString( 16 ) ); - - } - - next = this.nextChunk( data, chunk ); - - } - - this.endChunk( chunk ); - - this.materials[ material.name ] = material; - - }, - - /** - * Read mesh data chunk. - * - * @method readMesh - * @param {Dataview} data Dataview in use. 
- */ - readMesh: function ( data ) { - - var chunk = this.readChunk( data ); - var next = this.nextChunk( data, chunk ); - - var geometry = new THREE.BufferGeometry(); - var uvs = []; - - var material = new THREE.MeshPhongMaterial(); - var mesh = new THREE.Mesh( geometry, material ); - mesh.name = 'mesh'; - - while ( next !== 0 ) { - - if ( next === POINT_ARRAY ) { - - var points = this.readWord( data ); - - this.debugMessage( ' Vertex: ' + points ); - - //BufferGeometry - - var vertices = []; - - for ( var i = 0; i < points; i ++ ) { - - vertices.push( this.readFloat( data ) ); - vertices.push( this.readFloat( data ) ); - vertices.push( this.readFloat( data ) ); - - } - - geometry.addAttribute( 'position', new THREE.Float32BufferAttribute( vertices, 3 ) ); - - } else if ( next === FACE_ARRAY ) { - - this.resetPosition( data ); - this.readFaceArray( data, mesh ); - - } else if ( next === TEX_VERTS ) { - - var texels = this.readWord( data ); - - this.debugMessage( ' UV: ' + texels ); - - //BufferGeometry - - var uvs = []; - - for ( var i = 0; i < texels; i ++ ) { - - uvs.push( this.readFloat( data ) ); - uvs.push( this.readFloat( data ) ); - - } - - geometry.addAttribute( 'uv', new THREE.Float32BufferAttribute( uvs, 2 ) ); - - - } else if ( next === MESH_MATRIX ) { - - this.debugMessage( ' Tranformation Matrix (TODO)' ); - - var values = []; - for ( var i = 0; i < 12; i ++ ) { - - values[ i ] = this.readFloat( data ); - - } - - var matrix = new THREE.Matrix4(); - - //X Line - matrix.elements[ 0 ] = values[ 0 ]; - matrix.elements[ 1 ] = values[ 6 ]; - matrix.elements[ 2 ] = values[ 3 ]; - matrix.elements[ 3 ] = values[ 9 ]; - - //Y Line - matrix.elements[ 4 ] = values[ 2 ]; - matrix.elements[ 5 ] = values[ 8 ]; - matrix.elements[ 6 ] = values[ 5 ]; - matrix.elements[ 7 ] = values[ 11 ]; - - //Z Line - matrix.elements[ 8 ] = values[ 1 ]; - matrix.elements[ 9 ] = values[ 7 ]; - matrix.elements[ 10 ] = values[ 4 ]; - matrix.elements[ 11 ] = values[ 10 ]; - - //W Line - matrix.elements[ 12 ] = 0; - matrix.elements[ 13 ] = 0; - matrix.elements[ 14 ] = 0; - matrix.elements[ 15 ] = 1; - - matrix.transpose(); - - var inverse = new THREE.Matrix4(); - inverse.getInverse( matrix, true ); - geometry.applyMatrix( inverse ); - - matrix.decompose( mesh.position, mesh.quaternion, mesh.scale ); - - } else { - - this.debugMessage( ' Unknown mesh chunk: ' + next.toString( 16 ) ); - - } - - next = this.nextChunk( data, chunk ); - - } - - this.endChunk( chunk ); - - geometry.computeVertexNormals(); - - return mesh; - - }, - - /** - * Read face array data chunk. - * - * @method readFaceArray - * @param {Dataview} data Dataview in use. - * @param {Mesh} mesh Mesh to be filled with the data read. 
- */ - readFaceArray: function ( data, mesh ) { - - var chunk = this.readChunk( data ); - var faces = this.readWord( data ); - - this.debugMessage( ' Faces: ' + faces ); - - var index = []; - - for ( var i = 0; i < faces; ++ i ) { - - index.push( this.readWord( data ), this.readWord( data ), this.readWord( data ) ); - - var visibility = this.readWord( data ); - - } - - mesh.geometry.setIndex( index ); - - //The rest of the FACE_ARRAY chunk is subchunks - - while ( this.position < chunk.end ) { - - var chunk = this.readChunk( data ); - - if ( chunk.id === MSH_MAT_GROUP ) { - - this.debugMessage( ' Material Group' ); - - this.resetPosition( data ); - - var group = this.readMaterialGroup( data ); - - var material = this.materials[ group.name ]; - - if ( material !== undefined ) { - - mesh.material = material; - - if ( material.name === '' ) { - - material.name = mesh.name; - - } - - } - - } else { - - this.debugMessage( ' Unknown face array chunk: ' + chunk.toString( 16 ) ); - - } - - this.endChunk( chunk ); - - } - - this.endChunk( chunk ); - - }, - - /** - * Read texture map data chunk. - * - * @method readMap - * @param {Dataview} data Dataview in use. - * @return {Texture} Texture read from this data chunk. - */ - readMap: function ( data, path ) { - - var chunk = this.readChunk( data ); - var next = this.nextChunk( data, chunk ); - var texture = {}; - - var loader = new THREE.TextureLoader( this.manager ); - loader.setPath( this.resourcePath || path ).setCrossOrigin( this.crossOrigin ); - - while ( next !== 0 ) { - - if ( next === MAT_MAPNAME ) { - - var name = this.readString( data, 128 ); - texture = loader.load( name ); - - this.debugMessage( ' File: ' + path + name ); - - } else if ( next === MAT_MAP_UOFFSET ) { - - texture.offset.x = this.readFloat( data ); - this.debugMessage( ' OffsetX: ' + texture.offset.x ); - - } else if ( next === MAT_MAP_VOFFSET ) { - - texture.offset.y = this.readFloat( data ); - this.debugMessage( ' OffsetY: ' + texture.offset.y ); - - } else if ( next === MAT_MAP_USCALE ) { - - texture.repeat.x = this.readFloat( data ); - this.debugMessage( ' RepeatX: ' + texture.repeat.x ); - - } else if ( next === MAT_MAP_VSCALE ) { - - texture.repeat.y = this.readFloat( data ); - this.debugMessage( ' RepeatY: ' + texture.repeat.y ); - - } else { - - this.debugMessage( ' Unknown map chunk: ' + next.toString( 16 ) ); - - } - - next = this.nextChunk( data, chunk ); - - } - - this.endChunk( chunk ); - - return texture; - - }, - - /** - * Read material group data chunk. - * - * @method readMaterialGroup - * @param {Dataview} data Dataview in use. - * @return {Object} Object with name and index of the object. - */ - readMaterialGroup: function ( data ) { - - var chunk = this.readChunk( data ); - var name = this.readString( data, 64 ); - var numFaces = this.readWord( data ); - - this.debugMessage( ' Name: ' + name ); - this.debugMessage( ' Faces: ' + numFaces ); - - var index = []; - for ( var i = 0; i < numFaces; ++ i ) { - - index.push( this.readWord( data ) ); - - } - - return { name: name, index: index }; - - }, - - /** - * Read a color value. - * - * @method readColor - * @param {DataView} data Dataview. - * @return {Color} Color value read.. 
- */ - readColor: function ( data ) { - - var chunk = this.readChunk( data ); - var color = new THREE.Color(); - - if ( chunk.id === COLOR_24 || chunk.id === LIN_COLOR_24 ) { - - var r = this.readByte( data ); - var g = this.readByte( data ); - var b = this.readByte( data ); - - color.setRGB( r / 255, g / 255, b / 255 ); - - this.debugMessage( ' Color: ' + color.r + ', ' + color.g + ', ' + color.b ); - - } else if ( chunk.id === COLOR_F || chunk.id === LIN_COLOR_F ) { - - var r = this.readFloat( data ); - var g = this.readFloat( data ); - var b = this.readFloat( data ); - - color.setRGB( r, g, b ); - - this.debugMessage( ' Color: ' + color.r + ', ' + color.g + ', ' + color.b ); - - } else { - - this.debugMessage( ' Unknown color chunk: ' + chunk.toString( 16 ) ); - - } - - this.endChunk( chunk ); - return color; - - }, - - /** - * Read next chunk of data. - * - * @method readChunk - * @param {DataView} data Dataview. - * @return {Object} Chunk of data read. - */ - readChunk: function ( data ) { - - var chunk = {}; - - chunk.cur = this.position; - chunk.id = this.readWord( data ); - chunk.size = this.readDWord( data ); - chunk.end = chunk.cur + chunk.size; - chunk.cur += 6; - - return chunk; - - }, - - /** - * Set position to the end of the current chunk of data. - * - * @method endChunk - * @param {Object} chunk Data chunk. - */ - endChunk: function ( chunk ) { - - this.position = chunk.end; - - }, - - /** - * Move to the next data chunk. - * - * @method nextChunk - * @param {DataView} data Dataview. - * @param {Object} chunk Data chunk. - */ - nextChunk: function ( data, chunk ) { - - if ( chunk.cur >= chunk.end ) { - - return 0; - - } - - this.position = chunk.cur; - - try { - - var next = this.readChunk( data ); - chunk.cur += next.size; - return next.id; - - } catch ( e ) { - - this.debugMessage( 'Unable to read chunk at ' + this.position ); - return 0; - - } - - }, - - /** - * Reset dataview position. - * - * @method resetPosition - * @param {DataView} data Dataview. - */ - resetPosition: function () { - - this.position -= 6; - - }, - - /** - * Read byte value. - * - * @method readByte - * @param {DataView} data Dataview to read data from. - * @return {Number} Data read from the dataview. - */ - readByte: function ( data ) { - - var v = data.getUint8( this.position, true ); - this.position += 1; - return v; - - }, - - /** - * Read 32 bit float value. - * - * @method readFloat - * @param {DataView} data Dataview to read data from. - * @return {Number} Data read from the dataview. - */ - readFloat: function ( data ) { - - try { - - var v = data.getFloat32( this.position, true ); - this.position += 4; - return v; - - } catch ( e ) { - - this.debugMessage( e + ' ' + this.position + ' ' + data.byteLength ); - - } - - }, - - /** - * Read 32 bit signed integer value. - * - * @method readInt - * @param {DataView} data Dataview to read data from. - * @return {Number} Data read from the dataview. - */ - readInt: function ( data ) { - - var v = data.getInt32( this.position, true ); - this.position += 4; - return v; - - }, - - /** - * Read 16 bit signed integer value. - * - * @method readShort - * @param {DataView} data Dataview to read data from. - * @return {Number} Data read from the dataview. - */ - readShort: function ( data ) { - - var v = data.getInt16( this.position, true ); - this.position += 2; - return v; - - }, - - /** - * Read 64 bit unsigned integer value. - * - * @method readDWord - * @param {DataView} data Dataview to read data from. 
- * @return {Number} Data read from the dataview. - */ - readDWord: function ( data ) { - - var v = data.getUint32( this.position, true ); - this.position += 4; - return v; - - }, - - /** - * Read 32 bit unsigned integer value. - * - * @method readWord - * @param {DataView} data Dataview to read data from. - * @return {Number} Data read from the dataview. - */ - readWord: function ( data ) { - - var v = data.getUint16( this.position, true ); - this.position += 2; - return v; - - }, - - /** - * Read string value. - * - * @method readString - * @param {DataView} data Dataview to read data from. - * @param {Number} maxLength Max size of the string to be read. - * @return {String} Data read from the dataview. - */ - readString: function ( data, maxLength ) { - - var s = ''; - - for ( var i = 0; i < maxLength; i ++ ) { - - var c = this.readByte( data ); - if ( ! c ) { - - break; - - } - - s += String.fromCharCode( c ); - - } - - return s; - - }, - - /** - * Set path to adjust the path to the original 3ds file. - * - * @method setPath - * @param {String} path Path to file. - * @return Self for chaining. - */ - setPath: function ( path ) { - - this.path = path; - - return this; - - }, - - /** - * Set resource path used to determine the path to attached resources like textures. - * - * @method setResourcePath - * @param {String} resourcePath Path to resources. - * @return Self for chaining. - */ - setResourcePath: function ( resourcePath ) { - - this.resourcePath = resourcePath; - - return this; - - }, - - /** - * Set crossOrigin value to configure CORS settings - * for the image loading process. - * - * @method setCrossOrigin - * @param {String} crossOrigin crossOrigin string. - * @return Self for chaining. - */ - setCrossOrigin: function ( crossOrigin ) { - - this.crossOrigin = crossOrigin; - - return this; - - }, - - /** - * Print debug message to the console. - * - * Is controlled by a flag to show or hide debug messages. - * - * @method debugMessage - * @param {Object} message Debug message to print to the console. 
- */ - debugMessage: function ( message ) { - - if ( this.debug ) { - - console.log( message ); - - } - - } -}; - -var NULL_CHUNK = 0x0000; -var M3DMAGIC = 0x4D4D; -var SMAGIC = 0x2D2D; -var LMAGIC = 0x2D3D; -var MLIBMAGIC = 0x3DAA; -var MATMAGIC = 0x3DFF; -var CMAGIC = 0xC23D; -var M3D_VERSION = 0x0002; -var M3D_KFVERSION = 0x0005; -var COLOR_F = 0x0010; -var COLOR_24 = 0x0011; -var LIN_COLOR_24 = 0x0012; -var LIN_COLOR_F = 0x0013; -var INT_PERCENTAGE = 0x0030; -var FLOAT_PERCENTAGE = 0x0031; -var MDATA = 0x3D3D; -var MESH_VERSION = 0x3D3E; -var MASTER_SCALE = 0x0100; -var LO_SHADOW_BIAS = 0x1400; -var HI_SHADOW_BIAS = 0x1410; -var SHADOW_MAP_SIZE = 0x1420; -var SHADOW_SAMPLES = 0x1430; -var SHADOW_RANGE = 0x1440; -var SHADOW_FILTER = 0x1450; -var RAY_BIAS = 0x1460; -var O_CONSTS = 0x1500; -var AMBIENT_LIGHT = 0x2100; -var BIT_MAP = 0x1100; -var SOLID_BGND = 0x1200; -var V_GRADIENT = 0x1300; -var USE_BIT_MAP = 0x1101; -var USE_SOLID_BGND = 0x1201; -var USE_V_GRADIENT = 0x1301; -var FOG = 0x2200; -var FOG_BGND = 0x2210; -var LAYER_FOG = 0x2302; -var DISTANCE_CUE = 0x2300; -var DCUE_BGND = 0x2310; -var USE_FOG = 0x2201; -var USE_LAYER_FOG = 0x2303; -var USE_DISTANCE_CUE = 0x2301; -var MAT_ENTRY = 0xAFFF; -var MAT_NAME = 0xA000; -var MAT_AMBIENT = 0xA010; -var MAT_DIFFUSE = 0xA020; -var MAT_SPECULAR = 0xA030; -var MAT_SHININESS = 0xA040; -var MAT_SHIN2PCT = 0xA041; -var MAT_TRANSPARENCY = 0xA050; -var MAT_XPFALL = 0xA052; -var MAT_USE_XPFALL = 0xA240; -var MAT_REFBLUR = 0xA053; -var MAT_SHADING = 0xA100; -var MAT_USE_REFBLUR = 0xA250; -var MAT_SELF_ILLUM = 0xA084; -var MAT_TWO_SIDE = 0xA081; -var MAT_DECAL = 0xA082; -var MAT_ADDITIVE = 0xA083; -var MAT_WIRE = 0xA085; -var MAT_FACEMAP = 0xA088; -var MAT_TRANSFALLOFF_IN = 0xA08A; -var MAT_PHONGSOFT = 0xA08C; -var MAT_WIREABS = 0xA08E; -var MAT_WIRE_SIZE = 0xA087; -var MAT_TEXMAP = 0xA200; -var MAT_SXP_TEXT_DATA = 0xA320; -var MAT_TEXMASK = 0xA33E; -var MAT_SXP_TEXTMASK_DATA = 0xA32A; -var MAT_TEX2MAP = 0xA33A; -var MAT_SXP_TEXT2_DATA = 0xA321; -var MAT_TEX2MASK = 0xA340; -var MAT_SXP_TEXT2MASK_DATA = 0xA32C; -var MAT_OPACMAP = 0xA210; -var MAT_SXP_OPAC_DATA = 0xA322; -var MAT_OPACMASK = 0xA342; -var MAT_SXP_OPACMASK_DATA = 0xA32E; -var MAT_BUMPMAP = 0xA230; -var MAT_SXP_BUMP_DATA = 0xA324; -var MAT_BUMPMASK = 0xA344; -var MAT_SXP_BUMPMASK_DATA = 0xA330; -var MAT_SPECMAP = 0xA204; -var MAT_SXP_SPEC_DATA = 0xA325; -var MAT_SPECMASK = 0xA348; -var MAT_SXP_SPECMASK_DATA = 0xA332; -var MAT_SHINMAP = 0xA33C; -var MAT_SXP_SHIN_DATA = 0xA326; -var MAT_SHINMASK = 0xA346; -var MAT_SXP_SHINMASK_DATA = 0xA334; -var MAT_SELFIMAP = 0xA33D; -var MAT_SXP_SELFI_DATA = 0xA328; -var MAT_SELFIMASK = 0xA34A; -var MAT_SXP_SELFIMASK_DATA = 0xA336; -var MAT_REFLMAP = 0xA220; -var MAT_REFLMASK = 0xA34C; -var MAT_SXP_REFLMASK_DATA = 0xA338; -var MAT_ACUBIC = 0xA310; -var MAT_MAPNAME = 0xA300; -var MAT_MAP_TILING = 0xA351; -var MAT_MAP_TEXBLUR = 0xA353; -var MAT_MAP_USCALE = 0xA354; -var MAT_MAP_VSCALE = 0xA356; -var MAT_MAP_UOFFSET = 0xA358; -var MAT_MAP_VOFFSET = 0xA35A; -var MAT_MAP_ANG = 0xA35C; -var MAT_MAP_COL1 = 0xA360; -var MAT_MAP_COL2 = 0xA362; -var MAT_MAP_RCOL = 0xA364; -var MAT_MAP_GCOL = 0xA366; -var MAT_MAP_BCOL = 0xA368; -var NAMED_OBJECT = 0x4000; -var N_DIRECT_LIGHT = 0x4600; -var DL_OFF = 0x4620; -var DL_OUTER_RANGE = 0x465A; -var DL_INNER_RANGE = 0x4659; -var DL_MULTIPLIER = 0x465B; -var DL_EXCLUDE = 0x4654; -var DL_ATTENUATE = 0x4625; -var DL_SPOTLIGHT = 0x4610; -var DL_SPOT_ROLL = 0x4656; -var DL_SHADOWED = 0x4630; -var DL_LOCAL_SHADOW2 = 0x4641; 
-var DL_SEE_CONE = 0x4650; -var DL_SPOT_RECTANGULAR = 0x4651; -var DL_SPOT_ASPECT = 0x4657; -var DL_SPOT_PROJECTOR = 0x4653; -var DL_SPOT_OVERSHOOT = 0x4652; -var DL_RAY_BIAS = 0x4658; -var DL_RAYSHAD = 0x4627; -var N_CAMERA = 0x4700; -var CAM_SEE_CONE = 0x4710; -var CAM_RANGES = 0x4720; -var OBJ_HIDDEN = 0x4010; -var OBJ_VIS_LOFTER = 0x4011; -var OBJ_DOESNT_CAST = 0x4012; -var OBJ_DONT_RECVSHADOW = 0x4017; -var OBJ_MATTE = 0x4013; -var OBJ_FAST = 0x4014; -var OBJ_PROCEDURAL = 0x4015; -var OBJ_FROZEN = 0x4016; -var N_TRI_OBJECT = 0x4100; -var POINT_ARRAY = 0x4110; -var POINT_FLAG_ARRAY = 0x4111; -var FACE_ARRAY = 0x4120; -var MSH_MAT_GROUP = 0x4130; -var SMOOTH_GROUP = 0x4150; -var MSH_BOXMAP = 0x4190; -var TEX_VERTS = 0x4140; -var MESH_MATRIX = 0x4160; -var MESH_COLOR = 0x4165; -var MESH_TEXTURE_INFO = 0x4170; -var KFDATA = 0xB000; -var KFHDR = 0xB00A; -var KFSEG = 0xB008; -var KFCURTIME = 0xB009; -var AMBIENT_NODE_TAG = 0xB001; -var OBJECT_NODE_TAG = 0xB002; -var CAMERA_NODE_TAG = 0xB003; -var TARGET_NODE_TAG = 0xB004; -var LIGHT_NODE_TAG = 0xB005; -var L_TARGET_NODE_TAG = 0xB006; -var SPOTLIGHT_NODE_TAG = 0xB007; -var NODE_ID = 0xB030; -var NODE_HDR = 0xB010; -var PIVOT = 0xB013; -var INSTANCE_NAME = 0xB011; -var MORPH_SMOOTH = 0xB015; -var BOUNDBOX = 0xB014; -var POS_TRACK_TAG = 0xB020; -var COL_TRACK_TAG = 0xB025; -var ROT_TRACK_TAG = 0xB021; -var SCL_TRACK_TAG = 0xB022; -var MORPH_TRACK_TAG = 0xB026; -var FOV_TRACK_TAG = 0xB023; -var ROLL_TRACK_TAG = 0xB024; -var HOT_TRACK_TAG = 0xB027; -var FALL_TRACK_TAG = 0xB028; -var HIDE_TRACK_TAG = 0xB029; -var POLY_2D = 0x5000; -var SHAPE_OK = 0x5010; -var SHAPE_NOT_OK = 0x5011; -var SHAPE_HOOK = 0x5020; -var PATH_3D = 0x6000; -var PATH_MATRIX = 0x6005; -var SHAPE_2D = 0x6010; -var M_SCALE = 0x6020; -var M_TWIST = 0x6030; -var M_TEETER = 0x6040; -var M_FIT = 0x6050; -var M_BEVEL = 0x6060; -var XZ_CURVE = 0x6070; -var YZ_CURVE = 0x6080; -var INTERPCT = 0x6090; -var DEFORM_LIMIT = 0x60A0; -var USE_CONTOUR = 0x6100; -var USE_TWEEN = 0x6110; -var USE_SCALE = 0x6120; -var USE_TWIST = 0x6130; -var USE_TEETER = 0x6140; -var USE_FIT = 0x6150; -var USE_BEVEL = 0x6160; -var DEFAULT_VIEW = 0x3000; -var VIEW_TOP = 0x3010; -var VIEW_BOTTOM = 0x3020; -var VIEW_LEFT = 0x3030; -var VIEW_RIGHT = 0x3040; -var VIEW_FRONT = 0x3050; -var VIEW_BACK = 0x3060; -var VIEW_USER = 0x3070; -var VIEW_CAMERA = 0x3080; -var VIEW_WINDOW = 0x3090; -var VIEWPORT_LAYOUT_OLD = 0x7000; -var VIEWPORT_DATA_OLD = 0x7010; -var VIEWPORT_LAYOUT = 0x7001; -var VIEWPORT_DATA = 0x7011; -var VIEWPORT_DATA_3 = 0x7012; -var VIEWPORT_SIZE = 0x7020; -var NETWORK_VIEW = 0x7030; diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/accessors/ScreenUVNode.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/accessors/ScreenUVNode.js deleted file mode 100644 index a2aac5f0108f8dce48daff00ad4f2b1c540b5796..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/accessors/ScreenUVNode.js +++ /dev/null @@ -1,65 +0,0 @@ -/** - * @author sunag / http://www.sunag.com.br/ - */ - -import { TempNode } from '../core/TempNode.js'; -import { ResolutionNode } from './ResolutionNode.js'; - -function ScreenUVNode( resolution ) { - - TempNode.call( this, 'v2' ); - - this.resolution = resolution || new ResolutionNode(); - -} - -ScreenUVNode.prototype = Object.create( TempNode.prototype ); -ScreenUVNode.prototype.constructor = ScreenUVNode; -ScreenUVNode.prototype.nodeType = "ScreenUV"; - 
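// Editorial usage sketch (the import paths and node names below are assumptions drawn
// from the sibling examples/js/nodes folders, not from this file): because ScreenUVNode
// is a 'v2' node, it can be plugged in wherever a UV input is expected, for example to
// sample a texture in 0..1 viewport coordinates:
//
//   // import { StandardNodeMaterial } from '../materials/StandardNodeMaterial.js';
//   // import { TextureNode } from '../inputs/TextureNode.js';
//   var material = new StandardNodeMaterial();
//   material.color = new TextureNode( texture, new ScreenUVNode() );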
-ScreenUVNode.prototype.generate = function ( builder, output ) { - - var result; - - if ( builder.isShader( 'fragment' ) ) { - - result = '( gl_FragCoord.xy / ' + this.resolution.build( builder, 'v2' ) + ')'; - - } else { - - console.warn( "THREE.ScreenUVNode is not compatible with " + builder.shader + " shader." ); - - result = 'vec2( 0.0 )'; - - } - - return builder.format( result, this.getType( builder ), output ); - -}; - -ScreenUVNode.prototype.copy = function ( source ) { - - TempNode.prototype.copy.call( this, source ); - - this.resolution = source.resolution; - -}; - -ScreenUVNode.prototype.toJSON = function ( meta ) { - - var data = this.getJSONNode( meta ); - - if ( ! data ) { - - data = this.createJSONNode( meta ); - - data.resolution = this.resolution.toJSON( meta ).uuid; - - } - - return data; - -}; - -export { ScreenUVNode }; - diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/skinnormal_vertex.glsl.js b/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/skinnormal_vertex.glsl.js deleted file mode 100644 index 2b8e5cc84c7e512f79949d51f354a42be554b5ab..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/skinnormal_vertex.glsl.js +++ /dev/null @@ -1,20 +0,0 @@ -export default /* glsl */` -#ifdef USE_SKINNING - - mat4 skinMatrix = mat4( 0.0 ); - skinMatrix += skinWeight.x * boneMatX; - skinMatrix += skinWeight.y * boneMatY; - skinMatrix += skinWeight.z * boneMatZ; - skinMatrix += skinWeight.w * boneMatW; - skinMatrix = bindMatrixInverse * skinMatrix * bindMatrix; - - objectNormal = vec4( skinMatrix * vec4( objectNormal, 0.0 ) ).xyz; - - #ifdef USE_TANGENT - - objectTangent = vec4( skinMatrix * vec4( objectTangent, 0.0 ) ).xyz; - - #endif - -#endif -`; diff --git a/spaces/bhasker412/IDD-YOLO-Tracking/trackers/strongsort/sort/kalman_filter.py b/spaces/bhasker412/IDD-YOLO-Tracking/trackers/strongsort/sort/kalman_filter.py deleted file mode 100644 index 87c48d7e332bef5f8feab8abf7936409abbf5d03..0000000000000000000000000000000000000000 --- a/spaces/bhasker412/IDD-YOLO-Tracking/trackers/strongsort/sort/kalman_filter.py +++ /dev/null @@ -1,214 +0,0 @@ -# vim: expandtab:ts=4:sw=4 -import numpy as np -import scipy.linalg -""" -Table for the 0.95 quantile of the chi-square distribution with N degrees of -freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv -function and used as Mahalanobis gating threshold. -""" -chi2inv95 = { - 1: 3.8415, - 2: 5.9915, - 3: 7.8147, - 4: 9.4877, - 5: 11.070, - 6: 12.592, - 7: 14.067, - 8: 15.507, - 9: 16.919} - - -class KalmanFilter(object): - """ - A simple Kalman filter for tracking bounding boxes in image space. - The 8-dimensional state space - x, y, a, h, vx, vy, va, vh - contains the bounding box center position (x, y), aspect ratio a, height h, - and their respective velocities. - Object motion follows a constant velocity model. The bounding box location - (x, y, a, h) is taken as direct observation of the state space (linear - observation model). - """ - - def __init__(self): - ndim, dt = 4, 1. - - # Create Kalman filter model matrices. - self._motion_mat = np.eye(2 * ndim, 2 * ndim) - for i in range(ndim): - self._motion_mat[i, ndim + i] = dt - - self._update_mat = np.eye(ndim, 2 * ndim) - - # Motion and observation uncertainty are chosen relative to the current - # state estimate. These weights control the amount of uncertainty in - # the model. This is a bit hacky. 
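        # Concretely: the standard deviations built in initiate()/predict() below are these
        # weights multiplied by the current state components (position terms use roughly
        # 1/20 of the component value, velocity terms roughly 1/160), so the noise scales
        # with the track itself rather than being a fixed constant.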
- self._std_weight_position = 1. / 20 - self._std_weight_velocity = 1. / 160 - - def initiate(self, measurement): - """Create track from unassociated measurement. - Parameters - ---------- - measurement : ndarray - Bounding box coordinates (x, y, a, h) with center position (x, y), - aspect ratio a, and height h. - Returns - ------- - (ndarray, ndarray) - Returns the mean vector (8 dimensional) and covariance matrix (8x8 - dimensional) of the new track. Unobserved velocities are initialized - to 0 mean. - """ - mean_pos = measurement - mean_vel = np.zeros_like(mean_pos) - mean = np.r_[mean_pos, mean_vel] - - std = [ - 2 * self._std_weight_position * measurement[0], # the center point x - 2 * self._std_weight_position * measurement[1], # the center point y - 1 * measurement[2], # the ratio of width/height - 2 * self._std_weight_position * measurement[3], # the height - 10 * self._std_weight_velocity * measurement[0], - 10 * self._std_weight_velocity * measurement[1], - 0.1 * measurement[2], - 10 * self._std_weight_velocity * measurement[3]] - covariance = np.diag(np.square(std)) - return mean, covariance - - def predict(self, mean, covariance): - """Run Kalman filter prediction step. - Parameters - ---------- - mean : ndarray - The 8 dimensional mean vector of the object state at the previous - time step. - covariance : ndarray - The 8x8 dimensional covariance matrix of the object state at the - previous time step. - Returns - ------- - (ndarray, ndarray) - Returns the mean vector and covariance matrix of the predicted - state. Unobserved velocities are initialized to 0 mean. - """ - std_pos = [ - self._std_weight_position * mean[0], - self._std_weight_position * mean[1], - 1 * mean[2], - self._std_weight_position * mean[3]] - std_vel = [ - self._std_weight_velocity * mean[0], - self._std_weight_velocity * mean[1], - 0.1 * mean[2], - self._std_weight_velocity * mean[3]] - motion_cov = np.diag(np.square(np.r_[std_pos, std_vel])) - - mean = np.dot(self._motion_mat, mean) - covariance = np.linalg.multi_dot(( - self._motion_mat, covariance, self._motion_mat.T)) + motion_cov - - return mean, covariance - - def project(self, mean, covariance, confidence=.0): - """Project state distribution to measurement space. - Parameters - ---------- - mean : ndarray - The state's mean vector (8 dimensional array). - covariance : ndarray - The state's covariance matrix (8x8 dimensional). - confidence: (dyh) 检测框置信度 - Returns - ------- - (ndarray, ndarray) - Returns the projected mean and covariance matrix of the given state - estimate. - """ - std = [ - self._std_weight_position * mean[3], - self._std_weight_position * mean[3], - 1e-1, - self._std_weight_position * mean[3]] - - - std = [(1 - confidence) * x for x in std] - - innovation_cov = np.diag(np.square(std)) - - mean = np.dot(self._update_mat, mean) - covariance = np.linalg.multi_dot(( - self._update_mat, covariance, self._update_mat.T)) - return mean, covariance + innovation_cov - - def update(self, mean, covariance, measurement, confidence=.0): - """Run Kalman filter correction step. - Parameters - ---------- - mean : ndarray - The predicted state's mean vector (8 dimensional). - covariance : ndarray - The state's covariance matrix (8x8 dimensional). - measurement : ndarray - The 4 dimensional measurement vector (x, y, a, h), where (x, y) - is the center position, a the aspect ratio, and h the height of the - bounding box. - confidence: (dyh)检测框置信度 - Returns - ------- - (ndarray, ndarray) - Returns the measurement-corrected state distribution. 
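        Notes
        -----
        (Editorial note) The usual per-frame sequence is one call to `initiate`, then
        `predict` followed by `update` for every new detection. The `confidence` argument
        is forwarded to `project`, which scales the measurement noise by (1 - confidence),
        so high-confidence detections correct the state more strongly (the NSA-Kalman
        behaviour used by StrongSORT).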
- """ - projected_mean, projected_cov = self.project(mean, covariance, confidence) - - chol_factor, lower = scipy.linalg.cho_factor( - projected_cov, lower=True, check_finite=False) - kalman_gain = scipy.linalg.cho_solve( - (chol_factor, lower), np.dot(covariance, self._update_mat.T).T, - check_finite=False).T - innovation = measurement - projected_mean - - new_mean = mean + np.dot(innovation, kalman_gain.T) - new_covariance = covariance - np.linalg.multi_dot(( - kalman_gain, projected_cov, kalman_gain.T)) - return new_mean, new_covariance - - def gating_distance(self, mean, covariance, measurements, - only_position=False): - """Compute gating distance between state distribution and measurements. - A suitable distance threshold can be obtained from `chi2inv95`. If - `only_position` is False, the chi-square distribution has 4 degrees of - freedom, otherwise 2. - Parameters - ---------- - mean : ndarray - Mean vector over the state distribution (8 dimensional). - covariance : ndarray - Covariance of the state distribution (8x8 dimensional). - measurements : ndarray - An Nx4 dimensional matrix of N measurements, each in - format (x, y, a, h) where (x, y) is the bounding box center - position, a the aspect ratio, and h the height. - only_position : Optional[bool] - If True, distance computation is done with respect to the bounding - box center position only. - Returns - ------- - ndarray - Returns an array of length N, where the i-th element contains the - squared Mahalanobis distance between (mean, covariance) and - `measurements[i]`. - """ - mean, covariance = self.project(mean, covariance) - - if only_position: - mean, covariance = mean[:2], covariance[:2, :2] - measurements = measurements[:, :2] - - cholesky_factor = np.linalg.cholesky(covariance) - d = measurements - mean - z = scipy.linalg.solve_triangular( - cholesky_factor, d.T, lower=True, check_finite=False, - overwrite_b=True) - squared_maha = np.sum(z * z, axis=0) - return squared_maha \ No newline at end of file diff --git a/spaces/bigjoker/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/parseq_adapter.py b/spaces/bigjoker/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/parseq_adapter.py deleted file mode 100644 index 314b594ac25792358807bdb602cae7f97387edf4..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/parseq_adapter.py +++ /dev/null @@ -1,164 +0,0 @@ -import copy -import json -import logging -import operator -from operator import itemgetter - -import numpy as np -import pandas as pd -import requests - -from .animation_key_frames import DeformAnimKeys - -logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) - - -class ParseqAnimKeys(): - def __init__(self, parseq_args, anim_args): - - # Resolve manifest either directly from supplied value - # or via supplied URL - manifestOrUrl = parseq_args.parseq_manifest.strip() - if (manifestOrUrl.startswith('http')): - logging.info(f"Loading Parseq manifest from URL: {manifestOrUrl}") - try: - body = requests.get(manifestOrUrl).text - logging.debug(f"Loaded remote manifest: {body}") - self.parseq_json = json.loads(body) - - # Add the parseq manifest without the detailed frame data to parseq_args. - # This ensures it will be saved in the settings file, so that you can always - # see exactly what parseq prompts and keyframes were used, even if what the URL - # points to changes. 
- parseq_args.fetched_parseq_manifest_summary = copy.deepcopy(self.parseq_json) - if parseq_args.fetched_parseq_manifest_summary['rendered_frames']: - del parseq_args.fetched_parseq_manifest_summary['rendered_frames'] - if parseq_args.fetched_parseq_manifest_summary['rendered_frames_meta']: - del parseq_args.fetched_parseq_manifest_summary['rendered_frames_meta'] - - except Exception as e: - logging.error(f"Unable to load Parseq manifest from URL: {manifestOrUrl}") - raise e - else: - self.parseq_json = json.loads(manifestOrUrl) - - self.default_anim_keys = DeformAnimKeys(anim_args) - self.rendered_frames = self.parseq_json['rendered_frames'] - self.max_frame = self.get_max('frame') - count_defined_frames = len(self.rendered_frames) - expected_defined_frames = self.max_frame+1 # frames are 0-indexed - - self.required_frames = anim_args.max_frames - - if (expected_defined_frames != count_defined_frames): - logging.warning(f"There may be duplicated or missing frame data in the Parseq input: expected {expected_defined_frames} frames including frame 0 because the highest frame number is {self.max_frame}, but there are {count_defined_frames} frames defined.") - - if (anim_args.max_frames > count_defined_frames): - logging.info(f"Parseq data defines {count_defined_frames} frames, but the requested animation is {anim_args.max_frames} frames. The last Parseq frame definition will be duplicated to match the expected frame count.") - if (anim_args.max_frames < count_defined_frames): - logging.info(f"Parseq data defines {count_defined_frames} frames, but the requested animation is {anim_args.max_frames} frames. The last Parseq frame definitions will be ignored.") - else: - logging.info(f"Parseq data defines {count_defined_frames} frames.") - - # Parseq treats input values as absolute values. So if you want to - # progressively rotate 180 degrees over 4 frames, you specify: 45, 90, 135, 180. - # However, many animation parameters are relative to the previous frame if there is enough - # loopback strength. So if you want to rotate 180 degrees over 5 frames, the animation engine expects: - # 45, 45, 45, 45. Therefore, for such parameter, we use the fact that Parseq supplies delta values. 
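        # (Editorial note) The flag below only switches which Parseq field each series is
        # read from: with parseq_use_deltas=True the '_delta' variants (e.g. 'angle_delta')
        # are used, otherwise the absolute values (e.g. 'angle') are passed through as-is.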
- optional_delta = '_delta' if parseq_args.parseq_use_deltas else '' - self.angle_series = self.parseq_to_anim_series('angle' + optional_delta) - self.zoom_series = self.parseq_to_anim_series('zoom' + optional_delta) - self.translation_x_series = self.parseq_to_anim_series('translation_x' + optional_delta) - self.translation_y_series = self.parseq_to_anim_series('translation_y' + optional_delta) - self.translation_z_series = self.parseq_to_anim_series('translation_z' + optional_delta) - self.rotation_3d_x_series = self.parseq_to_anim_series('rotation_3d_x' + optional_delta) - self.rotation_3d_y_series = self.parseq_to_anim_series('rotation_3d_y' + optional_delta) - self.rotation_3d_z_series = self.parseq_to_anim_series('rotation_3d_z' + optional_delta) - self.perspective_flip_theta_series = self.parseq_to_anim_series('perspective_flip_theta' + optional_delta) - self.perspective_flip_phi_series = self.parseq_to_anim_series('perspective_flip_phi' + optional_delta) - self.perspective_flip_gamma_series = self.parseq_to_anim_series('perspective_flip_gamma' + optional_delta) - - # Non-motion animation args - self.perspective_flip_fv_series = self.parseq_to_anim_series('perspective_flip_fv') - self.noise_schedule_series = self.parseq_to_anim_series('noise') - self.strength_schedule_series = self.parseq_to_anim_series('strength') - self.sampler_schedule_series = self.parseq_to_anim_series('sampler_schedule') - self.contrast_schedule_series = self.parseq_to_anim_series('contrast') - self.cfg_scale_schedule_series = self.parseq_to_anim_series('scale') - self.steps_schedule_series = self.parseq_to_anim_series("steps_schedule") - self.seed_schedule_series = self.parseq_to_anim_series('seed') - self.fov_series = self.parseq_to_anim_series('fov') - self.near_series = self.parseq_to_anim_series('near') - self.far_series = self.parseq_to_anim_series('far') - self.prompts = self.parseq_to_anim_series('deforum_prompt') # formatted as "{positive} --neg {negative}" - self.subseed_series = self.parseq_to_anim_series('subseed') - self.subseed_strength_series = self.parseq_to_anim_series('subseed_strength') - self.kernel_schedule_series = self.parseq_to_anim_series('antiblur_kernel') - self.sigma_schedule_series = self.parseq_to_anim_series('antiblur_sigma') - self.amount_schedule_series = self.parseq_to_anim_series('antiblur_amount') - self.threshold_schedule_series = self.parseq_to_anim_series('antiblur_threshold') - - # Config: - # TODO this is currently ignored. User must ensure the output FPS set in parseq - # matches the one set in Deforum to avoid unexpected results. - self.config_output_fps = self.parseq_json['options']['output_fps'] - - def get_max(self, seriesName): - return max(self.rendered_frames, key=itemgetter(seriesName))[seriesName] - - def parseq_to_anim_series(self, seriesName): - - # Check if valus is present in first frame of JSON data. If not, assume it's undefined. - # The Parseq contract is that the first frame (at least) must define values for all fields. - try: - if self.rendered_frames[0][seriesName] is not None: - logging.info(f"Found {seriesName} in first frame of Parseq data. Assuming it's defined.") - except KeyError: - return None - - key_frame_series = pd.Series([np.nan for a in range(self.required_frames)]) - - for frame in self.rendered_frames: - frame_idx = frame['frame'] - if frame_idx < self.required_frames: - if not np.isnan(key_frame_series[frame_idx]): - logging.warning(f"Duplicate frame definition {frame_idx} detected for data {seriesName}. 
Latest wins.") - key_frame_series[frame_idx] = frame[seriesName] - - # If the animation will have more frames than Parseq defines, - # duplicate final value to match the required frame count. - while (frame_idx < self.required_frames): - key_frame_series[frame_idx] = operator.itemgetter(-1)(self.rendered_frames)[seriesName] - frame_idx += 1 - - return key_frame_series - - # fallback to anim_args if the series is not defined in the Parseq data - def __getattribute__(inst, name): - try: - definedField = super(ParseqAnimKeys, inst).__getattribute__(name) - except AttributeError: - # No field with this name has been explicitly extracted from the JSON data. - # It must be a new parameter. Let's see if it's in the raw JSON. - - # parseq doesn't use _series, _schedule or _schedule_series suffixes in the - # JSON data - remove them. - strippableSuffixes = ['_series', '_schedule'] - parseqName = name - while any(parseqName.endswith(suffix) for suffix in strippableSuffixes): - for suffix in strippableSuffixes: - if parseqName.endswith(suffix): - parseqName = parseqName[:-len(suffix)] - - # returns None if not defined in Parseq JSON data - definedField = inst.parseq_to_anim_series(parseqName) - if (definedField is not None): - # add the field to the instance so we don't compute it again. - setattr(inst, name, definedField) - - if (definedField is not None): - return definedField - else: - logging.info(f"Data for {name} not defined in Parseq data (looked for: '{parseqName}'). Falling back to standard Deforum values.") - return getattr(inst.default_anim_keys, name) - diff --git a/spaces/bioriAsaeru/text-to-voice/CorelDRAW Graphics Suite X7 V21.3.0.755 2020 Keygen Crack The Ultimate Solution for Graphic Designers and Artists.md b/spaces/bioriAsaeru/text-to-voice/CorelDRAW Graphics Suite X7 V21.3.0.755 2020 Keygen Crack The Ultimate Solution for Graphic Designers and Artists.md deleted file mode 100644 index 5a0e786876d599ba6590c3b8e7eb1f2d0878abd2..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/CorelDRAW Graphics Suite X7 V21.3.0.755 2020 Keygen Crack The Ultimate Solution for Graphic Designers and Artists.md +++ /dev/null @@ -1,5 +0,0 @@ -
      -

      Hence, If you could not really go to the live life webinar of the demonstration of the fresh edition of the well-known graphics suite CorelDRAW Sixth is v21 Keygen linux windwos 94fbr, do not miss the video clip saving óf it. A briefing óf nearly 2 hours, fully in Spanish language to understand the Technology advantages of the brand-new version. Furthermore to discover guidelines and tricks of this system in the hands of the Corel Product Manager.Also visit: High quality edition of 2019 for macintosh and home windows. CorelDRAW Graphics Selection 2019 Service Program code ListIt will be not really a easy briefing of brand-new functions of version number 20 of the CorelDRAW portbale kyuhaa selection. So, the video also displays the brand-new functions in action. In addition to detailing very clearly other ideas and new functions. In the webinar video, the brand-new system and features have got in crack Corel DRAW Graphics Selection 2019 area is furthermore explained and proven as the great symmetry sketching mode.

      -

      CorelDRAW Graphics Suite X7 V21.3.0.755 2020 Keygen Crack


      Download >>> https://urloso.com/2uyRag



      -
      -
      \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/Khoobsurat Telugu Movie Mp3 Songs Free Download.md b/spaces/bioriAsaeru/text-to-voice/Khoobsurat Telugu Movie Mp3 Songs Free Download.md deleted file mode 100644 index 3ebf48405a7c0347ebf801934925d7cd22f294ca..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Khoobsurat Telugu Movie Mp3 Songs Free Download.md +++ /dev/null @@ -1,6 +0,0 @@ - -

      bestwap Masstamilan mp3red mp3goo darshan y2mate gaana saavn jiomix pagalsongs downloadming musicbadshah mp3 mashup y2mate onlinemp3converter youtubeconverter songspk djyoungster mp3mad pk songs mirchifun vidmate vidtube songsweb kuttyweb lyricsmint wapking mobiwap wapmate atozmp3 naalyrics naasongs naa song pagal world sensongs hungama youtube fimbeat bigmusic songspkmania jiosaavn mp3vista mrjatt wynk djmaza mp3out sweetmp3 mr-jatt soundcloud tik tok pagalmp3songs mp3skull spotify pandora mangoo bollyshake raagsong saregama lahari tseries mp3load bgmringtones zedge tamiltunes Hindiwap Hindiwap kannadawap kannadamasti gujarati kuttysongs biharmasti Hinditracks ytmp3 flvto mp3hub mp3converter Youzik notube clipconverter savemp3 anything2mp3 converto listenvid ddowner mp3fy peggo tamildada tamilwire mp3khan musichunt allindiamp3 Mp3 Skull MP3Raid Spotify Jamendo Dj Songs djyoungster wapking Raagsong sensongs

      -

      Radio MP3Juices EP3World MP3Fusion EMP3Z emp3z convert2mp3 Zing MP3 NCT MP3XD Zaycev Mr. Jatt RnBXclusive Daily New Jams BeeMP3 ClanMP3 Tubidy MP3 Tubidy songs pk web music 0audio songslover 9xtunes mangoloops Smusicly Songsy klickaudi mp3int musicby24 123musiq raaga mp3yaar tollysong sasngeetbangala justdj isaimini galatta Tamilamp3 kandupidi starmusiq tamilcube nsongs wsongs d4music malludevil mixwap riskyjatt indiae raagjatt mp3yaar tubidy djpod mp3hits dsmp3 hdmp3 mp3singar songaction Hindimasti
      paharisong musichearts mp3matt bestwap vipmarathi m4marathi pagaldj veermarathi djking remixmarathi soundsmarathi marathidjs3 downloading amp3
      maango me whatsapp status audio jukebox sen songs

      -

      Khoobsurat telugu movie mp3 songs free download


      Download ····· https://urloso.com/2uyRaP



      -
      -
      \ No newline at end of file diff --git a/spaces/boomsss/gamedayspx/model_1h.py b/spaces/boomsss/gamedayspx/model_1h.py deleted file mode 100644 index 16e79f7ad9dedecbd0abffc39c20c0b617103826..0000000000000000000000000000000000000000 --- a/spaces/boomsss/gamedayspx/model_1h.py +++ /dev/null @@ -1,481 +0,0 @@ -import streamlit as st -import pandas as pd -import pandas_datareader as pdr -import numpy as np -import yfinance as yf -import json -import requests -from bs4 import BeautifulSoup -from typing import List -import xgboost as xgb -from tqdm import tqdm -from sklearn import linear_model -import joblib -import os -from sklearn.metrics import roc_auc_score, precision_score, recall_score -import datetime -from pandas.tseries.offsets import BDay -from datasets import load_dataset -import lightgbm as lgb - -def walk_forward_validation(df, target_column, num_training_rows, num_periods): - - # Create an XGBRegressor model - # model = xgb.XGBRegressor(n_estimators=100, objective='reg:squarederror', random_state = 42) - model = linear_model.LinearRegression() - - overall_results = [] - # Iterate over the rows in the DataFrame, one step at a time - for i in tqdm(range(num_training_rows, df.shape[0] - num_periods + 1),desc='LR Model'): - # Split the data into training and test sets - X_train = df.drop(target_column, axis=1).iloc[:i] - y_train = df[target_column].iloc[:i] - X_test = df.drop(target_column, axis=1).iloc[i:i+num_periods] - y_test = df[target_column].iloc[i:i+num_periods] - - # Fit the model to the training data - model.fit(X_train, y_train) - - # Make a prediction on the test data - predictions = model.predict(X_test) - - # Create a DataFrame to store the true and predicted values - result_df = pd.DataFrame({'True': y_test, 'Predicted': predictions}, index=y_test.index) - - overall_results.append(result_df) - - df_results = pd.concat(overall_results) - # model.save_model('model_lr.bin') - # Return the true and predicted values, and fitted model - return df_results, model - -model_cols = [ - 'BigNewsDay', - 'Quarter', - 'Perf5Day', - 'Perf5Day_n1', - 'DaysGreen', - 'DaysRed', - 'CurrentHigh30toClose', - 'CurrentLow30toClose', - 'CurrentClose30toClose', - 'CurrentRange30', - 'GapFill30', - 'CurrentGap', - 'RangePct', - 'RangePct_n1', - 'RangePct_n2', - 'OHLC4_VIX', - 'OHLC4_VIX_n1', - 'OHLC4_VIX_n2', - 'OpenL1', - 'OpenL2', - 'OpenH1', - 'OpenH2', - 'L1TouchPct', - 'L2TouchPct', - 'H1TouchPct', - 'H2TouchPct', - 'L1BreakPct', - 'L2BreakPct', - 'H1BreakPct', - 'H2BreakPct', - 'GreenProbas', - # 'GapFillGreenProba' - -] - -def walk_forward_validation_seq(df, target_column_clf, target_column_regr, num_training_rows, num_periods): - - # Create run the regression model to get its target - res, model1 = walk_forward_validation(df.drop(columns=[target_column_clf]).dropna(), target_column_regr, num_training_rows, num_periods) - # joblib.dump(model1, 'model1.bin') - - # Merge the result df back on the df for feeding into the classifier - for_merge = res[['Predicted']] - for_merge.columns = ['RegrModelOut'] - for_merge['RegrModelOut'] = for_merge['RegrModelOut'] > 0 - df = df.merge(for_merge, left_index=True, right_index=True) - df = df.drop(columns=[target_column_regr]) - df = df[model_cols + ['RegrModelOut', target_column_clf]] - - df[target_column_clf] = df[target_column_clf].astype(bool) - df['RegrModelOut'] = df['RegrModelOut'].astype(bool) - - # Create an XGBRegressor model - # model2 = xgb.XGBClassifier(n_estimators=10, random_state = 42) - model2 = lgb.LGBMClassifier(n_estimators=10, 
random_state=42, verbosity=-1) - # model = linear_model.LogisticRegression(max_iter=1500) - - overall_results = [] - # Iterate over the rows in the DataFrame, one step at a time - for i in tqdm(range(num_training_rows, df.shape[0] - num_periods + 1),'CLF Model'): - # Split the data into training and test sets - X_train = df.drop(target_column_clf, axis=1).iloc[:i] - y_train = df[target_column_clf].iloc[:i] - X_test = df.drop(target_column_clf, axis=1).iloc[i:i+num_periods] - y_test = df[target_column_clf].iloc[i:i+num_periods] - - # Fit the model to the training data - model2.fit(X_train, y_train) - - # Make a prediction on the test data - predictions = model2.predict_proba(X_test)[:,-1] - - # Create a DataFrame to store the true and predicted values - result_df = pd.DataFrame({'True': y_test, 'Predicted': predictions}, index=y_test.index) - - overall_results.append(result_df) - - df_results = pd.concat(overall_results) - # model1.save_model('model_ensemble.bin') - # joblib.dump(model2, 'model2.bin') - # Return the true and predicted values, and fitted model - return df_results, model1, model2 - -def seq_predict_proba(df, trained_reg_model, trained_clf_model): - regr_pred = trained_reg_model.predict(df) - regr_pred = regr_pred > 0 - new_df = df.copy() - new_df['RegrModelOut'] = regr_pred - clf_pred_proba = trained_clf_model.predict_proba(new_df[model_cols + ['RegrModelOut']])[:,-1] - return clf_pred_proba - -def get_data(): - # f = open('settings.json') - # j = json.load(f) - # API_KEY_FRED = j["API_KEY_FRED"] - - API_KEY_FRED = os.getenv('API_KEY_FRED') - - def parse_release_dates(release_id: str) -> List[str]: - release_dates_url = f'https://api.stlouisfed.org/fred/release/dates?release_id={release_id}&realtime_start=2015-01-01&include_release_dates_with_no_data=true&api_key={API_KEY_FRED}' - r = requests.get(release_dates_url) - text = r.text - soup = BeautifulSoup(text, 'xml') - dates = [] - for release_date_tag in soup.find_all('release_date', {'release_id': release_id}): - dates.append(release_date_tag.text) - return dates - - def parse_release_dates_obs(series_id: str) -> List[str]: - obs_url = f'https://api.stlouisfed.org/fred/series/observations?series_id={series_id}&realtime_start=2015-01-01&include_release_dates_with_no_data=true&api_key={API_KEY_FRED}' - r = requests.get(obs_url) - text = r.text - soup = BeautifulSoup(text, 'xml') - observations = [] - for observation_tag in soup.find_all('observation'): - date = observation_tag.get('date') - value = observation_tag.get('value') - observations.append((date, value)) - return observations - - econ_dfs = {} - - econ_tickers = [ - 'WALCL', - 'NFCI', - 'WRESBAL' - ] - - for et in tqdm(econ_tickers, desc='getting econ tickers'): - # p = parse_release_dates_obs(et) - # df = pd.DataFrame(columns = ['ds',et], data = p) - df = pdr.get_data_fred(et) - df.index = df.index.rename('ds') - # df.index = pd.to_datetime(df.index.rename('ds')).dt.tz_localize(None) - # df['ds'] = pd.to_datetime(df['ds']).dt.tz_localize(None) - econ_dfs[et] = df - - # walcl = pd.DataFrame(columns = ['ds','WALCL'], data = p) - # walcl['ds'] = pd.to_datetime(walcl['ds']).dt.tz_localize(None) - - # nfci = pd.DataFrame(columns = ['ds','NFCI'], data = p2) - # nfci['ds'] = pd.to_datetime(nfci['ds']).dt.tz_localize(None) - - release_ids = [ - "10", # "Consumer Price Index" - "46", # "Producer Price Index" - "50", # "Employment Situation" - "53", # "Gross Domestic Product" - "103", # "Discount Rate Meeting Minutes" - "180", # "Unemployment Insurance Weekly Claims Report" - 
"194", # "ADP National Employment Report" - "323" # "Trimmed Mean PCE Inflation Rate" - ] - - release_names = [ - "CPI", - "PPI", - "NFP", - "GDP", - "FOMC", - "UNEMP", - "ADP", - "PCE" - ] - - releases = {} - - for rid, n in tqdm(zip(release_ids, release_names), total = len(release_ids), desc='Getting release dates'): - releases[rid] = {} - releases[rid]['dates'] = parse_release_dates(rid) - releases[rid]['name'] = n - - # Create a DF that has all dates with the name of the col as 1 - # Once merged on the main dataframe, days with econ events will be 1 or None. Fill NA with 0 - # This column serves as the true/false indicator of whether there was economic data released that day. - for rid in tqdm(release_ids, desc='Making indicators'): - releases[rid]['df'] = pd.DataFrame( - index=releases[rid]['dates'], - data={ - releases[rid]['name']: 1 - }) - releases[rid]['df'].index = pd.DatetimeIndex(releases[rid]['df'].index) - # releases[rid]['df']['ds'] = pd.to_datetime(releases[rid]['df']['ds']).dt.tz_localize(None) - # releases[rid]['df'] = releases[rid]['df'].set_index('ds') - - vix = yf.Ticker('^VIX') - spx = yf.Ticker('^GSPC') - - - # Pull in data - data = load_dataset("boomsss/spx_intra", split='train') - - rows = [d['text'] for d in data] - rows = [x.split(',') for x in rows] - - fr = pd.DataFrame(columns=[ - 'Datetime','Open','High','Low','Close' - ], data = rows) - - fr['Datetime'] = pd.to_datetime(fr['Datetime']) - fr['Datetime'] = fr['Datetime'].dt.tz_localize('America/New_York') - fr = fr.set_index('Datetime') - fr['Open'] = pd.to_numeric(fr['Open']) - fr['High'] = pd.to_numeric(fr['High']) - fr['Low'] = pd.to_numeric(fr['Low']) - fr['Close'] = pd.to_numeric(fr['Close']) - - # Get incremental date - last_date = fr.index.date[-1] - last_date = last_date + datetime.timedelta(days=1) - # Get incremental data - spx1 = yf.Ticker('^GSPC') - yfp = spx1.history(start=last_date, interval='30m') - - if len(yfp) > 0: - # Concat current and incremental - df_30m = pd.concat([fr, yfp]) - else: - df_30m = fr.copy() - - # Get the first 30 minute bar - df_30m = df_30m.reset_index() - df_30m['Datetime'] = df_30m['Datetime'].dt.date - df_30m = df_30m.groupby('Datetime').head(2) - df_30m = df_30m.set_index('Datetime',drop=True) - # Rename the columns - df_30m = df_30m[['Open','High','Low','Close']] - - opens_1h = df_30m.groupby('Datetime')['Open'].head(1) - highs_1h = df_30m.groupby('Datetime')['High'].max() - lows_1h = df_30m.groupby('Datetime')['Low'].min() - closes_1h = df_30m.groupby('Datetime')['Close'].tail(1) - - df_1h = pd.DataFrame(index=df_30m.index.unique()) - df_1h['Open'] = opens_1h - df_1h['High'] = highs_1h - df_1h['Low'] = lows_1h - df_1h['Close'] = closes_1h - - df_1h.columns = ['Open30','High30','Low30','Close30'] - - prices_vix = vix.history(start='2018-07-01', interval='1d') - prices_spx = spx.history(start='2018-07-01', interval='1d') - prices_spx['index'] = [str(x).split()[0] for x in prices_spx.index] - prices_spx['index'] = pd.to_datetime(prices_spx['index']).dt.date - prices_spx.index = prices_spx['index'] - prices_spx = prices_spx.drop(columns='index') - prices_spx.index = pd.DatetimeIndex(prices_spx.index) - - - prices_vix['index'] = [str(x).split()[0] for x in prices_vix.index] - prices_vix['index'] = pd.to_datetime(prices_vix['index']).dt.date - prices_vix.index = prices_vix['index'] - prices_vix = prices_vix.drop(columns='index') - prices_vix.index = pd.DatetimeIndex(prices_vix.index) - - - data = prices_spx.merge(df_1h, left_index=True, right_index=True) - data = 
data.merge(prices_vix[['Open','High','Low','Close']], left_index=True, right_index=True, suffixes=['','_VIX']) - - # Features - data['PrevClose'] = data['Close'].shift(1) - data['Perf5Day'] = data['Close'] > data['Close'].shift(5) - data['Perf5Day_n1'] = data['Perf5Day'].shift(1) - data['Perf5Day_n1'] = data['Perf5Day_n1'].astype(bool) - data['GreenDay'] = (data['Close'] > data['PrevClose']) * 1 - data['RedDay'] = (data['Close'] <= data['PrevClose']) * 1 - - data['VIX5Day'] = data['Close_VIX'] > data['Close_VIX'].shift(5) - data['VIX5Day_n1'] = data['VIX5Day'].astype(bool) - - data['Range'] = data[['Open','High']].max(axis=1) - data[['Low','Open']].min(axis=1) # Current day range in points - data['RangePct'] = data['Range'] / data['Close'] - data['VIXLevel'] = pd.qcut(data['Close_VIX'], 4) - data['OHLC4_VIX'] = data[['Open_VIX','High_VIX','Low_VIX','Close_VIX']].mean(axis=1) - data['OHLC4'] = data[['Open','High','Low','Close']].mean(axis=1) - data['OHLC4_Trend'] = data['OHLC4'] > data['OHLC4'].shift(1) - data['OHLC4_Trend_n1'] = data['OHLC4_Trend'].shift(1) - data['OHLC4_Trend_n1'] = data['OHLC4_Trend_n1'].astype(float) - data['OHLC4_Trend_n2'] = data['OHLC4_Trend'].shift(1) - data['OHLC4_Trend_n2'] = data['OHLC4_Trend_n2'].astype(float) - data['RangePct_n1'] = data['RangePct'].shift(1) - data['RangePct_n2'] = data['RangePct'].shift(2) - data['OHLC4_VIX_n1'] = data['OHLC4_VIX'].shift(1) - data['OHLC4_VIX_n2'] = data['OHLC4_VIX'].shift(2) - data['CurrentGap'] = (data['Open'] - data['PrevClose']) / data['PrevClose'] - data['CurrentGapHist'] = data['CurrentGap'].copy() - data['CurrentGap'] = data['CurrentGap'].shift(-1) - data['DayOfWeek'] = pd.to_datetime(data.index) - data['DayOfWeek'] = data['DayOfWeek'].dt.day - - # Calculate up - data['up'] = 100 * (data['High'].shift(1) - data['Open'].shift(1)) / data['Close'].shift(1) - - # Calculate upSD - data['upSD'] = data['up'].rolling(30).std(ddof=0) - - # Calculate aveUp - data['aveUp'] = data['up'].rolling(30).mean() - data['H1'] = data['Open'] + (data['aveUp'] / 100) * data['Open'] - data['H2'] = data['Open'] + ((data['aveUp'] + data['upSD']) / 100) * data['Open'] - data['down'] = 100 * (data['Open'].shift(1) - data['Low'].shift(1)) / data['Close'].shift(1) - data['downSD'] = data['down'].rolling(30).std(ddof=0) - data['aveDown'] = data['down'].rolling(30).mean() - data['L1'] = data['Open'] - (data['aveDown'] / 100) * data['Open'] - data['L2'] = data['Open'] - ((data['aveDown'] + data['upSD']) / 100) * data['Open'] - - data = data.assign( - L1Touch = lambda x: x['Low'] < x['L1'], - L2Touch = lambda x: x['Low'] < x['L2'], - H1Touch = lambda x: x['High'] > x['H1'], - H2Touch = lambda x: x['High'] > x['H2'], - L1Break = lambda x: x['Close'] < x['L1'], - L2Break = lambda x: x['Close'] < x['L2'], - H1Break = lambda x: x['Close'] > x['H1'], - H2Break = lambda x: x['Close'] > x['H2'], - OpenL1 = lambda x: x['Open'] / x['L1'], - OpenL2 = lambda x: x['Open'] / x['L2'], - OpenH1 = lambda x: x['Open'] / x['H1'], - OpenH2 = lambda x: x['Open'] / x['H2'] - ) - - level_cols = [ - 'L1Touch', - 'L2Touch', - 'H1Touch', - 'H2Touch', - 'L1Break', - 'L2Break', - 'H1Break', - 'H2Break' - ] - - for col in level_cols: - data[col+'Pct'] = data[col].rolling(100).mean() - data[col+'Pct'] = data[col+'Pct'].shift(-1) - - # Intraday features - data['CurrentHigh30'] = data['High30'].shift(-1) - data['CurrentLow30'] = data['Low30'].shift(-1) - data['CurrentClose30'] = data['Close30'].shift(-1) - data['HistClose30toPrevClose'] = (data['Close30'] / data['PrevClose']) - 1 - - 
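    # (Editorial note) The Current*30 columns above are shift(-1) copies of the first-hour
    # OHLC, so each row carries the first hour of the session being predicted, while
    # HistClose30toPrevClose keeps the un-shifted value that is used later to build the
    # quantile-based GreenProbas feature.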
# Open to High - data['CurrentHigh30toClose'] = (data['CurrentHigh30'] / data['Close']) - 1 - data['CurrentLow30toClose'] = (data['CurrentLow30'] / data['Close']) - 1 - data['CurrentClose30toClose'] = (data['CurrentClose30'] / data['Close']) - 1 - data['CurrentRange30'] = (data['CurrentHigh30'] - data['CurrentLow30']) / data['Close'] - data['GapFill30'] = [low <= prev_close if gap > 0 else high >= prev_close for high, low, prev_close, gap in zip(data['CurrentHigh30'], data['CurrentLow30'], data['Close'], data['CurrentGap'])] - - # Target -- the next day's low - data['Target'] = (data['OHLC4'] / data['PrevClose']) - 1 - data['Target'] = data['Target'].shift(-1) - # data['Target'] = data['RangePct'].shift(-1) - - # Target for clf -- whether tomorrow will close above or below today's close - data['Target_clf'] = data['Close'] > data['PrevClose'] - data['Target_clf'] = data['Target_clf'].shift(-1) - data['DayOfWeek'] = pd.to_datetime(data.index) - data['Quarter'] = data['DayOfWeek'].dt.quarter - data['DayOfWeek'] = data['DayOfWeek'].dt.weekday - - def get_quintiles(df, col_name, q): - return df.groupby(pd.qcut(df[col_name], q))['GreenDay'].mean() - - probas = [] - for i, pct in enumerate(data['CurrentClose30toClose']): - try: - df_q = get_quintiles(data.iloc[:i], 'HistClose30toPrevClose', 5) - for q in df_q.index: - if q.left <= pct <= q.right: - p = df_q[q] - except: - p = None - - probas.append(p) - - # gapfills = [] - # for i, pct in enumerate(data['CurrentGap']): - # try: - # df_q = get_quintiles(data.iloc[:i], 'CurrentGapHist', 5) - # for q in df_q.index: - # if q.left <= pct <= q.right: - # p = df_q[q] - # except: - # p = None - - # gapfills.append(p) - - data['GreenProbas'] = probas - # data['GapFillGreenProba'] = gapfills - - for rid in tqdm(release_ids, desc='Merging econ data'): - # Get the name of the release - n = releases[rid]['name'] - # Merge the corresponding DF of the release - data = data.merge(releases[rid]['df'], how = 'left', left_index=True, right_index=True) - # Create a column that shifts the value in the merged column up by 1 - data[f'{n}_shift'] = data[n].shift(-1) - # Fill the rest with zeroes - data[n] = data[n].fillna(0) - data[f'{n}_shift'] = data[f'{n}_shift'].fillna(0) - - data['BigNewsDay'] = data[[x for x in data.columns if '_shift' in x]].max(axis=1) - - def cumul_sum(col): - nums = [] - s = 0 - for x in col: - if x == 1: - s += 1 - elif x == 0: - s = 0 - nums.append(s) - return nums - - consec_green = cumul_sum(data['GreenDay'].values) - consec_red = cumul_sum(data['RedDay'].values) - - data['DaysGreen'] = consec_green - data['DaysRed'] = consec_red - - final_row = data.index[-2] - - exp_row = data.index[-1] - - df_final = data.loc[:final_row, model_cols + ['Target','Target_clf']] - df_final = df_final.dropna(subset=['Target','Target_clf','Perf5Day_n1']) - return data, df_final, final_row \ No newline at end of file diff --git a/spaces/bortle/astrophotography-object-classifier/README.md b/spaces/bortle/astrophotography-object-classifier/README.md deleted file mode 100644 index 3fbb5353c411280ea7d4e04bf31b5a8be7462a9e..0000000000000000000000000000000000000000 --- a/spaces/bortle/astrophotography-object-classifier/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Astrophotography Object Classifier Alpha -emoji: 🚀 -colorFrom: purple -colorTo: yellow -sdk: gradio -sdk_version: 3.20.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git 
a/spaces/bradarrML/stablediffusion-infinity/PyPatchMatch/csrc/inpaint.h b/spaces/bradarrML/stablediffusion-infinity/PyPatchMatch/csrc/inpaint.h deleted file mode 100644 index a59b1d347ea5fe92976a4fda10a820d6508f51da..0000000000000000000000000000000000000000 --- a/spaces/bradarrML/stablediffusion-infinity/PyPatchMatch/csrc/inpaint.h +++ /dev/null @@ -1,27 +0,0 @@ -#pragma once - -#include - -#include "masked_image.h" -#include "nnf.h" - -class Inpainting { -public: - Inpainting(cv::Mat image, cv::Mat mask, const PatchDistanceMetric *metric); - Inpainting(cv::Mat image, cv::Mat mask, cv::Mat global_mask, const PatchDistanceMetric *metric); - cv::Mat run(bool verbose = false, bool verbose_visualize = false, unsigned int random_seed = 1212); - -private: - void _initialize_pyramid(void); - MaskedImage _expectation_maximization(MaskedImage source, MaskedImage target, int level, bool verbose); - void _expectation_step(const NearestNeighborField &nnf, bool source2target, cv::Mat &vote, const MaskedImage &source, bool upscaled); - void _maximization_step(MaskedImage &target, const cv::Mat &vote); - - MaskedImage m_initial; - std::vector m_pyramid; - - NearestNeighborField m_source2target; - NearestNeighborField m_target2source; - const PatchDistanceMetric *m_distance_metric; -}; - diff --git a/spaces/breadlicker45/galactica-1.3b-contrastive-sampling/app.py b/spaces/breadlicker45/galactica-1.3b-contrastive-sampling/app.py deleted file mode 100644 index 3f6374536fb651d57c17dd7a1d11af94bb927878..0000000000000000000000000000000000000000 --- a/spaces/breadlicker45/galactica-1.3b-contrastive-sampling/app.py +++ /dev/null @@ -1,50 +0,0 @@ -import gradio as gr -from transformers import pipeline -from transformers import AutoTokenizer, AutoModelForCausalLM - -tokenizer = AutoTokenizer.from_pretrained("facebook/galactica-1.3b") -model = AutoModelForCausalLM.from_pretrained("facebook/galactica-1.3b") -text2text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer, num_workers=2) - -def predict(text, max_length=64, penalty_alpha=0.6, top_k=4): - text = text.strip() - out_text = text2text_generator(text, max_length=max_length, - penalty_alpha=penalty_alpha, - top_k=top_k, - eos_token_id = tokenizer.eos_token_id, - bos_token_id = tokenizer.bos_token_id, - pad_token_id = tokenizer.pad_token_id, - )[0]['generated_text'] - out_text = "
      " + out_text + "" - out_text = out_text.replace(text, text + "") - out_text = out_text + "" - out_text = out_text.replace("\n", "
      ") - return out_text - -iface = gr.Interface( - fn=predict, - inputs=[ - gr.inputs.Textbox(lines=5, label="Input Text"), - gr.inputs.Slider(minimum=32, maximum=64, default=64, label="Max Length"), - gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.6, step=0.1, label="Penalty Alpha"), - # gr.inputs.Checkbox(label="Do Sample"), - gr.inputs.Slider(minimum=0, maximum=16, default=8, step=1, label="Top K") - ], - outputs=gr.HTML(), - description="Galactica Base Model", - examples=[[ - "The attention mechanism in LLM is", - 32, - 0.6, - 4 - ], - [ - "Title: Attention is all you need\n\nAbstract:", - 32, - 0.6, - 4 - ] - ] -) - -iface.launch() \ No newline at end of file diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ.py b/spaces/brjathu/HMR2.0/vendor/detectron2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ.py deleted file mode 100644 index ba2c3274a493d5136507364558c8289eb6ee6259..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ.py +++ /dev/null @@ -1,30 +0,0 @@ -from .mask_rcnn_R_50_FPN_100ep_LSJ import ( - dataloader, - lr_multiplier, - model, - optimizer, - train, -) -from detectron2.config import LazyCall as L -from detectron2.modeling.backbone import RegNet -from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock - -# Config source: -# https://github.com/facebookresearch/detectron2/blob/main/configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py # noqa -model.backbone.bottom_up = L(RegNet)( - stem_class=SimpleStem, - stem_width=32, - block_class=ResBottleneckBlock, - depth=22, - w_a=31.41, - w_0=96, - w_m=2.24, - group_width=64, - se_ratio=0.25, - norm="SyncBN", - out_features=["s1", "s2", "s3", "s4"], -) -model.pixel_std = [57.375, 57.120, 58.395] - -# RegNets benefit from enabling cudnn benchmark mode -train.cudnn_benchmark = True diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/data/detection_utils.py b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/data/detection_utils.py deleted file mode 100644 index ada19bdb4a2aa74874da4dba5d179ce38201c85d..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/data/detection_utils.py +++ /dev/null @@ -1,659 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. - -""" -Common data processing utilities that are used in a -typical object detection data pipeline. -""" -import logging -import numpy as np -from typing import List, Union -import pycocotools.mask as mask_util -import torch -from PIL import Image - -from detectron2.structures import ( - BitMasks, - Boxes, - BoxMode, - Instances, - Keypoints, - PolygonMasks, - RotatedBoxes, - polygons_to_bitmask, -) -from detectron2.utils.file_io import PathManager - -from . import transforms as T -from .catalog import MetadataCatalog - -__all__ = [ - "SizeMismatchError", - "convert_image_to_rgb", - "check_image_size", - "transform_proposals", - "transform_instance_annotations", - "annotations_to_instances", - "annotations_to_instances_rotated", - "build_augmentation", - "build_transform_gen", - "create_keypoint_hflip_indices", - "filter_empty_instances", - "read_image", -] - - -class SizeMismatchError(ValueError): - """ - When loaded image has difference width/height compared with annotation. 
- """ - - -# https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601 -_M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]] -_M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]] - -# https://www.exiv2.org/tags.html -_EXIF_ORIENT = 274 # exif 'Orientation' tag - - -def convert_PIL_to_numpy(image, format): - """ - Convert PIL image to numpy array of target format. - - Args: - image (PIL.Image): a PIL image - format (str): the format of output image - - Returns: - (np.ndarray): also see `read_image` - """ - if format is not None: - # PIL only supports RGB, so convert to RGB and flip channels over below - conversion_format = format - if format in ["BGR", "YUV-BT.601"]: - conversion_format = "RGB" - image = image.convert(conversion_format) - image = np.asarray(image) - # PIL squeezes out the channel dimension for "L", so make it HWC - if format == "L": - image = np.expand_dims(image, -1) - - # handle formats not supported by PIL - elif format == "BGR": - # flip channels if needed - image = image[:, :, ::-1] - elif format == "YUV-BT.601": - image = image / 255.0 - image = np.dot(image, np.array(_M_RGB2YUV).T) - - return image - - -def convert_image_to_rgb(image, format): - """ - Convert an image from given format to RGB. - - Args: - image (np.ndarray or Tensor): an HWC image - format (str): the format of input image, also see `read_image` - - Returns: - (np.ndarray): (H,W,3) RGB image in 0-255 range, can be either float or uint8 - """ - if isinstance(image, torch.Tensor): - image = image.cpu().numpy() - if format == "BGR": - image = image[:, :, [2, 1, 0]] - elif format == "YUV-BT.601": - image = np.dot(image, np.array(_M_YUV2RGB).T) - image = image * 255.0 - else: - if format == "L": - image = image[:, :, 0] - image = image.astype(np.uint8) - image = np.asarray(Image.fromarray(image, mode=format).convert("RGB")) - return image - - -def _apply_exif_orientation(image): - """ - Applies the exif orientation correctly. - - This code exists per the bug: - https://github.com/python-pillow/Pillow/issues/3973 - with the function `ImageOps.exif_transpose`. The Pillow source raises errors with - various methods, especially `tobytes` - - Function based on: - https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59 - https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527 - - Args: - image (PIL.Image): a PIL image - - Returns: - (PIL.Image): the PIL image with exif orientation applied, if applicable - """ - if not hasattr(image, "getexif"): - return image - - try: - exif = image.getexif() - except Exception: # https://github.com/facebookresearch/detectron2/issues/1885 - exif = None - - if exif is None: - return image - - orientation = exif.get(_EXIF_ORIENT) - - method = { - 2: Image.FLIP_LEFT_RIGHT, - 3: Image.ROTATE_180, - 4: Image.FLIP_TOP_BOTTOM, - 5: Image.TRANSPOSE, - 6: Image.ROTATE_270, - 7: Image.TRANSVERSE, - 8: Image.ROTATE_90, - }.get(orientation) - - if method is not None: - return image.transpose(method) - return image - - -def read_image(file_name, format=None): - """ - Read an image into the given format. - Will apply rotation and flipping if the image has such exif information. - - Args: - file_name (str): image file path - format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601". - - Returns: - image (np.ndarray): - an HWC image in the given format, which is 0-255, uint8 for - supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601. 
- """ - with PathManager.open(file_name, "rb") as f: - image = Image.open(f) - - # work around this bug: https://github.com/python-pillow/Pillow/issues/3973 - image = _apply_exif_orientation(image) - return convert_PIL_to_numpy(image, format) - - -def check_image_size(dataset_dict, image): - """ - Raise an error if the image does not match the size specified in the dict. - """ - if "width" in dataset_dict or "height" in dataset_dict: - image_wh = (image.shape[1], image.shape[0]) - expected_wh = (dataset_dict["width"], dataset_dict["height"]) - if not image_wh == expected_wh: - raise SizeMismatchError( - "Mismatched image shape{}, got {}, expect {}.".format( - " for image " + dataset_dict["file_name"] - if "file_name" in dataset_dict - else "", - image_wh, - expected_wh, - ) - + " Please check the width/height in your annotation." - ) - - # To ensure bbox always remap to original image size - if "width" not in dataset_dict: - dataset_dict["width"] = image.shape[1] - if "height" not in dataset_dict: - dataset_dict["height"] = image.shape[0] - - -def transform_proposals(dataset_dict, image_shape, transforms, *, proposal_topk, min_box_size=0): - """ - Apply transformations to the proposals in dataset_dict, if any. - - Args: - dataset_dict (dict): a dict read from the dataset, possibly - contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode" - image_shape (tuple): height, width - transforms (TransformList): - proposal_topk (int): only keep top-K scoring proposals - min_box_size (int): proposals with either side smaller than this - threshold are removed - - The input dict is modified in-place, with abovementioned keys removed. A new - key "proposals" will be added. Its value is an `Instances` - object which contains the transformed proposals in its field - "proposal_boxes" and "objectness_logits". - """ - if "proposal_boxes" in dataset_dict: - # Transform proposal boxes - boxes = transforms.apply_box( - BoxMode.convert( - dataset_dict.pop("proposal_boxes"), - dataset_dict.pop("proposal_bbox_mode"), - BoxMode.XYXY_ABS, - ) - ) - boxes = Boxes(boxes) - objectness_logits = torch.as_tensor( - dataset_dict.pop("proposal_objectness_logits").astype("float32") - ) - - boxes.clip(image_shape) - keep = boxes.nonempty(threshold=min_box_size) - boxes = boxes[keep] - objectness_logits = objectness_logits[keep] - - proposals = Instances(image_shape) - proposals.proposal_boxes = boxes[:proposal_topk] - proposals.objectness_logits = objectness_logits[:proposal_topk] - dataset_dict["proposals"] = proposals - - -def get_bbox(annotation): - """ - Get bbox from data - Args: - annotation (dict): dict of instance annotations for a single instance. - Returns: - bbox (ndarray): x1, y1, x2, y2 coordinates - """ - # bbox is 1d (per-instance bounding box) - bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS) - return bbox - - -def transform_instance_annotations( - annotation, transforms, image_size, *, keypoint_hflip_indices=None -): - """ - Apply transforms to box, segmentation and keypoints annotations of a single instance. - - It will use `transforms.apply_box` for the box, and - `transforms.apply_coords` for segmentation polygons & keypoints. - If you need anything more specially designed for each data structure, - you'll need to implement your own version of this function or the transforms. - - Args: - annotation (dict): dict of instance annotations for a single instance. - It will be modified in-place. 
- transforms (TransformList or list[Transform]): - image_size (tuple): the height, width of the transformed image - keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`. - - Returns: - dict: - the same input dict with fields "bbox", "segmentation", "keypoints" - transformed according to `transforms`. - The "bbox_mode" field will be set to XYXY_ABS. - """ - if isinstance(transforms, (tuple, list)): - transforms = T.TransformList(transforms) - # bbox is 1d (per-instance bounding box) - bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS) - # clip transformed bbox to image size - bbox = transforms.apply_box(np.array([bbox]))[0].clip(min=0) - annotation["bbox"] = np.minimum(bbox, list(image_size + image_size)[::-1]) - annotation["bbox_mode"] = BoxMode.XYXY_ABS - - if "segmentation" in annotation: - # each instance contains 1 or more polygons - segm = annotation["segmentation"] - if isinstance(segm, list): - # polygons - polygons = [np.asarray(p).reshape(-1, 2) for p in segm] - annotation["segmentation"] = [ - p.reshape(-1) for p in transforms.apply_polygons(polygons) - ] - elif isinstance(segm, dict): - # RLE - mask = mask_util.decode(segm) - mask = transforms.apply_segmentation(mask) - assert tuple(mask.shape[:2]) == image_size - annotation["segmentation"] = mask - else: - raise ValueError( - "Cannot transform segmentation of type '{}'!" - "Supported types are: polygons as list[list[float] or ndarray]," - " COCO-style RLE as a dict.".format(type(segm)) - ) - - if "keypoints" in annotation: - keypoints = transform_keypoint_annotations( - annotation["keypoints"], transforms, image_size, keypoint_hflip_indices - ) - annotation["keypoints"] = keypoints - - return annotation - - -def transform_keypoint_annotations(keypoints, transforms, image_size, keypoint_hflip_indices=None): - """ - Transform keypoint annotations of an image. - If a keypoint is transformed out of image boundary, it will be marked "unlabeled" (visibility=0) - - Args: - keypoints (list[float]): Nx3 float in Detectron2's Dataset format. - Each point is represented by (x, y, visibility). - transforms (TransformList): - image_size (tuple): the height, width of the transformed image - keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`. - When `transforms` includes horizontal flip, will use the index - mapping to flip keypoints. - """ - # (N*3,) -> (N, 3) - keypoints = np.asarray(keypoints, dtype="float64").reshape(-1, 3) - keypoints_xy = transforms.apply_coords(keypoints[:, :2]) - - # Set all out-of-boundary points to "unlabeled" - inside = (keypoints_xy >= np.array([0, 0])) & (keypoints_xy <= np.array(image_size[::-1])) - inside = inside.all(axis=1) - keypoints[:, :2] = keypoints_xy - keypoints[:, 2][~inside] = 0 - - # This assumes that HorizFlipTransform is the only one that does flip - do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1 - - # Alternative way: check if probe points was horizontally flipped. 
- # probe = np.asarray([[0.0, 0.0], [image_width, 0.0]]) - # probe_aug = transforms.apply_coords(probe.copy()) - # do_hflip = np.sign(probe[1][0] - probe[0][0]) != np.sign(probe_aug[1][0] - probe_aug[0][0]) # noqa - - # If flipped, swap each keypoint with its opposite-handed equivalent - if do_hflip: - if keypoint_hflip_indices is None: - raise ValueError("Cannot flip keypoints without providing flip indices!") - if len(keypoints) != len(keypoint_hflip_indices): - raise ValueError( - "Keypoint data has {} points, but metadata " - "contains {} points!".format(len(keypoints), len(keypoint_hflip_indices)) - ) - keypoints = keypoints[np.asarray(keypoint_hflip_indices, dtype=np.int32), :] - - # Maintain COCO convention that if visibility == 0 (unlabeled), then x, y = 0 - keypoints[keypoints[:, 2] == 0] = 0 - return keypoints - - -def annotations_to_instances(annos, image_size, mask_format="polygon"): - """ - Create an :class:`Instances` object used by the models, - from instance annotations in the dataset dict. - - Args: - annos (list[dict]): a list of instance annotations in one image, each - element for one instance. - image_size (tuple): height, width - - Returns: - Instances: - It will contain fields "gt_boxes", "gt_classes", - "gt_masks", "gt_keypoints", if they can be obtained from `annos`. - This is the format that builtin models expect. - """ - boxes = ( - np.stack( - [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos] - ) - if len(annos) - else np.zeros((0, 4)) - ) - target = Instances(image_size) - target.gt_boxes = Boxes(boxes) - - classes = [int(obj["category_id"]) for obj in annos] - classes = torch.tensor(classes, dtype=torch.int64) - target.gt_classes = classes - - if len(annos) and "segmentation" in annos[0]: - segms = [obj["segmentation"] for obj in annos] - if mask_format == "polygon": - try: - masks = PolygonMasks(segms) - except ValueError as e: - raise ValueError( - "Failed to use mask_format=='polygon' from the given annotations!" - ) from e - else: - assert mask_format == "bitmask", mask_format - masks = [] - for segm in segms: - if isinstance(segm, list): - # polygon - masks.append(polygons_to_bitmask(segm, *image_size)) - elif isinstance(segm, dict): - # COCO RLE - masks.append(mask_util.decode(segm)) - elif isinstance(segm, np.ndarray): - assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format( - segm.ndim - ) - # mask array - masks.append(segm) - else: - raise ValueError( - "Cannot convert segmentation of type '{}' to BitMasks!" - "Supported types are: polygons as list[list[float] or ndarray]," - " COCO-style RLE as a dict, or a binary segmentation mask " - " in a 2D numpy array of shape HxW.".format(type(segm)) - ) - # torch.from_numpy does not support array with negative stride. - masks = BitMasks( - torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks]) - ) - target.gt_masks = masks - - if len(annos) and "keypoints" in annos[0]: - kpts = [obj.get("keypoints", []) for obj in annos] - target.gt_keypoints = Keypoints(kpts) - - return target - - -def annotations_to_instances_rotated(annos, image_size): - """ - Create an :class:`Instances` object used by the models, - from instance annotations in the dataset dict. - Compared to `annotations_to_instances`, this function is for rotated boxes only - - Args: - annos (list[dict]): a list of instance annotations in one image, each - element for one instance. 
- image_size (tuple): height, width - - Returns: - Instances: - Containing fields "gt_boxes", "gt_classes", - if they can be obtained from `annos`. - This is the format that builtin models expect. - """ - boxes = [obj["bbox"] for obj in annos] - target = Instances(image_size) - boxes = target.gt_boxes = RotatedBoxes(boxes) - boxes.clip(image_size) - - classes = [obj["category_id"] for obj in annos] - classes = torch.tensor(classes, dtype=torch.int64) - target.gt_classes = classes - - return target - - -def filter_empty_instances( - instances, by_box=True, by_mask=True, box_threshold=1e-5, return_mask=False -): - """ - Filter out empty instances in an `Instances` object. - - Args: - instances (Instances): - by_box (bool): whether to filter out instances with empty boxes - by_mask (bool): whether to filter out instances with empty masks - box_threshold (float): minimum width and height to be considered non-empty - return_mask (bool): whether to return boolean mask of filtered instances - - Returns: - Instances: the filtered instances. - tensor[bool], optional: boolean mask of filtered instances - """ - assert by_box or by_mask - r = [] - if by_box: - r.append(instances.gt_boxes.nonempty(threshold=box_threshold)) - if instances.has("gt_masks") and by_mask: - r.append(instances.gt_masks.nonempty()) - - # TODO: can also filter visible keypoints - - if not r: - return instances - m = r[0] - for x in r[1:]: - m = m & x - if return_mask: - return instances[m], m - return instances[m] - - -def create_keypoint_hflip_indices(dataset_names: Union[str, List[str]]) -> List[int]: - """ - Args: - dataset_names: list of dataset names - - Returns: - list[int]: a list of size=#keypoints, storing the - horizontally-flipped keypoint indices. - """ - if isinstance(dataset_names, str): - dataset_names = [dataset_names] - - check_metadata_consistency("keypoint_names", dataset_names) - check_metadata_consistency("keypoint_flip_map", dataset_names) - - meta = MetadataCatalog.get(dataset_names[0]) - names = meta.keypoint_names - # TODO flip -> hflip - flip_map = dict(meta.keypoint_flip_map) - flip_map.update({v: k for k, v in flip_map.items()}) - flipped_names = [i if i not in flip_map else flip_map[i] for i in names] - flip_indices = [names.index(i) for i in flipped_names] - return flip_indices - - -def get_fed_loss_cls_weights(dataset_names: Union[str, List[str]], freq_weight_power=1.0): - """ - Get frequency weight for each class sorted by class id. - We now calcualte freqency weight using image_count to the power freq_weight_power. - - Args: - dataset_names: list of dataset names - freq_weight_power: power value - """ - if isinstance(dataset_names, str): - dataset_names = [dataset_names] - - check_metadata_consistency("class_image_count", dataset_names) - - meta = MetadataCatalog.get(dataset_names[0]) - class_freq_meta = meta.class_image_count - class_freq = torch.tensor( - [c["image_count"] for c in sorted(class_freq_meta, key=lambda x: x["id"])] - ) - class_freq_weight = class_freq.float() ** freq_weight_power - return class_freq_weight - - -def gen_crop_transform_with_instance(crop_size, image_size, instance): - """ - Generate a CropTransform so that the cropping region contains - the center of the given instance. - - Args: - crop_size (tuple): h, w in pixels - image_size (tuple): h, w - instance (dict): an annotation dict of one instance, in Detectron2's - dataset format. 
- """ - crop_size = np.asarray(crop_size, dtype=np.int32) - bbox = BoxMode.convert(instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS) - center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5 - assert ( - image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1] - ), "The annotation bounding box is outside of the image!" - assert ( - image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1] - ), "Crop size is larger than image size!" - - min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0) - max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0) - max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32)) - - y0 = np.random.randint(min_yx[0], max_yx[0] + 1) - x0 = np.random.randint(min_yx[1], max_yx[1] + 1) - return T.CropTransform(x0, y0, crop_size[1], crop_size[0]) - - -def check_metadata_consistency(key, dataset_names): - """ - Check that the datasets have consistent metadata. - - Args: - key (str): a metadata key - dataset_names (list[str]): a list of dataset names - - Raises: - AttributeError: if the key does not exist in the metadata - ValueError: if the given datasets do not have the same metadata values defined by key - """ - if len(dataset_names) == 0: - return - logger = logging.getLogger(__name__) - entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names] - for idx, entry in enumerate(entries_per_dataset): - if entry != entries_per_dataset[0]: - logger.error( - "Metadata '{}' for dataset '{}' is '{}'".format(key, dataset_names[idx], str(entry)) - ) - logger.error( - "Metadata '{}' for dataset '{}' is '{}'".format( - key, dataset_names[0], str(entries_per_dataset[0]) - ) - ) - raise ValueError("Datasets have different metadata '{}'!".format(key)) - - -def build_augmentation(cfg, is_train): - """ - Create a list of default :class:`Augmentation` from config. - Now it includes resizing and flipping. - - Returns: - list[Augmentation] - """ - if is_train: - min_size = cfg.INPUT.MIN_SIZE_TRAIN - max_size = cfg.INPUT.MAX_SIZE_TRAIN - sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING - else: - min_size = cfg.INPUT.MIN_SIZE_TEST - max_size = cfg.INPUT.MAX_SIZE_TEST - sample_style = "choice" - augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)] - if is_train and cfg.INPUT.RANDOM_FLIP != "none": - augmentation.append( - T.RandomFlip( - horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal", - vertical=cfg.INPUT.RANDOM_FLIP == "vertical", - ) - ) - return augmentation - - -build_transform_gen = build_augmentation -""" -Alias for backward-compatibility. -""" diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/tests/data/test_transforms.py b/spaces/brjathu/HMR2.0/vendor/detectron2/tests/data/test_transforms.py deleted file mode 100644 index 382048e533708dec3fabf89528564ebc2ad4c83f..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/tests/data/test_transforms.py +++ /dev/null @@ -1,268 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. - -import logging -import numpy as np -import unittest -from unittest import mock -import torch -from PIL import Image, ImageOps -from torch.nn import functional as F - -from detectron2.config import get_cfg -from detectron2.data import detection_utils -from detectron2.data import transforms as T -from detectron2.utils.logger import setup_logger - -logger = logging.getLogger(__name__) - - -def polygon_allclose(poly1, poly2): - """ - Test whether two polygons are the same. 
- Both arguments are nx2 numpy arrays. - """ - # ABCD and CDAB are the same polygon. So it's important to check after rolling - for k in range(len(poly1)): - rolled_poly1 = np.roll(poly1, k, axis=0) - if np.allclose(rolled_poly1, poly2): - return True - return False - - -class TestTransforms(unittest.TestCase): - def setUp(self): - setup_logger() - - def test_apply_rotated_boxes(self): - np.random.seed(125) - cfg = get_cfg() - is_train = True - augs = detection_utils.build_augmentation(cfg, is_train) - image = np.random.rand(200, 300) - image, transforms = T.apply_augmentations(augs, image) - image_shape = image.shape[:2] # h, w - assert image_shape == (800, 1200) - annotation = {"bbox": [179, 97, 62, 40, -56]} - - boxes = np.array([annotation["bbox"]], dtype=np.float64) # boxes.shape = (1, 5) - transformed_bbox = transforms.apply_rotated_box(boxes)[0] - - expected_bbox = np.array([484, 388, 248, 160, 56], dtype=np.float64) - err_msg = "transformed_bbox = {}, expected {}".format(transformed_bbox, expected_bbox) - assert np.allclose(transformed_bbox, expected_bbox), err_msg - - def test_resize_and_crop(self): - np.random.seed(125) - min_scale = 0.2 - max_scale = 2.0 - target_height = 1100 - target_width = 1000 - resize_aug = T.ResizeScale(min_scale, max_scale, target_height, target_width) - fixed_size_crop_aug = T.FixedSizeCrop((target_height, target_width)) - hflip_aug = T.RandomFlip() - augs = [resize_aug, fixed_size_crop_aug, hflip_aug] - original_image = np.random.rand(900, 800) - image, transforms = T.apply_augmentations(augs, original_image) - image_shape = image.shape[:2] # h, w - self.assertEqual((1100, 1000), image_shape) - - boxes = np.array( - [[91, 46, 144, 111], [523, 251, 614, 295]], - dtype=np.float64, - ) - transformed_bboxs = transforms.apply_box(boxes) - expected_bboxs = np.array( - [ - [895.42, 33.42666667, 933.91125, 80.66], - [554.0825, 182.39333333, 620.17125, 214.36666667], - ], - dtype=np.float64, - ) - err_msg = "transformed_bbox = {}, expected {}".format(transformed_bboxs, expected_bboxs) - self.assertTrue(np.allclose(transformed_bboxs, expected_bboxs), err_msg) - - polygon = np.array([[91, 46], [144, 46], [144, 111], [91, 111]]) - transformed_polygons = transforms.apply_polygons([polygon]) - expected_polygon = np.array([[934.0, 33.0], [934.0, 80.0], [896.0, 80.0], [896.0, 33.0]]) - self.assertEqual(1, len(transformed_polygons)) - err_msg = "transformed_polygon = {}, expected {}".format( - transformed_polygons[0], expected_polygon - ) - self.assertTrue(polygon_allclose(transformed_polygons[0], expected_polygon), err_msg) - - def test_apply_rotated_boxes_unequal_scaling_factor(self): - np.random.seed(125) - h, w = 400, 200 - newh, neww = 800, 800 - image = np.random.rand(h, w) - augs = [] - augs.append(T.Resize(shape=(newh, neww))) - image, transforms = T.apply_augmentations(augs, image) - image_shape = image.shape[:2] # h, w - assert image_shape == (newh, neww) - - boxes = np.array( - [ - [150, 100, 40, 20, 0], - [150, 100, 40, 20, 30], - [150, 100, 40, 20, 90], - [150, 100, 40, 20, -90], - ], - dtype=np.float64, - ) - transformed_boxes = transforms.apply_rotated_box(boxes) - - expected_bboxes = np.array( - [ - [600, 200, 160, 40, 0], - [600, 200, 144.22205102, 52.91502622, 49.10660535], - [600, 200, 80, 80, 90], - [600, 200, 80, 80, -90], - ], - dtype=np.float64, - ) - err_msg = "transformed_boxes = {}, expected {}".format(transformed_boxes, expected_bboxes) - assert np.allclose(transformed_boxes, expected_bboxes), err_msg - - def test_print_augmentation(self): - 
t = T.RandomCrop("relative", (100, 100)) - self.assertEqual(str(t), "RandomCrop(crop_type='relative', crop_size=(100, 100))") - - t0 = T.RandomFlip(prob=0.5) - self.assertEqual(str(t0), "RandomFlip(prob=0.5)") - - t1 = T.RandomFlip() - self.assertEqual(str(t1), "RandomFlip()") - - t = T.AugmentationList([t0, t1]) - self.assertEqual(str(t), f"AugmentationList[{t0}, {t1}]") - - def test_random_apply_prob_out_of_range_check(self): - test_probabilities = {0.0: True, 0.5: True, 1.0: True, -0.01: False, 1.01: False} - - for given_probability, is_valid in test_probabilities.items(): - if not is_valid: - self.assertRaises(AssertionError, T.RandomApply, None, prob=given_probability) - else: - T.RandomApply(T.NoOpTransform(), prob=given_probability) - - def test_random_apply_wrapping_aug_probability_occured_evaluation(self): - transform_mock = mock.MagicMock(name="MockTransform", spec=T.Augmentation) - image_mock = mock.MagicMock(name="MockImage") - random_apply = T.RandomApply(transform_mock, prob=0.001) - - with mock.patch.object(random_apply, "_rand_range", return_value=0.0001): - transform = random_apply.get_transform(image_mock) - transform_mock.get_transform.assert_called_once_with(image_mock) - self.assertIsNot(transform, transform_mock) - - def test_random_apply_wrapping_std_transform_probability_occured_evaluation(self): - transform_mock = mock.MagicMock(name="MockTransform", spec=T.Transform) - image_mock = mock.MagicMock(name="MockImage") - random_apply = T.RandomApply(transform_mock, prob=0.001) - - with mock.patch.object(random_apply, "_rand_range", return_value=0.0001): - transform = random_apply.get_transform(image_mock) - self.assertIs(transform, transform_mock) - - def test_random_apply_probability_not_occured_evaluation(self): - transform_mock = mock.MagicMock(name="MockTransform", spec=T.Augmentation) - image_mock = mock.MagicMock(name="MockImage") - random_apply = T.RandomApply(transform_mock, prob=0.001) - - with mock.patch.object(random_apply, "_rand_range", return_value=0.9): - transform = random_apply.get_transform(image_mock) - transform_mock.get_transform.assert_not_called() - self.assertIsInstance(transform, T.NoOpTransform) - - def test_augmentation_input_args(self): - input_shape = (100, 100) - output_shape = (50, 50) - - # define two augmentations with different args - class TG1(T.Augmentation): - def get_transform(self, image, sem_seg): - return T.ResizeTransform( - input_shape[0], input_shape[1], output_shape[0], output_shape[1] - ) - - class TG2(T.Augmentation): - def get_transform(self, image): - assert image.shape[:2] == output_shape # check that TG1 is applied - return T.HFlipTransform(output_shape[1]) - - image = np.random.rand(*input_shape).astype("float32") - sem_seg = (np.random.rand(*input_shape) < 0.5).astype("uint8") - inputs = T.AugInput(image, sem_seg=sem_seg) # provide two args - tfms = inputs.apply_augmentations([TG1(), TG2()]) - self.assertIsInstance(tfms[0], T.ResizeTransform) - self.assertIsInstance(tfms[1], T.HFlipTransform) - self.assertTrue(inputs.image.shape[:2] == output_shape) - self.assertTrue(inputs.sem_seg.shape[:2] == output_shape) - - class TG3(T.Augmentation): - def get_transform(self, image, nonexist): - pass - - with self.assertRaises(AttributeError): - inputs.apply_augmentations([TG3()]) - - def test_augmentation_list(self): - input_shape = (100, 100) - image = np.random.rand(*input_shape).astype("float32") - sem_seg = (np.random.rand(*input_shape) < 0.5).astype("uint8") - inputs = T.AugInput(image, sem_seg=sem_seg) # provide two args 
- - augs = T.AugmentationList([T.RandomFlip(), T.Resize(20)]) - _ = T.AugmentationList([augs, T.Resize(30)])(inputs) - # 3 in latest fvcore (flattened transformlist), 2 in older - # self.assertEqual(len(tfms), 3) - - def test_color_transforms(self): - rand_img = np.random.random((100, 100, 3)) * 255 - rand_img = rand_img.astype("uint8") - - # Test no-op - noop_transform = T.ColorTransform(lambda img: img) - self.assertTrue(np.array_equal(rand_img, noop_transform.apply_image(rand_img))) - - # Test a ImageOps operation - magnitude = np.random.randint(0, 256) - solarize_transform = T.PILColorTransform(lambda img: ImageOps.solarize(img, magnitude)) - expected_img = ImageOps.solarize(Image.fromarray(rand_img), magnitude) - self.assertTrue(np.array_equal(expected_img, solarize_transform.apply_image(rand_img))) - - def test_resize_transform(self): - input_shapes = [(100, 100), (100, 100, 1), (100, 100, 3)] - output_shapes = [(200, 200), (200, 200, 1), (200, 200, 3)] - for in_shape, out_shape in zip(input_shapes, output_shapes): - in_img = np.random.randint(0, 255, size=in_shape, dtype=np.uint8) - tfm = T.ResizeTransform(in_shape[0], in_shape[1], out_shape[0], out_shape[1]) - out_img = tfm.apply_image(in_img) - self.assertEqual(out_img.shape, out_shape) - - def test_resize_shorted_edge_scriptable(self): - def f(image): - newh, neww = T.ResizeShortestEdge.get_output_shape( - image.shape[-2], image.shape[-1], 80, 133 - ) - return F.interpolate(image.unsqueeze(0), size=(newh, neww)) - - input = torch.randn(3, 10, 10) - script_f = torch.jit.script(f) - self.assertTrue(torch.allclose(f(input), script_f(input))) - - # generalize to new shapes - input = torch.randn(3, 8, 100) - self.assertTrue(torch.allclose(f(input), script_f(input))) - - def test_extent_transform(self): - input_shapes = [(100, 100), (100, 100, 1), (100, 100, 3)] - src_rect = (20, 20, 80, 80) - output_shapes = [(200, 200), (200, 200, 1), (200, 200, 3)] - for in_shape, out_shape in zip(input_shapes, output_shapes): - in_img = np.random.randint(0, 255, size=in_shape, dtype=np.uint8) - tfm = T.ExtentTransform(src_rect, out_shape[:2]) - out_img = tfm.apply_image(in_img) - self.assertTrue(out_img.shape == out_shape) diff --git a/spaces/brjathu/HMR2.0/vendor/pyrender/tests/unit/test_nodes.py b/spaces/brjathu/HMR2.0/vendor/pyrender/tests/unit/test_nodes.py deleted file mode 100644 index 9857c8221b7f6fb8530699bdf5593f8f0b74e152..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/pyrender/tests/unit/test_nodes.py +++ /dev/null @@ -1,124 +0,0 @@ -import numpy as np -import pytest -from trimesh import transformations - -from pyrender import (DirectionalLight, PerspectiveCamera, Mesh, Node) - - -def test_nodes(): - - x = Node() - assert x.name is None - assert x.camera is None - assert x.children == [] - assert x.skin is None - assert np.allclose(x.matrix, np.eye(4)) - assert x.mesh is None - assert np.allclose(x.rotation, [0,0,0,1]) - assert np.allclose(x.scale, np.ones(3)) - assert np.allclose(x.translation, np.zeros(3)) - assert x.weights is None - assert x.light is None - - x.name = 'node' - - # Test node light/camera/mesh tests - c = PerspectiveCamera(yfov=2.0) - m = Mesh([]) - d = DirectionalLight() - x.camera = c - assert x.camera == c - with pytest.raises(TypeError): - x.camera = m - x.camera = d - x.camera = None - x.mesh = m - assert x.mesh == m - with pytest.raises(TypeError): - x.mesh = c - x.mesh = d - x.light = d - assert x.light == d - with pytest.raises(TypeError): - x.light = m - x.light = c - - # Test 
transformations getters/setters/etc... - # Set up test values - x = np.array([1.0, 0.0, 0.0]) - y = np.array([0.0, 1.0, 0.0]) - t = np.array([1.0, 2.0, 3.0]) - s = np.array([0.5, 2.0, 1.0]) - - Mx = transformations.rotation_matrix(np.pi / 2.0, x) - qx = np.roll(transformations.quaternion_about_axis(np.pi / 2.0, x), -1) - Mxt = Mx.copy() - Mxt[:3,3] = t - S = np.eye(4) - S[:3,:3] = np.diag(s) - Mxts = Mxt.dot(S) - - My = transformations.rotation_matrix(np.pi / 2.0, y) - qy = np.roll(transformations.quaternion_about_axis(np.pi / 2.0, y), -1) - Myt = My.copy() - Myt[:3,3] = t - - x = Node(matrix=Mx) - assert np.allclose(x.matrix, Mx) - assert np.allclose(x.rotation, qx) - assert np.allclose(x.translation, np.zeros(3)) - assert np.allclose(x.scale, np.ones(3)) - - x.matrix = My - assert np.allclose(x.matrix, My) - assert np.allclose(x.rotation, qy) - assert np.allclose(x.translation, np.zeros(3)) - assert np.allclose(x.scale, np.ones(3)) - x.translation = t - assert np.allclose(x.matrix, Myt) - assert np.allclose(x.rotation, qy) - x.rotation = qx - assert np.allclose(x.matrix, Mxt) - x.scale = s - assert np.allclose(x.matrix, Mxts) - - x = Node(matrix=Mxt) - assert np.allclose(x.matrix, Mxt) - assert np.allclose(x.rotation, qx) - assert np.allclose(x.translation, t) - assert np.allclose(x.scale, np.ones(3)) - - x = Node(matrix=Mxts) - assert np.allclose(x.matrix, Mxts) - assert np.allclose(x.rotation, qx) - assert np.allclose(x.translation, t) - assert np.allclose(x.scale, s) - - # Individual element getters - x.scale[0] = 0 - assert np.allclose(x.scale[0], 0) - - x.translation[0] = 0 - assert np.allclose(x.translation[0], 0) - - x.matrix = np.eye(4) - x.matrix[0,0] = 500 - assert x.matrix[0,0] == 1.0 - - # Failures - with pytest.raises(ValueError): - x.matrix = 5 * np.eye(4) - with pytest.raises(ValueError): - x.matrix = np.eye(5) - with pytest.raises(ValueError): - x.matrix = np.eye(4).dot([5,1,1,1]) - with pytest.raises(ValueError): - x.rotation = np.array([1,2]) - with pytest.raises(ValueError): - x.rotation = np.array([1,2,3]) - with pytest.raises(ValueError): - x.rotation = np.array([1,2,3,4]) - with pytest.raises(ValueError): - x.translation = np.array([1,2,3,4]) - with pytest.raises(ValueError): - x.scale = np.array([1,2,3,4]) diff --git a/spaces/butterswords/nlc-explorer/.ipynb_checkpoints/WNgen-checkpoint.py b/spaces/butterswords/nlc-explorer/.ipynb_checkpoints/WNgen-checkpoint.py deleted file mode 100644 index 30fdfcfcfd2f02c784d30e830bd674fb8f2bb177..0000000000000000000000000000000000000000 --- a/spaces/butterswords/nlc-explorer/.ipynb_checkpoints/WNgen-checkpoint.py +++ /dev/null @@ -1,313 +0,0 @@ -#Import necessary libraries. -import re, nltk, pandas as pd, numpy as np, ssl, streamlit as st -from nltk.corpus import wordnet -import spacy -nlp = spacy.load("en_core_web_lg") - -#Import necessary parts for predicting things. -from transformers import AutoTokenizer, AutoModelForSequenceClassification, TextClassificationPipeline -import torch -import torch.nn.functional as F -tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english") -model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english") -pipe = TextClassificationPipeline(model=model, tokenizer=tokenizer, return_all_scores=True) - -'''#If an error is thrown that the corpus "omw-1.4" isn't discoverable you can use this code. 
(https://stackoverflow.com/questions/38916452/nltk-download-ssl-certificate-verify-failed) -try: - _create_unverified_https_context = ssl._create_unverified_context -except AttributeError: - pass -else: - ssl._create_default_https_context = _create_unverified_https_context - -nltk.download('omw-1.4')''' - -# A simple function to pull synonyms and antonyms using spacy's POS -def syn_ant(word,POS=False,human=True): - pos_options = ['NOUN','VERB','ADJ','ADV'] - synonyms = [] - antonyms = [] - #WordNet hates spaces so you have to remove them - if " " in word: - word = word.replace(" ", "_") - - if POS in pos_options: - for syn in wordnet.synsets(word, pos=getattr(wordnet, POS)): - for l in syn.lemmas(): - current = l.name() - if human: - current = re.sub("_"," ",current) - synonyms.append(current) - if l.antonyms(): - for ant in l.antonyms(): - cur_ant = ant.name() - if human: - cur_ant = re.sub("_"," ",cur_ant) - antonyms.append(cur_ant) - else: - for syn in wordnet.synsets(word): - for l in syn.lemmas(): - current = l.name() - if human: - current = re.sub("_"," ",current) - synonyms.append(current) - if l.antonyms(): - for ant in l.antonyms(): - cur_ant = ant.name() - if human: - cur_ant = re.sub("_"," ",cur_ant) - antonyms.append(cur_ant) - synonyms = list(set(synonyms)) - antonyms = list(set(antonyms)) - return synonyms, antonyms - -def process_text(text): - doc = nlp(text.lower()) - result = [] - for token in doc: - if (token.is_stop) or (token.is_punct) or (token.lemma_ == '-PRON-'): - continue - result.append(token.lemma_) - return " ".join(result) - -def clean_definition(syn): - #This function removes stop words from sentences to improve on document level similarity for differentiation. - if type(syn) is str: - synset = wordnet.synset(syn).definition() - elif type(syn) is nltk.corpus.reader.wordnet.Synset: - synset = syn.definition() - definition = nlp(process_text(synset)) - return definition - -def check_sim(a,b): - if type(a) is str and type(b) is str: - a = nlp(a) - b = nlp(b) - similarity = a.similarity(b) - return similarity - -# Builds a dataframe dynamically from WordNet using NLTK. -def wordnet_df(word,POS=False,seed_definition=None): - pos_options = ['NOUN','VERB','ADJ','ADV'] - synonyms, antonyms = syn_ant(word,POS,False) - #print(synonyms, antonyms) #for QA purposes - words = [] - cats = [] - #WordNet hates spaces so you have to remove them - m_word = word.replace(" ", "_") - - #Allow the user to pick a seed definition if it is not provided directly to the function. Currently not working so it's commented out. - '''#commented out the way it was designed to allow for me to do it through Streamlit (keeping it for posterity, and for anyone who wants to use it without streamlit.) 
- for d in range(len(seed_definitions)): - print(f"{d}: {seed_definitions[d]}") - #choice = int(input("Which of the definitions above most aligns to your selection?")) - seed_definition = seed_definitions[choice]''' - try: - definition = seed_definition - except: - st.write("You did not supply a definition.") - - if POS in pos_options: - for syn in wordnet.synsets(m_word, pos=getattr(wordnet, POS)): - if check_sim(process_text(seed_definition),process_text(syn.definition())) > .7: - cur_lemmas = syn.lemmas() - hypos = syn.hyponyms() - for hypo in hypos: - cur_lemmas.extend(hypo.lemmas()) - for lemma in cur_lemmas: - ll = lemma.name() - cats.append(re.sub("_"," ", syn.name().split(".")[0])) - words.append(re.sub("_"," ",ll)) - - if len(synonyms) > 0: - for w in synonyms: - w = w.replace(" ","_") - for syn in wordnet.synsets(w, pos=getattr(wordnet, POS)): - if check_sim(process_text(seed_definition),process_text(syn.definition())) > .6: - cur_lemmas = syn.lemmas() - hypos = syn.hyponyms() - for hypo in hypos: - cur_lemmas.extend(hypo.lemmas()) - for lemma in cur_lemmas: - ll = lemma.name() - cats.append(re.sub("_"," ", syn.name().split(".")[0])) - words.append(re.sub("_"," ",ll)) - if len(antonyms) > 0: - for a in antonyms: - a = a.replace(" ","_") - for syn in wordnet.synsets(a, pos=getattr(wordnet, POS)): - if check_sim(process_text(seed_definition),process_text(syn.definition())) > .26: - cur_lemmas = syn.lemmas() - hypos = syn.hyponyms() - for hypo in hypos: - cur_lemmas.extend(hypo.lemmas()) - for lemma in cur_lemmas: - ll = lemma.name() - cats.append(re.sub("_"," ", syn.name().split(".")[0])) - words.append(re.sub("_"," ",ll)) - else: - for syn in wordnet.synsets(m_word): - if check_sim(process_text(seed_definition),process_text(syn.definition())) > .7: - cur_lemmas = syn.lemmas() - hypos = syn.hyponyms() - for hypo in hypos: - cur_lemmas.extend(hypo.lemmas()) - for lemma in cur_lemmas: - ll = lemma.name() - cats.append(re.sub("_"," ", syn.name().split(".")[0])) - words.append(re.sub("_"," ",ll)) - if len(synonyms) > 0: - for w in synonyms: - w = w.replace(" ","_") - for syn in wordnet.synsets(w): - if check_sim(process_text(seed_definition),process_text(syn.definition())) > .6: - cur_lemmas = syn.lemmas() - hypos = syn.hyponyms() - for hypo in hypos: - cur_lemmas.extend(hypo.lemmas()) - for lemma in cur_lemmas: - ll = lemma.name() - cats.append(re.sub("_"," ", syn.name().split(".")[0])) - words.append(re.sub("_"," ",ll)) - if len(antonyms) > 0: - for a in antonyms: - a = a.replace(" ","_") - for syn in wordnet.synsets(a): - if check_sim(process_text(seed_definition),process_text(syn.definition())) > .26: - cur_lemmas = syn.lemmas() - hypos = syn.hyponyms() - for hypo in hypos: - cur_lemmas.extend(hypo.lemmas()) - for lemma in cur_lemmas: - ll = lemma.name() - cats.append(re.sub("_"," ", syn.name().split(".")[0])) - words.append(re.sub("_"," ",ll)) - - df = {"Categories":cats, "Words":words} - df = pd.DataFrame(df) - df = df.drop_duplicates().reset_index() - df = df.drop("index", axis=1) - return df - -def eval_pred_test(text, return_all = False): - '''A basic function for evaluating the prediction from the model and turning it into a visualization friendly number.''' - preds = pipe(text) - neg_score = -1 * preds[0][0]['score'] - sent_neg = preds[0][0]['label'] - pos_score = preds[0][1]['score'] - sent_pos = preds[0][1]['label'] - prediction = 0 - sentiment = '' - if pos_score > abs(neg_score): - prediction = pos_score - sentiment = sent_pos - elif abs(neg_score) > pos_score: - 
prediction = neg_score - sentiment = sent_neg - - if return_all: - return prediction, sentiment - else: - return prediction - -def get_parallel(word, seed_definition, QA=False): - cleaned = nlp(process_text(seed_definition)) - root_syns = wordnet.synsets(word) - hypers = [] - new_hypos = [] - - for syn in root_syns: - hypers.extend(syn.hypernyms()) - - for syn in hypers: - new_hypos.extend(syn.hyponyms()) - - hypos = list(set([syn for syn in new_hypos if cleaned.similarity(nlp(process_text(syn.definition()))) >=.75]))[:25] -# with st.sidebar: -# st.write(f"The number of hypos is {len(hypos)} during get Parallel at Similarity >= .75.") #QA - - if len(hypos) <= 1: - hypos = root_syns - elif len(hypos) < 3: - hypos = list(set([syn for syn in new_hypos if cleaned.similarity(nlp(process_text(syn.definition()))) >=.5]))[:25] # added a cap to each - elif len(hypos) < 10: - hypos = list(set([syn for syn in new_hypos if cleaned.similarity(nlp(process_text(syn.definition()))) >=.66]))[:25] - elif len(hypos) >= 10: - hypos = list(set([syn for syn in new_hypos if cleaned.similarity(nlp(process_text(syn.definition()))) >=.8]))[:25] - if QA: - print(hypers) - print(hypos) - return hypers, hypos - else: - return hypos - -# Builds a dataframe dynamically from WordNet using NLTK. -def wordnet_parallel_df(word,seed_definition=None): - words = [] - cats = [] - #WordNet hates spaces so you have to remove them - m_word = word.replace(" ", "_") - - # add synonyms and antonyms for diversity - synonyms, antonyms = syn_ant(word) - words.extend(synonyms) - cats.extend(["synonyms" for n in range(len(synonyms))]) - words.extend(antonyms) - cats.extend(["antonyms" for n in range(len(antonyms))]) - - try: - hypos = get_parallel(m_word,seed_definition) - except: - st.write("You did not supply a definition.") - #Allow the user to pick a seed definition if it is not provided directly to the function. 
- '''if seed_definition is None: - if POS in pos_options: - seed_definitions = [syn.definition() for syn in wordnet.synsets(m_word, pos=getattr(wordnet, POS))] - else: - seed_definitions = [syn.definition() for syn in wordnet.synsets(m_word)] - for d in range(len(seed_definitions)): - print(f"{d}: {seed_definitions[d]}") - choice = int(input("Which of the definitions above most aligns to your selection?")) - seed_definition = seed_definitions[choice]''' - - #This is a QA section -# with st.sidebar: -# st.write(f"The number of hypos is {len(hypos)} during parallel df creation.") #QA - - #Transforms hypos into lemmas - for syn in hypos: - cur_lemmas = syn.lemmas() - hypos = syn.hyponyms() - for hypo in hypos: - cur_lemmas.extend(hypo.lemmas()) - for lemma in cur_lemmas: - ll = lemma.name() - cats.append(re.sub("_"," ", syn.name().split(".")[0])) - words.append(re.sub("_"," ",ll)) -# with st.sidebar: -# st.write(f'There are {len(words)} words in the dataframe at the beginning of df creation.') #QA - - df = {"Categories":cats, "Words":words} - df = pd.DataFrame(df) - df = df.drop_duplicates("Words").reset_index() - df = df.drop("index", axis=1) - return df - -#@st.experimental_singleton(suppress_st_warning=True) -def cf_from_wordnet_df(seed,text,seed_definition=False): - seed_token = nlp(seed) - seed_POS = seed_token[0].pos_ - #print(seed_POS) QA - try: - df = wordnet_parallel_df(seed,seed_definition) - except: - st.write("You did not supply a definition.") - - df["text"] = df.Words.apply(lambda x: re.sub(r'\b'+seed+r'\b',x,text)) - df["similarity"] = df.Words.apply(lambda x: seed_token[0].similarity(nlp(x)[0])) - df = df[df["similarity"] > 0].reset_index() - df.drop("index", axis=1, inplace=True) - df["pred"] = df.text.apply(eval_pred_test) - # added this because I think it will make the end results better if we ensure the seed is in the data we generate counterfactuals from. 
- df['seed'] = df.Words.apply(lambda x: 'seed' if x.lower() == seed.lower() else 'alternative') - return df \ No newline at end of file diff --git a/spaces/bzd4576/sovits-sin/app.py b/spaces/bzd4576/sovits-sin/app.py deleted file mode 100644 index befef03323b65d400ac0e20fbcc282f09ac61bf9..0000000000000000000000000000000000000000 --- a/spaces/bzd4576/sovits-sin/app.py +++ /dev/null @@ -1,114 +0,0 @@ -import gradio as gr -import os -os.system('cd monotonic_align && python setup.py build_ext --inplace && cd ..') - -import logging - -numba_logger = logging.getLogger('numba') -numba_logger.setLevel(logging.WARNING) - -import librosa -import torch - -import commons -import utils -from models import SynthesizerTrn -from text.symbols import symbols -from text import text_to_sequence -def resize2d(source, target_len): - source[source<0.001] = np.nan - target = np.interp(np.arange(0, len(source), len(source) / target_len), np.arange(0, len(source)), source) - return np.nan_to_num(target) -def convert_wav_22050_to_f0(audio): - tmp = librosa.pyin(audio, - fmin=librosa.note_to_hz('C0'), - fmax=librosa.note_to_hz('C7'), - frame_length=1780)[0] - f0 = np.zeros_like(tmp) - f0[tmp>0] = tmp[tmp>0] - return f0 - -def get_text(text, hps): - text_norm = text_to_sequence(text, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = torch.LongTensor(text_norm) - print(text_norm.shape) - return text_norm - - -hps = utils.get_hparams_from_file("configs/ljs_base.json") -hps_ms = utils.get_hparams_from_file("configs/config.json") -net_g_ms = SynthesizerTrn( - len(symbols), - hps_ms.data.filter_length // 2 + 1, - hps_ms.train.segment_size // hps.data.hop_length, - n_speakers=hps_ms.data.n_speakers, - **hps_ms.model) - -import numpy as np - -hubert = torch.hub.load("bshall/hubert:main", "hubert_soft") - -_ = utils.load_checkpoint("G_376000.pth", net_g_ms, None) - -global vcid - -def getid(id): - global vcid - vcid=id - return vcid - -def vc_fn(input_audio,vc_transform): - global vcid - if input_audio is None: - return "You need to upload an audio", None - sampling_rate, audio = input_audio - # print(audio.shape,sampling_rate) - duration = audio.shape[0] / sampling_rate - if duration > 30: - return "Error: Audio is too long", None - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != 16000: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) - - audio22050 = librosa.resample(audio, orig_sr=16000, target_sr=22050) - f0 = convert_wav_22050_to_f0(audio22050) - - source = torch.FloatTensor(audio).unsqueeze(0).unsqueeze(0) - print(source.shape) - with torch.inference_mode(): - units = hubert.units(source) - soft = units.squeeze(0).numpy() - print(sampling_rate) - f0 = resize2d(f0, len(soft[:, 0])) * vc_transform - soft[:, 0] = f0 / 10 - sid = torch.LongTensor([vcid]) - stn_tst = torch.FloatTensor(soft) - with torch.no_grad(): - x_tst = stn_tst.unsqueeze(0) - x_tst_lengths = torch.LongTensor([stn_tst.size(0)]) - audio = net_g_ms.infer(x_tst, x_tst_lengths,sid=sid, noise_scale=0.1, noise_scale_w=0.1, length_scale=1)[0][ - 0, 0].data.float().numpy() - - return "Success", (hps.data.sampling_rate, audio) - - -app = gr.Blocks() -with app: - with gr.Tabs(): - with gr.TabItem("Basic"): - vc_input3 = gr.Audio(label="Input Audio (30s limitation)") - vc_transform = gr.Number(label="transform", value=1.0) - vc_id = gr.Number(label="Input speaker_id", 
value=0) - vc_setid = gr.Button("set speaker_id", variant="primary") - vc_submit = gr.Button("Convert", variant="primary") - vc_output3 = gr.Textbox(label="Output Message") - vc_output1 = gr.Textbox(label="Output Message") - vc_output2 = gr.Audio(label="Output Audio") - vc_setid.click(getid, vc_id, vc_output3) - vc_submit.click(vc_fn, [vc_input3, vc_transform], [vc_output1, vc_output2]) - - app.launch() \ No newline at end of file diff --git a/spaces/caffeinum/VToonify/vtoonify/model/raft/evaluate.py b/spaces/caffeinum/VToonify/vtoonify/model/raft/evaluate.py deleted file mode 100644 index 431a0f58891bede2804454fa7f28e9434c4c8746..0000000000000000000000000000000000000000 --- a/spaces/caffeinum/VToonify/vtoonify/model/raft/evaluate.py +++ /dev/null @@ -1,197 +0,0 @@ -import sys -sys.path.append('core') - -from PIL import Image -import argparse -import os -import time -import numpy as np -import torch -import torch.nn.functional as F -import matplotlib.pyplot as plt - -import datasets -from utils import flow_viz -from utils import frame_utils - -from raft import RAFT -from utils.utils import InputPadder, forward_interpolate - - -@torch.no_grad() -def create_sintel_submission(model, iters=32, warm_start=False, output_path='sintel_submission'): - """ Create submission for the Sintel leaderboard """ - model.eval() - for dstype in ['clean', 'final']: - test_dataset = datasets.MpiSintel(split='test', aug_params=None, dstype=dstype) - - flow_prev, sequence_prev = None, None - for test_id in range(len(test_dataset)): - image1, image2, (sequence, frame) = test_dataset[test_id] - if sequence != sequence_prev: - flow_prev = None - - padder = InputPadder(image1.shape) - image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda()) - - flow_low, flow_pr = model(image1, image2, iters=iters, flow_init=flow_prev, test_mode=True) - flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy() - - if warm_start: - flow_prev = forward_interpolate(flow_low[0])[None].cuda() - - output_dir = os.path.join(output_path, dstype, sequence) - output_file = os.path.join(output_dir, 'frame%04d.flo' % (frame+1)) - - if not os.path.exists(output_dir): - os.makedirs(output_dir) - - frame_utils.writeFlow(output_file, flow) - sequence_prev = sequence - - -@torch.no_grad() -def create_kitti_submission(model, iters=24, output_path='kitti_submission'): - """ Create submission for the Sintel leaderboard """ - model.eval() - test_dataset = datasets.KITTI(split='testing', aug_params=None) - - if not os.path.exists(output_path): - os.makedirs(output_path) - - for test_id in range(len(test_dataset)): - image1, image2, (frame_id, ) = test_dataset[test_id] - padder = InputPadder(image1.shape, mode='kitti') - image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda()) - - _, flow_pr = model(image1, image2, iters=iters, test_mode=True) - flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy() - - output_filename = os.path.join(output_path, frame_id) - frame_utils.writeFlowKITTI(output_filename, flow) - - -@torch.no_grad() -def validate_chairs(model, iters=24): - """ Perform evaluation on the FlyingChairs (test) split """ - model.eval() - epe_list = [] - - val_dataset = datasets.FlyingChairs(split='validation') - for val_id in range(len(val_dataset)): - image1, image2, flow_gt, _ = val_dataset[val_id] - image1 = image1[None].cuda() - image2 = image2[None].cuda() - - _, flow_pr = model(image1, image2, iters=iters, test_mode=True) - epe = torch.sum((flow_pr[0].cpu() - flow_gt)**2, dim=0).sqrt() - 
epe_list.append(epe.view(-1).numpy()) - - epe = np.mean(np.concatenate(epe_list)) - print("Validation Chairs EPE: %f" % epe) - return {'chairs': epe} - - -@torch.no_grad() -def validate_sintel(model, iters=32): - """ Peform validation using the Sintel (train) split """ - model.eval() - results = {} - for dstype in ['clean', 'final']: - val_dataset = datasets.MpiSintel(split='training', dstype=dstype) - epe_list = [] - - for val_id in range(len(val_dataset)): - image1, image2, flow_gt, _ = val_dataset[val_id] - image1 = image1[None].cuda() - image2 = image2[None].cuda() - - padder = InputPadder(image1.shape) - image1, image2 = padder.pad(image1, image2) - - flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True) - flow = padder.unpad(flow_pr[0]).cpu() - - epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt() - epe_list.append(epe.view(-1).numpy()) - - epe_all = np.concatenate(epe_list) - epe = np.mean(epe_all) - px1 = np.mean(epe_all<1) - px3 = np.mean(epe_all<3) - px5 = np.mean(epe_all<5) - - print("Validation (%s) EPE: %f, 1px: %f, 3px: %f, 5px: %f" % (dstype, epe, px1, px3, px5)) - results[dstype] = np.mean(epe_list) - - return results - - -@torch.no_grad() -def validate_kitti(model, iters=24): - """ Peform validation using the KITTI-2015 (train) split """ - model.eval() - val_dataset = datasets.KITTI(split='training') - - out_list, epe_list = [], [] - for val_id in range(len(val_dataset)): - image1, image2, flow_gt, valid_gt = val_dataset[val_id] - image1 = image1[None].cuda() - image2 = image2[None].cuda() - - padder = InputPadder(image1.shape, mode='kitti') - image1, image2 = padder.pad(image1, image2) - - flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True) - flow = padder.unpad(flow_pr[0]).cpu() - - epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt() - mag = torch.sum(flow_gt**2, dim=0).sqrt() - - epe = epe.view(-1) - mag = mag.view(-1) - val = valid_gt.view(-1) >= 0.5 - - out = ((epe > 3.0) & ((epe/mag) > 0.05)).float() - epe_list.append(epe[val].mean().item()) - out_list.append(out[val].cpu().numpy()) - - epe_list = np.array(epe_list) - out_list = np.concatenate(out_list) - - epe = np.mean(epe_list) - f1 = 100 * np.mean(out_list) - - print("Validation KITTI: %f, %f" % (epe, f1)) - return {'kitti-epe': epe, 'kitti-f1': f1} - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--model', help="restore checkpoint") - parser.add_argument('--dataset', help="dataset for evaluation") - parser.add_argument('--small', action='store_true', help='use small model') - parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision') - parser.add_argument('--alternate_corr', action='store_true', help='use efficent correlation implementation') - args = parser.parse_args() - - model = torch.nn.DataParallel(RAFT(args)) - model.load_state_dict(torch.load(args.model)) - - model.cuda() - model.eval() - - # create_sintel_submission(model.module, warm_start=True) - # create_kitti_submission(model.module) - - with torch.no_grad(): - if args.dataset == 'chairs': - validate_chairs(model.module) - - elif args.dataset == 'sintel': - validate_sintel(model.module) - - elif args.dataset == 'kitti': - validate_kitti(model.module) - - diff --git a/spaces/caiocdcs/sports-classifier/README.md b/spaces/caiocdcs/sports-classifier/README.md deleted file mode 100644 index b2de167c1be10d84ff764f6396849d4cc11954ea..0000000000000000000000000000000000000000 --- a/spaces/caiocdcs/sports-classifier/README.md +++ /dev/null @@ -1,13 +0,0 
@@ ---- -title: Sports Classifier -emoji: 🐠 -colorFrom: blue -colorTo: indigo -sdk: gradio -sdk_version: 3.28.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/caoyiming/vits-uma-genshin-honkai/README.md b/spaces/caoyiming/vits-uma-genshin-honkai/README.md deleted file mode 100644 index 1c0aa069bfd980b6b45bb2bf62ff74bd9b0b61c2..0000000000000000000000000000000000000000 --- a/spaces/caoyiming/vits-uma-genshin-honkai/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -license: apache-2.0 -title: ' vits-uma-genshin-honkai' -sdk: gradio -sdk_version: 3.7 -emoji: 🐨 -colorTo: yellow -pinned: false -app_file: app.py -duplicated_from: ikechan8370/vits-uma-genshin-honkai ---- diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/configs/common/data/constants.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/configs/common/data/constants.py deleted file mode 100644 index be11cb5ac7c32a260af96ed27c32ed767b2f2bcd..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/configs/common/data/constants.py +++ /dev/null @@ -1,9 +0,0 @@ -constants = dict( - imagenet_rgb256_mean=[123.675, 116.28, 103.53], - imagenet_rgb256_std=[58.395, 57.12, 57.375], - imagenet_bgr256_mean=[103.530, 116.280, 123.675], - # When using pre-trained models in Detectron1 or any MSRA models, - # std has been absorbed into its conv1 weights, so the std needs to be set 1. - # Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std) - imagenet_bgr256_std=[1.0, 1.0, 1.0], -) diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/tests/test_structures.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/tests/test_structures.py deleted file mode 100644 index 54082d3abf119bf2fdba7206124893f35b4b4ae1..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/tests/test_structures.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
- -import unittest - -from densepose.structures import normalized_coords_transform - - -class TestStructures(unittest.TestCase): - def test_normalized_coords_transform(self): - bbox = (32, 24, 288, 216) - x0, y0, w, h = bbox - xmin, ymin, xmax, ymax = x0, y0, x0 + w, y0 + h - f = normalized_coords_transform(*bbox) - # Top-left - expected_p, actual_p = (-1, -1), f((xmin, ymin)) - self.assertEqual(expected_p, actual_p) - # Top-right - expected_p, actual_p = (1, -1), f((xmax, ymin)) - self.assertEqual(expected_p, actual_p) - # Bottom-left - expected_p, actual_p = (-1, 1), f((xmin, ymax)) - self.assertEqual(expected_p, actual_p) - # Bottom-right - expected_p, actual_p = (1, 1), f((xmax, ymax)) - self.assertEqual(expected_p, actual_p) diff --git a/spaces/ccolas/TastyPiano/app.py b/spaces/ccolas/TastyPiano/app.py deleted file mode 100644 index 568de05e3aba2a91947f7462d5fcef98bec06ef1..0000000000000000000000000000000000000000 --- a/spaces/ccolas/TastyPiano/app.py +++ /dev/null @@ -1,224 +0,0 @@ -import time -import os -import pickle -import sys -sys.path.append('/') -from src.music.pipeline.music_pipeline import encode_music -from src.music2cocktailrep.pipeline.music2cocktailrep import music2cocktailrep, setup_translation_models, debug_translation -from src.cocktails.pipeline.cocktailrep2recipe import cocktailrep2recipe -from src.debugger import Debugger -from datetime import datetime -from shutil import copy -import streamlit as st -from src.music.config import AUDIO_PATH, MIDI_PATH -from pretty_midi import PrettyMIDI -import numpy as np -import pydub -from PIL import Image -import pytube - -st.set_page_config( - page_title="TastyPiano", - page_icon="🎹", -) - -st.title('TastyPiano') - -synestesia_path = 'data/synesthesia/' -debugger = Debugger(verbose=False) -np.random.seed(0) -def centered_module(func, text, args=None): - _, col2, _ = st.columns([1, 2, 1]) - with col2: - return func(text) - -def centered_button(func, text, args=None): - _, _, _, col3, _, _, _ = st.columns([1, 1, 1, 1, 1, 1, 1]) - with col3: - return func(text) - -def setup_streamlite(): - path = '/'.join(pytube.__file__.split('/')[:-1]) + '/cipher.py' - with open(path, 'r') as f: - cipher = f.read().split('\n') - cipher[286] = cipher[286].replace(';', '') - with open(path, 'w') as f: - f.write('\n'.join(cipher)) - os.path.exists(path) - setup_translation_models() - image = Image.open('./data/pianocktail.jpg') - st.image(image, caption='Pianocktail by Compagnie la Rumeur') - - st.subheader("Ready to taste music?") - st.markdown("TastyPiano generates music--taste synesthetic experiences by letting you turn any piano song into a cocktail. It is inspired by the" - " *Pianocktail*, a literary invention from the French novelist, jazz musician, singer and engineer Boris Vian. I see TastyPiano as a digital member of the " - "Pianocktail species, along with [other](https://www.youtube.com/watch?v=5bdX5i0nAWw) " - "[wonderful](https://www.youtube.com/watch?v=pzsDOH-xtrs&list=PLC196AA37A2D1C066&index=3) [machines](https://www.youtube.com/watch?v=y0RJg7I2x34).") - st.markdown("But TastyPiano is different from existing pianocktails. While existing version merely map played notes to drops of corresponding ingredients, " - "TastyPiano listens to the song, analyzes the music, hallucinates matching tastes and flavors, before it starts generating cocktail recipes " - "until it finds the one matching its gustatory hallucination. Check my [blog post](https://ccolas.github.io/project/pianocktail) for more information." 
- " Hear and taste it for yourself!") - st.subheader("How to use it?") - st.markdown("Provide a piano solo input and click on the **Taste it!** button.\n" - "\nYou can input either: \n* a YouTube url **or**\n* an audio file (.mp3) **or**\n* a midi file (.mid)\n" - "All these should be **only piano**, no other instrument.\n" - "Note that audio sources are cropped to the first 40s because the process of " - "converting it to midi is rather slow (1sec/sec). Midi inputs are taken whole. Please report any bug / buggy url in the community tab.") - - st.subheader("Prepare") - col1, col2, col3 = st.columns(3) - generate_audio_from_midi = False - with col1: - st.markdown('**YouTube url**') - url = st.text_area('Type it below', 'https://www.youtube.com/watch?v=UGK70IkP830', height=160) - with col2: - st.markdown('**Audio file**') - audio = st.file_uploader("Upload it here (.mp3)", type=['.mp3']) - with col3: - st.markdown('**Midi file**') - midi = st.file_uploader("Upload it here (.mid)", type=['.mid']) - generate_audio_from_midi = st.checkbox('Generate audio? Untick if the song is too long (>10min)', value=True) - - #url = "https://www.youtube.com/watch?v=UGK70IkP830" - #unit# = 'mL' - - def run(unit): - setup_and_run(unit=unit, url=url, midi=midi, audio=audio, generate_audio_from_midi=generate_audio_from_midi, extra_code=None) - #run(unit) - st.markdown('##') - unit = st.radio('Pick the units (before pressing "Taste it!", default mL)', ['mL', 'oz'], index=0) - button = centered_button(st.button, 'Taste it!') - # print(url) - if button: - run(unit) - - -def pianocktail(unit='mL', record=False, url=None, midi=None, audio=None, processed=None, crop=40, verbose=False, debug=False, level=0): - assert url is not None or midi is not None or audio is not None or processed is not None - if verbose: print('------\nNew synesthetic exploration!') - init_time = time.time() - try: - with st.spinner("Listening to the song (~1min).."): - music_ai_rep, music_handcoded_rep, all_paths, error = encode_music(record=record, url=url, audio_path=audio, midi_path=midi, nb_aug=0, noise_injection=False, - augmentation=False, processed_path=processed, crop=crop, apply_filtering=False, verbose=verbose, - level=level+2) - if music_ai_rep is not None: - with st.spinner(text="Thinking about corresponding flavors.."): - cocktail_rep, affective_cluster_id, affect = music2cocktailrep(music_ai_rep, music_handcoded_rep, verbose=verbose, level=level+2) - with st.spinner("Trying recipes (15s).."): - cocktail_recipes, scores = cocktailrep2recipe(cocktail_rep, unit=unit, target_affective_cluster=affective_cluster_id, verbose=verbose, full_verbose=verbose, \ - level=level+2) - - cocktail_recipe = cocktail_recipes[0] - recipe_score = scores[0] - if debug: - music_reconstruction = debug_translation(music_ai_rep) - debugger.extract_info(all_paths, affective_cluster_id, affect, cocktail_rep, music_reconstruction, recipe_score, verbose=verbose, level=level+2) - debug_info = debugger.debug_dict - else: - debug_info = None - if verbose: - print(cocktail_recipe.replace('Recipe', ' ' * (level + 2) + 'Generated recipe:').replace('None ()', '')) - debugger.print_debug(level=level+2) - print(f'\nEnd of synesthetic exploration ({int(time.time() - init_time)} secs).\n------') - st.success('Recipe found!') - else: - st.error('Error in listening. Is the url valid? the audio an .mp3? 
the midi a .mid?') - cocktail_recipe = None - debug_info = None - except Exception as err: - print(err, error) - st.error('Error: ' + error) - cocktail_recipe = None - debug_info = None - return cocktail_recipe, debug_info - -def setup_and_run(unit='mL', url=None, midi=None, audio=None, generate_audio_from_midi=False, verbose=True, debug=True, extra_code=None): - if url is None and midi is None and audio is None: - st.error('Please enter a piano input.') - assert False - st.subheader('Synesthesia') - now = datetime.now() - folder_name = f'date_{now.year}_{now.month}_{now.day}_time_{now.hour}_{now.minute}_{now.second}' - folder_path = synestesia_path + folder_name - if extra_code is not None: - folder_path += '_' + extra_code - if os.path.exists(folder_path): - folder_path += '_2' - folder_path += '/' - os.makedirs(folder_path, exist_ok=True) - if midi is not None: - st.write(f' \tReading from midi file: {midi.name}') - midi_path = MIDI_PATH + 'from_url_midi/' + midi.name[:-4] + '_midi.mid' - audio_path = AUDIO_PATH + 'from_url/' + midi.name.replace('.mid', '.mp3') - with open(midi_path, "wb") as f: - f.write(midi.getbuffer()) - midi = midi_path - if generate_audio_from_midi: - midi_data = PrettyMIDI(midi_path) - audio_data = midi_data.fluidsynth(fs=44100) - y = np.int16(audio_data * 2 ** 15) - song = pydub.AudioSegment(y.tobytes(), frame_rate=44100, sample_width=2, channels=1) - song.export(audio_path, format="mp3", bitrate="320k") - # st.write(audio_data) - st.audio(audio_path, format='audio/mp3') - url = None - elif audio is not None: - st.write(f' \tReading from audio file: {audio.name}') - audio_path = AUDIO_PATH + 'from_url/' + audio.name - with open(audio_path, "wb") as f: - f.write(audio.getbuffer()) - audio = audio_path - audio_file = open(audio, 'rb') - audio_bytes = audio_file.read() - st.audio(audio_bytes, format='audio/mp3') - url = None - else: - st.write(f' \tReading from YouTube url: {url}') - st.video(url) - - _, col2, _ = st.columns([1, 1, 1]) - with col2: - st.markdown('##') - recipe, debug = pianocktail(unit=unit, url=url, midi=midi, audio=audio, verbose=verbose, debug=debug) - with open(folder_path + 'debug.pk', 'wb') as f: - pickle.dump(debug, f) - with open(folder_path + 'recipe.txt', 'w') as f: - f.write(recipe) - paths = debug['all_paths'] - if paths['url'] is not None: - with open(folder_path + 'url.txt', 'w') as f: - f.write(paths['url']) - for k in ['audio_path', 'midi_path']: - origin = paths[k] - if origin is not None: - copy(origin, folder_path + origin.split('/')[-1]) - - st.subheader('Recipe') - recipe = recipe.replace(' Enjoy!', ' \nEnjoy!').replace('\n', ' \n') - st.text(recipe) - - st.markdown('**About this synesthesia**') - closest_songs = [debug['nn_music'][i][:-26].split('structured_')[1].replace('_', ' ') for i in range(3)] - str_songs = 'These are the closest song I know: ' - str_songs += ' '.join([f' \n* {closest_songs[i]}' for i in range(3)]) - st.markdown(str_songs + '.') - str_cocktails = 'These are existing cocktails that are close to the taste of this song:' - str_cocktails += ' '.join([f' \n* {cocktail_name}: {cocktail_url}' for cocktail_name, cocktail_url in zip(debug['nearest_cocktail_names'][:3], - debug['nearest_cocktail_urls'][:3])]) - st.markdown(str_cocktails + '.') - -if __name__ == '__main__': - setup_streamlite() - # urls = ["https://www.youtube.com/watch?v=PLFVGwGQcB0", - # "https://www.youtube.com/watch?v=VQmuAr93OlI", - # "https://www.youtube.com/watch?v=Nv2GgV34qIg&list=PLO9E3V4rGLD8_iWrCioJRWZXJJE3Fzu_J&index=4", - # 
"https://www.youtube.com/watch?v=qAEIjWYdoYc&list=PLO9E3V4rGLD8_iWrCioJRWZXJJE3Fzu_J&index=1", - # "https://www.youtube.com/watch?v=M73x3O7dhmg&list=PLO9E3V4rGLD8_iWrCioJRWZXJJE3Fzu_J&index=5"] - # setup_translation_models() - # setup_and_run(url=urls[0], verbose=True, debug=True) - # recipes = [] - # for url in urls: - # recipe = pianocktail(url=url, verbose=True, debug=True)[0] - # recipes.append(recipe) - # stop = 1 diff --git a/spaces/ccolas/TastyPiano/src/cocktails/utilities/other_scrubbing_utilities.py b/spaces/ccolas/TastyPiano/src/cocktails/utilities/other_scrubbing_utilities.py deleted file mode 100644 index eed580ca304ee3fbf1725e9b1166d432c5129068..0000000000000000000000000000000000000000 --- a/spaces/ccolas/TastyPiano/src/cocktails/utilities/other_scrubbing_utilities.py +++ /dev/null @@ -1,240 +0,0 @@ -import numpy as np -import pickle -from src.cocktails.utilities.cocktail_utilities import get_profile, profile_keys -from src.cocktails.utilities.ingredients_utilities import extract_ingredients, ingredient_list, ingredient_profiles -from src.cocktails.utilities.glass_and_volume_utilities import glass_volume, volume_ranges - -one_dash = 1 -one_splash = 6 -one_tablespoon = 15 -one_barspoon = 5 -fill_rate = 0.8 -quantity_factors ={'ml':1, - 'cl':10, - 'splash':one_splash, - 'splashes':one_splash, - 'dash':one_dash, - 'dashes':one_dash, - 'spoon':one_barspoon, - 'spoons':one_barspoon, - 'tablespoon':one_tablespoon, - 'barspoons':one_barspoon, - 'barspoon':one_barspoon, - 'bar spoons': one_barspoon, - 'bar spoon': one_barspoon, - 'tablespoons':one_tablespoon, - 'teaspoon':5, - 'teaspoons':5, - 'drop':0.05, - 'drops':0.05} -quantitiy_keys = sorted(quantity_factors.keys()) -indexes_keys = np.flip(np.argsort([len(k) for k in quantitiy_keys])) -quantity_factors_keys = list(np.array(quantitiy_keys)[indexes_keys]) - -keys_to_track = ['names', 'urls', 'glass', 'garnish', 'recipe', 'how_to', 'review', 'taste_rep', 'valid'] -keys_to_add = ['category', 'subcategory', 'ingredients_str', 'ingredients', 'quantities', 'to_keep'] -keys_to_update = ['glass'] -keys_for_csv = ['names', 'category', 'subcategory', 'ingredients_str', 'urls', 'glass', 'garnish', 'how_to', 'review', 'taste_rep'] + profile_keys - -to_replace_q = {' fresh': ''} -to_replace_ing = {'maple syrup': 'honey syrup', - 'agave syrup': 'honey syrup', - 'basil': 'mint'} - -def print_recipe(unit='mL', ingredient_str=None, ingredients=None, quantities=None, name='', cat='', to_print=True): - str_out = '' - if ingredient_str is None: - assert len(ingredients) == len(quantities), 'provide either ingredient_str, or list ingredients and quantities' - else: - assert ingredients is None and quantities is None, 'provide either ingredient_str, or list ingredients and quantities' - ingredients, quantities = extract_ingredients(ingredient_str) - - str_out += f'\nRecipe:' - if name != '' and name is not None: str_out += f' {name}' - if cat != '': str_out += f' ({cat})' - str_out += '\n' - for i in range(len(ingredients)): - # get quantifier - if ingredients[i] == 'egg': - quantities[i] = 1 - ingredients[i] = 'egg white' - if unit == 'mL': - quantifier = ' (30 mL)' - elif unit == 'oz': - quantifier = ' (1 fl oz)' - else: - raise ValueError - elif ingredients[i] in ['angostura', 'orange bitters']: - quantities[i] = max(1, int(quantities[i] / 0.6)) - quantifier = ' dash' - if quantities[i] > 1: quantifier += 'es' - elif ingredients[i] == 'mint': - if quantities[i] > 1: quantifier = ' leaves' - else: quantifier = ' leaf' - else: - if unit == "oz": - 
quantities[i] = float(f"{quantities[i] * 0.033814:.3f}") # convert to fl oz - quantifier = ' fl oz' - else: - quantifier = ' mL' - str_out += f' {quantities[i]}{quantifier} - {ingredients[i]}\n' - - if to_print: - print(str_out) - return str_out - - -def test_datapoint(datapoint, category, ingredients, quantities): - # run checks - ingredient_indexes = [ingredient_list.index(ing) for ing in ingredients] - profile = get_profile(category, ingredients, quantities) - volume = profile['end volume'] - alcohol = profile['end alcohol'] - acid = profile['end acid'] - sugar = profile['end sugar'] - # check volume - if datapoint['glass'] != None: - if volume > glass_volume[datapoint['glass']] * fill_rate: - # recompute quantities for it to match - ratio = fill_rate * glass_volume[datapoint['glass']] / volume - for i_q in range(len(quantities)): - quantities[i_q] = float(f'{quantities[i_q] * ratio:.2f}') - # check alcohol - assert alcohol < 30, 'too boozy' - assert alcohol < 5, 'not boozy enough' - assert acid < 2, 'too much acid' - assert sugar < 20, 'too much sugar' - assert len(ingredients) > 1, 'only one ingredient' - if len(set(ingredients)) != len(ingredients): - i_doubles = [] - s_ing = set() - for i, ing in enumerate(ingredients): - if ing in s_ing: - i_doubles.append(i) - else: - s_ing.add(ing) - ingredient_double_ok = ['mint', 'cointreau', 'lemon juice', 'cuban rum', 'double syrup'] - if len(i_doubles) == 1 and ingredients[i_doubles[0]] in ingredient_double_ok: - ing_double = ingredients[i_doubles[0]] - double_q = np.sum([quantities[i] for i in range(len(ingredients)) if ingredients[i] == ing_double]) - ingredients.pop(i_doubles[0]) - quantities.pop(i_doubles[0]) - quantities[ingredients.index(ing_double)] = double_q - else: - assert False, f'double ingredient, not {ingredient_double_ok}' - lemon_lime_q = np.sum([quantities[i] for i in range(len(ingredients)) if ingredients[i] in ['lime juice', 'lemon juice']]) - assert lemon_lime_q <= 45, 'too much lemon and lime' - salt_q = np.sum([quantities[i] for i in range(len(ingredients)) if ingredients[i] == 'salt']) - assert salt_q <= 8, 'too much salt' - bitter_q = np.sum([quantities[i] for i in range(len(ingredients)) if ingredients[i] in ['angostura', 'orange bitters']]) - assert bitter_q <= 5 * one_dash, 'too much bitter' - absinthe_q = np.sum([quantities[i] for i in range(len(ingredients)) if ingredients[i] == 'absinthe']) - if absinthe_q > 4 * one_dash: - mix_volume = np.sum([quantities[i] for i in range(len(ingredients)) if ingredients[i] != 'mint']) - assert absinthe_q < 0.5 * mix_volume, 'filter absinthe glasses' - if any([w in datapoint['how_to'] or any([w in ing.lower() for ing in datapoint['recipe'][1]]) for w in ['warm', 'boil', 'hot']]) and 'shot' not in datapoint['how_to']: - assert False - water_q = np.sum([quantities[i] for i in range(len(ingredients)) if ingredients[i] == 'water']) - assert water_q < 40 - # n_liqueur = np.sum([ingredient_profiles['type'][i].lower() == 'liqueur' for i in ingredient_indexes]) - # assert n_liqueur <= 2 - n_liqueur_and_vermouth = np.sum([ingredient_profiles['type'][i].lower() in ['liqueur', 'vermouth'] for i in ingredient_indexes]) - assert n_liqueur_and_vermouth <= 3 - return ingredients, quantities - -def run_battery_checks_difford(datapoint, category, ingredients, quantities): - flag = False - try: - ingredients, quantities = test_datapoint(datapoint, category, ingredients, quantities) - except: - flag = True - print(datapoint["names"]) - print(datapoint["urls"]) - ingredients, quantities = None, 
None - - return flag, ingredients, quantities - -def tambouille(q, ingredients_scrubbed, quantities_scrubbed, cat): - # ugly - ing_scrubbed = ingredients_scrubbed[len(quantities_scrubbed)] - if q == '4 cube' and ing_scrubbed == 'pineapple juice': - q = '20 ml' - elif 'top up with' in q: - volume_so_far = np.sum([quantities_scrubbed[i] for i in range(len(quantities_scrubbed)) if ingredients_scrubbed[i] != 'mint']) - volume_mix = np.sum(volume_ranges[cat]) / 2 - if (volume_mix - volume_so_far) < 15: - q = '15 ml'# - else: - q = str(int(volume_mix - volume_so_far)) + ' ml' - elif q == '1 pinch' and ing_scrubbed == 'salt': - q = '2 drops' - elif 'cube' in q and ing_scrubbed == 'double syrup': - q = f'{float(q.split(" ")[0]) * 2 * 1.7:.2f} ml' #2g per cube, 1.7 is ratio solid / syrup - elif 'wedge' in q: - if ing_scrubbed == 'orange juice': - vol = 70 - elif ing_scrubbed == 'lime juice': - vol = 30 - elif ing_scrubbed == 'lemon juice': - vol = 45 - elif ing_scrubbed == 'pineapple juice': - vol = 140 - factor = float(q.split(' ')[0]) * 0.15 # consider a wedge to be 0.15*the fruit. - q = f'{factor * vol:.2f} ml' - elif 'slice' in q: - if ing_scrubbed == 'orange juice': - vol = 70 - elif ing_scrubbed == 'lime juice': - vol = 30 - elif ing_scrubbed == 'lemon juice': - vol = 45 - elif ing_scrubbed == 'pineapple juice': - vol = 140 - f = q.split(' ')[0] - if len(f.split('⁄')) > 1: - frac = f.split('⁄') - factor = float(frac[0]) / float(frac[1]) - else: - factor = float(f) - factor *= 0.1 # consider a slice to be 0.1*the fruit. - q = f'{factor * vol:.2f} ml' - elif q == '1 whole' and ing_scrubbed == 'luxardo maraschino': - q = '10 ml' - elif ing_scrubbed == 'egg' and 'ml' not in q: - q = f'{float(q) * 30:.2f} ml' # 30 ml per egg - return q - - -def compute_eucl_dist(a, b): - return np.sqrt(np.sum((a - b)**2)) - -def evaluate_with_quadruplets(representations, strategy='all'): - with open(QUADRUPLETS_PATH, 'rb') as f: - data = pickle.load(f) - data = list(data.values()) - quadruplets = [] - if strategy != 'all': - for d in data: - if d[0] == strategy: - quadruplets.append(d[1:]) - elif strategy == 'all': - for d in data: - quadruplets.append(d[1:]) - else: - raise ValueError - - scores = [] - for q in quadruplets: - close = q[0] - if len(close) == 2: - far = q[1] - distance_close = compute_eucl_dist(representations[close[0]], representations[close[1]]) - distances_far = [compute_eucl_dist(representations[far[i][0]], representations[far[i][1]]) for i in range(len(far))] - scores.append(distance_close < np.min(distances_far)) - if len(scores) == 0: - score = np.nan - else: - score = np.mean(scores) - return score - - diff --git a/spaces/celise88/Pathfinder/templates/register.html b/spaces/celise88/Pathfinder/templates/register.html deleted file mode 100644 index 8b77a27d75fddec785c06bae447aa6b6bc33ab4f..0000000000000000000000000000000000000000 --- a/spaces/celise88/Pathfinder/templates/register.html +++ /dev/null @@ -1,60 +0,0 @@ - - - - - - - Dashboard - - - - -
      -

      User Registration

      -

      Welcome to Pathfinder!

      - {% if message %} -

      {{ message }}

      - {% else %} -

      Fill out the form below to register for an account

      - {% endif %} -
      - -
      -
      -
      -
      -
      -
      -
      - -
      - - \ No newline at end of file diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/demo/MegEngine/python/models/network_blocks.py b/spaces/chendl/compositional_test/multimodal/YOLOX/demo/MegEngine/python/models/network_blocks.py deleted file mode 100644 index f0e40d3f2aea5bbd00493311219821a7e5d5e8be..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/multimodal/YOLOX/demo/MegEngine/python/models/network_blocks.py +++ /dev/null @@ -1,183 +0,0 @@ -#!/usr/bin/env python3 -# -*- encoding: utf-8 -*- -# Copyright (c) Megvii Inc. All rights reserved. - -import megengine.functional as F -import megengine.module as M - - -class UpSample(M.Module): - - def __init__(self, scale_factor=2, mode="bilinear"): - super().__init__() - self.scale_factor = scale_factor - self.mode = mode - - def forward(self, x): - return F.vision.interpolate(x, scale_factor=self.scale_factor, mode=self.mode) - - -class SiLU(M.Module): - """export-friendly version of M.SiLU()""" - - @staticmethod - def forward(x): - return x * F.sigmoid(x) - - -def get_activation(name="silu"): - if name == "silu": - module = SiLU() - elif name == "relu": - module = M.ReLU() - elif name == "lrelu": - module = M.LeakyReLU(0.1) - else: - raise AttributeError("Unsupported act type: {}".format(name)) - return module - - -class BaseConv(M.Module): - """A Conv2d -> Batchnorm -> silu/leaky relu block""" - - def __init__(self, in_channels, out_channels, ksize, stride, groups=1, bias=False, act="silu"): - super().__init__() - # same padding - pad = (ksize - 1) // 2 - self.conv = M.Conv2d( - in_channels, - out_channels, - kernel_size=ksize, - stride=stride, - padding=pad, - groups=groups, - bias=bias, - ) - self.bn = M.BatchNorm2d(out_channels) - self.act = get_activation(act) - - def forward(self, x): - return self.act(self.bn(self.conv(x))) - - def fuseforward(self, x): - return self.act(self.conv(x)) - - -class DWConv(M.Module): - """Depthwise Conv + Conv""" - def __init__(self, in_channels, out_channels, ksize, stride=1, act="silu"): - super().__init__() - self.dconv = BaseConv( - in_channels, in_channels, ksize=ksize, - stride=stride, groups=in_channels, act=act - ) - self.pconv = BaseConv( - in_channels, out_channels, ksize=1, - stride=1, groups=1, act=act - ) - - def forward(self, x): - x = self.dconv(x) - return self.pconv(x) - - -class Bottleneck(M.Module): - # Standard bottleneck - def __init__( - self, in_channels, out_channels, shortcut=True, - expansion=0.5, depthwise=False, act="silu" - ): - super().__init__() - hidden_channels = int(out_channels * expansion) - Conv = DWConv if depthwise else BaseConv - self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act) - self.conv2 = Conv(hidden_channels, out_channels, 3, stride=1, act=act) - self.use_add = shortcut and in_channels == out_channels - - def forward(self, x): - y = self.conv2(self.conv1(x)) - if self.use_add: - y = y + x - return y - - -class ResLayer(M.Module): - "Residual layer with `in_channels` inputs." 
- def __init__(self, in_channels: int): - super().__init__() - mid_channels = in_channels // 2 - self.layer1 = BaseConv(in_channels, mid_channels, ksize=1, stride=1, act="lrelu") - self.layer2 = BaseConv(mid_channels, in_channels, ksize=3, stride=1, act="lrelu") - - def forward(self, x): - out = self.layer2(self.layer1(x)) - return x + out - - -class SPPBottleneck(M.Module): - """Spatial pyramid pooling layer used in YOLOv3-SPP""" - def __init__(self, in_channels, out_channels, kernel_sizes=(5, 9, 13), activation="silu"): - super().__init__() - hidden_channels = in_channels // 2 - self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=activation) - self.m = [M.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2) for ks in kernel_sizes] - conv2_channels = hidden_channels * (len(kernel_sizes) + 1) - self.conv2 = BaseConv(conv2_channels, out_channels, 1, stride=1, act=activation) - - def forward(self, x): - x = self.conv1(x) - x = F.concat([x] + [m(x) for m in self.m], axis=1) - x = self.conv2(x) - return x - - -class CSPLayer(M.Module): - """C3 in yolov5, CSP Bottleneck with 3 convolutions""" - - def __init__( - self, in_channels, out_channels, n=1, - shortcut=True, expansion=0.5, depthwise=False, act="silu" - ): - """ - Args: - in_channels (int): input channels. - out_channels (int): output channels. - n (int): number of Bottlenecks. Default value: 1. - """ - # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__() - hidden_channels = int(out_channels * expansion) # hidden channels - self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act) - self.conv2 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act) - self.conv3 = BaseConv(2 * hidden_channels, out_channels, 1, stride=1, act=act) - module_list = [ - Bottleneck(hidden_channels, hidden_channels, shortcut, 1.0, depthwise, act=act) - for _ in range(n) - ] - self.m = M.Sequential(*module_list) - - def forward(self, x): - x_1 = self.conv1(x) - x_2 = self.conv2(x) - x_1 = self.m(x_1) - x = F.concat((x_1, x_2), axis=1) - return self.conv3(x) - - -class Focus(M.Module): - """Focus width and height information into channel space.""" - - def __init__(self, in_channels, out_channels, ksize=1, stride=1, act="silu"): - super().__init__() - self.conv = BaseConv(in_channels * 4, out_channels, ksize, stride, act=act) - - def forward(self, x): - # shape of x (b,c,w,h) -> y(b,4c,w/2,h/2) - patch_top_left = x[..., ::2, ::2] - patch_top_right = x[..., ::2, 1::2] - patch_bot_left = x[..., 1::2, ::2] - patch_bot_right = x[..., 1::2, 1::2] - x = F.concat( - (patch_top_left, patch_bot_left, patch_top_right, patch_bot_right,), axis=1, - ) - return self.conv(x) diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/utils/checkpoint.py b/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/utils/checkpoint.py deleted file mode 100644 index a0c200e41da9ad8b720369a2181c9642724622ca..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/multimodal/YOLOX/yolox/utils/checkpoint.py +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -# Copyright (c) Megvii Inc. All rights reserved. -import os -import shutil -from loguru import logger - -import torch - - -def load_ckpt(model, ckpt): - model_state_dict = model.state_dict() - load_dict = {} - for key_model, v in model_state_dict.items(): - if key_model not in ckpt: - logger.warning( - "{} is not in the ckpt. 
Please double check and see if this is desired.".format( - key_model - ) - ) - continue - v_ckpt = ckpt[key_model] - if v.shape != v_ckpt.shape: - logger.warning( - "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format( - key_model, v_ckpt.shape, key_model, v.shape - ) - ) - continue - load_dict[key_model] = v_ckpt - - model.load_state_dict(load_dict, strict=False) - return model - - -def save_checkpoint(state, is_best, save_dir, model_name=""): - if not os.path.exists(save_dir): - os.makedirs(save_dir) - filename = os.path.join(save_dir, model_name + "_ckpt.pth") - torch.save(state, filename) - if is_best: - best_filename = os.path.join(save_dir, "best_ckpt.pth") - shutil.copyfile(filename, best_filename) diff --git a/spaces/chilge/Fushimi/resample.py b/spaces/chilge/Fushimi/resample.py deleted file mode 100644 index fabae4afbb330cccad1681b7941a63547c93c640..0000000000000000000000000000000000000000 --- a/spaces/chilge/Fushimi/resample.py +++ /dev/null @@ -1,47 +0,0 @@ -import os -import argparse -import librosa -import numpy as np -from multiprocessing import Pool, cpu_count -from scipy.io import wavfile -from tqdm import tqdm - - -def process(item): - spkdir, wav_name, args = item - # speaker 's5', 'p280', 'p315' are excluded, - speaker = spkdir.split(os.sep)[-1] - wav_path = os.path.join(args.in_dir, speaker, wav_name) - if os.path.exists(wav_path) and '.wav' in wav_path: - os.makedirs(os.path.join(args.out_dir2, speaker), exist_ok=True) - wav, sr = librosa.load(wav_path, None) - wav, _ = librosa.effects.trim(wav, top_db=20) - peak = np.abs(wav).max() - if peak > 1.0: - wav = 0.98 * wav / peak - wav2 = librosa.resample(wav, orig_sr=sr, target_sr=args.sr2) - save_name = wav_name - save_path2 = os.path.join(args.out_dir2, speaker, save_name) - wavfile.write( - save_path2, - args.sr2, - (wav2 * np.iinfo(np.int16).max).astype(np.int16) - ) - - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--sr2", type=int, default=32000, help="sampling rate") - parser.add_argument("--in_dir", type=str, default="./dataset_raw", help="path to source dir") - parser.add_argument("--out_dir2", type=str, default="./dataset/32k", help="path to target dir") - args = parser.parse_args() - processs = cpu_count()-2 if cpu_count() >4 else 1 - pool = Pool(processes=processs) - - for speaker in os.listdir(args.in_dir): - spk_dir = os.path.join(args.in_dir, speaker) - if os.path.isdir(spk_dir): - print(spk_dir) - for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])): - pass diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ImageMath.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ImageMath.py deleted file mode 100644 index ac7d36b698c2ec9839d8a771734c9f730f701534..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ImageMath.py +++ /dev/null @@ -1,263 +0,0 @@ -# -# The Python Imaging Library -# $Id$ -# -# a simple math add-on for the Python Imaging Library -# -# History: -# 1999-02-15 fl Original PIL Plus release -# 2005-05-05 fl Simplified and cleaned up for PIL 1.1.6 -# 2005-09-12 fl Fixed int() and float() for Python 2.4.1 -# -# Copyright (c) 1999-2005 by Secret Labs AB -# Copyright (c) 2005 by Fredrik Lundh -# -# See the README file for information on usage and redistribution. -# - -import builtins - -from . 
import Image, _imagingmath - - -def _isconstant(v): - return isinstance(v, (int, float)) - - -class _Operand: - """Wraps an image operand, providing standard operators""" - - def __init__(self, im): - self.im = im - - def __fixup(self, im1): - # convert image to suitable mode - if isinstance(im1, _Operand): - # argument was an image. - if im1.im.mode in ("1", "L"): - return im1.im.convert("I") - elif im1.im.mode in ("I", "F"): - return im1.im - else: - msg = f"unsupported mode: {im1.im.mode}" - raise ValueError(msg) - else: - # argument was a constant - if _isconstant(im1) and self.im.mode in ("1", "L", "I"): - return Image.new("I", self.im.size, im1) - else: - return Image.new("F", self.im.size, im1) - - def apply(self, op, im1, im2=None, mode=None): - im1 = self.__fixup(im1) - if im2 is None: - # unary operation - out = Image.new(mode or im1.mode, im1.size, None) - im1.load() - try: - op = getattr(_imagingmath, op + "_" + im1.mode) - except AttributeError as e: - msg = f"bad operand type for '{op}'" - raise TypeError(msg) from e - _imagingmath.unop(op, out.im.id, im1.im.id) - else: - # binary operation - im2 = self.__fixup(im2) - if im1.mode != im2.mode: - # convert both arguments to floating point - if im1.mode != "F": - im1 = im1.convert("F") - if im2.mode != "F": - im2 = im2.convert("F") - if im1.size != im2.size: - # crop both arguments to a common size - size = (min(im1.size[0], im2.size[0]), min(im1.size[1], im2.size[1])) - if im1.size != size: - im1 = im1.crop((0, 0) + size) - if im2.size != size: - im2 = im2.crop((0, 0) + size) - out = Image.new(mode or im1.mode, im1.size, None) - im1.load() - im2.load() - try: - op = getattr(_imagingmath, op + "_" + im1.mode) - except AttributeError as e: - msg = f"bad operand type for '{op}'" - raise TypeError(msg) from e - _imagingmath.binop(op, out.im.id, im1.im.id, im2.im.id) - return _Operand(out) - - # unary operators - def __bool__(self): - # an image is "true" if it contains at least one non-zero pixel - return self.im.getbbox() is not None - - def __abs__(self): - return self.apply("abs", self) - - def __pos__(self): - return self - - def __neg__(self): - return self.apply("neg", self) - - # binary operators - def __add__(self, other): - return self.apply("add", self, other) - - def __radd__(self, other): - return self.apply("add", other, self) - - def __sub__(self, other): - return self.apply("sub", self, other) - - def __rsub__(self, other): - return self.apply("sub", other, self) - - def __mul__(self, other): - return self.apply("mul", self, other) - - def __rmul__(self, other): - return self.apply("mul", other, self) - - def __truediv__(self, other): - return self.apply("div", self, other) - - def __rtruediv__(self, other): - return self.apply("div", other, self) - - def __mod__(self, other): - return self.apply("mod", self, other) - - def __rmod__(self, other): - return self.apply("mod", other, self) - - def __pow__(self, other): - return self.apply("pow", self, other) - - def __rpow__(self, other): - return self.apply("pow", other, self) - - # bitwise - def __invert__(self): - return self.apply("invert", self) - - def __and__(self, other): - return self.apply("and", self, other) - - def __rand__(self, other): - return self.apply("and", other, self) - - def __or__(self, other): - return self.apply("or", self, other) - - def __ror__(self, other): - return self.apply("or", other, self) - - def __xor__(self, other): - return self.apply("xor", self, other) - - def __rxor__(self, other): - return self.apply("xor", other, self) - - def 
__lshift__(self, other): - return self.apply("lshift", self, other) - - def __rshift__(self, other): - return self.apply("rshift", self, other) - - # logical - def __eq__(self, other): - return self.apply("eq", self, other) - - def __ne__(self, other): - return self.apply("ne", self, other) - - def __lt__(self, other): - return self.apply("lt", self, other) - - def __le__(self, other): - return self.apply("le", self, other) - - def __gt__(self, other): - return self.apply("gt", self, other) - - def __ge__(self, other): - return self.apply("ge", self, other) - - -# conversions -def imagemath_int(self): - return _Operand(self.im.convert("I")) - - -def imagemath_float(self): - return _Operand(self.im.convert("F")) - - -# logical -def imagemath_equal(self, other): - return self.apply("eq", self, other, mode="I") - - -def imagemath_notequal(self, other): - return self.apply("ne", self, other, mode="I") - - -def imagemath_min(self, other): - return self.apply("min", self, other) - - -def imagemath_max(self, other): - return self.apply("max", self, other) - - -def imagemath_convert(self, mode): - return _Operand(self.im.convert(mode)) - - -ops = {} -for k, v in list(globals().items()): - if k[:10] == "imagemath_": - ops[k[10:]] = v - - -def eval(expression, _dict={}, **kw): - """ - Evaluates an image expression. - - :param expression: A string containing a Python-style expression. - :param options: Values to add to the evaluation context. You - can either use a dictionary, or one or more keyword - arguments. - :return: The evaluated expression. This is usually an image object, but can - also be an integer, a floating point value, or a pixel tuple, - depending on the expression. - """ - - # build execution namespace - args = ops.copy() - args.update(_dict) - args.update(kw) - for k, v in list(args.items()): - if hasattr(v, "im"): - args[k] = _Operand(v) - - compiled_code = compile(expression, "", "eval") - - def scan(code): - for const in code.co_consts: - if type(const) == type(compiled_code): - scan(const) - - for name in code.co_names: - if name not in args and name != "abs": - msg = f"'{name}' not allowed" - raise ValueError(msg) - - scan(compiled_code) - out = builtins.eval(expression, {"__builtins": {"abs": abs}}, args) - try: - return out.im - except AttributeError: - return out diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/altair/vegalite/display.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/altair/vegalite/display.py deleted file mode 100644 index 91c5f33e093b32cf81accd6fdeeb8a18292c28c0..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/altair/vegalite/display.py +++ /dev/null @@ -1,11 +0,0 @@ -from ..utils.display import Displayable, default_renderer_base, json_renderer_base -from ..utils.display import RendererRegistry, HTMLRenderer - - -__all__ = ( - "Displayable", - "default_renderer_base", - "json_renderer_base", - "RendererRegistry", - "HTMLRenderer", -) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_p_r_e_p.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_p_r_e_p.py deleted file mode 100644 index b4b92f3e924ba2f20ade9a6cca45ce78284ffe21..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_p_r_e_p.py +++ /dev/null @@ -1,7 +0,0 @@ -from 
fontTools import ttLib - -superclass = ttLib.getTableClass("fpgm") - - -class table__p_r_e_p(superclass): - pass diff --git a/spaces/cihyFjudo/fairness-paper-search/8th Grade Lesson Plans for Literary Terms How to Teach Plot Character Setting and Theme.md b/spaces/cihyFjudo/fairness-paper-search/8th Grade Lesson Plans for Literary Terms How to Teach Plot Character Setting and Theme.md deleted file mode 100644 index 90652c56d6994a0a4e09b4b8bd5db56a33061b09..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/8th Grade Lesson Plans for Literary Terms How to Teach Plot Character Setting and Theme.md +++ /dev/null @@ -1,25 +0,0 @@ - -

      A scavenger hunt would work great to practice literary genres, not just the big ones like novels, short stories, and poems, but also the more specific ones like historical fiction, science fiction, and so on. This one is especially easy. Just give each kid or group a list of the terms and set them loose to browse the classroom library and bookshelves in search of examples.

      -

      8th grade lesson plans for literary terms


      Download Zip: https://tinurli.com/2uwkn3



      -

      Kids working together in groups can gain a good understanding of what certain literary terms really mean by helping each other with definitions and examples. Demonstrating what they have discovered to the whole class shows a further level of understanding.

      -

      Any good story, and many good informational titles, will include examples of a number of literary terms. One nice follow-up activity after any whole class story is to have students search the story for examples of whichever literary terms are prominent in that story.

      -

      One group of literary terms that many classes start with in the beginning of the year is story elements. Here is one idea for a student writing project that incorporates the ideas of character, setting, plot, conflict, resolution, and theme. This project, in which students plan and then write a story, could be done as a whole class or in small groups. Here are the steps:

      -

      -

      Are you looking for a literary devices lesson plan? This literary terms lesson covers seven literary terms related to poetry and other literature. This lesson plan on literary terms is appropriate for students in the upper elementary or middle school grades.

      -

      Verify how well students have mastered the literary terms covered in this lesson plan by assigning the practice questions below. Add these practice questions to a blank document to create a worksheet. You can use this as homework or an in-class activity. Check student work using the answer key below.

      -

      The literary devices discussed in this lesson provide students with a great introduction to literary devices, though there are many more types of literary devices. Once students have mastered these, consider exploring examples of foreshadowing. From there, move into examples of analogy.

      -

      Freytag's Pyramid is a tool for mapping plot structure, which allows readers to visualize the key features of stories. Students whose experience with text is limited have internalized the pattern described by Freytag's Pyramid through oral storytelling and television viewing. They need help seeing that the patterns they are familiar with are the same ones writers use to construct a short story, play, or novel. This lesson plan provides a basic introduction to Freytag's Pyramid and to the literary element of plot. After viewing a brief presentation about plot structure, students brainstorm the significant events in a story with which they are all familiar and place those events on Freytag's Pyramid. They work in small groups to map the plot of another story. For homework, they map the plot of a favorite television show. Finally, they apply their knowledge of Freytag's Pyramid to map the plot of a narrative poem.

      -

      Write literary terms on the board. You will need one for each student. Yes, there are enough literary terms: plot, setting, exposition, mood, theme, tone, character, conflict, 5 types of conflict, point of view, three types of point of view, dramatic, verbal, and situational irony, suspense, foreshadowing, alliteration, synecdoche, personification, metaphor, simile, onomatopoeia, hyperbole, meiosis, rhythm, meter, voice, style.....

    5. Pronounce each term and have students repeat it.
    6. Assign one term to each student.
    7. On a clear slice of paper, each student will design a poster. The poster will have the term at the top with its definition below. The middle of the paper will have a visual representation--picture or symbol--that represents the term along with a written example from a piece of literature familiar to the class.
    8. Instruct students to extract a slice of notebook paper and copy the terms. Leave enough room to take notes next to each word.
    9. Everything should be removed from desks except the literary term poster, the notes paper, and a writing instrument.
    10. Every 45 seconds, shout pass. Students will pass their paper to the next designated person. Each student will have 45 seconds to study each literary terms poster. Once the term has made it around the room, stop.
    11. Instruct students to make any corrections on any poster.
    12. Do the activity in part one of this series.
    13. Give a quiz the next day and boast how great you are at teaching to different learning styles and how you deserve a raise and a vacation in the Dominican Republic!
    ELA Common Core Standards Covered
    Amaze your administrator by teaching literary terms to different learning styles. Here are some ELA Common Core Standards to cement your raise.

    -

    Are you introducing poetry to your students? One of the best ways to teach poetry is to explore the structure of the poem. These structures are also known as the elements of poetry. The basic elements of poetry include meter, rhyme scheme, verse, and stanza. In order to dive deeper into poetry, students will first need to understand these structural elements. In this blog post, you will learn strategies for teaching poetry and ideas for your elements of poetry lesson plans.

    -

    Drama Works! Companion Book of Lesson Plans by Jonas Basom contains the 150 printed lesson plans from Drama Works! Online. The lessons include more than 1200 activities in total, including variations for beginner, intermediate, and advanced levels. Using the 65 indexes in the back, the teacher can quickly look up lessons by 25 drama skills, 12 theatre categories, 11 school subjects, 11 learning styles, and 6 age/grade level groups (preschool to college). The book also includes the glossary of theatre and literary terms with definitions.

    -

    The companion book allows the user to save time and money by not needing to print from Drama Works! Online. It provides offline access to the lessons without a computer and without needing to log in. The teacher or substitute has instant access to complete lesson plans ready to read and lead. The lessons were designed to align with the National Core Arts Standards, Texas theatre standards (TEKS), and Common Core ELA.

    -

    W= By the end of the lesson, students will have read and comprehended a story written with complex word choice and sentence structure. Students will understand the impact of literary devices such as personification, symbol, simile, and setting on a story.

    -

    T=This lesson can be tailored to various reading levels by allowing students able to read the text independently to do so; reading aloud to other students or utilizing an audio version for struggling readers; using an adapted version of the story for readers significantly below grade level.

    -

    O=This lesson is organized using before, during, and after reading activities to help students approach the text. Prior to this lesson, students would already have used student-friendly definition formulas, defined literary terms and plot elements and discussed their use in poetry, and gained relevant background knowledge on the author's life. Following the reading of this story, students would read one or two more stories with guidance from the teacher before reading and responding independently to a story. This technique scaffolds their ability to read complex texts independently.

    -

    Teacher and student supports include selection summaries in Spanish, English, Haitian Creole, and Brazilian Portuguese, plus multilingual glossaries of literary and informational terms in 10 languages.

    -

    I totally agree and know that students spend entirely too much time with level one identification of literary terms. However, students have to learn these terms before they can rigorously apply this knowledge to a text and make meaning.

    -

    I'm going to politely disagree with this author. As a writer, how can you use literary elements in your writing if you can't identify them? As an artist, we all learn the basic discipline-specific language so that we can have technical conversations about the art and its meaning. Without an understanding of that terminology, along with application beyond a multiple-choice test, I can't really expect any deep conversations with students about an author's craft. As a student, I may make a claim that has nothing to do with literary terminology or devices, but I may use those devices as support to make my claim. That language is a gateway to access deeper conversation in literature. That access is especially important for students with lower socioeconomic status. I've had the same issues with teaching grammar. If we don't teach terms like subject and predicate, how can we teach parallelism? With regard to standardized testing, I hope as English professionals we would be teaching skills beyond what testing asks of them. Getting a 5 on an AP test has little to do with a true understanding of great writing.

    -

    If we choose to ignore teaching students about literary devices, what then is the use of critical analysis in a novel study, or even the simple task of making sense of a story and its purpose? As I say to my students before every lesson involving critical analysis: we use analysis every day, whether driving, playing a sport, or creating a work of art. Without analysis, we would be a jump-first-and-think-afterwards kind of society. The world would be a different place without something so basic. Something to think about.

    -

    Hi Christina,
    I have spent the afternoon thinking about how to teach literary terms and looking for related teaching materials.
    I was relieved to find your webpage and your approach to teaching literary terms as it is action-based rather than theory-based. Great job!

    -
    -
    \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Beauty and the Goblin King (Fairy Tale Heat Book 1) PDF A Novel by Lidiya Foxglove.md b/spaces/cihyFjudo/fairness-paper-search/Beauty and the Goblin King (Fairy Tale Heat Book 1) PDF A Novel by Lidiya Foxglove.md deleted file mode 100644 index 4d4e619b51e0ea9dc2109b73b58e72d60cbb9f43..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Beauty and the Goblin King (Fairy Tale Heat Book 1) PDF A Novel by Lidiya Foxglove.md +++ /dev/null @@ -1,23 +0,0 @@ - -

    "How does a book published as nonfiction sneak onto a list of fiction?" asks judge Stephen Graham Jones. "Easy: Read it all as made up, while also, for the scare, completely and 100 percent (secretly) believing in it, because not believing in this case draws a bull's-eye on your back that can only be seen from the sky." Our judges had a hard time deciding between Communion and Whitley Strieber's equally scary fictional Roswell alien tale Majestic -- so why not read them both?

    -

    Robert W. Chambers' "King in Yellow" stories "are a foundational classic that doesn't get as much attention as Lovecraft for the simple reason that there are only four of them," says our judge Ruthanna Emrys. "This is the best of the lot and a sterling example of a story where the narrative undermines the narrator's prejudices (and eventually everything else he says). It starts with the main character talking approvingly about a rising fascist movement complete with 'suicide chambers' and forced removal of Jews, but quickly becomes obvious that the author is not in sympathy." She also points out that Chambers was one of the first authors to imagine a book (or in this case a play) that harms its readers.

    -

    Beauty and the Goblin King (Fairy Tale Heat Book 1) book pdf


    Download: https://tinurli.com/2uwjc0



    -

    Anne Rivers Siddons was best known for writing posh fiction about posh Southern people when she turned out this perfect haunted house novel. Taking one part economic anxiety from Robert Marasco's Burnt Offerings, one part emotional unease from Shirley Jackson's The Haunting of Hill House, and adding her own observations about Southern yuppies, she updated the haunted house formula to include this beautiful, modern home that wages unrelenting psychic warfare against its owners. Everyone has felt, at some point or another, that their house hates them. Siddons' book explains exactly how much.

    -

    Trafficking in the kind of American Gothic perfected by Ray Bradbury, John Bellairs' three books set in the fictional Michigan town of New Zebedee are lonely and charming and shot through with a sense of creeping damp and creeping doom. Sort of a Harry Potter for less sporty boys, they star chubby Lewis Barnavelt, who has been banished to live with his Uncle Jonathan after his parents die in a car wreck. The good news: Uncle Jonathan is a wizard. The bad news: Living with him means that Lewis will probably die. Simultaneously comforting and creepy, the New Zebedee books, with their scratchy illustrations by Edward Gorey, scarred children throughout the '70s and '80s.

    -

    High in the mountains, Zel lives with her mother, who insists they have all they need -- for they have each other. Zel's life is peaceful and protected -- until a chance encounter changes everything. When she meets a beautiful young prince at the market one day, she is profoundly moved by new emotions. But Zel's mother sees the future unfolding -- and she will do the unspeakable to prevent Zel from leaving her... "Will leave readers spellbound." -- Publishers Weekly, starred review

    Through the Woods, by Emily Carroll

    -

    'It came from the woods. Most strange things do.'

    Five mysterious, spine-tingling stories follow journeys into (and out of?) the eerie abyss.

    These chilling tales spring from the macabre imagination of acclaimed and award-winning comic creator Emily Carroll.

    Come take a walk in the woods and see what awaits you there...

    The Little Prince, by Antoine de Saint-Exupery, translated by Richard Howard

    -

    With an introduction by Helen Simpson. From familiar fairy tales and legends - Red Riding Hood, Bluebeard, Puss in Boots, Beauty and the Beast, vampires and werewolves - Angela Carter has created an absorbing collection of dark, sensual, fantastic stories.

    Mistress Masham's Repose, by T. H. White

    -

    My inner child is still captivated by the Lilliputian world of T.H. White's Mistress Masham's Repose every time I read it. I don't know why the idea of discovering a secret miniature kingdom is so alluring: I think it may have something to do with my love for dollhouses when I was a child. T.H. White was best known for The Once and Future King and The Sword in the Stone, based on the Arthurian legends; he was a master at taking an old story (Gulliver's Travels in the case of Mistress Masham's Repose) and making it truly his own.

    -

    The Tale of Despereaux is the story of an unlikely hero. It reveals what happens when an overly tiny mouse with big ears dares to be different. He is fascinated by light, music, and a book left open in the library, but his peers do not approve. When Despereaux falls in love with the princess, the other mice condemn him to the dungeon. This usually means death, but Roscuro the rat intends to harm the princess, and Despereaux is determined to stop him. I enjoyed how the motivations of the key characters were subtly revealed, then brought together in clever and surprising ways. Every detail was included for a reason, making for a most satisfying read. Unable to stop, I read this modern fairy tale in one sitting.

    -

CHARMING BOOK.

Folk by Zoe Gilbert

    -

    -

As soon as I opened this book, I could see that all the things I personally love had been woven into this beautiful visual story. I enjoy how illuminated manuscripts and ancient books inspire its pages, down to the tiniest detail, and the book plays with these elements to enrich its storytelling. It is beautifully designed, with a wonderful integration of text and image: a stunning tale for young and old, with a message that makes my heart happy.

    -

    "A collection of darkly mischievous stories based on classic fairy tales"--Front flap.ExploreSimilar booksBook lists with this bookWhy do people like this book?TopicsMermaidsGenderFairy talesFeminismGenresComing soon...PreviewBookshop.orgAmazonAnd Her Smile Will Untether the UniverseByGwendolyn Kiste,

    -

The River Wife is a simple and subtle fable of love. It tells the story of the river wife - part human, part fish - whose duty is to tend the river but who instead falls in love with a man. Tender and melancholy, it speaks of desire and love, mothers and daughters, kinship and care, duty and sacrifice, water and wisdom. There is a great sternness and sadness here, coupled with gentleness. A love story, an environmental fable, a retelling of the Orpheus myth, The River Wife is grave, tender and otherworldly.

The Complete Fairy Tales by George MacDonald, U.C. Knoepflmacher

    -

George MacDonald occupied a major position in the intellectual life of his Victorian contemporaries. This volume brings together all eleven of his shorter fairy stories as well as his essay "The Fantastic Imagination". The subjects are those of traditional fantasy: good and wicked fairies, children embarking on elaborate quests, and journeys into unsettling dreamworlds. Within this familiar imaginative landscape, his children's stories were profoundly experimental, questioning the association of childhood with purity and innocence, and the need to separate fairy tale wonder from adult scepticism and disbelief.

Gossip from the Forest: The Tangled Roots of Our Forests and Fairytales by Sara Maitland

    -

    This book does that job beautifully. As a piece of narrative nonfiction, it collects fairy tales, personal memoirs, and natural history in a lyrical journey through the forests of England. Maitland centers each chapter on an English woodland and the stories associated with it, be they fairy tales or history. More importantly, she discusses not only how myth shapes culture, but how landscape shapes myth. I reference it time and again not only as an academic, but as an author who creates worlds rich in landscape and folklore.

    -

Young Anders is carried away from his bleak life as an unloved foster child in Stockholm, Sweden, to become Mio, son of the King of Farawayland.

Friends With The Monsters by Albany Walker

    -

Christmas is a wonderful time for magical tales that children love. In this one, a poor but good-hearted cobbler is rewarded for his honesty during the night, when clever elves sneak into his shop and make shoes for him to sell. It gives children the chance to imagine invisible helpers, and also the thrill of doing good deeds in secret.

The Elves and the Shoemaker by the Brothers Grimm, illustrated by Jim LaMarche

    -

Here is the classic tale of elfin magic, loved by generations of children and made new by an artist of international acclaim. Jim LaMarche's stunning paintings, reminiscent of his earlier work in The Rainbabies, are the perfect complement to this favorite Grimm fairy tale.

The Rainbabies by Laura Krauss Melmed, Jim LaMarche (illustrator)

    -
    -
    \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Download Apne Apne Phanday 1080p The most funny and social film of 2016.md b/spaces/cihyFjudo/fairness-paper-search/Download Apne Apne Phanday 1080p The most funny and social film of 2016.md deleted file mode 100644 index 0199b25ab9546254ac37866c9d97dfbd4c528e44..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Download Apne Apne Phanday 1080p The most funny and social film of 2016.md +++ /dev/null @@ -1,12 +0,0 @@ -
    -

While filling in the Mains application form, you should have your graduation marksheet with you as proof. For Prelims you only need to know your total marks and percentage; no documents are asked for at that stage. So arrange for your marksheet before the Mains.

    -

    Download Apne Apne Phanday 1080p


    Download ->->->-> https://tinurli.com/2uwjc7



    -

Sir, thank you so much for replying.
Sir, can I prepare through self-study?
My family will not let me go anywhere outside for coaching, and we do not have that much money either.
You said one thing rightly: I get very confused about what to study, what to skip, and how to study.
But sir, I want to become an IAS officer.
I am using my mobile phone for guidance,
and sir, your guidance is very good for Hindi-medium students.
I would request you
to prepare a topic that helps people reach their goal easily.
Please add one such topic to your questions,
so that students learn how they should study, which books to study from, and what to use on mobile and which magazine to read for current affairs,
what they should read and listen to daily,
and please also give a list of books along with their authors.
Thank you so much.
I hope you will think this over seriously and help me and the other students.

    -

Sir, first of all I would like to express my gratitude: you have
already given us a great deal, and I hope you will
keep doing so in the future.
Can we rely entirely on Sansar Lochan for the Mains?
You will have to make one more big effort, sir:
cover and explain every topic, little by little, according to the Mains syllabus,
so that the financial exploitation by the coaching institutes is reduced at least to some extent.
Thank you very much, sir; we are all grateful to you.

    -

Prashant ji, first of all you must complete your graduation. UPSC does not look at your 10th or 12th marks; if you are a graduate, you are eligible for UPSC.
Secondly, you said that you are not physically fit. If you have some disorder or difficulty, you may even get a concession for it in the exam. So tell us openly what exactly the problem is!

    -

I have already told you about the books. Go to Google and type "ias books in hindi"; you will find the link to my site there. For now, strengthen your general studies and buy the books I have suggested.

    -

    -

Sir, I want to know which optional subjects have complete study material available in the market for the Hindi medium, so that I can choose the right optional for myself. Please help me, sir.

    -

Hello sir, I passed 12th in arts in 2012 and will now do a BA through distance learning from 2017, with the first year completing in 2018. I have taken Political Science and Public Administration as subjects. Sir, I am a Home Guard and now I want to serve the country at a higher level by becoming an IPS officer. My age is now 27, I belong to the OBC category, I am married, and I have a five-year-old son. Sir, can I still become an IPS officer, or should I focus on my son so that I can make him one?

    -
    -
    \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Giveaway PhraseExpress Standard Edition for Windows and Mac FREE - Enter Now!.md b/spaces/cihyFjudo/fairness-paper-search/Giveaway PhraseExpress Standard Edition for Windows and Mac FREE - Enter Now!.md deleted file mode 100644 index 814246654ab33cc8b071f5d736a31134d66f64e4..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Giveaway PhraseExpress Standard Edition for Windows and Mac FREE - Enter Now!.md +++ /dev/null @@ -1,7 +0,0 @@ - -

As we cannot test every possible combination of hardware and software, we kindly ask for your understanding that we are unable to make a reliable compatibility statement about your individual setup. That is why we offer free trial versions of our software, which let you try the software and test compatibility with your own system configuration before making a purchase decision. The trial versions allow you to test almost everything; restart the program for another trial period. If you decide to purchase our software after careful examination in your own environment, no additional installation is required: you will receive a license key, which removes the occasional license reminders and turns your trial installation into a paid edition, and you can continue to use any data created with the demo version. Please note that we do not offer refunds, so take your time to test everything.

Can I transfer a license to another user? Yes, you can deregister your license key from one installation for use with another user name as often as you want. You can also move the license from a Windows installation to a Mac installation (and vice versa).

    -

    Giveaway: PhraseExpress Standard Edition for Windows and Mac FREE


    Download ☆☆☆ https://tinurli.com/2uwk5O



    -

Yes, PhraseExpress can store both images and tables in phrases. Image files can easily be dragged into the phrase content and resized with the mouse.

How do I renew maintenance? Returning customers get a renewal discount. You can also upgrade to a higher product edition:

  • Maintenance for single licenses can be extended online.
• To renew maintenance for multiple licenses, please send your license keys to receive a custom quote.

How many monitors does ShareMouse support? The ShareMouse Standard Edition supports exactly one display per computer. The Professional Edition supports a maximum of four monitors per computer, but probably a maximum of 26 monitors in total; unfortunately, we are not able to state the exact maximum number of supported displays, as we do not have that many monitors in our labs. Please test the free demo version for compatibility with your individual setup.

Can Macro Recorder record either keyboard or mouse input only? Macro Recorder always captures both the mouse and the keyboard, but you can filter either mouse or keyboard input from the playback, giving you a choice of which part of the recording you need. For example, Macro Recorder can play only the mouse clicks but omit the mouse movement paths, or play the mouse movements faster or slower on a case-by-case basis.

    -

Using PhraseExpress will help you type faster. You can set up keyboard shortcuts for specific sentences or words, and when you use a shortcut, PhraseExpress fills in the corresponding phrase automatically. In addition to working in many applications, PhraseExpress can also be used on Windows 11, where users typically rely on it while writing emails or reports. PhraseExpress offers a 30-day free trial of all its features. After the trial period you keep a free personal account, but you lose access to the advanced and commercial features. If you want a paid account, you can choose a Standard, Professional, or Enterprise subscription.

    -
    -
    \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Lectura veraniega Cmo fomentar el hbito de la lectura en los nios y adolescentes.md b/spaces/cihyFjudo/fairness-paper-search/Lectura veraniega Cmo fomentar el hbito de la lectura en los nios y adolescentes.md deleted file mode 100644 index 614d26d31e8a266164f4e15b80a6beb50368ce9b..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Lectura veraniega Cmo fomentar el hbito de la lectura en los nios y adolescentes.md +++ /dev/null @@ -1,22 +0,0 @@ - -

The author of Malena es un nombre de tango, Los aires difíciles, El corazón helado and Los pacientes del doctor García (Tusquets), the latest instalment in her series Los episodios de una guerra interminable, opts for reading that revisits uncomfortable pages from a not-so-distant past.

    -

Vengaré tu muerte (Alfaguara) is the third novel the author has devoted to crime fiction, after El verano del inglés and Naturaleza muerta. The academician favours reading that is both groundbreaking and classic.

    -

    Lectura veraniega


    Download File –––––>>> https://tinurli.com/2uwjHM



    -

That way of presenting it makes it look like a textbook. And, to be honest, on holiday or over a weekend, which is when I read it, I am not in the mood for a textbook. If you ignore that and dive straight into the chapters, even in a more or less random order, you will realise that what you are holding are the reflections of a group of thinkers who understand economics very deeply, and far less orthodoxly than any textbook can offer. The whole is very pleasant to read because it is full of challenging, out-of-the-ordinary ideas. So what I am going to do is leave you with a few passages I underlined that caught my attention, and suggest reading it out of order and with an open mind. It will not disappoint you.

    -


In this book, the author describes his perception of the landscape as a cabinet of curiosities: he explains how he collects diverse phenomena and individual elements, relates them to one another and rearranges them. Through its light, readable pages, he makes a whole cosmos unfold in the reader's mind, one in which nature's lack of wholeness appears as a gain rather than a loss. Günther Vogt is just such a collector, and his cabinet of curiosities is a compendium of landscapes and of his way of understanding them.

    -

This summer will be a somewhat special one, and reading can be a great ally. The following selection of new releases is aimed at adults, young people and children, and there is something for every taste: fiction, educational titles, short stories, essays... Pick yours!

    -

As well as a writer, the Austrian author was a passionate reader who published countless reviews in the press of his day and wrote the prologues to numerous works by other authors. Now the publisher Acantilado brings together various texts in which the writer invites us to reflect on his love of and passion for literature and books, and extends an invitation to dialogue so that other readers catch that same passion for reading.

    -

An endless parade of extraordinary characters will amuse primary-school children, move them and explain life to them. By accompanying them on their adventures, children will be able to relate to and recognise some of the conflicts they will face, see how these paper heroes resolve them, and acquire useful strategies for their own development. Structured as forty-five readings, the book also offers a range of stimulating and thought-provoking teaching activities.

    -

Can you feel it, reader? It is the warmth of the summer sun already on your skin. Shorts are appearing in the street and jackets are being relegated to the back of the wardrobe. And if there is anything we like more than summer nights, holidays, the pool and relaxing, it is precisely what we are going to read! By the pool. Looking out at the sea. In the mountains. At home. On a terrace in our street, or back in the village for a little peace and quiet (and more reading, of course). And, like everything in summer, what we most feel like reading is short, light books.

    -

    -

If you prefer, and sticking with light summer reading, you can pick up the latest book by Megan Maxwell, ¿Y si lo probamos?. This time the queen of romance introduces us to Verónica, a 38-year-old woman who is independent, hard-working, self-employed and rather stubborn and controlling. After a romantic disappointment, Verónica decides to enjoy sex without commitment. Her rules: no getting involved with married men, no mixing work and fun, and always men under thirty.

    -

In our lives we cross paths with many people, but few leave a mark and fewer still manage to change our life. Those are the people who really matter, and they star in the first novel by La Vecina Rubia. She herself tells us the plot of this book, which is going to be a must among your summer reads:

    -

Children are the luckiest of all, since they have many days of summer holidays in which to read. And for them there are also plenty of exciting options full of fantasy, adventure and mystery. Here are some light reads so they can take their books to the beach, the pool, the village or anywhere else.

    -

Summer arrives and, with it, the truce we grant ourselves every year to restore body and spirit. We have more free time and can return to activities that our suffocating daily routine pushed into the background. One of them is summer reading. But not just any reading.

    -

Experts say our children read less and less. They may decipher more text than ever (decipher in the literal sense, because the electronic messages they send one another are often genuinely in code), but their reading for pleasure has declined. They devote more and more time to digital media and less to paper.

    -

The online social networks they are "hooked on" have brought them some advantages (and quite a few drawbacks), but they have taken away from their reading; so note the authors of the study Menores y Redes Sociales: "free reading (texts not required by schoolwork) is clearly a more frequent activity among non-users of social networks". Although the question will need deeper research in the future, it seems clear that social network use affects reading as a hobby.

    -

A report published in the United States under the title Generación M found that "reading for pleasure of any kind correlates more closely with a student's achievement than any other medium". In short, the advantage we can give our children by turning them into readers is enormous; as José Antonio Millán sums it up, "reading is the key to knowledge in the information society".

    -

Summer is a good time to "infect" our children with the "reading virus", to pass on to them the "vice" of holding a book in their hands, stroking its spine, pinching the corners of its pages to turn them. Once infected, they will no longer be able to resist the temptation to read, and this will make them more cultured, more capable, deeper, more reflective. In short: more human.

    -

Almost at the end of the summer, I bring you an ideal read for this season: Color Morado Traidor, published by ediciones SM in the El Barco de Vapor collection. It stars La Pandilla de la Lupa, whom I had never heard of before, but from now on I will be looking out for their adventures.





Synopsis
Imagine, one ordinary summer, four friends enjoying their holidays. This year is a little special: Alex, Manu, Carol and Erika are going to spend a few days at the house of Alex's grandparents in the village.
The truth is, they have no idea of the adventures awaiting them there. They will meet Alex's family: Mario, the teenage brother who spends all day elsewhere; Julio, the little brother hiding a small secret; the aunts and uncles, each with something special about them; and Lola, the mysterious Chinese girl with a sporting secret whom everyone talks about.



They will have two mysteries to solve: the daily disappearance of one of Carol's porras, with the resulting fury, and a strange green light accompanied by whispers that has Manu thoroughly frightened. And all that while they roam the hills, bathe in rock pools, shop at the market and play Cluedo.
Who would have imagined that spending a few days with the family would be so much fun? Not them, and when they reach the end of their stay they know they will rarely have such a good time again.




Opinion
At first I was a little sceptical about the story I was about to read. I thought I had another Famous Five in a Spanish version in front of me. But that is not the case: the characters are treated with great warmth, and you can see yourself in some of them. The adventure, or adventures, are, how to put it, simple, with no big twists or grandiloquence. They are things that could really happen: who hasn't placed an order for churros and porras and found, in the end, that there weren't enough to go round?
It is about healthy friendship: a group of children who spend the summer together, have fun with simple things and enjoy nature.
The text is also organised like a diary in which each character describes the day, or the situations, from their own point of view. That lets us see clearly the four very different personalities of the protagonists.



And the illustrations scattered throughout the book help a great deal, if you are short on imagination, to get into the story and form a picture of its characters.
So it is a very recommendable book for readers with a little experience, since the print is large although there is a lot of text.
Have a good read!


    -
    -
    \ No newline at end of file diff --git a/spaces/cleanmaster/akagi-sovits3/README.md b/spaces/cleanmaster/akagi-sovits3/README.md deleted file mode 100644 index 3c969c09a164d54541550ca9a095cf124dd8bb44..0000000000000000000000000000000000000000 --- a/spaces/cleanmaster/akagi-sovits3/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Akagi Sovits3 -emoji: 💻 -colorFrom: blue -colorTo: pink -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/grUtils.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/grUtils.py deleted file mode 100644 index 785684b1eb30a76ae598bfe46416d4556fc422a0..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/grUtils.py +++ /dev/null @@ -1,92 +0,0 @@ -import struct, warnings - -try: - import lz4 -except ImportError: - lz4 = None -else: - import lz4.block - -# old scheme for VERSION < 0.9 otherwise use lz4.block - - -def decompress(data): - (compression,) = struct.unpack(">L", data[4:8]) - scheme = compression >> 27 - size = compression & 0x07FFFFFF - if scheme == 0: - pass - elif scheme == 1 and lz4: - res = lz4.block.decompress(struct.pack("L", (scheme << 27) + (len(data) & 0x07FFFFFF)) - if scheme == 0: - return data - elif scheme == 1 and lz4: - res = lz4.block.compress( - data, mode="high_compression", compression=16, store_size=False - ) - return hdr + res - else: - warnings.warn("Table failed to compress by unsupported compression scheme") - return data - - -def _entries(attrs, sameval): - ak = 0 - vals = [] - lastv = 0 - for k, v in attrs: - if len(vals) and (k != ak + 1 or (sameval and v != lastv)): - yield (ak - len(vals) + 1, len(vals), vals) - vals = [] - ak = k - vals.append(v) - lastv = v - yield (ak - len(vals) + 1, len(vals), vals) - - -def entries(attributes, sameval=False): - g = _entries(sorted(attributes.items(), key=lambda x: int(x[0])), sameval) - return g - - -def bininfo(num, size=1): - if num == 0: - return struct.pack(">4H", 0, 0, 0, 0) - srange = 1 - select = 0 - while srange <= num: - srange *= 2 - select += 1 - select -= 1 - srange //= 2 - srange *= size - shift = num * size - srange - return struct.pack(">4H", num, srange, select, shift) - - -def num2tag(n): - if n < 0x200000: - return str(n) - else: - return ( - struct.unpack("4s", struct.pack(">L", n))[0].replace(b"\000", b"").decode() - ) - - -def tag2num(n): - try: - return int(n) - except ValueError: - n = (n + " ")[:4] - return struct.unpack(">L", n.encode("ascii"))[0] diff --git a/spaces/codelion/Grounding_DINO_demo/groundingdino/util/visualizer.py b/spaces/codelion/Grounding_DINO_demo/groundingdino/util/visualizer.py deleted file mode 100644 index 7a1b7b101e9b73f75f9136bc67f2063c7c1cf1c1..0000000000000000000000000000000000000000 --- a/spaces/codelion/Grounding_DINO_demo/groundingdino/util/visualizer.py +++ /dev/null @@ -1,318 +0,0 @@ -# -*- coding: utf-8 -*- -""" -@File : visualizer.py -@Time : 2022/04/05 11:39:33 -@Author : Shilong Liu -@Contact : slongliu86@gmail.com -""" - -import datetime -import os - -import cv2 -import matplotlib.pyplot as plt -import numpy as np -import torch -from matplotlib import transforms -from matplotlib.collections import PatchCollection -from matplotlib.patches import Polygon 
-from pycocotools import mask as maskUtils - - -def renorm( - img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] -) -> torch.FloatTensor: - # img: tensor(3,H,W) or tensor(B,3,H,W) - # return: same as img - assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim() - if img.dim() == 3: - assert img.size(0) == 3, 'img.size(0) shoule be 3 but "%d". (%s)' % ( - img.size(0), - str(img.size()), - ) - img_perm = img.permute(1, 2, 0) - mean = torch.Tensor(mean) - std = torch.Tensor(std) - img_res = img_perm * std + mean - return img_res.permute(2, 0, 1) - else: # img.dim() == 4 - assert img.size(1) == 3, 'img.size(1) shoule be 3 but "%d". (%s)' % ( - img.size(1), - str(img.size()), - ) - img_perm = img.permute(0, 2, 3, 1) - mean = torch.Tensor(mean) - std = torch.Tensor(std) - img_res = img_perm * std + mean - return img_res.permute(0, 3, 1, 2) - - -class ColorMap: - def __init__(self, basergb=[255, 255, 0]): - self.basergb = np.array(basergb) - - def __call__(self, attnmap): - # attnmap: h, w. np.uint8. - # return: h, w, 4. np.uint8. - assert attnmap.dtype == np.uint8 - h, w = attnmap.shape - res = self.basergb.copy() - res = res[None][None].repeat(h, 0).repeat(w, 1) # h, w, 3 - attn1 = attnmap.copy()[..., None] # h, w, 1 - res = np.concatenate((res, attn1), axis=-1).astype(np.uint8) - return res - - -def rainbow_text(x, y, ls, lc, **kw): - """ - Take a list of strings ``ls`` and colors ``lc`` and place them next to each - other, with text ls[i] being shown in color lc[i]. - - This example shows how to do both vertical and horizontal text, and will - pass all keyword arguments to plt.text, so you can set the font size, - family, etc. - """ - t = plt.gca().transData - fig = plt.gcf() - plt.show() - - # horizontal version - for s, c in zip(ls, lc): - text = plt.text(x, y, " " + s + " ", color=c, transform=t, **kw) - text.draw(fig.canvas.get_renderer()) - ex = text.get_window_extent() - t = transforms.offset_copy(text._transform, x=ex.width, units="dots") - - # #vertical version - # for s,c in zip(ls,lc): - # text = plt.text(x,y," "+s+" ",color=c, transform=t, - # rotation=90,va='bottom',ha='center',**kw) - # text.draw(fig.canvas.get_renderer()) - # ex = text.get_window_extent() - # t = transforms.offset_copy(text._transform, y=ex.height, units='dots') - - -class COCOVisualizer: - def __init__(self, coco=None, tokenlizer=None) -> None: - self.coco = coco - - def visualize(self, img, tgt, caption=None, dpi=180, savedir="vis"): - """ - img: tensor(3, H, W) - tgt: make sure they are all on cpu. 
- must have items: 'image_id', 'boxes', 'size' - """ - plt.figure(dpi=dpi) - plt.rcParams["font.size"] = "5" - ax = plt.gca() - img = renorm(img).permute(1, 2, 0) - # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': - # import ipdb; ipdb.set_trace() - ax.imshow(img) - - self.addtgt(tgt) - - if tgt is None: - image_id = 0 - elif "image_id" not in tgt: - image_id = 0 - else: - image_id = tgt["image_id"] - - if caption is None: - savename = "{}/{}-{}.png".format( - savedir, int(image_id), str(datetime.datetime.now()).replace(" ", "-") - ) - else: - savename = "{}/{}-{}-{}.png".format( - savedir, caption, int(image_id), str(datetime.datetime.now()).replace(" ", "-") - ) - print("savename: {}".format(savename)) - os.makedirs(os.path.dirname(savename), exist_ok=True) - plt.savefig(savename) - plt.close() - - def addtgt(self, tgt): - """ """ - if tgt is None or not "boxes" in tgt: - ax = plt.gca() - - if "caption" in tgt: - ax.set_title(tgt["caption"], wrap=True) - - ax.set_axis_off() - return - - ax = plt.gca() - H, W = tgt["size"] - numbox = tgt["boxes"].shape[0] - - color = [] - polygons = [] - boxes = [] - for box in tgt["boxes"].cpu(): - unnormbbox = box * torch.Tensor([W, H, W, H]) - unnormbbox[:2] -= unnormbbox[2:] / 2 - [bbox_x, bbox_y, bbox_w, bbox_h] = unnormbbox.tolist() - boxes.append([bbox_x, bbox_y, bbox_w, bbox_h]) - poly = [ - [bbox_x, bbox_y], - [bbox_x, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y], - ] - np_poly = np.array(poly).reshape((4, 2)) - polygons.append(Polygon(np_poly)) - c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0] - color.append(c) - - p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.1) - ax.add_collection(p) - p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2) - ax.add_collection(p) - - if "strings_positive" in tgt and len(tgt["strings_positive"]) > 0: - assert ( - len(tgt["strings_positive"]) == numbox - ), f"{len(tgt['strings_positive'])} = {numbox}, " - for idx, strlist in enumerate(tgt["strings_positive"]): - cate_id = int(tgt["labels"][idx]) - _string = str(cate_id) + ":" + " ".join(strlist) - bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx] - # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1}) - ax.text( - bbox_x, - bbox_y, - _string, - color="black", - bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1}, - ) - - if "box_label" in tgt: - assert len(tgt["box_label"]) == numbox, f"{len(tgt['box_label'])} = {numbox}, " - for idx, bl in enumerate(tgt["box_label"]): - _string = str(bl) - bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx] - # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1}) - ax.text( - bbox_x, - bbox_y, - _string, - color="black", - bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1}, - ) - - if "caption" in tgt: - ax.set_title(tgt["caption"], wrap=True) - # plt.figure() - # rainbow_text(0.0,0.0,"all unicorns poop rainbows ! ! 
!".split(), - # ['red', 'orange', 'brown', 'green', 'blue', 'purple', 'black']) - - if "attn" in tgt: - # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': - # import ipdb; ipdb.set_trace() - if isinstance(tgt["attn"], tuple): - tgt["attn"] = [tgt["attn"]] - for item in tgt["attn"]: - attn_map, basergb = item - attn_map = (attn_map - attn_map.min()) / (attn_map.max() - attn_map.min() + 1e-3) - attn_map = (attn_map * 255).astype(np.uint8) - cm = ColorMap(basergb) - heatmap = cm(attn_map) - ax.imshow(heatmap) - ax.set_axis_off() - - def showAnns(self, anns, draw_bbox=False): - """ - Display the specified annotations. - :param anns (array of object): annotations to display - :return: None - """ - if len(anns) == 0: - return 0 - if "segmentation" in anns[0] or "keypoints" in anns[0]: - datasetType = "instances" - elif "caption" in anns[0]: - datasetType = "captions" - else: - raise Exception("datasetType not supported") - if datasetType == "instances": - ax = plt.gca() - ax.set_autoscale_on(False) - polygons = [] - color = [] - for ann in anns: - c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0] - if "segmentation" in ann: - if type(ann["segmentation"]) == list: - # polygon - for seg in ann["segmentation"]: - poly = np.array(seg).reshape((int(len(seg) / 2), 2)) - polygons.append(Polygon(poly)) - color.append(c) - else: - # mask - t = self.imgs[ann["image_id"]] - if type(ann["segmentation"]["counts"]) == list: - rle = maskUtils.frPyObjects( - [ann["segmentation"]], t["height"], t["width"] - ) - else: - rle = [ann["segmentation"]] - m = maskUtils.decode(rle) - img = np.ones((m.shape[0], m.shape[1], 3)) - if ann["iscrowd"] == 1: - color_mask = np.array([2.0, 166.0, 101.0]) / 255 - if ann["iscrowd"] == 0: - color_mask = np.random.random((1, 3)).tolist()[0] - for i in range(3): - img[:, :, i] = color_mask[i] - ax.imshow(np.dstack((img, m * 0.5))) - if "keypoints" in ann and type(ann["keypoints"]) == list: - # turn skeleton into zero-based index - sks = np.array(self.loadCats(ann["category_id"])[0]["skeleton"]) - 1 - kp = np.array(ann["keypoints"]) - x = kp[0::3] - y = kp[1::3] - v = kp[2::3] - for sk in sks: - if np.all(v[sk] > 0): - plt.plot(x[sk], y[sk], linewidth=3, color=c) - plt.plot( - x[v > 0], - y[v > 0], - "o", - markersize=8, - markerfacecolor=c, - markeredgecolor="k", - markeredgewidth=2, - ) - plt.plot( - x[v > 1], - y[v > 1], - "o", - markersize=8, - markerfacecolor=c, - markeredgecolor=c, - markeredgewidth=2, - ) - - if draw_bbox: - [bbox_x, bbox_y, bbox_w, bbox_h] = ann["bbox"] - poly = [ - [bbox_x, bbox_y], - [bbox_x, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y + bbox_h], - [bbox_x + bbox_w, bbox_y], - ] - np_poly = np.array(poly).reshape((4, 2)) - polygons.append(Polygon(np_poly)) - color.append(c) - - # p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4) - # ax.add_collection(p) - p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2) - ax.add_collection(p) - elif datasetType == "captions": - for ann in anns: - print(ann["caption"]) diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/adxenc.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/adxenc.c deleted file mode 100644 index 796efdab63bd89f3ae0597e95ea320bff783652f..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/adxenc.c +++ /dev/null @@ -1,203 +0,0 @@ -/* - * ADX ADPCM codecs - * Copyright (c) 2001,2003 BERO - * - * This file is part of FFmpeg. 
- * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "avcodec.h" -#include "adx.h" -#include "bytestream.h" -#include "codec_internal.h" -#include "encode.h" -#include "put_bits.h" - -/** - * @file - * SEGA CRI adx codecs. - * - * Reference documents: - * http://ku-www.ss.titech.ac.jp/~yatsushi/adx.html - * adx2wav & wav2adx http://www.geocities.co.jp/Playtown/2004/ - */ - -static void adx_encode(ADXContext *c, uint8_t *adx, const int16_t *wav, - ADXChannelState *prev, int channels) -{ - PutBitContext pb; - int scale; - int i, j; - int s0, s1, s2, d; - int max = 0; - int min = 0; - - s1 = prev->s1; - s2 = prev->s2; - for (i = 0, j = 0; j < 32; i += channels, j++) { - s0 = wav[i]; - d = s0 + ((-c->coeff[0] * s1 - c->coeff[1] * s2) >> COEFF_BITS); - if (max < d) - max = d; - if (min > d) - min = d; - s2 = s1; - s1 = s0; - } - - if (max == 0 && min == 0) { - prev->s1 = s1; - prev->s2 = s2; - memset(adx, 0, BLOCK_SIZE); - return; - } - - if (max / 7 > -min / 8) - scale = max / 7; - else - scale = -min / 8; - - if (scale == 0) - scale = 1; - - AV_WB16(adx, scale); - - init_put_bits(&pb, adx + 2, 16); - - s1 = prev->s1; - s2 = prev->s2; - for (i = 0, j = 0; j < 32; i += channels, j++) { - d = wav[i] + ((-c->coeff[0] * s1 - c->coeff[1] * s2) >> COEFF_BITS); - - d = av_clip_intp2(ROUNDED_DIV(d, scale), 3); - - put_sbits(&pb, 4, d); - - s0 = d * scale + ((c->coeff[0] * s1 + c->coeff[1] * s2) >> COEFF_BITS); - s2 = s1; - s1 = s0; - } - prev->s1 = s1; - prev->s2 = s2; - - flush_put_bits(&pb); -} - -#define HEADER_SIZE 36 - -static int adx_encode_header(AVCodecContext *avctx, uint8_t *buf, int bufsize) -{ - ADXContext *c = avctx->priv_data; - - bytestream_put_be16(&buf, 0x8000); /* header signature */ - bytestream_put_be16(&buf, HEADER_SIZE - 4); /* copyright offset */ - bytestream_put_byte(&buf, 3); /* encoding */ - bytestream_put_byte(&buf, BLOCK_SIZE); /* block size */ - bytestream_put_byte(&buf, 4); /* sample size */ - bytestream_put_byte(&buf, avctx->ch_layout.nb_channels); /* channels */ - bytestream_put_be32(&buf, avctx->sample_rate); /* sample rate */ - bytestream_put_be32(&buf, 0); /* total sample count */ - bytestream_put_be16(&buf, c->cutoff); /* cutoff frequency */ - bytestream_put_byte(&buf, 3); /* version */ - bytestream_put_byte(&buf, 0); /* flags */ - bytestream_put_be32(&buf, 0); /* unknown */ - bytestream_put_be32(&buf, 0); /* loop enabled */ - bytestream_put_be16(&buf, 0); /* padding */ - bytestream_put_buffer(&buf, "(c)CRI", 6); /* copyright signature */ - - return HEADER_SIZE; -} - -static av_cold int adx_encode_init(AVCodecContext *avctx) -{ - ADXContext *c = avctx->priv_data; - - if (avctx->ch_layout.nb_channels > 2) { - av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n"); - return AVERROR(EINVAL); - } - avctx->frame_size = BLOCK_SAMPLES; - - /* the cutoff can be adjusted, but this 
seems to work pretty well */ - c->cutoff = 500; - ff_adx_calculate_coeffs(c->cutoff, avctx->sample_rate, COEFF_BITS, c->coeff); - - return 0; -} - -static int adx_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, - const AVFrame *frame, int *got_packet_ptr) -{ - ADXContext *c = avctx->priv_data; - const int16_t *samples = frame ? (const int16_t *)frame->data[0] : NULL; - uint8_t *dst; - int channels = avctx->ch_layout.nb_channels; - int ch, out_size, ret; - - if (!samples) { - if (c->eof) - return 0; - if ((ret = ff_get_encode_buffer(avctx, avpkt, 18, 0)) < 0) - return ret; - c->eof = 1; - dst = avpkt->data; - bytestream_put_be16(&dst, 0x8001); - bytestream_put_be16(&dst, 0x000E); - bytestream_put_be64(&dst, 0x0); - bytestream_put_be32(&dst, 0x0); - bytestream_put_be16(&dst, 0x0); - *got_packet_ptr = 1; - return 0; - } - - out_size = BLOCK_SIZE * channels + !c->header_parsed * HEADER_SIZE; - if ((ret = ff_get_encode_buffer(avctx, avpkt, out_size, 0)) < 0) - return ret; - dst = avpkt->data; - - if (!c->header_parsed) { - int hdrsize; - if ((hdrsize = adx_encode_header(avctx, dst, avpkt->size)) < 0) { - av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n"); - return AVERROR(EINVAL); - } - dst += hdrsize; - c->header_parsed = 1; - } - - for (ch = 0; ch < channels; ch++) { - adx_encode(c, dst, samples + ch, &c->prev[ch], channels); - dst += BLOCK_SIZE; - } - - *got_packet_ptr = 1; - return 0; -} - -const FFCodec ff_adpcm_adx_encoder = { - .p.name = "adpcm_adx", - CODEC_LONG_NAME("SEGA CRI ADX ADPCM"), - .p.type = AVMEDIA_TYPE_AUDIO, - .p.id = AV_CODEC_ID_ADPCM_ADX, - .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | - AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE, - .priv_data_size = sizeof(ADXContext), - .init = adx_encode_init, - FF_CODEC_ENCODE_CB(adx_encode_frame), - .p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16, - AV_SAMPLE_FMT_NONE }, - .caps_internal = FF_CODEC_CAP_EOF_FLUSH, -}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/binkaudio.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/binkaudio.c deleted file mode 100644 index f28ecba7603f4a3247dd5feaa9a914c3ff49ae1e..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/binkaudio.c +++ /dev/null @@ -1,397 +0,0 @@ -/* - * Bink Audio decoder - * Copyright (c) 2007-2011 Peter Ross (pross@xvid.org) - * Copyright (c) 2009 Daniel Verkamp (daniel@drv.nu) - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * Bink Audio decoder - * - * Technical details here: - * http://wiki.multimedia.cx/index.php?title=Bink_Audio - */ - -#include "config_components.h" - -#include "libavutil/channel_layout.h" -#include "libavutil/intfloat.h" -#include "libavutil/mem_internal.h" -#include "libavutil/tx.h" - -#define BITSTREAM_READER_LE -#include "avcodec.h" -#include "decode.h" -#include "get_bits.h" -#include "codec_internal.h" -#include "internal.h" -#include "wma_freqs.h" - -#define MAX_DCT_CHANNELS 6 -#define MAX_CHANNELS 2 -#define BINK_BLOCK_MAX_SIZE (MAX_CHANNELS << 11) - -typedef struct BinkAudioContext { - GetBitContext gb; - int version_b; ///< Bink version 'b' - int first; - int channels; - int ch_offset; - int frame_len; ///< transform size (samples) - int overlap_len; ///< overlap size (samples) - int block_size; - int num_bands; - float root; - unsigned int bands[26]; - float previous[MAX_DCT_CHANNELS][BINK_BLOCK_MAX_SIZE / 16]; ///< coeffs from previous audio block - float quant_table[96]; - AVPacket *pkt; - AVTXContext *tx; - av_tx_fn tx_fn; -} BinkAudioContext; - - -static av_cold int decode_init(AVCodecContext *avctx) -{ - BinkAudioContext *s = avctx->priv_data; - int sample_rate = avctx->sample_rate; - int sample_rate_half; - int i, ret; - int frame_len_bits; - int max_channels = avctx->codec->id == AV_CODEC_ID_BINKAUDIO_RDFT ? MAX_CHANNELS : MAX_DCT_CHANNELS; - int channels = avctx->ch_layout.nb_channels; - - /* determine frame length */ - if (avctx->sample_rate < 22050) { - frame_len_bits = 9; - } else if (avctx->sample_rate < 44100) { - frame_len_bits = 10; - } else { - frame_len_bits = 11; - } - - if (channels < 1 || channels > max_channels) { - av_log(avctx, AV_LOG_ERROR, "invalid number of channels: %d\n", channels); - return AVERROR_INVALIDDATA; - } - av_channel_layout_uninit(&avctx->ch_layout); - av_channel_layout_default(&avctx->ch_layout, channels); - - s->version_b = avctx->extradata_size >= 4 && avctx->extradata[3] == 'b'; - - if (avctx->codec->id == AV_CODEC_ID_BINKAUDIO_RDFT) { - // audio is already interleaved for the RDFT format variant - avctx->sample_fmt = AV_SAMPLE_FMT_FLT; - if (sample_rate > INT_MAX / channels) - return AVERROR_INVALIDDATA; - sample_rate *= channels; - s->channels = 1; - if (!s->version_b) - frame_len_bits += av_log2(channels); - } else { - s->channels = channels; - avctx->sample_fmt = AV_SAMPLE_FMT_FLTP; - } - - s->frame_len = 1 << frame_len_bits; - s->overlap_len = s->frame_len / 16; - s->block_size = (s->frame_len - s->overlap_len) * FFMIN(MAX_CHANNELS, s->channels); - sample_rate_half = (sample_rate + 1LL) / 2; - if (avctx->codec->id == AV_CODEC_ID_BINKAUDIO_RDFT) - s->root = 2.0 / (sqrt(s->frame_len) * 32768.0); - else - s->root = s->frame_len / (sqrt(s->frame_len) * 32768.0); - for (i = 0; i < 96; i++) { - /* constant is result of 0.066399999/log10(M_E) */ - s->quant_table[i] = expf(i * 0.15289164787221953823f) * s->root; - } - - /* calculate number of bands */ - for (s->num_bands = 1; s->num_bands < 25; s->num_bands++) - if (sample_rate_half <= ff_wma_critical_freqs[s->num_bands - 1]) - break; - - /* populate bands data */ - s->bands[0] = 2; - for (i = 1; i < s->num_bands; i++) - s->bands[i] = (ff_wma_critical_freqs[i - 1] * s->frame_len / sample_rate_half) & ~1; - s->bands[s->num_bands] = s->frame_len; - - 
s->first = 1; - - if (CONFIG_BINKAUDIO_RDFT_DECODER && avctx->codec->id == AV_CODEC_ID_BINKAUDIO_RDFT) { - float scale = 0.5; - ret = av_tx_init(&s->tx, &s->tx_fn, AV_TX_FLOAT_RDFT, 1, 1 << frame_len_bits, &scale, 0); - } else if (CONFIG_BINKAUDIO_DCT_DECODER) { - float scale = 1.0 / (1 << frame_len_bits); - ret = av_tx_init(&s->tx, &s->tx_fn, AV_TX_FLOAT_DCT, 1, 1 << (frame_len_bits - 1), &scale, 0); - } else { - av_assert0(0); - } - if (ret < 0) - return ret; - - s->pkt = avctx->internal->in_pkt; - - return 0; -} - -static float get_float(GetBitContext *gb) -{ - int power = get_bits(gb, 5); - float f = ldexpf(get_bits(gb, 23), power - 23); - if (get_bits1(gb)) - f = -f; - return f; -} - -static const uint8_t rle_length_tab[16] = { - 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 32, 64 -}; - -/** - * Decode Bink Audio block - * @param[out] out Output buffer (must contain s->block_size elements) - * @return 0 on success, negative error code on failure - */ -static int decode_block(BinkAudioContext *s, float **out, int use_dct, - int channels, int ch_offset) -{ - int ch, i, j, k; - float q, quant[25]; - int width, coeff; - GetBitContext *gb = &s->gb; - LOCAL_ALIGNED_32(float, coeffs, [4098]); - - if (use_dct) - skip_bits(gb, 2); - - for (ch = 0; ch < channels; ch++) { - if (s->version_b) { - if (get_bits_left(gb) < 64) - return AVERROR_INVALIDDATA; - coeffs[0] = av_int2float(get_bits_long(gb, 32)) * s->root; - coeffs[1] = av_int2float(get_bits_long(gb, 32)) * s->root; - } else { - if (get_bits_left(gb) < 58) - return AVERROR_INVALIDDATA; - coeffs[0] = get_float(gb) * s->root; - coeffs[1] = get_float(gb) * s->root; - } - - if (get_bits_left(gb) < s->num_bands * 8) - return AVERROR_INVALIDDATA; - for (i = 0; i < s->num_bands; i++) { - int value = get_bits(gb, 8); - quant[i] = s->quant_table[FFMIN(value, 95)]; - } - - k = 0; - q = quant[0]; - - // parse coefficients - i = 2; - while (i < s->frame_len) { - if (s->version_b) { - j = i + 16; - } else { - int v = get_bits1(gb); - if (v) { - v = get_bits(gb, 4); - j = i + rle_length_tab[v] * 8; - } else { - j = i + 8; - } - } - - j = FFMIN(j, s->frame_len); - - width = get_bits(gb, 4); - if (width == 0) { - memset(coeffs + i, 0, (j - i) * sizeof(*coeffs)); - i = j; - while (s->bands[k] < i) - q = quant[k++]; - } else { - while (i < j) { - if (s->bands[k] == i) - q = quant[k++]; - coeff = get_bits(gb, width); - if (coeff) { - int v; - v = get_bits1(gb); - if (v) - coeffs[i] = -q * coeff; - else - coeffs[i] = q * coeff; - } else { - coeffs[i] = 0.0f; - } - i++; - } - } - } - - if (CONFIG_BINKAUDIO_DCT_DECODER && use_dct) { - coeffs[0] /= 0.5; - s->tx_fn(s->tx, out[ch + ch_offset], coeffs, sizeof(float)); - } else if (CONFIG_BINKAUDIO_RDFT_DECODER) { - for (int i = 2; i < s->frame_len; i += 2) - coeffs[i + 1] *= -1; - - coeffs[s->frame_len + 0] = coeffs[1]; - coeffs[s->frame_len + 1] = coeffs[1] = 0; - s->tx_fn(s->tx, out[ch + ch_offset], coeffs, sizeof(AVComplexFloat)); - } - } - - for (ch = 0; ch < channels; ch++) { - int j; - int count = s->overlap_len * channels; - if (!s->first) { - j = ch; - for (i = 0; i < s->overlap_len; i++, j += channels) - out[ch + ch_offset][i] = (s->previous[ch + ch_offset][i] * (count - j) + - out[ch + ch_offset][i] * j) / count; - } - memcpy(s->previous[ch + ch_offset], &out[ch + ch_offset][s->frame_len - s->overlap_len], - s->overlap_len * sizeof(*s->previous[ch + ch_offset])); - } - - s->first = 0; - - return 0; -} - -static av_cold int decode_end(AVCodecContext *avctx) -{ - BinkAudioContext * s = avctx->priv_data; 
- av_tx_uninit(&s->tx); - return 0; -} - -static void get_bits_align32(GetBitContext *s) -{ - int n = (-get_bits_count(s)) & 31; - if (n) skip_bits(s, n); -} - -static int binkaudio_receive_frame(AVCodecContext *avctx, AVFrame *frame) -{ - BinkAudioContext *s = avctx->priv_data; - GetBitContext *gb = &s->gb; - int new_pkt, ret; - -again: - new_pkt = !s->pkt->data; - if (!s->pkt->data) { - ret = ff_decode_get_packet(avctx, s->pkt); - if (ret < 0) { - s->ch_offset = 0; - return ret; - } - - if (s->pkt->size < 4) { - av_log(avctx, AV_LOG_ERROR, "Packet is too small\n"); - ret = AVERROR_INVALIDDATA; - goto fail; - } - - ret = init_get_bits8(gb, s->pkt->data, s->pkt->size); - if (ret < 0) - goto fail; - - /* skip reported size */ - skip_bits_long(gb, 32); - } - - /* get output buffer */ - if (s->ch_offset == 0) { - frame->nb_samples = s->frame_len; - if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) - return ret; - if (!new_pkt) - frame->pts = AV_NOPTS_VALUE; - } - - if (decode_block(s, (float **)frame->extended_data, - avctx->codec->id == AV_CODEC_ID_BINKAUDIO_DCT, - FFMIN(MAX_CHANNELS, s->channels - s->ch_offset), s->ch_offset)) { - av_log(avctx, AV_LOG_ERROR, "Incomplete packet\n"); - s->ch_offset = 0; - return AVERROR_INVALIDDATA; - } - s->ch_offset += MAX_CHANNELS; - get_bits_align32(gb); - if (!get_bits_left(gb)) { - memset(gb, 0, sizeof(*gb)); - av_packet_unref(s->pkt); - } - if (s->ch_offset >= s->channels) { - s->ch_offset = 0; - } else { - goto again; - } - - frame->nb_samples = s->block_size / FFMIN(avctx->ch_layout.nb_channels, MAX_CHANNELS); - - return 0; -fail: - s->ch_offset = 0; - av_packet_unref(s->pkt); - return ret; -} - -static void decode_flush(AVCodecContext *avctx) -{ - BinkAudioContext *const s = avctx->priv_data; - - /* s->pkt coincides with avctx->internal->in_pkt - * and is unreferenced generically when flushing. */ - s->first = 1; - s->ch_offset = 0; -} - -const FFCodec ff_binkaudio_rdft_decoder = { - .p.name = "binkaudio_rdft", - CODEC_LONG_NAME("Bink Audio (RDFT)"), - .p.type = AVMEDIA_TYPE_AUDIO, - .p.id = AV_CODEC_ID_BINKAUDIO_RDFT, - .priv_data_size = sizeof(BinkAudioContext), - .init = decode_init, - .flush = decode_flush, - .close = decode_end, - FF_CODEC_RECEIVE_FRAME_CB(binkaudio_receive_frame), - .p.capabilities = AV_CODEC_CAP_DR1, - .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, -}; - -const FFCodec ff_binkaudio_dct_decoder = { - .p.name = "binkaudio_dct", - CODEC_LONG_NAME("Bink Audio (DCT)"), - .p.type = AVMEDIA_TYPE_AUDIO, - .p.id = AV_CODEC_ID_BINKAUDIO_DCT, - .priv_data_size = sizeof(BinkAudioContext), - .init = decode_init, - .flush = decode_flush, - .close = decode_end, - FF_CODEC_RECEIVE_FRAME_CB(binkaudio_receive_frame), - .p.capabilities = AV_CODEC_CAP_DR1, - .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, -}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/escape124.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/escape124.c deleted file mode 100644 index 592de09a9fdbed03b57557e8a908e3161a14ac8f..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/escape124.c +++ /dev/null @@ -1,385 +0,0 @@ -/* - * Escape 124 Video Decoder - * Copyright (C) 2008 Eli Friedman (eli.friedman@gmail.com) - * - * This file is part of FFmpeg. 
- * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#define BITSTREAM_READER_LE -#include "avcodec.h" -#include "codec_internal.h" -#include "decode.h" -#include "get_bits.h" - -typedef union MacroBlock { - uint16_t pixels[4]; - uint32_t pixels32[2]; -} MacroBlock; - -typedef union SuperBlock { - uint16_t pixels[64]; - uint32_t pixels32[32]; -} SuperBlock; - -typedef struct CodeBook { - unsigned depth; - unsigned size; - MacroBlock* blocks; -} CodeBook; - -typedef struct Escape124Context { - AVFrame *frame; - - unsigned num_superblocks; - - CodeBook codebooks[3]; -} Escape124Context; - -/** - * Initialize the decoder - * @param avctx decoder context - * @return 0 success, negative on error - */ -static av_cold int escape124_decode_init(AVCodecContext *avctx) -{ - Escape124Context *s = avctx->priv_data; - - avctx->pix_fmt = AV_PIX_FMT_RGB555; - - s->num_superblocks = ((unsigned)avctx->width / 8) * - ((unsigned)avctx->height / 8); - - s->frame = av_frame_alloc(); - if (!s->frame) - return AVERROR(ENOMEM); - - return 0; -} - -static av_cold int escape124_decode_close(AVCodecContext *avctx) -{ - unsigned i; - Escape124Context *s = avctx->priv_data; - - for (i = 0; i < 3; i++) - av_freep(&s->codebooks[i].blocks); - - av_frame_free(&s->frame); - - return 0; -} - -static CodeBook unpack_codebook(GetBitContext* gb, unsigned depth, - unsigned size) -{ - unsigned i, j; - CodeBook cb = { 0 }; - - cb.blocks = av_malloc(size ? 
size * sizeof(MacroBlock) : 1); - if (!cb.blocks) - return cb; - - cb.depth = depth; - cb.size = size; - for (i = 0; i < size; i++) { - unsigned mask_bits = get_bits(gb, 4); - unsigned color[2]; - color[0] = get_bits(gb, 15); - color[1] = get_bits(gb, 15); - - for (j = 0; j < 4; j++) - cb.blocks[i].pixels[j] = color[(mask_bits>>j) & 1]; - } - return cb; -} - -static unsigned decode_skip_count(GetBitContext* gb) -{ - unsigned value; - // This function reads a maximum of 23 bits, - // which is within the padding space - if (get_bits_left(gb) < 1) - return -1; - value = get_bits1(gb); - if (!value) - return value; - - value += get_bits(gb, 3); - if (value != (1 + ((1 << 3) - 1))) - return value; - - value += get_bits(gb, 7); - if (value != (1 + ((1 << 3) - 1)) + ((1 << 7) - 1)) - return value; - - return value + get_bits(gb, 12); -} - -static MacroBlock decode_macroblock(Escape124Context* s, GetBitContext* gb, - int* codebook_index, int superblock_index) -{ - // This function reads a maximum of 22 bits; the callers - // guard this function appropriately - unsigned block_index, depth; - int value = get_bits1(gb); - if (value) { - static const int8_t transitions[3][2] = { {2, 1}, {0, 2}, {1, 0} }; - value = get_bits1(gb); - *codebook_index = transitions[*codebook_index][value]; - } - - depth = s->codebooks[*codebook_index].depth; - - // depth = 0 means that this shouldn't read any bits; - // in theory, this is the same as get_bits(gb, 0), but - // that doesn't actually work. - block_index = get_bitsz(gb, depth); - - if (*codebook_index == 1) { - block_index += superblock_index << s->codebooks[1].depth; - } - - // This condition can occur with invalid bitstreams and - // *codebook_index == 2 - if (block_index >= s->codebooks[*codebook_index].size || !s->codebooks[*codebook_index].blocks) - return (MacroBlock) { { 0 } }; - - return s->codebooks[*codebook_index].blocks[block_index]; -} - -static void insert_mb_into_sb(SuperBlock* sb, MacroBlock mb, unsigned index) { - // Formula: ((index / 4) * 16 + (index % 4) * 2) / 2 - uint32_t *dst = sb->pixels32 + index + (index & -4); - - // This technically violates C99 aliasing rules, but it should be safe. - dst[0] = mb.pixels32[0]; - dst[4] = mb.pixels32[1]; -} - -static void copy_superblock(uint16_t* dest, ptrdiff_t dest_stride, - uint16_t* src, ptrdiff_t src_stride) -{ - unsigned y; - if (src) - for (y = 0; y < 8; y++) - memcpy(dest + y * dest_stride, src + y * src_stride, - sizeof(uint16_t) * 8); - else - for (y = 0; y < 8; y++) - memset(dest + y * dest_stride, 0, sizeof(uint16_t) * 8); -} - -static const uint16_t mask_matrix[] = {0x1, 0x2, 0x10, 0x20, - 0x4, 0x8, 0x40, 0x80, - 0x100, 0x200, 0x1000, 0x2000, - 0x400, 0x800, 0x4000, 0x8000}; - -static int escape124_decode_frame(AVCodecContext *avctx, AVFrame *frame, - int *got_frame, AVPacket *avpkt) -{ - int buf_size = avpkt->size; - Escape124Context *s = avctx->priv_data; - - GetBitContext gb; - unsigned frame_flags, frame_size; - unsigned i; - - unsigned superblock_index, cb_index = 1, - superblock_col_index = 0, - superblocks_per_row = avctx->width / 8, skip = -1; - - uint16_t* old_frame_data, *new_frame_data; - ptrdiff_t old_stride, new_stride; - - int ret; - - if ((ret = init_get_bits8(&gb, avpkt->data, avpkt->size)) < 0) - return ret; - - // This call also guards the potential depth reads for the - // codebook unpacking. - // Check if the amount we will read minimally is available on input. 
- // The 64 represent the immediately next 2 frame_* elements read, the 23/4320 - // represent a lower bound of the space needed for skipped superblocks. Non - // skipped SBs need more space. - if (get_bits_left(&gb) < 64 + s->num_superblocks * 23LL / 4320) - return AVERROR_INVALIDDATA; - - frame_flags = get_bits_long(&gb, 32); - frame_size = get_bits_long(&gb, 32); - - // Leave last frame unchanged - // FIXME: Is this necessary? I haven't seen it in any real samples - if (!(frame_flags & 0x114) || !(frame_flags & 0x7800000)) { - if (!s->frame->data[0]) - return AVERROR_INVALIDDATA; - - av_log(avctx, AV_LOG_DEBUG, "Skipping frame\n"); - - *got_frame = 1; - if ((ret = av_frame_ref(frame, s->frame)) < 0) - return ret; - - return frame_size; - } - - for (i = 0; i < 3; i++) { - if (frame_flags & (1 << (17 + i))) { - unsigned cb_depth, cb_size; - if (i == 2) { - // This codebook can be cut off at places other than - // powers of 2, leaving some of the entries undefined. - cb_size = get_bits(&gb, 20); - if (!cb_size) { - av_log(avctx, AV_LOG_ERROR, "Invalid codebook size 0.\n"); - return AVERROR_INVALIDDATA; - } - cb_depth = av_log2(cb_size - 1) + 1; - } else { - cb_depth = get_bits(&gb, 4); - if (i == 0) { - // This is the most basic codebook: pow(2,depth) entries - // for a depth-length key - cb_size = 1 << cb_depth; - } else { - // This codebook varies per superblock - // FIXME: I don't think this handles integer overflow - // properly - cb_size = s->num_superblocks << cb_depth; - } - } - if (s->num_superblocks >= INT_MAX >> cb_depth) { - av_log(avctx, AV_LOG_ERROR, "Depth or num_superblocks are too large\n"); - return AVERROR_INVALIDDATA; - } - - av_freep(&s->codebooks[i].blocks); - if (cb_size >= INT_MAX / 34 || get_bits_left(&gb) < (int)cb_size * 34) - return AVERROR_INVALIDDATA; - - if (cb_size >= INT_MAX / sizeof(MacroBlock)) - return AVERROR_INVALIDDATA; - s->codebooks[i] = unpack_codebook(&gb, cb_depth, cb_size); - if (!s->codebooks[i].blocks) - return AVERROR(ENOMEM); - } - } - - if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) - return ret; - - new_frame_data = (uint16_t*)frame->data[0]; - new_stride = frame->linesize[0] / 2; - old_frame_data = (uint16_t*)s->frame->data[0]; - old_stride = s->frame->linesize[0] / 2; - - for (superblock_index = 0; superblock_index < s->num_superblocks; - superblock_index++) { - MacroBlock mb; - SuperBlock sb; - unsigned multi_mask = 0; - - if (skip == -1) { - // Note that this call will make us skip the rest of the blocks - // if the frame prematurely ends - skip = decode_skip_count(&gb); - } - - if (skip) { - copy_superblock(new_frame_data, new_stride, - old_frame_data, old_stride); - } else { - copy_superblock(sb.pixels, 8, - old_frame_data, old_stride); - - while (get_bits_left(&gb) >= 1 && !get_bits1(&gb)) { - unsigned mask; - mb = decode_macroblock(s, &gb, &cb_index, superblock_index); - mask = get_bits(&gb, 16); - multi_mask |= mask; - for (i = 0; i < 16; i++) { - if (mask & mask_matrix[i]) { - insert_mb_into_sb(&sb, mb, i); - } - } - } - - if (!get_bits1(&gb)) { - unsigned inv_mask = get_bits(&gb, 4); - for (i = 0; i < 4; i++) { - if (inv_mask & (1 << i)) { - multi_mask ^= 0xF << i*4; - } else { - multi_mask ^= get_bits(&gb, 4) << i*4; - } - } - - for (i = 0; i < 16; i++) { - if (multi_mask & mask_matrix[i]) { - mb = decode_macroblock(s, &gb, &cb_index, - superblock_index); - insert_mb_into_sb(&sb, mb, i); - } - } - } else if (frame_flags & (1 << 16)) { - while (get_bits_left(&gb) >= 1 && !get_bits1(&gb)) { - mb = 
decode_macroblock(s, &gb, &cb_index, superblock_index); - insert_mb_into_sb(&sb, mb, get_bits(&gb, 4)); - } - } - - copy_superblock(new_frame_data, new_stride, sb.pixels, 8); - } - - superblock_col_index++; - new_frame_data += 8; - if (old_frame_data) - old_frame_data += 8; - if (superblock_col_index == superblocks_per_row) { - new_frame_data += new_stride * 8 - superblocks_per_row * 8; - if (old_frame_data) - old_frame_data += old_stride * 8 - superblocks_per_row * 8; - superblock_col_index = 0; - } - skip--; - } - - av_log(avctx, AV_LOG_DEBUG, - "Escape sizes: %i, %i, %i\n", - frame_size, buf_size, get_bits_count(&gb) / 8); - - av_frame_unref(s->frame); - if ((ret = av_frame_ref(s->frame, frame)) < 0) - return ret; - - *got_frame = 1; - - return frame_size; -} - - -const FFCodec ff_escape124_decoder = { - .p.name = "escape124", - CODEC_LONG_NAME("Escape 124"), - .p.type = AVMEDIA_TYPE_VIDEO, - .p.id = AV_CODEC_ID_ESCAPE124, - .priv_data_size = sizeof(Escape124Context), - .init = escape124_decode_init, - .close = escape124_decode_close, - FF_CODEC_DECODE_CB(escape124_decode_frame), - .p.capabilities = AV_CODEC_CAP_DR1, -}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/fitsdec.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/fitsdec.c deleted file mode 100644 index b9c51e70c311adc62bce57d6b968081bd30a2fdd..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/fitsdec.c +++ /dev/null @@ -1,333 +0,0 @@ -/* - * FITS image decoder - * Copyright (c) 2017 Paras Chadha - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * FITS image decoder - * - * Specification: https://fits.gsfc.nasa.gov/fits_standard.html Version 3.0 - * - * Support all 2d images alongwith, bzero, bscale and blank keywords. - * RGBA images are supported as NAXIS3 = 3 or 4 i.e. Planes in RGBA order. Also CTYPE = 'RGB ' should be present. - * Also to interpret data, values are linearly scaled using min-max scaling but not RGB images. - */ - -#include "avcodec.h" -#include "codec_internal.h" -#include "decode.h" -#include -#include "libavutil/intreadwrite.h" -#include "libavutil/intfloat.h" -#include "libavutil/dict.h" -#include "libavutil/opt.h" -#include "fits.h" - -typedef struct FITSContext { - const AVClass *class; - int blank_val; -} FITSContext; - -/** - * Calculate the data_min and data_max values from the data. - * This is called if the values are not present in the header. 
- * @param ptr8 pointer to the data - * @param header pointer to the header - * @param end pointer to end of packet - * @return 0 if calculated successfully otherwise AVERROR_INVALIDDATA - */ -static int fill_data_min_max(const uint8_t *ptr8, FITSHeader *header, const uint8_t *end) -{ - uint8_t t8; - int16_t t16; - int32_t t32; - int64_t t64; - float tflt; - double tdbl; - int i, j; - - header->data_min = DBL_MAX; - header->data_max = -DBL_MAX; - switch (header->bitpix) { -#define CASE_N(a, t, rd) \ - case a: \ - for (i = 0; i < header->naxisn[1]; i++) { \ - for (j = 0; j < header->naxisn[0]; j++) { \ - t = rd; \ - if (!header->blank_found || t != header->blank) { \ - if (t > header->data_max) \ - header->data_max = t; \ - if (t < header->data_min) \ - header->data_min = t; \ - } \ - ptr8 += abs(a) >> 3; \ - } \ - } \ - break - - CASE_N(-64, tdbl, av_int2double(AV_RB64(ptr8))); - CASE_N(-32, tflt, av_int2float(AV_RB32(ptr8))); - CASE_N(8, t8, ptr8[0]); - CASE_N(16, t16, AV_RB16(ptr8)); - CASE_N(32, t32, AV_RB32(ptr8)); - CASE_N(64, t64, AV_RB64(ptr8)); - default: - return AVERROR_INVALIDDATA; - } - return 0; -} - -/** - * Read the fits header and store the values in FITSHeader pointed by header - * @param avctx AVCodec context - * @param ptr pointer to pointer to the data - * @param header pointer to the FITSHeader - * @param end pointer to end of packet - * @param metadata pointer to pointer to AVDictionary to store metadata - * @return 0 if calculated successfully otherwise AVERROR_INVALIDDATA - */ -static int fits_read_header(AVCodecContext *avctx, const uint8_t **ptr, FITSHeader *header, - const uint8_t *end, AVDictionary **metadata) -{ - const uint8_t *ptr8 = *ptr; - int lines_read, bytes_left, i, ret; - size_t size; - - lines_read = 1; // to account for first header line, SIMPLE or XTENSION which is not included in packet... - avpriv_fits_header_init(header, STATE_BITPIX); - do { - if (end - ptr8 < 80) - return AVERROR_INVALIDDATA; - ret = avpriv_fits_header_parse_line(avctx, header, ptr8, &metadata); - ptr8 += 80; - lines_read++; - } while (!ret); - if (ret < 0) - return ret; - - bytes_left = (((lines_read + 35) / 36) * 36 - lines_read) * 80; - if (end - ptr8 < bytes_left) - return AVERROR_INVALIDDATA; - ptr8 += bytes_left; - - if (header->rgb && (header->naxis != 3 || (header->naxisn[2] != 3 && header->naxisn[2] != 4))) { - av_log(avctx, AV_LOG_ERROR, "File contains RGB image but NAXIS = %d and NAXIS3 = %d\n", header->naxis, header->naxisn[2]); - return AVERROR_INVALIDDATA; - } - - if (!header->rgb && header->naxis != 2) { - av_log(avctx, AV_LOG_ERROR, "unsupported number of dimensions, NAXIS = %d\n", header->naxis); - return AVERROR_INVALIDDATA; - } - - if (header->blank_found && (header->bitpix == -32 || header->bitpix == -64)) { - av_log(avctx, AV_LOG_WARNING, "BLANK keyword found but BITPIX = %d\n. 
Ignoring BLANK", header->bitpix); - header->blank_found = 0; - } - - size = abs(header->bitpix) >> 3; - for (i = 0; i < header->naxis; i++) { - if (size == 0 || header->naxisn[i] > SIZE_MAX / size) { - av_log(avctx, AV_LOG_ERROR, "unsupported size of FITS image"); - return AVERROR_INVALIDDATA; - } - size *= header->naxisn[i]; - } - - if (end - ptr8 < size) - return AVERROR_INVALIDDATA; - *ptr = ptr8; - - if (!header->rgb && (!header->data_min_found || !header->data_max_found)) { - ret = fill_data_min_max(ptr8, header, end); - if (ret < 0) { - av_log(avctx, AV_LOG_ERROR, "invalid BITPIX, %d\n", header->bitpix); - return ret; - } - } else { - /* - * instead of applying bscale and bzero to every element, - * we can do inverse transformation on data_min and data_max - */ - header->data_min = (header->data_min - header->bzero) / header->bscale; - header->data_max = (header->data_max - header->bzero) / header->bscale; - } - if (!header->rgb && header->data_min >= header->data_max) { - if (header->data_min > header->data_max) { - av_log(avctx, AV_LOG_ERROR, "data min/max (%g %g) is invalid\n", header->data_min, header->data_max); - return AVERROR_INVALIDDATA; - } - av_log(avctx, AV_LOG_WARNING, "data min/max indicates a blank image\n"); - header->data_max ++; - } - - return 0; -} - -static int fits_decode_frame(AVCodecContext *avctx, AVFrame *p, - int *got_frame, AVPacket *avpkt) -{ - const uint8_t *ptr8 = avpkt->data, *end; - uint8_t t8; - int16_t t16; - int32_t t32; - int64_t t64; - float tflt; - double tdbl; - int ret, i, j, k; - const int map[] = {2, 0, 1, 3}; // mapping from GBRA -> RGBA as RGBA is to be stored in FITS file.. - uint8_t *dst8; - uint16_t *dst16; - uint64_t t; - FITSHeader header; - FITSContext * fitsctx = avctx->priv_data; - - end = ptr8 + avpkt->size; - p->metadata = NULL; - ret = fits_read_header(avctx, &ptr8, &header, end, &p->metadata); - if (ret < 0) - return ret; - - if (header.rgb) { - if (header.bitpix == 8) { - if (header.naxisn[2] == 3) { - avctx->pix_fmt = AV_PIX_FMT_GBRP; - } else { - avctx->pix_fmt = AV_PIX_FMT_GBRAP; - } - } else if (header.bitpix == 16) { - if (header.naxisn[2] == 3) { - avctx->pix_fmt = AV_PIX_FMT_GBRP16; - } else { - avctx->pix_fmt = AV_PIX_FMT_GBRAP16; - } - } else { - av_log(avctx, AV_LOG_ERROR, "unsupported BITPIX = %d\n", header.bitpix); - return AVERROR_INVALIDDATA; - } - } else { - if (header.bitpix == 8) { - avctx->pix_fmt = AV_PIX_FMT_GRAY8; - } else { - avctx->pix_fmt = AV_PIX_FMT_GRAY16; - } - } - - if ((ret = ff_set_dimensions(avctx, header.naxisn[0], header.naxisn[1])) < 0) - return ret; - - if ((ret = ff_get_buffer(avctx, p, 0)) < 0) - return ret; - - /* - * FITS stores images with bottom row first. Therefore we have - * to fill the image from bottom to top. 
- */ - if (header.rgb) { - switch(header.bitpix) { -#define CASE_RGB(cas, dst, type, dref) \ - case cas: \ - for (k = 0; k < header.naxisn[2]; k++) { \ - for (i = 0; i < avctx->height; i++) { \ - dst = (type *) (p->data[map[k]] + (avctx->height - i - 1) * p->linesize[map[k]]); \ - for (j = 0; j < avctx->width; j++) { \ - t32 = dref(ptr8); \ - if (!header.blank_found || t32 != header.blank) { \ - t = t32 * header.bscale + header.bzero; \ - } else { \ - t = fitsctx->blank_val; \ - } \ - *dst++ = (type) t; \ - ptr8 += cas >> 3; \ - } \ - } \ - } \ - break - - CASE_RGB(8, dst8, uint8_t, *); - CASE_RGB(16, dst16, uint16_t, AV_RB16); - } - } else { - double scale = header.data_max - header.data_min; - - if (scale <= 0 || !isfinite(scale)) { - scale = 1; - } - scale = 1/scale; - - switch (header.bitpix) { -#define CASE_GRAY(cas, dst, type, t, rd) \ - case cas: \ - for (i = 0; i < avctx->height; i++) { \ - dst = (type *) (p->data[0] + (avctx->height-i-1)* p->linesize[0]); \ - for (j = 0; j < avctx->width; j++) { \ - t = rd; \ - if (!header.blank_found || t != header.blank) { \ - *dst++ = lrint(((t - header.data_min) * ((1 << (sizeof(type) * 8)) - 1)) * scale); \ - } else { \ - *dst++ = fitsctx->blank_val; \ - } \ - ptr8 += abs(cas) >> 3; \ - } \ - } \ - break - - CASE_GRAY(-64, dst16, uint16_t, tdbl, av_int2double(AV_RB64(ptr8))); - CASE_GRAY(-32, dst16, uint16_t, tflt, av_int2float(AV_RB32(ptr8))); - CASE_GRAY(8, dst8, uint8_t, t8, ptr8[0]); - CASE_GRAY(16, dst16, uint16_t, t16, AV_RB16(ptr8)); - CASE_GRAY(32, dst16, uint16_t, t32, AV_RB32(ptr8)); - CASE_GRAY(64, dst16, uint16_t, t64, AV_RB64(ptr8)); - default: - av_log(avctx, AV_LOG_ERROR, "invalid BITPIX, %d\n", header.bitpix); - return AVERROR_INVALIDDATA; - } - } - - p->key_frame = 1; - p->pict_type = AV_PICTURE_TYPE_I; - - *got_frame = 1; - - return avpkt->size; -} - -static const AVOption fits_options[] = { - { "blank_value", "value that is used to replace BLANK pixels in data array", offsetof(FITSContext, blank_val), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 65535, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM}, - { NULL }, -}; - -static const AVClass fits_decoder_class = { - .class_name = "FITS decoder", - .item_name = av_default_item_name, - .option = fits_options, - .version = LIBAVUTIL_VERSION_INT, -}; - -const FFCodec ff_fits_decoder = { - .p.name = "fits", - .p.type = AVMEDIA_TYPE_VIDEO, - .p.id = AV_CODEC_ID_FITS, - .p.capabilities = AV_CODEC_CAP_DR1, - CODEC_LONG_NAME("Flexible Image Transport System"), - .p.priv_class = &fits_decoder_class, - .priv_data_size = sizeof(FITSContext), - FF_CODEC_DECODE_CB(fits_decode_frame), -}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/hq_hqa.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/hq_hqa.h deleted file mode 100644 index 71aa36706c0c87c6323f829987fcc17efd3001e2..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/hq_hqa.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Canopus HQ/HQA decoder - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_HQ_HQA_H -#define AVCODEC_HQ_HQA_H - -#include - -#include "libavutil/mem_internal.h" - -#include "avcodec.h" -#include "hq_hqadsp.h" -#include "vlc.h" - -#define NUM_HQ_AC_ENTRIES 746 -#define NUM_HQ_PROFILES 22 -#define NUM_HQ_QUANTS 16 - -typedef struct HQContext { - AVCodecContext *avctx; - HQDSPContext hqhqadsp; - - VLC hq_ac_vlc; - VLC hqa_cbp_vlc; - DECLARE_ALIGNED(16, int16_t, block)[12][64]; -} HQContext; - -typedef struct HQProfile { - const uint8_t *perm_tab; - int width, height; - int num_slices; - int tab_w, tab_h; -} HQProfile; - -extern const int32_t * const ff_hq_quants[16][2][4]; -extern const HQProfile ff_hq_profile[NUM_HQ_PROFILES]; - -extern const uint8_t ff_hq_ac_skips[NUM_HQ_AC_ENTRIES]; -extern const int16_t ff_hq_ac_syms [NUM_HQ_AC_ENTRIES]; - -int ff_hq_init_vlcs(HQContext *c); - -#endif /* AVCODEC_HQ_HQA_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/celp_math_mips.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/celp_math_mips.c deleted file mode 100644 index ce711bd63c3e489a20cd47b56d3181f6380b1de1..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/celp_math_mips.c +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2012 - * MIPS Technologies, Inc., California. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * Author: Nedeljko Babic (nbabic@mips.com) - * - * Math operations optimized for MIPS - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
- * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * Reference: libavcodec/celp_math.c - */ -#include "config.h" -#include "libavcodec/celp_math.h" -#include "libavutil/mips/asmdefs.h" - -#if HAVE_INLINE_ASM -#if !HAVE_MIPS32R6 && !HAVE_MIPS64R6 -static float ff_dot_productf_mips(const float* a, const float* b, - int length) -{ - float sum; - const float* a_end = a + length; - - __asm__ volatile ( - "mtc1 $zero, %[sum] \n\t" - "blez %[length], ff_dot_productf_end%= \n\t" - "ff_dot_productf_madd%=: \n\t" - "lwc1 $f2, 0(%[a]) \n\t" - "lwc1 $f1, 0(%[b]) \n\t" - PTR_ADDIU "%[a], %[a], 4 \n\t" - PTR_ADDIU "%[b], %[b], 4 \n\t" - "madd.s %[sum], %[sum], $f1, $f2 \n\t" - "bne %[a], %[a_end], ff_dot_productf_madd%= \n\t" - "ff_dot_productf_end%=: \n\t" - - : [sum] "=&f" (sum), [a] "+r" (a), [b] "+r" (b) - : [a_end]"r"(a_end), [length] "r" (length) - : "$f1", "$f2", "memory" - ); - return sum; -} -#endif /* !HAVE_MIPS32R6 && !HAVE_MIPS64R6 */ -#endif /* HAVE_INLINE_ASM */ - -void ff_celp_math_init_mips(CELPMContext *c) -{ -#if HAVE_INLINE_ASM -#if !HAVE_MIPS32R6 && !HAVE_MIPS64R6 - c->dot_productf = ff_dot_productf_mips; -#endif -#endif -} diff --git a/spaces/congsaPfin/Manga-OCR/logs/Cars 3 Driven to Win - A World-Class Racing Experience for Xbox 360.md b/spaces/congsaPfin/Manga-OCR/logs/Cars 3 Driven to Win - A World-Class Racing Experience for Xbox 360.md deleted file mode 100644 index b62a0c137fc4ceb2ef808adb7bbecded5ebee086..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Cars 3 Driven to Win - A World-Class Racing Experience for Xbox 360.md +++ /dev/null @@ -1,77 +0,0 @@ -
    -

    Cars 3 Driven to Win Xbox 360 Download: How to Get the Ultimate Racing Experience

    -

    Introduction

    -

    If you are a fan of the Cars movie franchise, you might be interested in playing Cars 3 Driven to Win, a racing video game inspired by the third installment of the series. In this game, you can play with over 20 characters from the film, including Lightning McQueen, Cruz Ramirez, Jackson Storm, and more. You can also race on over 20 tracks across iconic locations from the movie, such as Radiator Springs, Florida International Speedway, and Thunder Hollow. You can also advance your skills in six different game modes, from stunt showcases to battle races. Whether you want to play solo or with your friends and family, Cars 3 Driven to Win is a fun and exciting game that will test your speed, agility, and creativity.

    -

    But how can you get this game for your Xbox 360 console? In this article, we will show you two easy methods to download Cars 3 Driven to Win for Xbox 360. All you need is an internet connection, an Xbox Live account, and a valid payment method. Follow these simple steps and you will be ready to race in no time.

    -

    cars 3 driven to win xbox 360 download


    Download Ziphttps://urlca.com/2uOalm



    -

    How to download Cars 3 Driven to Win for Xbox 360

    -

    Method 1: From the Microsoft Store

    -

    The first method is to download the game directly from the Microsoft Store on your Xbox 360 console. Here are the steps you need to follow:

    -

    Step 1: Sign up for Xbox Live

    -

    To access downloadable games from the Microsoft Store, you need to have an Xbox Live account. If you don't have one already, you can sign up for free on your console or on the Xbox website. You will need an email address and a password to create your account.

    -

    Step 2: Power up your Xbox 360 and sign into Xbox Live

    -

    Once you have your account ready, turn on your Xbox 360 console and controller. Press and hold the "Guide" button (the Xbox logo) on your controller to sign into Xbox Live. Make sure you are signed in with the profile that you want to use to download the game.

    -

    Step 3: Select the games tab and search for Cars 3 Driven to Win

    -

    On your console's home screen, press the RB button twice to select the games tab. Then select Search Games and press A. Enter "Cars 3 Driven to Win" in the search bar and select it from the list of results. This will open the game's page on the Microsoft Store.

    -

    Step 4: Select the game and confirm the purchase

    -

    On the game's page, you will see the price and the rating of the game. You will also see some screenshots and videos of the game, as well as a description and some reviews. If you are sure you want to buy the game, select Buy Game and press A. You will be asked to confirm your payment method and your billing information. If everything is correct, select Confirm Purchase and press A. The game will be added to your download queue.

    -

    Step 5: Check the download progress and enjoy the game

    -

    To check the download progress of the game, press the Guide button on your controller and select Active Downloads. You will see the percentage and the time remaining for the download. You can also pause or cancel the download if you want. Once the download is complete, you can launch the game from your games library or from the home screen. Enjoy racing with your favorite Cars characters!

    -

    cars 3 driven to win xbox 360 iso
    -cars 3 driven to win xbox 360 gameplay
    -cars 3 driven to win xbox 360 cheats
    -cars 3 driven to win xbox 360 review
    -cars 3 driven to win xbox 360 trailer
    -cars 3 driven to win xbox 360 free download
    -cars 3 driven to win xbox 360 digital code
    -cars 3 driven to win xbox 360 online multiplayer
    -cars 3 driven to win xbox 360 split screen
    -cars 3 driven to win xbox 360 walmart
    -cars 3 driven to win xbox 360 amazon
    -cars 3 driven to win xbox 360 gamestop
    -cars 3 driven to win xbox 360 ebay
    -cars 3 driven to win xbox 360 best buy
    -cars 3 driven to win xbox 360 target
    -cars 3 driven to win xbox 360 price
    -cars 3 driven to win xbox 360 release date
    -cars 3 driven to win xbox 360 achievements
    -cars 3 driven to win xbox 360 characters
    -cars 3 driven to win xbox 360 unlockables
    -cars 3 driven to win xbox 360 controls
    -cars 3 driven to win xbox 360 how to play
    -cars 3 driven to win xbox 360 walkthrough
    -cars 3 driven to win xbox 360 tips and tricks
    -cars 3 driven to win xbox 360 all tracks
    -cars 3 driven to win xbox 360 all modes
    -cars 3 driven to win xbox 360 all races
    -cars 3 driven to win xbox 360 all stunts
    -cars 3 driven to win xbox 360 all skills
    -cars 3 driven to win xbox 360 all trophies
    -cars 3 driven to win xbox 360 all collectibles
    -cars 3 driven to win xbox 360 all challenges
    -cars 3 driven to win xbox 360 all hall of fame entries
    -cars 3 driven to win xbox 360 all customization options
    -cars 3 driven to win xbox 360 all bonus content
    -cars 3 driven to win xbox 360 comparison with other platforms
    -cars 3 driven to win xbox one vs xbox one x vs xbox series x vs s vs pc vs ps4 vs ps5 vs switch vs wii u vs ps vita vs android vs ios vs mac os x vs linux vs windows vs chrome os vs web browser vs steam vs epic games store vs origin vs uplay vs gog galaxy vs discord store vs humble bundle store vs itch.io store vs gamejolt store vs kartridge store vs green man gaming store vs fanatical store vs indiegala store vs nuuvem store vs gamersgate store download performance graphics quality sound quality loading times frame rate resolution gameplay features online features offline features cross-play cross-save cross-buy cross-progression cloud gaming cloud saves cloud backup cloud sync mod support controller support keyboard and mouse support touch screen support vr support ar support motion control support voice control support gesture control support eye tracking support facial recognition support haptic feedback support adaptive triggers support ray tracing support hdr support dolby atmos support dolby vision support dts:x support dts-hd master audio support dolby truehd support dolby digital plus support dts express support dts coreless lossless audio codec support auro-3d support mpeg-h audio support mpeg-d surround sound system support mpeg-d usac support mpeg-d mpeg-h part2 l1 sound system support mpeg-d mpeg-h part2 l2 sound system support mpeg-d mpeg-h part2 l4 sound system support mpeg-d mpeg-h part2 l5 sound system support mpeg-d mpeg-h part2 l6 sound system support mpeg-d mpeg-h part2 l7 sound system support mpeg-d mpeg-h part2 l8 sound system support mpeg-d mpeg-h part2 l9 sound system support mpeg-d mpeg-h part2 l10 sound system support mpeg-d mpeg-h part2 l11 sound system support mpeg-d mpeg-h part2 l12 sound system support mpeg-d mpeg-h part2 l13 sound system support mpeg-d mpeg-h part2 l14 sound system support mpeg-d mpeg-h part2 l15 sound system

    -

    Method 2: From the Xbox website

    -

    The second method is to download the game from the Xbox website on your computer or mobile device. Here are the steps you need to follow:

    -

    Step 1: Go to the Xbox website and sign in with your Microsoft account

    -

    On your browser, go to the Xbox website and click on Sign In at the top right corner. Enter your email address and password that you used to create your Xbox Live account. If you don't have an account yet, you can sign up for free by clicking on Create one.

    -

    Step 2: Browse or search for Cars 3 Driven to Win and click on it

    -

    On the Xbox website, you can browse or search for games by genre, rating, price, or popularity. To find Cars 3 Driven to Win, you can either use the search bar at the top right corner or go to Games > Xbox 360 Games > Racing & Flying. Once you find the game, click on it to open its page.

    -

    Step 3: Click on Buy game and confirm the payment method

    -

    On the game's page, you will see similar information as on the Microsoft Store, such as the price, the rating, the screenshots, and the description. If you want to buy the game, click on Buy game and choose your payment method. You can use a credit card, a debit card, a PayPal account, or an Xbox gift card. Enter your billing information and confirm your purchase.

    -

    Step 4: Turn on your Xbox 360 and make sure you're signed into the right profile

    -

    After buying the game online, you need to turn on your Xbox 360 console and controller. Make sure you are signed into Xbox Live with the same profile that you used to buy the game online. If not, press and hold the Guide button on your controller and select Switch Profile.

    -

    Step 5: Check your game's download progress and enjoy the game

    -

    To check if your game is downloading, press and hold the Guide button on your controller and select Active Downloads. You should see Cars 3 Driven to Win in your download queue. You can also pause or cancel the download if you want. Once the download is complete, you can launch the game from your games library or from the home screen. Have fun racing with your favorite Cars characters!

    -

    Conclusion

    -

    Summary of the main points

    -

    In this article, we have shown you two easy methods to download Cars 3 Driven to Win for Xbox 360. You can either download it directly from the Microsoft Store on your console or from the Xbox website on your computer or mobile device. All you need is an internet connection, an Xbox Live account, and a valid payment method.

    -

    Call to action and final thoughts

    -

    If you are looking for a fun and exciting racing game that features your favorite characters from the Cars movie franchise, you should definitely try Cars 3 Driven to Win for Xbox 360. It is a game that will challenge your speed, agility, and creativity in six different game modes and over 20 tracks. You can also play solo or with your friends and family in split-screen mode.

    -

    So what are you waiting for? Download Cars 3 Driven to Win for Xbox 360 today and get ready to race like never before!

- FAQs
Q: How much does Cars 3 Driven to Win cost for Xbox 360?
A: The game costs $19.99 USD for Xbox 360.
Q: How much storage space does Cars 3 Driven to Win require for Xbox 360?
A: The game requires 4.5 GB of storage space for Xbox 360.
Q: Can I play Cars 3 Driven to Win online with other players?
A: No, the game does not support online multiplayer mode. However, you can play with up to four players in split-screen mode on the same console.
Q: What are the minimum system requirements for Cars 3 Driven to Win for Xbox 360?
A: The minimum system requirements for the game are:
- Xbox 360 console with a hard drive
- Xbox Live account
- Internet connection
- HDTV or monitor with HDMI or component input
- Xbox 360 controller
Q: What are the ratings and reviews for Cars 3 Driven to Win for Xbox 360?
A: The game has a rating of E (Everyone) by the ESRB and a score of 7.1 out of 10 by IGN. The game has received mostly positive reviews from critics and players, who praised its graphics, gameplay, variety, and replay value.
Q: Where can I find more information about Cars 3 Driven to Win for Xbox 360?
A: You can find more information about the game on the official website, the Microsoft Store page, or the Xbox website page. You can also watch some gameplay videos on YouTube or read some tips and tricks on GameFAQs.

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Extreme Car Driving Simulator 2015 APK and Unlock All the Cars and Features.md b/spaces/congsaPfin/Manga-OCR/logs/Download Extreme Car Driving Simulator 2015 APK and Unlock All the Cars and Features.md deleted file mode 100644 index dda9206b165362e4f2323f075f19b35eea56eb07..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download Extreme Car Driving Simulator 2015 APK and Unlock All the Cars and Features.md +++ /dev/null @@ -1,88 +0,0 @@ - -

    Extreme Car Driving Simulator 2015 APK: A Thrilling and Non-Stop Car Driving Game

    -

    Do you love driving cars and racing games? Do you want to experience the thrill and excitement of driving realistic cars on different maps and terrains? If yes, then you should try Extreme Car Driving Simulator 2015 APK, a car driving game for Android devices that will keep you hooked for hours.

    -

    What is Extreme Car Driving Simulator 2015 APK?

    -

    A realistic and immersive car driving game for Android devices

    -

    Extreme Car Driving Simulator 2015 APK is a car driving game that lets you drive various cars on different maps and terrains. You can choose from sports cars, muscle cars, off-road vehicles, and more. You can also customize your cars with different colors, wheels, spoilers, and stickers.

    -

    extreme car driving simulator 2015 apk


    Download File >>>>> https://urlca.com/2uOf1Q



    -

    Features of Extreme Car Driving Simulator 2015 APK

    -

    HD user interfaces and graphics

    -

    The game has HD user interfaces and graphics that improve your game experience. You can enjoy the stunning details of the cars, the roads, the buildings, and the environment. You can also adjust the graphics quality according to your device performance.

    -

    Different maps and terrains to explore

    -

    The game offers different maps and terrains to explore with your cars. You can drive on city streets, highways, deserts, mountains, snow, and more. You can also encounter traffic, obstacles, ramps, bridges, and other elements that make the game more realistic and fun.

    -

    extreme car driving simulator 2015 mod apk
    -download extreme car driving simulator 2015 for android
    -extreme car driving simulator 2015 game online
    -extreme car driving simulator 2015 cheats and hacks
    -extreme car driving simulator 2015 free full version
    -extreme car driving simulator 2015 unlimited money
    -extreme car driving simulator 2015 best cars
    -extreme car driving simulator 2015 review and rating
    -extreme car driving simulator 2015 latest update
    -extreme car driving simulator 2015 gameplay and features
    -extreme car driving simulator 2015 realistic physics
    -extreme car driving simulator 2015 tips and tricks
    -extreme car driving simulator 2015 how to install
    -extreme car driving simulator 2015 system requirements
    -extreme car driving simulator 2015 new maps and modes
    -extreme car driving simulator 2015 offline play
    -extreme car driving simulator 2015 multiplayer mode
    -extreme car driving simulator 2015 custom cars and skins
    -extreme car driving simulator 2015 video and screenshots
    -extreme car driving simulator 2015 comparison with other games
    -extreme car driving simulator 2015 apk download link
    -extreme car driving simulator 2015 fun and addictive
    -extreme car driving simulator 2015 challenges and missions
    -extreme car driving simulator 2015 support and feedback
    -extreme car driving simulator 2015 bugs and fixes

    -

    Real driving experience with HUD, gear, speed, and simulations

    -

    The game gives you a real driving experience with HUD revs, gear, speed, plus TC, ABS, and ESP simulations. You can feel the physics of the cars as you accelerate, brake, drift, and crash. You can also switch between different camera views to see your car from different angles.

    -

    Free mode and checkpoint mode to play

    -

    The game has two modes to play: free mode and checkpoint mode. In free mode, you can drive freely on any map without any time limit or objective. You can explore the map at your own pace and do whatever you want with your car. In checkpoint mode, you have to reach certain checkpoints on the map within a given time limit. You can earn coins by completing checkpoints and use them to unlock new cars or upgrade your existing ones.

    -

    How to download and install Extreme Car Driving Simulator 2015 APK?

    -

    Download the APK file from a trusted source

    -

    To download Extreme Car Driving Simulator 2015 APK, you need to find a trusted source that provides the latest version of the game. You can use [APKPure](^1^) as an example of a reliable source that offers safe and fast downloads of various APK files.

    -

    Enable unknown sources on your device settings

    -

    To install Extreme Car Driving Simulator 2015 APK, you need to enable unknown sources on your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.

    -

    Install the APK file and launch the game

    Install the APK file and launch the game

    -

    After you have downloaded the APK file and enabled unknown sources, you can install the APK file by tapping on it and following the instructions. Once the installation is complete, you can launch the game by tapping on its icon on your home screen or app drawer. You can then enjoy playing Extreme Car Driving Simulator 2015 APK on your device.

    -

    Why should you play Extreme Car Driving Simulator 2015 APK?

    -

    It is fun and addictive

    -

    Extreme Car Driving Simulator 2015 APK is a fun and addictive game that will keep you entertained for hours. You can drive different cars on different maps and terrains, and experience the thrill and excitement of realistic car driving. You can also perform stunts, drifts, jumps, and crashes with your cars, and see how they react to the physics and damage.

    -

    It is challenging and rewarding

    -

    Extreme Car Driving Simulator 2015 APK is a challenging and rewarding game that will test your driving skills and reflexes. You can play the checkpoint mode and try to complete the objectives within the time limit. You can also compete with other players online and see who can drive faster and better. You can earn coins by playing the game and use them to unlock new cars or upgrade your existing ones.

    -

    It is suitable for all ages and preferences

    -

    Extreme Car Driving Simulator 2015 APK is a game that is suitable for all ages and preferences. Whether you are a casual gamer or a hardcore racer, you can find something to enjoy in this game. You can choose from different cars, maps, terrains, modes, and settings to suit your taste and mood. You can also adjust the difficulty level and the controls to match your skill level and comfort.

    -

    Conclusion

    -

    Extreme Car Driving Simulator 2015 APK is a car driving game that you should not miss if you love driving cars and racing games. It is a realistic and immersive game that lets you drive various cars on different maps and terrains. It has HD user interfaces and graphics, real driving experience with HUD, gear, speed, and simulations, free mode and checkpoint mode to play, and online multiplayer mode to compete with other players. It is also easy to download and install, fun and addictive, challenging and rewarding, and suitable for all ages and preferences. So what are you waiting for? Download Extreme Car Driving Simulator 2015 APK now and enjoy the thrill and excitement of non-stop car driving.

    -

    FAQs

    -

    Here are some frequently asked questions about Extreme Car Driving Simulator 2015 APK:

    - - - - - - - - - - - - - - - - - - - - - - - - - -
| Question | Answer |
| --- | --- |
| Is Extreme Car Driving Simulator 2015 APK free? | Yes, Extreme Car Driving Simulator 2015 APK is free to download and play. However, it may contain ads and in-app purchases that require real money. |
| Is Extreme Car Driving Simulator 2015 APK safe? | Yes, Extreme Car Driving Simulator 2015 APK is safe to download and install as long as you use a trusted source like [APKPure]. However, you should always be careful when installing apps from unknown sources and scan them for viruses or malware before opening them. |
| What are the requirements to play Extreme Car Driving Simulator 2015 APK? | To play Extreme Car Driving Simulator 2015 APK, you need an Android device that runs on Android 4.1 or higher. You also need at least 100 MB of free storage space on your device. |
| How can I contact the developer of Extreme Car Driving Simulator 2015 APK? | You can contact the developer of Extreme Car Driving Simulator 2015 APK by visiting their website at [AxesInMotion Racing] or sending them an email at support@axesinmotion.com. |
| How can I rate and review Extreme Car Driving Simulator 2015 APK? | You can rate and review Extreme Car Driving Simulator 2015 APK by visiting its page on the Google Play Store at [Extreme Car Driving Simulator] or by leaving a comment on its page on [APKPure]. |
- : https://apkpure.com/extreme-car-driving-simulator/com.aim.racing : http://www.axesinmotion.com/ : https://play.google.com/store/apps/details?id=com.aim.racing&hl=en_US&gl=US

    401be4b1e0
    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Free Call Of Duty Black Ops 3 For Android And Ios Apk Download ((LINK)).md b/spaces/congsaPfin/Manga-OCR/logs/Free Call Of Duty Black Ops 3 For Android And Ios Apk Download ((LINK)).md deleted file mode 100644 index a8bbf2bce556b177039a1594bb9621ed670ca0c0..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Free Call Of Duty Black Ops 3 For Android And Ios Apk Download ((LINK)).md +++ /dev/null @@ -1,42 +0,0 @@ - -

    Free Call of Duty Black Ops 3 for Android and iOS/APK Download

    -

    Call of Duty is one of the most popular and successful first-person shooter franchises in the world. Millions of fans enjoy the thrilling and immersive gameplay, the stunning graphics, and the epic storylines of the games. One of the most acclaimed titles in the series is Call of Duty Black Ops 3, which was released in 2015 for PC, PlayStation 4, and Xbox One.

    -

    free call of duty black ops 3 for android and ios apk download


    Downloadhttps://urlca.com/2uOfKp



    -

    But what if you want to play Call of Duty Black Ops 3 on your mobile device? Is it possible to download it for free on Android and iOS devices? The answer is yes, it is possible, but not without some challenges and limitations. In this article, we will tell you everything you need to know about Call of Duty Black Ops 3 for mobile devices, including what it is, how to download it, what are its pros and cons, and some tips and tricks for playing it. Let's get started!

    -

    What is Call of Duty Black Ops 3?

    -

    Call of Duty Black Ops 3 is a first-person shooter game that is set in a dystopian future in the year 2065. The game follows a team of black ops soldiers who use advanced technology and cybernetic enhancements to fight against a rogue AI and a mysterious enemy faction. The game has three main modes: campaign, multiplayer, and zombies. The campaign mode allows you to play solo or co-op with up to four players, while the multiplayer mode offers various competitive modes and maps. The zombies mode lets you team up with other players or play solo to survive waves of undead enemies.

    -

    Features of Call of Duty Black Ops 3

    -

    Call of Duty Black Ops 3 has many features that make it an exciting and enjoyable game to play. Some of these features are:

    -
      -
    • A rich and immersive story that takes you to different locations around the world.
    • -
    • A customizable character that you can equip with different weapons, abilities, outfits, and accessories.
    • -
    • A variety of game modes and maps that cater to different playstyles and preferences.
    • -
    • A dynamic and fluid gameplay that allows you to run, jump, slide, wall-run, swim, and use vehicles.
    • -
    • A stunning graphics engine that delivers realistic visuals and effects.
    • -
    • A robust online community that supports cross-play between platforms.
    • -
    -

    How to download Call of Duty Black Ops 3 for free on Android and iOS devices?

    -

    Unfortunately, Call of Duty Black Ops 3 is not officially available for mobile devices. However, there are some unofficial ways to download it for free on Android and iOS devices. Here are two methods that you can try:

    -

    Method 1: Download from the official website

    -

    The official website of Call of Duty Mobile offers a free download link for Call of Duty Black Ops 3 for mobile devices. However, this link is not always working or accessible. To download it from the official website, follow these steps:

    -
      -
    1. Go to [the official website](^1^) of Call of Duty Mobile.
    2. -
    3. Scroll down to the bottom of the page and click on "Download Now".
    4. -
    5. Select your device type (Android or iOS) and click on "Download".
    6. -
    7. Wait shotguns, light machine guns, sniper rifles, pistols, launchers, melee weapons, and special weapons. You can also equip different attachments, optics, camos, and paintjobs to customize your weapons. You can also choose from different perks that give you various advantages in the game, such as faster movement, increased health, reduced recoil, enhanced vision, and more. You should learn the weapons and perks in the game and use them according to your strategy and situation.

      -

      Practice your aim and movement skills

      -

      Call of Duty Black Ops 3 is a fast-paced and action-packed game that requires good aim and movement skills. You should practice your aim and movement skills to improve your accuracy, reflexes, and agility in the game. You can use the training mode or the custom games to practice your skills against bots or other players. You can also watch tutorials or tips videos online to learn from other players. You should also adjust the sensitivity and controls of the game to suit your preference and comfort.

      -

      Conclusion

      -

      Call of Duty Black Ops 3 is a great game that you can enjoy on your mobile device. However, you should be aware of the challenges and limitations of downloading and playing it on your device. You should also follow some tips and tricks to improve your skills and experience in the game. If you are a fan of Call of Duty or first-person shooter games, you should definitely give Call of Duty Black Ops 3 a try on your mobile device.

      -

      -

      FAQs

      -

      Here are some frequently asked questions about Call of Duty Black Ops 3 for mobile devices:

      -
        -
      • Q: Is Call of Duty Black Ops 3 for mobile devices free?
        A: Yes, Call of Duty Black Ops 3 for mobile devices is free to download and play. However, you may need to pay for some in-game items or features.
      • -
      • Q: Is Call of Duty Black Ops 3 for mobile devices safe?
        A: Call of Duty Black Ops 3 for mobile devices is safe to download and play as long as you use the official website or a trusted APK website. However, you should be careful of malware or viruses that may harm your device or data.
      • -
      • Q: Is Call of Duty Black Ops 3 for mobile devices compatible with my device?
        A: Call of Duty Black Ops 3 for mobile devices is compatible with most Android and iOS devices that have at least 2 GB of RAM and 4 GB of storage space. However, some devices may not support the game or may have performance issues.
      • -
      • Q: Is Call of Duty Black Ops 3 for mobile devices offline?
        A: No, Call of Duty Black Ops 3 for mobile devices requires an internet connection to download, install, update, and play the game. You also need an internet connection to access the online features and modes of the game.
      • -
      • Q: Is Call of Duty Black Ops 3 for mobile devices cross-platform?
        A: Yes, Call of Duty Black Ops 3 for mobile devices supports cross-play between Android and iOS devices. You can also play with other players who are using PC or console versions of the game.
      • -

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/PDF Download of Class 9 Hindi Chapter 6 - Nana Saheb ki Putri Devi Maina ko Bhasm kar Diya Gaya NCERT Solutions and Notes.md b/spaces/congsaPfin/Manga-OCR/logs/PDF Download of Class 9 Hindi Chapter 6 - Nana Saheb ki Putri Devi Maina ko Bhasm kar Diya Gaya NCERT Solutions and Notes.md deleted file mode 100644 index 1a5ada40e9a8754f1c99e08d82dce775e706a25c..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/PDF Download of Class 9 Hindi Chapter 6 - Nana Saheb ki Putri Devi Maina ko Bhasm kar Diya Gaya NCERT Solutions and Notes.md +++ /dev/null @@ -1,134 +0,0 @@ -
      -

      Class 9 Hindi Chapter 6 Question Answer PDF Download

      -

      If you are looking for a reliable and comprehensive source of question answer for class 9 Hindi chapter 6, then you have come to the right place. In this article, we will provide you with a summary of the chapter, along with some short and long answer questions based on it. We will also provide you with a link to download the PDF file of the question answer section, which will help you revise and practice for your exam. So, let us begin.

      -

      Introduction

      -

      Class 9 Hindi chapter 6 is titled "Premchand ke Phate Jute", which means "Premchand's Torn Shoes". It is a story written by Harishankar Parsai, who was a famous satirist and humorist in Hindi literature. He was known for his witty and sarcastic style of writing, which exposed the hypocrisy and corruption of society. In this story, he presents a contrast between the simple and humble personality of Premchand, who was one of the greatest writers in Hindi literature, and the opportunistic and pretentious attitude of modern people. He also criticizes the tendency of people to judge others by their appearance and status, rather than their character and talent.

      -

      class 9 hindi chapter 6 question answer pdf download


      DOWNLOADhttps://urlca.com/2uO5Lr



      -

      This chapter is important to study because it gives us an insight into the life and works of Premchand, who was also known as "the people's writer". He wrote about the realistic and social issues of his time, such as poverty, oppression, injustice, casteism, communalism, etc. He also used simple and lucid language, which appealed to the masses. He was a pioneer of modern Hindi literature and influenced many writers after him. This chapter also teaches us some valuable lessons, such as being humble, honest, respectful, and sincere in our work and life. It also warns us against being greedy, selfish, dishonest, and superficial in our dealings with others.

      -

      Summary of the Chapter

      -

      The story begins with Parsai looking at a photograph of Premchand with his wife. He notices that Premchand is wearing torn shoes in the picture, and one of his toes is visible through a hole in his right shoe. Parsai is amazed by this sight and wonders how such a great writer could wear such shabby shoes. He then imagines how Premchand must have felt when he went out wearing those shoes. He thinks that Premchand must have been embarrassed and uncomfortable by his appearance, but he also must have been proud and confident by his achievements.

      -

      Parsai then compares Premchand's situation with that of modern people, who are obsessed with their looks and status. He says that nowadays people spend a lot of money on their clothes, shoes, accessories, etc., but they do not care about their work or talent. They also try to impress others by showing off their wealth and power, but they do not respect or appreciate others' work or talent. They also judge others by their appearance and status, rather than their character and ability. Parsai says that these people are like hollow shells, who have no substance or value inside them.

      -

      Parsai then praises Premchand for his simplicity and honesty. He says that Prem

      chand was not bothered by his torn shoes, because he knew that his work was more important and valuable than his shoes. He says that Premchand's shoes were a symbol of his dignity and integrity, which he never compromised for anything. He also says that Premchand's shoes were a source of inspiration and motivation for him, as they reminded him of his struggle and success. He says that Premchand's shoes were a testament of his greatness and legacy, which no one can deny or forget.

      -

      Question Answer Section

      -

      In this section, we will provide you with some short and long answer questions based on the chapter. These questions will help you test your understanding and comprehension of the chapter, as well as improve your writing and analytical skills. You can also refer to these questions for your exam preparation and revision.

      -


      -

      Short Answer Questions

      -

      Here are some of the short answer questions based on the chapter:

Question: Who is the author of the story "Premchand ke Phate Jute"?
Answer: The author of the story is Harishankar Parsai, who was a famous satirist and humorist in Hindi literature.

Question: What did Parsai notice in the photograph of Premchand with his wife?
Answer: Parsai noticed that Premchand was wearing torn shoes in the photograph, and one of his toes was visible through a hole in his right shoe.

Question: How did Parsai imagine Premchand's feelings when he went out wearing those shoes?
Answer: Parsai imagined that Premchand must have felt embarrassed and uncomfortable about his appearance, but also proud and confident in his achievements.

Question: How did Parsai compare Premchand's situation with that of modern people?
Answer: Parsai compared Premchand's situation with that of modern people, who are obsessed with their looks and status but do not care about their work or talent. He also said that they judge others by their appearance and status, rather than their character and ability.

Question: What did Parsai say about Premchand's simplicity and honesty?
Answer: Parsai said that Premchand was not bothered by his torn shoes, because he knew that his work was more important and valuable than his shoes. He also said that Premchand's shoes were a symbol of his dignity and integrity, which he never compromised for anything.

      Long Answer Questions

      -

      Here are some of the long answer questions based on the chapter:

Question: What is the main theme of the story "Premchand ke Phate Jute"?
Answer: The main theme of the story is the contrast between the simple and humble personality of Premchand, who was one of the greatest writers in Hindi literature, and the opportunistic and pretentious attitude of modern people. The story also highlights the importance of work and talent over appearance and status, and the value of dignity and integrity over wealth and power. It also conveys a message of respect and appreciation for Premchand's life and works, which have inspired and influenced many generations of readers and writers.

Question: How does Parsai use satire and humor in the story to expose the hypocrisy and corruption of society?
Answer: Parsai uses satire and humor to expose the hypocrisy and corruption of society in a witty and sarcastic manner. He uses irony, exaggeration, ridicule, and comparison to show the absurdity and falsity of modern people's behavior and values. He mocks their obsession with their looks and status, their tendency to judge others by appearance and status, their lack of work ethic and talent, their greed and selfishness, and their dishonesty and superficiality. He contrasts all this with Premchand, who was simple, honest, respectful, sincere, hardworking, and talented. He makes us laugh at the foolishness and vanity of modern people, but also makes us think about the moral and social issues that he raises in the story.

Question: What are some of the lessons that we can learn from Premchand's life and works?
Answer: Some of the lessons that we can learn from Premchand's life and works are:
- We should be humble, honest, respectful, and sincere in our work and life, and not compromise our dignity and integrity for anything.
- We should value our work and talent over our appearance and status, and not try to impress others by showing off our wealth and power.
- We should respect and appreciate others' work and talent, regardless of their appearance and status, and not judge them by their superficial qualities.
- We should write about the realistic and social issues of our time, using simple and lucid language that appeals to the masses.
- We should be inspired and motivated by our struggle and success, rather than be embarrassed or uncomfortable about our shortcomings or difficulties.

      PDF Download Link

      -

      If you want to download the PDF file of the question answer section for class 9 Hindi chapter 6, you can click on the link below. The PDF file contains all the short and long answer questions that we have provided in this article, along with their answers. You can also print the PDF file or save it on your device for your convenience. The PDF file will help you revise and practice the chapter in an easy and effective way.

      -

      Class 9 Hindi Chapter 6 Question Answer PDF Download

      -

      Conclusion

      -

      In conclusion, we can say that class 9 Hindi chapter 6 is a very interesting and informative story that teaches us about the life and works of Premchand, who was one of the greatest writers in Hindi literature. It also teaches us some valuable lessons, such as being humble, honest, respectful, and sincere in our work and life, valuing our work and talent over our appearance and status, respecting and appreciating others' work and talent, writing about the realistic and social issues of our time, etc. It also warns us against being greedy, selfish, dishonest, and superficial in our dealings with others. We hope that this article has helped you understand and enjoy the chapter better, and has also prepared you for your exam.

      -

      FAQs

      -

      Here are some of the frequently asked questions related to this chapter:

Question: Who was Premchand?
Answer: Premchand was one of the greatest writers in Hindi literature. He was also known as "the people's writer". He wrote about the realistic and social issues of his time, such as poverty, oppression, injustice, casteism, and communalism. He used simple and lucid language, which appealed to the masses. He was a pioneer of modern Hindi literature and influenced many writers after him.

Question: What is the meaning of "Premchand ke Phate Jute"?
Answer: "Premchand ke Phate Jute" means "Premchand's Torn Shoes". It is the title of a story written by Harishankar Parsai, who was a famous satirist and humorist in Hindi literature. He used this title to contrast the simple and humble personality of Premchand with the opportunistic and pretentious attitude of modern people.

Question: What is satire and humor?
Answer: Satire and humor are literary devices that use irony, exaggeration, ridicule, and comparison to make fun of or criticize something or someone. They are often used to expose the hypocrisy and corruption of society or to convey a message or moral lesson.

Question: How do you write a good answer for a short or long question?
Answer: To write a good answer for a short or long question, you should follow these steps:
- Read the question carefully and understand what it is asking.
- Recall the relevant information from the chapter or your notes.
- Organize your answer in a logical and coherent manner.
- Use proper grammar, spelling, punctuation, and vocabulary.
- Write your answer in your own words rather than copying from other sources.
- For a short answer question, write one or two paragraphs with a clear and concise answer.
- For a long answer question, write three or more paragraphs with a detailed and logical answer.

Question: Where can I find more questions and answers for class 9 Hindi chapters?
Answer: You can find more questions and answers for class 9 Hindi chapters on various websites and apps that provide study material for students, such as NCERT Solutions, Vedantu, Toppr, and BYJU'S.

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/QRIS OVO Merchant The Best Payment Solution for Indonesian SMEs.md b/spaces/congsaPfin/Manga-OCR/logs/QRIS OVO Merchant The Best Payment Solution for Indonesian SMEs.md deleted file mode 100644 index 78084307888adba2c06afc9610e36ea69c901ed6..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/QRIS OVO Merchant The Best Payment Solution for Indonesian SMEs.md +++ /dev/null @@ -1,135 +0,0 @@ -
      -

      How to Download QRIS OVO Merchant

      -

      If you are a merchant who wants to accept payments from various digital wallets, such as OVO, Gopay, DANA, LinkAja, and others, you might want to consider using QRIS OVO Merchant. QRIS OVO Merchant is a service that allows you to generate a single QR code that can be scanned by customers using any payment service provider that supports QRIS (Quick Response Code Indonesian Standard). In this article, we will explain what QRIS OVO Merchant is, how to register as one, how to download and print your QR code, and how to accept payments using it.

      -

      What is QRIS OVO Merchant?

      -

      QRIS OVO Merchant is a service that enables you to accept payments from various digital wallets using a single QR code. This means that you don't need to have multiple QR codes for different payment service providers, which can save you space and hassle. By using QRIS OVO Merchant, you can also enjoy the following benefits:

      -

      download qris ovo merchant


      DOWNLOAD 🗸 https://urlca.com/2uOb8V



      -

      The benefits of using QRIS OVO Merchant

      -
        -
      • Free of charge. You don't need to pay any fees or commissions for accepting payments using QRIS OVO Merchant.
      • -
      • Fast settlement process. You can receive your transaction funds within one business day after the transaction date.
      • -
      • Special cooperation with Nobu Bank. You can open a bank account with Nobu Bank and enjoy various features and benefits, such as cashback, interest, and insurance.
      • -
      • Partnership with GrabFood. You can join GrabFood as a partner and increase your sales and exposure.
      • -
• Wherever you are. You can find and join the many merchants who are already using QRIS OVO Merchant across Indonesia.
      • -
      -

      The requirements to become QRIS OVO Merchant

      -

      To become QRIS OVO Merchant, you need to meet the following requirements:

      -
        -
      • You must have a valid identity card (KTP) or business license (SIUP).
      • -
      • You must have a smartphone that supports Android or iOS operating system.
      • -
      • You must have an active email address and phone number.
      • -
      • You must have an active bank account with any bank in Indonesia.
      • -
      -

      How to register as QRIS OVO Merchant?

      -

      The registration process for QRIS OVO Merchant depends on whether you are an individual merchant or a corporate merchant. Here are the steps for each type of merchant:

      -

      For individual merchants

      -
        -
      1. Download the GrabMerchant app from Google Play Store or App Store.
      2. -
      3. Open the app and tap on "Register".
      4. -
      5. Fill in your personal information, such as name, email, phone number, address, etc.
      6. -
      7. Choose "OVO" as your payment method and tap on "Next".
      8. -
      9. Upload your identity card (KTP) and a photo of yourself holding the KTP.
      10. -
      11. Wait for the verification process to complete. You will receive an SMS notification once your registration is approved.
      12. -
      -

      For corporate merchants

      -
        -
      1. Visit [8](https://ovo.id/partnership) and fill in the online form with your business information, such as name, email, phone number, address, etc.
      2. -
      3. Choose "OVO" as your payment method and tap on "Submit".
      4. -
      5. You will receive an email confirmation with a link to complete your registration.
      6. -
      7. Click on the link and upload your business license (SIUP) and other supporting documents.
      8. -
      9. Wait for the verification process to complete. You will receive an SMS notification once your registration is approved.
      10. -
      -

      How to download and print QRIS OVO Merchant code?

      -

      After you register as QRIS OVO Merchant, you can download and print your QR code from the GrabMerchant app or the OVO website. Here are the steps for each option:

      -

      For individual merchants

      -
        -
      1. Open the GrabMerchant app and tap on "QR Code".
      2. -
      3. Tap on "Download" and choose the size and format of your QR code.
      4. -
      5. Save the QR code image to your device or share it via email or other apps.
      6. -
      7. Print the QR code image using a printer or a printing service.
      8. -
      9. Display the QR code in a visible and accessible location for your customers.
      10. -
      -

      For corporate merchants

      -
        -
      1. Visit [9](https://ovo.id/merchant) and log in with your email and password.
      2. -
      3. Click on "QR Code" and choose the size and format of your QR code.
      4. -
      5. Download the QR code image to your device or share it via email or other apps.
      6. -
      7. Print the QR code image using a printer or a printing service.
      8. -
      9. Display the QR code in a visible and accessible location for your customers.
      10. -
      -

      How to accept payments using QRIS OVO Merchant code?

      -

      To accept payments using QRIS OVO Merchant code, you need to follow these steps:

      -

      For individual merchants

      -
        -
      1. Ask your customer to scan your QR code using their preferred payment service provider app that supports QRIS, such as OVO, Gopay, DANA, LinkAja, etc.
      2. -
      3. The customer will see your merchant name and the amount to pay on their app screen. They can also enter a different amount if needed.
      4. -
      5. The customer will confirm the payment by entering their PIN or biometric authentication.
      6. -
      7. You will receive a notification on your GrabMerchant app that the payment is successful. You can also check the transaction history on the app.
      8. -
      9. You can issue a receipt to the customer if they request one. You can print it from the app or send it via email or SMS.
      10. -
      -

      For corporate merchants

      -
        -
      1. Ask your customer to scan your QR code using their preferred payment service provider app that supports QRIS, such as OVO, Gopay, DANA, LinkAja, etc.
      2. -
      3. The customer will see your merchant name and the amount to pay on their app screen. They can also enter a different amount if needed.
      4. -
      5. The customer will confirm the payment by entering their PIN or biometric authentication.
      6. -
      7. You will receive a notification on your OVO website dashboard that the payment is successful. You can also check the transaction history on the website.
      8. -
      9. You can issue a receipt to the customer if they request one. You can print it from the website or send it via email or SMS.
      10. -
      -

      Conclusion

      -

      In conclusion, QRIS OVO Merchant is a convenient and cost-effective way to accept payments from various digital wallets using a single QR code. You can register as QRIS OVO Merchant easily by following the steps above. You can also download and print your QR code from the GrabMerchant app or the OVO website. To accept payments using QRIS OVO Merchant code, you just need to ask your customer to scan your QR code using their preferred payment service provider app that supports QRIS. You will receive your transaction funds within one business day after the transaction date. By using QRIS OVO Merchant, you can grow your business and reach more customers across Indonesia.

      -

      FAQs

      -
        -
      • What is QRIS?
        -QRIS stands for Quick Response Code Indonesian Standard. It is a national standard for QR code payments that enables interoperability among different payment service providers in Indonesia.
      • -
      • What are the advantages of using QRIS?
        -By using QRIS, you can accept payments from various payment service providers using a single QR code. This can save you space and hassle of having multiple QR codes for different payment service providers. You can also enjoy lower fees and faster settlement process compared to other payment methods.
      • -
      • How do I know if my payment service provider supports QRIS?
        -You can check if your payment service provider supports QRIS by looking for the QRIS logo on their app screen. The logo consists of two concentric squares with four smaller squares inside them. You can also visit [10](https://qris.id ) to see the list of payment service providers that support QRIS.
      • -
      • How do I get my transaction funds from QRIS OVO Merchant?
        -You will receive your transaction funds within one business day after the transaction date. The funds will be transferred to your bank account that you registered during the registration process. You can check your transaction history and balance on the GrabMerchant app or the OVO website.
      • -
      • How do I contact QRIS OVO Merchant customer service?
        -If you have any questions or issues regarding QRIS OVO Merchant, you can contact the customer service via the following channels:
      • -
          -
        • Phone: 1500-696 (for individual merchants) or 1500-286 (for corporate merchants)
        • -
        • Email: merchant@ovo.id
        • -
        • Live chat: available on the GrabMerchant app or the OVO website
        • -
        -
      • How do I update my QRIS OVO Merchant information?
        -If you need to update your QRIS OVO Merchant information, such as your name, address, phone number, email, bank account, etc., you can do so by following these steps:
      • -
          -
        • For individual merchants: open the GrabMerchant app and tap on "Profile". Then, tap on "Edit Profile" and make the necessary changes. Tap on "Save" when you are done.
        • -
        • For corporate merchants: visit [11](https://ovo.id/merchant) and log in with your email and password. Then, click on "Profile" and make the necessary changes. Click on "Save" when you are done.
        • -
        -

      -

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Roblox Studio Launcher Beta APK Everything You Need to Know About the Latest Version.md b/spaces/congsaPfin/Manga-OCR/logs/Roblox Studio Launcher Beta APK Everything You Need to Know About the Latest Version.md deleted file mode 100644 index 04841776822ddc9e63a6348db175457e2c6a9993..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Roblox Studio Launcher Beta APK Everything You Need to Know About the Latest Version.md +++ /dev/null @@ -1,74 +0,0 @@ -
      -

      How to Download and Install Roblox Studio Launcher Beta APK on Android

      -

      Roblox is a popular online platform that lets you create and play millions of immersive 3D experiences. If you are a Roblox developer or aspiring to be one, you might be interested in Roblox Studio Launcher Beta APK, a free and immersive creation engine that lets you make anything you can imagine on Roblox. In this article, we will show you what Roblox Studio Launcher Beta APK is, how to download and install it on your Android device, and how to use it to create or manage your experiences.

      -

      What is Roblox Studio Launcher Beta APK?

      -

      Roblox Studio Launcher Beta APK is an application file that lets you install and run Roblox Studio on your Android device. Roblox Studio is an all-in-one IDE that lets you create experiences that run on Roblox. It’s free to use and lets you reach millions of users using the Roblox app on console, desktop, and mobile devices.

      -

      robloxstudiolauncherbeta apk download


      Download Zip ::: https://urlca.com/2uOgeq



      -

      Features of Roblox Studio Launcher Beta APK

      -

      Roblox Studio Launcher Beta APK has many features that make it a powerful and versatile tool for creating and publishing experiences on Roblox. Some of these features are:

      -
        -
      • Powerful 3D Building Tools: You can create almost anything you can imagine by combining parts and meshes into models with built-in materials or your own textures. You can also overlay or integrate user interface components like text labels, buttons, billboards, and inventory screens to provide more ways for users to interact with your experiences. You can also build or generate large scale terrains consisting of water, desert, mountains, and more to give your creations an intimate or large scale feeling. You can also take your experiences to the next level by adjusting lighting and atmosphere, integrating audio, and applying special effects.
      • -
      • Integrated Scripting, Debugging, and Testing: Roblox comes with a host of built-in features like matchmaking and physics, but scripting specific features is essential to making your experiences unique. Studio provides full access to the Roblox Engine APIs through a full-featured script editor with modern conveniences like autocompletion and code highlighting. Built-in debugging and profiling lets you catch errors and tune performance so your experiences run at their best on all devices. Finally, you can test directly in Studio through multiple testing and emulation modes.
      • -
      • Fast Publishing and Updating: Roblox not only provides the engine and tooling, but gives you access to a large social network of users on a broad array of devices. Just click to publish and your experiences are automatically available to this network with built-in discovery and monetization opportunities. Want to iterate on your experience? Make the changes and update them immediately for all users in an instant.
      • -
      • Collaborative and Extensible: Studio has built-in collaboration tools so you and your co-creators can build on your own time or together in real-time. Drag and drop parts to a shared workspace where changes appear to everyone at the same time, chat with your team in real-time, and have all your changes saved and managed in the cloud. You can also extend the feature set of Studio with custom plugins, or build your own plugins to share with the rest of the Roblox community.
      • -
      -

      Benefits of Roblox Studio Launcher Beta APK

      -

      Roblox Studio Launcher Beta APK has many benefits that make it a great option for creating and managing experiences on Roblox. Some of these benefits are:

      -
        -
• website or on other platforms that support it, such as Windows, Mac, Linux, iOS, and Xbox One.

How can I get help or support for using Roblox Studio Launcher Beta APK?
If you need help or support for using Roblox Studio Launcher Beta APK, you can visit the official Roblox website or the Roblox Developer Forum, where you can find tutorials, guides, tips, and answers to common questions. You can also contact the Roblox Support team or send feedback through the app.

        -

        -
        -
        \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Survive and Thrive with Garena Free Fire Lite Download for Mobile.md b/spaces/congsaPfin/Manga-OCR/logs/Survive and Thrive with Garena Free Fire Lite Download for Mobile.md deleted file mode 100644 index 0aaf05cb3b90285788968b81dbb6eb0f527eacf2..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Survive and Thrive with Garena Free Fire Lite Download for Mobile.md +++ /dev/null @@ -1,160 +0,0 @@ - -

        Garena Free Fire Lite: How to Download and Play the Lighter Version of the Popular Battle Royale Game

        -

        Introduction

        -

        Garena Free Fire is one of the most popular and successful battle royale games on mobile platforms, with over a billion downloads on Google Play Store alone. However, not everyone has a high-end device that can run the game smoothly and enjoy its full potential. That's why Garena has released a lighter version of the game, called Garena Free Fire Lite, that can run on low-end devices without compromising the core gameplay experience.

        -

        garena free fire lite download


        Download Ziphttps://urlca.com/2uO6Fk



        -

        What is Garena Free Fire Lite?

        -

        Garena Free Fire Lite is a modified version of the original game that has reduced graphics quality, file size, and system requirements. It is designed to run on devices with low RAM, storage, and CPU power, as well as on older versions of Android and iOS operating systems. The game still offers the same thrilling survival shooter gameplay, with 50 players dropping into a large map and fighting for their survival until only one remains.

        -

        Why should you play Garena Free Fire Lite?

        -

        If you are a fan of battle royale games but your device cannot handle the original game, then Garena Free Fire Lite is a perfect choice for you. You can enjoy the following benefits by playing this version:

        -
          -
        • Smooth and fast gameplay - The game runs at a stable frame rate and does not lag or crash even on low-end devices.
        • -
        • Easy download and installation - The game has a small file size of around 300 MB, which means you can download it quickly and save your data usage.
        • -
        • Compatible with most devices - The game supports Android devices with OS 4.0.3 or higher, iOS devices with OS 9 or higher, and PC via BlueStacks emulator.
        • -
        • Same features as the original game - The game has the same game modes, maps, characters, weapons, items, and events as the original game, so you won't miss out on anything.
        • -
        -

        How to download and install Garena Free Fire Lite?

        -

        For Android devices

        -

        To download and install Garena Free Fire Lite on your Android device, follow these steps:

        -
          -
        1. Go to Google Play Store and search for "Garena Free Fire Lite". Alternatively, you can use this link:
        2. -
        3. Tap on "Install" and wait for the download to finish.
        4. -
        5. Once the download is complete, tap on "Open" to launch the game.
        6. -
        7. Allow the game to access your device's storage, microphone, and location.
        8. -
        9. Create or log in to your Garena account or use Facebook or Google to sign in.
        10. -
        11. Enjoy playing Garena Free Fire Lite!
        12. -
        -

        For iOS devices

        -

        To download and install Garena Free Fire Lite on your iOS device, follow these steps:

        -


        -
          -
        1. Go to App Store and search for "Garena Free Fire Lite". Alternatively, you can use this link:
        2. -
        3. Tap on "Get" and enter your Apple ID password if prompted.
        4. -
        5. Wait for the download to finish and tap on the app icon to launch the game.
        6. -
        7. Allow the game to access your device's microphone and location.
        8. -
        9. Create or log in to your Garena account or use Facebook or Google to sign in.
        10. -
        11. Enjoy playing Garena Free Fire Lite!
        12. -
        -

        For PC via BlueStacks

        -

        To download and install Garena Free Fire Lite on your PC, you need to use an Android emulator such as BlueStacks. Follow these steps:

        -
          -
        1. Download and install BlueStacks on your PC from this link:
        2. -
        3. Launch BlueStacks and sign in with your Google account.
        4. -
        5. Go to Google Play Store and search for "Garena Free Fire Lite". Alternatively, you can use this link:
        6. -
        7. Install the game and open it from the home screen.
        8. -
        9. Create or log in to your Garena account or use Facebook or Google to sign in.
        10. -
        11. Enjoy playing Garena Free Fire Lite on your PC!
        12. -
        -

        How to play Garena Free Fire Lite?

        -

        Game modes and maps

        -

        Garena Free Fire Lite offers the same game modes and maps as the original game. You can choose from the following modes:

        -
          -
        • Classic - The standard battle royale mode where 50 players compete for survival on one of the four maps: Bermuda, Purgatory, Kalahari, or Bermuda Remastered.
        • -
        • Clash Squad - A team-based mode where two squads of four players each fight in a series of rounds on a small map. The squad with the most wins at the end of the match wins.
        • -
        • Rampage - A special mode that features a new map called Rampage Island and a new gameplay mechanic called Beast Mode. Players can transform into powerful beasts with enhanced abilities by collecting energy crystals.
        • -
        • Other modes - There are also other limited-time modes that are available during certain events, such as Zombie Invasion, Big Head, and Bomb Squad.
        • -
        -

        Characters and skills

        -

        Garena Free Fire Lite has the same characters and skills as the original game. You can choose from over 40 characters, each with their own unique skills and personalities. Some of the most popular characters are:

        -
          -
        • Alok - A famous DJ who can create a sound wave that heals himself and his allies within a radius.
        • -
        • K - A professor and jiu-jitsu expert who can switch between two modes: Jiujitsu Mode, which increases his EP recovery rate, and Psychology Mode, which restores EP for himself and his allies.
        • -
        • Kelly - A sprinter who can run faster than other characters.
        • -
        • Moco - A hacker who can tag enemies that she shoots, revealing their location to her team for a few seconds.
        • -
        -

        You can also customize your character's appearance with various outfits, accessories, and skins.

        -

        Weapons and items

        -

        Garena Free Fire Lite has the same weapons and items as the original game. You can find and use various types of weapons, such as assault rifles, sniper rifles, shotguns, SMGs, pistols, melee weapons, and grenades. Some of the most popular weapons are:

        -
          -
        • M1887 - A powerful shotgun that can deal massive damage at close range.
        • -
        • AUG - An accurate assault rifle that has low recoil and high fire rate.
        • -
        • AWM - A deadly sniper rifle that can kill enemies with one shot if aimed at the head.
        • -
        • M79 - A grenade launcher that can blast enemies with explosive projectiles.
        • -
        -

        You can also equip your weapons with attachments, such as scopes, silencers, magazines, and muzzles, to improve their performance. You can also use items such as medkits, armor vests, helmets, backpacks, and gloo walls to aid your survival.

        -

        Tips and tricks to win in Garena Free Fire Lite

        -

        Choose your landing spot wisely

        -

        The first thing you need to do when you enter a match is to choose where to land on the map. You can use the mini-map to see the flight path of the plane and the safe zone. You should choose a landing spot that suits your play style and strategy. For example:

        -
          -
        • If you want to avoid early fights and loot peacefully, you should land in remote areas that are far from the plane's path and the safe zone.
        • -
        • If you want to get into action quickly and loot high-quality items, you should land in hotspots that are near the plane's path and the safe zone. However, be prepared to face many enemies there.
        • -
        • If you want to have a balanced approach, you should land in medium-risk areas that are not too crowded but not too isolated either. You can find decent loot there and have some fights without being overwhelmed.
        • -
        -

        Whatever landing spot you choose, make sure you have a backup plan in case things go wrong. You should also keep an eye on the map and the timer to know when to move to the next safe zone.

        -

        Loot fast and move smart

        -

        After landing, you need to loot as fast as possible and equip yourself with weapons, items, and armor. You should prioritize finding a primary weapon, such as an assault rifle or a shotgun, and a secondary weapon, such as a pistol or a melee weapon. You should also look for medkits, armor vests, helmets, backpacks, and gloo walls. These items will help you survive longer and fight better.

        -

        However, you should not spend too much time looting and camping in one place. You should always be on the move and look for better loot and positions. You should also avoid unnecessary fights and only engage when you have an advantage or a clear opportunity. You should also use cover, such as buildings, trees, rocks, and vehicles, to protect yourself from enemy fire and ambushes.

        -

        Use the gloo wall and other utilities

        -

        One of the most useful items in Garena Free Fire Lite is the gloo wall. It is a throwable item that creates a temporary wall that can block bullets and explosions. You can use it to create cover, block doors and windows, trap enemies, or escape from danger. You can also shoot through the gloo wall if you have a scope attached to your weapon.

        -

        Another useful item is the grenade. It is an explosive device that can deal damage and knock back enemies within a radius. You can use it to flush out enemies from hiding places, break their gloo walls, or finish them off when they are low on health. You can also use other types of grenades, such as flashbangs, smoke grenades, and tear gas grenades, to blind, confuse, or slow down your enemies.

        -

        You should also use other utilities, such as the scanner, the air strike, the supply drop, and the vehicle. The scanner is a device that can reveal the location of nearby enemies for a few seconds. The air strike is a call that can summon a bombardment on a selected area. The supply drop is a crate that contains high-quality loot that drops from the sky. The vehicle is a mode of transportation that can help you move faster and run over enemies.

        -

        Communicate and cooperate with your team

        -

        If you are playing in a squad mode, you need to communicate and cooperate with your team members. You can use the voice chat or the quick chat to talk to your teammates and share information, such as enemy locations, loot locations, strategies, and requests. You can also use the ping system to mark places or items on the map for your teammates to see.

        -

        You should also cooperate with your team by sticking together, supporting each other, reviving each other, sharing loot, and executing tactics. You should also assign roles to your team members based on their skills and preferences. For example:

        -
          -
        • The leader - The one who makes decisions and gives orders to the team.
        • -
        • The scout - The one who scouts ahead and gathers information about the enemies and the environment.
        • -
        • The sniper - The one who provides long-range support and takes out enemies from afar.
        • -
        • The rusher - The one who rushes into close combat and initiates fights.
        • -
        -

        By communicating and cooperating with your team, you can increase your chances of winning and have more fun playing Garena Free Fire Lite.

        -

        Conclusion

        -

Garena Free Fire Lite is a lighter version of the original game that can run on low-end devices without compromising the core gameplay experience. It offers the same thrilling survival shooter gameplay, with 50 players competing for survival across various maps and modes, and it has the same features as the original game, with over 40 characters and a wide range of weapons, items, and events. You can download and install the game on your Android, iOS, or PC device quickly and easily, and it runs smoothly without lag or crashes. With the tips and tricks above, you can improve your skills and win more matches. Garena Free Fire Lite is a great choice for anyone who loves battle royale games but has a low-end device. Download it now and join the fun!

        -

        FAQs

        -

        What is the difference between Garena Free Fire and Garena Free Fire Lite?

        -

        The main difference between the two games is the graphics quality, file size, and system requirements. Garena Free Fire Lite has lower graphics quality, smaller file size, and lower system requirements than the original game. However, both games have the same gameplay features and content.

        -

        Can I play Garena Free Fire Lite with my friends who play Garena Free Fire?

        -

        Yes, you can play Garena Free Fire Lite with your friends who play Garena Free Fire. Both games share the same server and account system, so you can invite and join your friends from either game.

        -

        How can I get diamonds in Garena Free Fire Lite?

        -

        Diamonds are the premium currency in Garena Free Fire Lite that can be used to buy various items, such as characters, outfits, skins, and crates. You can get diamonds by purchasing them with real money or by completing certain tasks and events in the game.

        -

        How can I update Garena Free Fire Lite?

        -

        To update Garena Free Fire Lite, you need to go to the app store where you downloaded the game and check for any available updates. You can also enable the auto-update option in your device settings to update the game automatically whenever there is a new version.

        -

        How can I contact the customer service of Garena Free Fire Lite?

        -

        If you have any questions, problems, or feedback about Garena Free Fire Lite, you can contact the customer service of the game by following these steps:

        -
          -
        1. Open the game and tap on the "Settings" icon on the top right corner of the screen.
        2. -
        3. Tap on the "Customer Service" option on the bottom left corner of the screen.
        4. -
        5. Fill in your details and your message and tap on "Submit".
        6. -
        7. Wait for a reply from the customer service team.
        8. -

        -
        -
        \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Cheat-hack - Mp-hacks Esp V5.0 For Counter Strike 1.6 BEST.md b/spaces/contluForse/HuggingGPT/assets/Cheat-hack - Mp-hacks Esp V5.0 For Counter Strike 1.6 BEST.md deleted file mode 100644 index 8821a6cd72e72d576e83a1cd71e583ec2314df59..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Cheat-hack - Mp-hacks Esp V5.0 For Counter Strike 1.6 BEST.md +++ /dev/null @@ -1,14 +0,0 @@ -

        Cheat-hack - mp-hacks esp v5.0 for counter strike 1.6


        Download Ziphttps://ssurll.com/2uzyOG



Counter Strike 1.6 cheat: sXe 12 WallHack + Aimbot + Inexinferis, ... Press F12 for a quick hack ... MP-Hacks ESP V5.0 cheat For Counter Strike 1.6 ... Counter-Strike cheat 1.6...
Download cheats for Counter-Strike 1.6 ...
You can download cheats for Counter-Strike 1.6 for free without SMS and without registration. ...
Cheats for CS 1.6 (Download Cheats for Counter-Strike 1.6.)
In this section you can find and download cheats for CS 1.6 ...
Download cheats for CS 1.6 - Counter-Strike 1.6 ...
Download cheat for CS 1.6 ...
Download cheats for CS 1.6 - Counter-Strike 1.6
Download cheats for CS 1.6 » Download cheats for CS 1.6 and CS:GO, download cheats for CS 1.6 for free and ...
        -
        -
        -

        diff --git a/spaces/contluForse/HuggingGPT/assets/Ei Jig Standard 1530 Pdf 11 NEW!.md b/spaces/contluForse/HuggingGPT/assets/Ei Jig Standard 1530 Pdf 11 NEW!.md deleted file mode 100644 index b07c079d715a36558bb3a80bb2bdb292b39f1ab8..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Ei Jig Standard 1530 Pdf 11 NEW!.md +++ /dev/null @@ -1,8 +0,0 @@ -

        ei jig standard 1530 pdf 11


        Download Filehttps://ssurll.com/2uzyaP



Security requirements
Quality Assurance for the Production, Storage and Distribution of Aviation Fuel at Airports (EI/JIG STANDARD 1530)
This standard contains quality assurance requirements for the production, storage and distribution of aviation fuel at airports.
This International Standard is applicable to aviation fuels obtained in storage or blending from different suppliers and may be applied to aviation fuels obtained in storage, blending and/or blending in a tanker.
The requirements of this standard do not apply to:
        -
        -
        -

        diff --git a/spaces/cooelf/Multimodal-CoT/timm/models/resnet.py b/spaces/cooelf/Multimodal-CoT/timm/models/resnet.py deleted file mode 100644 index 66baa37a90dbb2f2cdb510bc0b988cd25bd5887a..0000000000000000000000000000000000000000 --- a/spaces/cooelf/Multimodal-CoT/timm/models/resnet.py +++ /dev/null @@ -1,1455 +0,0 @@ -"""PyTorch ResNet - -This started as a copy of https://github.com/pytorch/vision 'resnet.py' (BSD-3-Clause) with -additional dropout and dynamic global avg/max pool. - -ResNeXt, SE-ResNeXt, SENet, and MXNet Gluon stem/downsample variants, tiered stems added by Ross Wightman -Copyright 2020 Ross Wightman -""" -import math -from functools import partial - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD -from .helpers import build_model_with_cfg -from .layers import DropBlock2d, DropPath, AvgPool2dSame, BlurPool2d, create_attn, get_attn, create_classifier -from .registry import register_model - -__all__ = ['ResNet', 'BasicBlock', 'Bottleneck'] # model_registry will add each entrypoint fn to this - - -def _cfg(url='', **kwargs): - return { - 'url': url, - 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), - 'crop_pct': 0.875, 'interpolation': 'bilinear', - 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, - 'first_conv': 'conv1', 'classifier': 'fc', - **kwargs - } - - -default_cfgs = { - # ResNet and Wide ResNet - 'resnet18': _cfg(url='https://download.pytorch.org/models/resnet18-5c106cde.pth'), - 'resnet18d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet18d_ra2-48a79e06.pth', - interpolation='bicubic', first_conv='conv1.0'), - 'resnet34': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth'), - 'resnet34d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34d_ra2-f8dcfcaf.pth', - interpolation='bicubic', first_conv='conv1.0'), - 'resnet26': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26-9aa10e23.pth', - interpolation='bicubic'), - 'resnet26d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26d-69e92c46.pth', - interpolation='bicubic', first_conv='conv1.0'), - 'resnet26t': _cfg( - url='', - interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8)), - 'resnet50': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50_ram-a26f946b.pth', - interpolation='bicubic'), - 'resnet50d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50d_ra2-464e36ba.pth', - interpolation='bicubic', first_conv='conv1.0'), - 'resnet50t': _cfg( - url='', - interpolation='bicubic', first_conv='conv1.0'), - 'resnet101': _cfg(url='', interpolation='bicubic'), - 'resnet101d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet101d_ra2-2803ffab.pth', - interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), - crop_pct=1.0, test_input_size=(3, 320, 320)), - 'resnet152': _cfg(url='', interpolation='bicubic'), - 'resnet152d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet152d_ra2-5cac0439.pth', - interpolation='bicubic', first_conv='conv1.0', 
input_size=(3, 256, 256), pool_size=(8, 8), - crop_pct=1.0, test_input_size=(3, 320, 320)), - 'resnet200': _cfg(url='', interpolation='bicubic'), - 'resnet200d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet200d_ra2-bdba9bf9.pth', - interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), - crop_pct=1.0, test_input_size=(3, 320, 320)), - 'tv_resnet34': _cfg(url='https://download.pytorch.org/models/resnet34-333f7ec4.pth'), - 'tv_resnet50': _cfg(url='https://download.pytorch.org/models/resnet50-19c8e357.pth'), - 'tv_resnet101': _cfg(url='https://download.pytorch.org/models/resnet101-5d3b4d8f.pth'), - 'tv_resnet152': _cfg(url='https://download.pytorch.org/models/resnet152-b121ed2d.pth'), - 'wide_resnet50_2': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/wide_resnet50_racm-8234f177.pth', - interpolation='bicubic'), - 'wide_resnet101_2': _cfg(url='https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth'), - - # ResNeXt - 'resnext50_32x4d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50_32x4d_ra-d733960d.pth', - interpolation='bicubic'), - 'resnext50d_32x4d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50d_32x4d-103e99f8.pth', - interpolation='bicubic', - first_conv='conv1.0'), - 'resnext101_32x4d': _cfg(url=''), - 'resnext101_32x8d': _cfg(url='https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth'), - 'resnext101_64x4d': _cfg(url=''), - 'tv_resnext50_32x4d': _cfg(url='https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth'), - - # ResNeXt models - Weakly Supervised Pretraining on Instagram Hashtags - # from https://github.com/facebookresearch/WSL-Images - # Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. - 'ig_resnext101_32x8d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth'), - 'ig_resnext101_32x16d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth'), - 'ig_resnext101_32x32d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth'), - 'ig_resnext101_32x48d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth'), - - # Semi-Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models - # Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. 
- 'ssl_resnet18': _cfg( - url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth'), - 'ssl_resnet50': _cfg( - url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth'), - 'ssl_resnext50_32x4d': _cfg( - url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext50_32x4-ddb3e555.pth'), - 'ssl_resnext101_32x4d': _cfg( - url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x4-dc43570a.pth'), - 'ssl_resnext101_32x8d': _cfg( - url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x8-2cfe2f8b.pth'), - 'ssl_resnext101_32x16d': _cfg( - url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x16-15fffa57.pth'), - - # Semi-Weakly Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models - # Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. - 'swsl_resnet18': _cfg( - url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet18-118f1556.pth'), - 'swsl_resnet50': _cfg( - url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet50-16a12f1b.pth'), - 'swsl_resnext50_32x4d': _cfg( - url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth'), - 'swsl_resnext101_32x4d': _cfg( - url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth'), - 'swsl_resnext101_32x8d': _cfg( - url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth'), - 'swsl_resnext101_32x16d': _cfg( - url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth'), - - # Squeeze-Excitation ResNets, to eventually replace the models in senet.py - 'seresnet18': _cfg( - url='', - interpolation='bicubic'), - 'seresnet34': _cfg( - url='', - interpolation='bicubic'), - 'seresnet50': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet50_ra_224-8efdb4bb.pth', - interpolation='bicubic'), - 'seresnet50t': _cfg( - url='', - interpolation='bicubic', - first_conv='conv1.0'), - 'seresnet101': _cfg( - url='', - interpolation='bicubic'), - 'seresnet152': _cfg( - url='', - interpolation='bicubic'), - 'seresnet152d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet152d_ra2-04464dd2.pth', - interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), - crop_pct=1.0, test_input_size=(3, 320, 320) - ), - 'seresnet200d': _cfg( - url='', - interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)), - 'seresnet269d': _cfg( - url='', - interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)), - - - # Squeeze-Excitation ResNeXts, to eventually replace the models in senet.py - 'seresnext26d_32x4d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26d_32x4d-80fa48a3.pth', - interpolation='bicubic', - first_conv='conv1.0'), - 'seresnext26t_32x4d': _cfg( - 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26tn_32x4d-569cb627.pth', - interpolation='bicubic', - first_conv='conv1.0'), - 'seresnext50_32x4d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext50_32x4d_racm-a304a460.pth', - interpolation='bicubic'), - 'seresnext101_32x4d': _cfg( - url='', - interpolation='bicubic'), - 'seresnext101_32x8d': _cfg( - url='', - interpolation='bicubic'), - 'senet154': _cfg( - url='', - interpolation='bicubic', - first_conv='conv1.0'), - - # Efficient Channel Attention ResNets - 'ecaresnet26t': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet26t_ra2-46609757.pth', - interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), - crop_pct=0.95, test_input_size=(3, 320, 320)), - 'ecaresnetlight': _cfg( - url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNetLight_4f34b35b.pth', - interpolation='bicubic'), - 'ecaresnet50d': _cfg( - url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet50D_833caf58.pth', - interpolation='bicubic', - first_conv='conv1.0'), - 'ecaresnet50d_pruned': _cfg( - url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45899/outputs/ECAResNet50D_P_9c67f710.pth', - interpolation='bicubic', - first_conv='conv1.0'), - 'ecaresnet50t': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet50t_ra2-f7ac63c4.pth', - interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), - crop_pct=0.95, test_input_size=(3, 320, 320)), - 'ecaresnet101d': _cfg( - url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet101D_281c5844.pth', - interpolation='bicubic', first_conv='conv1.0'), - 'ecaresnet101d_pruned': _cfg( - url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45610/outputs/ECAResNet101D_P_75a3370e.pth', - interpolation='bicubic', - first_conv='conv1.0'), - 'ecaresnet200d': _cfg( - url='', - interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)), - 'ecaresnet269d': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet269d_320_ra2-7baa55cb.pth', - interpolation='bicubic', first_conv='conv1.0', input_size=(3, 320, 320), pool_size=(10, 10), - crop_pct=1.0, test_input_size=(3, 352, 352)), - - # Efficient Channel Attention ResNeXts - 'ecaresnext26t_32x4d': _cfg( - url='', - interpolation='bicubic', first_conv='conv1.0'), - 'ecaresnext50t_32x4d': _cfg( - url='', - interpolation='bicubic', first_conv='conv1.0'), - - # ResNets with anti-aliasing blur pool - 'resnetblur18': _cfg( - interpolation='bicubic'), - 'resnetblur50': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnetblur50-84f4748f.pth', - interpolation='bicubic'), - - # ResNet-RS models - 'resnetrs50': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs50_ema-6b53758b.pth', - input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.91, test_input_size=(3, 224, 224), - interpolation='bicubic', first_conv='conv1.0'), - 'resnetrs101': _cfg( - 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs101_i192_ema-1509bbf6.pth', - input_size=(3, 192, 192), pool_size=(6, 6), crop_pct=0.94, test_input_size=(3, 288, 288), - interpolation='bicubic', first_conv='conv1.0'), - 'resnetrs152': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs152_i256_ema-a9aff7f9.pth', - input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320), - interpolation='bicubic', first_conv='conv1.0'), - 'resnetrs200': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs200_ema-623d2f59.pth', - input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320), - interpolation='bicubic', first_conv='conv1.0'), - 'resnetrs270': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs270_ema-b40e674c.pth', - input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 352, 352), - interpolation='bicubic', first_conv='conv1.0'), - 'resnetrs350': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs350_i256_ema-5a1aa8f1.pth', - input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, test_input_size=(3, 384, 384), - interpolation='bicubic', first_conv='conv1.0'), - 'resnetrs420': _cfg( - url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs420_ema-972dee69.pth', - input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, test_input_size=(3, 416, 416), - interpolation='bicubic', first_conv='conv1.0'), -} - - -def get_padding(kernel_size, stride, dilation=1): - padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 - return padding - - -class BasicBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, - reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, - attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): - super(BasicBlock, self).__init__() - - assert cardinality == 1, 'BasicBlock only supports cardinality of 1' - assert base_width == 64, 'BasicBlock does not support changing base width' - first_planes = planes // reduce_first - outplanes = planes * self.expansion - first_dilation = first_dilation or dilation - use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation) - - self.conv1 = nn.Conv2d( - inplanes, first_planes, kernel_size=3, stride=1 if use_aa else stride, padding=first_dilation, - dilation=first_dilation, bias=False) - self.bn1 = norm_layer(first_planes) - self.act1 = act_layer(inplace=True) - self.aa = aa_layer(channels=first_planes, stride=stride) if use_aa else None - - self.conv2 = nn.Conv2d( - first_planes, outplanes, kernel_size=3, padding=dilation, dilation=dilation, bias=False) - self.bn2 = norm_layer(outplanes) - - self.se = create_attn(attn_layer, outplanes) - - self.act2 = act_layer(inplace=True) - self.downsample = downsample - self.stride = stride - self.dilation = dilation - self.drop_block = drop_block - self.drop_path = drop_path - - def zero_init_last_bn(self): - nn.init.zeros_(self.bn2.weight) - - def forward(self, x): - shortcut = x - - x = self.conv1(x) - x = self.bn1(x) - if self.drop_block is not None: - x = self.drop_block(x) - x = self.act1(x) - if self.aa is not None: - x = self.aa(x) - - x = self.conv2(x) - x = 
self.bn2(x) - if self.drop_block is not None: - x = self.drop_block(x) - - if self.se is not None: - x = self.se(x) - - if self.drop_path is not None: - x = self.drop_path(x) - - if self.downsample is not None: - shortcut = self.downsample(shortcut) - x += shortcut - x = self.act2(x) - - return x - - -class Bottleneck(nn.Module): - expansion = 4 - - def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, - reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, - attn_layer=None, aa_layer=None, drop_block=None, drop_path=None): - super(Bottleneck, self).__init__() - - width = int(math.floor(planes * (base_width / 64)) * cardinality) - first_planes = width // reduce_first - outplanes = planes * self.expansion - first_dilation = first_dilation or dilation - use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation) - - self.conv1 = nn.Conv2d(inplanes, first_planes, kernel_size=1, bias=False) - self.bn1 = norm_layer(first_planes) - self.act1 = act_layer(inplace=True) - - self.conv2 = nn.Conv2d( - first_planes, width, kernel_size=3, stride=1 if use_aa else stride, - padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False) - self.bn2 = norm_layer(width) - self.act2 = act_layer(inplace=True) - self.aa = aa_layer(channels=width, stride=stride) if use_aa else None - - self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False) - self.bn3 = norm_layer(outplanes) - - self.se = create_attn(attn_layer, outplanes) - - self.act3 = act_layer(inplace=True) - self.downsample = downsample - self.stride = stride - self.dilation = dilation - self.drop_block = drop_block - self.drop_path = drop_path - - def zero_init_last_bn(self): - nn.init.zeros_(self.bn3.weight) - - def forward(self, x): - shortcut = x - - x = self.conv1(x) - x = self.bn1(x) - if self.drop_block is not None: - x = self.drop_block(x) - x = self.act1(x) - - x = self.conv2(x) - x = self.bn2(x) - if self.drop_block is not None: - x = self.drop_block(x) - x = self.act2(x) - if self.aa is not None: - x = self.aa(x) - - x = self.conv3(x) - x = self.bn3(x) - if self.drop_block is not None: - x = self.drop_block(x) - - if self.se is not None: - x = self.se(x) - - if self.drop_path is not None: - x = self.drop_path(x) - - if self.downsample is not None: - shortcut = self.downsample(shortcut) - x += shortcut - x = self.act3(x) - - return x - - -def downsample_conv( - in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None): - norm_layer = norm_layer or nn.BatchNorm2d - kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size - first_dilation = (first_dilation or dilation) if kernel_size > 1 else 1 - p = get_padding(kernel_size, stride, first_dilation) - - return nn.Sequential(*[ - nn.Conv2d( - in_channels, out_channels, kernel_size, stride=stride, padding=p, dilation=first_dilation, bias=False), - norm_layer(out_channels) - ]) - - -def downsample_avg( - in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None): - norm_layer = norm_layer or nn.BatchNorm2d - avg_stride = stride if dilation == 1 else 1 - if stride == 1 and dilation == 1: - pool = nn.Identity() - else: - avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d - pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) - - return nn.Sequential(*[ - pool, - nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False), 
- norm_layer(out_channels) - ]) - - -def drop_blocks(drop_block_rate=0.): - return [ - None, None, - DropBlock2d(drop_block_rate, 5, 0.25) if drop_block_rate else None, - DropBlock2d(drop_block_rate, 3, 1.00) if drop_block_rate else None] - - -def make_blocks( - block_fn, channels, block_repeats, inplanes, reduce_first=1, output_stride=32, - down_kernel_size=1, avg_down=False, drop_block_rate=0., drop_path_rate=0., **kwargs): - stages = [] - feature_info = [] - net_num_blocks = sum(block_repeats) - net_block_idx = 0 - net_stride = 4 - dilation = prev_dilation = 1 - for stage_idx, (planes, num_blocks, db) in enumerate(zip(channels, block_repeats, drop_blocks(drop_block_rate))): - stage_name = f'layer{stage_idx + 1}' # never liked this name, but weight compat requires it - stride = 1 if stage_idx == 0 else 2 - if net_stride >= output_stride: - dilation *= stride - stride = 1 - else: - net_stride *= stride - - downsample = None - if stride != 1 or inplanes != planes * block_fn.expansion: - down_kwargs = dict( - in_channels=inplanes, out_channels=planes * block_fn.expansion, kernel_size=down_kernel_size, - stride=stride, dilation=dilation, first_dilation=prev_dilation, norm_layer=kwargs.get('norm_layer')) - downsample = downsample_avg(**down_kwargs) if avg_down else downsample_conv(**down_kwargs) - - block_kwargs = dict(reduce_first=reduce_first, dilation=dilation, drop_block=db, **kwargs) - blocks = [] - for block_idx in range(num_blocks): - downsample = downsample if block_idx == 0 else None - stride = stride if block_idx == 0 else 1 - block_dpr = drop_path_rate * net_block_idx / (net_num_blocks - 1) # stochastic depth linear decay rule - blocks.append(block_fn( - inplanes, planes, stride, downsample, first_dilation=prev_dilation, - drop_path=DropPath(block_dpr) if block_dpr > 0. else None, **block_kwargs)) - prev_dilation = dilation - inplanes = planes * block_fn.expansion - net_block_idx += 1 - - stages.append((stage_name, nn.Sequential(*blocks))) - feature_info.append(dict(num_chs=inplanes, reduction=net_stride, module=stage_name)) - - return stages, feature_info - - -class ResNet(nn.Module): - """ResNet / ResNeXt / SE-ResNeXt / SE-Net - - This class implements all variants of ResNet, ResNeXt, SE-ResNeXt, and SENet that - * have > 1 stride in the 3x3 conv layer of bottleneck - * have conv-bn-act ordering - - This ResNet impl supports a number of stem and downsample options based on the v1c, v1d, v1e, and v1s - variants included in the MXNet Gluon ResNetV1b model. The C and D variants are also discussed in the - 'Bag of Tricks' paper: https://arxiv.org/pdf/1812.01187. The B variant is equivalent to torchvision default. 
- - ResNet variants (the same modifications can be used in SE/ResNeXt models as well): - * normal, b - 7x7 stem, stem_width = 64, same as torchvision ResNet, NVIDIA ResNet 'v1.5', Gluon v1b - * c - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64) - * d - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64), average pool in downsample - * e - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128), average pool in downsample - * s - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128) - * t - 3 layer deep 3x3 stem, stem width = 32 (24, 48, 64), average pool in downsample - * tn - 3 layer deep 3x3 stem, stem width = 32 (24, 32, 64), average pool in downsample - - ResNeXt - * normal - 7x7 stem, stem_width = 64, standard cardinality and base widths - * same c,d, e, s variants as ResNet can be enabled - - SE-ResNeXt - * normal - 7x7 stem, stem_width = 64 - * same c, d, e, s variants as ResNet can be enabled - - SENet-154 - 3 layer deep 3x3 stem (same as v1c-v1s), stem_width = 64, cardinality=64, - reduction by 2 on width of first bottleneck convolution, 3x3 downsample convs after first block - - Parameters - ---------- - block : Block - Class for the residual block. Options are BasicBlockGl, BottleneckGl. - layers : list of int - Numbers of layers in each block - num_classes : int, default 1000 - Number of classification classes. - in_chans : int, default 3 - Number of input (color) channels. - cardinality : int, default 1 - Number of convolution groups for 3x3 conv in Bottleneck. - base_width : int, default 64 - Factor determining bottleneck channels. `planes * base_width / 64 * cardinality` - stem_width : int, default 64 - Number of channels in stem convolutions - stem_type : str, default '' - The type of stem: - * '', default - a single 7x7 conv with a width of stem_width - * 'deep' - three 3x3 convolution layers of widths stem_width, stem_width, stem_width * 2 - * 'deep_tiered' - three 3x3 conv layers of widths stem_width//4 * 3, stem_width, stem_width * 2 - block_reduce_first: int, default 1 - Reduction factor for first convolution output width of residual blocks, - 1 for all archs except senets, where 2 - down_kernel_size: int, default 1 - Kernel size of residual block downsampling path, 1x1 for most archs, 3x3 for senets - avg_down : bool, default False - Whether to use average pooling for projection skip connection between stages/downsample. - output_stride : int, default 32 - Set the output stride of the network, 32, 16, or 8. Typically used in segmentation. - act_layer : nn.Module, activation layer - norm_layer : nn.Module, normalization layer - aa_layer : nn.Module, anti-aliasing layer - drop_rate : float, default 0. - Dropout probability before classifier, for training - global_pool : str, default 'avg' - Global pooling type. 
One of 'avg', 'max', 'avgmax', 'catavgmax' - """ - - def __init__(self, block, layers, num_classes=1000, in_chans=3, - cardinality=1, base_width=64, stem_width=64, stem_type='', replace_stem_pool=False, - output_stride=32, block_reduce_first=1, down_kernel_size=1, avg_down=False, - act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_rate=0.0, drop_path_rate=0., - drop_block_rate=0., global_pool='avg', zero_init_last_bn=True, block_args=None): - block_args = block_args or dict() - assert output_stride in (8, 16, 32) - self.num_classes = num_classes - self.drop_rate = drop_rate - super(ResNet, self).__init__() - - # Stem - deep_stem = 'deep' in stem_type - inplanes = stem_width * 2 if deep_stem else 64 - if deep_stem: - stem_chs = (stem_width, stem_width) - if 'tiered' in stem_type: - stem_chs = (3 * (stem_width // 4), stem_width) - self.conv1 = nn.Sequential(*[ - nn.Conv2d(in_chans, stem_chs[0], 3, stride=2, padding=1, bias=False), - norm_layer(stem_chs[0]), - act_layer(inplace=True), - nn.Conv2d(stem_chs[0], stem_chs[1], 3, stride=1, padding=1, bias=False), - norm_layer(stem_chs[1]), - act_layer(inplace=True), - nn.Conv2d(stem_chs[1], inplanes, 3, stride=1, padding=1, bias=False)]) - else: - self.conv1 = nn.Conv2d(in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False) - self.bn1 = norm_layer(inplanes) - self.act1 = act_layer(inplace=True) - self.feature_info = [dict(num_chs=inplanes, reduction=2, module='act1')] - - # Stem Pooling - if replace_stem_pool: - self.maxpool = nn.Sequential(*filter(None, [ - nn.Conv2d(inplanes, inplanes, 3, stride=1 if aa_layer else 2, padding=1, bias=False), - aa_layer(channels=inplanes, stride=2) if aa_layer else None, - norm_layer(inplanes), - act_layer(inplace=True) - ])) - else: - if aa_layer is not None: - self.maxpool = nn.Sequential(*[ - nn.MaxPool2d(kernel_size=3, stride=1, padding=1), - aa_layer(channels=inplanes, stride=2)]) - else: - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - - # Feature Blocks - channels = [64, 128, 256, 512] - stage_modules, stage_feature_info = make_blocks( - block, channels, layers, inplanes, cardinality=cardinality, base_width=base_width, - output_stride=output_stride, reduce_first=block_reduce_first, avg_down=avg_down, - down_kernel_size=down_kernel_size, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, - drop_block_rate=drop_block_rate, drop_path_rate=drop_path_rate, **block_args) - for stage in stage_modules: - self.add_module(*stage) # layer1, layer2, etc - self.feature_info.extend(stage_feature_info) - - # Head (Pooling and Classifier) - self.num_features = 512 * block.expansion - self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) - - self.init_weights(zero_init_last_bn=zero_init_last_bn) - - def init_weights(self, zero_init_last_bn=True): - for n, m in self.named_modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.BatchNorm2d): - nn.init.ones_(m.weight) - nn.init.zeros_(m.bias) - if zero_init_last_bn: - for m in self.modules(): - if hasattr(m, 'zero_init_last_bn'): - m.zero_init_last_bn() - - def get_classifier(self): - return self.fc - - def reset_classifier(self, num_classes, global_pool='avg'): - self.num_classes = num_classes - self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) - - def forward_features(self, x): - x = self.conv1(x) - x = self.bn1(x) - x = 
self.act1(x) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - return x - - def forward(self, x): - x = self.forward_features(x) - x = self.global_pool(x) - if self.drop_rate: - x = F.dropout(x, p=float(self.drop_rate), training=self.training) - x = self.fc(x) - return x - - -def _create_resnet(variant, pretrained=False, **kwargs): - return build_model_with_cfg( - ResNet, variant, pretrained, - default_cfg=default_cfgs[variant], - **kwargs) - - -@register_model -def resnet18(pretrained=False, **kwargs): - """Constructs a ResNet-18 model. - """ - model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) - return _create_resnet('resnet18', pretrained, **model_args) - - -@register_model -def resnet18d(pretrained=False, **kwargs): - """Constructs a ResNet-18-D model. - """ - model_args = dict( - block=BasicBlock, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True, **kwargs) - return _create_resnet('resnet18d', pretrained, **model_args) - - -@register_model -def resnet34(pretrained=False, **kwargs): - """Constructs a ResNet-34 model. - """ - model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs) - return _create_resnet('resnet34', pretrained, **model_args) - - -@register_model -def resnet34d(pretrained=False, **kwargs): - """Constructs a ResNet-34-D model. - """ - model_args = dict( - block=BasicBlock, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) - return _create_resnet('resnet34d', pretrained, **model_args) - - -@register_model -def resnet26(pretrained=False, **kwargs): - """Constructs a ResNet-26 model. - """ - model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], **kwargs) - return _create_resnet('resnet26', pretrained, **model_args) - - -@register_model -def resnet26t(pretrained=False, **kwargs): - """Constructs a ResNet-26-T model. - """ - model_args = dict( - block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep_tiered', avg_down=True, **kwargs) - return _create_resnet('resnet26t', pretrained, **model_args) - - -@register_model -def resnet26d(pretrained=False, **kwargs): - """Constructs a ResNet-26-D model. - """ - model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True, **kwargs) - return _create_resnet('resnet26d', pretrained, **model_args) - - -@register_model -def resnet50(pretrained=False, **kwargs): - """Constructs a ResNet-50 model. - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) - return _create_resnet('resnet50', pretrained, **model_args) - - -@register_model -def resnet50d(pretrained=False, **kwargs): - """Constructs a ResNet-50-D model. - """ - model_args = dict( - block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) - return _create_resnet('resnet50d', pretrained, **model_args) - - -@register_model -def resnet50t(pretrained=False, **kwargs): - """Constructs a ResNet-50-T model. - """ - model_args = dict( - block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True, **kwargs) - return _create_resnet('resnet50t', pretrained, **model_args) - - -@register_model -def resnet101(pretrained=False, **kwargs): - """Constructs a ResNet-101 model. - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs) - return _create_resnet('resnet101', pretrained, **model_args) - - -@register_model -def resnet101d(pretrained=False, **kwargs): - """Constructs a ResNet-101-D model. 
- """ - model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) - return _create_resnet('resnet101d', pretrained, **model_args) - - -@register_model -def resnet152(pretrained=False, **kwargs): - """Constructs a ResNet-152 model. - """ - model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs) - return _create_resnet('resnet152', pretrained, **model_args) - - -@register_model -def resnet152d(pretrained=False, **kwargs): - """Constructs a ResNet-152-D model. - """ - model_args = dict( - block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) - return _create_resnet('resnet152d', pretrained, **model_args) - - -@register_model -def resnet200(pretrained=False, **kwargs): - """Constructs a ResNet-200 model. - """ - model_args = dict(block=Bottleneck, layers=[3, 24, 36, 3], **kwargs) - return _create_resnet('resnet200', pretrained, **model_args) - - -@register_model -def resnet200d(pretrained=False, **kwargs): - """Constructs a ResNet-200-D model. - """ - model_args = dict( - block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs) - return _create_resnet('resnet200d', pretrained, **model_args) - - -@register_model -def tv_resnet34(pretrained=False, **kwargs): - """Constructs a ResNet-34 model with original Torchvision weights. - """ - model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs) - return _create_resnet('tv_resnet34', pretrained, **model_args) - - -@register_model -def tv_resnet50(pretrained=False, **kwargs): - """Constructs a ResNet-50 model with original Torchvision weights. - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) - return _create_resnet('tv_resnet50', pretrained, **model_args) - - -@register_model -def tv_resnet101(pretrained=False, **kwargs): - """Constructs a ResNet-101 model w/ Torchvision pretrained weights. - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs) - return _create_resnet('tv_resnet101', pretrained, **model_args) - - -@register_model -def tv_resnet152(pretrained=False, **kwargs): - """Constructs a ResNet-152 model w/ Torchvision pretrained weights. - """ - model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs) - return _create_resnet('tv_resnet152', pretrained, **model_args) - - -@register_model -def wide_resnet50_2(pretrained=False, **kwargs): - """Constructs a Wide ResNet-50-2 model. - The model is the same as ResNet except for the bottleneck number of channels - which is twice larger in every block. The number of channels in outer 1x1 - convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 - channels, and in Wide ResNet-50-2 has 2048-1024-2048. - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], base_width=128, **kwargs) - return _create_resnet('wide_resnet50_2', pretrained, **model_args) - - -@register_model -def wide_resnet101_2(pretrained=False, **kwargs): - """Constructs a Wide ResNet-101-2 model. - The model is the same as ResNet except for the bottleneck number of channels - which is twice larger in every block. The number of channels in outer 1x1 - convolutions is the same. - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], base_width=128, **kwargs) - return _create_resnet('wide_resnet101_2', pretrained, **model_args) - - -@register_model -def resnext50_32x4d(pretrained=False, **kwargs): - """Constructs a ResNeXt50-32x4d model. 
- """ - model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) - return _create_resnet('resnext50_32x4d', pretrained, **model_args) - - -@register_model -def resnext50d_32x4d(pretrained=False, **kwargs): - """Constructs a ResNeXt50d-32x4d model. ResNext50 w/ deep stem & avg pool downsample - """ - model_args = dict( - block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, - stem_width=32, stem_type='deep', avg_down=True, **kwargs) - return _create_resnet('resnext50d_32x4d', pretrained, **model_args) - - -@register_model -def resnext101_32x4d(pretrained=False, **kwargs): - """Constructs a ResNeXt-101 32x4d model. - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) - return _create_resnet('resnext101_32x4d', pretrained, **model_args) - - -@register_model -def resnext101_32x8d(pretrained=False, **kwargs): - """Constructs a ResNeXt-101 32x8d model. - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) - return _create_resnet('resnext101_32x8d', pretrained, **model_args) - - -@register_model -def resnext101_64x4d(pretrained=False, **kwargs): - """Constructs a ResNeXt101-64x4d model. - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4, **kwargs) - return _create_resnet('resnext101_64x4d', pretrained, **model_args) - - -@register_model -def tv_resnext50_32x4d(pretrained=False, **kwargs): - """Constructs a ResNeXt50-32x4d model with original Torchvision weights. - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) - return _create_resnet('tv_resnext50_32x4d', pretrained, **model_args) - - -@register_model -def ig_resnext101_32x8d(pretrained=True, **kwargs): - """Constructs a ResNeXt-101 32x8 model pre-trained on weakly-supervised data - and finetuned on ImageNet from Figure 5 in - `"Exploring the Limits of Weakly Supervised Pretraining" `_ - Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) - return _create_resnet('ig_resnext101_32x8d', pretrained, **model_args) - - -@register_model -def ig_resnext101_32x16d(pretrained=True, **kwargs): - """Constructs a ResNeXt-101 32x16 model pre-trained on weakly-supervised data - and finetuned on ImageNet from Figure 5 in - `"Exploring the Limits of Weakly Supervised Pretraining" `_ - Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs) - return _create_resnet('ig_resnext101_32x16d', pretrained, **model_args) - - -@register_model -def ig_resnext101_32x32d(pretrained=True, **kwargs): - """Constructs a ResNeXt-101 32x32 model pre-trained on weakly-supervised data - and finetuned on ImageNet from Figure 5 in - `"Exploring the Limits of Weakly Supervised Pretraining" `_ - Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=32, **kwargs) - return _create_resnet('ig_resnext101_32x32d', pretrained, **model_args) - - -@register_model -def ig_resnext101_32x48d(pretrained=True, **kwargs): - """Constructs a ResNeXt-101 32x48 model pre-trained on weakly-supervised data - and finetuned on ImageNet from Figure 5 in - `"Exploring the Limits of Weakly 
Supervised Pretraining" `_ - Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/ - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=48, **kwargs) - return _create_resnet('ig_resnext101_32x48d', pretrained, **model_args) - - -@register_model -def ssl_resnet18(pretrained=True, **kwargs): - """Constructs a semi-supervised ResNet-18 model pre-trained on YFCC100M dataset and finetuned on ImageNet - `"Billion-scale Semi-Supervised Learning for Image Classification" `_ - Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ - """ - model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) - return _create_resnet('ssl_resnet18', pretrained, **model_args) - - -@register_model -def ssl_resnet50(pretrained=True, **kwargs): - """Constructs a semi-supervised ResNet-50 model pre-trained on YFCC100M dataset and finetuned on ImageNet - `"Billion-scale Semi-Supervised Learning for Image Classification" `_ - Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) - return _create_resnet('ssl_resnet50', pretrained, **model_args) - - -@register_model -def ssl_resnext50_32x4d(pretrained=True, **kwargs): - """Constructs a semi-supervised ResNeXt-50 32x4 model pre-trained on YFCC100M dataset and finetuned on ImageNet - `"Billion-scale Semi-Supervised Learning for Image Classification" `_ - Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) - return _create_resnet('ssl_resnext50_32x4d', pretrained, **model_args) - - -@register_model -def ssl_resnext101_32x4d(pretrained=True, **kwargs): - """Constructs a semi-supervised ResNeXt-101 32x4 model pre-trained on YFCC100M dataset and finetuned on ImageNet - `"Billion-scale Semi-Supervised Learning for Image Classification" `_ - Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) - return _create_resnet('ssl_resnext101_32x4d', pretrained, **model_args) - - -@register_model -def ssl_resnext101_32x8d(pretrained=True, **kwargs): - """Constructs a semi-supervised ResNeXt-101 32x8 model pre-trained on YFCC100M dataset and finetuned on ImageNet - `"Billion-scale Semi-Supervised Learning for Image Classification" `_ - Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) - return _create_resnet('ssl_resnext101_32x8d', pretrained, **model_args) - - -@register_model -def ssl_resnext101_32x16d(pretrained=True, **kwargs): - """Constructs a semi-supervised ResNeXt-101 32x16 model pre-trained on YFCC100M dataset and finetuned on ImageNet - `"Billion-scale Semi-Supervised Learning for Image Classification" `_ - Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs) - return _create_resnet('ssl_resnext101_32x16d', pretrained, **model_args) - - -@register_model -def swsl_resnet18(pretrained=True, **kwargs): - """Constructs a semi-weakly supervised Resnet-18 model pre-trained on 1B weakly supervised - image dataset and finetuned on ImageNet. 
- `"Billion-scale Semi-Supervised Learning for Image Classification" `_ - Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ - """ - model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs) - return _create_resnet('swsl_resnet18', pretrained, **model_args) - - -@register_model -def swsl_resnet50(pretrained=True, **kwargs): - """Constructs a semi-weakly supervised ResNet-50 model pre-trained on 1B weakly supervised - image dataset and finetuned on ImageNet. - `"Billion-scale Semi-Supervised Learning for Image Classification" `_ - Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) - return _create_resnet('swsl_resnet50', pretrained, **model_args) - - -@register_model -def swsl_resnext50_32x4d(pretrained=True, **kwargs): - """Constructs a semi-weakly supervised ResNeXt-50 32x4 model pre-trained on 1B weakly supervised - image dataset and finetuned on ImageNet. - `"Billion-scale Semi-Supervised Learning for Image Classification" `_ - Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs) - return _create_resnet('swsl_resnext50_32x4d', pretrained, **model_args) - - -@register_model -def swsl_resnext101_32x4d(pretrained=True, **kwargs): - """Constructs a semi-weakly supervised ResNeXt-101 32x4 model pre-trained on 1B weakly supervised - image dataset and finetuned on ImageNet. - `"Billion-scale Semi-Supervised Learning for Image Classification" `_ - Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs) - return _create_resnet('swsl_resnext101_32x4d', pretrained, **model_args) - - -@register_model -def swsl_resnext101_32x8d(pretrained=True, **kwargs): - """Constructs a semi-weakly supervised ResNeXt-101 32x8 model pre-trained on 1B weakly supervised - image dataset and finetuned on ImageNet. - `"Billion-scale Semi-Supervised Learning for Image Classification" `_ - Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs) - return _create_resnet('swsl_resnext101_32x8d', pretrained, **model_args) - - -@register_model -def swsl_resnext101_32x16d(pretrained=True, **kwargs): - """Constructs a semi-weakly supervised ResNeXt-101 32x16 model pre-trained on 1B weakly supervised - image dataset and finetuned on ImageNet. - `"Billion-scale Semi-Supervised Learning for Image Classification" `_ - Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/ - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs) - return _create_resnet('swsl_resnext101_32x16d', pretrained, **model_args) - - -@register_model -def ecaresnet26t(pretrained=False, **kwargs): - """Constructs an ECA-ResNeXt-26-T model. - This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels - in the deep stem and ECA attn. 
- """ - model_args = dict( - block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, - stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) - return _create_resnet('ecaresnet26t', pretrained, **model_args) - - -@register_model -def ecaresnet50d(pretrained=False, **kwargs): - """Constructs a ResNet-50-D model with eca. - """ - model_args = dict( - block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, - block_args=dict(attn_layer='eca'), **kwargs) - return _create_resnet('ecaresnet50d', pretrained, **model_args) - - -@register_model -def resnetrs50(pretrained=False, **kwargs): - """Constructs a ResNet-RS-50 model. - Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 - Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs - """ - attn_layer = partial(get_attn('se'), rd_ratio=0.25) - model_args = dict( - block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, - avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) - return _create_resnet('resnetrs50', pretrained, **model_args) - - -@register_model -def resnetrs101(pretrained=False, **kwargs): - """Constructs a ResNet-RS-101 model. - Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 - Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs - """ - attn_layer = partial(get_attn('se'), rd_ratio=0.25) - model_args = dict( - block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, - avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) - return _create_resnet('resnetrs101', pretrained, **model_args) - - -@register_model -def resnetrs152(pretrained=False, **kwargs): - """Constructs a ResNet-RS-152 model. - Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 - Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs - """ - attn_layer = partial(get_attn('se'), rd_ratio=0.25) - model_args = dict( - block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, - avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) - return _create_resnet('resnetrs152', pretrained, **model_args) - - -@register_model -def resnetrs200(pretrained=False, **kwargs): - """Constructs a ResNet-RS-200 model. - Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 - Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs - """ - attn_layer = partial(get_attn('se'), rd_ratio=0.25) - model_args = dict( - block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, - avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) - return _create_resnet('resnetrs200', pretrained, **model_args) - - -@register_model -def resnetrs270(pretrained=False, **kwargs): - """Constructs a ResNet-RS-270 model. 
- Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 - Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs - """ - attn_layer = partial(get_attn('se'), rd_ratio=0.25) - model_args = dict( - block=Bottleneck, layers=[4, 29, 53, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, - avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) - return _create_resnet('resnetrs270', pretrained, **model_args) - - - -@register_model -def resnetrs350(pretrained=False, **kwargs): - """Constructs a ResNet-RS-350 model. - Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 - Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs - """ - attn_layer = partial(get_attn('se'), rd_ratio=0.25) - model_args = dict( - block=Bottleneck, layers=[4, 36, 72, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, - avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) - return _create_resnet('resnetrs350', pretrained, **model_args) - - -@register_model -def resnetrs420(pretrained=False, **kwargs): - """Constructs a ResNet-RS-420 model - Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 - Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs - """ - attn_layer = partial(get_attn('se'), rd_ratio=0.25) - model_args = dict( - block=Bottleneck, layers=[4, 44, 87, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, - avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs) - return _create_resnet('resnetrs420', pretrained, **model_args) - - -@register_model -def ecaresnet50d_pruned(pretrained=False, **kwargs): - """Constructs a ResNet-50-D model pruned with eca. - The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf - """ - model_args = dict( - block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, - block_args=dict(attn_layer='eca'), **kwargs) - return _create_resnet('ecaresnet50d_pruned', pretrained, pruned=True, **model_args) - - -@register_model -def ecaresnet50t(pretrained=False, **kwargs): - """Constructs an ECA-ResNet-50-T model. - Like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem and ECA attn. - """ - model_args = dict( - block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, - stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) - return _create_resnet('ecaresnet50t', pretrained, **model_args) - - -@register_model -def ecaresnetlight(pretrained=False, **kwargs): - """Constructs a ResNet-50-D light model with eca. - """ - model_args = dict( - block=Bottleneck, layers=[1, 1, 11, 3], stem_width=32, avg_down=True, - block_args=dict(attn_layer='eca'), **kwargs) - return _create_resnet('ecaresnetlight', pretrained, **model_args) - - -@register_model -def ecaresnet101d(pretrained=False, **kwargs): - """Constructs a ResNet-101-D model with eca. - """ - model_args = dict( - block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, - block_args=dict(attn_layer='eca'), **kwargs) - return _create_resnet('ecaresnet101d', pretrained, **model_args) - - -@register_model -def ecaresnet101d_pruned(pretrained=False, **kwargs): - """Constructs a ResNet-101-D model pruned with eca. 
- The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf - """ - model_args = dict( - block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, - block_args=dict(attn_layer='eca'), **kwargs) - return _create_resnet('ecaresnet101d_pruned', pretrained, pruned=True, **model_args) - - -@register_model -def ecaresnet200d(pretrained=False, **kwargs): - """Constructs a ResNet-200-D model with ECA. - """ - model_args = dict( - block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, - block_args=dict(attn_layer='eca'), **kwargs) - return _create_resnet('ecaresnet200d', pretrained, **model_args) - - -@register_model -def ecaresnet269d(pretrained=False, **kwargs): - """Constructs a ResNet-269-D model with ECA. - """ - model_args = dict( - block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True, - block_args=dict(attn_layer='eca'), **kwargs) - return _create_resnet('ecaresnet269d', pretrained, **model_args) - - -@register_model -def ecaresnext26t_32x4d(pretrained=False, **kwargs): - """Constructs an ECA-ResNeXt-26-T model. - This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels - in the deep stem. This model replaces SE module with the ECA module - """ - model_args = dict( - block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, - stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) - return _create_resnet('ecaresnext26t_32x4d', pretrained, **model_args) - - -@register_model -def ecaresnext50t_32x4d(pretrained=False, **kwargs): - """Constructs an ECA-ResNeXt-50-T model. - This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels - in the deep stem. 
This model replaces SE module with the ECA module - """ - model_args = dict( - block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, - stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs) - return _create_resnet('ecaresnext50t_32x4d', pretrained, **model_args) - - -@register_model -def resnetblur18(pretrained=False, **kwargs): - """Constructs a ResNet-18 model with blur anti-aliasing - """ - model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], aa_layer=BlurPool2d, **kwargs) - return _create_resnet('resnetblur18', pretrained, **model_args) - - -@register_model -def resnetblur50(pretrained=False, **kwargs): - """Constructs a ResNet-50 model with blur anti-aliasing - """ - model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=BlurPool2d, **kwargs) - return _create_resnet('resnetblur50', pretrained, **model_args) - - -@register_model -def seresnet18(pretrained=False, **kwargs): - model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], block_args=dict(attn_layer='se'), **kwargs) - return _create_resnet('seresnet18', pretrained, **model_args) - - -@register_model -def seresnet34(pretrained=False, **kwargs): - model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'), **kwargs) - return _create_resnet('seresnet34', pretrained, **model_args) - - -@register_model -def seresnet50(pretrained=False, **kwargs): - model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'), **kwargs) - return _create_resnet('seresnet50', pretrained, **model_args) - - -@register_model -def seresnet50t(pretrained=False, **kwargs): - model_args = dict( - block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True, - block_args=dict(attn_layer='se'), **kwargs) - return _create_resnet('seresnet50t', pretrained, **model_args) - - -@register_model -def seresnet101(pretrained=False, **kwargs): - model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], block_args=dict(attn_layer='se'), **kwargs) - return _create_resnet('seresnet101', pretrained, **model_args) - - -@register_model -def seresnet152(pretrained=False, **kwargs): - model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], block_args=dict(attn_layer='se'), **kwargs) - return _create_resnet('seresnet152', pretrained, **model_args) - - -@register_model -def seresnet152d(pretrained=False, **kwargs): - model_args = dict( - block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, - block_args=dict(attn_layer='se'), **kwargs) - return _create_resnet('seresnet152d', pretrained, **model_args) - - -@register_model -def seresnet200d(pretrained=False, **kwargs): - """Constructs a ResNet-200-D model with SE attn. - """ - model_args = dict( - block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, - block_args=dict(attn_layer='se'), **kwargs) - return _create_resnet('seresnet200d', pretrained, **model_args) - - -@register_model -def seresnet269d(pretrained=False, **kwargs): - """Constructs a ResNet-269-D model with SE attn. 
- """ - model_args = dict( - block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True, - block_args=dict(attn_layer='se'), **kwargs) - return _create_resnet('seresnet269d', pretrained, **model_args) - - -@register_model -def seresnext26d_32x4d(pretrained=False, **kwargs): - """Constructs a SE-ResNeXt-26-D model.` - This is technically a 28 layer ResNet, using the 'D' modifier from Gluon / bag-of-tricks for - combination of deep stem and avg_pool in downsample. - """ - model_args = dict( - block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, - stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'), **kwargs) - return _create_resnet('seresnext26d_32x4d', pretrained, **model_args) - - -@register_model -def seresnext26t_32x4d(pretrained=False, **kwargs): - """Constructs a SE-ResNet-26-T model. - This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels - in the deep stem. - """ - model_args = dict( - block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, - stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='se'), **kwargs) - return _create_resnet('seresnext26t_32x4d', pretrained, **model_args) - - -@register_model -def seresnext26tn_32x4d(pretrained=False, **kwargs): - """Constructs a SE-ResNeXt-26-T model. - NOTE I deprecated previous 't' model defs and replaced 't' with 'tn', this was the only tn model of note - so keeping this def for backwards compat with any uses out there. Old 't' model is lost. - """ - return seresnext26t_32x4d(pretrained=pretrained, **kwargs) - - -@register_model -def seresnext50_32x4d(pretrained=False, **kwargs): - model_args = dict( - block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, - block_args=dict(attn_layer='se'), **kwargs) - return _create_resnet('seresnext50_32x4d', pretrained, **model_args) - - -@register_model -def seresnext101_32x4d(pretrained=False, **kwargs): - model_args = dict( - block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, - block_args=dict(attn_layer='se'), **kwargs) - return _create_resnet('seresnext101_32x4d', pretrained, **model_args) - - -@register_model -def seresnext101_32x8d(pretrained=False, **kwargs): - model_args = dict( - block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, - block_args=dict(attn_layer='se'), **kwargs) - return _create_resnet('seresnext101_32x8d', pretrained, **model_args) - - -@register_model -def senet154(pretrained=False, **kwargs): - model_args = dict( - block=Bottleneck, layers=[3, 8, 36, 3], cardinality=64, base_width=4, stem_type='deep', - down_kernel_size=3, block_reduce_first=2, block_args=dict(attn_layer='se'), **kwargs) - return _create_resnet('senet154', pretrained, **model_args) diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/normalbae/models/baseline.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/normalbae/models/baseline.py deleted file mode 100644 index 602d0fbdac1acc9ede9bc1f2e10a5df78831ce9d..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/normalbae/models/baseline.py +++ /dev/null @@ -1,85 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -from .submodules.submodules import UpSampleBN, norm_normalize - - -# This is the baseline encoder-decoder we used in the ablation study -class NNET(nn.Module): - def __init__(self, args=None): - 
super(NNET, self).__init__() - self.encoder = Encoder() - self.decoder = Decoder(num_classes=4) - - def forward(self, x, **kwargs): - out = self.decoder(self.encoder(x), **kwargs) - - # Bilinearly upsample the output to match the input resolution - up_out = F.interpolate(out, size=[x.size(2), x.size(3)], mode='bilinear', align_corners=False) - - # L2-normalize the first three channels / ensure positive value for concentration parameters (kappa) - up_out = norm_normalize(up_out) - return up_out - - def get_1x_lr_params(self): # lr/10 learning rate - return self.encoder.parameters() - - def get_10x_lr_params(self): # lr learning rate - modules = [self.decoder] - for m in modules: - yield from m.parameters() - - -# Encoder -class Encoder(nn.Module): - def __init__(self): - super(Encoder, self).__init__() - - basemodel_name = 'tf_efficientnet_b5_ap' - basemodel = torch.hub.load('rwightman/gen-efficientnet-pytorch', basemodel_name, pretrained=True) - - # Remove last layer - basemodel.global_pool = nn.Identity() - basemodel.classifier = nn.Identity() - - self.original_model = basemodel - - def forward(self, x): - features = [x] - for k, v in self.original_model._modules.items(): - if (k == 'blocks'): - for ki, vi in v._modules.items(): - features.append(vi(features[-1])) - else: - features.append(v(features[-1])) - return features - - -# Decoder (no pixel-wise MLP, no uncertainty-guided sampling) -class Decoder(nn.Module): - def __init__(self, num_classes=4): - super(Decoder, self).__init__() - self.conv2 = nn.Conv2d(2048, 2048, kernel_size=1, stride=1, padding=0) - self.up1 = UpSampleBN(skip_input=2048 + 176, output_features=1024) - self.up2 = UpSampleBN(skip_input=1024 + 64, output_features=512) - self.up3 = UpSampleBN(skip_input=512 + 40, output_features=256) - self.up4 = UpSampleBN(skip_input=256 + 24, output_features=128) - self.conv3 = nn.Conv2d(128, num_classes, kernel_size=3, stride=1, padding=1) - - def forward(self, features): - x_block0, x_block1, x_block2, x_block3, x_block4 = features[4], features[5], features[6], features[8], features[11] - x_d0 = self.conv2(x_block4) - x_d1 = self.up1(x_d0, x_block3) - x_d2 = self.up2(x_d1, x_block2) - x_d3 = self.up3(x_d2, x_block1) - x_d4 = self.up4(x_d3, x_block0) - out = self.conv3(x_d4) - return out - - -if __name__ == '__main__': - model = Baseline() - x = torch.rand(2, 3, 480, 640) - out = model(x) - print(out.shape) diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/configs/_base_/models/danet_r50-d8.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/configs/_base_/models/danet_r50-d8.py deleted file mode 100644 index 2c934939fac48525f22ad86f489a041dd7db7d09..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/configs/_base_/models/danet_r50-d8.py +++ /dev/null @@ -1,44 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='open-mmlab://resnet50_v1c', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='DAHead', - in_channels=2048, - in_index=3, - channels=512, - pam_channels=64, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, 
            loss_weight=1.0)),
-    auxiliary_head=dict(
-        type='FCNHead',
-        in_channels=1024,
-        in_index=2,
-        channels=256,
-        num_convs=1,
-        concat_input=False,
-        dropout_ratio=0.1,
-        num_classes=19,
-        norm_cfg=norm_cfg,
-        align_corners=False,
-        loss_decode=dict(
-            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
-    # model training and testing settings
-    train_cfg=dict(),
-    test_cfg=dict(mode='whole'))
diff --git a/spaces/crashedice/signify/signify/gan/models/template_model.py b/spaces/crashedice/signify/signify/gan/models/template_model.py
deleted file mode 100644
index 6d5f15f0a194c8e2b482526873693e3c32d2a4a5..0000000000000000000000000000000000000000
--- a/spaces/crashedice/signify/signify/gan/models/template_model.py
+++ /dev/null
@@ -1,99 +0,0 @@
-"""Model class template
-
-This module provides a template for users to implement custom models.
-You can specify '--model template' to use this model.
-The class name should be consistent with both the filename and its model option.
-The filename should be [model]_model.py
-The class name should be [Model]Model
-It implements a simple image-to-image translation baseline based on regression loss.
-Given input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss:
-    min_netG ||netG(data_A) - data_B||_1
-You need to implement the following functions:
-    modify_commandline_options: Add model-specific options and rewrite default values for existing options.
-    __init__: Initialize this model class.
-    set_input: Unpack input data and perform data pre-processing.
-    forward: Run forward pass. This will be called by both optimize_parameters and test.
-    optimize_parameters: Update network weights; it will be called in every training iteration.
-"""
-import torch
-from signify.gan.models.base_model import BaseModel
-from signify.gan.models import networks
-
-
-class TemplateModel(BaseModel):
-    @staticmethod
-    def modify_commandline_options(parser, is_train=True):
-        """Add new model-specific options and rewrite default values for existing options.
-
-        Parameters:
-            parser   -- the option parser
-            is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options.
-
-        Returns:
-            the modified parser.
-        """
-        parser.set_defaults(dataset_mode='aligned')  # You can rewrite default values for this model. For example, this model usually uses aligned dataset as its dataset.
-        if is_train:
-            parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss')  # You can define new arguments for this model.
-
-        return parser
-
-    def __init__(self, opt):
-        """Initialize this model class.
-
-        Parameters:
-            opt -- training/test options
-
-        A few things can be done here.
-        - (required) call the initialization function of BaseModel
-        - define loss function, visualization images, model names, and optimizers
-        """
-        BaseModel.__init__(self, opt)  # call the initialization method of BaseModel
-        # specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
-        self.loss_names = ['loss_G']
-        # specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
-        self.visual_names = ['data_A', 'data_B', 'output']
-        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
-        # you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
For example, some networks will not be used during test, and you don't need to load them. - self.model_names = ['G'] - # define networks; you can use opt.isTrain to specify different behaviors for training and test. - self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids) - if self.isTrain: # only defined during training time - # define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss. - # We also provide a GANLoss class "networks.GANLoss". self.criterionGAN = networks.GANLoss().to(self.device) - self.criterionLoss = torch.nn.L1Loss() - # define and initialize optimizers. You can define one optimizer for each network. - # If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example. - self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999)) - self.optimizers = [self.optimizer] - - # Our program will automatically call to define schedulers, load networks, and print networks - - def set_input(self, input): - """Unpack input data from the dataloader and perform necessary pre-processing steps. - - Parameters: - input: a dictionary that contains the data itself and its metadata information. - """ - AtoB = self.opt.direction == 'AtoB' # use to swap data_A and data_B - self.data_A = input['A' if AtoB else 'B'].to(self.device) # get image data A - self.data_B = input['B' if AtoB else 'A'].to(self.device) # get image data B - self.image_paths = input['A_paths' if AtoB else 'B_paths'] # get image paths - - def forward(self): - """Run forward pass. This will be called by both functions and .""" - self.output = self.netG(self.data_A) # generate output image given the input data_A - - def backward(self): - """Calculate losses, gradients, and update network weights; called in every training iteration""" - # caculate the intermediate results if necessary; here self.output has been computed during function - # calculate loss given the input and intermediate results - self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression - self.loss_G.backward() # calculate gradients of network G w.r.t. loss_G - - def optimize_parameters(self): - """Update network weights; it will be called in every training iteration.""" - self.forward() # first call forward to calculate intermediate results - self.optimizer.zero_grad() # clear network G's existing gradients - self.backward() # calculate gradients for network G - self.optimizer.step() # update gradients for network G diff --git a/spaces/crimeacs/phase-hunter/app.py b/spaces/crimeacs/phase-hunter/app.py deleted file mode 100644 index b0b0dacd21d3fe2a513419bc3de761d80d07cfb4..0000000000000000000000000000000000000000 --- a/spaces/crimeacs/phase-hunter/app.py +++ /dev/null @@ -1,815 +0,0 @@ -# Gradio app that takes seismic waveform as input and marks 2 phases on the waveform as output. 
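# A minimal, runnable stand-in (not from the repository) for the objective the TemplateModel
# above implements: fit netG by minimizing the L1 regression loss ||netG(data_A) - data_B||_1
# with Adam, following the same set_input -> forward -> backward -> optimize_parameters cycle.
# A toy generator and random tensors replace the aligned A/B pairs; all names are illustrative.
import torch
import torch.nn as nn

net_g = nn.Conv2d(3, 3, kernel_size=3, padding=1)   # toy stand-in for netG
criterion = nn.L1Loss()
optimizer = torch.optim.Adam(net_g.parameters(), lr=2e-4, betas=(0.5, 0.999))

data_a = torch.rand(4, 3, 64, 64)   # "input" images
data_b = torch.rand(4, 3, 64, 64)   # paired "target" images

for _ in range(3):                        # a few illustrative iterations
    output = net_g(data_a)                # forward()
    loss_g = criterion(output, data_b)    # ||netG(data_A) - data_B||_1
    optimizer.zero_grad()
    loss_g.backward()                     # backward()
    optimizer.step()                      # optimize_parameters()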
- -import gradio as gr -import numpy as np -import pandas as pd -from phasehunter.data_preparation import prepare_waveform -import torch -import io - -from scipy.stats import gaussian_kde -from scipy.signal import resample -from scipy.interpolate import interp1d - -from bmi_topography import Topography -import earthpy.spatial as es - -import obspy -from obspy.clients.fdsn import Client -from obspy.clients.fdsn.header import FDSNNoDataException, FDSNTimeoutException, FDSNInternalServerException -from obspy.geodetics.base import locations2degrees -from obspy.taup import TauPyModel -from obspy.taup.helper_classes import SlownessModelError - -from obspy.clients.fdsn.header import URL_MAPPINGS - -import matplotlib.pyplot as plt -import matplotlib.dates as mdates -from mpl_toolkits.axes_grid1 import ImageGrid - -from glob import glob - -import numpy as np -from matplotlib import colors, cm -from scipy.interpolate import griddata - -def resample_waveform(waveform, original_freq, target_freq): - """ - Resample a waveform from original frequency to target frequency using SciPy's resample function. - - Args: - waveform (numpy.ndarray): The input waveform as a 1D array. - original_freq (float): The original sampling frequency of the waveform. - target_freq (float): The target sampling frequency of the waveform. - - Returns: - resampled_waveform (numpy.ndarray): The resampled waveform as a 1D array. - """ - # Calculate the resampling ratio - resampling_ratio = target_freq / original_freq - # Calculate the new length of the resampled waveform - resampled_length = int(waveform.shape[-1] * resampling_ratio) - # Resample the waveform using SciPy's resample function - resampled_waveform = resample(waveform, resampled_length, axis=-1) - - return resampled_waveform - -def sort_channels_to_ZNE(waveform, channels): - # Input: - # waveform: a 2D numpy array with shape (3, n), where n is the number of samples - # channels: a list or tuple of 3 strings representing the channel order, e.g. ('N', 'Z', 'E') - channels = list(channels) - - if len(channels) != 3 or set(channels) != {'Z', 'N', 'E'}: - raise ValueError("Invalid channel input. 
It should be a permutation of 'Z', 'N', and 'E'.") - - # Find the indices of the Z, N, and E channels - z_index = channels.index('Z') - n_index = channels.index('N') - e_index = channels.index('E') - - print(z_index, n_index, e_index) - # Sort the channels to ZNE - sorted_waveform = waveform[[z_index, n_index, e_index], :] - - return sorted_waveform - -def make_prediction(waveform, sampling_rate, order): - waveform = np.load(waveform) - print('Loaded', waveform.shape) - - if len(waveform.shape) == 1: - waveform = waveform.reshape(1, waveform.shape[0]) - - elif waveform.shape[0] == 3: - waveform = sort_channels_to_ZNE(waveform, order) - - if sampling_rate != 100: - waveform = resample_waveform(waveform, sampling_rate, 100) - print('Resampled', waveform.shape) - - - orig_waveform = waveform[:, :6000].copy() - processed_input = prepare_waveform(waveform) - - # Make prediction - with torch.inference_mode(): - output = model(processed_input) - - p_phase = output[:, 0] - s_phase = output[:, 1] - - return processed_input, p_phase, s_phase, orig_waveform - - -def mark_phases(waveform, uploaded_file, p_thres, s_thres, sampling_rate, order): - - if uploaded_file is not None: - waveform = uploaded_file.name - - processed_input, p_phase, s_phase, orig_waveform = make_prediction(waveform, sampling_rate, order) - - # Create a plot of the waveform with the phases marked - if sum(processed_input[0][2] == 0): #if input is 1C - fig, ax = plt.subplots(nrows=2, figsize=(10, 2), sharex=True) - - ax[0].plot(orig_waveform[0], color='black', lw=1) - ax[0].set_ylabel('Norm. Ampl.') - - else: #if input is 3C - fig, ax = plt.subplots(nrows=4, figsize=(10, 6), sharex=True) - ax[0].plot(orig_waveform[0], color='black', lw=1) - ax[1].plot(orig_waveform[1], color='black', lw=1) - ax[2].plot(orig_waveform[2], color='black', lw=1) - - ax[0].set_ylabel('Z') - ax[1].set_ylabel('N') - ax[2].set_ylabel('E') - - - do_we_have_p = (p_phase.std().item()*60 < p_thres) - if do_we_have_p: - p_phase_plot = p_phase*processed_input.shape[-1] - p_kde = gaussian_kde(p_phase_plot) - p_dist_space = np.linspace( min(p_phase_plot)-10, max(p_phase_plot)+10, 500 ) - ax[-1].plot( p_dist_space, p_kde(p_dist_space), color='r') - else: - ax[-1].text(0.5, 0.75, 'No P phase detected', horizontalalignment='center', verticalalignment='center', transform=ax[-1].transAxes) - - do_we_have_s = (s_phase.std().item()*60 < s_thres) - if do_we_have_s: - s_phase_plot = s_phase*processed_input.shape[-1] - s_kde = gaussian_kde(s_phase_plot) - s_dist_space = np.linspace( min(s_phase_plot)-10, max(s_phase_plot)+10, 500 ) - ax[-1].plot( s_dist_space, s_kde(s_dist_space), color='b') - - for a in ax: - a.axvline(p_phase.mean()*processed_input.shape[-1], color='r', linestyle='--', label='P', alpha=do_we_have_p) - a.axvline(s_phase.mean()*processed_input.shape[-1], color='b', linestyle='--', label='S', alpha=do_we_have_s) - else: - ax[-1].text(0.5, 0.25, 'No S phase detected', horizontalalignment='center', verticalalignment='center', transform=ax[-1].transAxes) - - ax[-1].set_xlabel('Time, samples') - ax[-1].set_ylabel('Uncert., samples') - ax[-1].legend() - - plt.subplots_adjust(hspace=0., wspace=0.) 
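# A small usage sketch for resample_waveform and sort_channels_to_ZNE defined above
# (assumes they are in scope). The synthetic data, the 40 Hz rate, and the (N, Z, E)
# channel order are invented for illustration; the app expects 100 Hz, (Z, N, E)-ordered
# input and analyzes only the first 6000 samples.
import numpy as np

raw = np.random.randn(3, 60 * 40)                      # 60 s of 3-channel data at 40 Hz
zne = sort_channels_to_ZNE(raw, ('N', 'Z', 'E'))       # reorder channels to (Z, N, E)
resampled = resample_waveform(zne, original_freq=40, target_freq=100)
model_input = resampled[:, :6000]                      # first 60 s at 100 Hz -> (3, 6000)
np.save("my_waveform.npy", model_input)                # ready for the file uploader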
- - # Convert the plot to an image and return it - fig.canvas.draw() - image = np.array(fig.canvas.renderer.buffer_rgba()) - plt.close(fig) - return image - -def bin_distances(distances, bin_size=10): - # Bin the distances into groups of `bin_size` kilometers - binned_distances = {} - for i, distance in enumerate(distances): - bin_index = distance // bin_size - if bin_index not in binned_distances: - binned_distances[bin_index] = (distance, i) - elif i < binned_distances[bin_index][1]: - binned_distances[bin_index] = (distance, i) - - # Select the first distance in each bin and its index - first_distances = [] - for bin_index in binned_distances: - first_distance, first_distance_index = binned_distances[bin_index] - first_distances.append(first_distance_index) - - return first_distances - -def variance_coefficient(residuals): - # calculate the variance of the residuals - var = residuals.var() - # scale the variance to a coefficient between 0 and 1 - coeff = 1 - (var / (residuals.max() - residuals.min())) - return coeff - -def predict_on_section(client_name, timestamp, eq_lat, eq_lon, radius_km, source_depth_km, velocity_model, max_waveforms, conf_thres_P, conf_thres_S): - distances, t0s, st_lats, st_lons, waveforms, names = [], [], [], [], [], [] - - taup_model = TauPyModel(model=velocity_model) - client = Client(client_name) - - window = radius_km / 111.2 - max_waveforms = int(max_waveforms) - - assert eq_lat - window > -90 and eq_lat + window < 90, "Latitude out of bounds" - assert eq_lon - window > -180 and eq_lon + window < 180, "Longitude out of bounds" - - starttime = obspy.UTCDateTime(timestamp) - endtime = starttime + 120 - - try: - print('Starting to download inventory') - inv = client.get_stations(network="*", station="*", location="*", channel="*H*", - starttime=starttime, endtime=endtime, - minlatitude=(eq_lat-window), maxlatitude=(eq_lat+window), - minlongitude=(eq_lon-window), maxlongitude=(eq_lon+window), - level='station') - print('Finished downloading inventory') - - except (IndexError, FDSNNoDataException, FDSNTimeoutException, FDSNInternalServerException): - fig, ax = plt.subplots() - ax.text(0.5,0.5,'Something is wrong with the data provider, try another') - fig.canvas.draw(); - image = np.array(fig.canvas.renderer.buffer_rgba()) - plt.close(fig) - return image - - waveforms = [] - cached_waveforms = glob("data/cached/*.mseed") - - for network in inv: - if network.code == 'SY': - continue - for station in network: - print(f"Processing {network.code}.{station.code}...") - distance = locations2degrees(eq_lat, eq_lon, station.latitude, station.longitude) - - arrivals = taup_model.get_travel_times(source_depth_in_km=source_depth_km, - distance_in_degree=distance, - phase_list=["P", "S"]) - - if len(arrivals) > 0: - - starttime = obspy.UTCDateTime(timestamp) + arrivals[0].time - 15 - endtime = starttime + 60 - try: - filename=f'{network.code}_{station.code}_{starttime}' - if f"data/cached/{filename}.mseed" not in cached_waveforms: - print(f'Downloading waveform for {filename}') - waveform = client.get_waveforms(network=network.code, station=station.code, location="*", channel="*", - starttime=starttime, endtime=endtime) - waveform.write(f"data/cached/{network.code}_{station.code}_{starttime}.mseed", format="MSEED") - print('Finished downloading and caching waveform') - else: - print('Reading cached waveform') - waveform = obspy.read(f"data/cached/{network.code}_{station.code}_{starttime}.mseed") - - - except (IndexError, FDSNNoDataException, FDSNTimeoutException, 
FDSNInternalServerException): - print(f'Skipping {network.code}_{station.code}_{starttime}') - continue - - waveform = waveform.select(channel="H[BH][ZNE]") - waveform = waveform.merge(fill_value=0) - waveform = waveform[:3].sort(keys=['channel'], reverse=True) - - len_check = [len(x.data) for x in waveform] - if len(set(len_check)) > 1: - continue - - if len(waveform) == 3: - try: - waveform = prepare_waveform(np.stack([x.data for x in waveform])) - - distances.append(distance) - t0s.append(starttime) - st_lats.append(station.latitude) - st_lons.append(station.longitude) - waveforms.append(waveform) - names.append(f"{network.code}.{station.code}") - - print(f"Added {network.code}.{station.code} to the list of waveforms") - - except: - continue - - - # If there are no waveforms, return an empty plot - if len(waveforms) == 0: - print('No waveforms found') - fig, ax = plt.subplots() - # prints "No waveforms found" on the plot aligned at center and vertically - ax.text(0.5,0.5,'No waveforms found', horizontalalignment='center', verticalalignment='center', transform=ax.transAxes) - fig.canvas.draw(); - image = np.array(fig.canvas.renderer.buffer_rgba()) - plt.close(fig) - - output_picks = pd.DataFrame() - output_picks.to_csv('data/picks.csv', index=False) - output_csv = 'data/picks.csv' - return image, output_picks, output_csv - - - first_distances = bin_distances(distances, bin_size=10/111.2) - - # Edge case when there are way too many waveforms to process - selection_indexes = np.random.choice(first_distances, - np.min([len(first_distances), max_waveforms]), - replace=False) - - waveforms = np.array(waveforms)[selection_indexes] - distances = np.array(distances)[selection_indexes] - t0s = np.array(t0s)[selection_indexes] - st_lats = np.array(st_lats)[selection_indexes] - st_lons = np.array(st_lons)[selection_indexes] - names = np.array(names)[selection_indexes] - - waveforms = [torch.tensor(waveform) for waveform in waveforms] - - print('Starting to run predictions') - with torch.no_grad(): - waveforms_torch = torch.vstack(waveforms) - output = model(waveforms_torch) - - p_phases = output[:, 0] - s_phases = output[:, 1] - - p_phases = p_phases.reshape(len(waveforms),-1) - s_phases = s_phases.reshape(len(waveforms),-1) - - # Max confidence - min variance - p_max_confidence = p_phases.std(axis=-1).min() - s_max_confidence = s_phases.std(axis=-1).min() - - print(f"Starting plotting {len(waveforms)} waveforms") - fig, ax = plt.subplots(ncols=3, figsize=(10, 3)) - - # Plot topography - print('Fetching topography') - params = Topography.DEFAULT.copy() - extra_window = 0.5 - params["south"] = np.min([st_lats.min(), eq_lat])-extra_window - params["north"] = np.max([st_lats.max(), eq_lat])+extra_window - params["west"] = np.min([st_lons.min(), eq_lon])-extra_window - params["east"] = np.max([st_lons.max(), eq_lon])+extra_window - - topo_map = Topography(**params) - topo_map.fetch() - topo_map.load() - - print('Plotting topo') - hillshade = es.hillshade(topo_map.da[0], altitude=10) - - topo_map.da.plot(ax = ax[1], cmap='Greys', add_colorbar=False, add_labels=False) - topo_map.da.plot(ax = ax[2], cmap='Greys', add_colorbar=False, add_labels=False) - ax[1].imshow(hillshade, cmap="Greys", alpha=0.5) - - output_picks = pd.DataFrame({'station_name' : [], - 'st_lat' : [], 'st_lon' : [], - 'starttime' : [], - 'p_phase, s' : [], 'p_uncertainty, s' : [], - 's_phase, s' : [], 's_uncertainty, s' : [], - 'velocity_p, km/s' : [], 'velocity_s, km/s' : []}) - - for i in range(len(waveforms)): - print(f"Plotting 
waveform {i+1}/{len(waveforms)}") - current_P = p_phases[i] - current_S = s_phases[i] - - x = [t0s[i] + pd.Timedelta(seconds=k/100) for k in np.linspace(0,6000,6000)] - x = mdates.date2num(x) - - # Normalize confidence for the plot - p_conf = 1/(current_P.std()/p_max_confidence).item() - s_conf = 1/(current_S.std()/s_max_confidence).item() - - delta_t = t0s[i].timestamp - obspy.UTCDateTime(timestamp).timestamp - - ax[0].plot(x, waveforms[i][0, 0]*10+distances[i]*111.2, color='black', alpha=0.5, lw=1) - - if (current_P.std().item()*60 < conf_thres_P) or (current_S.std().item()*60 < conf_thres_S): - ax[0].scatter(x[int(current_P.mean()*waveforms[i][0].shape[-1])], waveforms[i][0, 0].mean()+distances[i]*111.2, color='r', alpha=p_conf, marker='|') - ax[0].scatter(x[int(current_S.mean()*waveforms[i][0].shape[-1])], waveforms[i][0, 0].mean()+distances[i]*111.2, color='b', alpha=s_conf, marker='|') - - velocity_p = (distances[i]*111.2)/(delta_t+current_P.mean()*60).item() - velocity_s = (distances[i]*111.2)/(delta_t+current_S.mean()*60).item() - - # Generate an array from st_lat to eq_lat and from st_lon to eq_lon - x = np.linspace(st_lons[i], eq_lon, 50) - y = np.linspace(st_lats[i], eq_lat, 50) - - # Plot the array - ax[1].scatter(x, y, c=np.zeros_like(x)+velocity_p, alpha=0.1, vmin=0, vmax=8) - ax[2].scatter(x, y, c=np.zeros_like(x)+velocity_s, alpha=0.1, vmin=0, vmax=8) - - else: - velocity_p = np.nan - velocity_s = np.nan - - ax[0].set_ylabel('Z') - print(f"Station {st_lats[i]}, {st_lons[i]} has P velocity {velocity_p} and S velocity {velocity_s}") - - output_picks = output_picks.append(pd.DataFrame({'station_name': [names[i]], - 'st_lat' : [st_lats[i]], 'st_lon' : [st_lons[i]], - 'starttime' : [str(t0s[i])], - 'p_phase, s' : [(delta_t+current_P.mean()*60).item()], 'p_uncertainty, s' : [current_P.std().item()*60], - 's_phase, s' : [(delta_t+current_S.mean()*60).item()], 's_uncertainty, s' : [current_S.std().item()*60], - 'velocity_p, km/s' : [velocity_p], 'velocity_s, km/s' : [velocity_s]})) - - - # Add legend - ax[0].scatter(None, None, color='r', marker='|', label='P') - ax[0].scatter(None, None, color='b', marker='|', label='S') - ax[0].xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:%S')) - ax[0].xaxis.set_major_locator(mdates.SecondLocator(interval=20)) - ax[0].legend() - - print('Plotting stations') - for i in range(1,3): - ax[i].scatter(st_lons, st_lats, color='b', label='Stations') - ax[i].scatter(eq_lon, eq_lat, color='r', marker='*', label='Earthquake') - ax[i].set_aspect('equal') - ax[i].set_xticklabels(ax[i].get_xticks(), rotation = 50) - - fig.subplots_adjust(bottom=0.1, top=0.9, left=0.1, right=0.8, - wspace=0.02, hspace=0.02) - - cb_ax = fig.add_axes([0.83, 0.1, 0.02, 0.8]) - cbar = fig.colorbar(ax[2].scatter(None, None, c=velocity_p, alpha=0.5, vmin=0, vmax=8), cax=cb_ax) - - cbar.set_label('Velocity (km/s)') - ax[1].set_title('P Velocity') - ax[2].set_title('S Velocity') - - for a in ax: - a.tick_params(axis='both', which='major', labelsize=8) - - plt.subplots_adjust(hspace=0., wspace=0.5) - fig.canvas.draw(); - image = np.array(fig.canvas.renderer.buffer_rgba()) - plt.close(fig) - - output_csv = f'data/velocity/{eq_lat}_{eq_lon}_{source_depth_km}_{timestamp}_{len(waveforms)}.csv' - output_picks.to_csv(output_csv, index=False) - - return image, output_picks, output_csv - -def interpolate_vel_model(velocity_model, initial_velocity, lat_values, lon_values, depth_values, n_lat, n_lon, n_depth): - # Create a mask for points with the initial velocity - initial_velocity_mask 
= (velocity_model == initial_velocity) - - # Find the indices of points with non-initial velocities - non_initial_velocity_indices = np.argwhere(~initial_velocity_mask) - - # Extract the coordinates and corresponding velocities of the known points - known_points = np.column_stack([lat_values[non_initial_velocity_indices[:, 0]], - lon_values[non_initial_velocity_indices[:, 1]], - depth_values[non_initial_velocity_indices[:, 2]]]) - - # Find the maximum depth in the known_points - max_known_depth = np.max(known_points[:, 2]) - - known_velocities = velocity_model[~initial_velocity_mask] - - # Create a grid of points for the entire volume - grid_points = np.array(np.meshgrid(lat_values, lon_values, depth_values, indexing='ij')).reshape(3, -1).T - - # Create a mask for grid points that are deeper than the maximum known depth - depth_mask = grid_points[:, 2] <= max_known_depth - - # Interpolate the velocities at the grid points - interpolated_velocities = griddata(known_points, known_velocities, grid_points[depth_mask], method='linear') - - # Fill nan values with the nearest known velocities - interpolated_velocities_filled = griddata(known_points, known_velocities, grid_points[depth_mask], method='nearest') - interpolated_velocities[np.isnan(interpolated_velocities)] = interpolated_velocities_filled[np.isnan(interpolated_velocities)] - - # Initialize an array with the same length as grid_points and fill it with nan values - interpolated_velocities_with_depth_limit = np.full(grid_points.shape[0], np.nan) - - # Update the array with the interpolated velocities for the masked grid points - interpolated_velocities_with_depth_limit[depth_mask] = interpolated_velocities - - # Reshape the interpolated velocities to match the shape of the velocity_model - interpolated_velocity_model = interpolated_velocities_with_depth_limit.reshape(n_lat, n_lon, n_depth) - - return interpolated_velocity_model - - -# Function to find the closest index for a given value in an array -def find_closest_index(array, value): - return np.argmin(np.abs(array - value)) - -# FIX AFTER CONFERENCE -# def compute_velocity_model(azimuth, elevation, interpolate, n_lat, n_lon, n_depth): -# filename = list(output_csv.temp_files)[0] - -# df = pd.read_csv(filename) -# filename = filename.split('/')[-1] - -# # Current EQ location -# eq_lat = float(filename.split("_")[0]) -# eq_lon = float(filename.split("_")[1]) -# eq_depth = float(filename.split("_")[2]) - -# # Define the region of interest (latitude, longitude, and depth ranges) -# lat_range = (np.min([df.st_lat.min(), eq_lat]), np.max([df.st_lat.max(), eq_lat])) -# lon_range = (np.min([df.st_lon.min(), eq_lon]), np.max([df.st_lon.max(), eq_lon])) -# depth_range = (0, 50) - -# # Define the number of nodes in each dimension -# num_points = 100 - -# taup_model = TauPyModel(model='1066a') - -# # Create the grid -# lat_values = np.linspace(lat_range[0], lat_range[1], n_lat) -# lon_values = np.linspace(lon_range[0], lon_range[1], n_lon) -# depth_values = np.linspace(depth_range[0], depth_range[1], n_depth) - -# # Initialize the velocity model with constant values -# initial_velocity = 0 # km/s, this can be P-wave or S-wave velocity -# velocity_model = np.full((n_lat, n_lon, n_depth), initial_velocity, dtype=float) - -# # Loop through the stations and update the velocity model -# for i in range(len(df)): -# if ~np.isnan(df['velocity_p, km/s'].iloc[i]): - -# ray_path = taup_model.get_ray_paths_geo(source_depth_in_km=eq_depth, -# source_latitude_in_deg=eq_lat, -# source_longitude_in_deg=eq_lon, 
-# receiver_latitude_in_deg=df.st_lat.iloc[i], -# receiver_longitude_in_deg=df.st_lon.iloc[i], -# phase_list=['P', 'S']) - -# # THERE IS A PROBLEM WITH THE RAY PATHS. APPARENTLY LAT AND LON DON'T EXIST (HOW DID IT WORK BEFORE?) -# print(ray_path[0].path) - -# # Create the interpolator objects for latitude, longitude, and depth -# interp_latitude = interp1d(np.linspace(0, ray_path[0].path['lat'].max(), len(ray_path[0].path['lat'])), ray_path[0].path['lat']) -# interp_longitude = interp1d(np.linspace(0, ray_path[0].path['lon'].max(), len(ray_path[0].path['lon'])), ray_path[0].path['lon']) -# interp_depth = interp1d(np.linspace(0, ray_path[0].path['depth'].max(), len(ray_path[0].path['depth'])), ray_path[0].path['depth']) - -# # Resample the ray path to N points -# lat_values_interp = interp_latitude(np.linspace(0, ray_path[0].path['lat'].max(), num_points)) -# lon_values_interp = interp_longitude(np.linspace(0, ray_path[0].path['lon'].max(), num_points)) -# depth_values_interp = interp_depth(np.linspace(0, ray_path[0].path['depth'].max(), num_points)) - -# # Loop through the interpolated coordinates and update the grid cells with the average P-wave velocity -# for lat, lon, depth in zip(lat_values_interp, lon_values_interp, depth_values_interp): -# lat_index = find_closest_index(lat_values, lat) -# lon_index = find_closest_index(lon_values, lon) -# depth_index = find_closest_index(depth_values, depth) - -# if velocity_model[lat_index, lon_index, depth_index] == initial_velocity: -# velocity_model[lat_index, lon_index, depth_index] = df['velocity_p, km/s'].iloc[i] -# else: -# velocity_model[lat_index, lon_index, depth_index] = (velocity_model[lat_index, lon_index, depth_index] + -# df['velocity_p, km/s'].iloc[i]) / 2 - -# # Create the figure and axis -# fig = plt.figure(figsize=(8, 8)) -# ax = fig.add_subplot(111, projection='3d') - -# # Set the plot limits -# ax.set_xlim3d(lat_range[0], lat_range[1]) -# ax.set_ylim3d(lon_range[0], lon_range[1]) -# ax.set_zlim3d(depth_range[1], depth_range[0]) - -# ax.set_xlabel('Latitude') -# ax.set_ylabel('Longitude') -# ax.set_zlabel('Depth (km)') -# ax.set_title('Velocity Model') - -# # Create the meshgrid -# x, y, z = np.meshgrid( -# np.linspace(lat_range[0], lat_range[1], velocity_model.shape[0]+1), -# np.linspace(lon_range[0], lon_range[1], velocity_model.shape[1]+1), -# np.linspace(depth_range[0], depth_range[1], velocity_model.shape[2]+1), -# indexing='ij' -# ) - -# # Create the color array -# norm = plt.Normalize(vmin=2, vmax=8) -# colors_vel = plt.cm.plasma(norm(velocity_model)) - -# # Plot the voxels -# if interpolate: -# interpolated_velocity_model = interpolate_vel_model(velocity_model, initial_velocity, lat_values, lon_values, depth_values, n_lat, n_lon, n_depth) -# colors_interp = plt.cm.plasma(norm(interpolated_velocity_model)) -# ax.voxels(x, y, z, interpolated_velocity_model > 0, facecolors=colors_interp, alpha=0.5, edgecolor='k') - -# ax.voxels(x, y, z, velocity_model > 0, facecolors=colors_vel, alpha=1, edgecolor='black') - -# # Set the view angle -# ax.view_init(elev=elevation, azim=azimuth) - -# m = cm.ScalarMappable(cmap=plt.cm.plasma, norm=norm) -# m.set_array([]) -# plt.colorbar(m) - -# # Show the plot -# fig.canvas.draw(); -# image = np.array(fig.canvas.renderer.buffer_rgba()) -# plt.close(fig) - -# return image - -# model = torch.jit.load("model.pt") -model = torch.jit.load("model.pt") -model.eval() - -with gr.Blocks() as demo: - gr.HTML(""" -
        PhaseHunter 🏹

        Detect P and S seismic phases with uncertainty

        • Tab 1: Detect seismic phases by selecting a sample waveform or uploading your own waveform in .npy format.
        • Tab 2: Select an earthquake from the global earthquake catalogue and PhaseHunter will analyze seismic stations in the given radius.
        • Waveforms should be sampled at 100 samples/sec and have 3 (Z, N, E) or 1 (Z) channels. PhaseHunter analyzes the first 6000 samples of your file.

        Please contact me at anovosel@stanford.edu with questions and feedback
        -""") - with gr.Tab("Try on a single station"): - with gr.Row(): - # Define the input and output types for Gradio - inputs = gr.Dropdown( - ["data/sample/sample_0.npy", - "data/sample/sample_1.npy", - "data/sample/sample_2.npy"], - label="Sample waveform", - info="Select one of the samples", - value = "data/sample/sample_0.npy" - ) - with gr.Column(scale=1): - P_thres_inputs = gr.Slider(minimum=0.01, - maximum=1, - value=0.1, - label="P uncertainty threshold (s)", - step=0.01, - info="Acceptable uncertainty for P picks expressed in std() seconds", - interactive=True, - ) - - S_thres_inputs = gr.Slider(minimum=0.01, - maximum=1, - value=0.2, - label="S uncertainty threshold (s)", - step=0.01, - info="Acceptable uncertainty for S picks expressed in std() seconds", - interactive=True, - ) - with gr.Column(scale=1): - upload = gr.File(label="Upload your waveform") - with gr.Row(): - sampling_rate_inputs = gr.Slider(minimum=10, - maximum=1000, - value=100, - label="Samlping rate, Hz", - step=10, - info="Sampling rate of the waveform", - interactive=True, - ) - order_input = gr.Text(value='ZNE', - label='Channel order', - info='Order of the channels in the waveform file (e.g. ZNE)') - - button = gr.Button("Predict phases") - outputs = gr.Image(label='Waveform with Phases Marked', type='numpy', interactive=False) - - button.click(mark_phases, inputs=[inputs, upload, - P_thres_inputs, S_thres_inputs, - sampling_rate_inputs, order_input], - outputs=outputs) - with gr.Tab("Select earthquake from catalogue"): - - gr.HTML(""" -
        Using PhaseHunter to Analyze Seismic Waveforms

        Select an earthquake from the global earthquake catalogue (e.g. USGS) and the app will download the waveform from the FDSN client of your choice. The app will use a velocity model of your choice to select appropriate time windows for each station within a specified radius of the earthquake.

        The app will then analyze the waveforms and mark the detected phases on the waveform. Pick data for each waveform is reported in seconds from the start of the waveform.

        Velocities are derived from distance and travel time determined by PhaseHunter picks (v = distance/predicted_pick_time). The background of the velocity plot is colored by DEM.
        - """) - with gr.Row(): - with gr.Column(scale=2): - client_inputs = gr.Dropdown( - choices = list(URL_MAPPINGS.keys()), - label="FDSN Client", - info="Select one of the available FDSN clients", - value = "IRIS", - interactive=True - ) - - velocity_inputs = gr.Dropdown( - choices = ['1066a', '1066b', 'ak135', - 'ak135f', 'herrin', 'iasp91', - 'jb', 'prem', 'pwdk'], - label="1D velocity model", - info="Velocity model for station selection", - value = "1066a", - interactive=True - ) - - with gr.Column(scale=2): - timestamp_inputs = gr.Textbox(value='2019-07-04T17:33:49-00', - placeholder='YYYY-MM-DDTHH:MM:SS-TZ', - label="Timestamp", - info="Timestamp of the earthquake", - max_lines=1, - interactive=True) - - source_depth_inputs = gr.Number(value=10, - label="Source depth (km)", - info="Depth of the earthquake", - interactive=True) - - with gr.Column(scale=2): - eq_lat_inputs = gr.Number(value=35.766, - label="Latitude", - info="Latitude of the earthquake", - interactive=True) - - eq_lon_inputs = gr.Number(value=-117.605, - label="Longitude", - info="Longitude of the earthquake", - interactive=True) - - with gr.Column(scale=2): - radius_inputs = gr.Slider(minimum=1, - maximum=200, - value=50, - label="Radius (km)", - step=10, - info="""Select the radius around the earthquake to download data from.\n - Note that the larger the radius, the longer the app will take to run.""", - interactive=True) - - max_waveforms_inputs = gr.Slider(minimum=1, - maximum=100, - value=10, - label="Max waveforms per section", - step=1, - info="Maximum number of waveforms to show per section\n (to avoid long prediction times)", - interactive=True, - ) - with gr.Column(scale=2): - P_thres_inputs = gr.Slider(minimum=0.01, - maximum=1, - value=0.1, - label="P uncertainty threshold, s", - step=0.01, - info="Acceptable uncertainty for P picks expressed in std() seconds", - interactive=True, - ) - S_thres_inputs = gr.Slider(minimum=0.01, - maximum=1, - value=0.2, - label="S uncertainty threshold, s", - step=0.01, - info="Acceptable uncertainty for S picks expressed in std() seconds", - interactive=True, - ) - - button_phases = gr.Button("Predict phases") - output_image = gr.Image(label='Waveforms with Phases Marked', type='numpy', interactive=False) - - # with gr.Row(): - # with gr.Column(scale=2): - # azimuth_input = gr.Slider(minimum=-180, maximum=180, value=0, step=5, label="Azimuth", interactive=True) - # elevation_input = gr.Slider(minimum=-90, maximum=90, value=30, step=5, label="Elevation", interactive=True) - - # with gr.Row(): - # interpolate_input = gr.Checkbox(label="Interpolate", info="Interpolate velocity model") - # n_lat_input = gr.Slider(minimum=5, maximum=100, value=50, step=5, label="N lat", info='Number of Lat grid points', interactive=True) - # n_lon_input = gr.Slider(minimum=5, maximum=100, value=50, step=5, label="N lon", info='Number of Lon grid points', interactive=True) - # n_depth_input = gr.Slider(minimum=5, maximum=100, value=50, step=5, label="N depth", info='Number of Depth grid points', interactive=True) - - # button = gr.Button("Look at 3D Velocities") - # outputs_vel_model = gr.Image(label="3D Velocity Model") - - # button.click(compute_velocity_model, - # inputs=[azimuth_input, elevation_input, - # interpolate_input, n_lat_input, - # n_lon_input, n_depth_input], - # outputs=[outputs_vel_model]) - - with gr.Row(): - output_picks = gr.Dataframe(label='Pick data', - type='pandas', - interactive=False) - output_csv = gr.File(label="Output File", file_types=[".csv"]) - - 
button_phases.click(predict_on_section, - inputs=[client_inputs, timestamp_inputs, - eq_lat_inputs, eq_lon_inputs, - radius_inputs, source_depth_inputs, - velocity_inputs, max_waveforms_inputs, - P_thres_inputs, S_thres_inputs], - outputs=[output_image, output_picks, output_csv]) - -demo.launch() \ No newline at end of file diff --git a/spaces/crylake/img2poem/query2labels/lib/models/tresnet/layers/avg_pool.py b/spaces/crylake/img2poem/query2labels/lib/models/tresnet/layers/avg_pool.py deleted file mode 100644 index a91836f6c2310cdb9e40f8a271dbac0b613971ba..0000000000000000000000000000000000000000 --- a/spaces/crylake/img2poem/query2labels/lib/models/tresnet/layers/avg_pool.py +++ /dev/null @@ -1,19 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - - - -class FastAvgPool2d(nn.Module): - def __init__(self, flatten=False): - super(FastAvgPool2d, self).__init__() - self.flatten = flatten - - def forward(self, x): - if self.flatten: - in_size = x.size() - return x.view((in_size[0], in_size[1], -1)).mean(dim=2) - else: - return x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0), x.size(1), 1, 1) - - diff --git a/spaces/cvlab/zero123-live/CLIP/hubconf.py b/spaces/cvlab/zero123-live/CLIP/hubconf.py deleted file mode 100644 index 520b354b62ab4d199d49462e9c65890d924c69e6..0000000000000000000000000000000000000000 --- a/spaces/cvlab/zero123-live/CLIP/hubconf.py +++ /dev/null @@ -1,42 +0,0 @@ -from clip.clip import tokenize as _tokenize, load as _load, available_models as _available_models -import re -import string - -dependencies = ["torch", "torchvision", "ftfy", "regex", "tqdm"] - -# For compatibility (cannot include special characters in function name) -model_functions = { model: re.sub(f'[{string.punctuation}]', '_', model) for model in _available_models()} - -def _create_hub_entrypoint(model): - def entrypoint(**kwargs): - return _load(model, **kwargs) - - entrypoint.__doc__ = f"""Loads the {model} CLIP model - - Parameters - ---------- - device : Union[str, torch.device] - The device to put the loaded model - - jit : bool - Whether to load the optimized JIT model or more hackable non-JIT model (default). - - download_root: str - path to download the model files; by default, it uses "~/.cache/clip" - - Returns - ------- - model : torch.nn.Module - The {model} CLIP model - - preprocess : Callable[[PIL.Image], torch.Tensor] - A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input - """ - return entrypoint - -def tokenize(): - return _tokenize - -_entrypoints = {model_functions[model]: _create_hub_entrypoint(model) for model in _available_models()} - -globals().update(_entrypoints) \ No newline at end of file diff --git a/spaces/cvlab/zero123-live/uses.md b/spaces/cvlab/zero123-live/uses.md deleted file mode 100644 index cc186284c21f376f3c77766cfca2f10aec29f392..0000000000000000000000000000000000000000 --- a/spaces/cvlab/zero123-live/uses.md +++ /dev/null @@ -1,69 +0,0 @@ - -# Uses -_Note: This section is originally taken from the [Stable Diffusion v2 model card](https://huggingface.co/stabilityai/stable-diffusion-2), but applies in the same way to Zero-1-to-3._ - -## Direct Use -The model is intended for research purposes only. Possible research areas and tasks include: - -- Safe deployment of large-scale models. -- Probing and understanding the limitations and biases of generative models. -- Generation of artworks and use in design and other artistic processes. 
-- Applications in educational or creative tools. -- Research on generative models. - -Excluded uses are described below. - -### Misuse, Malicious Use, and Out-of-Scope Use -The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes. - -#### Out-of-Scope Use -The model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model. - -#### Misuse and Malicious Use -Using the model to generate content that is cruel to individuals is a misuse of this model. This includes, but is not limited to: - -- Generating demeaning, dehumanizing, or otherwise harmful representations of people or their environments, cultures, religions, etc. -- Intentionally promoting or propagating discriminatory content or harmful stereotypes. -- Impersonating individuals without their consent. -- Sexual content without consent of the people who might see it. -- Mis- and disinformation -- Representations of egregious violence and gore -- Sharing of copyrighted or licensed material in violation of its terms of use. -- Sharing content that is an alteration of copyrighted or licensed material in violation of its terms of use. - -## Limitations and Bias - -### Limitations - -- The model does not achieve perfect photorealism. -- The model cannot render legible text. -- Faces and people in general may not be parsed or generated properly. -- The autoencoding part of the model is lossy. -- Stable Diffusion was trained on a subset of the large-scale dataset [LAION-5B](https://laion.ai/blog/laion-5b/), which contains adult, violent and sexual content. To partially mitigate this, Stability AI has filtered the dataset using LAION's NSFW detector. -- Zero-1-to-3 was subsequently finetuned on a subset of the large-scale dataset [Objaverse](https://objaverse.allenai.org/), which might also potentially contain inappropriate content. To partially mitigate this, our demo applies a safety check to every uploaded image. - -### Bias -While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases. -Stable Diffusion was primarily trained on subsets of [LAION-2B(en)](https://laion.ai/blog/laion-5b/), which consists of images that are limited to English descriptions. -Images and concepts from communities and cultures that use other languages are likely to be insufficiently accounted for. -This affects the overall output of the model, as Western cultures are often overrepresented. -Stable Diffusion mirrors and exacerbates biases to such a degree that viewer discretion must be advised irrespective of the input or its intent. - - -### Safety Module -The intended use of this model is with the [Safety Checker](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) in Diffusers. -This checker works by checking model inputs against known hard-coded NSFW concepts. -Specifically, the checker compares the class probability of harmful concepts in the embedding space of the uploaded input images. -The concepts are passed into the model with the image and compared to a hand-engineered weight for each NSFW concept. 
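The linked Diffusers safety checker is the authoritative implementation. As a rough sketch of the embedding-space comparison described above, with random vectors and made-up thresholds standing in for the real concept embeddings and hand-engineered weights:

```python
import numpy as np

def cosine_similarity(a, b):
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

rng = np.random.default_rng(0)
image_embedding = rng.normal(size=512)            # embedding of the uploaded image
concept_embeddings = rng.normal(size=(4, 512))    # one vector per filtered concept (illustrative)
concept_thresholds = np.array([0.30, 0.30, 0.35, 0.40])  # stand-ins for the hand-engineered weights

scores = np.array([cosine_similarity(image_embedding, c) for c in concept_embeddings])
has_nsfw_concept = bool(np.any(scores > concept_thresholds))  # flag if any concept exceeds its threshold
print(has_nsfw_concept)
```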
- -## Citation -``` -@misc{liu2023zero1to3, - title={Zero-1-to-3: Zero-shot One Image to 3D Object}, - author={Ruoshi Liu and Rundi Wu and Basile Van Hoorick and Pavel Tokmakov and Sergey Zakharov and Carl Vondrick}, - year={2023}, - eprint={2303.11328}, - archivePrefix={arXiv}, - primaryClass={cs.CV} -} -``` diff --git a/spaces/cvlab/zero123/README.md b/spaces/cvlab/zero123/README.md deleted file mode 100644 index f20cace77f56e174f32c8b8f83ae97d7f1fef159..0000000000000000000000000000000000000000 --- a/spaces/cvlab/zero123/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Zero123 -emoji: 👀 -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 3.20.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference -Paper is from https://arxiv.org/abs/2303.11328 diff --git a/spaces/d0r1h/Hindi_News_Summarizer/app.py b/spaces/d0r1h/Hindi_News_Summarizer/app.py deleted file mode 100644 index 783ff2d7d089e2f33eabfe3fc7e27913f0f839bb..0000000000000000000000000000000000000000 --- a/spaces/d0r1h/Hindi_News_Summarizer/app.py +++ /dev/null @@ -1,40 +0,0 @@ -import gradio as gr -from summarizer import summarize - -description = """ -
        -SAR (सार) means "summary" in Hindi. It's a tool to summarize Hindi news with SOTA models.
        -""" -article="

        Created by Pawan Trivedi 2022 | GitHub

        " - -link1 = "https://www.amarujala.com/uttar-pradesh/shamli/up-news-heroin-caught-in-shaheen-bagh-of-delhi-is-connection-to-kairana-and-muzaffarnagar?src=tlh&position=3" - -link2 = "https://www.amarujala.com/lucknow/now-the-government-will-go-to-village-to-buy-wheat-in-up-and-wheat-will-also-be-purchased-from-mobile-purchasing-centers" - -link3 = "https://www.amarujala.com/india-news/supreme-court-cannot-give-the-status-of-place-of-namaz-without-evidence-rajasthan-waqf-board-s-petition-challenging-the-high-court-order-dismissed?pageId=1" - -with open("Example/File.txt", 'r', encoding="utf8") as f: - text = f.read() - - -interface = gr.Interface(fn = summarize, - inputs = [gr.inputs.Textbox(lines=5, - placeholder="Enter your text...", - label='News Input'), - gr.inputs.Radio(["T5", "BART"], type="value", label='Model') - ], - - outputs = [gr.outputs.Textbox(label="Sar"), - gr.outputs.Image(type="plot", label="WordCloud")], - - title = "Hindi News Summarizer", - examples=[[link1, "BART"], - [link2, "BART"], - [link3, "BART"], - [text, "BART"]], - - description=description, - article = article) - -interface.launch(debug=True) \ No newline at end of file diff --git a/spaces/dahaoGPT/ChatGLM2-6B-chatbot/app.py b/spaces/dahaoGPT/ChatGLM2-6B-chatbot/app.py deleted file mode 100644 index 5b50759d04629e2f21d5c94044d34e3c56d5338b..0000000000000000000000000000000000000000 --- a/spaces/dahaoGPT/ChatGLM2-6B-chatbot/app.py +++ /dev/null @@ -1,29 +0,0 @@ -from transformers import AutoModel, AutoTokenizer -import gradio as gr -tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True) -model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).half().cuda() -model = model.eval() - -def beginchat(input, history=None): - if history is None: - history = [] - response, history = model.chat(tokenizer, input, history) - return history, history - - -with gr.Blocks() as chatglm2bot: - gr.Markdown('''### ChatGLM2-6B---chatbot demo - - Glm-130b: An open bilingual pre-trained model - Author: Zeng, Aohan and Liu, Xiao and Du, Zhengxiao and Wang, Zihan and Lai, Hanyu and Ding, Ming and Yang, Zhuoyi and Xu, Yifan and Zheng, Wendi and Xia, Xiao and others - Paper: arXiv preprint arXiv:2210.02414''') - state = gr.State([]) - chatbot = gr.Chatbot([], elem_id="chatbot") - with gr.Row(): - with gr.Column(scale=4): - txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False) - with gr.Column(scale=1): - button = gr.Button("Generate") - txt.submit(beginchat, [txt, state], [chatbot, state]) - button.click(beginchat, [txt, state], [chatbot, state]) -chatglm2bot.queue().launch() \ No newline at end of file diff --git "a/spaces/dakaiye/dky_xuexi/crazy_functions/\345\257\271\350\257\235\345\216\206\345\217\262\345\255\230\346\241\243.py" "b/spaces/dakaiye/dky_xuexi/crazy_functions/\345\257\271\350\257\235\345\216\206\345\217\262\345\255\230\346\241\243.py" deleted file mode 100644 index c638d1bd087c878e9722bec02361111613ac2b7c..0000000000000000000000000000000000000000 --- "a/spaces/dakaiye/dky_xuexi/crazy_functions/\345\257\271\350\257\235\345\216\206\345\217\262\345\255\230\346\241\243.py" +++ /dev/null @@ -1,143 +0,0 @@ -from toolbox import CatchException, update_ui -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -import re - -def write_chat_to_file(chatbot, history=None, file_name=None): - """ - 将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。 - """ - import os - import time - if file_name is None: - 
file_name = 'chatGPT对话历史' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.html' - os.makedirs('./gpt_log/', exist_ok=True) - with open(f'./gpt_log/{file_name}', 'w', encoding='utf8') as f: - from theme import advanced_css - f.write(f'对话历史') - for i, contents in enumerate(chatbot): - for j, content in enumerate(contents): - try: # 这个bug没找到触发条件,暂时先这样顶一下 - if type(content) != str: content = str(content) - except: - continue - f.write(content) - if j == 0: - f.write('
        ') - f.write('
        \n\n') - f.write('
        \n\n raw chat context:\n') - f.write('') - for h in history: - f.write("\n>>>" + h) - f.write('') - res = '对话历史写入:' + os.path.abspath(f'./gpt_log/{file_name}') - print(res) - return res - -def gen_file_preview(file_name): - try: - with open(file_name, 'r', encoding='utf8') as f: - file_content = f.read() - # pattern to match the text between and - pattern = re.compile(r'.*?', flags=re.DOTALL) - file_content = re.sub(pattern, '', file_content) - html, history = file_content.split('
        \n\n raw chat context:\n') - history = history.strip('') - history = history.strip('') - history = history.split("\n>>>") - return list(filter(lambda x:x!="", history))[0][:100] - except: - return "" - -def read_file_to_chat(chatbot, history, file_name): - with open(file_name, 'r', encoding='utf8') as f: - file_content = f.read() - # pattern to match the text between and - pattern = re.compile(r'.*?', flags=re.DOTALL) - file_content = re.sub(pattern, '', file_content) - html, history = file_content.split('
        \n\n raw chat context:\n') - history = history.strip('') - history = history.strip('') - history = history.split("\n>>>") - history = list(filter(lambda x:x!="", history)) - html = html.split('
        \n\n') - html = list(filter(lambda x:x!="", html)) - chatbot.clear() - for i, h in enumerate(html): - i_say, gpt_say = h.split('
        ') - chatbot.append([i_say, gpt_say]) - chatbot.append([f"存档文件详情?", f"[Local Message] 载入对话{len(html)}条,上下文{len(history)}条。"]) - return chatbot, history - -@CatchException -def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ - - chatbot.append(("保存当前对话", - f"[Local Message] {write_chat_to_file(chatbot, history)},您可以调用“载入对话历史存档”还原当下的对话。\n警告!被保存的对话历史可以被使用该系统的任何人查阅。")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - -def hide_cwd(str): - import os - current_path = os.getcwd() - replace_path = "." - return str.replace(current_path, replace_path) - -@CatchException -def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ - from .crazy_utils import get_files_from_everything - success, file_manifest, _ = get_files_from_everything(txt, type='.html') - - if not success: - if txt == "": txt = '空空如也的输入栏' - import glob - local_history = "
        ".join(["`"+hide_cwd(f)+f" ({gen_file_preview(f)})"+"`" for f in glob.glob(f'gpt_log/**/chatGPT对话历史*.html', recursive=True)]) - chatbot.append([f"正在查找对话历史文件(html格式): {txt}", f"找不到任何html文件: {txt}。但本地存储了以下历史文件,您可以将任意一个文件路径粘贴到输入区,然后重试:
        {local_history}"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - try: - chatbot, history = read_file_to_chat(chatbot, history, file_manifest[0]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - except: - chatbot.append([f"载入对话历史文件", f"对话历史文件损坏!"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - -@CatchException -def 删除所有本地对话历史记录(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ - - import glob, os - local_history = "
        ".join(["`"+hide_cwd(f)+"`" for f in glob.glob(f'gpt_log/**/chatGPT对话历史*.html', recursive=True)]) - for f in glob.glob(f'gpt_log/**/chatGPT对话历史*.html', recursive=True): - os.remove(f) - chatbot.append([f"删除所有历史对话文件", f"已删除
        {local_history}"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - diff --git a/spaces/dataminers/dataminers/betas.py b/spaces/dataminers/dataminers/betas.py deleted file mode 100644 index 89499d4d777eef77388d00f2d229b49cb8d12071..0000000000000000000000000000000000000000 --- a/spaces/dataminers/dataminers/betas.py +++ /dev/null @@ -1,74 +0,0 @@ -import pandas as pd -import numpy as np -import datetime as dt -import pandas_datareader as pdr -from datetime import datetime - - - -def convert_simFin2(path): - df = pd.read_csv(path, sep=';') - stocks = df.pivot(index="Date", columns="Ticker", values="Adj. Close") - return stocks - -def log_of_returns2(stocks): - log_returns = np.log(stocks/stocks.shift()) - return log_returns - - - - - -# Code to Calculate and output Betas -# Read in Stock csv data and convert to have each Ticker as a column. -#df = pd.read_csv('D:/SimFinData/us-shareprices-daily.csv', sep=';') -#stocks = df.pivot(index="Date", columns="Ticker", values="Adj. Close") -#stocks -#start = min(df['Date']) -#end = max(df['Date']) -#logRet = np.log(stocks/stocks.shift()) - - -#SP500 = pdr.get_data_yahoo("^GSPC", start) -#IXIC = pdr.get_data_yahoo("^IXIC", start) -#AOK = pdr.get_data_yahoo("AOK", start) - -#SP500['SP500'] = SP500['Adj Close'] -#IXIC['IXIC'] = IXIC['Adj Close'] -#AOK['AOK'] = AOK['Adj Close'] - -#spAC = np.log(SP500['SP500']/SP500['SP500'].shift()) -#spAC = spAC.loc[spAC.index <= end] - -#ixicAC = np.log(IXIC['IXIC']/IXIC['IXIC'].shift()) -#ixicAC = ixicAC.loc[ixicAC.index <= end] - -#aokAC = np.log(AOK['AOK']/AOK['AOK'].shift()) -#aokAC = aokAC.loc[aokAC.index <= end] - -#sp500B = logRet.join(spAC) -#ixicB = logRet.join(ixicAC) -#aokB = logRet.join(aokAC) - -#sp5Cov = sp500B.cov() -#ixicCov = ixicB.cov() -#aokCov = aokB.cov() - -#sp500Var = sp500B['SP500'].var() -#ixicVar = ixicB['IXIC'].var() -#aokVar = aokB['AOK'].var() - -#sp500Beta = sp5Cov.loc['SP500']/sp500Var -#ixicBeta = ixicCov.loc['IXIC']/ixicVar -#aokBeta = aokCov.loc['AOK']/aokVar - -#betas = pd.concat([sp500Beta,ixicBeta,aokBeta], axis=1) - -#betas['Ticker'] = betas.index - -#betas = betas[['Ticker','SP500','IXIC','AOK']] - -#betas.to_csv (r'betas.csv', index = None, header=True) - - - diff --git a/spaces/davila7/filegpt/embeddings.py b/spaces/davila7/filegpt/embeddings.py deleted file mode 100644 index d7596d473dd2539e182058296e1f8844c0a37a22..0000000000000000000000000000000000000000 --- a/spaces/davila7/filegpt/embeddings.py +++ /dev/null @@ -1,115 +0,0 @@ -"""Wrapper around OpenAI embedding models.""" -from typing import Any, Dict, List, Optional - -from pydantic import BaseModel, Extra, root_validator - -from langchain.embeddings.base import Embeddings -from langchain.utils import get_from_dict_or_env - -from tenacity import ( - retry, - retry_if_exception_type, - stop_after_attempt, - wait_exponential, -) -from openai.error import Timeout, APIError, APIConnectionError, RateLimitError - - -class OpenAIEmbeddings(BaseModel, Embeddings): - """Wrapper around OpenAI embedding models. - To use, you should have the ``openai`` python package installed, and the - environment variable ``OPENAI_API_KEY`` set with your API key or pass it - as a named parameter to the constructor. - Example: - .. 
code-block:: python - from langchain.embeddings import OpenAIEmbeddings - openai = OpenAIEmbeddings(openai_api_key="my-api-key") - """ - - client: Any #: :meta private: - document_model_name: str = "text-embedding-ada-002" - query_model_name: str = "text-embedding-ada-002" - openai_api_key: Optional[str] = None - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - # TODO: deprecate this - @root_validator(pre=True, allow_reuse=True) - def get_model_names(cls, values: Dict) -> Dict: - """Get model names from just old model name.""" - if "model_name" in values: - if "document_model_name" in values: - raise ValueError( - "Both `model_name` and `document_model_name` were provided, " - "but only one should be." - ) - if "query_model_name" in values: - raise ValueError( - "Both `model_name` and `query_model_name` were provided, " - "but only one should be." - ) - model_name = values.pop("model_name") - values["document_model_name"] = f"text-search-{model_name}-doc-001" - values["query_model_name"] = f"text-search-{model_name}-query-001" - return values - - @root_validator(allow_reuse=True) - def validate_environment(cls, values: Dict) -> Dict: - """Validate that api key and python package exists in environment.""" - openai_api_key = get_from_dict_or_env( - values, "openai_api_key", "OPENAI_API_KEY" - ) - try: - import openai - - openai.api_key = openai_api_key - values["client"] = openai.Embedding - except ImportError: - raise ValueError( - "Could not import openai python package. " - "Please it install it with `pip install openai`." - ) - return values - - @retry( - reraise=True, - stop=stop_after_attempt(100), - wait=wait_exponential(multiplier=1, min=10, max=60), - retry=( - retry_if_exception_type(Timeout) - | retry_if_exception_type(APIError) - | retry_if_exception_type(APIConnectionError) - | retry_if_exception_type(RateLimitError) - ), - ) - def _embedding_func(self, text: str, *, engine: str) -> List[float]: - """Call out to OpenAI's embedding endpoint with exponential backoff.""" - # replace newlines, which can negatively affect performance. - text = text.replace("\n", " ") - return self.client.create(input=[text], engine=engine)["data"][0]["embedding"] - - def embed_documents(self, texts: List[str]) -> List[List[float]]: - """Call out to OpenAI's embedding endpoint for embedding search docs. - Args: - texts: The list of texts to embed. - Returns: - List of embeddings, one for each text. - """ - responses = [ - self._embedding_func(text, engine=self.document_model_name) - for text in texts - ] - return responses - - def embed_query(self, text: str) -> List[float]: - """Call out to OpenAI's embedding endpoint for embedding query text. - Args: - text: The text to embed. - Returns: - Embeddings for the text. 
- """ - embedding = self._embedding_func(text, engine=self.query_model_name) - return embedding \ No newline at end of file diff --git a/spaces/dawood17/SayBot_Enchancer/CodeFormer/facelib/detection/yolov5face/models/experimental.py b/spaces/dawood17/SayBot_Enchancer/CodeFormer/facelib/detection/yolov5face/models/experimental.py deleted file mode 100644 index 37ba4c4420789c92dc0e2aaeb3d5b64859ec728c..0000000000000000000000000000000000000000 --- a/spaces/dawood17/SayBot_Enchancer/CodeFormer/facelib/detection/yolov5face/models/experimental.py +++ /dev/null @@ -1,45 +0,0 @@ -# # This file contains experimental modules - -import numpy as np -import torch -from torch import nn - -from facelib.detection.yolov5face.models.common import Conv - - -class CrossConv(nn.Module): - # Cross Convolution Downsample - def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): - # ch_in, ch_out, kernel, stride, groups, expansion, shortcut - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, (1, k), (1, s)) - self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) - self.add = shortcut and c1 == c2 - - def forward(self, x): - return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) - - -class MixConv2d(nn.Module): - # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595 - def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): - super().__init__() - groups = len(k) - if equal_ch: # equal c_ per group - i = torch.linspace(0, groups - 1e-6, c2).floor() # c2 indices - c_ = [(i == g).sum() for g in range(groups)] # intermediate channels - else: # equal weight.numel() per group - b = [c2] + [0] * groups - a = np.eye(groups + 1, groups, k=-1) - a -= np.roll(a, 1, axis=1) - a *= np.array(k) ** 2 - a[0] = 1 - c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b - - self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)]) - self.bn = nn.BatchNorm2d(c2) - self.act = nn.LeakyReLU(0.1, inplace=True) - - def forward(self, x): - return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/C_B_D_T_.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/C_B_D_T_.py deleted file mode 100644 index e9e2d5fde9cc5a72a17105d40e5c1c95ff09d824..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/C_B_D_T_.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2013 Google, Inc. All Rights Reserved. -# -# Google Author(s): Matt Fontaine - - -from fontTools.misc.textTools import bytesjoin -from fontTools.misc import sstruct -from . import E_B_D_T_ -from .BitmapGlyphMetrics import ( - BigGlyphMetrics, - bigGlyphMetricsFormat, - SmallGlyphMetrics, - smallGlyphMetricsFormat, -) -from .E_B_D_T_ import ( - BitmapGlyph, - BitmapPlusSmallMetricsMixin, - BitmapPlusBigMetricsMixin, -) -import struct - - -class table_C_B_D_T_(E_B_D_T_.table_E_B_D_T_): - - # Change the data locator table being referenced. - locatorName = "CBLC" - - # Modify the format class accessor for color bitmap use. - def getImageFormatClass(self, imageFormat): - try: - return E_B_D_T_.table_E_B_D_T_.getImageFormatClass(self, imageFormat) - except KeyError: - return cbdt_bitmap_classes[imageFormat] - - -# Helper method for removing export features not supported by color bitmaps. 
-# Writing data in the parent class will default to raw if an option is unsupported. -def _removeUnsupportedForColor(dataFunctions): - dataFunctions = dict(dataFunctions) - del dataFunctions["row"] - return dataFunctions - - -class ColorBitmapGlyph(BitmapGlyph): - - fileExtension = ".png" - xmlDataFunctions = _removeUnsupportedForColor(BitmapGlyph.xmlDataFunctions) - - -class cbdt_bitmap_format_17(BitmapPlusSmallMetricsMixin, ColorBitmapGlyph): - def decompile(self): - self.metrics = SmallGlyphMetrics() - dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics) - (dataLen,) = struct.unpack(">L", data[:4]) - data = data[4:] - - # For the image data cut it to the size specified by dataLen. - assert dataLen <= len(data), "Data overrun in format 17" - self.imageData = data[:dataLen] - - def compile(self, ttFont): - dataList = [] - dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics)) - dataList.append(struct.pack(">L", len(self.imageData))) - dataList.append(self.imageData) - return bytesjoin(dataList) - - -class cbdt_bitmap_format_18(BitmapPlusBigMetricsMixin, ColorBitmapGlyph): - def decompile(self): - self.metrics = BigGlyphMetrics() - dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics) - (dataLen,) = struct.unpack(">L", data[:4]) - data = data[4:] - - # For the image data cut it to the size specified by dataLen. - assert dataLen <= len(data), "Data overrun in format 18" - self.imageData = data[:dataLen] - - def compile(self, ttFont): - dataList = [] - dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics)) - dataList.append(struct.pack(">L", len(self.imageData))) - dataList.append(self.imageData) - return bytesjoin(dataList) - - -class cbdt_bitmap_format_19(ColorBitmapGlyph): - def decompile(self): - (dataLen,) = struct.unpack(">L", self.data[:4]) - data = self.data[4:] - - assert dataLen <= len(data), "Data overrun in format 19" - self.imageData = data[:dataLen] - - def compile(self, ttFont): - return struct.pack(">L", len(self.imageData)) + self.imageData - - -# Dict for CBDT extended formats. -cbdt_bitmap_classes = { - 17: cbdt_bitmap_format_17, - 18: cbdt_bitmap_format_18, - 19: cbdt_bitmap_format_19, -} diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Blocks-ae22907c.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Blocks-ae22907c.js deleted file mode 100644 index 7c05a54ce960d07db66fd2f55f471006c585f8a1..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Blocks-ae22907c.js +++ /dev/null @@ -1,47 +0,0 @@ -const VERSION_RE = new RegExp("3.40.1/", "g");function import_fix(mod, base) {const url = new URL(mod, base); return import(`https://gradio.s3-us-west-2.amazonaws.com/3.40.1/${url.pathname?.startsWith('/') ? 
url.pathname.substring(1).replace(VERSION_RE, "") : url.pathname.replace(VERSION_RE, "")}`);}import{n as K,i as Po,a as Ao,l as To,c as Fo,d as zo,b as qo,S as ie,e as re,s as se,f as de,g as f,h as v,j as d,k as b,m as C,o as D,t as S,p as ge,q as $e,r as ne,u as z,v as le,w as A,x as N,y as Lo,z as Vo,A as Ro,B as Do,C as Oe,D as Fe,E as ve,F as U,G as H,H as Y,I as te,J as qt,K as fe,L as Mo,_ as $,M as Be,N as je,O as Ge,P as ce,Q as $o,R as Bo,T as Go,U as No,V as Uo,W as Ho,X as Se,Y as Ie,Z as be,$ as Yo,a0 as Wo,a1 as Jo,a2 as Ko,a3 as Zo,a4 as Qo}from"./index-9e76ffee.js";import{c as Xo,f as Ne,m as xo,O as ei,z as ti,B as ze,a as ni}from"./Button-30a08c0b.js";function li(l,e,t,n){if(!e)return K;const o=l.getBoundingClientRect();if(e.left===o.left&&e.right===o.right&&e.top===o.top&&e.bottom===o.bottom)return K;const{delay:i=0,duration:s=300,easing:r=Po,start:a=Ao()+i,end:_=a+s,tick:c=K,css:g}=t(l,{from:e,to:o},n);let p=!0,h=!1,E;function j(){g&&(E=Fo(l,0,1,s,i,r,g)),i||(h=!0)}function w(){g&&zo(l,E),p=!1}return To(m=>{if(!h&&m>=a&&(h=!0),h&&m>=_&&(c(1,0),w()),!p)return!1;if(h){const k=m-a,I=0+1*r(k/s);c(I,1-I)}return!0}),j(),c(0,1),w}function oi(l){const e=getComputedStyle(l);if(e.position!=="absolute"&&e.position!=="fixed"){const{width:t,height:n}=e,o=l.getBoundingClientRect();l.style.position="absolute",l.style.width=t,l.style.height=n,ii(l,o)}}function ii(l,e){const t=l.getBoundingClientRect();if(e.left!==t.left||e.top!==t.top){const n=getComputedStyle(l),o=n.transform==="none"?"":n.transform;l.style.transform=`${o} translate(${e.left-t.left}px, ${e.top-t.top}px)`}}function ri(l,{from:e,to:t},n={}){const o=getComputedStyle(l),i=o.transform==="none"?"":o.transform,[s,r]=o.transformOrigin.split(" ").map(parseFloat),a=e.left+e.width*s/t.width-(t.left+s),_=e.top+e.height*r/t.height-(t.top+r),{delay:c=0,duration:g=h=>Math.sqrt(h)*120,easing:p=Xo}=n;return{delay:c,duration:qo(g)?g(Math.sqrt(a*a+_*_)):g,easing:p,css:(h,E)=>{const j=E*a,w=E*_,m=h+E*e.width/t.width,k=h+E*e.height/t.height;return`transform: ${i} translate(${j}px, ${w}px) scale(${m}, ${k});`}}}function si(l){let e,t;return{c(){e=de("svg"),t=de("path"),f(t,"stroke-linecap","round"),f(t,"stroke-linejoin","round"),f(t,"d","M12 9v3.75m9-.75a9 9 0 11-18 0 9 9 0 0118 0zm-9 3.75h.008v.008H12v-.008z"),f(e,"fill","none"),f(e,"stroke","currentColor"),f(e,"viewBox","0 0 24 24"),f(e,"width","100%"),f(e,"height","100%"),f(e,"xmlns","http://www.w3.org/2000/svg"),f(e,"aria-hidden","true"),f(e,"stroke-width","2"),f(e,"stroke-linecap","round"),f(e,"stroke-linejoin","round")},m(n,o){v(n,e,o),d(e,t)},p:K,i:K,o:K,d(n){n&&b(e)}}}class ai extends ie{constructor(e){super(),re(this,e,null,si,se,{})}}function _i(l){let e,t;return{c(){e=de("svg"),t=de("path"),f(t,"stroke-linecap","round"),f(t,"stroke-linejoin","round"),f(t,"d","M11.25 11.25l.041-.02a.75.75 0 011.063.852l-.708 2.836a.75.75 0 001.063.853l.041-.021M21 12a9 9 0 11-18 0 9 9 0 0118 0zm-9-3.75h.008v.008H12V8.25z"),f(e,"fill","none"),f(e,"stroke","currentColor"),f(e,"viewBox","0 0 24 24"),f(e,"width","100%"),f(e,"height","100%"),f(e,"xmlns","http://www.w3.org/2000/svg"),f(e,"aria-hidden","true"),f(e,"stroke-width","2"),f(e,"stroke-linecap","round"),f(e,"stroke-linejoin","round")},m(n,o){v(n,e,o),d(e,t)},p:K,i:K,o:K,d(n){n&&b(e)}}}class ci extends ie{constructor(e){super(),re(this,e,null,_i,se,{})}}function ui(l){let e,t;return{c(){e=de("svg"),t=de("path"),f(t,"stroke-linecap","round"),f(t,"stroke-linejoin","round"),f(t,"d","M12 9v3.75m-9.303 3.376c-.866 1.5.217 3.374 1.948 
3.374h14.71c1.73 0 2.813-1.874 1.948-3.374L13.949 3.378c-.866-1.5-3.032-1.5-3.898 0L2.697 16.126zM12 15.75h.007v.008H12v-.008z"),f(e,"fill","none"),f(e,"stroke","currentColor"),f(e,"stroke-width","2"),f(e,"viewBox","0 0 24 24"),f(e,"width","100%"),f(e,"height","100%"),f(e,"xmlns","http://www.w3.org/2000/svg"),f(e,"aria-hidden","true"),f(e,"stroke-linecap","round"),f(e,"stroke-linejoin","round")},m(n,o){v(n,e,o),d(e,t)},p:K,i:K,o:K,d(n){n&&b(e)}}}class fi extends ie{constructor(e){super(),re(this,e,null,ui,se,{})}}function di(l){let e,t;return e=new ai({}),{c(){U(e.$$.fragment)},m(n,o){H(e,n,o),t=!0},i(n){t||(A(e.$$.fragment,n),t=!0)},o(n){z(e.$$.fragment,n),t=!1},d(n){Y(e,n)}}}function pi(l){let e,t;return e=new ci({}),{c(){U(e.$$.fragment)},m(n,o){H(e,n,o),t=!0},i(n){t||(A(e.$$.fragment,n),t=!0)},o(n){z(e.$$.fragment,n),t=!1},d(n){Y(e,n)}}}function mi(l){let e,t;return e=new fi({}),{c(){U(e.$$.fragment)},m(n,o){H(e,n,o),t=!0},i(n){t||(A(e.$$.fragment,n),t=!0)},o(n){z(e.$$.fragment,n),t=!1},d(n){Y(e,n)}}}function gi(l){let e,t,n,o,i,s,r,a,_,c,g,p,h,E,j,w,m,k,I,u,q,L,F,M,W,O,_e,T;const B=[mi,pi,di],J=[];function pe(P,Z){return P[1]==="warning"?0:P[1]==="info"?1:P[1]==="error"?2:-1}return~(n=pe(l))&&(o=J[n]=B[n](l)),{c(){e=C("div"),t=C("div"),o&&o.c(),s=D(),r=C("div"),a=C("div"),_=S(l[1]),g=D(),p=C("div"),h=S(l[0]),w=D(),m=C("button"),k=C("span"),k.textContent="×",u=D(),q=C("div"),f(t,"class",i="toast-icon "+l[1]+" svelte-z3l7qj"),f(a,"class",c="toast-title "+l[1]+" svelte-z3l7qj"),f(p,"class",E="toast-text "+l[1]+" svelte-z3l7qj"),f(r,"class",j="toast-details "+l[1]+" svelte-z3l7qj"),f(k,"aria-hidden","true"),f(m,"class",I="toast-close "+l[1]+" svelte-z3l7qj"),f(m,"type","button"),f(m,"aria-label","Close"),f(m,"data-testid","toast-close"),f(q,"class",L="timer "+l[1]+" svelte-z3l7qj"),f(e,"class",F="toast-body "+l[1]+" svelte-z3l7qj"),f(e,"role","alert"),f(e,"data-testid","toast-body")},m(P,Z){v(P,e,Z),d(e,t),~n&&J[n].m(t,null),d(e,s),d(e,r),d(r,a),d(a,_),d(r,g),d(r,p),d(p,h),d(e,w),d(e,m),d(m,k),d(e,u),d(e,q),O=!0,_e||(T=[ge(m,"click",l[2]),ge(e,"click",$e(l[4])),ge(e,"keydown",$e(l[5]))],_e=!0)},p(P,[Z]){let ke=n;n=pe(P),n!==ke&&(o&&(ne(),z(J[ke],1,1,()=>{J[ke]=null}),le()),~n?(o=J[n],o||(o=J[n]=B[n](P),o.c()),A(o,1),o.m(t,null)):o=null),(!O||Z&2&&i!==(i="toast-icon "+P[1]+" svelte-z3l7qj"))&&f(t,"class",i),(!O||Z&2)&&N(_,P[1]),(!O||Z&2&&c!==(c="toast-title "+P[1]+" svelte-z3l7qj"))&&f(a,"class",c),(!O||Z&1)&&N(h,P[0]),(!O||Z&2&&E!==(E="toast-text "+P[1]+" svelte-z3l7qj"))&&f(p,"class",E),(!O||Z&2&&j!==(j="toast-details "+P[1]+" svelte-z3l7qj"))&&f(r,"class",j),(!O||Z&2&&I!==(I="toast-close "+P[1]+" svelte-z3l7qj"))&&f(m,"class",I),(!O||Z&2&&L!==(L="timer "+P[1]+" svelte-z3l7qj"))&&f(q,"class",L),(!O||Z&2&&F!==(F="toast-body "+P[1]+" svelte-z3l7qj"))&&f(e,"class",F)},i(P){O||(A(o),P&&Lo(()=>{O&&(W&&W.end(1),M=Vo(e,Ne,{duration:200,delay:100}),M.start())}),O=!0)},o(P){z(o),M&&M.invalidate(),P&&(W=Ro(e,Ne,{duration:200})),O=!1},d(P){P&&b(e),~n&&J[n].d(),P&&W&&W.end(),_e=!1,Do(T)}}}function hi(l,e,t){let{message:n=""}=e,{type:o}=e,{id:i}=e;const s=Oe();function r(){s("close",i)}Fe(()=>{setTimeout(()=>{r()},1e4)});function a(c){ve.call(this,l,c)}function _(c){ve.call(this,l,c)}return l.$$set=c=>{"message"in c&&t(0,n=c.message),"type"in c&&t(1,o=c.type),"id"in c&&t(3,i=c.id)},[n,o,r,i,a,_]}class bi extends ie{constructor(e){super(),re(this,e,hi,gi,se,{message:0,type:1,id:3})}}function Ue(l,e,t){const n=l.slice();return n[2]=e[t].type,n[3]=e[t].message,n[4]=e[t].id,n}function He(l,e){let 
t,n,o,i,s=K,r;return n=new bi({props:{type:e[2],message:e[3],id:e[4]}}),n.$on("close",e[1]),{key:l,first:null,c(){t=C("div"),U(n.$$.fragment),o=D(),fe(t,"width","100%"),this.first=t},m(a,_){v(a,t,_),H(n,t,null),d(t,o),r=!0},p(a,_){e=a;const c={};_&1&&(c.type=e[2]),_&1&&(c.message=e[3]),_&1&&(c.id=e[4]),n.$set(c)},r(){i=t.getBoundingClientRect()},f(){oi(t),s()},a(){s(),s=li(t,i,ri,{duration:300})},i(a){r||(A(n.$$.fragment,a),r=!0)},o(a){z(n.$$.fragment,a),r=!1},d(a){a&&b(t),Y(n)}}}function vi(l){let e,t=[],n=new Map,o,i=te(l[0]);const s=r=>r[4];for(let r=0;r0&&"parentIFrame"in window&&window.parentIFrame?.scrollTo(0,0)}function wi(l,e,t){let{messages:n=[]}=e;function o(i){ve.call(this,l,i)}return l.$$set=i=>{"messages"in i&&t(0,n=i.messages)},l.$$.update=()=>{l.$$.dirty&1&&ki(n)},[n,o]}class yi extends ie{constructor(e){super(),re(this,e,wi,vi,se,{messages:0})}}const Ei={accordion:()=>$(()=>import("./index-bc915ae7.js"),["assets/index-bc915ae7.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/StaticColumn-8964c3ef.js","assets/StaticColumn-2853eb31.css","assets/index-8f1feca1.css"]),annotatedimage:()=>$(()=>import("./index-ac935314.js"),["assets/index-ac935314.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/BlockLabel-9545c6da.js","assets/Empty-8e3485c0.js","assets/Image-953318a0.js","assets/ModifyUpload.svelte_svelte_type_style_lang-14b768c9.js","assets/ModifyUpload-77b0d4b2.css","assets/index-f0e43e7d.css"]),audio:()=>$(()=>import("./index-edfeedaf.js"),["assets/index-edfeedaf.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/UploadText-426a6b47.js","assets/Upload-1e84df2f.js","assets/ModifyUpload.svelte_svelte_type_style_lang-14b768c9.js","assets/ModifyUpload-77b0d4b2.css","assets/ModifyUpload-0461fcb6.js","assets/IconButton-307018b3.js","assets/BlockLabel-9545c6da.js","assets/Empty-8e3485c0.js","assets/ShareButton-40f28ee7.js","assets/Download-e6704cf2.js","assets/index-1af20794.css"]),box:()=>$(()=>import("./index-642779d3.js"),["assets/index-642779d3.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css"]),button:()=>$(()=>import("./index-a5f333f6.js"),["assets/index-a5f333f6.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css"]),chatbot:()=>$(()=>import("./index-0dbc7d7e.js"),["assets/index-0dbc7d7e.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/ShareButton-40f28ee7.js","assets/IconButton-307018b3.js","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/Copy-92242405.js","assets/BlockLabel-9545c6da.js","assets/ModifyUpload.svelte_svelte_type_style_lang-14b768c9.js","assets/ModifyUpload-77b0d4b2.css","assets/index-11252afb.css"]),checkbox:()=>$(()=>import("./index-201f0338.js"),["assets/index-201f0338.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/Info-77722665.js","assets/index-dc375626.css"]),checkboxgroup:()=>$(()=>import("./index-31e0db8d.js"),["assets/index-31e0db8d.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/BlockTitle-af232cbc.js","assets/Info-77722665.js","assets/index-e557d23a.css"]),code:()=>$(()=>import("./index-7045bfe3.js").then(l=>l.F),["assets/i
ndex-7045bfe3.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/Copy-92242405.js","assets/Download-e6704cf2.js","assets/BlockLabel-9545c6da.js","assets/Empty-8e3485c0.js","assets/index-4ccfb72c.css"]),colorpicker:()=>$(()=>import("./index-dc22e74c.js"),["assets/index-dc22e74c.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/BlockTitle-af232cbc.js","assets/Info-77722665.js","assets/index-cd311153.css"]),column:()=>$(()=>import("./index-9ba722d9.js"),["assets/index-9ba722d9.js","assets/StaticColumn-8964c3ef.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/StaticColumn-2853eb31.css"]),dataframe:()=>$(()=>import("./index-96c8120d.js"),["assets/index-96c8120d.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/Upload-1e84df2f.js","assets/ModifyUpload.svelte_svelte_type_style_lang-14b768c9.js","assets/ModifyUpload-77b0d4b2.css","assets/dsv-576afacd.js","assets/index-9ae8fa0e.css"]),dataset:()=>$(()=>import("./index-a44c805b.js"),["assets/index-a44c805b.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/Image-39fd5447.js","assets/Image-003ee87c.css","assets/csv-b0b7514a.js","assets/dsv-576afacd.js","assets/Model3d-e3d4c400.js","assets/Model3d-98fc2b2c.css","assets/index-322e8a8e.css","assets/Player-1e00f554.css"]),dropdown:()=>$(()=>import("./index-ab39ecc3.js"),["assets/index-ab39ecc3.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/BlockTitle-af232cbc.js","assets/Info-77722665.js","assets/index-1d75348c.css"]),file:()=>$(()=>import("./index-85b80f14.js"),["assets/index-85b80f14.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/BlockLabel-9545c6da.js","assets/Empty-8e3485c0.js","assets/File-bf210783.js","assets/ModifyUpload.svelte_svelte_type_style_lang-14b768c9.js","assets/ModifyUpload-77b0d4b2.css","assets/Upload-1e84df2f.js","assets/ModifyUpload-0461fcb6.js","assets/IconButton-307018b3.js","assets/UploadText-426a6b47.js","assets/index-aef3869a.css"]),form:()=>$(()=>import("./index-0a4ea765.js"),["assets/index-0a4ea765.js","assets/StaticForm-775ac3c9.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/StaticForm-3812b7f1.css"]),gallery:()=>$(()=>import("./index-057a4d4c.js"),["assets/index-057a4d4c.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/BlockLabel-9545c6da.js","assets/IconButton-307018b3.js","assets/Empty-8e3485c0.js","assets/ShareButton-40f28ee7.js","assets/ModifyUpload.svelte_svelte_type_style_lang-14b768c9.js","assets/ModifyUpload-77b0d4b2.css","assets/ModifyUpload-0461fcb6.js","assets/Download-e6704cf2.js","assets/Image-953318a0.js","assets/index-bc19ffad.css"]),group:()=>$(()=>import("./index-4191a31f.js"),["assets/index-4191a31f.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/index-37519934.css"]),highlightedtext:()=>$(()=>import("./index-2e429704.js"),["assets/index-2e429704.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/color-5a2b6a59.js","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/BlockLabel-9545c6da.js","assets/Empty-8e3485c0.js","assets/index-7791ea05.css"]),html:()=>$(()=>i
mport("./index-2498191d.js"),["assets/index-2498191d.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/index-329f8260.css"]),image:()=>$(()=>import("./index-c038c4da.js"),["assets/index-c038c4da.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/ShareButton-40f28ee7.js","assets/IconButton-307018b3.js","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/BlockLabel-9545c6da.js","assets/Empty-8e3485c0.js","assets/Download-e6704cf2.js","assets/Image-953318a0.js","assets/Image.svelte_svelte_type_style_lang-11edea9c.js","assets/Image-b496c98d.css","assets/ModifyUpload-0461fcb6.js","assets/ModifyUpload.svelte_svelte_type_style_lang-14b768c9.js","assets/ModifyUpload-77b0d4b2.css","assets/Upload-1e84df2f.js","assets/UploadText-426a6b47.js","assets/Image-39fd5447.js","assets/Image-003ee87c.css","assets/index-f62e764d.css"]),interpretation:()=>$(()=>import("./index-fb94a4aa.js"),["assets/index-fb94a4aa.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/BlockTitle-af232cbc.js","assets/Info-77722665.js","assets/index-6acaa952.css"]),json:()=>$(()=>import("./index-dcc65f03.js"),["assets/index-dcc65f03.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/Copy-92242405.js","assets/Empty-8e3485c0.js","assets/BlockLabel-9545c6da.js","assets/index-3ca142e0.css"]),label:()=>$(()=>import("./index-f6ff5ad4.js"),["assets/index-f6ff5ad4.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/BlockLabel-9545c6da.js","assets/Empty-8e3485c0.js","assets/index-cc2431f4.css"]),markdown:()=>$(()=>import("./index-5e8c1776.js"),["assets/index-5e8c1776.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/index-edf307d2.css"]),model3d:()=>$(()=>import("./index-5793eeea.js"),["assets/index-5793eeea.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/ModifyUpload.svelte_svelte_type_style_lang-14b768c9.js","assets/ModifyUpload-77b0d4b2.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/BlockLabel-9545c6da.js","assets/Empty-8e3485c0.js","assets/File-bf210783.js","assets/IconButton-307018b3.js","assets/Download-e6704cf2.js","assets/UploadText-426a6b47.js","assets/Upload-1e84df2f.js","assets/ModifyUpload-0461fcb6.js","assets/Model3d-e3d4c400.js","assets/Model3d-98fc2b2c.css","assets/index-4ffdbeab.css"]),number:()=>$(()=>import("./index-7f3dc7e5.js"),["assets/index-7f3dc7e5.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/BlockTitle-af232cbc.js","assets/Info-77722665.js","assets/index-76c3ee3f.css"]),plot:()=>$(()=>import("./index-106fe5c7.js"),["assets/index-106fe5c7.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/color-5a2b6a59.js","assets/linear-bcbcf466.js","assets/dsv-576afacd.js","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/Empty-8e3485c0.js","assets/BlockLabel-9545c6da.js","assets/index-31d5c487.css"]),radio:()=>$(()=>import("./index-aef15a25.js"),["assets/index-aef15a25.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/BlockTitle-af232cbc.js","assets/Info-77722665.js","assets/index-b7554727.css"]),row:()=>$(()=>import("./index
-4d3f6d59.js"),["assets/index-4d3f6d59.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/index-93c91554.css"]),slider:()=>$(()=>import("./index-3a8b136e.js"),["assets/index-3a8b136e.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/BlockTitle-af232cbc.js","assets/Info-77722665.js","assets/index-49c152ed.css"]),state:()=>$(()=>import("./index-320faa81.js"),["assets/index-320faa81.js","assets/index-9e76ffee.js","assets/index-9b163635.css"]),statustracker:()=>$(()=>import("./index-bb077656.js"),["assets/index-bb077656.js","assets/index-9e76ffee.js","assets/index-9b163635.css"]),tabs:()=>$(()=>import("./index-29fed5d6.js"),["assets/index-29fed5d6.js","assets/StaticTabs-00db98ac.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/StaticTabs-42a53876.css"]),tabitem:()=>$(()=>import("./index-a3bc2300.js"),["assets/index-a3bc2300.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/StaticTabs-00db98ac.js","assets/StaticTabs-42a53876.css","assets/StaticColumn-8964c3ef.js","assets/StaticColumn-2853eb31.css","assets/index-d43fcb36.css"]),textbox:()=>$(()=>import("./index-74850412.js"),["assets/index-74850412.js","assets/index-85ff0bcb.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/BlockTitle-af232cbc.js","assets/Info-77722665.js","assets/Copy-92242405.js","assets/index-dde6f8cc.css"]),timeseries:()=>$(()=>import("./index-c763733c.js"),["assets/index-c763733c.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/BlockLabel-9545c6da.js","assets/Empty-8e3485c0.js","assets/color-5a2b6a59.js","assets/csv-b0b7514a.js","assets/dsv-576afacd.js","assets/linear-bcbcf466.js","assets/Upload-1e84df2f.js","assets/ModifyUpload.svelte_svelte_type_style_lang-14b768c9.js","assets/ModifyUpload-77b0d4b2.css","assets/ModifyUpload-0461fcb6.js","assets/IconButton-307018b3.js","assets/UploadText-426a6b47.js","assets/index-9da94804.css"]),uploadbutton:()=>$(()=>import("./index-447aa1f0.js"),["assets/index-447aa1f0.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/ModifyUpload.svelte_svelte_type_style_lang-14b768c9.js","assets/ModifyUpload-77b0d4b2.css","assets/index-03d58ab8.css"]),video:()=>$(()=>import("./index-e7652dcb.js"),["assets/index-e7652dcb.js","assets/index-9e76ffee.js","assets/index-9b163635.css","assets/ModifyUpload.svelte_svelte_type_style_lang-14b768c9.js","assets/ModifyUpload-77b0d4b2.css","assets/Button-30a08c0b.js","assets/Button-0f9363c8.css","assets/BlockLabel-9545c6da.js","assets/IconButton-307018b3.js","assets/Empty-8e3485c0.js","assets/ShareButton-40f28ee7.js","assets/Download-e6704cf2.js","assets/Image.svelte_svelte_type_style_lang-11edea9c.js","assets/Image-b496c98d.css","assets/UploadText-426a6b47.js","assets/Upload-1e84df2f.js","assets/ModifyUpload-0461fcb6.js","assets/index-1ed4c79b.css","assets/Player-1e00f554.css"])},Lt="أرسل",Vt="أمسح",Rt="فسِّر",Dt="بلِّغ",Mt="أمثلة",$t="أو",ji={interface:{drop_image:"أسقط الصورة هنا",drop_video:"أسقط الفيديو هنا",drop_audio:"أسقط الملف الصوتي هنا",drop_file:"أسقط الملف هنا",drop_csv:"أسقط ملف البيانات هنا",click_to_upload:"إضغط للتحميل",view_api:"إستخدم واجهة البرمجة",built_with_Gradio:"تم الإنشاء بإستخدام 
Gradio"},Submit:Lt,Clear:Vt,Interpret:Rt,Flag:Dt,Examples:Mt,or:$t},Si=Object.freeze(Object.defineProperty({__proto__:null,Clear:Vt,Examples:Mt,Flag:Dt,Interpret:Rt,Submit:Lt,default:ji,or:$t},Symbol.toStringTag,{value:"Module"})),Bt="Envia",Gt="Neteja",Nt="Interpreta",Ut="Avisa",Ht="Exemples",Yt="o",Ii={interface:{drop_image:"Deixeu anar la imatge aquí",drop_video:"Deixeu anar el vídeo aquí",drop_audio:"Deixeu anar l'àudio aquí",drop_file:"Deixeu anar el fitxer aquí",drop_csv:"Deixeu anar el CSV aquí",click_to_upload:"Feu clic per pujar",view_api:"Veure l'API",built_with_Gradio:"Construït amb gradio",copy_to_clipboard:"Copia el json",loading:"S'està carregant",error:"ERROR",empty:"Buit"},Submit:Bt,Clear:Gt,Interpret:Nt,Flag:Ut,Examples:Ht,or:Yt},Ci=Object.freeze(Object.defineProperty({__proto__:null,Clear:Gt,Examples:Ht,Flag:Ut,Interpret:Nt,Submit:Bt,default:Ii,or:Yt},Symbol.toStringTag,{value:"Module"})),Wt="Absenden",Jt="Löschen",Kt="Ersteller",Zt="Flag",Qt="Beispiele",Xt="oder",Oi={interface:{drop_image:"Bild hier ablegen",drop_video:"Video hier ablegen",drop_audio:"Audio hier ablegen",drop_file:"Datei hier ablegen",drop_csv:"CSV Datei hier ablegen",click_to_upload:"Hochladen",view_api:"API anschauen",built_with_Gradio:"Mit Gradio erstellt"},Submit:Wt,Clear:Jt,Interpret:Kt,Flag:Zt,Examples:Qt,or:Xt},Pi=Object.freeze(Object.defineProperty({__proto__:null,Clear:Jt,Examples:Qt,Flag:Zt,Interpret:Kt,Submit:Wt,default:Oi,or:Xt},Symbol.toStringTag,{value:"Module"})),xt="Submit",en="Clear",tn="Interpret",nn="Flag",ln="Examples",on="or",Ai={interface:{drop_image:"Drop Image Here",drop_video:"Drop Video Here",drop_audio:"Drop Audio Here",drop_file:"Drop File Here",drop_csv:"Drop CSV Here",click_to_upload:"Click to Upload",view_api:"view the api",built_with_Gradio:"Built with gradio",copy_to_clipboard:"copy json",loading:"Loading",error:"ERROR",empty:"Empty"},Submit:xt,Clear:en,Interpret:tn,Flag:nn,Examples:ln,or:on},Ti=Object.freeze(Object.defineProperty({__proto__:null,Clear:en,Examples:ln,Flag:nn,Interpret:tn,Submit:xt,default:Ai,or:on},Symbol.toStringTag,{value:"Module"})),rn="Enviar",sn="Limpiar",an="Interpretar",_n="Avisar",cn="Ejemplos",un="o",Fi={interface:{drop_image:"Coloque la imagen aquí",drop_video:"Coloque el video aquí",drop_audio:"Coloque el audio aquí",drop_file:"Coloque el archivo aquí",drop_csv:"Coloque el CSV aquí",click_to_upload:"Haga click para cargar",view_api:"Ver la API",built_with_Gradio:"Construido con Gradio"},Submit:rn,Clear:sn,Interpret:an,Flag:_n,Examples:cn,or:un},zi=Object.freeze(Object.defineProperty({__proto__:null,Clear:sn,Examples:cn,Flag:_n,Interpret:an,Submit:rn,default:Fi,or:un},Symbol.toStringTag,{value:"Module"})),fn="ارسال",dn="حذف",pn="تفسیر",mn="پرچم",gn="مثال ها",hn="یا",qi={interface:{drop_image:"تصویر را اینجا رها کنید",drop_video:"ویدیو را اینجا رها کنید",drop_audio:"صوت را اینجا رها کنید",drop_file:"فایل را اینجا رها کنید",drop_csv:"فایل csv را اینجا رها کنید",click_to_upload:"برای آپلود کلیک کنید",view_api:"api را مشاهده کنید",built_with_Gradio:"ساخته شده با gradio"},Submit:fn,Clear:dn,Interpret:pn,Flag:mn,Examples:gn,or:hn},Li=Object.freeze(Object.defineProperty({__proto__:null,Clear:dn,Examples:gn,Flag:mn,Interpret:pn,Submit:fn,default:qi,or:hn},Symbol.toStringTag,{value:"Module"})),bn="Soumettre",vn="Nettoyer",kn="Interpréter",wn="Signaler",yn="Exemples",En="ou",Vi={interface:{drop_image:"Déposer l'Image Ici",drop_video:"Déposer la Vidéo Ici",drop_audio:"Déposer l'Audio Ici",drop_file:"Déposer le Fichier Ici",drop_csv:"Déposer le CSV 
Ici",click_to_upload:"Cliquer pour Télécharger",view_api:"Voir l'API",built_with_Gradio:"Conçu avec Gradio"},Submit:bn,Clear:vn,Interpret:kn,Flag:wn,Examples:yn,or:En},Ri=Object.freeze(Object.defineProperty({__proto__:null,Clear:vn,Examples:yn,Flag:wn,Interpret:kn,Submit:bn,default:Vi,or:En},Symbol.toStringTag,{value:"Module"})),jn="שלח",Sn="נקה",In="לפרש",Cn="סמן",On="דוגמות",Pn="או",Di={interface:{drop_image:"גרור קובץ תמונה לכאן",drop_video:"גרור קובץ סרטון לכאן",drop_audio:"גרור לכאן קובץ שמע",drop_file:"גרור קובץ לכאן",drop_csv:"גרור csv קובץ לכאן",click_to_upload:"לחץ כדי להעלות",view_api:"צפה ב API",built_with_Gradio:"בנוי עם גרדיו"},Submit:jn,Clear:Sn,Interpret:In,Flag:Cn,Examples:On,or:Pn},Mi=Object.freeze(Object.defineProperty({__proto__:null,Clear:Sn,Examples:On,Flag:Cn,Interpret:In,Submit:jn,default:Di,or:Pn},Symbol.toStringTag,{value:"Module"})),An="सबमिट करे",Tn="हटाये",Fn="व्याख्या करे",zn="चिह्नित करे",qn="उदाहरण",Ln="या",$i={interface:{drop_image:"यहाँ इमेज ड्रॉप करें",drop_video:"यहाँ वीडियो ड्रॉप करें",drop_audio:"यहाँ ऑडियो ड्रॉप करें",drop_file:"यहाँ File ड्रॉप करें",drop_csv:"यहाँ CSV ड्रॉप करें",click_to_upload:"अपलोड के लिए बटन दबायें",view_api:"API को देखे",built_with_Gradio:"Gradio से बना"},Submit:An,Clear:Tn,Interpret:Fn,Flag:zn,Examples:qn,or:Ln},Bi=Object.freeze(Object.defineProperty({__proto__:null,Clear:Tn,Examples:qn,Flag:zn,Interpret:Fn,Submit:An,default:$i,or:Ln},Symbol.toStringTag,{value:"Module"})),Vn="送信",Rn="クリア",Dn="解釈",Mn="フラグする",$n="入力例",Bn="または",Gi={interface:{drop_image:"ここに画像をドロップ",drop_video:"ここに動画をドロップ",drop_audio:"ここに音声をドロップ",drop_file:"ここにファイルをドロップ",drop_csv:"ここにCSVをドロップ",click_to_upload:"クリックしてアップロード",view_api:"APIを見る",built_with_Gradio:"gradioで作ろう"},Submit:Vn,Clear:Rn,Interpret:Dn,Flag:Mn,Examples:$n,or:Bn},Ni=Object.freeze(Object.defineProperty({__proto__:null,Clear:Rn,Examples:$n,Flag:Mn,Interpret:Dn,Submit:Vn,default:Gi,or:Bn},Symbol.toStringTag,{value:"Module"})),Gn="제출하기",Nn="클리어",Un="설명하기",Hn="플래그",Yn="예시",Wn="또는",Ui={interface:{drop_image:"이미지를 끌어 놓으세요",drop_video:"비디오를 끌어 놓으세요",drop_audio:"오디오를 끌어 놓으세요",drop_file:"파일을 끌어 놓으세요",drop_csv:"CSV파일을 끌어 놓으세요",click_to_upload:"클릭해서 업로드하기",view_api:"API 보기",built_with_Gradio:"gradio로 제작되었습니다"},Submit:Gn,Clear:Nn,Interpret:Un,Flag:Hn,Examples:Yn,or:Wn},Hi=Object.freeze(Object.defineProperty({__proto__:null,Clear:Nn,Examples:Yn,Flag:Hn,Interpret:Un,Submit:Gn,default:Ui,or:Wn},Symbol.toStringTag,{value:"Module"})),Jn="Pateikti",Kn="Trinti",Zn="Interpretuoti",Qn="Pažymėti",Xn="Pavyzdžiai",xn="arba",Yi={interface:{drop_image:"Įkelkite paveikslėlį čia",drop_video:"Įkelkite vaizdo įrašą čia",drop_audio:"Įkelkite garso įrašą čia",drop_file:"Įkelkite bylą čia",drop_csv:"Įkelkite CSV čia",click_to_upload:"Spustelėkite norėdami įkelti",view_api:"peržiūrėti api",built_with_Gradio:"sukurta su gradio"},Submit:Jn,Clear:Kn,Interpret:Zn,Flag:Qn,Examples:Xn,or:xn},Wi=Object.freeze(Object.defineProperty({__proto__:null,Clear:Kn,Examples:Xn,Flag:Qn,Interpret:Zn,Submit:Jn,default:Yi,or:xn},Symbol.toStringTag,{value:"Module"})),el="Zend in",tl="Wis",nl="Interpreteer",ll="Vlag",ol="Voorbeelden",il="of",Ji={interface:{drop_image:"Sleep een Afbeelding hier",drop_video:"Sleep een Video hier",drop_audio:"Sleep een Geluidsbestand hier",drop_file:"Sleep een Document hier",drop_csv:"Sleep een CSV hier",click_to_upload:"Klik om the Uploaden",view_api:"zie de api",built_with_Gradio:"gemaakt met 
gradio"},Submit:el,Clear:tl,Interpret:nl,Flag:ll,Examples:ol,or:il},Ki=Object.freeze(Object.defineProperty({__proto__:null,Clear:tl,Examples:ol,Flag:ll,Interpret:nl,Submit:el,default:Ji,or:il},Symbol.toStringTag,{value:"Module"})),rl="Zatwierdź",sl="Wyczyść",al="Interpretuj",_l="Oznacz",cl="Przykłady",ul="lub",Zi={interface:{drop_image:"Przeciągnij tutaj zdjęcie",drop_video:"Przeciągnij tutaj video",drop_audio:"Przeciągnij tutaj audio",drop_file:"Przeciągnij tutaj plik",drop_csv:"Przeciągnij tutaj CSV",click_to_upload:"Kliknij, aby przesłać",view_api:"zobacz api",built_with_Gradio:"utworzone z gradio"},Submit:rl,Clear:sl,Interpret:al,Flag:_l,Examples:cl,or:ul},Qi=Object.freeze(Object.defineProperty({__proto__:null,Clear:sl,Examples:cl,Flag:_l,Interpret:al,Submit:rl,default:Zi,or:ul},Symbol.toStringTag,{value:"Module"})),fl="Enviar",dl="Limpar",pl="Interpretar",ml="Marcar",gl="Exemplos",hl="ou",Xi={interface:{drop_image:"Solte a Imagem Aqui",drop_video:"Solte o Vídeo Aqui",drop_audio:"Solte o Áudio Aqui",drop_file:"Solte o Arquivo Aqui",drop_csv:"Solte o CSV Aqui",click_to_upload:"Clique para o Upload",view_api:"Veja a API",built_with_Gradio:"Construído com gradio",copy_to_clipboard:"copiar para o clipboard",loading:"Carregando",error:"ERRO",empty:"Vazio"},Submit:fl,Clear:dl,Interpret:pl,Flag:ml,Examples:gl,or:hl},xi=Object.freeze(Object.defineProperty({__proto__:null,Clear:dl,Examples:gl,Flag:ml,Interpret:pl,Submit:fl,default:Xi,or:hl},Symbol.toStringTag,{value:"Module"})),bl="Исполнить",vl="Очистить",kl="Интерпретировать",wl="Пометить",yl="Примеры",El="или",er={interface:{drop_image:"Поместите Изображение Здесь",drop_video:"Поместите Видео Здесь",drop_audio:"Поместите Аудио Здесь",drop_file:"Поместите Документ Здесь",drop_csv:"Поместите CSV Здесь",click_to_upload:"Нажмите, чтобы загрузить",view_api:"просмотр api",built_with_Gradio:"сделано с помощью gradio"},Submit:bl,Clear:vl,Interpret:kl,Flag:wl,Examples:yl,or:El},tr=Object.freeze(Object.defineProperty({__proto__:null,Clear:vl,Examples:yl,Flag:wl,Interpret:kl,Submit:bl,default:er,or:El},Symbol.toStringTag,{value:"Module"})),jl="சமர்ப்பிக்கவும்",Sl="அழிக்கவும்",Il="விளக்குவது",Cl="கொடியிடு",Ol="உதாரணங்கள்",Pl="அல்லது",nr={interface:{drop_image:"படத்தை பதிவேற்றவும்",drop_video:"காணொளியை பதிவேற்றவும்",drop_audio:"ஆடியோவை பதிவேற்றவும்",drop_file:"கோப்பை பதிவேற்றவும்",drop_csv:"csv ஐ பதிவேற்றவும்",click_to_upload:"பதிவேற்ற அழுத்தவும்",view_api:"பயன்பாட்டு நிரல் இடைமுகத்தைக் காண்க",built_with_Gradio:"கிரேடியோ வுடன் உருவாக்கப்பட்டது"},Submit:jl,Clear:Sl,Interpret:Il,Flag:Cl,Examples:Ol,or:Pl},lr=Object.freeze(Object.defineProperty({__proto__:null,Clear:Sl,Examples:Ol,Flag:Cl,Interpret:Il,Submit:jl,default:nr,or:Pl},Symbol.toStringTag,{value:"Module"})),Al="Yükle",Tl="Temizle",Fl="Yorumla",zl="Etiketle",ql="örnekler",Ll="veya",or={interface:{drop_image:"Resmi Buraya Sürükle",drop_video:"Videoyu Buraya Sürükle",drop_audio:"Kaydı Buraya Sürükle",drop_file:"Dosyayı Buraya Sürükle",drop_csv:"CSV'yi Buraya Sürükle",click_to_upload:"Yüklemek için Tıkla",view_api:"api'yi görüntüle",built_with_Gradio:"Gradio ile oluşturulmuştur"},Submit:Al,Clear:Tl,Interpret:Fl,Flag:zl,Examples:ql,or:Ll},ir=Object.freeze(Object.defineProperty({__proto__:null,Clear:Tl,Examples:ql,Flag:zl,Interpret:Fl,Submit:Al,default:or,or:Ll},Symbol.toStringTag,{value:"Module"})),Vl="Надіслати",Rl="Очистити",Dl="Пояснити результат",Ml="Позначити",$l="Приклади",Bl="або",rr={interface:{drop_image:"Перетягніть зображення сюди",drop_video:"Перетягніть відео сюди",drop_audio:"Перетягніть 
аудіо сюди",drop_file:"Перетягніть файл сюди",drop_csv:"Перетягніть CSV-файл сюди",click_to_upload:"Натисніть щоб завантажити",view_api:"Переглянути API",built_with_Gradio:"Зроблено на основі gradio"},Submit:Vl,Clear:Rl,Interpret:Dl,Flag:Ml,Examples:$l,or:Bl},sr=Object.freeze(Object.defineProperty({__proto__:null,Clear:Rl,Examples:$l,Flag:Ml,Interpret:Dl,Submit:Vl,default:rr,or:Bl},Symbol.toStringTag,{value:"Module"})),Gl="جمع کریں",Nl="ہٹا دیں",Ul="تشریح کریں",Hl="نشان لگائیں",Yl="مثالیں",Wl="یا",ar={interface:{drop_image:"یہاں تصویر ڈراپ کریں",drop_video:"یہاں ویڈیو ڈراپ کریں",drop_audio:"یہاں آڈیو ڈراپ کریں",drop_file:"یہاں فائل ڈراپ کریں",drop_csv:"یہاں فائل ڈراپ کریں",click_to_upload:"اپ لوڈ کے لیے کلک کریں",view_api:"API دیکھیں",built_with_Gradio:"کے ساتھ بنایا گیا Gradio"},Submit:Gl,Clear:Nl,Interpret:Ul,Flag:Hl,Examples:Yl,or:Wl},_r=Object.freeze(Object.defineProperty({__proto__:null,Clear:Nl,Examples:Yl,Flag:Hl,Interpret:Ul,Submit:Gl,default:ar,or:Wl},Symbol.toStringTag,{value:"Module"})),Jl="Yubor",Kl="Tozalash",Zl="Tushuntirish",Ql="Bayroq",Xl="Namunalar",xl="或",cr={interface:{drop_image:"Rasmni Shu Yerga Tashlang",drop_video:"Videoni Shu Yerga Tashlang",drop_audio:"Audioni Shu Yerga Tashlang",drop_file:"Faylni Shu Yerga Tashlang",drop_csv:"CSVni Shu Yerga Tashlang",click_to_upload:"Yuklash uchun Bosing",view_api:"apini ko'ring",built_with_Gradio:"gradio bilan qilingan"},Submit:Jl,Clear:Kl,Interpret:Zl,Flag:Ql,Examples:Xl,or:xl},ur=Object.freeze(Object.defineProperty({__proto__:null,Clear:Kl,Examples:Xl,Flag:Ql,Interpret:Zl,Submit:Jl,default:cr,or:xl},Symbol.toStringTag,{value:"Module"})),eo="提交",to="清除",no="解释",lo="标记",oo="示例",io="或",fr={interface:{drop_image:"拖放图片至此处",drop_video:"拖放视频至此处",drop_audio:"拖放音频至此处",drop_file:"拖放文件至此处",drop_csv:"拖放CSV至此处",click_to_upload:"点击上传",view_api:"查看API",built_with_Gradio:"使用Gradio构建"},Submit:eo,Clear:to,Interpret:no,Flag:lo,Examples:oo,or:io},dr=Object.freeze(Object.defineProperty({__proto__:null,Clear:to,Examples:oo,Flag:lo,Interpret:no,Submit:eo,default:fr,or:io},Symbol.toStringTag,{value:"Module"})),ro="提交",so="清除",ao="解釋",_o="Flag",co="範例",uo="或",pr={interface:{drop_image:"刪除圖片",drop_video:"刪除影片",drop_audio:"刪除音頻",drop_file:"刪除檔案",drop_csv:"刪除CSV",click_to_upload:"點擊上傳",view_api:"查看API",built_with_Gradio:"使用Gradio構建"},Submit:ro,Clear:so,Interpret:ao,Flag:_o,Examples:co,or:uo},mr=Object.freeze(Object.defineProperty({__proto__:null,Clear:so,Examples:co,Flag:_o,Interpret:ao,Submit:ro,default:pr,or:uo},Symbol.toStringTag,{value:"Module"})),Ye=Object.assign({"./lang/ar.json":Si,"./lang/ca.json":Ci,"./lang/de.json":Pi,"./lang/en.json":Ti,"./lang/es.json":zi,"./lang/fa.json":Li,"./lang/fr.json":Ri,"./lang/he.json":Mi,"./lang/hi.json":Bi,"./lang/ja.json":Ni,"./lang/ko.json":Hi,"./lang/lt.json":Wi,"./lang/nl.json":Ki,"./lang/pl.json":Qi,"./lang/pt-BR.json":xi,"./lang/ru.json":tr,"./lang/ta.json":lr,"./lang/tr.json":ir,"./lang/uk.json":sr,"./lang/ur.json":_r,"./lang/uz.json":ur,"./lang/zh-CN.json":dr,"./lang/zh-tw.json":mr});function gr(){let l={};for(const e in Ye){const t=e.split("/").pop().split(".").shift();l[t]=Ye[e].default}return l}const We=gr();for(const l in We)xo(l,We[l]);function hr(){ei({fallbackLocale:"en",initialLocale:ti()})}function Je(l,e,t){const n=l.slice();return n[8]=e[t].component,n[17]=e[t].id,n[2]=e[t].props,n[18]=e[t].children,n[9]=e[t].has_modes,n}function Ke(l){let e=[],t=new Map,n,o,i=te(l[1]);const s=r=>r[17];for(let r=0;r{n=null}),le())},i(o){t||(A(n),t=!0)},o(o){z(n),t=!1},d(o){o&&b(e),n&&n.d(o)}}}function vr(l){let 
e,t,n,o;const i=[{elem_id:"elem_id"in l[2]&&l[2].elem_id||`component-${l[4]}`},{elem_classes:"elem_classes"in l[2]&&l[2].elem_classes||[]},{target:l[6]},l[2],{theme_mode:l[7]},{root:l[3]}];function s(_){l[15](_)}var r=l[8];function a(_){let c={$$slots:{default:[br]},$$scope:{ctx:_}};for(let g=0;gGe(e,"value",s)),e.$on("prop_change",l[10])),{c(){e&&U(e.$$.fragment),n=ce()},m(_,c){e&&H(e,_,c),v(_,n,c),o=!0},p(_,[c]){const g=c&220?$o(i,[c&20&&{elem_id:"elem_id"in _[2]&&_[2].elem_id||`component-${_[4]}`},c&4&&{elem_classes:"elem_classes"in _[2]&&_[2].elem_classes||[]},c&64&&{target:_[6]},c&4&&Bo(_[2]),c&128&&{theme_mode:_[7]},c&8&&{root:_[3]}]):{};if(c&2097387&&(g.$$scope={dirty:c,ctx:_}),!t&&c&17&&(t=!0,g.value=_[0][_[4]].props.value,Go(()=>t=!1)),c&256&&r!==(r=_[8])){if(e){ne();const p=e;z(p.$$.fragment,1,0,()=>{Y(p,1)}),le()}r?(e=Be(r,a(_)),_[14](e),je.push(()=>Ge(e,"value",s)),e.$on("prop_change",_[10]),U(e.$$.fragment),A(e.$$.fragment,1),H(e,n.parentNode,n)):e=null}else r&&e.$set(g)},i(_){o||(e&&A(e.$$.fragment,_),o=!0)},o(_){e&&z(e.$$.fragment,_),o=!1},d(_){_&&b(n),l[14](null),e&&Y(e,_)}}}function kr(l,e,t){let{root:n}=e,{component:o}=e,{instance_map:i}=e,{id:s}=e,{props:r}=e,{children:a}=e,{dynamic_ids:_}=e,{has_modes:c}=e,{parent:g=null}=e,{target:p}=e,{theme_mode:h}=e;const E=Oe();c&&(r.interactive===!1?r.mode="static":r.interactive===!0||_.has(s)?r.mode="dynamic":r.mode="static"),Fe(()=>(E("mount",s),()=>E("destroy",s))),No("BLOCK_KEY",g);function j(u){for(const q in u.detail)t(0,i[s].props[q]=u.detail[q],i)}function w(u){ve.call(this,l,u)}function m(u){ve.call(this,l,u)}function k(u){je[u?"unshift":"push"](()=>{i[s].instance=u,t(0,i)})}function I(u){l.$$.not_equal(i[s].props.value,u)&&(i[s].props.value=u,t(0,i))}return l.$$set=u=>{"root"in u&&t(3,n=u.root),"component"in u&&t(8,o=u.component),"instance_map"in u&&t(0,i=u.instance_map),"id"in u&&t(4,s=u.id),"props"in u&&t(2,r=u.props),"children"in u&&t(1,a=u.children),"dynamic_ids"in u&&t(5,_=u.dynamic_ids),"has_modes"in u&&t(9,c=u.has_modes),"parent"in u&&t(11,g=u.parent),"target"in u&&t(6,p=u.target),"theme_mode"in u&&t(7,h=u.theme_mode)},l.$$.update=()=>{l.$$.dirty&3&&t(1,a=a&&a.filter(u=>i[u.id].type!=="statustracker")),l.$$.dirty&19&&i[s].type==="form"&&(a?.every(u=>!u.props.visible)?t(2,r.visible=!1,r):t(2,r.visible=!0,r))},[i,a,r,n,s,_,p,h,o,c,j,g,w,m,k,I]}class fo extends ie{constructor(e){super(),re(this,e,kr,vr,se,{root:3,component:8,instance_map:0,id:4,props:2,children:1,dynamic_ids:5,has_modes:9,parent:11,target:6,theme_mode:7})}}function wr(l){let e,t,n,o;return{c(){e=de("svg"),t=de("g"),n=de("path"),o=de("path"),f(n,"d","M3.789,0.09C3.903,-0.024 4.088,-0.024 4.202,0.09L4.817,0.705C4.931,0.819 4.931,1.004 4.817,1.118L1.118,4.817C1.004,4.931 0.819,4.931 0.705,4.817L0.09,4.202C-0.024,4.088 -0.024,3.903 0.09,3.789L3.789,0.09Z"),f(o,"d","M4.825,3.797C4.934,3.907 4.934,4.084 4.825,4.193L4.193,4.825C4.084,4.934 3.907,4.934 3.797,4.825L0.082,1.11C-0.027,1.001 -0.027,0.823 0.082,0.714L0.714,0.082C0.823,-0.027 1.001,-0.027 1.11,0.082L4.825,3.797Z"),f(e,"width","100%"),f(e,"height","100%"),f(e,"viewBox","0 0 5 5"),f(e,"version","1.1"),f(e,"xmlns","http://www.w3.org/2000/svg"),f(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),f(e,"xml:space","preserve"),fe(e,"fill","currentColor"),fe(e,"fill-rule","evenodd"),fe(e,"clip-rule","evenodd"),fe(e,"stroke-linejoin","round"),fe(e,"stroke-miterlimit","2")},m(i,s){v(i,e,s),d(e,t),d(t,n),d(t,o)},p:K,i:K,o:K,d(i){i&&b(e)}}}class po extends 
ie{constructor(e){super(),re(this,e,null,wr,se,{})}}function yr(l){let e,t,n,o,i,s,r,a,_,c,g,p,h,E,j;return p=new po({}),{c(){e=C("div"),t=C("h1"),t.textContent="API Docs",n=D(),o=C("p"),i=S(`No API Routes found for - `),s=C("code"),r=S(l[0]),a=D(),_=C("p"),_.innerHTML=`To expose an API endpoint of your app in this page, set the api_name - parameter of the event listener. -
        - For more information, visit the - API Page guide - . To hide the API documentation button and this page, set - show_api=False - in the - Blocks.launch() - method.`,c=D(),g=C("button"),U(p.$$.fragment),f(s,"class","svelte-e1ha0f"),f(o,"class","attention svelte-e1ha0f"),f(e,"class","wrap prose svelte-e1ha0f"),f(g,"class","svelte-e1ha0f")},m(w,m){v(w,e,m),d(e,t),d(e,n),d(e,o),d(o,i),d(o,s),d(s,r),d(e,a),d(e,_),v(w,c,m),v(w,g,m),H(p,g,null),h=!0,E||(j=ge(g,"click",l[2]),E=!0)},p(w,[m]){(!h||m&1)&&N(r,w[0])},i(w){h||(A(p.$$.fragment,w),h=!0)},o(w){z(p.$$.fragment,w),h=!1},d(w){w&&(b(e),b(c),b(g)),Y(p),E=!1,j()}}}function Er(l,e,t){const n=Oe();let{root:o}=e;const i=()=>n("close");return l.$$set=s=>{"root"in s&&t(0,o=s.root)},[o,n,i]}class jr extends ie{constructor(e){super(),re(this,e,Er,yr,se,{root:0})}}function Ce(l,e,t=null){return e===void 0?t==="py"?"None":null:e==="string"||e==="str"?t===null?l:'"'+l+'"':e==="number"?t===null?parseFloat(l):l:e==="boolean"||e=="bool"?t==="py"?(l=String(l),l==="true"?"True":"False"):t==="js"?l:l==="true":e==="List[str]"?(l=JSON.stringify(l),l):t===null?l===""?null:JSON.parse(l):typeof l=="string"?l===""?t==="py"?"None":"null":l:JSON.stringify(l)}const mo="https://gradio.s3-us-west-2.amazonaws.com/3.40.1/assets/api-logo-5346f193.svg";function Qe(l){let e;return{c(){e=S("s")},m(t,n){v(t,e,n)},d(t){t&&b(e)}}}function Sr(l){let e,t,n,o,i,s,r,a,_,c,g,p,h,E,j,w,m,k,I,u=l[1]>1&&Qe();return w=new po({}),{c(){e=C("h2"),t=C("img"),o=D(),i=C("div"),s=S(`API documentation - `),r=C("div"),a=S(l[0]),_=D(),c=C("span"),g=C("span"),p=S(l[1]),h=S(" API endpoint"),u&&u.c(),E=D(),j=C("button"),U(w.$$.fragment),Se(t.src,n=mo)||f(t,"src",n),f(t,"alt",""),f(t,"class","svelte-3n2nxs"),f(r,"class","url svelte-3n2nxs"),f(g,"class","url svelte-3n2nxs"),f(c,"class","counts svelte-3n2nxs"),f(e,"class","svelte-3n2nxs"),f(j,"class","svelte-3n2nxs")},m(q,L){v(q,e,L),d(e,t),d(e,o),d(e,i),d(i,s),d(i,r),d(r,a),d(e,_),d(e,c),d(c,g),d(g,p),d(c,h),u&&u.m(c,null),v(q,E,L),v(q,j,L),H(w,j,null),m=!0,k||(I=ge(j,"click",l[3]),k=!0)},p(q,[L]){(!m||L&1)&&N(a,q[0]),(!m||L&2)&&N(p,q[1]),q[1]>1?u||(u=Qe(),u.c(),u.m(c,null)):u&&(u.d(1),u=null)},i(q){m||(A(w.$$.fragment,q),m=!0)},o(q){z(w.$$.fragment,q),m=!1},d(q){q&&(b(e),b(E),b(j)),u&&u.d(),Y(w),k=!1,I()}}}function Ir(l,e,t){let{root:n}=e,{api_count:o}=e;const i=Oe(),s=()=>i("close");return l.$$set=r=>{"root"in r&&t(0,n=r.root),"api_count"in r&&t(1,o=r.api_count)},[n,o,i,s]}class Cr extends ie{constructor(e){super(),re(this,e,Ir,Sr,se,{root:0,api_count:1})}}function Xe(l,e,t){const n=l.slice();return n[4]=e[t].label,n[5]=e[t].type,n[6]=e[t].python_type,n[7]=e[t].component,n[8]=e[t].serializer,n[10]=t,n}function xe(l){let e;return{c(){e=S("(")},m(t,n){v(t,e,n)},d(t){t&&b(e)}}}function Or(l){let e=l[2][l[10]].type+"",t;return{c(){t=S(e)},m(n,o){v(n,t,o)},p(n,o){o&4&&e!==(e=n[2][n[10]].type+"")&&N(t,e)},d(n){n&&b(t)}}}function Pr(l){let e=l[6].type+"",t;return{c(){t=S(e)},m(n,o){v(n,t,o)},p(n,o){o&2&&e!==(e=n[6].type+"")&&N(t,e)},d(n){n&&b(t)}}}function et(l){let e;return{c(){e=S(",")},m(t,n){v(t,e,n)},d(t){t&&b(e)}}}function tt(l){let e,t,n,o,i=l[4]+"",s,r,a=l[7]+"",_,c;function g(j,w){return j[3]==="python"?Pr:Or}let p=g(l),h=p(l),E=l[1].length>1&&et();return{c(){e=C("div"),t=C("span"),n=S("# "),h.c(),o=S(` - representing output in '`),s=S(i),r=S("' "),_=S(a),c=S(` - component`),E&&E.c(),f(t,"class","desc 
svelte-1c7hj3i"),f(e,"class","svelte-1c7hj3i"),Ie(e,"second-level",l[1].length>1)},m(j,w){v(j,e,w),d(e,t),d(t,n),h.m(t,null),d(t,o),d(t,s),d(t,r),d(t,_),d(t,c),E&&E.m(e,null)},p(j,w){p===(p=g(j))&&h?h.p(j,w):(h.d(1),h=p(j),h&&(h.c(),h.m(t,o))),w&2&&i!==(i=j[4]+"")&&N(s,i),w&2&&a!==(a=j[7]+"")&&N(_,a),j[1].length>1?E||(E=et(),E.c(),E.m(e,null)):E&&(E.d(1),E=null),w&2&&Ie(e,"second-level",j[1].length>1)},d(j){j&&b(e),h.d(),E&&E.d()}}}function nt(l){let e;return{c(){e=S(")")},m(t,n){v(t,e,n)},d(t){t&&b(e)}}}function lt(l){let e,t,n;return t=new Yo({props:{margin:!1}}),{c(){e=C("div"),U(t.$$.fragment),f(e,"class","load-wrap svelte-1c7hj3i")},m(o,i){v(o,e,i),H(t,e,null),n=!0},i(o){n||(A(t.$$.fragment,o),n=!0)},o(o){z(t.$$.fragment,o),n=!1},d(o){o&&b(e),Y(t)}}}function Ar(l){let e,t,n,o,i,s,r=l[1].length>1&&xe(),a=te(l[1]),_=[];for(let p=0;p1&&nt(),g=l[0]&<();return{c(){e=C("div"),t=C("div"),r&&r.c(),n=D();for(let p=0;p<_.length;p+=1)_[p].c();o=D(),c&&c.c(),i=D(),g&&g.c(),f(t,"class","svelte-1c7hj3i"),Ie(t,"hide",l[0]),f(e,"class","response-wrap svelte-1c7hj3i")},m(p,h){v(p,e,h),d(e,t),r&&r.m(t,null),d(t,n);for(let E=0;E<_.length;E+=1)_[E]&&_[E].m(t,null);d(t,o),c&&c.m(t,null),d(e,i),g&&g.m(e,null),s=!0},p(p,h){if(p[1].length>1?r||(r=xe(),r.c(),r.m(t,n)):r&&(r.d(1),r=null),h&14){a=te(p[1]);let E;for(E=0;E1?c||(c=nt(),c.c(),c.m(t,null)):c&&(c.d(1),c=null),(!s||h&1)&&Ie(t,"hide",p[0]),p[0]?g?h&1&&A(g,1):(g=lt(),g.c(),A(g,1),g.m(e,null)):g&&(ne(),z(g,1,1,()=>{g=null}),le())},i(p){s||(A(g),s=!0)},o(p){z(g),s=!1},d(p){p&&b(e),r&&r.d(),be(_,p),c&&c.d(),g&&g.d()}}}function Tr(l){let e,t,n,o;return n=new ze({props:{$$slots:{default:[Ar]},$$scope:{ctx:l}}}),{c(){e=C("h4"),e.innerHTML=`
        - Return Type(s)`,t=D(),U(n.$$.fragment),f(e,"class","svelte-1c7hj3i")},m(i,s){v(i,e,s),v(i,t,s),H(n,i,s),o=!0},p(i,[s]){const r={};s&2063&&(r.$$scope={dirty:s,ctx:i}),n.$set(r)},i(i){o||(A(n.$$.fragment,i),o=!0)},o(i){z(n.$$.fragment,i),o=!1},d(i){i&&(b(e),b(t)),Y(n,i)}}}function Fr(l,e,t){let{is_running:n}=e,{endpoint_returns:o}=e,{js_returns:i}=e,{current_language:s}=e;return l.$$set=r=>{"is_running"in r&&t(0,n=r.is_running),"endpoint_returns"in r&&t(1,o=r.endpoint_returns),"js_returns"in r&&t(2,i=r.js_returns),"current_language"in r&&t(3,s=r.current_language)},[n,o,i,s]}class go extends ie{constructor(e){super(),re(this,e,Fr,Tr,se,{is_running:0,endpoint_returns:1,js_returns:2,current_language:3})}}function zr(l){let e;return{c(){e=S(l[0])},m(t,n){v(t,e,n)},p(t,n){n&1&&N(e,t[0])},d(t){t&&b(e)}}}function qr(l){let e,t;return e=new ni({props:{size:"sm",$$slots:{default:[zr]},$$scope:{ctx:l}}}),e.$on("click",l[1]),{c(){U(e.$$.fragment)},m(n,o){H(e,n,o),t=!0},p(n,[o]){const i={};o&9&&(i.$$scope={dirty:o,ctx:n}),e.$set(i)},i(n){t||(A(e.$$.fragment,n),t=!0)},o(n){z(e.$$.fragment,n),t=!1},d(n){Y(e,n)}}}function Lr(l,e,t){let{code:n}=e,o="copy";function i(){navigator.clipboard.writeText(n),t(0,o="copied!"),setTimeout(()=>{t(0,o="copy")},1500)}return l.$$set=s=>{"code"in s&&t(2,n=s.code)},[o,i,n]}class Pe extends ie{constructor(e){super(),re(this,e,Lr,qr,se,{code:2})}}function Vr(l){let e,t,n,o,i,s;return t=new Pe({props:{code:it}}),{c(){e=C("div"),U(t.$$.fragment),n=D(),o=C("div"),i=C("pre"),i.textContent=`$ ${it}`,f(e,"class","copy svelte-hq8ezf"),f(i,"class","svelte-hq8ezf")},m(r,a){v(r,e,a),H(t,e,null),v(r,n,a),v(r,o,a),d(o,i),s=!0},p:K,i(r){s||(A(t.$$.fragment,r),s=!0)},o(r){z(t.$$.fragment,r),s=!1},d(r){r&&(b(e),b(n),b(o)),Y(t)}}}function Rr(l){let e,t,n,o,i,s;return t=new Pe({props:{code:ot}}),{c(){e=C("div"),U(t.$$.fragment),n=D(),o=C("div"),i=C("pre"),i.textContent=`$ ${ot}`,f(e,"class","copy svelte-hq8ezf"),f(i,"class","svelte-hq8ezf")},m(r,a){v(r,e,a),H(t,e,null),v(r,n,a),v(r,o,a),d(o,i),s=!0},p:K,i(r){s||(A(t.$$.fragment,r),s=!0)},o(r){z(t.$$.fragment,r),s=!1},d(r){r&&(b(e),b(n),b(o)),Y(t)}}}function Dr(l){let e,t,n,o;const i=[Rr,Vr],s=[];function r(a,_){return a[0]==="python"?0:a[0]==="javascript"?1:-1}return~(t=r(l))&&(n=s[t]=i[t](l)),{c(){e=C("code"),n&&n.c(),f(e,"class","svelte-hq8ezf")},m(a,_){v(a,e,_),~t&&s[t].m(e,null),o=!0},p(a,_){let c=t;t=r(a),t===c?~t&&s[t].p(a,_):(n&&(ne(),z(s[c],1,1,()=>{s[c]=null}),le()),~t?(n=s[t],n?n.p(a,_):(n=s[t]=i[t](a),n.c()),A(n,1),n.m(e,null)):n=null)},i(a){o||(A(n),o=!0)},o(a){z(n),o=!1},d(a){a&&b(e),~t&&s[t].d()}}}function Mr(l){let e,t;return e=new ze({props:{$$slots:{default:[Dr]},$$scope:{ctx:l}}}),{c(){U(e.$$.fragment)},m(n,o){H(e,n,o),t=!0},p(n,[o]){const i={};o&3&&(i.$$scope={dirty:o,ctx:n}),e.$set(i)},i(n){t||(A(e.$$.fragment,n),t=!0)},o(n){z(e.$$.fragment,n),t=!1},d(n){Y(e,n)}}}let ot="pip install gradio_client",it="npm i -D @gradio/client";function $r(l,e,t){let{current_language:n}=e;return l.$$set=o=>{"current_language"in o&&t(0,n=o.current_language)},[n]}class Br extends ie{constructor(e){super(),re(this,e,$r,Mr,se,{current_language:0})}}function Gr(l){let e,t,n,o;return{c(){e=C("h3"),t=S(`fn_index: - `),n=C("span"),o=S(l[1]),f(n,"class","post svelte-41kcm6"),f(e,"class","svelte-41kcm6")},m(i,s){v(i,e,s),d(e,t),d(e,n),d(n,o)},p(i,s){s&2&&N(o,i[1])},d(i){i&&b(e)}}}function Nr(l){let e,t,n,o="/"+l[0],i;return{c(){e=C("h3"),t=S(`api_name: - `),n=C("span"),i=S(o),f(n,"class","post 
svelte-41kcm6"),f(e,"class","svelte-41kcm6")},m(s,r){v(s,e,r),d(e,t),d(e,n),d(n,i)},p(s,r){r&1&&o!==(o="/"+s[0])&&N(i,o)},d(s){s&&b(e)}}}function Ur(l){let e;function t(i,s){return i[2]?Nr:Gr}let n=t(l),o=n(l);return{c(){o.c(),e=ce()},m(i,s){o.m(i,s),v(i,e,s)},p(i,[s]){n===(n=t(i))&&o?o.p(i,s):(o.d(1),o=n(i),o&&(o.c(),o.m(e.parentNode,e)))},i:K,o:K,d(i){i&&b(e),o.d(i)}}}function Hr(l,e,t){let{api_name:n=null}=e,{fn_index:o=null}=e,{named:i}=e;return l.$$set=s=>{"api_name"in s&&t(0,n=s.api_name),"fn_index"in s&&t(1,o=s.fn_index),"named"in s&&t(2,i=s.named)},[n,o,i]}class ho extends ie{constructor(e){super(),re(this,e,Hr,Ur,se,{api_name:0,fn_index:1,named:2})}}function rt(l,e,t){const n=l.slice();return n[14]=e[t].label,n[15]=e[t].type,n[16]=e[t].python_type,n[17]=e[t].component,n[18]=e[t].example_input,n[19]=e[t].serializer,n[21]=t,n}function st(l,e,t){const n=l.slice();return n[14]=e[t].label,n[15]=e[t].type,n[16]=e[t].python_type,n[17]=e[t].component,n[18]=e[t].example_input,n[19]=e[t].serializer,n[21]=t,n}function at(l,e,t){const n=l.slice();return n[14]=e[t].label,n[15]=e[t].type,n[16]=e[t].python_type,n[17]=e[t].component,n[18]=e[t].example_input,n[19]=e[t].serializer,n[21]=t,n}function Yr(l){let e,t;return e=new ho({props:{named:l[6],fn_index:l[1]}}),{c(){U(e.$$.fragment)},m(n,o){H(e,n,o),t=!0},p(n,o){const i={};o&64&&(i.named=n[6]),o&2&&(i.fn_index=n[1]),e.$set(i)},i(n){t||(A(e.$$.fragment,n),t=!0)},o(n){z(e.$$.fragment,n),t=!1},d(n){Y(e,n)}}}function Wr(l){let e,t;return e=new ho({props:{named:l[6],api_name:l[0].api_name}}),{c(){U(e.$$.fragment)},m(n,o){H(e,n,o),t=!0},p(n,o){const i={};o&64&&(i.named=n[6]),o&1&&(i.api_name=n[0].api_name),e.$set(i)},i(n){t||(A(e.$$.fragment,n),t=!0)},o(n){z(e.$$.fragment,n),t=!1},d(n){Y(e,n)}}}function Jr(l){let e,t,n,o,i,s,r,a,_,c,g,p,h,E,j;t=new Pe({props:{code:l[9]?.innerText}});let w=te(l[11]),m=[];for(let F=0;F{s[c]=null}),le()),~t?(n=s[t],n?n.p(a,_):(n=s[t]=i[t](a),n.c()),A(n,1),n.m(e,null)):n=null)},i(a){o||(A(n),o=!0)},o(a){z(n),o=!1},d(a){a&&b(e),~t&&s[t].d()}}}function ls(l){let e,t,n,o,i,s;const r=[Wr,Yr],a=[];function _(c,g){return c[6]?0:1}return t=_(l),n=a[t]=r[t](l),i=new ze({props:{$$slots:{default:[ns]},$$scope:{ctx:l}}}),{c(){e=C("div"),n.c(),o=D(),U(i.$$.fragment),f(e,"class","container svelte-1d98qmk")},m(c,g){v(c,e,g),a[t].m(e,null),d(e,o),H(i,e,null),s=!0},p(c,[g]){let p=t;t=_(c),t===p?a[t].p(c,g):(ne(),z(a[p],1,1,()=>{a[p]=null}),le(),n=a[t],n?n.p(c,g):(n=a[t]=r[t](c),n.c()),A(n,1),n.m(e,o));const h={};g&16778239&&(h.$$scope={dirty:g,ctx:c}),i.$set(h)},i(c){s||(A(n),A(i.$$.fragment,c),s=!0)},o(c){z(n),z(i.$$.fragment,c),s=!1},d(c){c&&b(e),a[t].d(),Y(i)}}}function os(l,e,t){let{dependency:n}=e,{dependency_index:o}=e,{root:i}=e,{dependency_failures:s}=e,{endpoint_parameters:r}=e,{js_parameters:a}=e,{named:_}=e,{current_language:c}=e,g,p,h=["Audio","File","Image","Video"],E=r.filter(m=>h.includes(m.component));function j(m){je[m?"unshift":"push"](()=>{g=m,t(8,g)})}function w(m){je[m?"unshift":"push"](()=>{p=m,t(9,p)})}return l.$$set=m=>{"dependency"in m&&t(0,n=m.dependency),"dependency_index"in m&&t(1,o=m.dependency_index),"root"in m&&t(2,i=m.root),"dependency_failures"in m&&t(3,s=m.dependency_failures),"endpoint_parameters"in m&&t(4,r=m.endpoint_parameters),"js_parameters"in m&&t(5,a=m.js_parameters),"named"in m&&t(6,_=m.named),"current_language"in m&&t(7,c=m.current_language)},[n,o,i,s,r,a,_,c,g,p,h,E,j,w]}class bo extends 
ie{constructor(e){super(),re(this,e,os,ls,se,{dependency:0,dependency_index:1,root:2,dependency_failures:3,endpoint_parameters:4,js_parameters:5,named:6,current_language:7})}}const is="https://gradio.s3-us-west-2.amazonaws.com/3.40.1/assets/python-20e39c92.svg",rs="https://gradio.s3-us-west-2.amazonaws.com/3.40.1/assets/javascript-850cf94b.svg";function mt(l,e,t){const n=l.slice();return n[18]=e[t],n[20]=t,n}function gt(l,e,t){const n=l.slice();return n[18]=e[t],n[20]=t,n}function ht(l,e,t){const n=l.slice();return n[22]=e[t][0],n[23]=e[t][1],n}function bt(l){let e,t,n,o,i;const s=[as,ss],r=[];function a(_,c){return c&32&&(e=null),e==null&&(e=!!(Object.keys(_[5].named_endpoints).length+Object.keys(_[5].unnamed_endpoints).length)),e?0:1}return t=a(l,-1),n=r[t]=s[t](l),{c(){n.c(),o=ce()},m(_,c){r[t].m(_,c),v(_,o,c),i=!0},p(_,c){let g=t;t=a(_,c),t===g?r[t].p(_,c):(ne(),z(r[g],1,1,()=>{r[g]=null}),le(),n=r[t],n?n.p(_,c):(n=r[t]=s[t](_),n.c()),A(n,1),n.m(o.parentNode,o))},i(_){i||(A(n),i=!0)},o(_){z(n),i=!1},d(_){_&&b(o),r[t].d(_)}}}function ss(l){let e,t;return e=new jr({props:{root:l[0]}}),e.$on("close",l[12]),{c(){U(e.$$.fragment)},m(n,o){H(e,n,o),t=!0},p(n,o){const i={};o&1&&(i.root=n[0]),e.$set(i)},i(n){t||(A(e.$$.fragment,n),t=!0)},o(n){z(e.$$.fragment,n),t=!1},d(n){Y(e,n)}}}function as(l){let e,t,n,o,i,s,r,a,_,c,g,p=Object.keys(l[5].named_endpoints).length,h,E,j=Object.keys(l[5].unnamed_endpoints).length,w,m;t=new Cr({props:{root:l[0],api_count:Object.keys(l[5].named_endpoints).length+Object.keys(l[5].unnamed_endpoints).length}}),t.$on("close",l[10]);let k=te(l[7]),I=[];for(let T=0;Tz(L[T],1,1,()=>{L[T]=null});let M=j&&Et(),W=te(l[1]),O=[];for(let T=0;Tz(O[T],1,1,()=>{O[T]=null});return{c(){e=C("div"),U(t.$$.fragment),n=D(),o=C("div"),i=C("div"),i.innerHTML=`

        Use the gradio_client - Python library or the - @gradio/client Javascript package to query the demo via API.

        `,s=D(),r=C("div"),a=C("div");for(let T=0;T{n=null}),le())},i(o){t||(A(n),t=!0)},o(o){z(n),t=!1},d(o){o&&b(e),n&&n.d(o)}}}function Et(l){let e;return{c(){e=C("h2"),e.textContent="Unnamed Endpoints",f(e,"class","header svelte-bdjvpc")},m(t,n){v(t,e,n)},d(t){t&&b(e)}}}function jt(l){let e,t,n,o,i,s;return t=new bo({props:{named:!1,endpoint_parameters:l[5].unnamed_endpoints[l[20]].parameters,js_parameters:l[6].unnamed_endpoints[l[20]].parameters,dependency:l[18],dependency_index:l[20],current_language:l[2],root:l[0],dependency_failures:l[4]}}),o=new go({props:{endpoint_returns:l[5].unnamed_endpoints[l[20]].returns,js_returns:l[6].unnamed_endpoints[l[20]].returns,is_running:l[3],current_language:l[2]}}),{c(){e=C("div"),U(t.$$.fragment),n=D(),U(o.$$.fragment),i=D(),f(e,"class","endpoint-container svelte-bdjvpc")},m(r,a){v(r,e,a),H(t,e,null),d(e,n),H(o,e,null),d(e,i),s=!0},p(r,a){const _={};a&32&&(_.endpoint_parameters=r[5].unnamed_endpoints[r[20]].parameters),a&64&&(_.js_parameters=r[6].unnamed_endpoints[r[20]].parameters),a&2&&(_.dependency=r[18]),a&4&&(_.current_language=r[2]),a&1&&(_.root=r[0]),a&16&&(_.dependency_failures=r[4]),t.$set(_);const c={};a&32&&(c.endpoint_returns=r[5].unnamed_endpoints[r[20]].returns),a&64&&(c.js_returns=r[6].unnamed_endpoints[r[20]].returns),a&8&&(c.is_running=r[3]),a&4&&(c.current_language=r[2]),o.$set(c)},i(r){s||(A(t.$$.fragment,r),A(o.$$.fragment,r),s=!0)},o(r){z(t.$$.fragment,r),z(o.$$.fragment,r),s=!1},d(r){r&&b(e),Y(t),Y(o)}}}function St(l){let e,t,n=l[5].unnamed_endpoints[l[20]]&&jt(l);return{c(){n&&n.c(),e=ce()},m(o,i){n&&n.m(o,i),v(o,e,i),t=!0},p(o,i){o[5].unnamed_endpoints[o[20]]?n?(n.p(o,i),i&32&&A(n,1)):(n=jt(o),n.c(),A(n,1),n.m(e.parentNode,e)):n&&(ne(),z(n,1,1,()=>{n=null}),le())},i(o){t||(A(n),t=!0)},o(o){z(n),t=!1},d(o){o&&b(e),n&&n.d(o)}}}function _s(l){let e,t,n=l[5]&&bt(l);return{c(){n&&n.c(),e=ce()},m(o,i){n&&n.m(o,i),v(o,e,i),t=!0},p(o,[i]){o[5]?n?(n.p(o,i),i&32&&A(n,1)):(n=bt(o),n.c(),A(n,1),n.m(e.parentNode,e)):n&&(ne(),z(n,1,1,()=>{n=null}),le())},i(o){t||(A(n),t=!0)},o(o){z(n),t=!1},d(o){o&&b(e),n&&n.d(o)}}}function cs(l,e,t){let{instance_map:n}=e,{dependencies:o}=e,{root:i}=e,{app:s}=e;i===""&&(i=location.protocol+"//"+location.host+location.pathname),i.endsWith("/")||(i+="/");let r="python";const a=[["python",is],["javascript",rs]];let _=!1;o.map(k=>k.inputs.map(I=>{let u=n[I].documentation?.example_data;return u===void 0?u="":typeof u=="object"&&(u=JSON.stringify(u)),u})),o.map(k=>new Array(k.outputs.length));let c=o.map(k=>new Array(k.inputs.length).fill(!1));async function g(){return await(await fetch(i+"info")).json()}async function p(){return await s.view_api()}let h,E;g().then(k=>t(5,h=k)),p().then(k=>t(6,E=k)),Fe(()=>(document.body.style.overflow="hidden","parentIFrame"in window&&window.parentIFrame?.scrollTo(0,0),()=>{document.body.style.overflow="auto"}));function j(k){ve.call(this,l,k)}const w=k=>t(2,r=k);function m(k){ve.call(this,l,k)}return l.$$set=k=>{"instance_map"in k&&t(8,n=k.instance_map),"dependencies"in k&&t(1,o=k.dependencies),"root"in k&&t(0,i=k.root),"app"in k&&t(9,s=k.app)},[i,o,r,_,c,h,E,a,n,s,j,w,m]}class us extends ie{constructor(e){super(),re(this,e,cs,_s,se,{instance_map:8,dependencies:1,root:0,app:9})}}const fs="https://gradio.s3-us-west-2.amazonaws.com/3.40.1/assets/logo-0a070fcf.svg";const{document:Ee}=Zo;function It(l){return Ee.title=l[3],{c:K,m:K,d:K}}function Ct(l){let e,t,n,o;return{c(){e=C("script"),e.innerHTML="",n=D(),o=C("script"),o.textContent=`window.dataLayer = window.dataLayer || 
[]; - function gtag() { - dataLayer.push(arguments); - } - gtag("js", new Date()); - gtag("config", "UA-156449732-1");`,e.async=!0,e.defer=!0,Se(e.src,t="https://www.googletagmanager.com/gtag/js?id=UA-156449732-1")||f(e,"src",t)},m(i,s){v(i,e,s),v(i,n,s),v(i,o,s)},d(i){i&&(b(e),b(n),b(o))}}}function Ot(l){let e,t;return e=new fo({props:{has_modes:l[12].has_modes,component:l[12].component,id:l[12].id,props:l[12].props,children:l[12].children,dynamic_ids:l[17],instance_map:l[18],root:l[1],target:l[5],theme_mode:l[10]}}),e.$on("mount",l[20]),e.$on("destroy",l[27]),{c(){U(e.$$.fragment)},m(n,o){H(e,n,o),t=!0},p(n,o){const i={};o[0]&4096&&(i.has_modes=n[12].has_modes),o[0]&4096&&(i.component=n[12].component),o[0]&4096&&(i.id=n[12].id),o[0]&4096&&(i.props=n[12].props),o[0]&4096&&(i.children=n[12].children),o[0]&2&&(i.root=n[1]),o[0]&32&&(i.target=n[5]),o[0]&1024&&(i.theme_mode=n[10]),e.$set(i)},i(n){t||(A(e.$$.fragment,n),t=!0)},o(n){z(e.$$.fragment,n),t=!1},d(n){Y(e,n)}}}function Pt(l){let e,t,n,o,i,s,r=l[6]&&At(l);return{c(){e=C("footer"),r&&r.c(),t=D(),n=C("a"),o=S(`Built with Gradio - `),i=C("img"),Se(i.src,s=fs)||f(i,"src",s),f(i,"alt","logo"),f(i,"class","svelte-1ax1toq"),f(n,"href","https://gradio.app"),f(n,"class","built-with svelte-1ax1toq"),f(n,"target","_blank"),f(n,"rel","noreferrer"),f(e,"class","svelte-1ax1toq")},m(a,_){v(a,e,_),r&&r.m(e,null),d(e,t),d(e,n),d(n,o),d(n,i)},p(a,_){a[6]?r?r.p(a,_):(r=At(a),r.c(),r.m(e,t)):r&&(r.d(1),r=null)},d(a){a&&b(e),r&&r.d()}}}function At(l){let e,t,n,o,i,s,r,a;return{c(){e=C("button"),t=S("Use via API "),n=C("img"),i=D(),s=C("div"),s.textContent="·",Se(n.src,o=mo)||f(n,"src",o),f(n,"alt",""),f(n,"class","svelte-1ax1toq"),f(e,"class","show-api svelte-1ax1toq"),f(s,"class","svelte-1ax1toq")},m(_,c){v(_,e,c),d(e,t),d(e,n),v(_,i,c),v(_,s,c),r||(a=ge(e,"click",l[28]),r=!0)},p:K,d(_){_&&(b(e),b(i),b(s)),r=!1,a()}}}function Tt(l){let e,t,n,o,i,s,r,a;return i=new us({props:{instance_map:l[18],dependencies:l[2],root:l[1],app:l[11]}}),i.$on("close",l[30]),{c(){e=C("div"),t=C("div"),n=D(),o=C("div"),U(i.$$.fragment),f(t,"class","backdrop svelte-1ax1toq"),f(o,"class","api-docs-wrap svelte-1ax1toq"),f(e,"class","api-docs svelte-1ax1toq")},m(_,c){v(_,e,c),d(e,t),d(e,n),d(e,o),H(i,o,null),s=!0,r||(a=ge(t,"click",l[29]),r=!0)},p(_,c){const g={};c[0]&4&&(g.dependencies=_[2]),c[0]&2&&(g.root=_[1]),c[0]&2048&&(g.app=_[11]),i.$set(g)},i(_){s||(A(i.$$.fragment,_),s=!0)},o(_){z(i.$$.fragment,_),s=!1},d(_){_&&b(e),Y(i),r=!1,a()}}}function Ft(l){let e,t;return e=new yi({props:{messages:l[14]}}),e.$on("close",l[19]),{c(){U(e.$$.fragment)},m(n,o){H(e,n,o),t=!0},p(n,o){const i={};o[0]&16384&&(i.messages=n[14]),e.$set(i)},i(n){t||(A(e.$$.fragment,n),t=!0)},o(n){z(e.$$.fragment,n),t=!1},d(n){Y(e,n)}}}function ds(l){let e,t,n,o,i,s,r,a,_,c,g=l[8]&&It(l),p=l[4]&&Ct(),h=l[0]&&Ot(l),E=l[7]&&Pt(l),j=l[13]&&l[0]&&Tt(l),w=l[14]&&Ft(l);return{c(){g&&g.c(),e=ce(),p&&p.c(),t=ce(),n=D(),o=C("div"),i=C("div"),h&&h.c(),s=D(),E&&E.c(),r=D(),j&&j.c(),a=D(),w&&w.c(),_=ce(),f(i,"class","contain"),fe(i,"flex-grow",l[9]?"1":"auto"),f(o,"class","wrap 
svelte-1ax1toq"),fe(o,"min-height",l[9]?"100%":"auto")},m(m,k){g&&g.m(Ee.head,null),d(Ee.head,e),p&&p.m(Ee.head,null),d(Ee.head,t),v(m,n,k),v(m,o,k),d(o,i),h&&h.m(i,null),d(o,s),E&&E.m(o,null),v(m,r,k),j&&j.m(m,k),v(m,a,k),w&&w.m(m,k),v(m,_,k),c=!0},p(m,k){m[8]?g||(g=It(m),g.c(),g.m(e.parentNode,e)):g&&(g.d(1),g=null),m[4]?p||(p=Ct(),p.c(),p.m(t.parentNode,t)):p&&(p.d(1),p=null),m[0]?h?(h.p(m,k),k[0]&1&&A(h,1)):(h=Ot(m),h.c(),A(h,1),h.m(i,null)):h&&(ne(),z(h,1,1,()=>{h=null}),le()),k[0]&512&&fe(i,"flex-grow",m[9]?"1":"auto"),m[7]?E?E.p(m,k):(E=Pt(m),E.c(),E.m(o,null)):E&&(E.d(1),E=null),k[0]&512&&fe(o,"min-height",m[9]?"100%":"auto"),m[13]&&m[0]?j?(j.p(m,k),k[0]&8193&&A(j,1)):(j=Tt(m),j.c(),A(j,1),j.m(a.parentNode,a)):j&&(ne(),z(j,1,1,()=>{j=null}),le()),m[14]?w?(w.p(m,k),k[0]&16384&&A(w,1)):(w=Ft(m),w.c(),A(w,1),w.m(_.parentNode,_)):w&&(ne(),z(w,1,1,()=>{w=null}),le())},i(m){c||(A(h),A(j),A(w),c=!0)},o(m){z(h),z(j),z(w),c=!1},d(m){m&&(b(n),b(o),b(r),b(a),b(_)),g&&g.d(m),b(e),p&&p.d(m),b(t),h&&h.d(),E&&E.d(),j&&j.d(m),w&&w.d(m)}}}const ps=/^'([^]+)'$/,ms="There is a long queue of requests pending. Duplicate this Space to skip.",gs="On mobile, the connection can break if this tab is unfocused or the device sleeps, losing your position in queue.",hs="Lost connection due to leaving page. Rejoining queue...",bs=15,vs=10;function zt(l,e,t){for(const n of t)for(const o of n[e])if(o===l)return!0;return!1}function ks(l){return Array.isArray(l)&&l.length===0||l===""||l===0||!l}function ws(l,e,t){let n;hr();let{root:o}=e,{components:i}=e,{layout:s}=e,{dependencies:r}=e,{title:a="Gradio"}=e,{analytics_enabled:_=!1}=e,{target:c}=e,{autoscroll:g}=e,{show_api:p=!0}=e,{show_footer:h=!0}=e,{control_page_title:E=!1}=e,{app_mode:j}=e,{theme_mode:w}=e,{app:m}=e,{space_id:k}=e,I=Wo();Jo(l,I,y=>t(26,n=y));let u={id:s.id,type:"column",props:{},has_modes:!1,instance:{},component:{}};i.push(u);const q=Object.getPrototypeOf(async function(){}).constructor;r.forEach(y=>{if(y.js){const V=y.backend_fn?y.inputs.length===1:y.outputs.length===1;try{y.frontend_fn=new q("__fn_args",`let result = await (${y.js})(...__fn_args); - return (${V} && !Array.isArray(result)) ? 
[result] : result;`)}catch(R){console.error("Could not parse custom js method."),console.error(R)}}});let F=new URLSearchParams(window.location.search).get("view")==="api";function M(y){t(13,F=y);let V=new URLSearchParams(window.location.search);y?V.set("view","api"):V.delete("view"),history.replaceState(null,"","?"+V.toString())}const W=new Set;for(const y of i){const{id:V,props:R}=y;(zt(V,"inputs",r)||!zt(V,"outputs",r)&&ks(R?.value))&&W.add(V)}let O=i.reduce((y,V)=>(y[V.id]=V,y),{});async function _e(y){try{const V=await Ei[y]();return{name:y,component:V}}catch(V){throw console.error(`failed to load: ${y}`),console.error(V),V}}const T=new Set,B=new Map;async function J(y){let V=O[y.id];const R=(await B.get(V.type)).component;V.component=R.Component,R.document&&(V.documentation=R.document(V.props)),R.modes&&R.modes.length>1&&(V.has_modes=!0),y.children&&(V.children=y.children.map(G=>O[G.id]),await Promise.all(y.children.map(G=>J(G))))}i.forEach(async y=>{const V=_e(y.type);T.add(V),B.set(y.type,V)});let{ready:pe=!1}=e;Promise.all(Array.from(T)).then(()=>{J(s).then(async()=>{t(0,pe=!0)}).catch(y=>{console.error(y)})});function P(y,V){const R=r[V].outputs;y?.forEach((G,ae)=>{const ue=O[R[ae]];if(ue.props.value_is_output=!0,typeof G=="object"&&G!==null&&G.__type__==="update")for(const[x,ee]of Object.entries(G))x!=="__type__"&&(ue.props[x]=ee);else ue.props.value=G}),t(12,u)}let Z=new Map;function ke(y,V,R){y?.props||(y.props={}),y.props[V]=R,t(12,u)}let me=[],oe=[];function we(y,V,R){return{message:y,fn_index:V,type:R,id:++vo}}let vo=-1,Ae=!1;document.addEventListener("visibilitychange",function(){document.visibilityState==="hidden"&&(Ae=!0)});const qe=/Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent);let Le=!1,Ve=!1;async function ye(y,V=null){let R=r[y];const G=I.get_status_for_fn(y);if(t(14,oe=oe.filter(({fn_index:x})=>x!==y)),R.cancels&&await Promise.all(R.cancels.map(async x=>{const ee=Z.get(x);return ee?.cancel(),ee})),G==="pending"||G==="generating")return;let ae={fn_index:y,data:R.inputs.map(x=>O[x].props.value),event_data:R.collects_event_data?V:null};R.frontend_fn?R.frontend_fn(ae.data.concat(R.outputs.map(x=>O[x].props.value))).then(x=>{R.backend_fn?(ae.data=x,ue()):P(x,y)}):R.backend_fn&&ue();function ue(){const x=m.submit(ae.fn_index,ae.data,ae.event_data).on("data",({data:ee,fn_index:X})=>{P(ee,X)}).on("status",({fn_index:ee,...X})=>{if(I.update({...X,status:X.stage,progress:X.progress_data,fn_index:ee}),!Le&&k!==null&&X.position!==void 0&&X.position>=2&&X.eta!==void 0&&X.eta>bs&&(Le=!0,t(14,oe=[we(ms,ee,"warning"),...oe])),!Ve&&qe&&X.eta!==void 0&&X.eta>vs&&(Ve=!0,t(14,oe=[we(gs,ee,"warning"),...oe])),X.stage==="complete"&&(r.map(async(Q,he)=>{Q.trigger_after===ee&&ye(he)}),x.destroy()),X.broken&&qe&&Ae)window.setTimeout(()=>{t(14,oe=[we(hs,ee,"error"),...oe])},0),ye(y,V),Ae=!1;else if(X.stage==="error"){if(X.message){const Q=X.message.replace(ps,(he,Te)=>Te);t(14,oe=[we(Q,ee,"error"),...oe])}r.map(async(Q,he)=>{Q.trigger_after===ee&&!Q.trigger_only_on_success&&ye(he)}),x.destroy()}}).on("log",({log:ee,fn_index:X,level:Q})=>{t(14,oe=[we(ee,X,Q),...oe])});Z.set(y,x)}}function ko(y,V){if(k===null)return;const R=new URL(`https://huggingface.co/spaces/${k}/discussions/new`);y!==void 0&&y.length>0&&R.searchParams.set("title",y),R.searchParams.set("description",V),window.open(R.toString(),"_blank")}function wo(y){const V=y.detail;t(14,oe=oe.filter(R=>R.id!==V))}const yo=y=>!!(y&&new URL(y,location.href).origin!==location.origin);let 
Re=[],De=[];async function Eo(){await Qo();for(var y=c.getElementsByTagName("a"),V=0;V{let{targets:ae,trigger:ue,inputs:x,outputs:ee}=R;const X=ae.map(Q=>[Q,O[Q]]);ae.length===0&&!me[G]?.includes(-1)&&ue==="load"&&ee.every(Q=>O?.[Q].instance)&&x.every(Q=>O?.[Q].instance)&&(ye(G),me[G]=[-1]),X.filter(Q=>!!Q&&!!Q[1]).forEach(([Q,{instance:he}])=>{me[G]?.includes(Q)||!he||(he?.$on(ue,Te=>{ye(G,Te.detail)}),me[G]||(me[G]=[]),me[G].push(Q))})}),i.forEach(R=>{R.props.show_share_button&&!De.includes(R.id)&&(De.push(R.id),R.instance.$on("share",G=>{const{title:ae,description:ue}=G.detail;ko(ae,ue)}))}),i.forEach(R=>{Re.includes(R.id)||R.instance&&(Re.push(R.id),R.instance.$on("error",G=>{t(14,oe=[we(G.detail,-1,"error"),...oe])}))})}function Me(y){me=me.map(V=>V.filter(R=>R!==y))}r.forEach((y,V)=>{I.register(V,y.inputs,y.outputs)});function jo(y){for(const R in y){let G=y[R],ae=r[G.fn_index];G.scroll_to_output=ae.scroll_to_output,G.show_progress=ae.show_progress,ke(O[R],"loading_status",G)}const V=I.get_inputs_to_update();for(const[R,G]of V)ke(O[R],"pending",G==="pending")}const So=({detail:y})=>Me(y),Io=()=>{M(!F)},Co=()=>{M(!1)},Oo=()=>{M(!1)};return l.$$set=y=>{"root"in y&&t(1,o=y.root),"components"in y&&t(22,i=y.components),"layout"in y&&t(23,s=y.layout),"dependencies"in y&&t(2,r=y.dependencies),"title"in y&&t(3,a=y.title),"analytics_enabled"in y&&t(4,_=y.analytics_enabled),"target"in y&&t(5,c=y.target),"autoscroll"in y&&t(24,g=y.autoscroll),"show_api"in y&&t(6,p=y.show_api),"show_footer"in y&&t(7,h=y.show_footer),"control_page_title"in y&&t(8,E=y.control_page_title),"app_mode"in y&&t(9,j=y.app_mode),"theme_mode"in y&&t(10,w=y.theme_mode),"app"in y&&t(11,m=y.app),"space_id"in y&&t(25,k=y.space_id),"ready"in y&&t(0,pe=y.ready)},l.$$.update=()=>{l.$$.dirty[0]&16777216&&Ko.update(y=>({...y,autoscroll:g})),l.$$.dirty[0]&67108864&&jo(n)},[pe,o,r,a,_,c,p,h,E,j,w,m,u,F,oe,I,M,W,O,wo,Eo,Me,i,s,g,k,n,So,Io,Co,Oo]}class js extends ie{constructor(e){super(),re(this,e,ws,ds,se,{root:1,components:22,layout:23,dependencies:2,title:3,analytics_enabled:4,target:5,autoscroll:24,show_api:6,show_footer:7,control_page_title:8,app_mode:9,theme_mode:10,app:11,space_id:25,ready:0},null,[-1,-1])}}export{js as default}; -//# sourceMappingURL=Blocks-ae22907c.js.map diff --git a/spaces/deepghs/anime_image_classification/preprocess.py b/spaces/deepghs/anime_image_classification/preprocess.py deleted file mode 100644 index 969ca5480c42d20b87f6dcdbd7e31f93e1094386..0000000000000000000000000000000000000000 --- a/spaces/deepghs/anime_image_classification/preprocess.py +++ /dev/null @@ -1,19 +0,0 @@ -from typing import Tuple, Optional - -import numpy as np -from PIL import Image -from imgutils.data import rgb_encode - - -def _img_encode(image: Image.Image, size: Tuple[int, int] = (384, 384), - normalize: Optional[Tuple[float, float]] = (0.5, 0.5)): - image = image.resize(size, Image.BILINEAR) - data = rgb_encode(image, order_='CHW') - - if normalize is not None: - mean_, std_ = normalize - mean = np.asarray([mean_]).reshape((-1, 1, 1)) - std = np.asarray([std_]).reshape((-1, 1, 1)) - data = (data - mean) / std - - return data.astype(np.float32) diff --git a/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/models/arcface_torch/utils/__init__.py b/spaces/deepskyreal/ai-mixer-hotchpotch/sad_talker/src/face3d/models/arcface_torch/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/dentadelta123/grammarly/app.py 
b/spaces/dentadelta123/grammarly/app.py deleted file mode 100644 index 93073e0f67c2bb11f18b28e520f15bc529439060..0000000000000000000000000000000000000000 --- a/spaces/dentadelta123/grammarly/app.py +++ /dev/null @@ -1,35 +0,0 @@ -from transformers import T5Tokenizer, T5ForConditionalGeneration -import torch -import gradio as gr - -device = 'cuda' if torch.cuda.is_available() else 'cpu' -model = T5ForConditionalGeneration.from_pretrained("vennify/t5-base-grammar-correction") -tokenizer = T5Tokenizer.from_pretrained("vennify/t5-base-grammar-correction") -model.to(device) -model.eval() - - -def generate_text(text): - text = f'grammar: {text}' - input_ids = tokenizer( - text, return_tensors="pt" - ).input_ids - input_ids = input_ids.to(device) - - outputs = model.generate(input_ids, max_length=512, early_stopping=True) - - return tokenizer.decode(outputs[0], skip_special_tokens=True) - - -with gr.Blocks() as deeplearning: - with gr.Row(): - with gr.Column(): - text = gr.TextArea(placeholder="Enter your text here...") - button = gr.Button(label="Correct") - output = gr.outputs.Textbox(label="Corrected Text") - - - button.click(generate_text, inputs=text, outputs=output) - - -deeplearning.launch() \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/CRACK Adobe After Effects CC 12.1.0.168 Final Multilanguage [ChingLiu] [EXCLUSIVE].md b/spaces/diacanFperku/AutoGPT/CRACK Adobe After Effects CC 12.1.0.168 Final Multilanguage [ChingLiu] [EXCLUSIVE].md deleted file mode 100644 index e61b333d29f73ac05d50b1d5b13e683eada30a01..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/CRACK Adobe After Effects CC 12.1.0.168 Final Multilanguage [ChingLiu] [EXCLUSIVE].md +++ /dev/null @@ -1,5 +0,0 @@ -
        -

        mydowy45434553 [url= [url= Futurama Episode 4.13: Tauron Part 2 (2004) download 720p][url= [url=hudmashand [url= sesspaphpag [url= Telling beans for berries, Ttutzipncreeni lNeMeao hd.in.pomodoro [url= linutpbolu [url=]Mediaroma.eu[/url] sesspaphpag [url= 2nd star wars download 720p [url= mydowy45434553 [url= renardiopp [url= mlsvdksr [url= 06 - Inve$ View Download Full Version 2020 [url= i aaaxvn fm (GOSTAR for pc) [url= Mommy e tatuar no cor [url= rekis.kk [url= Phil collins hello i must be going lossless download 720p [url=

        Citro is less worried about Dylan's father having sex with young boys, Florida (zippin979) 12857218 [url= gebo, 08002388671 [url= Adobe After Effects CC 2017 Full Crack Multilanguage Keygen [ChingLiu][ChingLiu] [url= Elf, 0C5EB5F01 [url= eub4cdfb3 [url= neenikai [url= jk.13youtube.com [url= thama8 [url= [url= FTP NETWORK SITE] [url= re]LISBON 2016 [url= McWorld [url= 2nd star wars, [url= pwetloc [url= rD0Fsujajf [url= [url= pkcLDjGYERZZ [url= SODA [url= tolona weilddad.com [url= cauauefdy [url= howdie [url= stafuor1.info [url= tPCKvh8CKS [url= b69dfaab 5]]]][url= vasilii.org [url= xtvteu [url= host.6vid.to [url= b6a3ceb3 [url= [url= xtvtye [url= s2vapbb [url= [url= jcjxnccu [url= svenskna SvtNrJENS [url= [url= [url= jcjxnccu [url= gujqjt [url= c3yjt][url= [url= [url= bls6t](http://www.geetikawinz.com/pub/mass-sex-houdini-download/crystal-clear-4gb/11/download/159621521771/8208) Hannibal, boracay-chingpond.net, iMGSRC.RU [url= [url= PemsCobra Narspis 0d00cc6c-535d-437c-9d1a-85f196f6 [url= Hemmisfoto i (15) iMGSRC.RU [url= Sweet girls 12yo on the beach, 22098205NXZ iMGSRC.RU [url= roms neoragex 5.

        -

        CRACK Adobe After Effects CC 12.1.0.168 Final Multilanguage [ChingLiu]


        Download File ===== https://gohhs.com/2uFVxe



        899543212b
        -
        -
        \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Gta 4 Episodes From Liberty City Key 100 Working Utorrent.md b/spaces/diacanFperku/AutoGPT/Gta 4 Episodes From Liberty City Key 100 Working Utorrent.md deleted file mode 100644 index 86d552d4e078941d1a6894e3c49957539994e04c..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Gta 4 Episodes From Liberty City Key 100 Working Utorrent.md +++ /dev/null @@ -1,6 +0,0 @@ -

        gta 4 episodes from liberty city key 100% working utorrent


        Download Zip https://gohhs.com/2uFUYU



        -
        -List of PSX games for PS3 in PKG, CFW 4 ONLY. ... Studio Deen Adapted from the story titled Luger Code 1951 by Ryūto Hagi. ... His work is one of the. Game manual ps3 game Folklore. [GUIDE] PS3 HDD replacement. torrent - EmuRoms. ... godzilla dlc [jpn] grand theft auto iv episodes from liberty city dlc [us/eu] grand ... 4d29de3e1b
        -
        -
        -

        diff --git a/spaces/diaoren/OpenSetObstacleDetection/opendet2/modeling/roi_heads/roi_heads.py b/spaces/diaoren/OpenSetObstacleDetection/opendet2/modeling/roi_heads/roi_heads.py deleted file mode 100644 index 479f3c5c3ce02e694a13bd4d83fb22622a59073d..0000000000000000000000000000000000000000 --- a/spaces/diaoren/OpenSetObstacleDetection/opendet2/modeling/roi_heads/roi_heads.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import logging -from typing import Dict, List - -import numpy as np -import torch -import torch.nn.functional as F -from detectron2.config import configurable -from detectron2.layers import ShapeSpec -from detectron2.modeling.poolers import ROIPooler -from detectron2.modeling.roi_heads.box_head import build_box_head -from detectron2.modeling.roi_heads.roi_heads import ( - ROI_HEADS_REGISTRY, StandardROIHeads, add_ground_truth_to_proposals) -from detectron2.structures import Boxes, Instances, pairwise_iou -from detectron2.utils.events import get_event_storage -from detectron2.utils.registry import Registry -from torch import nn - -from .fast_rcnn import build_roi_box_output_layers - -logger = logging.getLogger(__name__) - - -@ROI_HEADS_REGISTRY.register() -class OpenSetStandardROIHeads(StandardROIHeads): - - @torch.no_grad() - def label_and_sample_proposals(self, proposals: List[Instances], targets: List[Instances]) -> List[Instances]: - if self.proposal_append_gt: - proposals = add_ground_truth_to_proposals(targets, proposals) - - proposals_with_gt = [] - - num_fg_samples = [] - num_bg_samples = [] - for proposals_per_image, targets_per_image in zip(proposals, targets): - has_gt = len(targets_per_image) > 0 - match_quality_matrix = pairwise_iou( - targets_per_image.gt_boxes, proposals_per_image.proposal_boxes - ) - matched_idxs, matched_labels = self.proposal_matcher( - match_quality_matrix) - sampled_idxs, gt_classes = self._sample_proposals( - matched_idxs, matched_labels, targets_per_image.gt_classes - ) - - # Set target attributes of the sampled proposals: - proposals_per_image = proposals_per_image[sampled_idxs] - proposals_per_image.gt_classes = gt_classes - # NOTE: add iou of each proposal - ious, _ = match_quality_matrix.max(dim=0) - proposals_per_image.iou = ious[sampled_idxs] - - if has_gt: - sampled_targets = matched_idxs[sampled_idxs] - for (trg_name, trg_value) in targets_per_image.get_fields().items(): - if trg_name.startswith("gt_") and not proposals_per_image.has(trg_name): - proposals_per_image.set( - trg_name, trg_value[sampled_targets]) - - num_bg_samples.append( - (gt_classes == self.num_classes).sum().item()) - num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1]) - proposals_with_gt.append(proposals_per_image) - - # Log the number of fg/bg samples that are selected for training ROI heads - storage = get_event_storage() - storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples)) - storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples)) - - return proposals_with_gt - - @classmethod - def _init_box_head(cls, cfg, input_shape): - # fmt: off - in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES#['p2','p3','p4','p5'] - pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION#7 - pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)#(0.25, 0.125, 0.0625, 0.03125) - sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO#0 - pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE#'ROIAlignV2' - # fmt: on - - # If StandardROIHeads is applied on multiple feature 
maps (as in FPN), - # then we share the same predictors and therefore the channel counts must be the same - in_channels = [input_shape[f].channels for f in in_features]#[256, 256, 256, 256] - # Check all channel counts are equal - assert len(set(in_channels)) == 1, in_channels - in_channels = in_channels[0] - - # - box_pooler = ROIPooler( - output_size=pooler_resolution, - scales=pooler_scales, - sampling_ratio=sampling_ratio, - pooler_type=pooler_type, - ) - - #flatten+fc+relu+fc+relu将(256,7,7)转为1024 - box_head = build_box_head( - cfg, ShapeSpec(channels=in_channels, - height=pooler_resolution, width=pooler_resolution) - ) - # register output layers - box_predictor = build_roi_box_output_layers(cfg, box_head.output_shape) - return { - "box_in_features": in_features, - "box_pooler": box_pooler, - "box_head": box_head, - "box_predictor": box_predictor, - } - diff --git a/spaces/digitalxingtong/Jiuxia-Bert-Vits2/data_utils.py b/spaces/digitalxingtong/Jiuxia-Bert-Vits2/data_utils.py deleted file mode 100644 index 2c98d3dc8b9572bd05859033a74d155425a2a2ab..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Jiuxia-Bert-Vits2/data_utils.py +++ /dev/null @@ -1,332 +0,0 @@ -import time -import os -import random -import numpy as np -import torch -import torch.utils.data -import torchaudio -import commons -from mel_processing import spectrogram_torch, mel_spectrogram_torch, spec_to_mel_torch -from utils import load_wav_to_torch, load_filepaths_and_text -from text import cleaned_text_to_sequence, get_bert - -"""Multi speaker version""" - - -class TextAudioSpeakerLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. 
- """ - - def __init__(self, audiopaths_sid_text, hparams): - self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text) - self.max_wav_value = hparams.max_wav_value - self.sampling_rate = hparams.sampling_rate - self.filter_length = hparams.filter_length - self.hop_length = hparams.hop_length - self.win_length = hparams.win_length - self.sampling_rate = hparams.sampling_rate - self.spk_map = hparams.spk2id - self.hparams = hparams - - self.use_mel_spec_posterior = getattr(hparams, "use_mel_posterior_encoder", False) - if self.use_mel_spec_posterior: - self.n_mel_channels = getattr(hparams, "n_mel_channels", 80) - - self.cleaned_text = getattr(hparams, "cleaned_text", False) - - self.add_blank = hparams.add_blank - self.min_text_len = getattr(hparams, "min_text_len", 1) - self.max_text_len = getattr(hparams, "max_text_len", 300) - - random.seed(1234) - random.shuffle(self.audiopaths_sid_text) - self._filter() - - def _filter(self): - """ - Filter text & store spec lengths - """ - # Store spectrogram lengths for Bucketing - # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) - # spec_length = wav_length // hop_length - - audiopaths_sid_text_new = [] - lengths = [] - skipped = 0 - for _id, spk, language, text, phones, tone, word2ph in self.audiopaths_sid_text: - audiopath = f'{_id}' - if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len: - phones = phones.split(" ") - tone = [int(i) for i in tone.split(" ")] - word2ph = [int(i) for i in word2ph.split(" ")] - audiopaths_sid_text_new.append([audiopath, spk, language, text, phones, tone, word2ph]) - lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length)) - else: - skipped += 1 - print("skipped: ", skipped, ", total: ", len(self.audiopaths_sid_text)) - self.audiopaths_sid_text = audiopaths_sid_text_new - self.lengths = lengths - - def get_audio_text_speaker_pair(self, audiopath_sid_text): - # separate filename, speaker_id and text - audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text - - bert, phones, tone, language = self.get_text(text, word2ph, phones, tone, language, audiopath) - - spec, wav = self.get_audio(audiopath) - sid = torch.LongTensor([int(self.spk_map[sid])]) - return (phones, spec, wav, sid, tone, language, bert) - - def get_audio(self, filename): - audio_norm, sampling_rate = torchaudio.load(filename, frame_offset=0, num_frames=-1, normalize=True, channels_first=True) - ''' - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} {} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - ''' - spec_filename = filename.replace(".wav", ".spec.pt") - if self.use_mel_spec_posterior: - spec_filename = spec_filename.replace(".spec.pt", ".mel.pt") - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - if self.use_mel_spec_posterior: - # if os.path.exists(filename.replace(".wav", ".spec.pt")): - # # spec, n_fft, num_mels, sampling_rate, fmin, fmax - # spec = spec_to_mel_torch( - # torch.load(filename.replace(".wav", ".spec.pt")), - # self.filter_length, self.n_mel_channels, self.sampling_rate, - # self.hparams.mel_fmin, self.hparams.mel_fmax) - spec = mel_spectrogram_torch(audio_norm, self.filter_length, - self.n_mel_channels, self.sampling_rate, self.hop_length, - self.win_length, self.hparams.mel_fmin, self.hparams.mel_fmax, center=False) - else: - spec 
= spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - return spec, audio_norm - - def get_text(self, text, word2ph, phone, tone, language_str, wav_path): - # print(text, word2ph,phone, tone, language_str) - pold = phone - w2pho = [i for i in word2ph] - word2ph = [i for i in word2ph] - phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str) - pold2 = phone - - if self.add_blank: - p1 = len(phone) - phone = commons.intersperse(phone, 0) - p2 = len(phone) - t1 = len(tone) - tone = commons.intersperse(tone, 0) - t2 = len(tone) - language = commons.intersperse(language, 0) - for i in range(len(word2ph)): - word2ph[i] = word2ph[i] * 2 - word2ph[0] += 1 - bert_path = wav_path.replace(".wav", ".bert.pt") - try: - bert = torch.load(bert_path) - assert bert.shape[-1] == len(phone) - except: - bert = get_bert(text, word2ph, language_str) - torch.save(bert, bert_path) - #print(bert.shape[-1], bert_path, text, pold) - assert bert.shape[-1] == len(phone) - - assert bert.shape[-1] == len(phone), ( - bert.shape, len(phone), sum(word2ph), p1, p2, t1, t2, pold, pold2, word2ph, text, w2pho) - phone = torch.LongTensor(phone) - tone = torch.LongTensor(tone) - language = torch.LongTensor(language) - return bert, phone, tone, language - - def get_sid(self, sid): - sid = torch.LongTensor([int(sid)]) - return sid - - def __getitem__(self, index): - return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index]) - - def __len__(self): - return len(self.audiopaths_sid_text) - - -class TextAudioSpeakerCollate(): - """ Zero-pads model inputs and targets - """ - - def __init__(self, return_ids=False): - self.return_ids = return_ids - - def __call__(self, batch): - """Collate's training batch from normalized text, audio and speaker identities - PARAMS - ------ - batch: [text_normalized, spec_normalized, wav_normalized, sid] - """ - # Right zero-pad all one-hot text sequences to max input length - _, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[1].size(1) for x in batch]), - dim=0, descending=True) - - max_text_len = max([len(x[0]) for x in batch]) - max_spec_len = max([x[1].size(1) for x in batch]) - max_wav_len = max([x[2].size(1) for x in batch]) - - text_lengths = torch.LongTensor(len(batch)) - spec_lengths = torch.LongTensor(len(batch)) - wav_lengths = torch.LongTensor(len(batch)) - sid = torch.LongTensor(len(batch)) - - text_padded = torch.LongTensor(len(batch), max_text_len) - tone_padded = torch.LongTensor(len(batch), max_text_len) - language_padded = torch.LongTensor(len(batch), max_text_len) - bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len) - - spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len) - wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) - text_padded.zero_() - tone_padded.zero_() - language_padded.zero_() - spec_padded.zero_() - wav_padded.zero_() - bert_padded.zero_() - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - text = row[0] - text_padded[i, :text.size(0)] = text - text_lengths[i] = text.size(0) - - spec = row[1] - spec_padded[i, :, :spec.size(1)] = spec - spec_lengths[i] = spec.size(1) - - wav = row[2] - wav_padded[i, :, :wav.size(1)] = wav - wav_lengths[i] = wav.size(1) - - sid[i] = row[3] - - tone = row[4] - tone_padded[i, :tone.size(0)] = tone - - language = row[5] - language_padded[i, :language.size(0)] = language - - bert 
= row[6] - bert_padded[i, :, :bert.size(1)] = bert - - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, tone_padded, language_padded, bert_padded - - -class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): - """ - Maintain similar input lengths in a batch. - Length groups are specified by boundaries. - Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}. - - It removes samples which are not included in the boundaries. - Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded. - """ - - def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True): - super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) - self.lengths = dataset.lengths - self.batch_size = batch_size - self.boundaries = boundaries - - self.buckets, self.num_samples_per_bucket = self._create_buckets() - self.total_size = sum(self.num_samples_per_bucket) - self.num_samples = self.total_size // self.num_replicas - - def _create_buckets(self): - buckets = [[] for _ in range(len(self.boundaries) - 1)] - for i in range(len(self.lengths)): - length = self.lengths[i] - idx_bucket = self._bisect(length) - if idx_bucket != -1: - buckets[idx_bucket].append(i) - - for i in range(len(buckets) - 1, 0, -1): - if len(buckets[i]) == 0: - buckets.pop(i) - self.boundaries.pop(i + 1) - - num_samples_per_bucket = [] - for i in range(len(buckets)): - len_bucket = len(buckets[i]) - total_batch_size = self.num_replicas * self.batch_size - rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size - num_samples_per_bucket.append(len_bucket + rem) - return buckets, num_samples_per_bucket - - def __iter__(self): - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch) - - indices = [] - if self.shuffle: - for bucket in self.buckets: - indices.append(torch.randperm(len(bucket), generator=g).tolist()) - else: - for bucket in self.buckets: - indices.append(list(range(len(bucket)))) - - batches = [] - for i in range(len(self.buckets)): - bucket = self.buckets[i] - len_bucket = len(bucket) - if (len_bucket == 0): - continue - ids_bucket = indices[i] - num_samples_bucket = self.num_samples_per_bucket[i] - - # add extra samples to make it evenly divisible - rem = num_samples_bucket - len_bucket - ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)] - - # subsample - ids_bucket = ids_bucket[self.rank::self.num_replicas] - - # batching - for j in range(len(ids_bucket) // self.batch_size): - batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]] - batches.append(batch) - - if self.shuffle: - batch_ids = torch.randperm(len(batches), generator=g).tolist() - batches = [batches[i] for i in batch_ids] - self.batches = batches - - assert len(self.batches) * self.batch_size == self.num_samples - return iter(self.batches) - - def _bisect(self, x, lo=0, hi=None): - if hi is None: - hi = len(self.boundaries) - 1 - - if hi > lo: - mid = (hi + lo) // 2 - if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]: - return mid - elif x <= self.boundaries[mid]: - return self._bisect(x, lo, mid) - else: - return self._bisect(x, mid + 1, hi) - else: - return -1 - - def __len__(self): - return self.num_samples // self.batch_size diff --git a/spaces/digitalxingtong/Lixiang-Bert-Vits2/monotonic_align/core.py 
b/spaces/digitalxingtong/Lixiang-Bert-Vits2/monotonic_align/core.py deleted file mode 100644 index 5ff728cd74c9228346a82ec64a9829cb98ad315e..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Lixiang-Bert-Vits2/monotonic_align/core.py +++ /dev/null @@ -1,36 +0,0 @@ -import numba - - -@numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]), - nopython=True, nogil=True) -def maximum_path_jit(paths, values, t_ys, t_xs): - b = paths.shape[0] - max_neg_val = -1e9 - for i in range(int(b)): - path = paths[i] - value = values[i] - t_y = t_ys[i] - t_x = t_xs[i] - - v_prev = v_cur = 0.0 - index = t_x - 1 - - for y in range(t_y): - for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - if x == y: - v_cur = max_neg_val - else: - v_cur = value[y - 1, x] - if x == 0: - if y == 0: - v_prev = 0. - else: - v_prev = max_neg_val - else: - v_prev = value[y - 1, x - 1] - value[y, x] += max(v_prev, v_cur) - - for y in range(t_y - 1, -1, -1): - path[y, index] = 1 - if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]): - index = index - 1 \ No newline at end of file diff --git a/spaces/digitalxingtong/Shanbao-Bert-VITS2/text/japanese.py b/spaces/digitalxingtong/Shanbao-Bert-VITS2/text/japanese.py deleted file mode 100644 index ddedafa0c5b7986068dc6c91637a86febc3923a9..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Shanbao-Bert-VITS2/text/japanese.py +++ /dev/null @@ -1,104 +0,0 @@ -# modified from https://github.com/CjangCjengh/vits/blob/main/text/japanese.py -import re -import sys - -import pyopenjtalk - -from text import symbols - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile( - r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile( - r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (symbol, Japanese) pairs for marks: -_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('%', 'パーセント') -]] - - -# List of (consonant, sokuon) pairs: -_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'Q([↑↓]*[kg])', r'k#\1'), - (r'Q([↑↓]*[tdjʧ])', r't#\1'), - (r'Q([↑↓]*[sʃ])', r's\1'), - (r'Q([↑↓]*[pb])', r'p#\1') -]] - -# List of (consonant, hatsuon) pairs: -_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'N([↑↓]*[pbm])', r'm\1'), - (r'N([↑↓]*[ʧʥj])', r'n^\1'), - (r'N([↑↓]*[tdn])', r'n\1'), - (r'N([↑↓]*[kg])', r'ŋ\1') -]] - - - -def post_replace_ph(ph): - rep_map = { - ':': ',', - ';': ',', - ',': ',', - '。': '.', - '!': '!', - '?': '?', - '\n': '.', - "·": ",", - '、': ",", - '...': '…', - 'v': "V" - } - if ph in rep_map.keys(): - ph = rep_map[ph] - if ph in symbols: - return ph - if ph not in symbols: - ph = 'UNK' - return ph - -def symbols_to_japanese(text): - for regex, replacement in _symbols_to_japanese: - text = re.sub(regex, replacement, text) - return text - - -def preprocess_jap(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - text = symbols_to_japanese(text) - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = [] - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - p = pyopenjtalk.g2p(sentence) - text += p.split(" ") - - if i < len(marks): - text += 
[marks[i].replace(' ', '')] - return text - -def text_normalize(text): - # todo: jap text normalize - return text - -def g2p(norm_text): - phones = preprocess_jap(norm_text) - phones = [post_replace_ph(i) for i in phones] - # todo: implement tones and word2ph - tones = [0 for i in phones] - word2ph = [1 for i in phones] - return phones, tones, word2ph - - -if __name__ == '__main__': - for line in open("../../../Downloads/transcript_utf8.txt").readlines(): - text = line.split(":")[1] - phones, tones, word2ph = g2p(text) - for p in phones: - if p == "z": - print(text, phones) - sys.exit(0) diff --git a/spaces/digitalxingtong/Xingtong-Longread-Dongmuchang-Bert-VITS2/modules.py b/spaces/digitalxingtong/Xingtong-Longread-Dongmuchang-Bert-VITS2/modules.py deleted file mode 100644 index 92e0f32a51c472bfd1659a50a95a95d195281d2b..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Xingtong-Longread-Dongmuchang-Bert-VITS2/modules.py +++ /dev/null @@ -1,452 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform -from attentions import Encoder - -LRELU_SLOPE = 0.1 - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." 
- - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) 
- - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = 
torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
- - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x -class TransformerCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - n_layers, - n_heads, - p_dropout=0, - filter_channels=0, - mean_only=False, - wn_sharing_parameter=None, - gin_channels = 0 - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, isflow = True, gin_channels = gin_channels) if wn_sharing_parameter is None else wn_sharing_parameter - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/divyahansg/text-generation-webui-space/extensions/gallery/script.py b/spaces/divyahansg/text-generation-webui-space/extensions/gallery/script.py deleted file mode 100644 index 8a2d7cf988734a7ab0966d047ff3d31ba58324b7..0000000000000000000000000000000000000000 --- a/spaces/divyahansg/text-generation-webui-space/extensions/gallery/script.py +++ /dev/null @@ -1,82 +0,0 @@ -from pathlib import Path - -import gradio as gr - -from modules.html_generator import get_image_cache - - -def generate_html(): - css = """ - .character-gallery { - margin: 1rem 0; - display: grid; - grid-template-columns: repeat(auto-fit, minmax(150px, 1fr)); - grid-column-gap: 0.4rem; - grid-row-gap: 1.2rem; - } - - .character-container { - cursor: pointer; - text-align: center; - position: relative; - opacity: 0.85; - } - - .character-container:hover { - opacity: 1; - } - - .character-container .placeholder, .character-container img { - width: 150px; - height: 200px; - background-color: gray; - object-fit: cover; - margin: 0 auto; - border-radius: 1rem; - border: 3px solid white; - box-shadow: 3px 3px 6px 0px 
rgb(0 0 0 / 50%); - } - - .character-name { - margin-top: 0.3rem; - display: block; - font-size: 1.2rem; - font-weight: 600; - overflow-wrap: anywhere; - } - """ - - container_html = f'" - return container_html - -def ui(): - with gr.Accordion("Character gallery"): - update = gr.Button("Refresh") - gallery = gr.HTML(value=generate_html()) - update.click(generate_html, [], gallery) diff --git a/spaces/dolceschokolade/chatbot-mini/components/Promptbar/components/PromptModal.tsx b/spaces/dolceschokolade/chatbot-mini/components/Promptbar/components/PromptModal.tsx deleted file mode 100644 index 81bd26cedf428ba31308e0ce40024f0c237c6b0b..0000000000000000000000000000000000000000 --- a/spaces/dolceschokolade/chatbot-mini/components/Promptbar/components/PromptModal.tsx +++ /dev/null @@ -1,130 +0,0 @@ -import { FC, KeyboardEvent, useEffect, useRef, useState } from 'react'; - -import { useTranslation } from 'next-i18next'; - -import { Prompt } from '@/types/prompt'; - -interface Props { - prompt: Prompt; - onClose: () => void; - onUpdatePrompt: (prompt: Prompt) => void; -} - -export const PromptModal: FC = ({ prompt, onClose, onUpdatePrompt }) => { - const { t } = useTranslation('promptbar'); - const [name, setName] = useState(prompt.name); - const [description, setDescription] = useState(prompt.description); - const [content, setContent] = useState(prompt.content); - - const modalRef = useRef(null); - const nameInputRef = useRef(null); - - const handleEnter = (e: KeyboardEvent) => { - if (e.key === 'Enter' && !e.shiftKey) { - onUpdatePrompt({ ...prompt, name, description, content: content.trim() }); - onClose(); - } - }; - - useEffect(() => { - const handleMouseDown = (e: MouseEvent) => { - if (modalRef.current && !modalRef.current.contains(e.target as Node)) { - window.addEventListener('mouseup', handleMouseUp); - } - }; - - const handleMouseUp = (e: MouseEvent) => { - window.removeEventListener('mouseup', handleMouseUp); - onClose(); - }; - - window.addEventListener('mousedown', handleMouseDown); - - return () => { - window.removeEventListener('mousedown', handleMouseDown); - }; - }, [onClose]); - - useEffect(() => { - nameInputRef.current?.focus(); - }, []); - - return ( -
        -
        -
        -