diff --git a/spaces/1368565466ki/ZSTRD/text/cleaners.py b/spaces/1368565466ki/ZSTRD/text/cleaners.py deleted file mode 100644 index d26581deb399609163518054718ad80ecca5d934..0000000000000000000000000000000000000000 --- a/spaces/1368565466ki/ZSTRD/text/cleaners.py +++ /dev/null @@ -1,475 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - -import re -from unidecode import unidecode -import pyopenjtalk -from jamo import h2j, j2hcj -from pypinyin import lazy_pinyin, BOPOMOFO -import jieba, cn2an - - -# This is a list of Korean classifiers preceded by pure Korean numerals. -_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통' - -# Regular expression matching whitespace: -_whitespace_re = re.compile(r'\s+') - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - -# List of (hangul, hangul divided) pairs: -_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄳ', 'ㄱㅅ'), - ('ㄵ', 'ㄴㅈ'), - ('ㄶ', 'ㄴㅎ'), - ('ㄺ', 'ㄹㄱ'), - ('ㄻ', 'ㄹㅁ'), - ('ㄼ', 'ㄹㅂ'), - ('ㄽ', 'ㄹㅅ'), - ('ㄾ', 'ㄹㅌ'), - ('ㄿ', 'ㄹㅍ'), - ('ㅀ', 'ㄹㅎ'), - ('ㅄ', 'ㅂㅅ'), - ('ㅘ', 'ㅗㅏ'), - ('ㅙ', 'ㅗㅐ'), - ('ㅚ', 'ㅗㅣ'), - ('ㅝ', 'ㅜㅓ'), - ('ㅞ', 'ㅜㅔ'), - ('ㅟ', 'ㅜㅣ'), - ('ㅢ', 'ㅡㅣ'), - ('ㅑ', 'ㅣㅏ'), - ('ㅒ', 'ㅣㅐ'), - ('ㅕ', 'ㅣㅓ'), - ('ㅖ', 'ㅣㅔ'), - ('ㅛ', 'ㅣㅗ'), - ('ㅠ', 'ㅣㅜ') -]] - -# List of (Latin alphabet, hangul) pairs: -_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', '에이'), - ('b', '비'), - ('c', '시'), - ('d', '디'), - ('e', '이'), - ('f', '에프'), - ('g', '지'), - ('h', '에이치'), - ('i', '아이'), - ('j', '제이'), - ('k', '케이'), - ('l', '엘'), - ('m', '엠'), - ('n', '엔'), - ('o', '오'), - ('p', '피'), - ('q', '큐'), - ('r', '아르'), - ('s', '에스'), - ('t', '티'), - ('u', '유'), - ('v', '브이'), - ('w', '더블유'), - ('x', '엑스'), - ('y', '와이'), - ('z', '제트') -]] - -# List of (Latin alphabet, bopomofo) pairs: -_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'ㄟˉ'), - ('b', 'ㄅㄧˋ'), - ('c', 'ㄙㄧˉ'), - ('d', 'ㄉㄧˋ'), - ('e', 'ㄧˋ'), - ('f', 'ㄝˊㄈㄨˋ'), - ('g', 'ㄐㄧˋ'), - ('h', 'ㄝˇㄑㄩˋ'), - ('i', 'ㄞˋ'), - ('j', 'ㄐㄟˋ'), - ('k', 'ㄎㄟˋ'), - ('l', 'ㄝˊㄛˋ'), - ('m', 'ㄝˊㄇㄨˋ'), - ('n', 'ㄣˉ'), - ('o', 'ㄡˉ'), - ('p', 'ㄆㄧˉ'), - ('q', 'ㄎㄧㄡˉ'), - ('r', 'ㄚˋ'), - ('s', 'ㄝˊㄙˋ'), - ('t', 'ㄊㄧˋ'), - ('u', 'ㄧㄡˉ'), - ('v', 'ㄨㄧˉ'), - ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), - ('x', 'ㄝˉㄎㄨˋㄙˋ'), - ('y', 'ㄨㄞˋ'), - ('z', 'ㄗㄟˋ') -]] - - -# List of (bopomofo, romaji) pairs: -_bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'ʧ⁼'), - ('ㄑ', 'ʧʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ʦ`⁼'), - ('ㄔ', 'ʦ`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ʦ⁼'), - ('ㄘ', 'ʦʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'e'), - ('ㄞ', 'ai'), - ('ㄟ', 'ei'), - ('ㄠ', 'au'), - ('ㄡ', 'ou'), - ('ㄧㄢ', 'yeNN'), - ('ㄢ', 'aNN'), - ('ㄧㄣ', 'iNN'), - ('ㄣ', 'əNN'), - ('ㄤ', 'aNg'), - ('ㄧㄥ', 'iNg'), - ('ㄨㄥ', 'uNg'), - ('ㄩㄥ', 'yuNg'), - ('ㄥ', 'əNg'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', '↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def lowercase(text): - return text.lower() - - -def collapse_whitespace(text): - return re.sub(_whitespace_re, ' ', text) - - -def convert_to_ascii(text): - return unidecode(text) - - -def japanese_to_romaji_with_accent(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if 
re.match(_japanese_characters, sentence): - if text!='': - text+=' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil','pau']: - text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q') - else: - continue - n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']: - a2_next=-1 - else: - a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if iHow to Fix Forza Horizon 4 Crashing PC -

                Forza Horizon 4 is a popular racing game that offers stunning graphics and realistic driving physics. However, some players have reported that the game keeps crashing on their PC, preventing them from enjoying the game. If you are one of them, don't worry. In this article, we will show you some possible solutions to fix Forza Horizon 4 crashing on your PC.
                

-

                What Causes Forza Horizon 4 to Crash on PC?
                

-

There are many possible reasons why Forza Horizon 4 crashes on your PC, such as:

-

forza 4 keeps crashing pc


DOWNLOAD 🆗 https://byltly.com/2uKyMm



                - An incompatible or outdated Windows update
                - An outdated or corrupted graphics driver
                - Antivirus software interfering with the game's files
                

                How to Fix Forza Horizon 4 Crashing on PC?
                

-

                To fix Forza Horizon 4 crashing on your PC, you can try the following methods:
                

-

Method 1: Update Your Windows

-

                One of the common causes of Forza Horizon 4 crashing on PC is an incompatible Windows update. To ensure that your system is running the latest version of Windows, you can follow these steps:
                

-
    -
                1. Click the Start button and type "updates" in the search box.
                2. Select "Check for updates" from the results.
                3. Windows will automatically check for and install any available updates.
                4. Restart your PC and launch Forza Horizon 4 to see if it works.
                

Method 2: Update Your Graphics Driver

-

                An outdated or corrupted graphics driver can also cause Forza Horizon 4 to crash on PC. To update your graphics driver, you can use a reliable driver updater tool like Driver Easy, or you can manually download and install the latest driver from your graphics card manufacturer's website. Here are the steps to use Driver Easy:
                

-
    -
                1. Download and install Driver Easy on your PC.
                2. Run Driver Easy and click "Scan Now". Driver Easy will scan your system and detect any problematic drivers.
                3. Click "Update All" to automatically download and install the correct version of all the drivers that are missing or out of date on your system.
                4. Restart your PC and launch Forza Horizon 4 to see if it works.
                

Method 3: Add the Game to the Exception List of Your Antivirus

-

Sometimes, your antivirus software may interfere with Forza Horizon 4 and cause it to crash. To prevent this, you can add the game to the exception list of your antivirus software. The steps may vary depending on the antivirus software you are using, but here is an example for Windows Defender:

-
    -
                1. Click the Start button and type "virus" in the search box.
                2. Select "Virus & threat protection" from the results.
                3. Click "Manage settings" under "Virus & threat protection settings".
                4. Scroll down and click "Add or remove exclusions" under "Exclusions".
                5. Click "Add an exclusion" and select "Folder".
                6. Browse to the folder where Forza Horizon 4 is installed and select it.
                7. Click "Yes" to confirm.
                8. Launch Forza Horizon 4 to see if it works.

                A scripted way to add the same exclusion is shown in the sketch after this list.
                
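                The exclusion can also be added without clicking through the Settings app. The sketch below is one way to do it from Python, assuming Windows 10/11 with Microsoft Defender, an elevated (administrator) session, and that PowerShell's built-in Add-MpPreference cmdlet is available; the install path is only an example and must be adjusted to your own machine.

                ```python
                # Sketch: add a Windows Defender exclusion for the game folder from Python.
                # Assumptions: Windows 10/11 with Microsoft Defender, run from an elevated
                # (administrator) session, and an example install path - adjust it to
                # wherever Forza Horizon 4 is actually installed.
                import subprocess

                game_folder = r"C:\Program Files\WindowsApps\ForzaHorizon4"  # example path, not universal

                # Add-MpPreference -ExclusionPath is the built-in Defender cmdlet for exclusions.
                result = subprocess.run(
                    ["powershell", "-NoProfile", "-Command",
                     f'Add-MpPreference -ExclusionPath "{game_folder}"'],
                    capture_output=True, text=True,
                )

                if result.returncode == 0:
                    print(f"Added Defender exclusion for {game_folder}")
                else:
                    print("Failed to add exclusion (run as administrator?):", result.stderr)
                ```
                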

                
-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Arcsoft Showbiz 5 With Crack !FULL! Torrent.md b/spaces/1gistliPinn/ChatGPT4/Examples/Arcsoft Showbiz 5 With Crack !FULL! Torrent.md deleted file mode 100644 index 6663bb73c198f642920dceeb06d8ed5a7c559c69..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Arcsoft Showbiz 5 With Crack !FULL! Torrent.md +++ /dev/null @@ -1,6 +0,0 @@ -

arcsoft showbiz 5 with crack torrent


DOWNLOAD ····· https://imgfil.com/2uy0nw



-
                
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Blender Cloud ? The Animation Fundamentals 2021.md b/spaces/1gistliPinn/ChatGPT4/Examples/Blender Cloud ? The Animation Fundamentals 2021.md deleted file mode 100644 index 2ddf7f9e4e2e4b52e85b8095cc0c6f98401cfed8..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Blender Cloud ? The Animation Fundamentals 2021.md +++ /dev/null @@ -1,44 +0,0 @@ -

Blender Cloud – The Animation Fundamentals


                DOWNLOAD: https://imgfil.com/2uxXzK
                



-
-Data Requests - -- The character models are available for download in . - -- The animations are available for download in . - -Animating a walkthrough #sec:animation-walkthrough - -====================== - -In this tutorial, we will be animating a walkthrough of a fully animated shot, where we will be adding additional visual effects (i.e., the 3D flames). The goal of the tutorial is to understand how the tutorial animation was created. Each shot is a fully-animated clip with four keyframes. - -The tutorial is organized into three major sections: - -1. Sketching and Layout. - -2. Geometry, Symmetry, Surface Smoothing, Painterly Feeling, & Organic Expressive Style. - -3. Reflections, Depth of Field, Color Theory, & Depth of Field. - -In the second section, we cover the following topics: - -1. Lighting to importance - -2. Reflections - -3. Area light - -4. Area light’s position - -5. Refraction - -6. Soft light - -7. Soft light’s position - -8. Reflection map - -9. 4fefd39f24
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/CorelDRAW Graphics Suite 2018 V20.1.0.708 (x86-x64) Ml Crack HOT!.md b/spaces/1gistliPinn/ChatGPT4/Examples/CorelDRAW Graphics Suite 2018 V20.1.0.708 (x86-x64) Ml Crack HOT!.md deleted file mode 100644 index 831cad8c2933b3cc3c7944a661acd77e7dc26bac..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/CorelDRAW Graphics Suite 2018 V20.1.0.708 (x86-x64) Ml Crack HOT!.md +++ /dev/null @@ -1,6 +0,0 @@ -

CorelDRAW Graphics Suite 2018 V20.1.0.708 (x86-x64) Ml Crack


                Download File: https://imgfil.com/2uy1st
                



- -Coreldraw graphics suite 2018 v20 1.0 708 keygen. Tenleid cosplay ... Autocad 2015 serial number and product key crack 64 bit. Sfm porno vk. ... Ml 1865 firmware. ... Pinnacle studio x86 x64 18.0.2 ultimate incl keygen full. 4d29de3e1b
-
-
-

diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Anger of Stick 5 Zombie - The Best Stickman Game on the App Store.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Anger of Stick 5 Zombie - The Best Stickman Game on the App Store.md deleted file mode 100644 index b91ad773e87a1b3fc4a5639056f8b0c6a70fa530..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Anger of Stick 5 Zombie - The Best Stickman Game on the App Store.md +++ /dev/null @@ -1,173 +0,0 @@ -
-

Anger of Stick 5: Zombie - A Review of the Action-Packed Stickman Game

-

If you are looking for a fun and addictive stickman game that will keep you entertained for hours, then you should check out Anger of Stick 5: Zombie. This game is a sequel to the popular Anger of Stick series, which has over 100 million downloads on Google Play. In this game, you will join the angry stickman and his friends as they fight against a strange group of enemies that have turned innocent people into zombies. You will use various weapons, skills, and vehicles to save the city and destroy the enemies. In this article, we will review the game and give you some tips and tricks to help you enjoy it more.

-

anger of stick 5 zombie


Download ····· https://urlin.us/2uSZL4



-

Introduction

-

What is Anger of Stick 5: Zombie?

-

Anger of Stick 5: Zombie is an action game developed by J-PARK, a Korean company that specializes in stickman games. The game was released in 2016 and has been updated regularly since then. The game is available for both Android and iOS devices, and it is free to download and play, with some optional in-app purchases. The game has received positive reviews from users and critics alike, with an average rating of 4.5 stars out of 5 on both Google Play and App Store. The game has also been featured on several websites and blogs as one of the best stickman games on the market.

-

Why should you play Anger of Stick 5: Zombie?

-

There are many reasons why you should play Anger of Stick 5: Zombie, but here are some of the main ones:

                - Simple, easy-to-learn controls and straightforward gameplay
                - Dozens of characters and over 100 weapons to collect, customize, and upgrade
                - Four game modes and hundreds of levels, plus helicopters and robots that fight alongside you
                - Colorful stickman graphics, smooth animations, and an upbeat soundtrack
                - Free to download and play, with only optional in-app purchases
                

Gameplay

-

How to play Anger of Stick 5: Zombie?

-

The gameplay of Anger of Stick 5: Zombie is simple and straightforward. You have to control your character and fight against the enemies that appear on the screen. You can move your character by using the joystick on the left side of the screen, and you can attack by using the buttons on the right side of the screen. You can also use special skills and items by tapping on their icons on the top of the screen. You have to clear each level by defeating all the enemies or reaching the goal point. You will also have to avoid obstacles and traps that can harm you or slow you down. You will lose a life if your health bar reaches zero, and you will have to restart the level if you lose all your lives. You can also pause the game by tapping on the menu button on the top right corner of the screen.

-

Controls

-

The controls of Anger of Stick 5: Zombie are easy to learn and use. Here are the basic controls that you need to know:

                - Use the joystick on the left side of the screen to move your character.
                - Use the attack buttons on the right side of the screen to attack enemies.
                - Tap the skill and item icons at the top of the screen to use special skills and items.
                - Tap the menu button in the top right corner of the screen to pause the game.
                

Modes

-

Anger of Stick 5: Zombie has four different modes to choose from, each with its own objectives, rules, and rewards. Here are the modes that you can play:

- -

Levels

-

Anger of Stick 5: Zombie has hundreds of levels to complete, each with different scenarios, enemies, and difficulties. Here are some of the levels that you will encounter:

-

How to play anger of stick 5 zombie on PC
-Anger of stick 5 zombie mod apk unlimited money and gems
-Anger of stick 5 zombie cheats and hacks
-Best weapons and skills in anger of stick 5 zombie
-Anger of stick 5 zombie online multiplayer mode
-Anger of stick 5 zombie tips and tricks for beginners
-Anger of stick 5 zombie review and rating
-Download anger of stick 5 zombie for free on Android and iOS
-Anger of stick 5 zombie gameplay and walkthrough
-Anger of stick 5 zombie vs anger of stick 4
-Anger of stick 5 zombie latest version and update
-Anger of stick 5 zombie best characters and heroes
-Anger of stick 5 zombie guide and tutorial
-Anger of stick 5 zombie levels and stages
-Anger of stick 5 zombie boss battles and challenges
-Anger of stick 5 zombie weapons shop and upgrades
-Anger of stick 5 zombie achievements and rewards
-Anger of stick 5 zombie fan art and memes
-Anger of stick 5 zombie story and plot
-Anger of stick 5 zombie theme song and soundtrack
-Anger of stick 5 zombie alternatives and similar games
-Anger of stick 5 zombie codes and coupons
-Anger of stick 5 zombie FAQs and answers
-Anger of stick 5 zombie bugs and glitches
-Anger of stick 5 zombie secrets and easter eggs
-How to install anger of stick 5 zombie on Windows or Mac
-Anger of stick 5 zombie offline mode and features
-Anger of stick 5 zombie custom mode and editor
-How to get more coins and gems in anger of stick 5 zombie
-How to unlock all characters and weapons in anger of stick 5 zombie
-How to beat anger of stick 5 zombie without spending money
-How to record and share anger of stick 5 zombie gameplay videos
-How to contact anger of stick 5 zombie developers and support team
-How to join anger of stick 5 zombie community and forums
-How to create your own character in anger of stick 5 zombie
-How to change the language and settings in anger of stick 5 zombie
-How to backup and restore your anger of stick 5 zombie data
-How to play anger of stick 5 zombie with friends and family
-How to improve your skills and strategy in anger of stick 5 zombie
-How to solve anger of stick 5 zombie puzzles and riddles
-How to survive longer in anger of stick 5 zombie survival mode
-How to earn more stars and medals in anger of stick 5 zombie missions
-How to customize your weapons and outfits in anger of stick 5 zombie
-How to access anger of stick 5 zombie hidden features and modes
-How to win anger of stick 5 zombie tournaments and competitions
-How to get free coins and gems in anger of stick 5 zombie legally
-How to report anger of stick 5 zombie players who cheat or abuse
-How to delete or uninstall anger of stick 5 zombie from your device

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                | Level | Scenario | Enemies | Difficulty |
                | --- | --- | --- | --- |
                | 1 | A street in the city | Zombies, thugs, dogs | Easy |
                | 2 | A rooftop in the city | Zombies, snipers, helicopters | Easy |
                | 3 | A subway station in the city | Zombies, soldiers, trains | Normal |
                | 4 | A factory in the city | Zombies, robots, lasers | Normal |
                | 5 | A park in the city | Zombies, ninjas, trees | Hard |
                | 6 | A bridge in the city | Zombies, bikers, cars | Hard |
                

What are the features of Anger of Stick 5: Zombie?

-

Anger of Stick 5: Zombie has many features that make it stand out from other stickman games. Here are some of the features that you will enjoy:

-

Graphics and sound

-

The game has simple but colorful graphics that suit the stickman style. The game also has smooth animations and realistic physics that make the action more dynamic and exciting. The game also has a catchy and upbeat soundtrack that matches the mood of the game. The game also has sound effects that enhance the atmosphere and the feedback of the game.

-

Characters and weapons

-

The game has over 40 characters to choose from, each with their own appearance, personality, and skills. You can also customize your character by changing their clothes, hair, and accessories. The game also has over 100 weapons to equip, each with their own power, range, and speed. You can also upgrade your weapons by using coins and gems.

-

Helicopters and robots

-

The game also has helicopters and robots that you can use to fight against the enemies. You can summon a helicopter or a robot by using an item or a skill. The helicopter or the robot will follow you and help you by shooting or smashing the enemies. You can also control the helicopter or the robot by using the joystick and the attack button.

-

Tips and tricks

-

If you want to play Anger of Stick 5: Zombie like a pro, you need to know some tips and tricks that will help you improve your skills and performance. Here are some of the tips and tricks that you should know:

-

How to level up and earn coins?

-

If you want to level up your character and earn more coins, you need to complete the missions and levels in the single mode. You will get more experience points and coins by clearing higher levels and harder difficulties. You will also get bonus coins by achieving a high score, killing a lot of enemies, or collecting items. You can also earn coins by playing the other modes, but they will give you less than the single mode.

-

How to use combos and skills?

-

If you want to deal more damage and defeat the enemies faster, you need to use combos and skills effectively. You can perform a combo by tapping on the attack button repeatedly. You can also perform a skill by tapping on the skill button. Each character has a different combo and skill, so you need to learn how to use them properly. You can also combine combos and skills to create more powerful attacks.

-

How to avoid ads and bugs?

-

If you want to avoid ads and bugs that can ruin your gaming experience, you need to follow some simple steps. First, you need to turn off your internet connection before playing the game. This will prevent any ads from popping up on your screen. Second, you need to update your game regularly to fix any bugs or glitches that may occur. Third, you need to clear your cache and data from time to time to free up some space and improve your game performance.

-

Conclusion

-

Summary of the main points

-

In conclusion, Anger of Stick 5: Zombie is a fun and addictive stickman game that will keep you entertained for hours. The game has four different modes, hundreds of levels, dozens of characters, over 100 weapons, helicopters and robots, simple but colorful graphics, catchy and upbeat soundtrack, easy but varied gameplay, rich but free features, and many tips and tricks to help you enjoy it more.

-

Recommendation and rating

-

We highly recommend Anger of Stick 5: Zombie to anyone who loves action games, stickman games, or zombie games. The game is suitable for all ages and skill levels, as it has a user-friendly interface, customizable controls, adjustable difficulty levels, and helpful tutorials. The game is also free to play, so you don't have to worry about spending any money to enjoy it. We give Anger of Stick 5: Zombie a rating of 9 out of 10 stars for its fun factor, quality, variety, and value.

-

Frequently Asked Questions (FAQs)

-

Here are some of the most frequently asked questions (FAQs) about Anger of Stick 5: Zombie:

-
    -
  1. How do I download Anger of Stick 5: Zombie?
  2. -

                    You can download Anger of Stick 5: Zombie from Google Play or the App Store, depending on your device. You can also scan the QR codes below to access the download links directly.
                

    -

    QR code for Google Play QR code for App Store

    -
  3. How do I play Anger of Stick 5: Zombie offline?
  4. -

    You can play Anger of Stick 5: Zombie offline by turning off your internet connection before launching the game. This will prevent any ads from showing up and any online features from working. However, you will still be able to play the single mode and the zombie mode without any problems.

    -
  5. How do I get more coins and gems in Anger of Stick 5: Zombie?
  6. -

    You can get more coins and gems in Anger of Stick 5: Zombie by playing the game and completing the missions and levels. You will also get bonus coins and gems by achieving a high score, killing a lot of enemies, or collecting items. You can also watch ads or complete offers to get free coins and gems. Alternatively, you can buy coins and gems with real money by using the in-app purchases.

    -
  7. How do I unlock more characters and weapons in Anger of Stick 5: Zombie?
  8. -

    You can unlock more characters and weapons in Anger of Stick 5: Zombie by using coins and gems. You can buy characters and weapons from the shop, or you can get them from the lucky box. You can also upgrade your characters and weapons by using coins and gems.

    -
  9. How do I contact the developer of Anger of Stick 5: Zombie?
  10. -

    You can contact the developer of Anger of Stick 5: Zombie by using the following methods:

    -

                
    -
    -
    \ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash of Clans 14.555.7 APK - Whats New in the Latest Update.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash of Clans 14.555.7 APK - Whats New in the Latest Update.md deleted file mode 100644 index abbdf78d5f476b529d86d940d6f85e8aa277fd1a..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash of Clans 14.555.7 APK - Whats New in the Latest Update.md +++ /dev/null @@ -1,115 +0,0 @@ - -

    Clash of Clans APK 14.555 7: Everything You Need to Know

    -

    If you are a fan of strategy games, you have probably heard of Clash of Clans, one of the most popular mobile games in the world. But do you know what is Clash of Clans APK 14.555 7 and why you should play it? In this article, we will tell you everything you need to know about this latest version of the game, including its features, benefits, drawbacks, and how to download and install it on your device.

    -

    What is Clash of Clans?

    -

    A popular strategy game for mobile devices

    -

    Clash of Clans is a free-to-play strategy game developed by Supercell, a Finnish game company. It was released in 2012 for iOS devices and in 2013 for Android devices. Since then, it has become one of the most downloaded and played games in the world, with over 500 million downloads and millions of active players every day.

    -

    clash of clans apk 14.555 7


                Download: https://urlin.us/2uT1bN
                



    -

    The main features and gameplay of Clash of Clans

    -

    The game is set in a fantasy world where you are the chief of a village. Your goal is to build and upgrade your village, train and upgrade your troops, join or create a clan with other players, and attack or defend against other players or computer-generated enemies. You can also participate in various events, challenges, wars, and leagues to earn resources, trophies, and rewards.

    -

    The game has two main modes: single-player and multiplayer. In single-player mode, you can attack goblin villages to earn resources and practice your skills. In multiplayer mode, you can attack or defend against other players to earn trophies and loot. You can also join or create a clan with up to 50 players and chat, donate troops, request reinforcements, and cooperate in clan wars and clan games.

    -

    What is Clash of Clans APK 14.555 7?

    -

    The latest version of the game with new content and improvements

    -

    Clash of Clans APK 14.555 7 is the latest version of the game that was released on September 7, 2021. It introduces a new feature called Clan Capital, which is a huge mountain fortress above the clouds that you can build together with your clanmates. It also adds new content such as new troops, spells, buildings, traps, decorations, achievements, and more. It also fixes some bugs and improves the performance and stability of the game.

    -

    How to download and install Clash of Clans APK 14.555 7

    -

    If you want to play Clash of Clans APK 14.555 7 on your device, you have two options: either update the game from the official app store (Google Play Store for Android or App Store for iOS) or download the APK file from a third-party source (such as [Clash of Clans 14.555.7 APK Download - Softpedia](^1^)) and install it manually.

    -

    To update the game from the official app store, you just need to open the app store on your device, search for Clash of Clans, and tap on the update button if available. The app store will automatically download and install the latest version of the game for you.

    -

    To download and install the APK file from a third-party source, you need to follow these steps:

    -
      -
                1. Go to the website where you can download the APK file, such as [Clash of Clans 14.555.7 APK Download - Softpedia], and tap on the download button.
                2. Wait for the download to finish and then locate the APK file on your device. You may need to use a file manager app to do this.
                3. Before you install the APK file, you need to enable the installation of apps from unknown sources on your device. To do this, go to your device settings, security, and toggle on the option that allows installing apps from unknown sources.
                4. Tap on the APK file and follow the instructions on the screen to install it. You may need to grant some permissions to the app during the installation process.
                5. Once the installation is complete, you can open the game and enjoy the new features.
                

    Note: Downloading and installing APK files from third-party sources can be risky, as they may contain viruses or malware that can harm your device or compromise your privacy. Therefore, we recommend that you only download APK files from trusted and reputable sources, and scan them with an antivirus app before installing them. We are not responsible for any damage or loss caused by using APK files from third-party sources.
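                One practical way to reduce that risk is to compare the downloaded file against a checksum published by the download site, when one is available. The snippet below is a minimal sketch of such a check using Python's standard hashlib module; the file name and expected hash are placeholders, not real values for any specific release.

                ```python
                # Sketch: verify a downloaded APK against a published SHA-256 checksum before
                # installing it. The file path and expected hash below are placeholders -
                # substitute the real values from the site you downloaded from.
                import hashlib
                from pathlib import Path

                apk_path = Path("clash-of-clans-14.555.7.apk")  # placeholder file name
                expected_sha256 = "0000000000000000000000000000000000000000000000000000000000000000"  # placeholder

                hasher = hashlib.sha256()
                with apk_path.open("rb") as f:
                    # Read in chunks so large APKs do not have to fit in memory at once.
                    for chunk in iter(lambda: f.read(1024 * 1024), b""):
                        hasher.update(chunk)

                actual = hasher.hexdigest()
                if actual == expected_sha256.lower():
                    print("Checksum matches - the file is the one the publisher listed.")
                else:
                    print(f"Checksum mismatch!\n expected: {expected_sha256}\n got:      {actual}")
                ```
                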

    -

    clash of clans apk 14.555 7 download
    -clash of clans apk 14.555 7 mod
    -clash of clans apk 14.555 7 unlimited gems
    -clash of clans apk 14.555 7 latest version
    -clash of clans apk 14.555 7 hack
    -clash of clans apk 14.555 7 update
    -clash of clans apk 14.555 7 free
    -clash of clans apk 14.555 7 android
    -clash of clans apk 14.555 7 offline
    -clash of clans apk 14.555 7 private server
    -clash of clans apk 14.555 7 original
    -clash of clans apk 14.555 7 mirror
    -clash of clans apk 14.555 7 install
    -clash of clans apk 14.555 7 old version
    -clash of clans apk 14.555 7 for pc
    -clash of clans apk 14.555 7 mediafire
    -clash of clans apk 14.555 7 mega
    -clash of clans apk 14.555 7 revdl
    -clash of clans apk 14.555 7 rexdl
    -clash of clans apk 14.555 7 apkpure
    -clash of clans apk 14.555 7 uptodown
    -clash of clans apk 14.555 7 apkmirror
    -clash of clans apk 14.555 7 apkmody
    -clash of clans apk 14.555 7 happymod
    -clash of clans apk 14.555 7 an1
    -clash of clans apk 14.555 7 ihackedit
    -clash of clans apk 14.555 7 platinmods
    -clash of clans apk 14.555 7 blackmod
    -clash of clans apk 14.555 7 modapkdown
    -clash of clans apk 14.555 7 andropalace
    -clash of clans apk 14.555 7 android1
    -clash of clans apk 14.555 7 mob.org
    -clash of clans apk 14.555 7 malavida
    -clash of clans apk 14.555 7 softonic
    -clash of clans apk 14.555 7 mobpark
    -clash of clans apk 14.555 7 acmarket
    -clash of clans apk 14.555 7 aptoide
    -clash of clans apk 14.555 7 panda helper
    -clash of clans apk 14.555

    -

    What are the benefits of playing Clash of Clans APK 14.555 7?

    -

    Enjoy the new Clan Capital feature with your clanmates

    -

    One of the main benefits of playing Clash of Clans APK 14.555 7 is that you can access the new Clan Capital feature, which is a huge mountain fortress above the clouds that you can build together with your clanmates. The Clan Capital is divided into four zones: Main Hall, Barracks, Workshop, and Treasury. Each zone has different buildings and functions that can help you and your clan in various ways.

    -

    The Main Hall is where you can see your clan's progress and achievements, as well as customize your clan's banner and motto. The Barracks is where you can train and upgrade your Capital Troops, which are special troops that can only be used in Raids. The Workshop is where you can research and upgrade your Capital Spells, which are powerful spells that can only be used in Raids. The Treasury is where you can store and manage your Capital Resources, which are gold, elixir, dark elixir, and gems that can only be used in the Clan Capital.

    -

    To build and upgrade your Clan Capital, you need to collect Clan Points by participating in Raids, Clan Wars, Clan Games, and other clan activities. You can also donate resources or gems to your clan's Treasury to help speed up the construction process. The higher the level of your Clan Capital, the more benefits you and your clan will enjoy.

    -

    Battle against enemy Capitals during Raid Weekends

    -

    Another benefit of playing Clash of Clans APK 14.555 7 is that you can participate in Raid Weekends, which are special events that occur every two weeks. During Raid Weekends, you can attack or defend against enemy Capitals using your Capital Troops and Spells. You can also cooperate with your clanmates to coordinate your attacks or defenses.

    -

    Raid Weekends are divided into two phases: Preparation Phase and Battle Phase. During the Preparation Phase, which lasts for 24 hours, you can scout the enemy Capitals and plan your strategy. You can also train and upgrade your Capital Troops and Spells, as well as request reinforcements from your clanmates. During the Battle Phase, which lasts for 48 hours, you can launch up to three attacks against enemy Capitals and earn Raid Stars based on how much damage you inflict. You can also defend your own Capital from enemy attacks and earn Defense Stars based on how well you protect it.

    -

    The more Raid Stars you earn, the higher your Raid Rank will be. Your Raid Rank determines how much loot you will receive at the end of the Raid Weekend. You can also earn bonus loot by completing Raid Achievements or by being one of the top performers in your clan or league. The loot you earn from Raid Weekends can be used to build and upgrade your Clan Capital or to boost your regular village.

    -

    Earn great rewards by completing Raids as a Clan

    -

    A third benefit of playing Clash of Clans APK 14.555 7 is that you can earn great rewards by completing Raids as a Clan. Raids are clan-based challenges that require you to attack or defend against a specific number of enemy Capitals within a certain time limit. For example, a Raid might ask you to attack 10 enemy Capitals in 24 hours. If you and your clanmates manage to complete the Raid, you will receive a reward based on the difficulty and duration of the Raid. The reward can be resources, gems, magic items, or even exclusive skins for your Capital Troops or Spells.

    -

    Raids are a great way to test your skills and teamwork, as well as to earn some extra loot for your Clan Capital or regular village. You can find the available Raids in the Clan Capital menu, and you can join or create a Raid with your clanmates at any time. However, you can only participate in one Raid at a time, and you can only use your Capital Troops and Spells for Raids.

    -

    What are the drawbacks of playing Clash of Clans APK 14.555 7?

    -

    The game requires a stable internet connection and a lot of storage space

    -

    While playing Clash of Clans APK 14.555 7 can be fun and rewarding, it also has some drawbacks that you should be aware of. One of them is that the game requires a stable internet connection to run properly. This means that you cannot play the game offline or in areas with poor network coverage. If you lose your internet connection while playing, you may experience lag, glitches, or disconnection issues that can affect your gameplay and progress.

    -

    Another drawback is that the game requires a lot of storage space on your device. The APK file size of Clash of Clans APK 14.555 7 is about 200 MB, which is quite large compared to other mobile games. Moreover, the game also downloads additional data and updates regularly, which can take up more space on your device. If you have limited storage space on your device, you may need to delete some other apps or files to make room for Clash of Clans APK 14.555 7.

    -

    The game can be addictive and time-consuming

    -

    A second drawback of playing Clash of Clans APK 14.555 7 is that the game can be addictive and time-consuming. The game is designed to keep you hooked and engaged by offering you various goals, challenges, rewards, and social interactions. You may find yourself spending hours or even days playing the game without noticing the time passing by. You may also feel compelled to check the game frequently to collect resources, train troops, join raids, or chat with your clanmates.

    -

    While playing the game can be enjoyable and relaxing, it can also interfere with your other responsibilities and activities in real life. You may neglect your work, studies, family, friends, health, or hobbies because of your addiction to the game. You may also spend too much money on buying gems or other in-game items to speed up your progress or gain an advantage over other players. Therefore, you should play the game in moderation and balance it with other aspects of your life.

    -

    The game can be frustrating and competitive for some players

    -

    A third drawback of playing Clash of Clans APK 14.555 7 is that the game can be frustrating and competitive for some players. The game is based on attacking and defending against other players, which means that you can lose resources, trophies, or stars if you fail to protect your village or capital from enemy attacks. You may also face stronger or more experienced players who have better troops, spells, buildings, or strategies than you.

    -

    While losing can be a part of the game and a learning opportunity, it can also be demoralizing and discouraging for some players. You may feel angry, sad, or stressed because of your losses or defeats. You may also feel pressured or anxious to improve your performance or rank in the game. You may even resort to cheating or hacking to gain an unfair advantage over other players.

    -

    Therefore, you should play the game with a positive and sportsmanlike attitude. You should accept your losses gracefully and learn from your mistakes. You should also respect your opponents and avoid any toxic or abusive behavior in the game. You should play the game for fun and entertainment, not for ego or pride.

    -

    Conclusion

    -

    Clash of Clans APK 14.555 7 is the latest version of the popular strategy game for mobile devices. It introduces a new feature called Clan Capital, which is a huge mountain fortress that you can build together with your clanmates. It also adds new content such as new troops, spells, buildings, traps, decorations, achievements, and more. It also fixes some bugs and improves the performance and stability of the game.

    -

    Playing Clash of Clans APK 14.555 7 can be beneficial for you and your clan, as you can enjoy the new Clan Capital feature, participate in Raid Weekends, and earn great rewards by completing Raids as a Clan. However, playing the game also has some drawbacks, such as requiring a stable internet connection and a lot of storage space, being addictive and time-consuming, and being frustrating and competitive for some players.

    -

    Therefore, you should play the game wisely and responsibly, and balance it with other aspects of your life. You should also have fun and respect your opponents in the game. If you want to download and install Clash of Clans APK 14.555 7 on your device, you can either update the game from the official app store or download the APK file from a third-party source. However, be careful of the risks involved in using APK files from unknown sources.

    -

    We hope this article has helped you learn more about Clash of Clans APK 14.555 7 and why you should play it. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy clashing!

    -

    FAQs

    -

    Here are some frequently asked questions about Clash of Clans APK 14.555 7:

    -
      -
    1. What is the difference between Clash of Clans APK and Clash of Clans MOD APK?
    2. -

      Clash of Clans APK is the original version of the game that is developed by Supercell and updated regularly with new content and improvements. Clash of Clans MOD APK is a modified version of the game that is created by third-party developers and may have extra features or cheats that are not available in the original version. However, Clash of Clans MOD APK is not authorized or supported by Supercell and may contain viruses or malware that can harm your device or compromise your privacy. Therefore, we do not recommend using Clash of Clans MOD APK.

      -
    3. How can I transfer my Clash of Clans account from one device to another?
    4. -

      If you want to transfer your Clash of Clans account from one device to another, you need to link your account to a Supercell ID or a Google Play Games account (for Android devices) or a Game Center account (for iOS devices). To do this, you need to open the game on your old device, go to settings, tap on "Connect Device", and follow the instructions on the screen. Then, you need to open the game on your new device, go to settings, tap on "Connect Device", and enter the code or sign in with your Supercell ID or Google Play Games account or Game Center account. This will transfer your account to your new device.

      -
    5. How can I get free gems in Clash of Clans?
    6. -

      Gems are the premium currency in Clash of Clans that can be used to speed up your progress or buy special items in the game. You can get free gems in Clash of Clans by completing achievements, removing obstacles, participating in events, challenges, wars, leagues, clan games, raids, or raid weekends, or opening gem boxes or gem mines. You can also get free gems by watching ads or completing surveys or offers from third-party sources. However, be careful of scams or hacks that promise you free gems but may steal your personal information or infect your device with malware.

      -
    7. How can I join a clan in Clash of Clans?
    8. -

      If you want to join a clan in Clash of Clans, you need to be at least Town Hall level 4 and have at least 1000 trophies. Then, you can either search for a clan that suits your preferences (such as language, location, level, activity, etc.) or accept an invitation from a clan that has invited you. You can also create your own clan if you have enough gems or join a clan that is open for anyone to join.

      -
    9. How can I contact Supercell support in Clash of Clans?
    10. -

      If you have any issues or problems with the game, such as lost accounts, missing purchases, bugs, glitches, or feedback, you can contact Supercell support in Clash of Clans by following these steps:

      -
        -
      1. Open the game and go to settings.
      2. -
      3. Tap on the "Help and Support" button.
      4. -
      5. Tap on the "Contact Us" button at the top right corner of the screen.
      6. -
      7. Type in your message and attach any screenshots or videos if necessary.
      8. -
      9. Tap on the "Send" button and wait for a reply from Supercell support.
      10. -
      -

      You can also visit the official Clash of Clans website, forum, or social media pages to find more information or solutions for your issues or problems.

                
      -
      -
      \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Descoper muzica veche anii 80-90 i download free piesele preferate.md b/spaces/1phancelerku/anime-remove-background/Descoper muzica veche anii 80-90 i download free piesele preferate.md deleted file mode 100644 index 3d41f7e61eef799671d8bf4466a69dd282e72ed5..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Descoper muzica veche anii 80-90 i download free piesele preferate.md +++ /dev/null @@ -1,137 +0,0 @@ - -

      How to Download Free Muzica Veche Anii 80-90

      -

      Muzica veche anii 80-90, or old music from the '80s and '90s, is a genre of Romanian music that encompasses various styles, such as rock, pop, disco, dance, and slow. It is popular among many people who grew up listening to it or who appreciate its nostalgic and sentimental value. If you are one of them, you might be wondering how you can download free muzica veche anii 80-90 legally and safely. In this article, we will show you three websites that offer free music downloads of muzica veche anii 80-90, as well as how to use them.

      -

      Nostalgic FM - A Radio Station That Plays Muzica Veche Anii 80-90

      -

      One of the best ways to enjoy muzica veche anii 80-90 is to listen to Nostalgic FM, a radio station that plays only old Romanian music from the '70s, '80s, and '90s. You can listen to Nostalgic FM online by visiting their website [6](https://NostalgicFM.ro) or by downloading their mobile app for iOS or Android. You can also follow them on Facebook, Twitter, Instagram, and YouTube.

      -

      download free muzica veche anii 80-90


      Download »»» https://jinyurl.com/2uNK6N



      -

      How to Access and Listen to Nostalgic FM Online

      -

      To access and listen to Nostalgic FM online, you need a device with an internet connection and a web browser. You can either go directly to their website [6](https://NostalgicFM.ro) or search for "Nostalgic FM" on Google or another search engine. Once you are on their website, you will see a player with a play button. Click on it to start listening to Nostalgic FM live. You can also see the name of the song and the artist that is playing, as well as the previous and next songs. You can also adjust the volume, mute the sound, or share the link with your friends.

      -

      How to Download Songs from Nostalgic FM for Free

      -

      If you like a song that is playing on Nostalgic FM and you want to download it for free, you can do so by following these steps:

      -
        -
                1. Right-click on the name of the song that is playing on the player.
                2. Select "Copy link address" or "Copy link location" from the menu.
                3. Paste the link into a new tab or window in your browser.
                4. You will be redirected to a YouTube video of the song.
                5. Copy the URL of the YouTube video from the address bar.
                6. Go to a free YouTube to MP3 converter website, such as BestMP3Converter or YouTubeMP3Free, or one recommended by TechRadar.
                7. Paste the URL of the YouTube video into the search box and click on the "Convert" or "Download" button.
                8. Choose the quality and format of the MP3 file and click on the "Download" or "Save" button.
                9. Wait for the download to finish and enjoy your free muzica veche anii 80-90 song.
                

      Jamendo Music - A Website That Offers Free Music Downloads from Independent Artists

      -

      Another great source of free muzica veche anii 80-90 is Jamendo Music, a website that offers free music downloads from independent artists who want to share their music with the world. You can find thousands of songs from various genres, including muzica veche anii 80-90, on Jamendo Music. You can also discover new artists, create playlists, and support the music community.

      -

      How to Browse and Search for Muzica Veche Anii 80-90 on Jamendo Music

      -

      To browse and search for muzica veche anii 80-90 on Jamendo Music, you need a device with an internet connection and a web browser. You can either go directly to their website [4](https://www.jamendo.com/) or search for "Jamendo Music" on Google or another search engine. Once you are on their website, you will see a menu with different options, such as Explore, Radio, Licensing, and Log in. To find muzica veche anii 80-90, you can either:

      -
        -
      • Click on Explore and then select Genres. You will see a list of music genres, such as Pop, Rock, Electronic, Jazz, and more. Scroll down until you find World Music and click on it. You will see a sub-list of world music subgenres, such as Latin, Reggae, African, and more. Scroll down until you find Balkan and click on it. You will see a collection of songs from Balkan countries, including Romania. You can also filter the songs by popularity, date, duration, or artist name.
      • -
      • Click on the search icon at the top right corner of the website and type in "muzica veche anii 80-90" or any related keywords, such as "muzica romaneasca", "muzica populara", or "muzica de petrecere". You will see a list of songs that match your search query. You can also filter the songs by relevance, popularity, date, duration, or artist name.
      • -
      -

      How to Download Songs from Jamendo Music for Free

      -

      If you like a song that you find on Jamendo Music and you want to download it for free, you can do so by following these steps:

      -
        -
                1. Click on the song title or the play button to open the song page.
                2. Click on the download icon at the bottom right corner of the song player.
                3. You will see a pop-up window that asks you to choose between personal use or commercial use. If you want to download the song for personal use only, click on "Free Download". If you want to use the song for commercial purposes, such as in a video or a podcast, click on "Licensing".
                4. If you choose "Free Download", you will be asked to log in or sign up for a free account. You can also log in with your Facebook or Google account. Once you are logged in, you will be able to download the song as an MP3 file.
                5. If you choose "Licensing", you will be redirected to the Jamendo Licensing website (https://licensing.jamendo.com/), where you can buy a license for using the song in your project. The price depends on the type and scope of your project. You can also contact the Jamendo Licensing team for more information.

                If you would rather work with Jamendo from a script, see the API sketch below.
                
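                Jamendo also exposes a public developer API, so freely downloadable tracks can be listed programmatically. The sketch below is an assumption-heavy example: the v3.0 `/tracks` endpoint, the free `client_id` obtained from a Jamendo developer account, and the `audiodownload_allowed` / `audiodownload` fields are all assumptions to verify against the current API documentation before relying on them.

                ```python
                # Sketch: list freely downloadable tracks through Jamendo's public API.
                # Assumptions: the v3.0 /tracks endpoint, a free client_id from a Jamendo
                # developer account, and the parameter/field names used below - check all
                # of them against the current API documentation.
                import requests

                CLIENT_ID = "your_client_id_here"  # placeholder - register to obtain one

                resp = requests.get(
                    "https://api.jamendo.com/v3.0/tracks/",
                    params={
                        "client_id": CLIENT_ID,
                        "format": "json",
                        "search": "muzica veche",         # example search term
                        "audiodownload_allowed": "true",  # only tracks the artist allows downloading
                        "limit": 5,
                    },
                    timeout=30,
                )
                resp.raise_for_status()

                for track in resp.json().get("results", []):
                    print(track.get("artist_name"), "-", track.get("name"))
                    print("  download URL:", track.get("audiodownload"))
                ```
                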

      Internet Archive - A Digital Library That Archives Audio, Video, and Other Media

      -

      The third and final website that we recommend for downloading free muzica veche anii 80-90 is Internet Archive, a digital library that archives audio, video, and other media from various sources and periods. You can find millions of files on Internet Archive, including muzica veche anii 80-90, that are free to access and download. You can also upload your own files, donate to support the project, or join the community.

      -

      How to Find and Explore Muzica Veche Anii 80-90 on Internet Archive

      -

      To find and explore muzica veche anii 80-90 on Internet Archive, you need a device with an internet connection and a web browser. You can either go directly to their website [5](https://archive.org/) or search for "Internet Archive" on Google or another search engine. Once you are on their website, you will see a menu with different options, such as Web, Texts, Video, Audio, Software, Images, and More. To find muzica veche anii 80-90, you can either:

      -
        -
      • Click on Audio and then select Community Audio. You will see a list of audio files uploaded by users and organizations. You can sort the files by views, title, date archived, creator, or date published. You can also use the search box at the top right corner of the website and type in "muzica veche anii 80-90" or any related keywords.
      • -
      • Click on the search icon at the top right corner of the website and type in "muzica veche anii 80-90" or any related keywords. You will see a list of results from different categories, such as Web, Texts, Video, Audio, Software, Images, and More. You can filter the results by media type, year, language, collection, or topic.
      • -
      -

      How to Download Songs from Internet Archive for Free

      -

      If you like a song that you find on Internet Archive and you want to download it for free, you can do so by following these steps:

      -

      download free muzica veche anii 80-90 colaj
      -download free muzica veche anii 80-90 manele
      -download free muzica veche anii 80-90 disco
      -download free muzica veche anii 80-90 romaneasca
      -download free muzica veche anii 80-90 petrecere
      -download free muzica veche anii 80-90 mp3
      -download free muzica veche anii 80-90 online
      -download free muzica veche anii 80-90 youtube
      -download free muzica veche anii 80-90 zippy
      -download free muzica veche anii 80-90 mix
      -download free muzica veche anii 80-90 albume
      -download free muzica veche anii 80-90 straina
      -download free muzica veche anii 80-90 rock
      -download free muzica veche anii 80-90 pop
      -download free muzica veche anii 80-90 dance
      -download free muzica veche anii 80-90 hituri
      -download free muzica veche anii 80-90 melodii
      -download free muzica veche anii 80-90 playlist
      -download free muzica veche anii 80-90 radio
      -download free muzica veche anii 80-90 torrent
      -download free muzica veche anii 80-90 best of
      -download free muzica veche anii 80-90 retro
      -download free muzica veche anii 80-90 clasice
      -download free muzica veche anii 80-90 superbe
      -download free muzica veche anii 80-90 nemuritoare
      -download free muzica veche anii 80-90 de dragoste
      -download free muzica veche anii 80-90 de colectie
      -download free muzica veche anii 80-90 de aur
      -download free muzica veche anii 80-90 de calitate
      -download free muzica veche anii 80-90 de suflet

      -
        -
                1. Click on the song title or the thumbnail to open the song page.
                2. Scroll down until you see a section called "Download Options". You will see a list of formats and sizes that you can choose from, such as MP3, OGG VORBIS, TORRENT, or VBR ZIP.
                3. Click on the format and size that you prefer and the download will start automatically. You can also right-click on the format and size and select "Save link as" or "Save target as" from the menu.
                4. Wait for the download to finish and enjoy your free muzica veche anii 80-90 song.

                A scripted alternative using the Internet Archive's official Python package is sketched below.
                
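                If you prefer to script this instead of clicking through the site, the Internet Archive publishes an official Python package. The sketch below assumes the `internetarchive` package is installed (`pip install internetarchive`); the search query, item count, and file pattern are only examples.

                ```python
                # Sketch: search the Internet Archive and download the MP3 files of a few
                # matching items, using the official `internetarchive` package
                # (pip install internetarchive). The query and pattern below are examples.
                from internetarchive import search_items, download

                query = 'muzica veche anii 80-90 AND mediatype:audio'  # example search query

                # Take the first few matching items and fetch only their MP3 files.
                for result in list(search_items(query))[:3]:
                    identifier = result["identifier"]
                    print("Downloading MP3s from item:", identifier)
                    download(
                        identifier,
                        glob_pattern="*.mp3",   # only fetch MP3 files from the item
                        destdir="downloads",    # local folder to save into
                        verbose=True,
                    )
                ```
                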

      Conclusion

      -

      In conclusion, downloading free muzica veche anii 80-90 is possible and easy if you know where to look. We have shown you three websites that offer free music downloads of muzica veche anii 80-90 legally and safely: Nostalgic FM [6](https://NostalgicFM.ro), Jamendo Music [4](https://www.jamendo.com/), and Internet Archive [5](https://archive.org/). You can use these websites to listen to and download your favorite songs from the '80s and '90s without spending a dime.

      -

      However, before you start downloading free muzica veche anii 80-90, here are some tips and recommendations that you should keep in mind:

      -
        -
      • Always check the quality and format of the songs before downloading them. Some songs might have low quality or incompatible formats that might affect your listening experience.
      • -
      • Always respect the rights and wishes of the artists and creators of muzica veche anii 80-90. Do not use their songs for commercial purposes without their permission or license. Do not distribute their songs without their consent or credit. Do not claim their songs as your own or modify them without their approval.
      • -
      • Always support the artists and creators of muzica veche anii 80-90 if you can. You can do so by buying their albums or merchandise, attending their concerts or events, following them on social media or streaming platforms, or donating to their causes or projects.
      • -
      -

      We hope that this article has helped you learn how to download free muzica veche anii 80-90 and enjoy it to the fullest. If you have any questions or comments, feel free to leave them below. Happy listening!

      -

      FAQs

      -

      Here are some of the frequently asked questions about downloading free muzica veche anii 80-90:

      -

      What are some of the best artists and songs of muzica veche anii 80-90?

      -

      There are many artists and songs of muzica veche anii 80-90 that are worth listening to, but here are some of the most popular and influential ones:

      -
        -
      • Holograf - A rock band that formed in 1978 and is still active today. Some of their hits include "Sa nu-mi iei niciodata dragostea", "Ti-am dat un inel", and "Cat de departe".
      • Loredana Groza - A pop singer and actress who debuted in 1986 and is still active today. Some of her hits include "Zaraza", "Lele", and "Buna seara, iubito".
      • 3 Sud Est - A dance-pop boy band that formed in 1997 and is still active today. Some of their hits include "Amintirile", "Alaturi de ingeri", and "Emotii".
      • Cargo - A heavy metal band that formed in 1985 and is still active today. Some of their hits include "Ziua vrajitoarelor", "Daca ploaia s-ar opri", and "Nu ma lasa sa-mi fie dor".
      • Andra - A pop singer who debuted in 1999 and is still active today. Some of her hits include "Ramai cu mine", "Inevitabil va fi bine", and "Marfa curata".
      -

      What are some of the advantages and disadvantages of downloading free music online?

      -

      Downloading free music online has some advantages and disadvantages, such as:

      | Advantages | Disadvantages |
      | --- | --- |
      | You can access a large variety of music from different genres, artists, and periods. | You might encounter low quality, corrupted, or incomplete files. |
      | You can save money and storage space by not buying physical CDs or DVDs. | You might violate the intellectual property rights of the artists and creators. |
      | You can listen to your music offline without relying on an internet connection. | You might expose your device to viruses, malware, or spyware. |
      | You can create your own playlists and share them with your friends. | You might miss out on the latest releases, updates, or features of the music platforms. |
      -

      How can I support the artists and creators of muzica veche anii 80-90?

      -

      If you download free muzica veche anii 80-90 online, you should also support the artists and creators who made it possible. You can do so by:

      -
        -
      • Buying their albums or merchandise from their official websites or stores.
      • Attending their concerts or events if they are available in your area.
      • Following them on social media or streaming platforms and engaging with their posts or content.
      • Donating to their causes or projects if they have any.
      • Giving them feedback, reviews, or ratings on their music or performance.
      • Recommending their music to your friends, family, or acquaintances.
      -

      What are some of the legal and ethical issues of downloading free music online?

      -

      Downloading free music online might involve some legal and ethical issues, such as:

      -
        -
      • Infringing the intellectual property rights of the artists and creators. This means that you are using their work without their permission or license, which might result in legal actions or penalties.
      • Depriving the artists and creators of their income and recognition. This means that you are not paying them for their work or giving them credit, which might affect their livelihood and reputation.
      • Harming the music industry and culture. This means that you are reducing the demand and supply of music products and services, which might affect the quality and diversity of music available.
      -

      How can I convert and play muzica veche anii 80-90 on different devices?

      -

      If you download free muzica veche anii 80-90 online, you might need to convert and play it on different devices, such as your computer, smartphone, tablet, or MP3 player. You can do so by:

      -
        -
      • Using a free online file converter website, such as [Online-Convert], [Zamzar], or [Convertio]. You can upload your music file and choose the output format and quality that you want. Then, you can download the converted file and transfer it to your device.
      • Using a free software or app that can convert and play music files, such as [VLC Media Player], [Audacity], or [Freemake Audio Converter]. You can install the software or app on your device and use it to open, convert, and play your music file (a short scripted alternative is sketched right after this list).
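      If you would rather script the conversion than use one of the tools above, here is a minimal sketch with the pydub Python library (not one of the tools named in this article; it assumes ffmpeg is installed, and the file names are placeholders):

```python
from pydub import AudioSegment

# Load the downloaded file (placeholder name) and re-encode it as a 192 kbps MP3.
song = AudioSegment.from_file("song.ogg")
song.export("song.mp3", format="mp3", bitrate="192k")
```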

      401be4b1e0
      -
      -
      \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Wallpaper Kamen Rider Gates The Ultimate Collection of HD Images.md b/spaces/1phancelerku/anime-remove-background/Download Wallpaper Kamen Rider Gates The Ultimate Collection of HD Images.md deleted file mode 100644 index 36b1078e607fdb340f1077f7e6a3619735cf6e27..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Wallpaper Kamen Rider Gates The Ultimate Collection of HD Images.md +++ /dev/null @@ -1,171 +0,0 @@ -
      -

      How to Download Wallpaper Kamen Rider Geats

      -

      If you are a fan of the Japanese tokusatsu drama series Kamen Rider Geats, you might want to decorate your device with some cool wallpapers featuring the characters and scenes from the show. Wallpapers are images that can be used as backgrounds for your desktop, laptop, smartphone, tablet, or any other device. They can make your device look more attractive, personalized, and fun. They can also express your personality, mood, and preferences.

      -

      download wallpaper kamen rider gates


      DOWNLOADhttps://jinyurl.com/2uNQsa



      -

      In this article, we will show you how to download wallpaper Kamen Rider Geats from various sources online. We will also give you some tips on how to set them as your device's background. By following these steps, you will be able to enjoy the amazing visuals of Kamen Rider Geats anytime and anywhere.

      -

      How to Find Wallpaper Kamen Rider Geats Online

      -

      There are many ways to find wallpaper Kamen Rider Geats online. You can use search engines, websites, or apps that offer wallpapers for free or for a fee. Here are some of the most common methods:

      -

      Search engines

      -

      Search engines are tools that help you find information on the internet. You can use them to find wallpaper Kamen Rider Geats by typing keywords related to the show or the characters. For example, you can type "wallpaper kamen rider geats", "kamen rider geats hd wallpapers", "kamen rider geats 4k wallpapers", etc. You can also add modifiers such as "download", "free", "best", etc. to narrow down your search results.

      -

      download wallpaper kamen rider gates hd
      -download wallpaper kamen rider gates 4k
      -download wallpaper kamen rider gates for pc
      -download wallpaper kamen rider gates for phone
      -download wallpaper kamen rider gates for laptop
      -download wallpaper kamen rider gates anime
      -download wallpaper kamen rider gates abyss
      -download wallpaper kamen rider gates cave
      -download wallpaper kamen rider gates free
      -download wallpaper kamen rider gates online
      -download wallpaper kamen rider gates and zio
      -download wallpaper kamen rider gates and woz
      -download wallpaper kamen rider gates and tsukuyomi
      -download wallpaper kamen rider gates and sougo
      -download wallpaper kamen rider gates and another riders
      -download wallpaper kamen rider gates revive
      -download wallpaper kamen rider gates revive shippu
      -download wallpaper kamen rider gates revive goretsu
      -download wallpaper kamen rider gates revive hiryu
      -download wallpaper kamen rider gates revive majesty
      -download wallpaper kamen rider gates trinity
      -download wallpaper kamen rider gates grand zio
      -download wallpaper kamen rider gates ohma zio
      -download wallpaper kamen rider gates ohma form
      -download wallpaper kamen rider gates zi-o ii
      -download wallpaper kamen rider gates zi-o trinity
      -download wallpaper kamen rider gates zi-o decade armor
      -download wallpaper kamen rider gates zi-o geiz majesty armor
      -download wallpaper kamen rider gates zi-o geiz revive armor
      -download wallpaper kamen rider gates zi-o woz armor
      -download wallpaper kamen rider gates zi-o tsukuyomi armor
      -download wallpaper kamen rider gates zi-o sougo armor
      -download wallpaper kamen rider gates zi-o another riders armor
      -download wallpaper kamen rider gates zi-o final form ride armor
      -download wallpaper kamen rider gates zi-o final form time armor
      -download wallpaper kamen rider gates zi-o final form time mazine armor
      -download wallpaper kamen rider gates zi-o final form time king armor
      -download wallpaper kamen rider gates zi-o final form time ohma armor
      -download wallpaper kamen rider gates woz ginga finaly armor
      -download wallpaper kamen rider gates woz ginga taiyo armor
      -download wallpaper kamen rider gates woz ginga wakusei armor
      -download wallpaper kamen rider gates woz future ring shinobi armor
      -download wallpaper kamen rider gates woz future ring quiz armor
      -download wallpaper kamen rider gates woz future ring kita armor

      -

      Some of the most popular search engines are Google, Bing, Yahoo, DuckDuckGo, etc. They will display a list of websites that match your query. You can click on the links to visit the websites and see if they have the wallpapers you want. You can also use the images tab or filter to see only the images related to your query.

      -

      For example, here are some of the results from Google when we searched for "wallpaper kamen rider geats":


      How to download the hall ticket for the exam?

      -

      The TNPSC will release the hall ticket for the Group 2 exam 2022 on its website www.tnpsc.gov.in. The hall ticket will be available for download at least 10 days before the exam date. Candidates who have successfully applied for the exam can download their hall ticket by following these steps:

      -

      -
        -
      1. Visit the official TNPSC website www.tnpsc.gov.in.
      2. Click on the "Hall Ticket Download" link on the home page.
      3. Select the "TNPSC Group 2 Exam 2022" option from the drop-down menu.
      4. Enter your application ID and date of birth and click "Submit".
      5. Your hall ticket will appear on the screen. Check all the details carefully and download it.
      6. Take a printout of the hall ticket and keep it safe for future reference.
      - -

      What are the stages and subjects of the exam?

      -

      The TNPSC Group 2 exam 2022 consists of two stages, Preliminary and Main, followed by an interview for some posts. The preliminary exam is a screening test that shortlists candidates for the main exam. The main exam is a descriptive test that assesses the candidates' subject knowledge and writing skills. The interview is a personality test that evaluates the candidates' suitability for the posts.

      -

      The subjects and marks distribution for each stage are as follows:


      What are the marks and duration of each stage?

      -

      The marks and duration of each stage are as follows:


      What are the topics and subtopics covered in each subject?

      -

      The topics and subtopics covered in each subject are as follows:

      -

      General Studies (Degree Standard)

      -
        -
      • Unit I: General Science
        • Chemistry: Elements and compounds, Acids, bases and salts, Oxidation and reduction, Chemistry of ores and metals, Carbon, nitrogen and their compounds, Fertilizers, pesticides, insecticides, Biochemistry and biotechnology, Electrochemistry, Polymers and plastics.
        • Biology: Botany: Main concepts of life science, The cell as the basic unit of life, Classification of living organisms, Nutrition and dietetics, Respiration. Zoology: Blood and blood circulation. Endocrine system. Reproductive system. Genetics, the science of heredity. Environment, ecology, health and hygiene. Biodiversity and its conservation. Human diseases. Prevention and remedies. Communicable and non-communicable diseases. Alcoholism and drug abuse. Animals, plants and human life.
      • Unit II: Current Events
        • History: Latest diary of events - National - National symbols - Profile of States - Eminent persons and places in the news - Sports and games - Books and authors - Awards and honours - Cultural panorama - Latest historical events - India and its neighbours - Latest terminology - Appointments - who is who?
        • Political Science: Problems in the conduct of public elections - Political parties and the political system in India - Public awareness and general administration - Role of voluntary organisations and the government, welfare-oriented government and its utility.
        • Geography: Geographical landmarks - Policy on environment and ecology.
        • Economics: Current socio-economic problems - New economic policy and the government sector.
        • Science: Latest inventions in science and technology - Latest discoveries in health science - Mass media and communication.
      • Unit III: Geography
      • Unit IV: History and Culture of India
        • Indus Valley civilisation - Guptas, Delhi Sultans, Mughals and Marathas - Age of Vijayanagaram and the Bahmanis - South Indian history - Culture and heritage of the Tamil people - Advent of the European invasion - Expansion and consolidation of British rule - Effect of British rule on socio-economic factors - Social reforms and religious movements - India since independence - Characteristics of Indian culture - Unity in diversity: race, colour, language, custom - India as a secular state - Organisations for fine arts, dance, drama, music - Growth of the rationalist, Dravidian movement in TN - Political parties and populist schemes - Prominent personalities in the various spheres - Arts, Science, Literature and Philosophy - Mother Teresa, Swami Vivekananda, Pandit Ravishankar, M.S.Subbulakshmi, Rukmani Arundel and J.Krishnamoorthy etc.
      • Unit V: Indian Polity
        • Constitution of India - Preamble to the Constitution - Salient features of the Constitution - Union, State and territory - Citizenship rights - Fundamental rights - Fundamental duties - Human rights charter - Union legislature - Parliament - State executive - State legislature - Assembly - Status of Jammu and Kashmir - Local government - panchayat raj - Tamil Nadu - Judiciary in India - Rule of law/Due process of law - Indian federalism - centre-State relations - Emergency provisions - Elections - Election Commission, Union and State - Official language and the Eighth Schedule - Amendments to the Constitution - Schedules of the Constitution - Administrative reforms and tribunals - Corruption in public life - Anti-corruption measures - Central Vigilance Commission, lok-adalats, Ombudsman, Comptroller and Auditor General of India - Right to information - Central and State Commission - Empowerment of women - Voluntary organisations and public grievance redressal - Forms of consumer protection.
      • Unit VI: Indian Economy
        • Nature of the Indian economy - Five-year plan models, an assessment - Land reforms and agriculture - Application of science in agriculture - Industrial growth - Rural welfare-oriented programmes - Social sector problems: population, education, health, employment, poverty - Economic trends in Tamil Nadu - Energy: different sources and development - Finance Commission - Planning Commission - National Development Council - Poverty alleviation programmes - HRD - Sustainable economic growth - Economic growth and social justice - Balanced growth - NITI Aayog - Land-related acts and laws.
      • Unit VII: Indian National Movement
        • National renaissance - Early uprisings against British rule - 1857 Revolt - Indian National Congress - Emergence of national leaders: Gandhi, Nehru, Tagore, Netaji - Growth of militant movements - Different modes of agitation - Era of different acts and pacts - World war and the final phase of the struggle - Communalism led to partition - Role of Tamil Nadu in the freedom struggle - Rajaji, VOC, Periyar, Bharathiar and others - Birth of political parties/the political system in India since independence.
      • Unit VIII: Aptitude and Mental Ability Test (SSLC Standard)
        • Conversion of information to data - Collection, compilation and presentation of data - Tables, graphs, diagrams - Parametric representation of data - Analytical interpretation of data - Simplification - Percentage - Highest Common Factor (HCF) - Lowest Common Multiple (LCM) - Ratio and proportion - Simple interest - Compound interest - Area - Volume - Time and behavioural ability: basic terms, communications in information technology - Application of Information and Communication Technology (ICT) - Decision making and problem solving - Logical reasoning - Puzzles - Dice - Visual reasoning - Alpha-numeric reasoning - Number series - Logical number/alphabetical/diagrammatic sequences.

      Tamil or English (SSLC Standard)

      -
        -
      • Grammar
        • Choose the correct 'Synonyms' for the underlined word from the given options
        • Choose the correct 'Antonyms' for the underlined word from the given options
        • Select the correct word (Prefix, Suffix)
        • Fill in the blanks with a suitable article
        • Fill in the blanks with a suitable preposition
        • Select the correct question tag
        • Select the correct tense
        • Select the correct voice
        • Fill in the blanks (Infinitive, Gerund, Participle)
        • Identify the sentence pattern of the following sentence (Subject, Verb, Object....) Fill in the blanks with 'Homophones'
        • Find out the error (Articles, Prepositions, Noun, Verb, Adjective, Adverb)
        • Comprehension
        • Select the correct sentence
        • Find out the odd words (Verb, Noun, Adjective, Adverb)
        • Select the correct plural forms
        • Identify the sentence (Simple, Compound, Complex sentence)
        • Identify the correct degree.
        • Form a new word by blending the words.
        • Form compound words (Eg: Noun+Verb, Gerund+Noun)
      • Literature
        • Figures of speech observed in the following poems:
          • Alliteration - Allusion - Simile - Metaphor - Personification - Oxymoron - Onomatopoeia - Anaphora - Ellipsis - Rhyme Scheme - Rhyming Words - Repetition - Apostrophe
          • A Psalm of Life - Women's Rights - The Nation United - English Words - Snake - The Man He Killed - Off to Outer Space Tomorrow Morning - Sonnet No.116 - The Solitary Reaper - Be the Best - O Captain My Captain - Laugh and Be Merry - Earth - Don't Quit - The Apology - Be Glad Your Nose is on Your Face - A Sonnet for My Incomparable Mother - The Flying Wonder - To a Millionaire - The Piano - Man - Going for Water - The Cry of the Children - Migrant Bird - Shilpi.
      • Important lines from the poems.
          • Where the Mind is Without Fear - The Solitary Reaper - Going for Water - A Psalm of Life - Be the Best - Sonnet No.116
      • Questions on the biography of
          • Mahatma Gandhi - Jawaharlal Nehru - Subash Chandra Bose - Helen Keller - Kalpana Chawla - Dr.Salim Ali - Rani of Jhansi - Nelson Mandela - Abraham Lincoln
      • Questions on Shakespeare
          • Merchant of Venice (Act IV Court Scene) - Julius Caesar (Act III Scene 2) - Sonnet 116
      • Questions from Oscar Wilde
          • The Model Millionaire - The Selfish Giant
      • Dr.Karl Paulnack
          • Music - The Hope Raiser
      • Comprehension questions from the following motivational essays:
        • Read the instructions on the hall ticket and the question paper carefully.
        • Fill in the required details on the answer sheet and the question booklet correctly.
        • Attempt all the questions, as there is no negative marking for the main exam.
        • Do not carry electronic devices such as mobile phones, calculators, smart watches, etc. into the exam hall.
        • Do not carry books, notes, documents or any other material into the exam hall.
        • Do not leave the exam hall before the allotted time without the invigilator's permission.
        -

        TNPSC Group 2 Hall Ticket FAQs

        -

        How to recover the application ID if it is forgotten?

        -

        If you have forgotten your application ID, you can recover it by following these steps:

        -
          -
        1. Visit the official TNPSC website www.tnpsc.gov.in.
        2. Click on the "Forgot Login ID" link on the home page.
        3. Enter your registered email ID and date of birth and click "Submit".
        4. Your application ID will be sent to your email ID.
        -

        How to rectify any error in the hall ticket?

        -

        If you find any error or discrepancy in your hall ticket, such as spelling mistakes, a wrong photograph, incorrect details, etc., you should contact the TNPSC immediately and have it rectified. You can contact the TNPSC by phone, by email, or in person at its office. The contact details of the TNPSC are as follows:

        -

        Tamil Nadu Public Service Commission
        -Frazer Bridge Road
        -V.O.C.Nagar, Park Town, Chennai-600003, Tamil Nadu, INDIA
        -Phone: +91-44-25300300 (12 lines)
        -Fax: +91-44-25300598
        -Email: coetnpsc.tn@nic.in, contacttnpsc@gmail.com
        -Website: www.tnpsc.gov.in

        -

        How to contact the TNPSC in case of any problem or query?

        -

        If you have any problem or query regarding the TNPSC Group 2 exam 2022 or the hall ticket, you can contact the TNPSC by phone, by email, or in person at its office. The contact details of the TNPSC are as follows:

        -

        Tamil Nadu Public Service Commission
        -Frazer Bridge Road
        -V.O.C.Nagar, Park Town, Chennai-600003, Tamil Nadu, INDIA
        -Phone: +91-44-25300300 (12 lines)
        -Fax: +91-44-25300598
        -Email: coetnpsc.tn@nic.in, contacttnpsc@gmail.com
        -Website: www.tnpsc.gov.in

        -

        Conclusion

        - -

        The TNPSC Group 2 exam 2022 is a golden opportunity to join the prestigious government services in Tamil Nadu. To crack this exam, you need to work hard and smart. You need to revise your concepts, practice mock tests, improve your speed and accuracy, and manage your time well. You also need to stay calm and confident on exam day and avoid any stress or panic.

        -

        We wish you all the best for your exam and hope that you achieve your dream of becoming a TNPSC Group 2 officer. Remember that nothing is impossible if you have faith in yourself and your abilities. Keep your spirits high and do not give up. You can do it!

        -

        TNPSC Group 2 Hall Ticket FAQs

        -

        Q1. When will the hall ticket for the TNPSC Group 2 main exam 2022 be available?

        -

        A1. The hall ticket for the TNPSC Group 2 main exam 2022 will be available at least 10 days before the exam date. Candidates can download it from the official TNPSC website www.tnpsc.gov.in by entering their application ID and date of birth.

        -

        Q2. What happens if I forget to carry my hall ticket or photo identity proof to the exam centre?

        -

        A2. If you forget to carry your hall ticket or photo identity proof to the exam centre, you will not be allowed to take the exam. The hall ticket and photo identity proof are mandatory documents that verify your identity and eligibility for the exam. Therefore, you must make sure to carry them with you on exam day.

        -

        Q3. Can I change my exam centre after downloading the hall ticket?

        -

        A3. No, you cannot change your exam centre after downloading the hall ticket. The exam centre once allotted by the TNPSC is final and cannot be changed under any circumstances. You should carefully choose your preferred exam centre while filling in the application form and download your hall ticket accordingly.

        -

        Q4. How can I prepare for the TNPSC Group 2 exam 2022?

        - -
          -
        1. Go through the official notification and understand the eligibility criteria, application process, selection process, reservation policy, etc.
        2. Check the exam pattern and syllabus and plan your study schedule accordingly.
        3. Refer to the best books and study materials for each subject and topic.
        4. Revise your concepts and formulas regularly and take notes of important points.
        5. Practice previous year question papers and mock tests to improve your speed and accuracy.
        6. Analyse your performance and identify your strengths and weaknesses.
        7. Work on your weak areas and clear your doubts with experts or mentors.
        8. Stay updated on current affairs and general knowledge by reading newspapers, magazines, etc.
        9. Improve your language skills by reading, writing, speaking, and listening in English or Tamil.
        10. Take care of your health and well-being by eating well, sleeping well, exercising, and relaxing well.
        -

        Q5. What are the career prospects of TNPSC Group 2 services?

        -

        A5. The career prospects of TNPSC Group 2 services are very bright and rewarding. Selected candidates will receive a good salary, job security, career growth, and other benefits as per the state government norms. They will also have the opportunity to serve the public and contribute to the development of Tamil Nadu. Some of the posts in TNPSC Group 2 services are as follows:

        -
          -
        • Deputy Commercial Tax Officer
        • Assistant Section Officer
        • Audit Officer
        • Senior Inspector of Cooperative Societies
        • Deputy Inspector of Labour
        • Sub Registrar
        • Municipal Commissioner
        • Revenue Assistant
        • Handloom Inspector
        • Assistant Jailor
        • Special Assistant
        • Audit Assistant
        • Industrial Cooperatives Supervisor
        • Head Clerk
        • Junior Employment Officer
        • Executive Officer, Grade II
        • Revenue Officer, etc.
        -

        I hope you found this article useful and informative. If you have any comments or suggestions, feel free to share them with me. I would love to hear from you and improve my writing skills. Thank you for reading, and have a great day!

        64aa2da5cf
        -
        -
        \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/unicode_utils.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/unicode_utils.py deleted file mode 100644 index e84e65e3e14152a2ba6e6e05d914f0e1bbef187b..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/unicode_utils.py +++ /dev/null @@ -1,42 +0,0 @@ -import unicodedata -import sys - - -# HFS Plus uses decomposed UTF-8 -def decompose(path): - if isinstance(path, str): - return unicodedata.normalize('NFD', path) - try: - path = path.decode('utf-8') - path = unicodedata.normalize('NFD', path) - path = path.encode('utf-8') - except UnicodeError: - pass # Not UTF-8 - return path - - -def filesys_decode(path): - """ - Ensure that the given path is decoded, - NONE when no expected encoding works - """ - - if isinstance(path, str): - return path - - fs_enc = sys.getfilesystemencoding() or 'utf-8' - candidates = fs_enc, 'utf-8' - - for enc in candidates: - try: - return path.decode(enc) - except UnicodeDecodeError: - continue - - -def try_encode(string, enc): - "turn unicode encoding into a functional routine" - try: - return string.encode(enc) - except UnicodeEncodeError: - return None diff --git a/spaces/Boadiwaa/Recipes/openai/api_resources/abstract/__init__.py b/spaces/Boadiwaa/Recipes/openai/api_resources/abstract/__init__.py deleted file mode 100644 index 32830e273c9ffbda9747bf25e3ac2a8a572c3568..0000000000000000000000000000000000000000 --- a/spaces/Boadiwaa/Recipes/openai/api_resources/abstract/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# flake8: noqa - -from openai.api_resources.abstract.api_resource import APIResource -from openai.api_resources.abstract.createable_api_resource import CreateableAPIResource -from openai.api_resources.abstract.deletable_api_resource import DeletableAPIResource -from openai.api_resources.abstract.listable_api_resource import ListableAPIResource -from openai.api_resources.abstract.nested_resource_class_methods import ( - nested_resource_class_methods, -) -from openai.api_resources.abstract.updateable_api_resource import UpdateableAPIResource diff --git a/spaces/Boilin/URetinex-Net/network/Math_Module.py b/spaces/Boilin/URetinex-Net/network/Math_Module.py deleted file mode 100644 index 752c07fe48696d6a9d08e7e7ba8b55cf3400ca95..0000000000000000000000000000000000000000 --- a/spaces/Boilin/URetinex-Net/network/Math_Module.py +++ /dev/null @@ -1,38 +0,0 @@ -import torch -import torch.nn as nn -from torchvision.transforms import Grayscale - - -class P(nn.Module): - """ - to solve min(P) = ||I-PQ||^2 + γ||P-R||^2 - this is a least square problem - how to solve? 
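    A quick derivation sketch (all operations element-wise, matching the tensors used below):
    setting d/dP [ (I - P*Q)^2 + gamma*(P - R)^2 ] = 0 gives -2*Q*(I - P*Q) + 2*gamma*(P - R) = 0,
    i.e. P*(Q*Q + gamma) = I*Q + gamma*R, which rearranges to the closed form stated next.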
- P* = (gamma*R + I*Q) / (Q*Q + gamma) - """ - def __init__(self): - super().__init__() - - def forward(self, I, Q, R, gamma): - return ((I * Q + gamma * R) / (gamma + Q * Q)) - -class Q(nn.Module): - """ - to solve min(Q) = ||I-PQ||^2 + λ||Q-L||^2 - Q* = (lamda*L + I*P) / (P*P + lamda) - """ - def __init__(self): - super().__init__() - - def forward(self, I, P, L, lamda): - - IR = I[:, 0:1, :, :] - IG = I[:, 1:2, :, :] - IB = I[:, 2:3, :, :] - - PR = P[:, 0:1, :, :] - PG = P[:, 1:2, :, :] - PB = P[:, 2:3, :, :] - - return (IR*PR + IG*PG + IB*PB + lamda*L) / ((PR*PR + PG*PG + PB*PB) + lamda) - \ No newline at end of file diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/checkpoint/catalog.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/checkpoint/catalog.py deleted file mode 100644 index 56a656e804c653e89694e73a4943b6a14bda1daa..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/checkpoint/catalog.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import logging -from fvcore.common.file_io import PathHandler, PathManager - - -class ModelCatalog(object): - """ - Store mappings from names to third-party models. - """ - - S3_C2_DETECTRON_PREFIX = "https://dl.fbaipublicfiles.com/detectron" - - # MSRA models have STRIDE_IN_1X1=True. False otherwise. - # NOTE: all BN models here have fused BN into an affine layer. - # As a result, you should only load them to a model with "FrozenBN". - # Loading them to a model with regular BN or SyncBN is wrong. - # Even when loaded to FrozenBN, it is still different from affine by an epsilon, - # which should be negligible for training. - # NOTE: all models here uses PIXEL_STD=[1,1,1] - C2_IMAGENET_MODELS = { - "MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl", - "MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl", - "FAIR/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl", - "FAIR/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl", - "FAIR/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl", - "FAIR/X-101-64x4d": "ImageNetPretrained/FBResNeXt/X-101-64x4d.pkl", - "FAIR/X-152-32x8d-IN5k": "ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl", - } - - C2_DETECTRON_PATH_FORMAT = ( - "{prefix}/{url}/output/train/{dataset}/{type}/model_final.pkl" - ) # noqa B950 - - C2_DATASET_COCO = "coco_2014_train%3Acoco_2014_valminusminival" - C2_DATASET_COCO_KEYPOINTS = "keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival" - - # format: {model_name} -> part of the url - C2_DETECTRON_MODELS = { - "35857197/e2e_faster_rcnn_R-50-C4_1x": "35857197/12_2017_baselines/e2e_faster_rcnn_R-50-C4_1x.yaml.01_33_49.iAX0mXvW", # noqa B950 - "35857345/e2e_faster_rcnn_R-50-FPN_1x": "35857345/12_2017_baselines/e2e_faster_rcnn_R-50-FPN_1x.yaml.01_36_30.cUF7QR7I", # noqa B950 - "35857890/e2e_faster_rcnn_R-101-FPN_1x": "35857890/12_2017_baselines/e2e_faster_rcnn_R-101-FPN_1x.yaml.01_38_50.sNxI7sX7", # noqa B950 - "36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "36761737/12_2017_baselines/e2e_faster_rcnn_X-101-32x8d-FPN_1x.yaml.06_31_39.5MIHi1fZ", # noqa B950 - "35858791/e2e_mask_rcnn_R-50-C4_1x": "35858791/12_2017_baselines/e2e_mask_rcnn_R-50-C4_1x.yaml.01_45_57.ZgkA7hPB", # noqa B950 - "35858933/e2e_mask_rcnn_R-50-FPN_1x": "35858933/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml.01_48_14.DzEQe4wC", # noqa B950 - "35861795/e2e_mask_rcnn_R-101-FPN_1x": 
"35861795/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_1x.yaml.02_31_37.KqyEK4tT", # noqa B950 - "36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "36761843/12_2017_baselines/e2e_mask_rcnn_X-101-32x8d-FPN_1x.yaml.06_35_59.RZotkLKI", # noqa B950 - "48616381/e2e_mask_rcnn_R-50-FPN_2x_gn": "GN/48616381/04_2018_gn_baselines/e2e_mask_rcnn_R-50-FPN_2x_gn_0416.13_23_38.bTlTI97Q", # noqa B950 - "37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "37697547/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_1x.yaml.08_42_54.kdzV35ao", # noqa B950 - "35998355/rpn_R-50-C4_1x": "35998355/12_2017_baselines/rpn_R-50-C4_1x.yaml.08_00_43.njH5oD9L", # noqa B950 - "35998814/rpn_R-50-FPN_1x": "35998814/12_2017_baselines/rpn_R-50-FPN_1x.yaml.08_06_03.Axg0r179", # noqa B950 - "36225147/fast_R-50-FPN_1x": "36225147/12_2017_baselines/fast_rcnn_R-50-FPN_1x.yaml.08_39_09.L3obSdQ2", # noqa B950 - } - - @staticmethod - def get(name): - if name.startswith("Caffe2Detectron/COCO"): - return ModelCatalog._get_c2_detectron_baseline(name) - if name.startswith("ImageNetPretrained/"): - return ModelCatalog._get_c2_imagenet_pretrained(name) - raise RuntimeError("model not present in the catalog: {}".format(name)) - - @staticmethod - def _get_c2_imagenet_pretrained(name): - prefix = ModelCatalog.S3_C2_DETECTRON_PREFIX - name = name[len("ImageNetPretrained/") :] - name = ModelCatalog.C2_IMAGENET_MODELS[name] - url = "/".join([prefix, name]) - return url - - @staticmethod - def _get_c2_detectron_baseline(name): - name = name[len("Caffe2Detectron/COCO/") :] - url = ModelCatalog.C2_DETECTRON_MODELS[name] - if "keypoint_rcnn" in name: - dataset = ModelCatalog.C2_DATASET_COCO_KEYPOINTS - else: - dataset = ModelCatalog.C2_DATASET_COCO - - if "35998355/rpn_R-50-C4_1x" in name: - # this one model is somehow different from others .. - type = "rpn" - else: - type = "generalized_rcnn" - - # Detectron C2 models are stored in the structure defined in `C2_DETECTRON_PATH_FORMAT`. - url = ModelCatalog.C2_DETECTRON_PATH_FORMAT.format( - prefix=ModelCatalog.S3_C2_DETECTRON_PREFIX, url=url, type=type, dataset=dataset - ) - return url - - -class ModelCatalogHandler(PathHandler): - """ - Resolve URL like catalog://. - """ - - PREFIX = "catalog://" - - def _get_supported_prefixes(self): - return [self.PREFIX] - - def _get_local_path(self, path): - logger = logging.getLogger(__name__) - catalog_path = ModelCatalog.get(path[len(self.PREFIX) :]) - logger.info("Catalog entry {} points to {}".format(path, catalog_path)) - return PathManager.get_local_path(catalog_path) - - def _open(self, path, mode="r", **kwargs): - return PathManager.open(self._get_local_path(path), mode, **kwargs) - - -class Detectron2Handler(PathHandler): - """ - Resolve anything that's in Detectron2 model zoo. 
- """ - - PREFIX = "detectron2://" - S3_DETECTRON2_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/" - - def _get_supported_prefixes(self): - return [self.PREFIX] - - def _get_local_path(self, path): - name = path[len(self.PREFIX) :] - return PathManager.get_local_path(self.S3_DETECTRON2_PREFIX + name) - - def _open(self, path, mode="r", **kwargs): - return PathManager.open(self._get_local_path(path), mode, **kwargs) - - -PathManager.register_handler(ModelCatalogHandler()) -PathManager.register_handler(Detectron2Handler()) diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/export/caffe2_modeling.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/export/caffe2_modeling.py deleted file mode 100644 index 982fec5e0c45868f8401f7da2f907287f7ddbbac..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/export/caffe2_modeling.py +++ /dev/null @@ -1,492 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved - -import functools -import io -import struct -import types -import torch - -from detectron2.modeling import meta_arch -from detectron2.modeling.box_regression import Box2BoxTransform -from detectron2.modeling.meta_arch.panoptic_fpn import combine_semantic_and_instance_outputs -from detectron2.modeling.postprocessing import detector_postprocess, sem_seg_postprocess -from detectron2.modeling.roi_heads import keypoint_head -from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes - -from .c10 import Caffe2Compatible -from .patcher import ROIHeadsPatcher, patch_generalized_rcnn -from .shared import ( - alias, - check_set_pb_arg, - get_pb_arg_floats, - get_pb_arg_valf, - get_pb_arg_vali, - get_pb_arg_vals, - mock_torch_nn_functional_interpolate, -) - - -def assemble_rcnn_outputs_by_name(image_sizes, tensor_outputs, force_mask_on=False): - """ - A function to assemble caffe2 model's outputs (i.e. Dict[str, Tensor]) - to detectron2's format (i.e. list of Instances instance). - This only works when the model follows the Caffe2 detectron's naming convention. - - Args: - image_sizes (List[List[int, int]]): [H, W] of every image. - tensor_outputs (Dict[str, Tensor]): external_output to its tensor. 
- - force_mask_on (Bool): if true, the it make sure there'll be pred_masks even - if the mask is not found from tensor_outputs (usually due to model crash) - """ - - results = [Instances(image_size) for image_size in image_sizes] - - batch_splits = tensor_outputs.get("batch_splits", None) - if batch_splits: - raise NotImplementedError() - assert len(image_sizes) == 1 - result = results[0] - - bbox_nms = tensor_outputs["bbox_nms"] - score_nms = tensor_outputs["score_nms"] - class_nms = tensor_outputs["class_nms"] - # Detection will always success because Conv support 0-batch - assert bbox_nms is not None - assert score_nms is not None - assert class_nms is not None - if bbox_nms.shape[1] == 5: - result.pred_boxes = RotatedBoxes(bbox_nms) - else: - result.pred_boxes = Boxes(bbox_nms) - result.scores = score_nms - result.pred_classes = class_nms.to(torch.int64) - - mask_fcn_probs = tensor_outputs.get("mask_fcn_probs", None) - if mask_fcn_probs is not None: - # finish the mask pred - mask_probs_pred = mask_fcn_probs - num_masks = mask_probs_pred.shape[0] - class_pred = result.pred_classes - indices = torch.arange(num_masks, device=class_pred.device) - mask_probs_pred = mask_probs_pred[indices, class_pred][:, None] - result.pred_masks = mask_probs_pred - elif force_mask_on: - # NOTE: there's no way to know the height/width of mask here, it won't be - # used anyway when batch size is 0, so just set them to 0. - result.pred_masks = torch.zeros([0, 1, 0, 0], dtype=torch.uint8) - - keypoints_out = tensor_outputs.get("keypoints_out", None) - kps_score = tensor_outputs.get("kps_score", None) - if keypoints_out is not None: - # keypoints_out: [N, 4, #kypoints], where 4 is in order of (x, y, score, prob) - keypoints_tensor = keypoints_out - # NOTE: it's possible that prob is not calculated if "should_output_softmax" - # is set to False in HeatmapMaxKeypoint, so just using raw score, seems - # it doesn't affect mAP. TODO: check more carefully. - keypoint_xyp = keypoints_tensor.transpose(1, 2)[:, :, [0, 1, 2]] - result.pred_keypoints = keypoint_xyp - elif kps_score is not None: - # keypoint heatmap to sparse data structure - pred_keypoint_logits = kps_score - keypoint_head.keypoint_rcnn_inference(pred_keypoint_logits, [result]) - - return results - - -def _cast_to_f32(f64): - return struct.unpack("f", struct.pack("f", f64))[0] - - -def set_caffe2_compatible_tensor_mode(model, enable=True): - def _fn(m): - if isinstance(m, Caffe2Compatible): - m.tensor_mode = enable - - model.apply(_fn) - - -def convert_batched_inputs_to_c2_format(batched_inputs, size_divisibility, device): - """ - See get_caffe2_inputs() below. - """ - assert all(isinstance(x, dict) for x in batched_inputs) - assert all(x["image"].dim() == 3 for x in batched_inputs) - - images = [x["image"] for x in batched_inputs] - images = ImageList.from_tensors(images, size_divisibility) - - im_info = [] - for input_per_image, image_size in zip(batched_inputs, images.image_sizes): - target_height = input_per_image.get("height", image_size[0]) - target_width = input_per_image.get("width", image_size[1]) # noqa - # NOTE: The scale inside im_info is kept as convention and for providing - # post-processing information if further processing is needed. For - # current Caffe2 model definitions that don't include post-processing inside - # the model, this number is not used. - # NOTE: There can be a slight difference between width and height - # scales, using a single number can results in numerical difference - # compared with D2's post-processing. 
- scale = target_height / image_size[0] - im_info.append([image_size[0], image_size[1], scale]) - im_info = torch.Tensor(im_info) - - return images.tensor.to(device), im_info.to(device) - - -class Caffe2MetaArch(Caffe2Compatible, torch.nn.Module): - """ - Base class for caffe2-compatible implementation of a meta architecture. - The forward is traceable and its traced graph can be converted to caffe2 - graph through ONNX. - """ - - def __init__(self, cfg, torch_model): - """ - Args: - cfg (CfgNode): - torch_model (nn.Module): the detectron2 model (meta_arch) to be - converted. - """ - super().__init__() - self._wrapped_model = torch_model - self.eval() - set_caffe2_compatible_tensor_mode(self, True) - - def get_caffe2_inputs(self, batched_inputs): - """ - Convert pytorch-style structured inputs to caffe2-style inputs that - are tuples of tensors. - - Args: - batched_inputs (list[dict]): inputs to a detectron2 model - in its standard format. Each dict has "image" (CHW tensor), and optionally - "height" and "width". - - Returns: - tuple[Tensor]: - tuple of tensors that will be the inputs to the - :meth:`forward` method. For existing models, the first - is an NCHW tensor (padded and batched); the second is - a im_info Nx3 tensor, where the rows are - (height, width, unused legacy parameter) - """ - return convert_batched_inputs_to_c2_format( - batched_inputs, - self._wrapped_model.backbone.size_divisibility, - self._wrapped_model.device, - ) - - def encode_additional_info(self, predict_net, init_net): - """ - Save extra metadata that will be used by inference in the output protobuf. - """ - pass - - def forward(self, inputs): - """ - Run the forward in caffe2-style. It has to use caffe2-compatible ops - and the method will be used for tracing. - - Args: - inputs (tuple[Tensor]): inputs defined by :meth:`get_caffe2_input`. - They will be the inputs of the converted caffe2 graph. - - Returns: - tuple[Tensor]: output tensors. They will be the outputs of the - converted caffe2 graph. - """ - raise NotImplementedError - - def _caffe2_preprocess_image(self, inputs): - """ - Caffe2 implementation of preprocess_image, which is called inside each MetaArch's forward. - It normalizes the input images, and the final caffe2 graph assumes the - inputs have been batched already. - """ - data, im_info = inputs - data = alias(data, "data") - im_info = alias(im_info, "im_info") - normalized_data = self._wrapped_model.normalizer(data) - normalized_data = alias(normalized_data, "normalized_data") - - # Pack (data, im_info) into ImageList which is recognized by self.inference. - images = ImageList(tensor=normalized_data, image_sizes=im_info) - return images - - @staticmethod - def get_outputs_converter(predict_net, init_net): - """ - Creates a function that converts outputs of the caffe2 model to - detectron2's standard format. - The function uses information in `predict_net` and `init_net` that are - available at inferene time. Therefore the function logic can be used in inference. - - The returned function has the following signature: - - def convert(batched_inputs, c2_inputs, c2_results) -> detectron2_outputs - - Where - - * batched_inputs (list[dict]): the original input format of the meta arch - * c2_inputs (dict[str, Tensor]): the caffe2 inputs. - * c2_results (dict[str, Tensor]): the caffe2 output format, - corresponding to the outputs of the :meth:`forward` function. - * detectron2_outputs: the original output format of the meta arch. 
- - This function can be used to compare the outputs of the original meta arch and - the converted caffe2 graph. - - Returns: - callable: a callable of the above signature. - """ - raise NotImplementedError - - -class Caffe2GeneralizedRCNN(Caffe2MetaArch): - def __init__(self, cfg, torch_model): - assert isinstance(torch_model, meta_arch.GeneralizedRCNN) - torch_model = patch_generalized_rcnn(torch_model) - super().__init__(cfg, torch_model) - - self.roi_heads_patcher = ROIHeadsPatcher(cfg, self._wrapped_model.roi_heads) - - def encode_additional_info(self, predict_net, init_net): - size_divisibility = self._wrapped_model.backbone.size_divisibility - check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility) - check_set_pb_arg( - predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii") - ) - check_set_pb_arg(predict_net, "meta_architecture", "s", b"GeneralizedRCNN") - - @mock_torch_nn_functional_interpolate() - def forward(self, inputs): - if not self.tensor_mode: - return self._wrapped_model.inference(inputs) - images = self._caffe2_preprocess_image(inputs) - features = self._wrapped_model.backbone(images.tensor) - proposals, _ = self._wrapped_model.proposal_generator(images, features) - with self.roi_heads_patcher.mock_roi_heads(): - detector_results, _ = self._wrapped_model.roi_heads(images, features, proposals) - return tuple(detector_results[0].flatten()) - - @staticmethod - def get_outputs_converter(predict_net, init_net): - def f(batched_inputs, c2_inputs, c2_results): - image_sizes = [[int(im[0]), int(im[1])] for im in c2_inputs["im_info"]] - results = assemble_rcnn_outputs_by_name(image_sizes, c2_results) - return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes) - - return f - - -class Caffe2PanopticFPN(Caffe2MetaArch): - def __init__(self, cfg, torch_model): - assert isinstance(torch_model, meta_arch.PanopticFPN) - torch_model = patch_generalized_rcnn(torch_model) - super().__init__(cfg, torch_model) - - self.roi_heads_patcher = ROIHeadsPatcher(cfg, self._wrapped_model.roi_heads) - - @mock_torch_nn_functional_interpolate() - def forward(self, inputs): - assert self.tensor_mode - images = self._caffe2_preprocess_image(inputs) - features = self._wrapped_model.backbone(images.tensor) - - sem_seg_results, _ = self._wrapped_model.sem_seg_head(features) - sem_seg_results = alias(sem_seg_results, "sem_seg") - - proposals, _ = self._wrapped_model.proposal_generator(images, features) - - with self.roi_heads_patcher.mock_roi_heads(self.tensor_mode): - detector_results, _ = self._wrapped_model.roi_heads(images, features, proposals) - - return tuple(detector_results[0].flatten()) + (sem_seg_results,) - - def encode_additional_info(self, predict_net, init_net): - size_divisibility = self._wrapped_model.backbone.size_divisibility - check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility) - check_set_pb_arg( - predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii") - ) - check_set_pb_arg(predict_net, "meta_architecture", "s", b"PanopticFPN") - - # Inference parameters: - check_set_pb_arg(predict_net, "combine_on", "i", self._wrapped_model.combine_on) - check_set_pb_arg( - predict_net, - "combine_overlap_threshold", - "f", - _cast_to_f32(self._wrapped_model.combine_overlap_threshold), - ) - check_set_pb_arg( - predict_net, - "combine_stuff_area_limit", - "i", - self._wrapped_model.combine_stuff_area_limit, - ) - check_set_pb_arg( - predict_net, - 
"combine_instances_confidence_threshold", - "f", - _cast_to_f32(self._wrapped_model.combine_instances_confidence_threshold), - ) - - @staticmethod - def get_outputs_converter(predict_net, init_net): - combine_on = get_pb_arg_vali(predict_net, "combine_on", None) - combine_overlap_threshold = get_pb_arg_valf(predict_net, "combine_overlap_threshold", None) - combine_stuff_area_limit = get_pb_arg_vali(predict_net, "combine_stuff_area_limit", None) - combine_instances_confidence_threshold = get_pb_arg_valf( - predict_net, "combine_instances_confidence_threshold", None - ) - - def f(batched_inputs, c2_inputs, c2_results): - image_sizes = [[int(im[0]), int(im[1])] for im in c2_inputs["im_info"]] - detector_results = assemble_rcnn_outputs_by_name( - image_sizes, c2_results, force_mask_on=True - ) - sem_seg_results = c2_results["sem_seg"] - - # copied from meta_arch/panoptic_fpn.py ... - processed_results = [] - for sem_seg_result, detector_result, input_per_image, image_size in zip( - sem_seg_results, detector_results, batched_inputs, image_sizes - ): - height = input_per_image.get("height", image_size[0]) - width = input_per_image.get("width", image_size[1]) - sem_seg_r = sem_seg_postprocess(sem_seg_result, image_size, height, width) - detector_r = detector_postprocess(detector_result, height, width) - - processed_results.append({"sem_seg": sem_seg_r, "instances": detector_r}) - - if combine_on: - panoptic_r = combine_semantic_and_instance_outputs( - detector_r, - sem_seg_r.argmax(dim=0), - combine_overlap_threshold, - combine_stuff_area_limit, - combine_instances_confidence_threshold, - ) - processed_results[-1]["panoptic_seg"] = panoptic_r - return processed_results - - return f - - -class Caffe2RetinaNet(Caffe2MetaArch): - def __init__(self, cfg, torch_model): - assert isinstance(torch_model, meta_arch.RetinaNet) - super().__init__(cfg, torch_model) - - @mock_torch_nn_functional_interpolate() - def forward(self, inputs): - assert self.tensor_mode - images = self._caffe2_preprocess_image(inputs) - - # explicitly return the images sizes to avoid removing "im_info" by ONNX - # since it's not used in the forward path - return_tensors = [images.image_sizes] - - features = self._wrapped_model.backbone(images.tensor) - features = [features[f] for f in self._wrapped_model.in_features] - for i, feature_i in enumerate(features): - features[i] = alias(feature_i, "feature_{}".format(i), is_backward=True) - return_tensors.append(features[i]) - - box_cls, box_delta = self._wrapped_model.head(features) - for i, (box_cls_i, box_delta_i) in enumerate(zip(box_cls, box_delta)): - return_tensors.append(alias(box_cls_i, "box_cls_{}".format(i))) - return_tensors.append(alias(box_delta_i, "box_delta_{}".format(i))) - - return tuple(return_tensors) - - def encode_additional_info(self, predict_net, init_net): - size_divisibility = self._wrapped_model.backbone.size_divisibility - check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility) - check_set_pb_arg( - predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii") - ) - check_set_pb_arg(predict_net, "meta_architecture", "s", b"RetinaNet") - - # Inference parameters: - check_set_pb_arg( - predict_net, "score_threshold", "f", _cast_to_f32(self._wrapped_model.score_threshold) - ) - check_set_pb_arg(predict_net, "topk_candidates", "i", self._wrapped_model.topk_candidates) - check_set_pb_arg( - predict_net, "nms_threshold", "f", _cast_to_f32(self._wrapped_model.nms_threshold) - ) - check_set_pb_arg( - predict_net, - 
"max_detections_per_image", - "i", - self._wrapped_model.max_detections_per_image, - ) - - check_set_pb_arg( - predict_net, - "bbox_reg_weights", - "floats", - [_cast_to_f32(w) for w in self._wrapped_model.box2box_transform.weights], - ) - self._encode_anchor_generator_cfg(predict_net) - - def _encode_anchor_generator_cfg(self, predict_net): - # serialize anchor_generator for future use - serialized_anchor_generator = io.BytesIO() - torch.save(self._wrapped_model.anchor_generator, serialized_anchor_generator) - # Ideally we can put anchor generating inside the model, then we don't - # need to store this information. - bytes = serialized_anchor_generator.getvalue() - check_set_pb_arg(predict_net, "serialized_anchor_generator", "s", bytes) - - @staticmethod - def get_outputs_converter(predict_net, init_net): - self = types.SimpleNamespace() - serialized_anchor_generator = io.BytesIO( - get_pb_arg_vals(predict_net, "serialized_anchor_generator", None) - ) - self.anchor_generator = torch.load(serialized_anchor_generator) - bbox_reg_weights = get_pb_arg_floats(predict_net, "bbox_reg_weights", None) - self.box2box_transform = Box2BoxTransform(weights=tuple(bbox_reg_weights)) - self.score_threshold = get_pb_arg_valf(predict_net, "score_threshold", None) - self.topk_candidates = get_pb_arg_vali(predict_net, "topk_candidates", None) - self.nms_threshold = get_pb_arg_valf(predict_net, "nms_threshold", None) - self.max_detections_per_image = get_pb_arg_vali( - predict_net, "max_detections_per_image", None - ) - - # hack to reuse inference code from RetinaNet - self.inference = functools.partial(meta_arch.RetinaNet.inference, self) - self.inference_single_image = functools.partial( - meta_arch.RetinaNet.inference_single_image, self - ) - - def f(batched_inputs, c2_inputs, c2_results): - image_sizes = [[int(im[0]), int(im[1])] for im in c2_inputs["im_info"]] - - num_features = len([x for x in c2_results.keys() if x.startswith("box_cls_")]) - box_cls = [c2_results["box_cls_{}".format(i)] for i in range(num_features)] - box_delta = [c2_results["box_delta_{}".format(i)] for i in range(num_features)] - - # For each feature level, feature should have the same batch size and - # spatial dimension as the box_cls and box_delta. - dummy_features = [box_delta[i].clone()[:, 0:0, :, :] for i in range(num_features)] - anchors = self.anchor_generator(dummy_features) - - # self.num_classess can be inferred - self.num_classes = box_cls[0].shape[1] // (box_delta[0].shape[1] // 4) - - results = self.inference(box_cls, box_delta, anchors, image_sizes) - return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes) - - return f - - -META_ARCH_CAFFE2_EXPORT_TYPE_MAP = { - "GeneralizedRCNN": Caffe2GeneralizedRCNN, - "PanopticFPN": Caffe2PanopticFPN, - "RetinaNet": Caffe2RetinaNet, -} diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/__init__.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/__init__.py deleted file mode 100644 index 19b74696b2f0590ccbdf1920bf63b353b01a9dd2..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -from .batch_norm import FrozenBatchNorm2d, get_norm, NaiveSyncBatchNorm -from .deform_conv import DeformConv, ModulatedDeformConv -from .mask_ops import paste_masks_in_image -from .nms import batched_nms, batched_nms_rotated, nms, nms_rotated -from .roi_align import ROIAlign, roi_align -from .roi_align_rotated import ROIAlignRotated, roi_align_rotated -from .shape_spec import ShapeSpec -from .wrappers import BatchNorm2d, Conv2d, ConvTranspose2d, cat, interpolate, Linear - -__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docker/README.md b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docker/README.md deleted file mode 100644 index 530b1b3580e8e5c03ef365ba55284b43b2aee127..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docker/README.md +++ /dev/null @@ -1,24 +0,0 @@ -## Run the container -Change to the *docker* directory of this repository: -``` -cd docker -USER_ID=$UID docker-compose run detectron2 -``` - -#### Using a persistent cache directory -Prevents models to be re-downloaded on every run, by storing them in a cache directory. - -`docker-compose run --volume=/path/to/cache:/tmp:rw detectron2` - -## Rebuild the container -Rebuild the container by `USER_ID=$UID docker-compose build detectron2`. -This is only necessary when `Dockerfile` has been changed. The initial build is done automatically. - -## Install new dependencies -Add the following to `Dockerfile` to make persistent changes. -``` -RUN sudo apt-get update && sudo apt-get install -y \ - nano vim emacs -RUN pip install --user pandas -``` -Or run them in the container to make temporary changes. diff --git a/spaces/CVPR/LIVE/filter.h b/spaces/CVPR/LIVE/filter.h deleted file mode 100644 index 2dd0b62acb83e94da89696e9a8024c4b919f6749..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/filter.h +++ /dev/null @@ -1,106 +0,0 @@ -#pragma once - -#include "diffvg.h" -#include "atomic.h" - -enum class FilterType { - Box, - Tent, - RadialParabolic, // 4/3(1 - (d/r)) - Hann // https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows -}; - -struct Filter { - FilterType type; - float radius; -}; - -struct DFilter { - float radius; -}; - -DEVICE -inline -float compute_filter_weight(const Filter &filter, - float dx, - float dy) { - if (fabs(dx) > filter.radius || fabs(dy) > filter.radius) { - return 0; - } - if (filter.type == FilterType::Box) { - return 1.f / square(2 * filter.radius); - } else if (filter.type == FilterType::Tent) { - return (filter.radius - fabs(dx)) * (filter.radius - fabs(dy)) / - square(square(filter.radius)); - } else if (filter.type == FilterType::RadialParabolic) { - return (4.f / 3.f) * (1 - square(dx / filter.radius)) * - (4.f / 3.f) * (1 - square(dy / filter.radius)); - } else { - assert(filter.type == FilterType::Hann); - // normalize dx, dy to [0, 1] - auto ndx = (dx / (2*filter.radius)) + 0.5f; - auto ndy = (dy / (2*filter.radius)) + 0.5f; - // the normalization factor is R^2 - return 0.5f * (1.f - cos(float(2 * M_PI) * ndx)) * - 0.5f * (1.f - cos(float(2 * M_PI) * ndy)) / - square(filter.radius); - } -} - -DEVICE -inline -void d_compute_filter_weight(const Filter &filter, - float dx, - float dy, - float d_return, - DFilter *d_filter) { - if (filter.type == FilterType::Box) { - // return 1.f / square(2 * filter.radius); - atomic_add(d_filter->radius, - d_return * (-2) * 2 * filter.radius / cubic(2 * 
filter.radius)); - } else if (filter.type == FilterType::Tent) { - // return (filer.radius - fabs(dx)) * (filer.radius - fabs(dy)) / - // square(square(filter.radius)); - auto fx = filter.radius - fabs(dx); - auto fy = filter.radius - fabs(dy); - auto norm = 1 / square(filter.radius); - auto d_fx = d_return * fy * norm; - auto d_fy = d_return * fx * norm; - auto d_norm = d_return * fx * fy; - atomic_add(d_filter->radius, - d_fx + d_fy + (-4) * d_norm / pow(filter.radius, 5)); - } else if (filter.type == FilterType::RadialParabolic) { - // return (4.f / 3.f) * (1 - square(dx / filter.radius)) * - // (4.f / 3.f) * (1 - square(dy / filter.radius)); - // auto d_square_x = d_return * (-4.f / 3.f); - // auto d_square_y = d_return * (-4.f / 3.f); - auto r3 = filter.radius * filter.radius * filter.radius; - auto d_radius = -(2 * square(dx) + 2 * square(dy)) / r3; - atomic_add(d_filter->radius, d_radius); - } else { - assert(filter.type == FilterType::Hann); - // // normalize dx, dy to [0, 1] - // auto ndx = (dx / (2*filter.radius)) + 0.5f; - // auto ndy = (dy / (2*filter.radius)) + 0.5f; - // // the normalization factor is R^2 - // return 0.5f * (1.f - cos(float(2 * M_PI) * ndx)) * - // 0.5f * (1.f - cos(float(2 * M_PI) * ndy)) / - // square(filter.radius); - - // normalize dx, dy to [0, 1] - auto ndx = (dx / (2*filter.radius)) + 0.5f; - auto ndy = (dy / (2*filter.radius)) + 0.5f; - auto fx = 0.5f * (1.f - cos(float(2*M_PI) * ndx)); - auto fy = 0.5f * (1.f - cos(float(2*M_PI) * ndy)); - auto norm = 1 / square(filter.radius); - auto d_fx = d_return * fy * norm; - auto d_fy = d_return * fx * norm; - auto d_norm = d_return * fx * fy; - auto d_ndx = d_fx * 0.5f * sin(float(2*M_PI) * ndx) * float(2*M_PI); - auto d_ndy = d_fy * 0.5f * sin(float(2*M_PI) * ndy) * float(2*M_PI); - atomic_add(d_filter->radius, - d_ndx * (-2*dx / square(2*filter.radius)) + - d_ndy * (-2*dy / square(2*filter.radius)) + - (-2) * d_norm / cubic(filter.radius)); - } -} diff --git a/spaces/CVPR/LIVE/thrust/thrust/random/detail/random_core_access.h b/spaces/CVPR/LIVE/thrust/thrust/random/detail/random_core_access.h deleted file mode 100644 index f03060e0ab06806c3c42d4857bd8bb1acb3eff66..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/random/detail/random_core_access.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
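The `filter.h` code above implements four reconstruction filters (Box, Tent, RadialParabolic, Hann) together with their radius gradients. As a hedged illustration of the forward weights only, here is a plain-Python re-implementation; the function and argument names are chosen for this sketch and are not part of diffvg's API.

```python
import math

def compute_filter_weight(ftype, radius, dx, dy):
    """Forward filter weights mirroring compute_filter_weight in filter.h (sketch)."""
    if abs(dx) > radius or abs(dy) > radius:
        return 0.0                                   # outside the filter support
    if ftype == "box":
        return 1.0 / (2.0 * radius) ** 2             # constant over a (2r x 2r) square
    if ftype == "tent":
        return (radius - abs(dx)) * (radius - abs(dy)) / radius ** 4
    if ftype == "radial_parabolic":
        return (4.0 / 3.0) * (1.0 - (dx / radius) ** 2) * \
               (4.0 / 3.0) * (1.0 - (dy / radius) ** 2)
    # Hann: normalize offsets to [0, 1]; the normalization factor is radius^2
    ndx = dx / (2.0 * radius) + 0.5
    ndy = dy / (2.0 * radius) + 0.5
    return (0.5 * (1.0 - math.cos(2.0 * math.pi * ndx)) *
            0.5 * (1.0 - math.cos(2.0 * math.pi * ndy)) / radius ** 2)

# Sanity check: a box filter of radius 0.5 has constant weight 1 over its 1x1 support.
print(compute_filter_weight("box", 0.5, 0.1, -0.2))  # 1.0
```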
- */ - -#pragma once - -namespace thrust -{ - -namespace random -{ - -namespace detail -{ - -struct random_core_access -{ - -template -static OStream &stream_out(OStream &os, const EngineOrDistribution &x) -{ - return x.stream_out(os); -} - -template -static IStream &stream_in(IStream &is, EngineOrDistribution &x) -{ - return x.stream_in(is); -} - -template -__host__ __device__ -static bool equal(const EngineOrDistribution &lhs, const EngineOrDistribution &rhs) -{ - return lhs.equal(rhs); -} - -}; // end random_core_access - -} // end detail - -} // end random - -} // end thrust - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/fill.h b/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/fill.h deleted file mode 100644 index 078e1b3781fda6e5de9824e1f96d61a529c6f839..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/fill.h +++ /dev/null @@ -1,94 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - ******************************************************************************/ -#pragma once - -#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC -#include -#include -#include - -namespace thrust -{ -namespace cuda_cub { - -namespace __fill { - - // fill functor - template - struct functor - { - Iterator it; - T value; - - THRUST_FUNCTION - functor(Iterator it, T value) - : it(it), value(value) {} - - template - THRUST_DEVICE_FUNCTION void operator()(Size idx) - { - it[idx] = value; - } - }; // struct functor - -} // namespace __fill - -template -OutputIterator __host__ __device__ -fill_n(execution_policy& policy, - OutputIterator first, - Size count, - const T& value) -{ - cuda_cub::parallel_for(policy, - __fill::functor( - first, - value), - count); - - cuda_cub::throw_on_error( - cuda_cub::synchronize(policy) - , "fill_n: failed to synchronize" - ); - - return first + count; -} // func fill_n - -template -void __host__ __device__ -fill(execution_policy& policy, - ForwardIterator first, - ForwardIterator last, - const T& value) -{ - cuda_cub::fill_n(policy, first, thrust::distance(first,last), value); -} // func filll - - -} // namespace cuda_cub -} // end namespace thrust -#endif diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/unique.h b/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/unique.h deleted file mode 100644 index c2aff4c6489ccf47e76288ffd7c5afe7c43b2dc0..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/unique.h +++ /dev/null @@ -1,801 +0,0 @@ -/****************************************************************************** - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the NVIDIA CORPORATION nor the - * names of its contributors may be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - ******************************************************************************/ -#pragma once - - -#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace thrust -{ - -template -__host__ __device__ ForwardIterator -unique( - const thrust::detail::execution_policy_base &exec, - ForwardIterator first, - ForwardIterator last, - BinaryPredicate binary_pred); - -template -__host__ __device__ OutputIterator -unique_copy( - const thrust::detail::execution_policy_base &exec, - InputIterator first, - InputIterator last, - OutputIterator result, - BinaryPredicate binary_pred); - -namespace cuda_cub { - -// XXX it should be possible to unify unique & unique_by_key into a single -// agent with various specializations, similar to what is done -// with partition -namespace __unique { - - template - struct PtxPolicy - { - enum - { - BLOCK_THREADS = _BLOCK_THREADS, - ITEMS_PER_THREAD = _ITEMS_PER_THREAD, - ITEMS_PER_TILE = _BLOCK_THREADS * _ITEMS_PER_THREAD, - }; - static const cub::BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; - static const cub::CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; - static const cub::BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM; - }; // struct PtxPolicy - - template - struct Tuning; - - namespace mpl = thrust::detail::mpl::math; - - template - struct items_per_thread - { - enum - { - value = mpl::min< - int, - NOMINAL_4B_ITEMS_PER_THREAD, - mpl::max::value>::value - }; - }; - - template - struct Tuning - { - const static int INPUT_SIZE = sizeof(T); - enum - { - NOMINAL_4B_ITEMS_PER_THREAD = 11, - // - ITEMS_PER_THREAD = items_per_thread::value - }; - - typedef PtxPolicy<64, - ITEMS_PER_THREAD, - cub::BLOCK_LOAD_WARP_TRANSPOSE, - cub::LOAD_LDG, - cub::BLOCK_SCAN_WARP_SCANS> - type; - }; // Tuning for sm52 - - - template - struct Tuning - { - const static int INPUT_SIZE = sizeof(T); - enum - { - NOMINAL_4B_ITEMS_PER_THREAD = 9, - // - ITEMS_PER_THREAD = items_per_thread::value - }; - - typedef PtxPolicy<128, - ITEMS_PER_THREAD, - cub::BLOCK_LOAD_WARP_TRANSPOSE, - cub::LOAD_LDG, - cub::BLOCK_SCAN_WARP_SCANS> - type; - }; // Tuning for sm35 - - template - struct Tuning - { - const static int INPUT_SIZE = sizeof(T); - enum - { - NOMINAL_4B_ITEMS_PER_THREAD = 7, - // - ITEMS_PER_THREAD = items_per_thread::value - }; - - typedef PtxPolicy<128, - ITEMS_PER_THREAD, - cub::BLOCK_LOAD_WARP_TRANSPOSE, - cub::LOAD_DEFAULT, - cub::BLOCK_SCAN_WARP_SCANS> - type; - }; // Tuning for sm30 - - template - struct UniqueAgent - { - typedef typename iterator_traits::value_type item_type; - - typedef cub::ScanTileState ScanTileState; - - template - struct PtxPlan : Tuning::type - { - typedef Tuning tuning; - - typedef typename core::LoadIterator::type ItemsLoadIt; - - typedef typename core::BlockLoad::type BlockLoadItems; - - typedef cub::BlockDiscontinuity - BlockDiscontinuityItems; - - typedef cub::TilePrefixCallbackOp - TilePrefixCallback; - typedef cub::BlockScan - BlockScan; - - typedef core::uninitialized_array - shared_items_t; - - union TempStorage - { - struct - { - typename BlockScan::TempStorage scan; - typename TilePrefixCallback::TempStorage prefix; - typename BlockDiscontinuityItems::TempStorage discontinuity; - }; - - typename BlockLoadItems::TempStorage load_items; - shared_items_t shared_items; - - }; // union TempStorage - }; // struct PtxPlan - - typedef typename core::specialize_plan_msvc10_war::type::type ptx_plan; - - 
typedef typename ptx_plan::ItemsLoadIt ItemsLoadIt; - typedef typename ptx_plan::BlockLoadItems BlockLoadItems; - typedef typename ptx_plan::BlockDiscontinuityItems BlockDiscontinuityItems; - typedef typename ptx_plan::TilePrefixCallback TilePrefixCallback; - typedef typename ptx_plan::BlockScan BlockScan; - typedef typename ptx_plan::shared_items_t shared_items_t; - typedef typename ptx_plan::TempStorage TempStorage; - - enum - { - BLOCK_THREADS = ptx_plan::BLOCK_THREADS, - ITEMS_PER_THREAD = ptx_plan::ITEMS_PER_THREAD, - ITEMS_PER_TILE = ptx_plan::ITEMS_PER_TILE - }; - - struct impl - { - //--------------------------------------------------------------------- - // Per-thread fields - //--------------------------------------------------------------------- - - TempStorage & temp_storage; - ScanTileState & tile_state; - ItemsLoadIt items_in; - ItemsOutputIt items_out; - cub::InequalityWrapper predicate; - Size num_items; - - //--------------------------------------------------------------------- - // Utility functions - //--------------------------------------------------------------------- - - THRUST_DEVICE_FUNCTION - shared_items_t &get_shared() - { - return temp_storage.shared_items; - } - - void THRUST_DEVICE_FUNCTION - scatter(item_type (&items)[ITEMS_PER_THREAD], - Size (&selection_flags)[ITEMS_PER_THREAD], - Size (&selection_indices)[ITEMS_PER_THREAD], - int /*num_tile_items*/, - int num_tile_selections, - Size num_selections_prefix, - Size /*num_selections*/) - { - using core::sync_threadblock; - -#pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - int local_scatter_offset = selection_indices[ITEM] - - num_selections_prefix; - if (selection_flags[ITEM]) - { - get_shared()[local_scatter_offset] = items[ITEM]; - } - } - - sync_threadblock(); - - for (int item = threadIdx.x; - item < num_tile_selections; - item += BLOCK_THREADS) - { - items_out[num_selections_prefix + item] = get_shared()[item]; - } - - sync_threadblock(); - } - - //--------------------------------------------------------------------- - // Tile processing - //--------------------------------------------------------------------- - - template - Size THRUST_DEVICE_FUNCTION - consume_tile_impl(int num_tile_items, - int tile_idx, - Size tile_base) - { - using core::sync_threadblock; - using core::uninitialized_array; - - item_type items_loc[ITEMS_PER_THREAD]; - Size selection_flags[ITEMS_PER_THREAD]; - Size selection_idx[ITEMS_PER_THREAD]; - - if (IS_LAST_TILE) - { - BlockLoadItems(temp_storage.load_items) - .Load(items_in + tile_base, - items_loc, - num_tile_items, - *(items_in + tile_base)); - } - else - { - BlockLoadItems(temp_storage.load_items) - .Load(items_in + tile_base, items_loc); - } - - - sync_threadblock(); - - if (IS_FIRST_TILE) - { - BlockDiscontinuityItems(temp_storage.discontinuity) - .FlagHeads(selection_flags, items_loc, predicate); - } - else - { - item_type tile_predecessor = items_in[tile_base - 1]; - BlockDiscontinuityItems(temp_storage.discontinuity) - .FlagHeads(selection_flags, items_loc, predicate, tile_predecessor); - } - -#pragma unroll - for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) - { - // Set selection_flags for out-of-bounds items - if ((IS_LAST_TILE) && - (Size(threadIdx.x * ITEMS_PER_THREAD) + ITEM >= num_tile_items)) - selection_flags[ITEM] = 1; - } - - sync_threadblock(); - - Size num_tile_selections = 0; - Size num_selections = 0; - Size num_selections_prefix = 0; - if (IS_FIRST_TILE) - { - BlockScan(temp_storage.scan) - .ExclusiveSum(selection_flags, - 
selection_idx, - num_tile_selections); - - if (threadIdx.x == 0) - { - // Update tile status if this is not the last tile - if (!IS_LAST_TILE) - tile_state.SetInclusive(0, num_tile_selections); - } - - // Do not count any out-of-bounds selections - if (IS_LAST_TILE) - { - int num_discount = ITEMS_PER_TILE - num_tile_items; - num_tile_selections -= num_discount; - } - num_selections = num_tile_selections; - } - else - { - TilePrefixCallback prefix_cb(tile_state, - temp_storage.prefix, - cub::Sum(), - tile_idx); - BlockScan(temp_storage.scan) - .ExclusiveSum(selection_flags, - selection_idx, - prefix_cb); - - num_selections = prefix_cb.GetInclusivePrefix(); - num_tile_selections = prefix_cb.GetBlockAggregate(); - num_selections_prefix = prefix_cb.GetExclusivePrefix(); - - if (IS_LAST_TILE) - { - int num_discount = ITEMS_PER_TILE - num_tile_items; - num_tile_selections -= num_discount; - num_selections -= num_discount; - } - } - - sync_threadblock(); - - scatter(items_loc, - selection_flags, - selection_idx, - num_tile_items, - num_tile_selections, - num_selections_prefix, - num_selections); - - return num_selections; - } - - - template - Size THRUST_DEVICE_FUNCTION - consume_tile(int num_tile_items, - int tile_idx, - Size tile_base) - { - if (tile_idx == 0) - { - return consume_tile_impl(num_tile_items, - tile_idx, - tile_base); - } - else - { - return consume_tile_impl(num_tile_items, - tile_idx, - tile_base); - } - } - - //--------------------------------------------------------------------- - // Constructor - //--------------------------------------------------------------------- - - THRUST_DEVICE_FUNCTION - impl(TempStorage & temp_storage_, - ScanTileState & tile_state_, - ItemsLoadIt items_in_, - ItemsOutputIt items_out_, - BinaryPred binary_pred_, - Size num_items_, - int num_tiles, - NumSelectedOutIt num_selected_out) - : temp_storage(temp_storage_), - tile_state(tile_state_), - items_in(items_in_), - items_out(items_out_), - predicate(binary_pred_), - num_items(num_items_) - { - int tile_idx = blockIdx.x; - Size tile_base = tile_idx * ITEMS_PER_TILE; - - if (tile_idx < num_tiles - 1) - { - consume_tile(ITEMS_PER_TILE, - tile_idx, - tile_base); - } - else - { - int num_remaining = static_cast(num_items - tile_base); - Size num_selections = consume_tile(num_remaining, - tile_idx, - tile_base); - if (threadIdx.x == 0) - { - *num_selected_out = num_selections; - } - } - } - }; // struct impl - - //--------------------------------------------------------------------- - // Agent entry point - //--------------------------------------------------------------------- - - THRUST_AGENT_ENTRY(ItemsIt items_in, - ItemsOutputIt items_out, - BinaryPred binary_pred, - NumSelectedOutIt num_selected_out, - Size num_items, - ScanTileState tile_state, - int num_tiles, - char * shmem) - { - TempStorage &storage = *reinterpret_cast(shmem); - - impl(storage, - tile_state, - core::make_load_iterator(ptx_plan(), items_in), - items_out, - binary_pred, - num_items, - num_tiles, - num_selected_out); - } - }; // struct UniqueAgent - - template - struct InitAgent - { - template - struct PtxPlan : PtxPolicy<128> {}; - typedef core::specialize_plan ptx_plan; - - //--------------------------------------------------------------------- - // Agent entry point - //--------------------------------------------------------------------- - - THRUST_AGENT_ENTRY(ScanTileState tile_state, - Size num_tiles, - NumSelectedIt num_selected_out, - char * /*shmem*/) - { - tile_state.InitializeStatus(num_tiles); - if (blockIdx.x == 0 && 
threadIdx.x == 0) - *num_selected_out = 0; - } - - }; // struct InitAgent - - template - static cudaError_t THRUST_RUNTIME_FUNCTION - doit_step(void * d_temp_storage, - size_t & temp_storage_bytes, - ItemsInputIt items_in, - ItemsOutputIt items_out, - BinaryPred binary_pred, - NumSelectedOutIt num_selected_out, - Size num_items, - cudaStream_t stream, - bool debug_sync) - { - using core::AgentLauncher; - using core::AgentPlan; - using core::get_agent_plan; - - typedef AgentLauncher< - UniqueAgent > - unique_agent; - - typedef typename unique_agent::ScanTileState ScanTileState; - - typedef AgentLauncher< - InitAgent > - init_agent; - - using core::get_plan; - typename get_plan::type init_plan = init_agent::get_plan(); - typename get_plan::type unique_plan = unique_agent::get_plan(stream); - - - int tile_size = unique_plan.items_per_tile; - size_t num_tiles = (num_items + tile_size - 1) / tile_size; - - size_t vshmem_size = core::vshmem_size(unique_plan.shared_memory_size, - num_tiles); - - cudaError_t status = cudaSuccess; - size_t allocation_sizes[2] = {0, vshmem_size}; - status = ScanTileState::AllocationSize(static_cast(num_tiles), allocation_sizes[0]); - CUDA_CUB_RET_IF_FAIL(status); - - void *allocations[2] = {NULL, NULL}; - // - status = cub::AliasTemporaries(d_temp_storage, - temp_storage_bytes, - allocations, - allocation_sizes); - CUDA_CUB_RET_IF_FAIL(status); - - if (d_temp_storage == NULL) - { - return status; - } - - ScanTileState tile_status; - status = tile_status.Init(static_cast(num_tiles), allocations[0], allocation_sizes[0]); - CUDA_CUB_RET_IF_FAIL(status); - - num_tiles = max(1,num_tiles); - init_agent ia(init_plan, num_tiles, stream, "unique_by_key::init_agent", debug_sync); - ia.launch(tile_status, num_tiles, num_selected_out); - CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError()); - - if (num_items == 0) { return status; } - - char *vshmem_ptr = vshmem_size > 0 ? (char *)allocations[1] : NULL; - - unique_agent ua(unique_plan, num_items, stream, vshmem_ptr, "unique_by_key::unique_agent", debug_sync); - ua.launch(items_in, - items_out, - binary_pred, - num_selected_out, - num_items, - tile_status, - num_tiles); - CUDA_CUB_RET_IF_FAIL(cudaPeekAtLastError()); - return status; - } - - template - THRUST_RUNTIME_FUNCTION - ItemsOutputIt unique(execution_policy& policy, - ItemsInputIt items_first, - ItemsInputIt items_last, - ItemsOutputIt items_result, - BinaryPred binary_pred) - { - // typedef typename iterator_traits::difference_type size_type; - typedef int size_type; - - size_type num_items = static_cast(thrust::distance(items_first, items_last)); - size_t temp_storage_bytes = 0; - cudaStream_t stream = cuda_cub::stream(policy); - bool debug_sync = THRUST_DEBUG_SYNC_FLAG; - - cudaError_t status; - status = doit_step(NULL, - temp_storage_bytes, - items_first, - items_result, - binary_pred, - reinterpret_cast(NULL), - num_items, - stream, - debug_sync); - cuda_cub::throw_on_error(status, "unique: failed on 1st step"); - - size_t allocation_sizes[2] = {sizeof(size_type), temp_storage_bytes}; - void * allocations[2] = {NULL, NULL}; - - size_t storage_size = 0; - status = core::alias_storage(NULL, - storage_size, - allocations, - allocation_sizes); - cuda_cub::throw_on_error(status, "unique: failed on 1st step"); - - // Allocate temporary storage. 
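The `UniqueAgent` above keeps the first element of each run: `BlockDiscontinuity::FlagHeads` marks items that differ from their predecessor, `BlockScan::ExclusiveSum` turns those flags into output slots, and `scatter` compacts the flagged items, with the tile-prefix callback carrying the running selection count across tiles. A serial NumPy model of that selection logic, offered only as an illustrative sketch and not as thrust/CUB code:

```python
import numpy as np

def unique_consecutive(items):
    """Serial model of flag-heads -> exclusive-scan -> scatter (illustrative)."""
    items = np.asarray(items)
    if items.size == 0:
        return items
    # FlagHeads: the first item, plus every item that differs from its predecessor
    flags = np.ones(items.size, dtype=np.int64)
    flags[1:] = items[1:] != items[:-1]
    # ExclusiveSum: each selected item's slot in the compacted output
    slots = np.cumsum(flags) - flags
    # Scatter the flagged items into the compacted output
    keep = flags.astype(bool)
    out = np.empty(int(flags.sum()), dtype=items.dtype)
    out[slots[keep]] = items[keep]
    return out

print(unique_consecutive([1, 1, 2, 2, 2, 3, 1]))  # [1 2 3 1]
```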
- thrust::detail::temporary_array - tmp(policy, storage_size); - void *ptr = static_cast(tmp.data().get()); - - status = core::alias_storage(ptr, - storage_size, - allocations, - allocation_sizes); - cuda_cub::throw_on_error(status, "unique: failed on 2nd step"); - - size_type* d_num_selected_out - = thrust::detail::aligned_reinterpret_cast(allocations[0]); - - status = doit_step(allocations[1], - temp_storage_bytes, - items_first, - items_result, - binary_pred, - d_num_selected_out, - num_items, - stream, - debug_sync); - cuda_cub::throw_on_error(status, "unique: failed on 2nd step"); - - status = cuda_cub::synchronize(policy); - cuda_cub::throw_on_error(status, "unique: failed to synchronize"); - - size_type num_selected = get_value(policy, d_num_selected_out); - - return items_result + num_selected; - } -} // namespace __unique - -//------------------------- -// Thrust API entry points -//------------------------- - -__thrust_exec_check_disable__ -template -OutputIt __host__ __device__ -unique_copy(execution_policy &policy, - InputIt first, - InputIt last, - OutputIt result, - BinaryPred binary_pred) -{ - OutputIt ret = result; - if (__THRUST_HAS_CUDART__) - { - ret = __unique::unique(policy, - first, - last, - result, - binary_pred); - } - else - { -#if !__THRUST_HAS_CUDART__ - ret = thrust::unique_copy(cvt_to_seq(derived_cast(policy)), - first, - last, - result, - binary_pred); -#endif - } - return ret; -} - -template -OutputIt __host__ __device__ -unique_copy(execution_policy &policy, - InputIt first, - InputIt last, - OutputIt result) -{ - typedef typename iterator_traits::value_type input_type; - return cuda_cub::unique_copy(policy, first, last, result, equal_to()); -} - - - -__thrust_exec_check_disable__ -template -InputIt __host__ __device__ -unique(execution_policy &policy, - InputIt first, - InputIt last, - BinaryPred binary_pred) -{ - InputIt ret = first; - if (__THRUST_HAS_CUDART__) - { - ret = cuda_cub::unique_copy(policy, first, last, first, binary_pred); - } - else - { -#if !__THRUST_HAS_CUDART__ - ret = thrust::unique(cvt_to_seq(derived_cast(policy)), - first, - last, - binary_pred); -#endif - } - return ret; -} - -template -InputIt __host__ __device__ -unique(execution_policy &policy, - InputIt first, - InputIt last) -{ - typedef typename iterator_traits::value_type input_type; - return cuda_cub::unique(policy, first, last, equal_to()); -} - -} // namespace cuda_cub -} // end namespace thrust - -// -#include -#include -#endif diff --git a/spaces/CVPR/WALT/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py b/spaces/CVPR/WALT/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py deleted file mode 100644 index 0e86d2ea67e154fae18dbf9d2bfde6d0a70e582c..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py +++ /dev/null @@ -1,205 +0,0 @@ -import torch.nn as nn -from mmcv.cnn import ConvModule - -from mmdet.models.builder import HEADS -from .bbox_head import BBoxHead - - -@HEADS.register_module() -class ConvFCBBoxHead(BBoxHead): - r"""More general bbox head, with shared conv and fc layers and two optional - separated branches. - - .. 
code-block:: none - - /-> cls convs -> cls fcs -> cls - shared convs -> shared fcs - \-> reg convs -> reg fcs -> reg - """ # noqa: W605 - - def __init__(self, - num_shared_convs=0, - num_shared_fcs=0, - num_cls_convs=0, - num_cls_fcs=0, - num_reg_convs=0, - num_reg_fcs=0, - conv_out_channels=256, - fc_out_channels=1024, - conv_cfg=None, - norm_cfg=None, - *args, - **kwargs): - super(ConvFCBBoxHead, self).__init__(*args, **kwargs) - assert (num_shared_convs + num_shared_fcs + num_cls_convs + - num_cls_fcs + num_reg_convs + num_reg_fcs > 0) - if num_cls_convs > 0 or num_reg_convs > 0: - assert num_shared_fcs == 0 - if not self.with_cls: - assert num_cls_convs == 0 and num_cls_fcs == 0 - if not self.with_reg: - assert num_reg_convs == 0 and num_reg_fcs == 0 - self.num_shared_convs = num_shared_convs - self.num_shared_fcs = num_shared_fcs - self.num_cls_convs = num_cls_convs - self.num_cls_fcs = num_cls_fcs - self.num_reg_convs = num_reg_convs - self.num_reg_fcs = num_reg_fcs - self.conv_out_channels = conv_out_channels - self.fc_out_channels = fc_out_channels - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - - # add shared convs and fcs - self.shared_convs, self.shared_fcs, last_layer_dim = \ - self._add_conv_fc_branch( - self.num_shared_convs, self.num_shared_fcs, self.in_channels, - True) - self.shared_out_channels = last_layer_dim - - # add cls specific branch - self.cls_convs, self.cls_fcs, self.cls_last_dim = \ - self._add_conv_fc_branch( - self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) - - # add reg specific branch - self.reg_convs, self.reg_fcs, self.reg_last_dim = \ - self._add_conv_fc_branch( - self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels) - - if self.num_shared_fcs == 0 and not self.with_avg_pool: - if self.num_cls_fcs == 0: - self.cls_last_dim *= self.roi_feat_area - if self.num_reg_fcs == 0: - self.reg_last_dim *= self.roi_feat_area - - self.relu = nn.ReLU(inplace=True) - # reconstruct fc_cls and fc_reg since input channels are changed - if self.with_cls: - self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes + 1) - if self.with_reg: - out_dim_reg = (4 if self.reg_class_agnostic else 4 * - self.num_classes) - self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg) - - def _add_conv_fc_branch(self, - num_branch_convs, - num_branch_fcs, - in_channels, - is_shared=False): - """Add shared or separable branch. 
- - convs -> avg pool (optional) -> fcs - """ - last_layer_dim = in_channels - # add branch specific conv layers - branch_convs = nn.ModuleList() - if num_branch_convs > 0: - for i in range(num_branch_convs): - conv_in_channels = ( - last_layer_dim if i == 0 else self.conv_out_channels) - branch_convs.append( - ConvModule( - conv_in_channels, - self.conv_out_channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - last_layer_dim = self.conv_out_channels - # add branch specific fc layers - branch_fcs = nn.ModuleList() - if num_branch_fcs > 0: - # for shared branch, only consider self.with_avg_pool - # for separated branches, also consider self.num_shared_fcs - if (is_shared - or self.num_shared_fcs == 0) and not self.with_avg_pool: - last_layer_dim *= self.roi_feat_area - for i in range(num_branch_fcs): - fc_in_channels = ( - last_layer_dim if i == 0 else self.fc_out_channels) - branch_fcs.append( - nn.Linear(fc_in_channels, self.fc_out_channels)) - last_layer_dim = self.fc_out_channels - return branch_convs, branch_fcs, last_layer_dim - - def init_weights(self): - super(ConvFCBBoxHead, self).init_weights() - # conv layers are already initialized by ConvModule - for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]: - for m in module_list.modules(): - if isinstance(m, nn.Linear): - nn.init.xavier_uniform_(m.weight) - nn.init.constant_(m.bias, 0) - - def forward(self, x): - # shared part - if self.num_shared_convs > 0: - for conv in self.shared_convs: - x = conv(x) - - if self.num_shared_fcs > 0: - if self.with_avg_pool: - x = self.avg_pool(x) - - x = x.flatten(1) - - for fc in self.shared_fcs: - x = self.relu(fc(x)) - # separate branches - x_cls = x - x_reg = x - - for conv in self.cls_convs: - x_cls = conv(x_cls) - if x_cls.dim() > 2: - if self.with_avg_pool: - x_cls = self.avg_pool(x_cls) - x_cls = x_cls.flatten(1) - for fc in self.cls_fcs: - x_cls = self.relu(fc(x_cls)) - - for conv in self.reg_convs: - x_reg = conv(x_reg) - if x_reg.dim() > 2: - if self.with_avg_pool: - x_reg = self.avg_pool(x_reg) - x_reg = x_reg.flatten(1) - for fc in self.reg_fcs: - x_reg = self.relu(fc(x_reg)) - - cls_score = self.fc_cls(x_cls) if self.with_cls else None - bbox_pred = self.fc_reg(x_reg) if self.with_reg else None - return cls_score, bbox_pred - - -@HEADS.register_module() -class Shared2FCBBoxHead(ConvFCBBoxHead): - - def __init__(self, fc_out_channels=1024, *args, **kwargs): - super(Shared2FCBBoxHead, self).__init__( - num_shared_convs=0, - num_shared_fcs=2, - num_cls_convs=0, - num_cls_fcs=0, - num_reg_convs=0, - num_reg_fcs=0, - fc_out_channels=fc_out_channels, - *args, - **kwargs) - - -@HEADS.register_module() -class Shared4Conv1FCBBoxHead(ConvFCBBoxHead): - - def __init__(self, fc_out_channels=1024, *args, **kwargs): - super(Shared4Conv1FCBBoxHead, self).__init__( - num_shared_convs=4, - num_shared_fcs=1, - num_cls_convs=0, - num_cls_fcs=0, - num_reg_convs=0, - num_reg_fcs=0, - fc_out_channels=fc_out_channels, - *args, - **kwargs) diff --git a/spaces/CVPR/regionclip-demo/detectron2/layers/__init__.py b/spaces/CVPR/regionclip-demo/detectron2/layers/__init__.py deleted file mode 100644 index c8bd1fb024d1cb911dda3f8a77f7ec3ad2e63798..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/layers/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
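The `ConvFCBBoxHead` shown above builds an optional shared conv/fc trunk followed by separate classification and regression branches; `Shared2FCBBoxHead` is simply the 0-conv/2-shared-fc configuration. A minimal standalone PyTorch sketch of that layout, with class and argument names invented for this illustration rather than taken from mmdet:

```python
import torch
import torch.nn as nn

class TwoBranchBBoxHead(nn.Module):
    """Sketch of the shared-trunk + cls/reg split used by Shared2FCBBoxHead."""
    def __init__(self, in_channels=256, roi_size=7, fc_out=1024, num_classes=80):
        super().__init__()
        flat = in_channels * roi_size * roi_size
        # two shared fully connected layers (num_shared_convs=0, num_shared_fcs=2)
        self.shared_fcs = nn.Sequential(
            nn.Linear(flat, fc_out), nn.ReLU(inplace=True),
            nn.Linear(fc_out, fc_out), nn.ReLU(inplace=True),
        )
        self.fc_cls = nn.Linear(fc_out, num_classes + 1)  # +1 background logit
        self.fc_reg = nn.Linear(fc_out, 4 * num_classes)  # class-specific box deltas

    def forward(self, x):  # x: (num_rois, C, roi_size, roi_size) pooled RoI features
        x = self.shared_fcs(x.flatten(1))
        return self.fc_cls(x), self.fc_reg(x)

rois = torch.randn(8, 256, 7, 7)
cls_score, bbox_pred = TwoBranchBBoxHead()(rois)
print(cls_score.shape, bbox_pred.shape)  # torch.Size([8, 81]) torch.Size([8, 320])
```

The `4 * num_classes` regression width corresponds to the `reg_class_agnostic=False` case in the original head.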
-from .batch_norm import FrozenBatchNorm2d, get_norm, NaiveSyncBatchNorm -from .deform_conv import DeformConv, ModulatedDeformConv -from .mask_ops import paste_masks_in_image -from .nms import batched_nms, batched_nms_rotated, nms, nms_rotated -from .roi_align import ROIAlign, roi_align -from .roi_align_rotated import ROIAlignRotated, roi_align_rotated -from .shape_spec import ShapeSpec -from .wrappers import ( - BatchNorm2d, - Conv2d, - ConvTranspose2d, - cat, - interpolate, - Linear, - nonzero_tuple, - cross_entropy, -) -from .blocks import CNNBlockBase, DepthwiseSeparableConv2d -from .aspp import ASPP - -__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/spaces/CVPR/regionclip-demo/detectron2/utils/README.md b/spaces/CVPR/regionclip-demo/detectron2/utils/README.md deleted file mode 100644 index 9765b24a730b77556104187ac3ef5439ab0859fd..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/utils/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Utility functions - -This folder contain utility functions that are not used in the -core library, but are useful for building models or training -code using the config system. diff --git a/spaces/Chirag1994/Melanoma_Skin_Cancer_Detection_App/app.py b/spaces/Chirag1994/Melanoma_Skin_Cancer_Detection_App/app.py deleted file mode 100644 index 35265297f1ced39b81a213182abfe73e1a8f0b10..0000000000000000000000000000000000000000 --- a/spaces/Chirag1994/Melanoma_Skin_Cancer_Detection_App/app.py +++ /dev/null @@ -1,73 +0,0 @@ -# Importing Libraries -import os -import torch -import numpy as np -import gradio as gr -from model import Model -import albumentations as A - -# Creating a model instance -efficientnet_b5_model = Model() -efficientnet_b5_model = torch.nn.DataParallel( - efficientnet_b5_model) # Must wrap our model in nn.DataParallel() -# if used multi-gpu's to train the model otherwise we would get state_dict keys mismatch error. -efficientnet_b5_model.load_state_dict( - torch.load( - f='efficientnet_b5_checkpoint_fold_0.pt', - map_location=torch.device("cpu") - ) -) - -# Predict on a single image - - -def predict_on_single_image(img): - """ - Function takes an image, transforms for - model training like normalizing the statistics - of the image. Converting the numpy array into - torch tensor and passing through the model - to get the prediction probability of a patient - having melanoma. - """ - img = np.array(img) - transforms = A.Compose([A.Resize(512, 512), - A.Normalize(mean=(0.485, 0.456, 0.406), - std=(0.229, 0.224, 0.225), - max_pixel_value=255.0, - always_apply=True - )] - ) - img = transforms(image=img)['image'] - image = np.transpose(img, (2, 0, 1)).astype(np.float32) - image = torch.tensor(image, dtype=torch.float).unsqueeze(dim=0) - efficientnet_b5_model.eval() - with torch.inference_mode(): - probs = torch.sigmoid(efficientnet_b5_model(image)) - prob_of_melanoma = probs[0].item() - prob_of_not_having_melanoma = 1 - prob_of_melanoma - pred_label = {"Probability of Having Melanoma": prob_of_melanoma, - "Probability of Not having Melanoma": prob_of_not_having_melanoma} - return pred_label - - -# Gradio App - -# Examples directory path -melanoma_app_examples_path = "examples" - -# Creating the title and description strings -title = "Melanoma Cancer Detection App" -description = 'An efficientnet-b5 model that predicts the probability of a patient having melanoma skin cancer or not.' 
-example_list = [["examples/" + example] - for example in os.listdir(melanoma_app_examples_path)] - -# Create the Gradio demo -demo = gr.Interface(fn=predict_on_single_image, - inputs=gr.Image(type='pil'), - outputs=[gr.Label(label='Probabilities')], - examples=example_list, title=title, - description=description) - -# Launch the demo! -demo.launch() diff --git a/spaces/CofAI/chat.v2/temp.py b/spaces/CofAI/chat.v2/temp.py deleted file mode 100644 index fab040ff070d12bd78f8bbf2b2e78ac27e6ed65b..0000000000000000000000000000000000000000 --- a/spaces/CofAI/chat.v2/temp.py +++ /dev/null @@ -1,4 +0,0 @@ -import pandas as pd - -pd = pd.DataFrame({'address':[], 'car_num': [], 'lat': [], 'long': [], 'time': [], 'date': []}) -pd.to_csv('data.csv', index=False) \ No newline at end of file diff --git a/spaces/Cong723/gpt-academic-public/docs/README_FR.md b/spaces/Cong723/gpt-academic-public/docs/README_FR.md deleted file mode 100644 index f21e90035ef2ddea91382155e0ad46b6740f5322..0000000000000000000000000000000000000000 --- a/spaces/Cong723/gpt-academic-public/docs/README_FR.md +++ /dev/null @@ -1,296 +0,0 @@ -> **Note** -> -> Ce fichier README est généré automatiquement par le plugin de traduction markdown de ce projet et n'est peut - être pas correct à 100%. -> - -# ChatGPT Optimisation Académique - -**Si vous aimez ce projet, donnez-lui une étoile; si vous avez inventé des raccourcis académiques plus utiles ou des plugins fonctionnels, n'hésitez pas à ouvrir une demande ou une demande de traction. Nous avons également un fichier README en [anglais|](docs/README_EN.md)[japonais|](docs/README_JP.md)[russe|](docs/README_RS.md)[français](docs/README_FR.md) traduit par ce projet lui-même.** - -> **Note** -> -> 1. Veuillez noter que seuls les plugins de fonction signalés en **rouge** sont capables de lire les fichiers, certains plugins se trouvent dans le **menu déroulant** de la section plugin. Nous sommes également les bienvenus avec la plus haute priorité pour traiter et accepter tout nouveau PR de plugin! -> -> 2. Chaque fichier dans ce projet est expliqué en détail dans l'auto-analyse [self_analysis.md](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). Avec l'itération des versions, vous pouvez également cliquer sur les plugins fonctionnels pertinents pour appeler GPT et générer un rapport d'auto-analyse projet mis à jour. Les questions fréquemment posées sont résumées dans le [wiki](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). -> - -
        - -Fonctionnalité | Description ---- | --- -Polissage en un clic | Prend en charge la correction en un clic et la recherche d'erreurs de syntaxe dans les documents de recherche. -Traduction Chinois-Anglais en un clic | Une touche pour traduire la partie chinoise en anglais ou celle anglaise en chinois. -Explication de code en un clic | Affiche et explique correctement le code. -[Raccourcis clavier personnalisables](https://www.bilibili.com/video/BV14s4y1E7jN) | Prend en charge les raccourcis clavier personnalisables. -[Configuration du serveur proxy](https://www.bilibili.com/video/BV1rc411W7Dr) | Prend en charge la configuration du serveur proxy. -Conception modulaire | Prend en charge la personnalisation des plugins de fonctions et des [plugins] de fonctions hiérarchiques personnalisés, et les plugins prennent en charge [la mise à jour à chaud](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97). -[Auto-analyse du programme](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugins] [Lire en un clic](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) le code source de ce projet. -[Analyse de programme](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugins] En un clic, les projets Python/C/C++/Java/Lua/... peuvent être analysés. -Lire le document de recherche | [Plugins] Lisez le résumé de l'article en latex et générer un résumé. -Traduction et polissage de l'article complet en LaTeX | [Plugins] Une touche pour traduire ou corriger en LaTeX -Génération Commentaire de fonction en vrac | [Plugins] Lisez en un clic les fonctions et générez des commentaires de fonction. -Rapport d'analyse automatique des chats générés | [Plugins] Génère un rapport de synthèse après l'exécution. -[Assistant arxiv](https://www.bilibili.com/video/BV1LM4y1279X) | [Plugins] Entrez l'url de l'article arxiv pour traduire le résumé + télécharger le PDF en un clic -[Traduction complète des articles PDF](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plugins] Extraire le titre et le résumé de l'article PDF + Traduire le texte entier (multithread) -[Aide à la recherche Google Academ](https://www.bilibili.com/video/BV19L411U7ia) | [Plugins] Donnez à GPT l'URL de n'importe quelle page de recherche Google Academ pour vous aider à sélectionner des articles intéressants -Affichage de formules/images/tableaux | Afficher la forme traduite et rendue d'une formule en même temps, plusieurs formules et surlignage du code prend en charge -Prise en charge des plugins multithread | Prise en charge de l'appel multithread de chatgpt, traitement en masse de texte ou de programmes en un clic -Activer le thème Gradio sombre [theme](https://github.com/binary-husky/chatgpt_academic/issues/173) au démarrage | Ajoutez ```/?__dark-theme=true``` à l'URL du navigateur pour basculer vers le thème sombre -[Prise en charge de plusieurs modèles LLM](https://www.bilibili.com/video/BV1wT411p7yf), [prise en charge de l'interface API2D](https://api2d.com/) | Comment cela serait-il de se faire servir par GPT3.5, GPT4 et la [ChatGLM de Tsinghua](https://github.com/THUDM/ChatGLM-6B) en même temps? -Expérience en ligne d'huggingface sans science | Après vous être connecté à huggingface, copiez [cet espace](https://huggingface.co/spaces/qingxu98/gpt-academic) -... | ... - -
        - - -Vous êtes un traducteur professionnel d'articles universitaires en français. - -Ceci est un fichier Markdown, veuillez le traduire en français sans modifier les commandes Markdown existantes : - -- Nouvelle interface (modifiable en modifiant l'option de mise en page dans config.py pour basculer entre les mises en page gauche-droite et haut-bas) -
        - -
        - - -- Tous les boutons sont générés dynamiquement en lisant functional.py, les utilisateurs peuvent ajouter librement des fonctions personnalisées pour libérer le presse-papiers. -
        - -
        - -- Correction/amélioration -
        - -
        - -- Si la sortie contient des formules, elles seront affichées simultanément sous forme de de texte brut et de forme rendue pour faciliter la copie et la lecture. -
        - -
        - -- Pas envie de lire le code du projet ? Faites votre propre démo avec ChatGPT. -
        - -
        - -- Utilisation combinée de plusieurs modèles de langage sophistiqués (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) -
        - -
        - -Utilisation combinée de plusieurs modèles de langage sophistiqués en version de test [huggingface](https://huggingface.co/spaces/qingxu98/academic-chatgpt-beta) (la version huggingface ne prend pas en charge Chatglm). - - ---- - -## Installation - Méthode 1 : Exécution directe (Windows, Linux or MacOS) - -1. Téléchargez le projet -```sh -git clone https://github.com/binary-husky/chatgpt_academic.git -cd chatgpt_academic -``` - -2. Configuration de l'API_KEY et des paramètres de proxy - -Dans `config.py`, configurez les paramètres de proxy et de clé d'API OpenAI, comme indiqué ci-dessous -``` -1. Si vous êtes en Chine, vous devez configurer un proxy étranger pour utiliser l'API OpenAI en toute transparence. Pour ce faire, veuillez lire attentivement le fichier config.py (1. Modifiez l'option USE_PROXY ; 2. Modifiez les paramètres de proxies comme indiqué dans les instructions). -2. Configurez votre clé API OpenAI. Vous devez vous inscrire sur le site web d'OpenAI pour obtenir une clé API. Une fois que vous avez votre clé API, vous pouvez la configurer dans le fichier config.py. -3. Tous les problèmes liés aux réseaux de proxy (temps d'attente, non-fonctionnement des proxies) sont résumés dans https://github.com/binary-husky/chatgpt_academic/issues/1. -``` -(Remarque : le programme vérifie d'abord s'il existe un fichier de configuration privé nommé `config_private.py`, et utilise les configurations de celui-ci à la place de celles du fichier `config.py`. Par conséquent, si vous comprenez notre logique de lecture de configuration, nous vous recommandons fortement de créer un nouveau fichier de configuration nommé `config_private.py` à côté de `config.py` et de transférer (copier) les configurations de celui-ci dans `config_private.py`. `config_private.py` n'est pas contrôlé par git et rend vos informations personnelles plus sûres.) - -3. Installation des dépendances -```sh -# (Option 1) Recommandé -python -m pip install -r requirements.txt - -# (Option 2) Si vous utilisez anaconda, les étapes sont similaires : -# (Option 2.1) conda create -n gptac_venv python=3.11 -# (Option 2.2) conda activate gptac_venv -# (Option 2.3) python -m pip install -r requirements.txt - -# note : Utilisez la source pip officielle ou la source pip Alibaba. D'autres sources (comme celles des universités) pourraient poser problème. Pour utiliser temporairement une autre source, utilisez : -# python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ -``` - -Si vous avez besoin de soutenir ChatGLM de Tsinghua, vous devez installer plus de dépendances (si vous n'êtes pas familier avec Python ou que votre ordinateur n'est pas assez performant, nous vous recommandons de ne pas essayer) : -```sh -python -m pip install -r request_llm/requirements_chatglm.txt -``` - -4. Exécution -```sh -python main.py -``` - -5. Tester les plugins de fonctions -``` -- Test Python Project Analysis - Dans la zone de saisie, entrez `./crazy_functions/test_project/python/dqn`, puis cliquez sur "Parse Entire Python Project" -- Test d'auto-lecture du code - Cliquez sur "[Démo multi-thread] Parser ce projet lui-même (auto-traduction de la source)" -- Test du modèle de fonctionnalité expérimentale (exige une réponse de l'IA à ce qui est arrivé aujourd'hui dans l'histoire). Vous pouvez utiliser cette fonctionnalité comme modèle pour des fonctions plus complexes. 
- Cliquez sur "[Démo modèle de plugin de fonction] Histoire du Jour" -- Le menu déroulant de la zone de plugin de fonctionnalité contient plus de fonctionnalités à sélectionner. -``` - -## Installation - Méthode 2 : Utilisation de docker (Linux) - - -Vous êtes un traducteur professionnel d'articles académiques en français. - -1. ChatGPT seul (recommandé pour la plupart des gens) -``` sh -# Télécharger le projet -git clone https://github.com/binary-husky/chatgpt_academic.git -cd chatgpt_academic -# Configurer le proxy outre-mer et la clé API OpenAI -Modifier le fichier config.py avec n'importe quel éditeur de texte -# Installer -docker build -t gpt-academic . -# Exécuter -docker run --rm -it --net=host gpt-academic - -# Tester les modules de fonction -## Tester la fonction modèle des modules (requiert la réponse de GPT à "qu'est-ce qui s'est passé dans l'histoire aujourd'hui ?"), vous pouvez utiliser cette fonction en tant que modèle pour implémenter des fonctions plus complexes. -Cliquez sur "[Exemple de modèle de module] Histoire d'aujourd'hui" -## Tester le résumé écrit pour le projet LaTeX -Dans la zone de saisie, tapez ./crazy_functions/test_project/latex/attention, puis cliquez sur "Lire le résumé de l'article de recherche LaTeX" -## Tester l'analyse du projet Python -Dans la zone de saisie, tapez ./crazy_functions/test_project/python/dqn, puis cliquez sur "Analyser l'ensemble du projet Python" - -D'autres fonctions sont disponibles dans la liste déroulante des modules de fonction. -``` - -2. ChatGPT+ChatGLM (nécessite une grande connaissance de docker et une configuration informatique suffisamment puissante) -``` sh -# Modifier le dockerfile -cd docs && nano Dockerfile+ChatGLM -# Comment construire | 如何构建 (Dockerfile+ChatGLM在docs路径下,请先cd docs) -docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM . -# Comment exécuter | 如何运行 (1) Directement exécuter : -docker run --rm -it --net=host --gpus=all gpt-academic -# Comment exécuter | 如何运行 (2) Je veux effectuer quelques ajustements dans le conteneur avant de lancer : -docker run --rm -it --net=host --gpus=all gpt-academic bash -``` - -## Installation - Méthode 3 : Autres méthodes de déploiement - -1. Déploiement sur un cloud serveur distant -Veuillez consulter le [wiki de déploiement-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97) - -2. Utilisation de WSL2 (Windows Subsystem for Linux) -Veuillez consulter le [wiki de déploiement-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2) - - -## Configuration de la procuration de l'installation -### Méthode 1 : Méthode conventionnelle -[Configuration de la procuration](https://github.com/binary-husky/chatgpt_academic/issues/1) - -### Méthode 2 : Tutoriel pour débutant pur -[Tutoriel pour débutant pur](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BB%A3%E7%90%86%E8%BD%AF%E4%BB%B6%E9%97%AE%E9%A2%98%E7%9A%84%E6%96%B0%E6%89%8B%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95%EF%BC%88%E6%96%B9%E6%B3%95%E5%8F%AA%E9%80%82%E7%94%A8%E4%BA%8E%E6%96%B0%E6%89%8B%EF%BC%89) - - ---- - -## Personnalisation des nouveaux boutons pratiques (personnalisation des raccourcis académiques) -Ouvrez le fichier `core_functional.py` avec n'importe quel éditeur de texte, ajoutez les éléments suivants, puis redémarrez le programme. 
(Si le bouton a déjà été ajouté avec succès et est visible, le préfixe et le suffixe pris en charge peuvent être modifiés à chaud sans avoir besoin de redémarrer le programme.) -Par exemple: -``` -"Traduction Français-Chinois": { - # Préfixe, qui sera ajouté avant votre saisie. Par exemple, pour décrire votre demande, telle que la traduction, le débogage de code, l'amélioration, etc. - "Prefix": "Veuillez traduire le contenu ci-dessous en chinois, puis expliquer chaque terme propre mentionné dans un tableau Markdown :\n\n", - - # Suffixe, qui sera ajouté après votre saisie. Par exemple, en combinaison avec un préfixe, vous pouvez mettre le contenu de votre saisie entre guillemets. - "Suffix": "", -}, -``` - -
        - -
        - ---- - - -## Présentation de certaines fonctionnalités - -### Affichage des images: - -
        - -
        - - -### Si un programme peut comprendre et décomposer lui-même : - -
        - -
        - -
        - -
        - - -### Analyse de tout projet Python/Cpp quelconque : -
        - -
        - -
        - -
        - -### Lecture et résumé générés automatiquement pour les articles en Latex -
        - -
        - -### Génération de rapports automatique -
        - - - -
        - -### Conception de fonctionnalités modulaires -
        - - -
        - - -### Traduction de code source en anglais - -
        - -
        - -## À faire et planification de version : -- version 3.2+ (à faire) : Prise en charge de plus de paramètres d'interface de plugin de fonction -- version 3.1 : Prise en charge de l'interrogation simultanée de plusieurs modèles GPT ! Prise en charge de l'API2d, prise en charge de la répartition de charge de plusieurs clés API -- version 3.0 : Prise en charge de chatglm et d'autres petits llm -- version 2.6 : Réorganisation de la structure du plugin, amélioration de l'interactivité, ajout de plus de plugins -- version 2.5 : Mise à jour automatique, résolution du problème de dépassement de jeton et de texte trop long lors de la compilation du code source complet -- version 2.4 : (1) Ajout de la fonctionnalité de traduction intégrale de PDF ; (2) Ajout d'une fonctionnalité de changement de position de zone de saisie ; (3) Ajout d'une option de disposition verticale ; (4) Optimisation du plugin de fonction multi-thread. -- version 2.3 : Amélioration de l'interactivité multi-thread -- version 2.2 : Prise en charge du rechargement à chaud du plugin de fonction -- version 2.1 : Mise en page pliable -- version 2.0 : Introduction du plugin de fonction modulaire -- version 1.0 : Fonctionnalité de base - -## Références et apprentissage - -``` -De nombreux designs d'autres projets exceptionnels ont été utilisés pour référence dans le code, notamment : - -# Projet 1 : De nombreuses astuces ont été empruntées à ChuanhuChatGPT -https://github.com/GaiZhenbiao/ChuanhuChatGPT - -# Projet 2 : ChatGLM-6B de Tsinghua : -https://github.com/THUDM/ChatGLM-6B -``` - diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/list_dataset.py b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/list_dataset.py deleted file mode 100644 index 9058d35b3d4279048732074f4a8dbb6edd4c9ed0..0000000000000000000000000000000000000000 --- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/list_dataset.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -""" -Simple dataset class that wraps a list of path names -""" - -from PIL import Image - -from maskrcnn_benchmark.structures.bounding_box import BoxList - - -class ListDataset(object): - def __init__(self, image_lists, transforms=None): - self.image_lists = image_lists - self.transforms = transforms - - def __getitem__(self, item): - img = Image.open(self.image_lists[item]).convert("RGB") - - # dummy target - w, h = img.size - target = BoxList([[0, 0, w, h]], img.size, mode="xyxy") - - if self.transforms is not None: - img, target = self.transforms(img, target) - - return img, target - - def __len__(self): - return len(self.image_lists) - - def get_img_info(self, item): - """ - Return the image dimensions for the image, without - loading and pre-processing it - """ - pass diff --git a/spaces/DEEMOSTECH/ChatAvatar/static/js/main.1c320b3e.js b/spaces/DEEMOSTECH/ChatAvatar/static/js/main.1c320b3e.js deleted file mode 100644 index 49a08551726a5c3f1273522922464a62d28ab693..0000000000000000000000000000000000000000 --- a/spaces/DEEMOSTECH/ChatAvatar/static/js/main.1c320b3e.js +++ /dev/null @@ -1,3 +0,0 @@ -/*! 
For license information please see main.1c320b3e.js.LICENSE.txt */ -!function(){var e={498:function(e){e.exports=function(){"use strict";var e=function(t,n){return e=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(e,t){e.__proto__=t}||function(e,t){for(var n in t)Object.prototype.hasOwnProperty.call(t,n)&&(e[n]=t[n])},e(t,n)};function t(t,n){if("function"!==typeof n&&null!==n)throw new TypeError("Class extends value "+String(n)+" is not a constructor or null");function r(){this.constructor=t}e(t,n),t.prototype=null===n?Object.create(n):(r.prototype=n.prototype,new r)}var n=function(){return n=Object.assign||function(e){for(var t,n=1,r=arguments.length;n0&&i[i.length-1])&&(6===A[0]||2===A[0])){a=0;continue}if(3===A[0]&&(!i||A[1]>i[0]&&A[1]=55296&&i<=56319&&n>10),a%1024+56320)),(i+1===n||r.length>16384)&&(A+=String.fromCharCode.apply(String,r),r.length=0)}return A},c="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",d="undefined"===typeof Uint8Array?[]:new Uint8Array(256),h=0;h>4,u[s++]=(15&r)<<4|i>>2,u[s++]=(3&i)<<6|63&A;return l},v=function(e){for(var t=e.length,n=[],r=0;r>w,x=(1<>w)+32,S=65536>>B,E=(1<=0){if(e<55296||e>56319&&e<=65535)return t=((t=this.index[e>>w])<<_)+(e&x),this.data[t];if(e<=65535)return t=((t=this.index[b+(e-55296>>w)])<<_)+(e&x),this.data[t];if(e>B),t=this.index[t],t+=e>>w&E,t=((t=this.index[t])<<_)+(e&x),this.data[t];if(e<=1114111)return this.data[this.highValueIndex]}return this.errorValue},e}(),Q="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",k="undefined"===typeof Uint8Array?[]:new Uint8Array(256),L=0;LI?(i.push(!0),a-=I):i.push(!1),-1!==["normal","auto","loose"].indexOf(t)&&-1!==[8208,8211,12316,12448].indexOf(e))return r.push(A),n.push(Y);if(a===H||a===K){if(0===A)return r.push(A),n.push(ue);var o=n[A-1];return-1===ke.indexOf(o)?(r.push(r[A-1]),n.push(o)):(r.push(A),n.push(ue))}return r.push(A),a===ce?n.push("strict"===t?te:me):a===_e||a===le?n.push(ue):a===be?e>=131072&&e<=196605||e>=196608&&e<=262141?n.push(me):n.push(ue):void n.push(a)})),[r,n,i]},Re=function(e,t,n,r){var i=r[n];if(Array.isArray(e)?-1!==e.indexOf(i):e===i)for(var A=n;A<=r.length;){if((s=r[++A])===t)return!0;if(s!==G)break}if(i===G)for(A=n;A>0;){var a=r[--A];if(Array.isArray(e)?-1!==e.indexOf(a):e===a)for(var o=n;o<=r.length;){var s;if((s=r[++o])===t)return!0;if(s!==G)break}if(a!==G)break}return!1},Pe=function(e,t){for(var n=e;n>=0;){var r=t[n];if(r!==G)return r;n--}return 0},He=function(e,t,n,r,i){if(0===n[r])return Se;var A=r-1;if(Array.isArray(i)&&!0===i[A])return Se;var a=A-1,o=A+1,s=t[A],l=a>=0?t[a]:0,u=t[o];if(s===R&&u===P)return Se;if(-1!==Fe.indexOf(s))return Ce;if(-1!==Fe.indexOf(u))return Se;if(-1!==Te.indexOf(u))return Se;if(Pe(A,t)===V)return Ee;if(Ue.get(e[A])===K)return Se;if((s===de||s===he)&&Ue.get(e[o])===K)return Se;if(s===N||u===N)return Se;if(s===z)return Se;if(-1===[G,j,q].indexOf(s)&&u===z)return Se;if(-1!==[J,Z,$,ie,se].indexOf(u))return Se;if(Pe(A,t)===ne)return Se;if(Re(re,ne,A,t))return Se;if(Re([J,Z],te,A,t))return Se;if(Re(W,W,A,t))return Se;if(s===G)return Ee;if(s===re||u===re)return Se;if(u===Y||s===Y)return Ee;if(-1!==[j,q,te].indexOf(u)||s===X)return Se;if(l===ge&&-1!==Ie.indexOf(s))return Se;if(s===se&&u===ge)return Se;if(u===ee)return Se;if(-1!==Me.indexOf(u)&&s===Ae||-1!==Me.indexOf(s)&&u===Ae)return Se;if(s===oe&&-1!==[me,de,he].indexOf(u)||-1!==[me,de,he].indexOf(s)&&u===ae)return Se;if(-1!==Me.indexOf(s)&&-1!==Qe.indexOf(u)||-1!==Qe.indexOf(s)&&-1!==Me.indexOf(u))return 
Se;if(-1!==[oe,ae].indexOf(s)&&(u===Ae||-1!==[ne,q].indexOf(u)&&t[o+1]===Ae)||-1!==[ne,q].indexOf(s)&&u===Ae||s===Ae&&-1!==[Ae,se,ie].indexOf(u))return Se;if(-1!==[Ae,se,ie,J,Z].indexOf(u))for(var c=A;c>=0;){if((d=t[c])===Ae)return Se;if(-1===[se,ie].indexOf(d))break;c--}if(-1!==[oe,ae].indexOf(u))for(c=-1!==[J,Z].indexOf(s)?a:A;c>=0;){var d;if((d=t[c])===Ae)return Se;if(-1===[se,ie].indexOf(d))break;c--}if(ve===s&&-1!==[ve,ye,fe,pe].indexOf(u)||-1!==[ye,fe].indexOf(s)&&-1!==[ye,we].indexOf(u)||-1!==[we,pe].indexOf(s)&&u===we)return Se;if(-1!==Le.indexOf(s)&&-1!==[ee,ae].indexOf(u)||-1!==Le.indexOf(u)&&s===oe)return Se;if(-1!==Me.indexOf(s)&&-1!==Me.indexOf(u))return Se;if(s===ie&&-1!==Me.indexOf(u))return Se;if(-1!==Me.concat(Ae).indexOf(s)&&u===ne&&-1===xe.indexOf(e[o])||-1!==Me.concat(Ae).indexOf(u)&&s===Z)return Se;if(s===Be&&u===Be){for(var h=n[A],f=1;h>0&&t[--h]===Be;)f++;if(f%2!==0)return Se}return s===de&&u===he?Se:Ee},Oe=function(e,t){t||(t={lineBreak:"normal",wordBreak:"normal"});var n=De(e,t.lineBreak),r=n[0],i=n[1],A=n[2];"break-all"!==t.wordBreak&&"break-word"!==t.wordBreak||(i=i.map((function(e){return-1!==[Ae,ue,_e].indexOf(e)?me:e})));var a="keep-all"===t.wordBreak?A.map((function(t,n){return t&&e[n]>=19968&&e[n]<=40959})):void 0;return[r,i,a]},Ne=function(){function e(e,t,n,r){this.codePoints=e,this.required=t===Ce,this.start=n,this.end=r}return e.prototype.slice=function(){return u.apply(void 0,this.codePoints.slice(this.start,this.end))},e}(),Ve=function(e,t){var n=l(e),r=Oe(n,t),i=r[0],A=r[1],a=r[2],o=n.length,s=0,u=0;return{next:function(){if(u>=o)return{done:!0,value:null};for(var e=Se;u=It&&e<=57},jt=function(e){return e>=55296&&e<=57343},Xt=function(e){return Wt(e)||e>=Nt&&e<=zt||e>=Dt&&e<=Pt},qt=function(e){return e>=Dt&&e<=Ot},Yt=function(e){return e>=Nt&&e<=Kt},Jt=function(e){return qt(e)||Yt(e)},Zt=function(e){return e>=wt},$t=function(e){return e===je||e===Ye||e===Je},en=function(e){return Jt(e)||Zt(e)||e===at},tn=function(e){return en(e)||Wt(e)||e===ot},nn=function(e){return e>=Ut&&e<=Mt||e===Ft||e>=Tt&&e<=Qt||e===kt},rn=function(e,t){return e===qe&&t!==je},An=function(e,t,n){return e===ot?en(t)||rn(t,n):!!en(e)||!(e!==qe||!rn(e,t))},an=function(e,t,n){return e===bt||e===ot?!!Wt(t)||t===Et&&Wt(n):Wt(e===Et?t:e)},on=function(e){var t=0,n=1;e[t]!==bt&&e[t]!==ot||(e[t]===ot&&(n=-1),t++);for(var r=[];Wt(e[t]);)r.push(e[t++]);var i=r.length?parseInt(u.apply(void 0,r),10):0;e[t]===Et&&t++;for(var A=[];Wt(e[t]);)A.push(e[t++]);var a=A.length,o=a?parseInt(u.apply(void 0,A),10):0;e[t]!==Vt&&e[t]!==Rt||t++;var s=1;e[t]!==bt&&e[t]!==ot||(e[t]===ot&&(s=-1),t++);for(var l=[];Wt(e[t]);)l.push(e[t++]);var c=l.length?parseInt(u.apply(void 0,l),10):0;return n*(i+o*Math.pow(10,-a))*Math.pow(10,s*c)},sn={type:2},ln={type:3},un={type:4},cn={type:13},dn={type:8},hn={type:21},fn={type:9},pn={type:10},gn={type:11},mn={type:12},vn={type:14},yn={type:23},wn={type:1},Bn={type:25},_n={type:24},bn={type:26},xn={type:27},Cn={type:28},Sn={type:29},En={type:31},Un={type:32},Mn=function(){function e(){this._value=[]}return e.prototype.write=function(e){this._value=this._value.concat(l(e))},e.prototype.read=function(){for(var e=[],t=this.consumeToken();t!==Un;)e.push(t),t=this.consumeToken();return e},e.prototype.consumeToken=function(){var e=this.consumeCodePoint();switch(e){case Ze:return this.consumeStringToken(Ze);case et:var t=this.peekCodePoint(0),n=this.peekCodePoint(1),r=this.peekCodePoint(2);if(tn(t)||rn(n,r)){var 
i=An(t,n,r)?Ge:ze;return{type:5,value:this.consumeName(),flags:i}}break;case tt:if(this.peekCodePoint(0)===$e)return this.consumeCodePoint(),cn;break;case rt:return this.consumeStringToken(rt);case it:return sn;case At:return ln;case _t:if(this.peekCodePoint(0)===$e)return this.consumeCodePoint(),vn;break;case bt:if(an(e,this.peekCodePoint(0),this.peekCodePoint(1)))return this.reconsumeCodePoint(e),this.consumeNumericToken();break;case xt:return un;case ot:var A=e,a=this.peekCodePoint(0),o=this.peekCodePoint(1);if(an(A,a,o))return this.reconsumeCodePoint(e),this.consumeNumericToken();if(An(A,a,o))return this.reconsumeCodePoint(e),this.consumeIdentLikeToken();if(a===ot&&o===ut)return this.consumeCodePoint(),this.consumeCodePoint(),_n;break;case Et:if(an(e,this.peekCodePoint(0),this.peekCodePoint(1)))return this.reconsumeCodePoint(e),this.consumeNumericToken();break;case Xe:if(this.peekCodePoint(0)===_t)for(this.consumeCodePoint();;){var s=this.consumeCodePoint();if(s===_t&&(s=this.consumeCodePoint())===Xe)return this.consumeToken();if(s===Lt)return this.consumeToken()}break;case Ct:return bn;case St:return xn;case lt:if(this.peekCodePoint(0)===st&&this.peekCodePoint(1)===ot&&this.peekCodePoint(2)===ot)return this.consumeCodePoint(),this.consumeCodePoint(),Bn;break;case ct:var l=this.peekCodePoint(0),c=this.peekCodePoint(1),d=this.peekCodePoint(2);if(An(l,c,d))return{type:7,value:this.consumeName()};break;case dt:return Cn;case qe:if(rn(e,this.peekCodePoint(0)))return this.reconsumeCodePoint(e),this.consumeIdentLikeToken();break;case ht:return Sn;case ft:if(this.peekCodePoint(0)===$e)return this.consumeCodePoint(),dn;break;case pt:return gn;case mt:return mn;case Ht:case Gt:var h=this.peekCodePoint(0),f=this.peekCodePoint(1);return h!==bt||!Xt(f)&&f!==gt||(this.consumeCodePoint(),this.consumeUnicodeRangeToken()),this.reconsumeCodePoint(e),this.consumeIdentLikeToken();case vt:if(this.peekCodePoint(0)===$e)return this.consumeCodePoint(),fn;if(this.peekCodePoint(0)===vt)return this.consumeCodePoint(),hn;break;case yt:if(this.peekCodePoint(0)===$e)return this.consumeCodePoint(),pn;break;case Lt:return Un}return $t(e)?(this.consumeWhiteSpace(),En):Wt(e)?(this.reconsumeCodePoint(e),this.consumeNumericToken()):en(e)?(this.reconsumeCodePoint(e),this.consumeIdentLikeToken()):{type:6,value:u(e)}},e.prototype.consumeCodePoint=function(){var e=this._value.shift();return"undefined"===typeof e?-1:e},e.prototype.reconsumeCodePoint=function(e){this._value.unshift(e)},e.prototype.peekCodePoint=function(e){return e>=this._value.length?-1:this._value[e]},e.prototype.consumeUnicodeRangeToken=function(){for(var e=[],t=this.consumeCodePoint();Xt(t)&&e.length<6;)e.push(t),t=this.consumeCodePoint();for(var n=!1;t===gt&&e.length<6;)e.push(t),t=this.consumeCodePoint(),n=!0;if(n)return{type:30,start:parseInt(u.apply(void 0,e.map((function(e){return e===gt?It:e}))),16),end:parseInt(u.apply(void 0,e.map((function(e){return e===gt?zt:e}))),16)};var r=parseInt(u.apply(void 0,e),16);if(this.peekCodePoint(0)===ot&&Xt(this.peekCodePoint(1))){this.consumeCodePoint(),t=this.consumeCodePoint();for(var i=[];Xt(t)&&i.length<6;)i.push(t),t=this.consumeCodePoint();return{type:30,start:r,end:parseInt(u.apply(void 0,i),16)}}return{type:30,start:r,end:r}},e.prototype.consumeIdentLikeToken=function(){var 
e=this.consumeName();return"url"===e.toLowerCase()&&this.peekCodePoint(0)===it?(this.consumeCodePoint(),this.consumeUrlToken()):this.peekCodePoint(0)===it?(this.consumeCodePoint(),{type:19,value:e}):{type:20,value:e}},e.prototype.consumeUrlToken=function(){var e=[];if(this.consumeWhiteSpace(),this.peekCodePoint(0)===Lt)return{type:22,value:""};var t=this.peekCodePoint(0);if(t===rt||t===Ze){var n=this.consumeStringToken(this.consumeCodePoint());return 0===n.type&&(this.consumeWhiteSpace(),this.peekCodePoint(0)===Lt||this.peekCodePoint(0)===At)?(this.consumeCodePoint(),{type:22,value:n.value}):(this.consumeBadUrlRemnants(),yn)}for(;;){var r=this.consumeCodePoint();if(r===Lt||r===At)return{type:22,value:u.apply(void 0,e)};if($t(r))return this.consumeWhiteSpace(),this.peekCodePoint(0)===Lt||this.peekCodePoint(0)===At?(this.consumeCodePoint(),{type:22,value:u.apply(void 0,e)}):(this.consumeBadUrlRemnants(),yn);if(r===Ze||r===rt||r===it||nn(r))return this.consumeBadUrlRemnants(),yn;if(r===qe){if(!rn(r,this.peekCodePoint(0)))return this.consumeBadUrlRemnants(),yn;e.push(this.consumeEscapedCodePoint())}else e.push(r)}},e.prototype.consumeWhiteSpace=function(){for(;$t(this.peekCodePoint(0));)this.consumeCodePoint()},e.prototype.consumeBadUrlRemnants=function(){for(;;){var e=this.consumeCodePoint();if(e===At||e===Lt)return;rn(e,this.peekCodePoint(0))&&this.consumeEscapedCodePoint()}},e.prototype.consumeStringSlice=function(e){for(var t=5e4,n="";e>0;){var r=Math.min(t,e);n+=u.apply(void 0,this._value.splice(0,r)),e-=r}return this._value.shift(),n},e.prototype.consumeStringToken=function(e){for(var t="",n=0;;){var r=this._value[n];if(r===Lt||void 0===r||r===e)return{type:0,value:t+=this.consumeStringSlice(n)};if(r===je)return this._value.splice(0,n),wn;if(r===qe){var i=this._value[n+1];i!==Lt&&void 0!==i&&(i===je?(t+=this.consumeStringSlice(n),n=-1,this._value.shift()):rn(r,i)&&(t+=this.consumeStringSlice(n),t+=u(this.consumeEscapedCodePoint()),n=-1))}n++}},e.prototype.consumeNumber=function(){var e=[],t=Ke,n=this.peekCodePoint(0);for(n!==bt&&n!==ot||e.push(this.consumeCodePoint());Wt(this.peekCodePoint(0));)e.push(this.consumeCodePoint());n=this.peekCodePoint(0);var r=this.peekCodePoint(1);if(n===Et&&Wt(r))for(e.push(this.consumeCodePoint(),this.consumeCodePoint()),t=We;Wt(this.peekCodePoint(0));)e.push(this.consumeCodePoint());n=this.peekCodePoint(0),r=this.peekCodePoint(1);var i=this.peekCodePoint(2);if((n===Vt||n===Rt)&&((r===bt||r===ot)&&Wt(i)||Wt(r)))for(e.push(this.consumeCodePoint(),this.consumeCodePoint()),t=We;Wt(this.peekCodePoint(0));)e.push(this.consumeCodePoint());return[on(e),t]},e.prototype.consumeNumericToken=function(){var e=this.consumeNumber(),t=e[0],n=e[1],r=this.peekCodePoint(0),i=this.peekCodePoint(1),A=this.peekCodePoint(2);return An(r,i,A)?{type:15,number:t,flags:n,unit:this.consumeName()}:r===nt?(this.consumeCodePoint(),{type:16,number:t,flags:n}):{type:17,number:t,flags:n}},e.prototype.consumeEscapedCodePoint=function(){var e=this.consumeCodePoint();if(Xt(e)){for(var t=u(e);Xt(this.peekCodePoint(0))&&t.length<6;)t+=u(this.consumeCodePoint());$t(this.peekCodePoint(0))&&this.consumeCodePoint();var n=parseInt(t,16);return 0===n||jt(n)||n>1114111?Bt:n}return e===Lt?Bt:e},e.prototype.consumeName=function(){for(var e="";;){var t=this.consumeCodePoint();if(tn(t))e+=u(t);else{if(!rn(t,this.peekCodePoint(0)))return this.reconsumeCodePoint(t),e;e+=u(this.consumeEscapedCodePoint())}}},e}(),Fn=function(){function e(e){this._tokens=e}return e.create=function(t){var n=new Mn;return 
n.write(t),new e(n.read())},e.parseValue=function(t){return e.create(t).parseComponentValue()},e.parseValues=function(t){return e.create(t).parseComponentValues()},e.prototype.parseComponentValue=function(){for(var e=this.consumeToken();31===e.type;)e=this.consumeToken();if(32===e.type)throw new SyntaxError("Error parsing CSS component value, unexpected EOF");this.reconsumeToken(e);var t=this.consumeComponentValue();do{e=this.consumeToken()}while(31===e.type);if(32===e.type)return t;throw new SyntaxError("Error parsing CSS component value, multiple values found when expecting only one")},e.prototype.parseComponentValues=function(){for(var e=[];;){var t=this.consumeComponentValue();if(32===t.type)return e;e.push(t),e.push()}},e.prototype.consumeComponentValue=function(){var e=this.consumeToken();switch(e.type){case 11:case 28:case 2:return this.consumeSimpleBlock(e.type);case 19:return this.consumeFunction(e)}return e},e.prototype.consumeSimpleBlock=function(e){for(var t={type:e,values:[]},n=this.consumeToken();;){if(32===n.type||Hn(n,e))return t;this.reconsumeToken(n),t.values.push(this.consumeComponentValue()),n=this.consumeToken()}},e.prototype.consumeFunction=function(e){for(var t={name:e.value,values:[],type:18};;){var n=this.consumeToken();if(32===n.type||3===n.type)return t;this.reconsumeToken(n),t.values.push(this.consumeComponentValue())}},e.prototype.consumeToken=function(){var e=this._tokens.shift();return"undefined"===typeof e?Un:e},e.prototype.reconsumeToken=function(e){this._tokens.unshift(e)},e}(),Tn=function(e){return 15===e.type},Qn=function(e){return 17===e.type},kn=function(e){return 20===e.type},Ln=function(e){return 0===e.type},In=function(e,t){return kn(e)&&e.value===t},Dn=function(e){return 31!==e.type},Rn=function(e){return 31!==e.type&&4!==e.type},Pn=function(e){var t=[],n=[];return e.forEach((function(e){if(4===e.type){if(0===n.length)throw new Error("Error parsing function args, zero tokens for arg");return t.push(n),void(n=[])}31!==e.type&&n.push(e)})),n.length&&t.push(n),t},Hn=function(e,t){return 11===t&&12===e.type||28===t&&29===e.type||2===t&&3===e.type},On=function(e){return 17===e.type||15===e.type},Nn=function(e){return 16===e.type||On(e)},Vn=function(e){return e.length>1?[e[0],e[1]]:[e[0]]},zn={type:17,number:0,flags:Ke},Gn={type:16,number:50,flags:Ke},Kn={type:16,number:100,flags:Ke},Wn=function(e,t,n){var r=e[0],i=e[1];return[jn(r,t),jn("undefined"!==typeof i?i:r,n)]},jn=function(e,t){if(16===e.type)return e.number/100*t;if(Tn(e))switch(e.unit){case"rem":case"em":return 16*e.number;default:return e.number}return e.number},Xn="deg",qn="grad",Yn="rad",Jn="turn",Zn={name:"angle",parse:function(e,t){if(15===t.type)switch(t.unit){case Xn:return Math.PI*t.number/180;case qn:return Math.PI/200*t.number;case Yn:return t.number;case Jn:return 2*Math.PI*t.number}throw new Error("Unsupported angle type")}},$n=function(e){return 15===e.type&&(e.unit===Xn||e.unit===qn||e.unit===Yn||e.unit===Jn)},er=function(e){switch(e.filter(kn).map((function(e){return e.value})).join(" ")){case"to bottom right":case"to right bottom":case"left top":case"top left":return[zn,zn];case"to top":case"bottom":return tr(0);case"to bottom left":case"to left bottom":case"right top":case"top right":return[zn,Kn];case"to right":case"left":return tr(90);case"to top left":case"to left top":case"right bottom":case"bottom right":return[Kn,Kn];case"to bottom":case"top":return tr(180);case"to top right":case"to right top":case"left bottom":case"bottom left":return[Kn,zn];case"to 
left":case"right":return tr(270)}return 0},tr=function(e){return Math.PI*e/180},nr={name:"color",parse:function(e,t){if(18===t.type){var n=ur[t.name];if("undefined"===typeof n)throw new Error('Attempting to parse an unsupported color function "'+t.name+'"');return n(e,t.values)}if(5===t.type){if(3===t.value.length){var r=t.value.substring(0,1),i=t.value.substring(1,2),A=t.value.substring(2,3);return Ar(parseInt(r+r,16),parseInt(i+i,16),parseInt(A+A,16),1)}if(4===t.value.length){r=t.value.substring(0,1),i=t.value.substring(1,2),A=t.value.substring(2,3);var a=t.value.substring(3,4);return Ar(parseInt(r+r,16),parseInt(i+i,16),parseInt(A+A,16),parseInt(a+a,16)/255)}if(6===t.value.length)return r=t.value.substring(0,2),i=t.value.substring(2,4),A=t.value.substring(4,6),Ar(parseInt(r,16),parseInt(i,16),parseInt(A,16),1);if(8===t.value.length)return r=t.value.substring(0,2),i=t.value.substring(2,4),A=t.value.substring(4,6),a=t.value.substring(6,8),Ar(parseInt(r,16),parseInt(i,16),parseInt(A,16),parseInt(a,16)/255)}if(20===t.type){var o=dr[t.value.toUpperCase()];if("undefined"!==typeof o)return o}return dr.TRANSPARENT}},rr=function(e){return 0===(255&e)},ir=function(e){var t=255&e,n=255&e>>8,r=255&e>>16,i=255&e>>24;return t<255?"rgba("+i+","+r+","+n+","+t/255+")":"rgb("+i+","+r+","+n+")"},Ar=function(e,t,n,r){return(e<<24|t<<16|n<<8|Math.round(255*r)<<0)>>>0},ar=function(e,t){if(17===e.type)return e.number;if(16===e.type){var n=3===t?1:255;return 3===t?e.number/100*n:Math.round(e.number/100*n)}return 0},or=function(e,t){var n=t.filter(Rn);if(3===n.length){var r=n.map(ar),i=r[0],A=r[1],a=r[2];return Ar(i,A,a,1)}if(4===n.length){var o=n.map(ar),s=(i=o[0],A=o[1],a=o[2],o[3]);return Ar(i,A,a,s)}return 0};function sr(e,t,n){return n<0&&(n+=1),n>=1&&(n-=1),n<1/6?(t-e)*n*6+e:n<.5?t:n<2/3?6*(t-e)*(2/3-n)+e:e}var lr=function(e,t){var n=t.filter(Rn),r=n[0],i=n[1],A=n[2],a=n[3],o=(17===r.type?tr(r.number):Zn.parse(e,r))/(2*Math.PI),s=Nn(i)?i.number/100:0,l=Nn(A)?A.number/100:0,u="undefined"!==typeof a&&Nn(a)?jn(a,1):1;if(0===s)return Ar(255*l,255*l,255*l,1);var c=l<=.5?l*(s+1):l+s-l*s,d=2*l-c,h=sr(d,c,o+1/3),f=sr(d,c,o),p=sr(d,c,o-1/3);return Ar(255*h,255*f,255*p,u)},ur={hsl:lr,hsla:lr,rgb:or,rgba:or},cr=function(e,t){return 
nr.parse(e,Fn.create(t).parseComponentValue())},dr={ALICEBLUE:4042850303,ANTIQUEWHITE:4209760255,AQUA:16777215,AQUAMARINE:2147472639,AZURE:4043309055,BEIGE:4126530815,BISQUE:4293182719,BLACK:255,BLANCHEDALMOND:4293643775,BLUE:65535,BLUEVIOLET:2318131967,BROWN:2771004159,BURLYWOOD:3736635391,CADETBLUE:1604231423,CHARTREUSE:2147418367,CHOCOLATE:3530104575,CORAL:4286533887,CORNFLOWERBLUE:1687547391,CORNSILK:4294499583,CRIMSON:3692313855,CYAN:16777215,DARKBLUE:35839,DARKCYAN:9145343,DARKGOLDENROD:3095837695,DARKGRAY:2846468607,DARKGREEN:6553855,DARKGREY:2846468607,DARKKHAKI:3182914559,DARKMAGENTA:2332068863,DARKOLIVEGREEN:1433087999,DARKORANGE:4287365375,DARKORCHID:2570243327,DARKRED:2332033279,DARKSALMON:3918953215,DARKSEAGREEN:2411499519,DARKSLATEBLUE:1211993087,DARKSLATEGRAY:793726975,DARKSLATEGREY:793726975,DARKTURQUOISE:13554175,DARKVIOLET:2483082239,DEEPPINK:4279538687,DEEPSKYBLUE:12582911,DIMGRAY:1768516095,DIMGREY:1768516095,DODGERBLUE:512819199,FIREBRICK:2988581631,FLORALWHITE:4294635775,FORESTGREEN:579543807,FUCHSIA:4278255615,GAINSBORO:3705462015,GHOSTWHITE:4177068031,GOLD:4292280575,GOLDENROD:3668254975,GRAY:2155905279,GREEN:8388863,GREENYELLOW:2919182335,GREY:2155905279,HONEYDEW:4043305215,HOTPINK:4285117695,INDIANRED:3445382399,INDIGO:1258324735,IVORY:4294963455,KHAKI:4041641215,LAVENDER:3873897215,LAVENDERBLUSH:4293981695,LAWNGREEN:2096890111,LEMONCHIFFON:4294626815,LIGHTBLUE:2916673279,LIGHTCORAL:4034953471,LIGHTCYAN:3774873599,LIGHTGOLDENRODYELLOW:4210742015,LIGHTGRAY:3553874943,LIGHTGREEN:2431553791,LIGHTGREY:3553874943,LIGHTPINK:4290167295,LIGHTSALMON:4288707327,LIGHTSEAGREEN:548580095,LIGHTSKYBLUE:2278488831,LIGHTSLATEGRAY:2005441023,LIGHTSLATEGREY:2005441023,LIGHTSTEELBLUE:2965692159,LIGHTYELLOW:4294959359,LIME:16711935,LIMEGREEN:852308735,LINEN:4210091775,MAGENTA:4278255615,MAROON:2147483903,MEDIUMAQUAMARINE:1724754687,MEDIUMBLUE:52735,MEDIUMORCHID:3126187007,MEDIUMPURPLE:2473647103,MEDIUMSEAGREEN:1018393087,MEDIUMSLATEBLUE:2070474495,MEDIUMSPRINGGREEN:16423679,MEDIUMTURQUOISE:1221709055,MEDIUMVIOLETRED:3340076543,MIDNIGHTBLUE:421097727,MINTCREAM:4127193855,MISTYROSE:4293190143,MOCCASIN:4293178879,NAVAJOWHITE:4292783615,NAVY:33023,OLDLACE:4260751103,OLIVE:2155872511,OLIVEDRAB:1804477439,ORANGE:4289003775,ORANGERED:4282712319,ORCHID:3664828159,PALEGOLDENROD:4008225535,PALEGREEN:2566625535,PALETURQUOISE:2951671551,PALEVIOLETRED:3681588223,PAPAYAWHIP:4293907967,PEACHPUFF:4292524543,PERU:3448061951,PINK:4290825215,PLUM:3718307327,POWDERBLUE:2967529215,PURPLE:2147516671,REBECCAPURPLE:1714657791,RED:4278190335,ROSYBROWN:3163525119,ROYALBLUE:1097458175,SADDLEBROWN:2336560127,SALMON:4202722047,SANDYBROWN:4104413439,SEAGREEN:780883967,SEASHELL:4294307583,SIENNA:2689740287,SILVER:3233857791,SKYBLUE:2278484991,SLATEBLUE:1784335871,SLATEGRAY:1887473919,SLATEGREY:1887473919,SNOW:4294638335,SPRINGGREEN:16744447,STEELBLUE:1182971135,TAN:3535047935,TEAL:8421631,THISTLE:3636451583,TOMATO:4284696575,TRANSPARENT:0,TURQUOISE:1088475391,VIOLET:4001558271,WHEAT:4125012991,WHITE:4294967295,WHITESMOKE:4126537215,YELLOW:4294902015,YELLOWGREEN:2597139199},hr={name:"background-clip",initialValue:"border-box",prefix:!1,type:1,parse:function(e,t){return t.map((function(e){if(kn(e))switch(e.value){case"padding-box":return 1;case"content-box":return 2}return 0}))}},fr={name:"background-color",initialValue:"transparent",prefix:!1,type:3,format:"color"},pr=function(e,t){var n=nr.parse(e,t[0]),r=t[1];return r&&Nn(r)?{color:n,stop:r}:{color:n,stop:null}},gr=function(e,t){var 
n=e[0],r=e[e.length-1];null===n.stop&&(n.stop=zn),null===r.stop&&(r.stop=Kn);for(var i=[],A=0,a=0;aA?i.push(s):i.push(A),A=s}else i.push(null)}var l=null;for(a=0;ae.optimumDistance)?{optimumCorner:t,optimumDistance:o}:e}),{optimumDistance:i?1/0:-1/0,optimumCorner:null}).optimumCorner},Br=function(e,t,n,r,i){var A=0,a=0;switch(e.size){case 0:0===e.shape?A=a=Math.min(Math.abs(t),Math.abs(t-r),Math.abs(n),Math.abs(n-i)):1===e.shape&&(A=Math.min(Math.abs(t),Math.abs(t-r)),a=Math.min(Math.abs(n),Math.abs(n-i)));break;case 2:if(0===e.shape)A=a=Math.min(yr(t,n),yr(t,n-i),yr(t-r,n),yr(t-r,n-i));else if(1===e.shape){var o=Math.min(Math.abs(n),Math.abs(n-i))/Math.min(Math.abs(t),Math.abs(t-r)),s=wr(r,i,t,n,!0),l=s[0],u=s[1];a=o*(A=yr(l-t,(u-n)/o))}break;case 1:0===e.shape?A=a=Math.max(Math.abs(t),Math.abs(t-r),Math.abs(n),Math.abs(n-i)):1===e.shape&&(A=Math.max(Math.abs(t),Math.abs(t-r)),a=Math.max(Math.abs(n),Math.abs(n-i)));break;case 3:if(0===e.shape)A=a=Math.max(yr(t,n),yr(t,n-i),yr(t-r,n),yr(t-r,n-i));else if(1===e.shape){o=Math.max(Math.abs(n),Math.abs(n-i))/Math.max(Math.abs(t),Math.abs(t-r));var c=wr(r,i,t,n,!1);l=c[0],u=c[1],a=o*(A=yr(l-t,(u-n)/o))}}return Array.isArray(e.size)&&(A=jn(e.size[0],r),a=2===e.size.length?jn(e.size[1],i):A),[A,a]},_r=function(e,t){var n=tr(180),r=[];return Pn(t).forEach((function(t,i){if(0===i){var A=t[0];if(20===A.type&&-1!==["top","left","right","bottom"].indexOf(A.value))return void(n=er(t));if($n(A))return void(n=(Zn.parse(e,A)+tr(270))%tr(360))}var a=pr(e,t);r.push(a)})),{angle:n,stops:r,type:1}},br="closest-side",xr="farthest-side",Cr="closest-corner",Sr="farthest-corner",Er="circle",Ur="ellipse",Mr="cover",Fr="contain",Tr=function(e,t){var n=0,r=3,i=[],A=[];return Pn(t).forEach((function(t,a){var o=!0;if(0===a?o=t.reduce((function(e,t){if(kn(t))switch(t.value){case"center":return A.push(Gn),!1;case"top":case"left":return A.push(zn),!1;case"right":case"bottom":return A.push(Kn),!1}else if(Nn(t)||On(t))return A.push(t),!1;return e}),o):1===a&&(o=t.reduce((function(e,t){if(kn(t))switch(t.value){case Er:return n=0,!1;case Ur:return n=1,!1;case Fr:case br:return r=0,!1;case xr:return r=1,!1;case Cr:return r=2,!1;case Mr:case Sr:return r=3,!1}else if(On(t)||Nn(t))return Array.isArray(r)||(r=[]),r.push(t),!1;return e}),o)),o){var s=pr(e,t);i.push(s)}})),{size:r,shape:n,stops:i,position:A,type:2}},Qr=function(e){return 1===e.type},kr=function(e){return 2===e.type},Lr={name:"image",parse:function(e,t){if(22===t.type){var n={url:t.value,type:0};return e.cache.addImage(t.value),n}if(18===t.type){var r=Rr[t.name];if("undefined"===typeof r)throw new Error('Attempting to parse an unsupported image function "'+t.name+'"');return r(e,t.values)}throw new Error("Unsupported image type "+t.type)}};function Ir(e){return!(20===e.type&&"none"===e.value)&&(18!==e.type||!!Rr[e.name])}var Dr,Rr={"linear-gradient":function(e,t){var n=tr(180),r=[];return Pn(t).forEach((function(t,i){if(0===i){var A=t[0];if(20===A.type&&"to"===A.value)return void(n=er(t));if($n(A))return void(n=Zn.parse(e,A))}var a=pr(e,t);r.push(a)})),{angle:n,stops:r,type:1}},"-moz-linear-gradient":_r,"-ms-linear-gradient":_r,"-o-linear-gradient":_r,"-webkit-linear-gradient":_r,"radial-gradient":function(e,t){var n=0,r=3,i=[],A=[];return Pn(t).forEach((function(t,a){var o=!0;if(0===a){var s=!1;o=t.reduce((function(e,t){if(s)if(kn(t))switch(t.value){case"center":return A.push(Gn),e;case"top":case"left":return A.push(zn),e;case"right":case"bottom":return A.push(Kn),e}else(Nn(t)||On(t))&&A.push(t);else 
if(kn(t))switch(t.value){case Er:return n=0,!1;case Ur:return n=1,!1;case"at":return s=!0,!1;case br:return r=0,!1;case Mr:case xr:return r=1,!1;case Fr:case Cr:return r=2,!1;case Sr:return r=3,!1}else if(On(t)||Nn(t))return Array.isArray(r)||(r=[]),r.push(t),!1;return e}),o)}if(o){var l=pr(e,t);i.push(l)}})),{size:r,shape:n,stops:i,position:A,type:2}},"-moz-radial-gradient":Tr,"-ms-radial-gradient":Tr,"-o-radial-gradient":Tr,"-webkit-radial-gradient":Tr,"-webkit-gradient":function(e,t){var n=tr(180),r=[],i=1,A=0,a=3,o=[];return Pn(t).forEach((function(t,n){var A=t[0];if(0===n){if(kn(A)&&"linear"===A.value)return void(i=1);if(kn(A)&&"radial"===A.value)return void(i=2)}if(18===A.type)if("from"===A.name){var a=nr.parse(e,A.values[0]);r.push({stop:zn,color:a})}else if("to"===A.name)a=nr.parse(e,A.values[0]),r.push({stop:Kn,color:a});else if("color-stop"===A.name){var o=A.values.filter(Rn);if(2===o.length){a=nr.parse(e,o[1]);var s=o[0];Qn(s)&&r.push({stop:{type:16,number:100*s.number,flags:s.flags},color:a})}}})),1===i?{angle:(n+tr(180))%tr(360),stops:r,type:i}:{size:a,shape:A,stops:r,position:o,type:i}}},Pr={name:"background-image",initialValue:"none",type:1,prefix:!1,parse:function(e,t){if(0===t.length)return[];var n=t[0];return 20===n.type&&"none"===n.value?[]:t.filter((function(e){return Rn(e)&&Ir(e)})).map((function(t){return Lr.parse(e,t)}))}},Hr={name:"background-origin",initialValue:"border-box",prefix:!1,type:1,parse:function(e,t){return t.map((function(e){if(kn(e))switch(e.value){case"padding-box":return 1;case"content-box":return 2}return 0}))}},Or={name:"background-position",initialValue:"0% 0%",type:1,prefix:!1,parse:function(e,t){return Pn(t).map((function(e){return e.filter(Nn)})).map(Vn)}},Nr={name:"background-repeat",initialValue:"repeat",prefix:!1,type:1,parse:function(e,t){return Pn(t).map((function(e){return e.filter(kn).map((function(e){return e.value})).join(" ")})).map(Vr)}},Vr=function(e){switch(e){case"no-repeat":return 1;case"repeat-x":case"repeat no-repeat":return 2;case"repeat-y":case"no-repeat repeat":return 3;default:return 0}};!function(e){e.AUTO="auto",e.CONTAIN="contain",e.COVER="cover"}(Dr||(Dr={}));var zr,Gr={name:"background-size",initialValue:"0",prefix:!1,type:1,parse:function(e,t){return Pn(t).map((function(e){return e.filter(Kr)}))}},Kr=function(e){return kn(e)||Nn(e)},Wr=function(e){return{name:"border-"+e+"-color",initialValue:"transparent",prefix:!1,type:3,format:"color"}},jr=Wr("top"),Xr=Wr("right"),qr=Wr("bottom"),Yr=Wr("left"),Jr=function(e){return{name:"border-radius-"+e,initialValue:"0 0",prefix:!1,type:1,parse:function(e,t){return Vn(t.filter(Nn))}}},Zr=Jr("top-left"),$r=Jr("top-right"),ei=Jr("bottom-right"),ti=Jr("bottom-left"),ni=function(e){return{name:"border-"+e+"-style",initialValue:"solid",prefix:!1,type:2,parse:function(e,t){switch(t){case"none":return 0;case"dashed":return 2;case"dotted":return 3;case"double":return 4}return 1}}},ri=ni("top"),ii=ni("right"),Ai=ni("bottom"),ai=ni("left"),oi=function(e){return{name:"border-"+e+"-width",initialValue:"0",type:0,prefix:!1,parse:function(e,t){return Tn(t)?t.number:0}}},si=oi("top"),li=oi("right"),ui=oi("bottom"),ci=oi("left"),di={name:"color",initialValue:"transparent",prefix:!1,type:3,format:"color"},hi={name:"direction",initialValue:"ltr",prefix:!1,type:2,parse:function(e,t){return"rtl"===t?1:0}},fi={name:"display",initialValue:"inline-block",prefix:!1,type:1,parse:function(e,t){return t.filter(kn).reduce((function(e,t){return 
e|pi(t.value)}),0)}},pi=function(e){switch(e){case"block":case"-webkit-box":return 2;case"inline":return 4;case"run-in":return 8;case"flow":return 16;case"flow-root":return 32;case"table":return 64;case"flex":case"-webkit-flex":return 128;case"grid":case"-ms-grid":return 256;case"ruby":return 512;case"subgrid":return 1024;case"list-item":return 2048;case"table-row-group":return 4096;case"table-header-group":return 8192;case"table-footer-group":return 16384;case"table-row":return 32768;case"table-cell":return 65536;case"table-column-group":return 131072;case"table-column":return 262144;case"table-caption":return 524288;case"ruby-base":return 1048576;case"ruby-text":return 2097152;case"ruby-base-container":return 4194304;case"ruby-text-container":return 8388608;case"contents":return 16777216;case"inline-block":return 33554432;case"inline-list-item":return 67108864;case"inline-table":return 134217728;case"inline-flex":return 268435456;case"inline-grid":return 536870912}return 0},gi={name:"float",initialValue:"none",prefix:!1,type:2,parse:function(e,t){switch(t){case"left":return 1;case"right":return 2;case"inline-start":return 3;case"inline-end":return 4}return 0}},mi={name:"letter-spacing",initialValue:"0",prefix:!1,type:0,parse:function(e,t){return 20===t.type&&"normal"===t.value?0:17===t.type||15===t.type?t.number:0}};!function(e){e.NORMAL="normal",e.STRICT="strict"}(zr||(zr={}));var vi,yi={name:"line-break",initialValue:"normal",prefix:!1,type:2,parse:function(e,t){return"strict"===t?zr.STRICT:zr.NORMAL}},wi={name:"line-height",initialValue:"normal",prefix:!1,type:4},Bi=function(e,t){return kn(e)&&"normal"===e.value?1.2*t:17===e.type?t*e.number:Nn(e)?jn(e,t):t},_i={name:"list-style-image",initialValue:"none",type:0,prefix:!1,parse:function(e,t){return 20===t.type&&"none"===t.value?null:Lr.parse(e,t)}},bi={name:"list-style-position",initialValue:"outside",prefix:!1,type:2,parse:function(e,t){return"inside"===t?0:1}},xi={name:"list-style-type",initialValue:"none",prefix:!1,type:2,parse:function(e,t){switch(t){case"disc":return 0;case"circle":return 1;case"square":return 2;case"decimal":return 3;case"cjk-decimal":return 4;case"decimal-leading-zero":return 5;case"lower-roman":return 6;case"upper-roman":return 7;case"lower-greek":return 8;case"lower-alpha":return 9;case"upper-alpha":return 10;case"arabic-indic":return 11;case"armenian":return 12;case"bengali":return 13;case"cambodian":return 14;case"cjk-earthly-branch":return 15;case"cjk-heavenly-stem":return 16;case"cjk-ideographic":return 17;case"devanagari":return 18;case"ethiopic-numeric":return 19;case"georgian":return 20;case"gujarati":return 21;case"gurmukhi":case"hebrew":return 22;case"hiragana":return 23;case"hiragana-iroha":return 24;case"japanese-formal":return 25;case"japanese-informal":return 26;case"kannada":return 27;case"katakana":return 28;case"katakana-iroha":return 29;case"khmer":return 30;case"korean-hangul-formal":return 31;case"korean-hanja-formal":return 32;case"korean-hanja-informal":return 33;case"lao":return 34;case"lower-armenian":return 35;case"malayalam":return 36;case"mongolian":return 37;case"myanmar":return 38;case"oriya":return 39;case"persian":return 40;case"simp-chinese-formal":return 41;case"simp-chinese-informal":return 42;case"tamil":return 43;case"telugu":return 44;case"thai":return 45;case"tibetan":return 46;case"trad-chinese-formal":return 47;case"trad-chinese-informal":return 48;case"upper-armenian":return 49;case"disclosure-open":return 50;case"disclosure-closed":return 
51;default:return-1}}},Ci=function(e){return{name:"margin-"+e,initialValue:"0",prefix:!1,type:4}},Si=Ci("top"),Ei=Ci("right"),Ui=Ci("bottom"),Mi=Ci("left"),Fi={name:"overflow",initialValue:"visible",prefix:!1,type:1,parse:function(e,t){return t.filter(kn).map((function(e){switch(e.value){case"hidden":return 1;case"scroll":return 2;case"clip":return 3;case"auto":return 4;default:return 0}}))}},Ti={name:"overflow-wrap",initialValue:"normal",prefix:!1,type:2,parse:function(e,t){return"break-word"===t?"break-word":"normal"}},Qi=function(e){return{name:"padding-"+e,initialValue:"0",prefix:!1,type:3,format:"length-percentage"}},ki=Qi("top"),Li=Qi("right"),Ii=Qi("bottom"),Di=Qi("left"),Ri={name:"text-align",initialValue:"left",prefix:!1,type:2,parse:function(e,t){switch(t){case"right":return 2;case"center":case"justify":return 1;default:return 0}}},Pi={name:"position",initialValue:"static",prefix:!1,type:2,parse:function(e,t){switch(t){case"relative":return 1;case"absolute":return 2;case"fixed":return 3;case"sticky":return 4}return 0}},Hi={name:"text-shadow",initialValue:"none",type:1,prefix:!1,parse:function(e,t){return 1===t.length&&In(t[0],"none")?[]:Pn(t).map((function(t){for(var n={color:dr.TRANSPARENT,offsetX:zn,offsetY:zn,blur:zn},r=0,i=0;i1?1:0],this.overflowWrap=vA(e,Ti,t.overflowWrap),this.paddingTop=vA(e,ki,t.paddingTop),this.paddingRight=vA(e,Li,t.paddingRight),this.paddingBottom=vA(e,Ii,t.paddingBottom),this.paddingLeft=vA(e,Di,t.paddingLeft),this.paintOrder=vA(e,dA,t.paintOrder),this.position=vA(e,Pi,t.position),this.textAlign=vA(e,Ri,t.textAlign),this.textDecorationColor=vA(e,Ji,null!==(n=t.textDecorationColor)&&void 0!==n?n:t.color),this.textDecorationLine=vA(e,Zi,null!==(r=t.textDecorationLine)&&void 0!==r?r:t.textDecoration),this.textShadow=vA(e,Hi,t.textShadow),this.textTransform=vA(e,Oi,t.textTransform),this.transform=vA(e,Ni,t.transform),this.transformOrigin=vA(e,Ki,t.transformOrigin),this.visibility=vA(e,Wi,t.visibility),this.webkitTextStrokeColor=vA(e,hA,t.webkitTextStrokeColor),this.webkitTextStrokeWidth=vA(e,fA,t.webkitTextStrokeWidth),this.wordBreak=vA(e,ji,t.wordBreak),this.zIndex=vA(e,Xi,t.zIndex)}return e.prototype.isVisible=function(){return this.display>0&&this.opacity>0&&0===this.visibility},e.prototype.isTransparent=function(){return rr(this.backgroundColor)},e.prototype.isTransformed=function(){return null!==this.transform},e.prototype.isPositioned=function(){return 0!==this.position},e.prototype.isPositionedWithZIndex=function(){return this.isPositioned()&&!this.zIndex.auto},e.prototype.isFloating=function(){return 0!==this.float},e.prototype.isInlineLevel=function(){return iA(this.display,4)||iA(this.display,33554432)||iA(this.display,268435456)||iA(this.display,536870912)||iA(this.display,67108864)||iA(this.display,134217728)},e}(),gA=function(){function e(e,t){this.content=vA(e,AA,t.content),this.quotes=vA(e,lA,t.quotes)}return e}(),mA=function(){function e(e,t){this.counterIncrement=vA(e,aA,t.counterIncrement),this.counterReset=vA(e,oA,t.counterReset)}return e}(),vA=function(e,t,n){var r=new Mn,i=null!==n&&"undefined"!==typeof n?n.toString():t.initialValue;r.write(i);var A=new Fn(r.read());switch(t.type){case 2:var a=A.parseComponentValue();return t.parse(e,kn(a)?a.value:t.initialValue);case 0:return t.parse(e,A.parseComponentValue());case 1:return t.parse(e,A.parseComponentValues());case 4:return A.parseComponentValue();case 3:switch(t.format){case"angle":return Zn.parse(e,A.parseComponentValue());case"color":return 
nr.parse(e,A.parseComponentValue());case"image":return Lr.parse(e,A.parseComponentValue());case"length":var o=A.parseComponentValue();return On(o)?o:zn;case"length-percentage":var s=A.parseComponentValue();return Nn(s)?s:zn;case"time":return qi.parse(e,A.parseComponentValue())}}},yA="data-html2canvas-debug",wA=function(e){switch(e.getAttribute(yA)){case"all":return 1;case"clone":return 2;case"parse":return 3;case"render":return 4;default:return 0}},BA=function(e,t){var n=wA(e);return 1===n||t===n},_A=function(){function e(e,t){this.context=e,this.textNodes=[],this.elements=[],this.flags=0,BA(t,3),this.styles=new pA(e,window.getComputedStyle(t,null)),lo(t)&&(this.styles.animationDuration.some((function(e){return e>0}))&&(t.style.animationDuration="0s"),null!==this.styles.transform&&(t.style.transform="none")),this.bounds=o(this.context,t),BA(t,4)&&(this.flags|=16)}return e}(),bA="AAAAAAAAAAAAEA4AGBkAAFAaAAACAAAAAAAIABAAGAAwADgACAAQAAgAEAAIABAACAAQAAgAEAAIABAACAAQAAgAEAAIABAAQABIAEQATAAIABAACAAQAAgAEAAIABAAVABcAAgAEAAIABAACAAQAGAAaABwAHgAgACIAI4AlgAIABAAmwCjAKgAsAC2AL4AvQDFAMoA0gBPAVYBWgEIAAgACACMANoAYgFkAWwBdAF8AX0BhQGNAZUBlgGeAaMBlQGWAasBswF8AbsBwwF0AcsBYwHTAQgA2wG/AOMBdAF8AekB8QF0AfkB+wHiAHQBfAEIAAMC5gQIAAsCEgIIAAgAFgIeAggAIgIpAggAMQI5AkACygEIAAgASAJQAlgCYAIIAAgACAAKBQoFCgUTBRMFGQUrBSsFCAAIAAgACAAIAAgACAAIAAgACABdAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABoAmgCrwGvAQgAbgJ2AggAHgEIAAgACADnAXsCCAAIAAgAgwIIAAgACAAIAAgACACKAggAkQKZAggAPADJAAgAoQKkAqwCsgK6AsICCADJAggA0AIIAAgACAAIANYC3gIIAAgACAAIAAgACABAAOYCCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAkASoB+QIEAAgACAA8AEMCCABCBQgACABJBVAFCAAIAAgACAAIAAgACAAIAAgACABTBVoFCAAIAFoFCABfBWUFCAAIAAgACAAIAAgAbQUIAAgACAAIAAgACABzBXsFfQWFBYoFigWKBZEFigWKBYoFmAWfBaYFrgWxBbkFCAAIAAgACAAIAAgACAAIAAgACAAIAMEFCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAMgFCADQBQgACAAIAAgACAAIAAgACAAIAAgACAAIAO4CCAAIAAgAiQAIAAgACABAAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAD0AggACAD8AggACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIANYFCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAg
ACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAMDvwAIAAgAJAIIAAgACAAIAAgACAAIAAgACwMTAwgACAB9BOsEGwMjAwgAKwMyAwsFYgE3A/MEPwMIAEUDTQNRAwgAWQOsAGEDCAAIAAgACAAIAAgACABpAzQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFIQUoBSwFCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABtAwgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABMAEwACAAIAAgACAAIABgACAAIAAgACAC/AAgACAAyAQgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACACAAIAAwAAgACAAIAAgACAAIAAgACAAIAAAARABIAAgACAAIABQASAAIAAgAIABwAEAAjgCIABsAqAC2AL0AigDQAtwC+IJIQqVAZUBWQqVAZUBlQGVAZUBlQGrC5UBlQGVAZUBlQGVAZUBlQGVAXsKlQGVAbAK6wsrDGUMpQzlDJUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQ
GVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAfAKAAuZA64AtwCJALoC6ADwAAgAuACgA/oEpgO6AqsD+AAIAAgAswMIAAgACAAIAIkAuwP5AfsBwwPLAwgACAAIAAgACADRA9kDCAAIAOED6QMIAAgACAAIAAgACADuA/YDCAAIAP4DyQAIAAgABgQIAAgAXQAOBAgACAAIAAgACAAIABMECAAIAAgACAAIAAgACAD8AAQBCAAIAAgAGgQiBCoECAExBAgAEAEIAAgACAAIAAgACAAIAAgACAAIAAgACAA4BAgACABABEYECAAIAAgATAQYAQgAVAQIAAgACAAIAAgACAAIAAgACAAIAFoECAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAOQEIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAB+BAcACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAEABhgSMBAgACAAIAAgAlAQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAwAEAAQABAADAAMAAwADAAQABAAEAAQABAAEAAQABHATAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAdQMIAAgACAAIAAgACAAIAMkACAAIAAgAfQMIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACACFA4kDCAAIAAgACAAIAOcBCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAIcDCAAIAAgACAAIAAgACAAIAAgACAAIAJEDCAAIAAgACADFAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABgBAgAZgQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAbAQCBXIECAAIAHkECAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABAAJwEQACjBKoEsgQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAC6BMIECAAIAAgACAAIAAgACABmBAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAxwQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAGYECAAIAAgAzgQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAigWKBYoFigWKBYoFigWKBd0FXwUIAOIF6gXxBYoF3gT5BQAGCAaKBYoFigWKBYoFigWKBYoFigWKBYoFigXWBIoFigWKBYoFigWKBYoFigWKBYsFEAaKBYoFigWKBYoFigWKBRQGCACKBYoFigWKBQgACAAIANEECAAIABgGigUgBggAJgYIAC4GMwaKBYoF0wQ3Bj4GigWKBYoFigWKBYoFigWKBYoFigWKBYoFigUIAAgACAAIAAgACAAIAAgAigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWLBf///////wQABAAEAAQABAAEAAQABAAEAAQAAwAEAAQAAgAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAQADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFA
AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAUAAAAFAAUAAAAFAAUAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAEAAQABAAEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUAAQAAAAUABQAFAAUABQAFAAAAAAAFAAUAAAAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAFAAUAAQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABwAFAAUABQAFAAAABwAHAAcAAAAHAAcABwAFAAEAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAcABwAFAAUAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAAAAQABAAAAAAAAAAAAAAAFAAUABQAFAAAABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAcABwAHAAcAAAAHAAcAAAAAAAUABQAHAAUAAQAHAAEABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABwABAAUABQAFAAUAAAAAAAAAAAAAAAEAAQABAAEAAQABAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABwAFAAUAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUAAQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQABQANAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQABAAEAAQABAAEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAEAAQABAAEAAQABAAEAAQABAAEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAABQAHAAUABQAFAAAAAAAAAAcABQAFAAUABQAFAAQABAAEAAQABAAEAAQABAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUAAAAFAAUABQAFAAUAAAAFAAUABQAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAAAAAAAAAAAAUABQAFAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAUAAAAHAAcABwAFAAUABQAFAAUABQAFAAUABwAHAAcABwAFAAcABwAAAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAUABwAHAAUABQAFAAUAAAAAAAcABwAAAAAABwAHAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
BQAAAAAABQAFAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAABwAHAAcABQAFAAAAAAAAAAAABQAFAAAAAAAFAAUABQAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAFAAUABQAFAAUAAAAFAAUABwAAAAcABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAFAAUABwAFAAUABQAFAAAAAAAHAAcAAAAAAAcABwAFAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAcABwAAAAAAAAAHAAcABwAAAAcABwAHAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAABQAHAAcABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAHAAcABwAAAAUABQAFAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAcABQAHAAcABQAHAAcAAAAFAAcABwAAAAcABwAFAAUAAAAAAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAUABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAFAAcABwAFAAUABQAAAAUAAAAHAAcABwAHAAcABwAHAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAHAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAABwAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAUAAAAFAAAAAAAAAAAABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABwAFAAUABQAFAAUAAAAFAAUAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABwAFAAUABQAFAAUABQAAAAUABQAHAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABQAFAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAcABQAFAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAHAAUABQAFAAUABQAFAAUABwAHAAcABwAHAAcABwAHAAUABwAHAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABwAHAAcABwAFAAUABwAHAAcAAAAAAAAAAAAHAAcABQAHAAcABwAHAAcABwAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAcABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAHAAUABQAFAAUABQAFAAUAAAAFAAAABQAAAAAABQAFAAUABQAFAAUABQAFAAcABwAHAAcABwAHAAUABQAFAAUABQAFAAUABQAFAAUAAAAAAAUABQAFAAUABQAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABwAFAAcABwAHAAcABwAFAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAUABQAFAAUABwAHAAUABQAHAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAcABQAFAAcABwAHAAUABwAFAAUABQAHAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABwAHAAcABwAHAAUABQAFAAUABQAFAAUABQAHAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAcABQAFAAUABQAFAAUABQAAAAAAAAAAAAUAAAA
AAAAAAAAAAAAABQAAAAAABwAFAAUAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUAAAAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAABQAAAAAAAAAFAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAUABQAHAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAHAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAHAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAcABwAFAAUABQAFAAcABwAFAAUABwAHAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAcABwAFAAUABwAHAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAFAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAFAAUABQAAAAAABQAFAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAFAAcABwAAAAAAAAAAAAAABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAFAAcABwAFAAcABwAAAAcABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAFAAUABQAAAAUABQAAAAAAAAAAAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABwAFAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABQAFAAUABQAFAAUABQAFAAUABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAHAAcABQAHAAUABQAAAAAAAAAAAAAAAAAFAAAABwAHAAcABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAHAAcABwAAAAAABwAHAAAAAAAHAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAAAAAAFAAUABQAFAAUABQAFAAAAAAAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAUABQAFAAUABwAHAAUABQAFAAcABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAcABQAFAAUABQAFAAUABwAFAAcABwAFAAcABQAFAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAcABQAFAAUABQAAAAAABwAHAAcABwAFAAUABwAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAHAAUABQAFAAUABQAFAAUABQAHAAcABQAHAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAFAAcABwAFAAUABQAFAAUABQAHAAUAAAAAAAAAAAAAAAAAAAAAAAcABwAFAAUABQAFAAcABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAUABQAFAAUABQAHAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAAAAAAFAAUABwAHAAcABwAFAAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABwAHAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAFAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAcABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAAAHAAUABQAFAAUABQAFAAUABwAFAAUABwAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUAAAAAAAAABQAAAAUABQAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAHAAcAAAAFAAUAAAAHAAcABQAHAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAAAAAAAAAAAAAAAAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAUABQAFAAAAAAAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAABQAFAAUABQAFAAUABQAAAAUABQAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAFAAUABQAFAAUADgAOAA4ADgAOAA4ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAAAAAAAAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAMAAwADAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkAAAAAAAAAAAAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAAAAAAAAAAAAsADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwACwAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAADgAOAA4AAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAAAA4ADgAOAA4ADgAOAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAAAA4AAAAOAAAAAAAAAAAAAAAAAA4AAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAADgAAAAAAAAAAAA4AAAAOAAAAAAAAAAAADgAOAA4AAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4AAAAAAA4ADgAOAA4ADgAOAA4ADgAOAAAADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4AAAAAAAAAAAAAAAAAAAAAAA4ADgAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAOAA4ADgAOAA4ADgAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAAAAAAAAA=",xA="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",CA="undefined"===typeof Uint8Array?[]:new Uint8Array(256),SA=0;SA>4,u[s++]=(15&r)<<4|i>>2,u[s++]=(3&i)<<6|63&A;return l},UA=function(e){for(var t=e.length,n=[],r=0;r>FA,LA=(1<>FA)+32,DA=65536>>TA,RA=(1<=0){if(e<55296||e>56319&&e<=65535)return t=((t=this.index[e>>FA])<>FA)])<>TA),t=this.index[t],t+=e>>FA&RA,t=((t=this.index[t])<=55296&&i<=56319&&n>10),a%1024+56320)),(i+1===n||r.length>16384)&&(A+=String.fromCharCode.apply(String,r),r.length=0)}return A},sa=OA(bA),la="\xd7",ua="\xf7",ca=function(e){return sa.get(e)},da=function(e,t,n){var r=n-2,i=t[r],A=t[n-1],a=t[n];if(A===jA&&a===XA)return la;if(A===jA||A===XA||A===qA)return ua;if(a===jA||a===XA||a===qA)return ua;if(A===ZA&&-1!==[ZA,$A,ta,na].indexOf(a))return la;if((A===ta||A===$A)&&(a===$A||a===ea))return la;if((A===na||A===ea)&&a===ea)return la;if(a===ra||a===YA)return la;if(a===JA)return la;if(A===WA)return la;if(A===ra&&a===ia){for(;i===YA;)i=t[--r];if(i===ia)return la}if(A===Aa&&a===Aa){for(var o=0;i===Aa;)o++,i=t[--r];if(o%2===0)return la}return ua},ha=function(e){var t=aa(e),n=t.length,r=0,i=0,A=t.map(ca);return{next:function(){if(r>=n)return{done:!0,value:null};for(var e=la;ra.x||i.y>a.y;return a=i,0===t||o}));return e.body.removeChild(t),o},ma=function(){return"undefined"!==typeof(new 
Image).crossOrigin},va=function(){return"string"===typeof(new XMLHttpRequest).responseType},ya=function(e){var t=new Image,n=e.createElement("canvas"),r=n.getContext("2d");if(!r)return!1;t.src="data:image/svg+xml,";try{r.drawImage(t,0,0),n.toDataURL()}catch(Rt){return!1}return!0},wa=function(e){return 0===e[0]&&255===e[1]&&0===e[2]&&255===e[3]},Ba=function(e){var t=e.createElement("canvas"),n=100;t.width=n,t.height=n;var r=t.getContext("2d");if(!r)return Promise.reject(!1);r.fillStyle="rgb(0, 255, 0)",r.fillRect(0,0,n,n);var i=new Image,A=t.toDataURL();i.src=A;var a=_a(n,n,0,0,i);return r.fillStyle="red",r.fillRect(0,0,n,n),ba(a).then((function(t){r.drawImage(t,0,0);var i=r.getImageData(0,0,n,n).data;r.fillStyle="red",r.fillRect(0,0,n,n);var a=e.createElement("div");return a.style.backgroundImage="url("+A+")",a.style.height=n+"px",wa(i)?ba(_a(n,n,0,0,a)):Promise.reject(!1)})).then((function(e){return r.drawImage(e,0,0),wa(r.getImageData(0,0,n,n).data)})).catch((function(){return!1}))},_a=function(e,t,n,r,i){var A="http://www.w3.org/2000/svg",a=document.createElementNS(A,"svg"),o=document.createElementNS(A,"foreignObject");return a.setAttributeNS(null,"width",e.toString()),a.setAttributeNS(null,"height",t.toString()),o.setAttributeNS(null,"width","100%"),o.setAttributeNS(null,"height","100%"),o.setAttributeNS(null,"x",n.toString()),o.setAttributeNS(null,"y",r.toString()),o.setAttributeNS(null,"externalResourcesRequired","true"),a.appendChild(o),o.appendChild(i),a},ba=function(e){return new Promise((function(t,n){var r=new Image;r.onload=function(){return t(r)},r.onerror=n,r.src="data:image/svg+xml;charset=utf-8,"+encodeURIComponent((new XMLSerializer).serializeToString(e))}))},xa={get SUPPORT_RANGE_BOUNDS(){var e=pa(document);return Object.defineProperty(xa,"SUPPORT_RANGE_BOUNDS",{value:e}),e},get SUPPORT_WORD_BREAKING(){var e=xa.SUPPORT_RANGE_BOUNDS&&ga(document);return Object.defineProperty(xa,"SUPPORT_WORD_BREAKING",{value:e}),e},get SUPPORT_SVG_DRAWING(){var e=ya(document);return Object.defineProperty(xa,"SUPPORT_SVG_DRAWING",{value:e}),e},get SUPPORT_FOREIGNOBJECT_DRAWING(){var e="function"===typeof Array.from&&"function"===typeof window.fetch?Ba(document):Promise.resolve(!1);return Object.defineProperty(xa,"SUPPORT_FOREIGNOBJECT_DRAWING",{value:e}),e},get SUPPORT_CORS_IMAGES(){var e=ma();return Object.defineProperty(xa,"SUPPORT_CORS_IMAGES",{value:e}),e},get SUPPORT_RESPONSE_TYPE(){var e=va();return Object.defineProperty(xa,"SUPPORT_RESPONSE_TYPE",{value:e}),e},get SUPPORT_CORS_XHR(){var e="withCredentials"in new XMLHttpRequest;return Object.defineProperty(xa,"SUPPORT_CORS_XHR",{value:e}),e},get SUPPORT_NATIVE_TEXT_SEGMENTATION(){var e=!("undefined"===typeof Intl||!Intl.Segmenter);return Object.defineProperty(xa,"SUPPORT_NATIVE_TEXT_SEGMENTATION",{value:e}),e}},Ca=function(){function e(e,t){this.text=e,this.bounds=t}return e}(),Sa=function(e,t,n,r){var i=Ta(t,n),A=[],o=0;return i.forEach((function(t){if(n.textDecorationLine.length||t.trim().length>0)if(xa.SUPPORT_RANGE_BOUNDS){var i=Ua(r,o,t.length).getClientRects();if(i.length>1){var s=Ma(t),l=0;s.forEach((function(t){A.push(new Ca(t,a.fromDOMRectList(e,Ua(r,l+o,t.length).getClientRects()))),l+=t.length}))}else A.push(new Ca(t,a.fromDOMRectList(e,i)))}else{var u=r.splitText(t.length);A.push(new Ca(t,Ea(e,r))),r=u}else xa.SUPPORT_RANGE_BOUNDS||(r=r.splitText(t.length));o+=t.length})),A},Ea=function(e,t){var n=t.ownerDocument;if(n){var r=n.createElement("html2canvaswrapper");r.appendChild(t.cloneNode(!0));var 
i=t.parentNode;if(i){i.replaceChild(r,t);var A=o(e,r);return r.firstChild&&i.replaceChild(r.firstChild,r),A}}return a.EMPTY},Ua=function(e,t,n){var r=e.ownerDocument;if(!r)throw new Error("Node has no owner document");var i=r.createRange();return i.setStart(e,t),i.setEnd(e,t+n),i},Ma=function(e){if(xa.SUPPORT_NATIVE_TEXT_SEGMENTATION){var t=new Intl.Segmenter(void 0,{granularity:"grapheme"});return Array.from(t.segment(e)).map((function(e){return e.segment}))}return fa(e)},Fa=function(e,t){if(xa.SUPPORT_NATIVE_TEXT_SEGMENTATION){var n=new Intl.Segmenter(void 0,{granularity:"word"});return Array.from(n.segment(e)).map((function(e){return e.segment}))}return ka(e,t)},Ta=function(e,t){return 0!==t.letterSpacing?Ma(e):Fa(e,t)},Qa=[32,160,4961,65792,65793,4153,4241],ka=function(e,t){for(var n,r=Ve(e,{lineBreak:t.lineBreak,wordBreak:"break-word"===t.overflowWrap?"break-word":t.wordBreak}),i=[],A=function(){if(n.value){var e=n.value.slice(),t=l(e),r="";t.forEach((function(e){-1===Qa.indexOf(e)?r+=u(e):(r.length&&i.push(r),i.push(u(e)),r="")})),r.length&&i.push(r)}};!(n=r.next()).done;)A();return i},La=function(){function e(e,t,n){this.text=Ia(t.data,n.textTransform),this.textBounds=Sa(e,this.text,n,t)}return e}(),Ia=function(e,t){switch(t){case 1:return e.toLowerCase();case 3:return e.replace(Da,Ra);case 2:return e.toUpperCase();default:return e}},Da=/(^|\s|:|-|\(|\))([a-z])/g,Ra=function(e,t,n){return e.length>0?t+n.toUpperCase():e},Pa=function(e){function n(t,n){var r=e.call(this,t,n)||this;return r.src=n.currentSrc||n.src,r.intrinsicWidth=n.naturalWidth,r.intrinsicHeight=n.naturalHeight,r.context.cache.addImage(r.src),r}return t(n,e),n}(_A),Ha=function(e){function n(t,n){var r=e.call(this,t,n)||this;return r.canvas=n,r.intrinsicWidth=n.width,r.intrinsicHeight=n.height,r}return t(n,e),n}(_A),Oa=function(e){function n(t,n){var r=e.call(this,t,n)||this,i=new XMLSerializer,A=o(t,n);return n.setAttribute("width",A.width+"px"),n.setAttribute("height",A.height+"px"),r.svg="data:image/svg+xml,"+encodeURIComponent(i.serializeToString(n)),r.intrinsicWidth=n.width.baseVal.value,r.intrinsicHeight=n.height.baseVal.value,r.context.cache.addImage(r.svg),r}return t(n,e),n}(_A),Na=function(e){function n(t,n){var r=e.call(this,t,n)||this;return r.value=n.value,r}return t(n,e),n}(_A),Va=function(e){function n(t,n){var r=e.call(this,t,n)||this;return r.start=n.start,r.reversed="boolean"===typeof n.reversed&&!0===n.reversed,r}return t(n,e),n}(_A),za=[{type:15,flags:0,unit:"px",number:3}],Ga=[{type:16,flags:0,number:50}],Ka=function(e){return e.width>e.height?new a(e.left+(e.width-e.height)/2,e.top,e.height,e.height):e.width0)r.textNodes.push(new La(t,A,r.styles));else if(so(A))if(So(A)&&A.assignedNodes)A.assignedNodes().forEach((function(n){return e(t,n,r,i)}));else{var o=ro(t,A);o.styles.isVisible()&&(Ao(A,o,i)?o.flags|=4:ao(o.styles)&&(o.flags|=2),-1!==to.indexOf(A.tagName)&&(o.flags|=8),r.elements.push(o),A.slot,A.shadowRoot?e(t,A.shadowRoot,o,i):xo(A)||go(A)||Co(A)||e(t,A,o,i))}},ro=function(e,t){return wo(t)?new Pa(e,t):vo(t)?new Ha(e,t):go(t)?new Oa(e,t):co(t)?new Na(e,t):ho(t)?new Va(e,t):fo(t)?new Ja(e,t):Co(t)?new Za(e,t):xo(t)?new $a(e,t):Bo(t)?new eo(e,t):new _A(e,t)},io=function(e,t){var n=ro(e,t);return n.flags|=4,no(e,t,n,n),n},Ao=function(e,t,n){return t.styles.isPositionedWithZIndex()||t.styles.opacity<1||t.styles.isTransformed()||mo(e)&&n.styles.isTransparent()},ao=function(e){return e.isPositioned()||e.isFloating()},oo=function(e){return e.nodeType===Node.TEXT_NODE},so=function(e){return 
e.nodeType===Node.ELEMENT_NODE},lo=function(e){return so(e)&&"undefined"!==typeof e.style&&!uo(e)},uo=function(e){return"object"===typeof e.className},co=function(e){return"LI"===e.tagName},ho=function(e){return"OL"===e.tagName},fo=function(e){return"INPUT"===e.tagName},po=function(e){return"HTML"===e.tagName},go=function(e){return"svg"===e.tagName},mo=function(e){return"BODY"===e.tagName},vo=function(e){return"CANVAS"===e.tagName},yo=function(e){return"VIDEO"===e.tagName},wo=function(e){return"IMG"===e.tagName},Bo=function(e){return"IFRAME"===e.tagName},_o=function(e){return"STYLE"===e.tagName},bo=function(e){return"SCRIPT"===e.tagName},xo=function(e){return"TEXTAREA"===e.tagName},Co=function(e){return"SELECT"===e.tagName},So=function(e){return"SLOT"===e.tagName},Eo=function(e){return e.tagName.indexOf("-")>0},Uo=function(){function e(){this.counters={}}return e.prototype.getCounterValue=function(e){var t=this.counters[e];return t&&t.length?t[t.length-1]:1},e.prototype.getCounterValues=function(e){var t=this.counters[e];return t||[]},e.prototype.pop=function(e){var t=this;e.forEach((function(e){return t.counters[e].pop()}))},e.prototype.parse=function(e){var t=this,n=e.counterIncrement,r=e.counterReset,i=!0;null!==n&&n.forEach((function(e){var n=t.counters[e.counter];n&&0!==e.increment&&(i=!1,n.length||n.push(1),n[Math.max(0,n.length-1)]+=e.increment)}));var A=[];return i&&r.forEach((function(e){var n=t.counters[e.counter];A.push(e.counter),n||(n=t.counters[e.counter]=[]),n.push(e.reset)})),A},e}(),Mo={integers:[1e3,900,500,400,100,90,50,40,10,9,5,4,1],values:["M","CM","D","CD","C","XC","L","XL","X","IX","V","IV","I"]},Fo={integers:[9e3,8e3,7e3,6e3,5e3,4e3,3e3,2e3,1e3,900,800,700,600,500,400,300,200,100,90,80,70,60,50,40,30,20,10,9,8,7,6,5,4,3,2,1],values:["\u0554","\u0553","\u0552","\u0551","\u0550","\u054f","\u054e","\u054d","\u054c","\u054b","\u054a","\u0549","\u0548","\u0547","\u0546","\u0545","\u0544","\u0543","\u0542","\u0541","\u0540","\u053f","\u053e","\u053d","\u053c","\u053b","\u053a","\u0539","\u0538","\u0537","\u0536","\u0535","\u0534","\u0533","\u0532","\u0531"]},To={integers:[1e4,9e3,8e3,7e3,6e3,5e3,4e3,3e3,2e3,1e3,400,300,200,100,90,80,70,60,50,40,30,20,19,18,17,16,15,10,9,8,7,6,5,4,3,2,1],values:["\u05d9\u05f3","\u05d8\u05f3","\u05d7\u05f3","\u05d6\u05f3","\u05d5\u05f3","\u05d4\u05f3","\u05d3\u05f3","\u05d2\u05f3","\u05d1\u05f3","\u05d0\u05f3","\u05ea","\u05e9","\u05e8","\u05e7","\u05e6","\u05e4","\u05e2","\u05e1","\u05e0","\u05de","\u05dc","\u05db","\u05d9\u05d8","\u05d9\u05d7","\u05d9\u05d6","\u05d8\u05d6","\u05d8\u05d5","\u05d9","\u05d8","\u05d7","\u05d6","\u05d5","\u05d4","\u05d3","\u05d2","\u05d1","\u05d0"]},Qo={integers:[1e4,9e3,8e3,7e3,6e3,5e3,4e3,3e3,2e3,1e3,900,800,700,600,500,400,300,200,100,90,80,70,60,50,40,30,20,10,9,8,7,6,5,4,3,2,1],values:["\u10f5","\u10f0","\u10ef","\u10f4","\u10ee","\u10ed","\u10ec","\u10eb","\u10ea","\u10e9","\u10e8","\u10e7","\u10e6","\u10e5","\u10e4","\u10f3","\u10e2","\u10e1","\u10e0","\u10df","\u10de","\u10dd","\u10f2","\u10dc","\u10db","\u10da","\u10d9","\u10d8","\u10d7","\u10f1","\u10d6","\u10d5","\u10d4","\u10d3","\u10d2","\u10d1","\u10d0"]},ko=function(e,t,n,r,i,A){return en?Wo(e,i,A.length>0):r.integers.reduce((function(t,n,i){for(;e>=n;)e-=n,t+=r.values[i];return t}),"")+A},Lo=function(e,t,n,r){var i="";do{n||e--,i=r(e)+i,e/=t}while(e*t>=t);return i},Io=function(e,t,n,r,i){var A=n-t+1;return(e<0?"-":"")+(Lo(Math.abs(e),A,r,(function(e){return u(Math.floor(e%A)+t)}))+i)},Do=function(e,t,n){void 0===n&&(n=". 
");var r=t.length;return Lo(Math.abs(e),r,!1,(function(e){return t[Math.floor(e%r)]}))+n},Ro=1,Po=2,Ho=4,Oo=8,No=function(e,t,n,r,i,A){if(e<-9999||e>9999)return Wo(e,4,i.length>0);var a=Math.abs(e),o=i;if(0===a)return t[0]+o;for(var s=0;a>0&&s<=4;s++){var l=a%10;0===l&&iA(A,Ro)&&""!==o?o=t[l]+o:l>1||1===l&&0===s||1===l&&1===s&&iA(A,Po)||1===l&&1===s&&iA(A,Ho)&&e>100||1===l&&s>1&&iA(A,Oo)?o=t[l]+(s>0?n[s-1]:"")+o:1===l&&s>0&&(o=n[s-1]+o),a=Math.floor(a/10)}return(e<0?r:"")+o},Vo="\u5341\u767e\u5343\u842c",zo="\u62fe\u4f70\u4edf\u842c",Go="\u30de\u30a4\u30ca\u30b9",Ko="\ub9c8\uc774\ub108\uc2a4",Wo=function(e,t,n){var r=n?". ":"",i=n?"\u3001":"",A=n?", ":"",a=n?" ":"";switch(t){case 0:return"\u2022"+a;case 1:return"\u25e6"+a;case 2:return"\u25fe"+a;case 5:var o=Io(e,48,57,!0,r);return o.length<4?"0"+o:o;case 4:return Do(e,"\u3007\u4e00\u4e8c\u4e09\u56db\u4e94\u516d\u4e03\u516b\u4e5d",i);case 6:return ko(e,1,3999,Mo,3,r).toLowerCase();case 7:return ko(e,1,3999,Mo,3,r);case 8:return Io(e,945,969,!1,r);case 9:return Io(e,97,122,!1,r);case 10:return Io(e,65,90,!1,r);case 11:return Io(e,1632,1641,!0,r);case 12:case 49:return ko(e,1,9999,Fo,3,r);case 35:return ko(e,1,9999,Fo,3,r).toLowerCase();case 13:return Io(e,2534,2543,!0,r);case 14:case 30:return Io(e,6112,6121,!0,r);case 15:return Do(e,"\u5b50\u4e11\u5bc5\u536f\u8fb0\u5df3\u5348\u672a\u7533\u9149\u620c\u4ea5",i);case 16:return Do(e,"\u7532\u4e59\u4e19\u4e01\u620a\u5df1\u5e9a\u8f9b\u58ec\u7678",i);case 17:case 48:return No(e,"\u96f6\u4e00\u4e8c\u4e09\u56db\u4e94\u516d\u4e03\u516b\u4e5d",Vo,"\u8ca0",i,Po|Ho|Oo);case 47:return No(e,"\u96f6\u58f9\u8cb3\u53c3\u8086\u4f0d\u9678\u67d2\u634c\u7396",zo,"\u8ca0",i,Ro|Po|Ho|Oo);case 42:return No(e,"\u96f6\u4e00\u4e8c\u4e09\u56db\u4e94\u516d\u4e03\u516b\u4e5d",Vo,"\u8d1f",i,Po|Ho|Oo);case 41:return No(e,"\u96f6\u58f9\u8d30\u53c1\u8086\u4f0d\u9646\u67d2\u634c\u7396",zo,"\u8d1f",i,Ro|Po|Ho|Oo);case 26:return No(e,"\u3007\u4e00\u4e8c\u4e09\u56db\u4e94\u516d\u4e03\u516b\u4e5d","\u5341\u767e\u5343\u4e07",Go,i,0);case 25:return No(e,"\u96f6\u58f1\u5f10\u53c2\u56db\u4f0d\u516d\u4e03\u516b\u4e5d","\u62fe\u767e\u5343\u4e07",Go,i,Ro|Po|Ho);case 31:return No(e,"\uc601\uc77c\uc774\uc0bc\uc0ac\uc624\uc721\uce60\ud314\uad6c","\uc2ed\ubc31\ucc9c\ub9cc",Ko,A,Ro|Po|Ho);case 33:return No(e,"\u96f6\u4e00\u4e8c\u4e09\u56db\u4e94\u516d\u4e03\u516b\u4e5d","\u5341\u767e\u5343\u842c",Ko,A,0);case 32:return No(e,"\u96f6\u58f9\u8cb3\u53c3\u56db\u4e94\u516d\u4e03\u516b\u4e5d","\u62fe\u767e\u5343",Ko,A,Ro|Po|Ho);case 18:return Io(e,2406,2415,!0,r);case 20:return ko(e,1,19999,Qo,3,r);case 21:return Io(e,2790,2799,!0,r);case 22:return Io(e,2662,2671,!0,r);case 22:return ko(e,1,10999,To,3,r);case 23:return Do(e,"\u3042\u3044\u3046\u3048\u304a\u304b\u304d\u304f\u3051\u3053\u3055\u3057\u3059\u305b\u305d\u305f\u3061\u3064\u3066\u3068\u306a\u306b\u306c\u306d\u306e\u306f\u3072\u3075\u3078\u307b\u307e\u307f\u3080\u3081\u3082\u3084\u3086\u3088\u3089\u308a\u308b\u308c\u308d\u308f\u3090\u3091\u3092\u3093");case 24:return Do(e,"\u3044\u308d\u306f\u306b\u307b\u3078\u3068\u3061\u308a\u306c\u308b\u3092\u308f\u304b\u3088\u305f\u308c\u305d\u3064\u306d\u306a\u3089\u3080\u3046\u3090\u306e\u304a\u304f\u3084\u307e\u3051\u3075\u3053\u3048\u3066\u3042\u3055\u304d\u3086\u3081\u307f\u3057\u3091\u3072\u3082\u305b\u3059");case 27:return Io(e,3302,3311,!0,r);case 28:return 
Do(e,"\u30a2\u30a4\u30a6\u30a8\u30aa\u30ab\u30ad\u30af\u30b1\u30b3\u30b5\u30b7\u30b9\u30bb\u30bd\u30bf\u30c1\u30c4\u30c6\u30c8\u30ca\u30cb\u30cc\u30cd\u30ce\u30cf\u30d2\u30d5\u30d8\u30db\u30de\u30df\u30e0\u30e1\u30e2\u30e4\u30e6\u30e8\u30e9\u30ea\u30eb\u30ec\u30ed\u30ef\u30f0\u30f1\u30f2\u30f3",i);case 29:return Do(e,"\u30a4\u30ed\u30cf\u30cb\u30db\u30d8\u30c8\u30c1\u30ea\u30cc\u30eb\u30f2\u30ef\u30ab\u30e8\u30bf\u30ec\u30bd\u30c4\u30cd\u30ca\u30e9\u30e0\u30a6\u30f0\u30ce\u30aa\u30af\u30e4\u30de\u30b1\u30d5\u30b3\u30a8\u30c6\u30a2\u30b5\u30ad\u30e6\u30e1\u30df\u30b7\u30f1\u30d2\u30e2\u30bb\u30b9",i);case 34:return Io(e,3792,3801,!0,r);case 37:return Io(e,6160,6169,!0,r);case 38:return Io(e,4160,4169,!0,r);case 39:return Io(e,2918,2927,!0,r);case 40:return Io(e,1776,1785,!0,r);case 43:return Io(e,3046,3055,!0,r);case 44:return Io(e,3174,3183,!0,r);case 45:return Io(e,3664,3673,!0,r);case 46:return Io(e,3872,3881,!0,r);default:return Io(e,48,57,!0,r)}},jo="data-html2canvas-ignore",Xo=function(){function e(e,t,n){if(this.context=e,this.options=n,this.scrolledElements=[],this.referenceElement=t,this.counters=new Uo,this.quoteDepth=0,!t.ownerDocument)throw new Error("Cloned element does not have an owner document");this.documentElement=this.cloneNode(t.ownerDocument.documentElement,!1)}return e.prototype.toIFrame=function(e,t){var n=this,A=Yo(e,t);if(!A.contentWindow)return Promise.reject("Unable to find iframe window");var a=e.defaultView.pageXOffset,o=e.defaultView.pageYOffset,s=A.contentWindow,l=s.document,u=$o(A).then((function(){return r(n,void 0,void 0,(function(){var e,n;return i(this,(function(r){switch(r.label){case 0:return this.scrolledElements.forEach(is),s&&(s.scrollTo(t.left,t.top),!/(iPad|iPhone|iPod)/g.test(navigator.userAgent)||s.scrollY===t.top&&s.scrollX===t.left||(this.context.logger.warn("Unable to restore scroll position for cloned document"),this.context.windowBounds=this.context.windowBounds.add(s.scrollX-t.left,s.scrollY-t.top,0,0))),e=this.options.onclone,"undefined"===typeof(n=this.clonedReferenceElement)?[2,Promise.reject("Error finding the "+this.referenceElement.nodeName+" in the cloned document")]:l.fonts&&l.fonts.ready?[4,l.fonts.ready]:[3,2];case 1:r.sent(),r.label=2;case 2:return/(AppleWebKit)/g.test(navigator.userAgent)?[4,Zo(l)]:[3,4];case 3:r.sent(),r.label=4;case 4:return"function"===typeof e?[2,Promise.resolve().then((function(){return e(l,n)})).then((function(){return A}))]:[2,A]}}))}))}));return l.open(),l.write(ns(document.doctype)+""),rs(this.referenceElement.ownerDocument,a,o),l.replaceChild(l.adoptNode(this.documentElement),l.documentElement),l.close(),u},e.prototype.createElementClone=function(e){if(BA(e,2),vo(e))return this.createCanvasClone(e);if(yo(e))return this.createVideoClone(e);if(_o(e))return this.createStyleClone(e);var t=e.cloneNode(!1);return wo(t)&&(wo(e)&&e.currentSrc&&e.currentSrc!==e.src&&(t.src=e.currentSrc,t.srcset=""),"lazy"===t.loading&&(t.loading="eager")),Eo(t)?this.createCustomElementClone(t):t},e.prototype.createCustomElementClone=function(e){var t=document.createElement("html2canvascustomelement");return ts(e.style,t),t},e.prototype.createStyleClone=function(e){try{var t=e.sheet;if(t&&t.cssRules){var n=[].slice.call(t.cssRules,0).reduce((function(e,t){return t&&"string"===typeof t.cssText?e+t.cssText:e}),""),r=e.cloneNode(!1);return r.textContent=n,r}}catch(Rt){if(this.context.logger.error("Unable to access cssRules property",Rt),"SecurityError"!==Rt.name)throw Rt}return 
e.cloneNode(!1)},e.prototype.createCanvasClone=function(e){var t;if(this.options.inlineImages&&e.ownerDocument){var n=e.ownerDocument.createElement("img");try{return n.src=e.toDataURL(),n}catch(Rt){this.context.logger.info("Unable to inline canvas contents, canvas is tainted",e)}}var r=e.cloneNode(!1);try{r.width=e.width,r.height=e.height;var i=e.getContext("2d"),A=r.getContext("2d");if(A)if(!this.options.allowTaint&&i)A.putImageData(i.getImageData(0,0,e.width,e.height),0,0);else{var a=null!==(t=e.getContext("webgl2"))&&void 0!==t?t:e.getContext("webgl");if(a){var o=a.getContextAttributes();!1===(null===o||void 0===o?void 0:o.preserveDrawingBuffer)&&this.context.logger.warn("Unable to clone WebGL context as it has preserveDrawingBuffer=false",e)}A.drawImage(e,0,0)}return r}catch(Rt){this.context.logger.info("Unable to clone canvas as it is tainted",e)}return r},e.prototype.createVideoClone=function(e){var t=e.ownerDocument.createElement("canvas");t.width=e.offsetWidth,t.height=e.offsetHeight;var n=t.getContext("2d");try{return n&&(n.drawImage(e,0,0,t.width,t.height),this.options.allowTaint||n.getImageData(0,0,t.width,t.height)),t}catch(Rt){this.context.logger.info("Unable to clone video as it is tainted",e)}var r=e.ownerDocument.createElement("canvas");return r.width=e.offsetWidth,r.height=e.offsetHeight,r},e.prototype.appendChildNode=function(e,t,n){so(t)&&(bo(t)||t.hasAttribute(jo)||"function"===typeof this.options.ignoreElements&&this.options.ignoreElements(t))||this.options.copyStyles&&so(t)&&_o(t)||e.appendChild(this.cloneNode(t,n))},e.prototype.cloneChildNodes=function(e,t,n){for(var r=this,i=e.shadowRoot?e.shadowRoot.firstChild:e.firstChild;i;i=i.nextSibling)if(so(i)&&So(i)&&"function"===typeof i.assignedNodes){var A=i.assignedNodes();A.length&&A.forEach((function(e){return r.appendChildNode(t,e,n)}))}else this.appendChildNode(t,i,n)},e.prototype.cloneNode=function(e,t){if(oo(e))return document.createTextNode(e.data);if(!e.ownerDocument)return e.cloneNode(!1);var n=e.ownerDocument.defaultView;if(n&&so(e)&&(lo(e)||uo(e))){var r=this.createElementClone(e);r.style.transitionProperty="none";var i=n.getComputedStyle(e),A=n.getComputedStyle(e,":before"),a=n.getComputedStyle(e,":after");this.referenceElement===e&&lo(r)&&(this.clonedReferenceElement=r),mo(r)&&us(r);var o=this.counters.parse(new mA(this.context,i)),s=this.resolvePseudoContent(e,r,A,KA.BEFORE);Eo(e)&&(t=!0),yo(e)||this.cloneChildNodes(e,r,t),s&&r.insertBefore(s,r.firstChild);var l=this.resolvePseudoContent(e,r,a,KA.AFTER);return l&&r.appendChild(l),this.counters.pop(o),(i&&(this.options.copyStyles||uo(e))&&!Bo(e)||t)&&ts(i,r),0===e.scrollTop&&0===e.scrollLeft||this.scrolledElements.push([r,e.scrollLeft,e.scrollTop]),(xo(e)||Co(e))&&(xo(r)||Co(r))&&(r.value=e.value),r}return e.cloneNode(!1)},e.prototype.resolvePseudoContent=function(e,t,n,r){var i=this;if(n){var A=n.content,a=t.ownerDocument;if(a&&A&&"none"!==A&&"-moz-alt-content"!==A&&"none"!==n.display){this.counters.parse(new mA(this.context,n));var o=new gA(this.context,n),s=a.createElement("html2canvaspseudoelement");ts(n,s),o.content.forEach((function(t){if(0===t.type)s.appendChild(a.createTextNode(t.value));else if(22===t.type){var n=a.createElement("img");n.src=t.value,n.style.opacity="1",s.appendChild(n)}else if(18===t.type){if("attr"===t.name){var r=t.values.filter(kn);r.length&&s.appendChild(a.createTextNode(e.getAttribute(r[0].value)||""))}else if("counter"===t.name){var A=t.values.filter(Rn),l=A[0],u=A[1];if(l&&kn(l)){var 
c=i.counters.getCounterValue(l.value),d=u&&kn(u)?xi.parse(i.context,u.value):3;s.appendChild(a.createTextNode(Wo(c,d,!1)))}}else if("counters"===t.name){var h=t.values.filter(Rn),f=(l=h[0],h[1]);if(u=h[2],l&&kn(l)){var p=i.counters.getCounterValues(l.value),g=u&&kn(u)?xi.parse(i.context,u.value):3,m=f&&0===f.type?f.value:"",v=p.map((function(e){return Wo(e,g,!1)})).join(m);s.appendChild(a.createTextNode(v))}}}else if(20===t.type)switch(t.value){case"open-quote":s.appendChild(a.createTextNode(uA(o.quotes,i.quoteDepth++,!0)));break;case"close-quote":s.appendChild(a.createTextNode(uA(o.quotes,--i.quoteDepth,!1)));break;default:s.appendChild(a.createTextNode(t.value))}})),s.className=os+" "+ss;var l=r===KA.BEFORE?" "+os:" "+ss;return uo(t)?t.className.baseValue+=l:t.className+=l,s}}},e.destroy=function(e){return!!e.parentNode&&(e.parentNode.removeChild(e),!0)},e}();!function(e){e[e.BEFORE=0]="BEFORE",e[e.AFTER=1]="AFTER"}(KA||(KA={}));var qo,Yo=function(e,t){var n=e.createElement("iframe");return n.className="html2canvas-container",n.style.visibility="hidden",n.style.position="fixed",n.style.left="-10000px",n.style.top="0px",n.style.border="0",n.width=t.width.toString(),n.height=t.height.toString(),n.scrolling="no",n.setAttribute(jo,"true"),e.body.appendChild(n),n},Jo=function(e){return new Promise((function(t){e.complete?t():e.src?(e.onload=t,e.onerror=t):t()}))},Zo=function(e){return Promise.all([].slice.call(e.images,0).map(Jo))},$o=function(e){return new Promise((function(t,n){var r=e.contentWindow;if(!r)return n("No window assigned for iframe");var i=r.document;r.onload=e.onload=function(){r.onload=e.onload=null;var n=setInterval((function(){i.body.childNodes.length>0&&"complete"===i.readyState&&(clearInterval(n),t(e))}),50)}}))},es=["all","d","content"],ts=function(e,t){for(var n=e.length-1;n>=0;n--){var r=e.item(n);-1===es.indexOf(r)&&t.style.setProperty(r,e.getPropertyValue(r))}return t},ns=function(e){var t="";return e&&(t+=""),t},rs=function(e,t,n){e&&e.defaultView&&(t!==e.defaultView.pageXOffset||n!==e.defaultView.pageYOffset)&&e.defaultView.scrollTo(t,n)},is=function(e){var t=e[0],n=e[1],r=e[2];t.scrollLeft=n,t.scrollTop=r},As=":before",as=":after",os="___html2canvas___pseudoelement_before",ss="___html2canvas___pseudoelement_after",ls='{\n content: "" !important;\n display: none !important;\n}',us=function(e){cs(e,"."+os+As+ls+"\n ."+ss+as+ls)},cs=function(e,t){var n=e.ownerDocument;if(n){var r=n.createElement("style");r.textContent=t,e.appendChild(r)}},ds=function(){function e(){}return e.getOrigin=function(t){var n=e._link;return n?(n.href=t,n.href=n.href,n.protocol+n.hostname+n.port):"about:blank"},e.isSameOrigin=function(t){return e.getOrigin(t)===e._origin},e.setContext=function(t){e._link=t.document.createElement("a"),e._origin=e.getOrigin(t.location.href)},e._origin="about:blank",e}(),hs=function(){function e(e,t){this.context=e,this._options=t,this._cache={}}return e.prototype.addImage=function(e){var t=Promise.resolve();return this.has(e)?t:ws(e)||ms(e)?((this._cache[e]=this.loadImage(e)).catch((function(){})),t):t},e.prototype.match=function(e){return this._cache[e]},e.prototype.loadImage=function(e){return r(this,void 0,void 0,(function(){var t,n,r,A,a=this;return i(this,(function(i){switch(i.label){case 0:return t=ds.isSameOrigin(e),n=!vs(e)&&!0===this._options.useCORS&&xa.SUPPORT_CORS_IMAGES&&!t,r=!vs(e)&&!t&&!ws(e)&&"string"===typeof 
this._options.proxy&&xa.SUPPORT_CORS_XHR&&!n,t||!1!==this._options.allowTaint||vs(e)||ws(e)||r||n?(A=e,r?[4,this.proxy(A)]:[3,2]):[2];case 1:A=i.sent(),i.label=2;case 2:return this.context.logger.debug("Added image "+e.substring(0,256)),[4,new Promise((function(e,t){var r=new Image;r.onload=function(){return e(r)},r.onerror=t,(ys(A)||n)&&(r.crossOrigin="anonymous"),r.src=A,!0===r.complete&&setTimeout((function(){return e(r)}),500),a._options.imageTimeout>0&&setTimeout((function(){return t("Timed out ("+a._options.imageTimeout+"ms) loading image")}),a._options.imageTimeout)}))];case 3:return[2,i.sent()]}}))}))},e.prototype.has=function(e){return"undefined"!==typeof this._cache[e]},e.prototype.keys=function(){return Promise.resolve(Object.keys(this._cache))},e.prototype.proxy=function(e){var t=this,n=this._options.proxy;if(!n)throw new Error("No proxy defined");var r=e.substring(0,256);return new Promise((function(i,A){var a=xa.SUPPORT_RESPONSE_TYPE?"blob":"text",o=new XMLHttpRequest;o.onload=function(){if(200===o.status)if("text"===a)i(o.response);else{var e=new FileReader;e.addEventListener("load",(function(){return i(e.result)}),!1),e.addEventListener("error",(function(e){return A(e)}),!1),e.readAsDataURL(o.response)}else A("Failed to proxy resource "+r+" with status code "+o.status)},o.onerror=A;var s=n.indexOf("?")>-1?"&":"?";if(o.open("GET",""+n+s+"url="+encodeURIComponent(e)+"&responseType="+a),"text"!==a&&o instanceof XMLHttpRequest&&(o.responseType=a),t._options.imageTimeout){var l=t._options.imageTimeout;o.timeout=l,o.ontimeout=function(){return A("Timed out ("+l+"ms) proxying "+r)}}o.send()}))},e}(),fs=/^data:image\/svg\+xml/i,ps=/^data:image\/.*;base64,/i,gs=/^data:image\/.*/i,ms=function(e){return xa.SUPPORT_SVG_DRAWING||!Bs(e)},vs=function(e){return gs.test(e)},ys=function(e){return ps.test(e)},ws=function(e){return"blob"===e.substr(0,4)},Bs=function(e){return"svg"===e.substr(-3).toLowerCase()||fs.test(e)},_s=function(){function e(e,t){this.type=0,this.x=e,this.y=t}return e.prototype.add=function(t,n){return new e(this.x+t,this.y+n)},e}(),bs=function(e,t,n){return new _s(e.x+(t.x-e.x)*n,e.y+(t.y-e.y)*n)},xs=function(){function e(e,t,n,r){this.type=1,this.start=e,this.startControl=t,this.endControl=n,this.end=r}return e.prototype.subdivide=function(t,n){var r=bs(this.start,this.startControl,t),i=bs(this.startControl,this.endControl,t),A=bs(this.endControl,this.end,t),a=bs(r,i,t),o=bs(i,A,t),s=bs(a,o,t);return n?new e(this.start,r,a,s):new e(s,o,A,this.end)},e.prototype.add=function(t,n){return new e(this.start.add(t,n),this.startControl.add(t,n),this.endControl.add(t,n),this.end.add(t,n))},e.prototype.reverse=function(){return new e(this.end,this.endControl,this.startControl,this.start)},e}(),Cs=function(e){return 1===e.type},Ss=function(){function e(e){var t=e.styles,n=e.bounds,r=Wn(t.borderTopLeftRadius,n.width,n.height),i=r[0],A=r[1],a=Wn(t.borderTopRightRadius,n.width,n.height),o=a[0],s=a[1],l=Wn(t.borderBottomRightRadius,n.width,n.height),u=l[0],c=l[1],d=Wn(t.borderBottomLeftRadius,n.width,n.height),h=d[0],f=d[1],p=[];p.push((i+o)/n.width),p.push((h+u)/n.width),p.push((A+f)/n.height),p.push((s+c)/n.height);var g=Math.max.apply(Math,p);g>1&&(i/=g,A/=g,o/=g,s/=g,u/=g,c/=g,h/=g,f/=g);var 
m=n.width-o,v=n.height-c,y=n.width-u,w=n.height-f,B=t.borderTopWidth,_=t.borderRightWidth,b=t.borderBottomWidth,x=t.borderLeftWidth,C=jn(t.paddingTop,e.bounds.width),S=jn(t.paddingRight,e.bounds.width),E=jn(t.paddingBottom,e.bounds.width),U=jn(t.paddingLeft,e.bounds.width);this.topLeftBorderDoubleOuterBox=i>0||A>0?Es(n.left+x/3,n.top+B/3,i-x/3,A-B/3,qo.TOP_LEFT):new _s(n.left+x/3,n.top+B/3),this.topRightBorderDoubleOuterBox=i>0||A>0?Es(n.left+m,n.top+B/3,o-_/3,s-B/3,qo.TOP_RIGHT):new _s(n.left+n.width-_/3,n.top+B/3),this.bottomRightBorderDoubleOuterBox=u>0||c>0?Es(n.left+y,n.top+v,u-_/3,c-b/3,qo.BOTTOM_RIGHT):new _s(n.left+n.width-_/3,n.top+n.height-b/3),this.bottomLeftBorderDoubleOuterBox=h>0||f>0?Es(n.left+x/3,n.top+w,h-x/3,f-b/3,qo.BOTTOM_LEFT):new _s(n.left+x/3,n.top+n.height-b/3),this.topLeftBorderDoubleInnerBox=i>0||A>0?Es(n.left+2*x/3,n.top+2*B/3,i-2*x/3,A-2*B/3,qo.TOP_LEFT):new _s(n.left+2*x/3,n.top+2*B/3),this.topRightBorderDoubleInnerBox=i>0||A>0?Es(n.left+m,n.top+2*B/3,o-2*_/3,s-2*B/3,qo.TOP_RIGHT):new _s(n.left+n.width-2*_/3,n.top+2*B/3),this.bottomRightBorderDoubleInnerBox=u>0||c>0?Es(n.left+y,n.top+v,u-2*_/3,c-2*b/3,qo.BOTTOM_RIGHT):new _s(n.left+n.width-2*_/3,n.top+n.height-2*b/3),this.bottomLeftBorderDoubleInnerBox=h>0||f>0?Es(n.left+2*x/3,n.top+w,h-2*x/3,f-2*b/3,qo.BOTTOM_LEFT):new _s(n.left+2*x/3,n.top+n.height-2*b/3),this.topLeftBorderStroke=i>0||A>0?Es(n.left+x/2,n.top+B/2,i-x/2,A-B/2,qo.TOP_LEFT):new _s(n.left+x/2,n.top+B/2),this.topRightBorderStroke=i>0||A>0?Es(n.left+m,n.top+B/2,o-_/2,s-B/2,qo.TOP_RIGHT):new _s(n.left+n.width-_/2,n.top+B/2),this.bottomRightBorderStroke=u>0||c>0?Es(n.left+y,n.top+v,u-_/2,c-b/2,qo.BOTTOM_RIGHT):new _s(n.left+n.width-_/2,n.top+n.height-b/2),this.bottomLeftBorderStroke=h>0||f>0?Es(n.left+x/2,n.top+w,h-x/2,f-b/2,qo.BOTTOM_LEFT):new _s(n.left+x/2,n.top+n.height-b/2),this.topLeftBorderBox=i>0||A>0?Es(n.left,n.top,i,A,qo.TOP_LEFT):new _s(n.left,n.top),this.topRightBorderBox=o>0||s>0?Es(n.left+m,n.top,o,s,qo.TOP_RIGHT):new _s(n.left+n.width,n.top),this.bottomRightBorderBox=u>0||c>0?Es(n.left+y,n.top+v,u,c,qo.BOTTOM_RIGHT):new _s(n.left+n.width,n.top+n.height),this.bottomLeftBorderBox=h>0||f>0?Es(n.left,n.top+w,h,f,qo.BOTTOM_LEFT):new _s(n.left,n.top+n.height),this.topLeftPaddingBox=i>0||A>0?Es(n.left+x,n.top+B,Math.max(0,i-x),Math.max(0,A-B),qo.TOP_LEFT):new _s(n.left+x,n.top+B),this.topRightPaddingBox=o>0||s>0?Es(n.left+Math.min(m,n.width-_),n.top+B,m>n.width+_?0:Math.max(0,o-_),Math.max(0,s-B),qo.TOP_RIGHT):new _s(n.left+n.width-_,n.top+B),this.bottomRightPaddingBox=u>0||c>0?Es(n.left+Math.min(y,n.width-x),n.top+Math.min(v,n.height-b),Math.max(0,u-_),Math.max(0,c-b),qo.BOTTOM_RIGHT):new _s(n.left+n.width-_,n.top+n.height-b),this.bottomLeftPaddingBox=h>0||f>0?Es(n.left+x,n.top+Math.min(w,n.height-b),Math.max(0,h-x),Math.max(0,f-b),qo.BOTTOM_LEFT):new _s(n.left+x,n.top+n.height-b),this.topLeftContentBox=i>0||A>0?Es(n.left+x+U,n.top+B+C,Math.max(0,i-(x+U)),Math.max(0,A-(B+C)),qo.TOP_LEFT):new _s(n.left+x+U,n.top+B+C),this.topRightContentBox=o>0||s>0?Es(n.left+Math.min(m,n.width+x+U),n.top+B+C,m>n.width+x+U?0:o-x+U,s-(B+C),qo.TOP_RIGHT):new _s(n.left+n.width-(_+S),n.top+B+C),this.bottomRightContentBox=u>0||c>0?Es(n.left+Math.min(y,n.width-(x+U)),n.top+Math.min(v,n.height+B+C),Math.max(0,u-(_+S)),c-(b+E),qo.BOTTOM_RIGHT):new _s(n.left+n.width-(_+S),n.top+n.height-(b+E)),this.bottomLeftContentBox=h>0||f>0?Es(n.left+x+U,n.top+w,Math.max(0,h-(x+U)),f-(b+E),qo.BOTTOM_LEFT):new _s(n.left+x+U,n.top+n.height-(b+E))}return 
e}();!function(e){e[e.TOP_LEFT=0]="TOP_LEFT",e[e.TOP_RIGHT=1]="TOP_RIGHT",e[e.BOTTOM_RIGHT=2]="BOTTOM_RIGHT",e[e.BOTTOM_LEFT=3]="BOTTOM_LEFT"}(qo||(qo={}));var Es=function(e,t,n,r,i){var A=(Math.sqrt(2)-1)/3*4,a=n*A,o=r*A,s=e+n,l=t+r;switch(i){case qo.TOP_LEFT:return new xs(new _s(e,l),new _s(e,l-o),new _s(s-a,t),new _s(s,t));case qo.TOP_RIGHT:return new xs(new _s(e,t),new _s(e+a,t),new _s(s,l-o),new _s(s,l));case qo.BOTTOM_RIGHT:return new xs(new _s(s,t),new _s(s,t+o),new _s(e+a,l),new _s(e,l));case qo.BOTTOM_LEFT:default:return new xs(new _s(s,l),new _s(s-a,l),new _s(e,t+o),new _s(e,t))}},Us=function(e){return[e.topLeftBorderBox,e.topRightBorderBox,e.bottomRightBorderBox,e.bottomLeftBorderBox]},Ms=function(e){return[e.topLeftContentBox,e.topRightContentBox,e.bottomRightContentBox,e.bottomLeftContentBox]},Fs=function(e){return[e.topLeftPaddingBox,e.topRightPaddingBox,e.bottomRightPaddingBox,e.bottomLeftPaddingBox]},Ts=function(){function e(e,t,n){this.offsetX=e,this.offsetY=t,this.matrix=n,this.type=0,this.target=6}return e}(),Qs=function(){function e(e,t){this.path=e,this.target=t,this.type=1}return e}(),ks=function(){function e(e){this.opacity=e,this.type=2,this.target=6}return e}(),Ls=function(e){return 0===e.type},Is=function(e){return 1===e.type},Ds=function(e){return 2===e.type},Rs=function(e,t){return e.length===t.length&&e.some((function(e,n){return e===t[n]}))},Ps=function(e,t,n,r,i){return e.map((function(e,A){switch(A){case 0:return e.add(t,n);case 1:return e.add(t+r,n);case 2:return e.add(t+r,n+i);case 3:return e.add(t,n+i)}return e}))},Hs=function(){function e(e){this.element=e,this.inlineLevel=[],this.nonInlineLevel=[],this.negativeZIndex=[],this.zeroOrAutoZIndexOrTransformedOrOpacity=[],this.positiveZIndex=[],this.nonPositionedFloats=[],this.nonPositionedInlineLevel=[]}return e}(),Os=function(){function e(e,t){if(this.container=e,this.parent=t,this.effects=[],this.curves=new Ss(this.container),this.container.styles.opacity<1&&this.effects.push(new ks(this.container.styles.opacity)),null!==this.container.styles.transform){var n=this.container.bounds.left+this.container.styles.transformOrigin[0].number,r=this.container.bounds.top+this.container.styles.transformOrigin[1].number,i=this.container.styles.transform;this.effects.push(new Ts(n,r,i))}if(0!==this.container.styles.overflowX){var A=Us(this.curves),a=Fs(this.curves);Rs(A,a)?this.effects.push(new Qs(A,6)):(this.effects.push(new Qs(A,2)),this.effects.push(new Qs(a,4)))}}return e.prototype.getEffects=function(e){for(var t=-1===[2,3].indexOf(this.container.styles.position),n=this.parent,r=this.effects.slice(0);n;){var i=n.effects.filter((function(e){return!Is(e)}));if(t||0!==n.container.styles.position||!n.parent){if(r.unshift.apply(r,i),t=-1===[2,3].indexOf(n.container.styles.position),0!==n.container.styles.overflowX){var A=Us(n.curves),a=Fs(n.curves);Rs(A,a)||r.unshift(new Qs(a,6))}}else r.unshift.apply(r,i);n=n.parent}return r.filter((function(t){return iA(t.target,e)}))},e}(),Ns=function e(t,n,r,i){t.container.elements.forEach((function(A){var a=iA(A.flags,4),o=iA(A.flags,2),s=new Os(A,t);iA(A.styles.display,2048)&&i.push(s);var l=iA(A.flags,8)?[]:i;if(a||o){var u=a||A.styles.isPositioned()?r:n,c=new Hs(s);if(A.styles.isPositioned()||A.styles.opacity<1||A.styles.isTransformed()){var d=A.styles.zIndex.order;if(d<0){var h=0;u.negativeZIndex.some((function(e,t){return d>e.element.container.styles.zIndex.order?(h=t,!1):h>0})),u.negativeZIndex.splice(h,0,c)}else if(d>0){var f=0;u.positiveZIndex.some((function(e,t){return 
d>=e.element.container.styles.zIndex.order?(f=t+1,!1):f>0})),u.positiveZIndex.splice(f,0,c)}else u.zeroOrAutoZIndexOrTransformedOrOpacity.push(c)}else A.styles.isFloating()?u.nonPositionedFloats.push(c):u.nonPositionedInlineLevel.push(c);e(s,c,a?c:r,l)}else A.styles.isInlineLevel()?n.inlineLevel.push(s):n.nonInlineLevel.push(s),e(s,n,r,l);iA(A.flags,8)&&Vs(A,l)}))},Vs=function(e,t){for(var n=e instanceof Va?e.start:1,r=e instanceof Va&&e.reversed,i=0;i0&&e.intrinsicHeight>0){var r=Js(e),i=Fs(t);this.path(i),this.ctx.save(),this.ctx.clip(),this.ctx.drawImage(n,0,0,e.intrinsicWidth,e.intrinsicHeight,r.left,r.top,r.width,r.height),this.ctx.restore()}},n.prototype.renderNodeContent=function(e){return r(this,void 0,void 0,(function(){var t,r,A,o,s,l,u,c,d,h,f,p,g,m,v,y,w,B;return i(this,(function(i){switch(i.label){case 0:this.applyEffects(e.getEffects(4)),t=e.container,r=e.curves,A=t.styles,o=0,s=t.textNodes,i.label=1;case 1:return o0&&x>0&&(v=r.ctx.createPattern(p,"repeat"),r.renderRepeat(w,v,S,E))):kr(n)&&(y=el(e,t,[null,null,null]),w=y[0],B=y[1],_=y[2],b=y[3],x=y[4],C=0===n.position.length?[Gn]:n.position,S=jn(C[0],b),E=jn(C[C.length-1],x),U=Br(n,S,E,b,x),M=U[0],F=U[1],M>0&&F>0&&(T=r.ctx.createRadialGradient(B+S,_+E,0,B+S,_+E,M),gr(n.stops,2*M).forEach((function(e){return T.addColorStop(e.stop,ir(e.color))})),r.path(w),r.ctx.fillStyle=T,M!==F?(Q=e.bounds.left+.5*e.bounds.width,k=e.bounds.top+.5*e.bounds.height,I=1/(L=F/M),r.ctx.save(),r.ctx.translate(Q,k),r.ctx.transform(1,0,0,L,0,0),r.ctx.translate(-Q,-k),r.ctx.fillRect(B,I*(_-k)+k,b,x*I),r.ctx.restore()):r.ctx.fill())),i.label=6;case 6:return t--,[2]}}))},r=this,A=0,a=e.styles.backgroundImage.slice(0).reverse(),s.label=1;case 1:return A0?2!==l.style?[3,5]:[4,this.renderDashedDottedBorder(l.color,l.width,a,e.curves,2)]:[3,11]:[3,13];case 4:return i.sent(),[3,11];case 5:return 3!==l.style?[3,7]:[4,this.renderDashedDottedBorder(l.color,l.width,a,e.curves,3)];case 6:return i.sent(),[3,11];case 7:return 4!==l.style?[3,9]:[4,this.renderDoubleBorder(l.color,l.width,a,e.curves)];case 8:return i.sent(),[3,11];case 9:return[4,this.renderSolidBorder(l.color,a,e.curves)];case 10:i.sent(),i.label=11;case 11:a++,i.label=12;case 12:return o++,[3,3];case 13:return[2]}}))}))},n.prototype.renderDashedDottedBorder=function(e,t,n,A,a){return r(this,void 0,void 0,(function(){var r,o,s,l,u,c,d,h,f,p,g,m,v,y,w,B;return i(this,(function(i){return this.ctx.save(),r=js(A,n),o=Gs(A,n),2===a&&(this.path(o),this.ctx.clip()),Cs(o[0])?(s=o[0].start.x,l=o[0].start.y):(s=o[0].x,l=o[0].y),Cs(o[1])?(u=o[1].end.x,c=o[1].end.y):(u=o[1].x,c=o[1].y),d=0===n||2===n?Math.abs(s-u):Math.abs(l-c),this.ctx.beginPath(),3===a?this.formatPath(r):this.formatPath(o.slice(0,2)),h=t<3?3*t:2*t,f=t<3?2*t:t,3===a&&(h=t,f=t),p=!0,d<=2*h?p=!1:d<=2*h+f?(h*=g=d/(2*h+f),f*=g):(m=Math.floor((d+f)/(h+f)),v=(d-m*h)/(m-1),f=(y=(d-(m+1)*h)/m)<=0||Math.abs(f-v)
      | Title | Snippet | URL |
      | --- | --- | --- |
      | 20+ Kamen Rider Geats HD Wallpapers and Backgrounds | Each of these 20+ Kamen Rider Geats Wallpapers has been community curated to work great as a wallpaper. Explore: Wallpapers Phone Wallpapers pfp. 4K Kamen Rider Geats Wallpapers. Infinite. All Resolutions. | |
      | Kamen Rider Geats (TV Series 2022–2023) - IMDb | Kamen Rider Geats: With Hideyoshi Kan, Kazuto Mokudai, Kok | |


      -

      Websites

      -

      Websites are online platforms that provide various kinds of content and services. You can use them to find wallpaper Kamen Rider Geats by browsing through their collections or categories. Some websites may require you to register or pay a fee to access their wallpapers, while others may offer them for free. Some websites may also allow you to upload your own wallpapers or request custom ones.

      -

      Some of the most popular websites that offer wallpaper Kamen Rider Geats are WallpaperAccess, WallpaperCave, WallpaperFlare, etc. They have a large and diverse selection of wallpapers for different devices and resolutions. You can also filter them by color, theme, style, etc. You can preview the wallpapers before downloading them and see how they look on your device.

      -

      For example, here are some of the wallpapers from WallpaperAccess when we searched for "Kamen Rider Geats":

      | Image | Title | Resolution | URL |
      | --- | --- | --- | --- |
      | Kamen Rider Geats Wallpaper | Kamen Rider Geats Wallpaper | 1920x1080 | |
      | Kamen Rider Geats Zio Wallpaper | Kamen Rider Geats Zio Wallpaper | 1920x1080 | |
      | Kamen Rider Geats Revive Wallpaper | Kamen Rider Geats Revive Wallpaper | 1920x1080 | |
      -

      Apps

      -

      Apps are software applications that run on your device. You can use them to find wallpaper Kamen Rider Geats by downloading them from the app store or the website of the developer. Some apps may require you to grant permissions or watch ads to use their wallpapers, while others may offer them without any restrictions. Some apps may also have features such as cropping, editing, sharing, etc.

      -

      Some of the most popular apps that provide wallpaper Kamen Rider Geats are Zedge, Walli, Backdrops, etc. They have a user-friendly interface and regularly updated wallpapers for different genres and fandoms. You can also rate, review, and save your favorite wallpapers for later use.

      -

      For example, here are some of the wallpapers from Zedge when we searched for "Kamen Rider Geats":

      | Image | Title | Rating | URL |
      | --- | --- | --- | --- |
      | Kamen Rider Geats | Kamen Rider Geats | 4.5/5 stars | |
      | Kamen Rider Geats Trinity | Kamen Rider Geats Trinity | 4/5 stars | |
      | Kamen Rider Geats Woz | Kamen Rider Geats Woz | 4/5 stars | |
      -

      Mobile

      -

      To download wallpaper Kamen Rider Geats on your mobile device, you can do one of the following (a scripted way to fetch a wallpaper by its direct URL is also sketched after the tips list below):

      -
        -
      • -

        Some tips and tricks for mobile users are:

        -
          -
        • -
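As a quick illustration for readers comfortable with scripting (the URL and file name below are placeholders, not real links), a minimal Python sketch using the third-party requests library can fetch a wallpaper image by its direct URL:

```python
# Minimal sketch: download an image from a direct URL and save it locally.
# The URL and output file name are hypothetical placeholders.
import requests

url = "https://example.com/kamen-rider-geats-wallpaper.jpg"  # replace with a real direct image link
output_file = "kamen-rider-geats-wallpaper.jpg"

response = requests.get(url, timeout=30)
response.raise_for_status()  # abort if the server did not return the image

with open(output_file, "wb") as f:
    f.write(response.content)

print(f"Saved {len(response.content)} bytes to {output_file}")
```

You could then copy the saved file to your phone (for example over USB or a cloud drive) and set it from your gallery app.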

          How to Set Wallpaper Kamen Rider Geats on Different Devices

          -

          Once you have downloaded the wallpaper Kamen Rider Geats that you like, you can set it as your device's background by following these steps:

          -

          PC

          -

           To set wallpaper Kamen Rider Geats on your PC, you can do one of the following (for Windows, a short scripted approach is also sketched after this list):

          -
            -
          • -

            Some options and settings for PC users are:

            -
              -
            • -
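As a rough sketch for Windows users who prefer scripting (the image path below is a placeholder), Python's ctypes module can apply a downloaded image as the desktop wallpaper via the Win32 SystemParametersInfoW call:

```python
# Minimal sketch for Windows only: apply a local image file as the desktop wallpaper.
# The image path is a hypothetical placeholder.
import ctypes
import os

SPI_SETDESKWALLPAPER = 20  # SystemParametersInfo action code for changing the wallpaper
wallpaper_path = os.path.abspath("kamen-rider-geats-wallpaper.jpg")

# The final argument (3) combines SPIF_UPDATEINIFILE and SPIF_SENDCHANGE,
# so the change is saved to the user profile and broadcast to the system.
result = ctypes.windll.user32.SystemParametersInfoW(SPI_SETDESKWALLPAPER, 0, wallpaper_path, 3)
print("Wallpaper applied" if result else "Call failed")
```

Linux and macOS desktops use their own settings dialogs or tools, so this particular call only applies to Windows.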

              Mobile

              -

              To set wallpaper Kamen Rider Geats on your mobile device, you can do one of the following:

              -
                -
              • -

                Some options and settings for mobile users are:

                -
                  -
                • -

                  Conclusion

                  -

                  In this article, we have shown you how to download wallpaper Kamen Rider Geats from various sources online. We have also given you some tips on how to set them as your device's background. By following these steps, you will be able to enjoy the amazing visuals of Kamen Rider Geats anytime and anywhere.

                  -

                  Kamen Rider Geats is a popular Japanese tokusatsu drama series that follows a young man who fights in the Desire Grand Prix, a survival game in which masked Riders compete to protect the city and realize their ideal world. It is part of the Kamen Rider franchise, which has been running since 1971. It has a loyal fan base and a rich lore. If you are one of the fans, you might want to show your love and support by downloading wallpaper Kamen Rider Geats for your device.

                  -

                  So, what are you waiting for? Go ahead and download wallpaper Kamen Rider Geats now and make your device look awesome. You can also share your wallpapers with your friends and family and spread the word about this amazing show. You can also check out other wallpapers related to the Kamen Rider franchise and discover more characters and stories.

                  -

                  Thank you for reading this article. We hope you found it helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you.

                  -

                  FAQs

                  -

                  Here are some of the frequently asked questions about wallpaper Kamen Rider Geats:

                  -
                    -
                  1. What is the best resolution for wallpaper Kamen Rider Geats?

                    The best resolution for wallpaper Kamen Rider Geats depends on the size and quality of your device's screen. Generally, the higher the resolution, the better the image quality. However, higher resolution also means larger file size and more storage space. You can check your device's screen resolution by going to your settings, by using an online tool such as WhatIsMyScreenResolution.com, or, on a desktop PC, with a short script like the one sketched after this FAQ list. You can then choose a wallpaper that matches or exceeds your screen resolution.

                    -
                  2. Where can I watch Kamen Rider Geats online?

                    You can watch Kamen Rider Geats online on various streaming platforms such as Netflix, Hulu, Amazon Prime Video, etc. You can also watch it on YouTube or Dailymotion, but be aware of the quality and legality of the videos. You can also buy or rent the DVDs or Blu-rays of the show from online or offline stores.

                    -
                  3. Who are the main characters of Kamen Rider Geats?

                    The main characters of Kamen Rider Geats are:

                    -
                      - Ace Ukiyo (Kamen Rider Geats)
                      - Keiwa Sakurai (Kamen Rider Tycoon)
                      - Neon Kurama (Kamen Rider Na-Go)
                      - Michinaga Azuma (Kamen Rider Buffa)

                  4. What is Kamen Rider Geats about?

                      Kamen Rider Geats is a sci-fi action drama that explores the themes of desire, survival, friendship, rivalry, and justice. Its story revolves around the Desire Grand Prix, a recurring game in which masked Riders compete to protect the city and earn the right to reshape the world as they wish. It is part of the Reiwa era of the Kamen Rider franchise.

                      -
                  5. How many episodes does Kamen Rider Geats have?

                      Kamen Rider Geats has 49 episodes in total, each lasting about 24 minutes. It aired from September 4, 2022 to August 27, 2023 on TV Asahi. It was followed by Kamen Rider Gotchard, the next series in the Reiwa era.

                      -
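For desktop users who prefer a script over a website (as mentioned in the first FAQ above), here is a minimal Python sketch using the standard-library Tkinter toolkit to print the primary display's resolution; this is just one possible approach:

```python
# Minimal sketch: print the primary display's resolution using Tkinter.
import tkinter as tk

root = tk.Tk()
root.withdraw()  # we only need to query the display, not show a window
width = root.winfo_screenwidth()
height = root.winfo_screenheight()
root.destroy()

print(f"Screen resolution: {width}x{height}")
```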

                  401be4b1e0
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Euro Truck Driver 2018 Download MOD APK with Unlimited Money.md b/spaces/1phancelerku/anime-remove-background/Euro Truck Driver 2018 Download MOD APK with Unlimited Money.md deleted file mode 100644 index 40d3ee72e38416652b1eb28e915921fe7fc3d426..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Euro Truck Driver 2018 Download MOD APK with Unlimited Money.md +++ /dev/null @@ -1,124 +0,0 @@ - -

                  Euro Truck Driver 2018 Hack Mod APK Download: How to Get Unlimited Money and Features

                  -

                  If you are a fan of truck driving simulation games, you might have heard of Euro Truck Driver 2018, one of the most popular and realistic truck simulator games on the market. In this game, you can experience the thrill of driving across Europe, transporting goods from one city to another, exploring the amazing open world map, and enjoying the next-gen graphics, awesome features, and realistic trucking scenarios. However, you might also find that the game is quite challenging and requires a lot of time and money to unlock all the trucks, trailers, and features that you want. That's why some players resort to using a hack mod apk for Euro Truck Driver 2018, which is a modified version of the game that gives you unlimited money and access to everything in the game. But is it worth it? What are the benefits and risks of using a hack mod apk for Euro Truck Driver 2018? How can you download and install it safely? And are there any alternatives to using a hack mod apk for Euro Truck Driver 2018? In this article, we will answer all these questions and more. Read on to find out more.

                  -

                  Introduction

                  -

                  What is Euro Truck Driver 2018?

                  -

                  Euro Truck Driver 2018 is a truck driving simulation game developed by Ovidiu Pop, a well-known developer of simulation games. The game was released in 2020 and has since gained millions of downloads and positive reviews from players all over the world. The game features many Euro truck brands, such as Mercedes-Benz, Volvo, Scania, MAN, Renault, DAF, Iveco, and more, with realistic engine sounds and detailed interiors. You can drive across Europe, from Germany to France, from Spain to Italy, from the UK to Poland, and more, transporting cargo from one city to another, such as cars, food, furniture, chemicals, livestock, etc. You can explore the amazing open world map, which includes deserts, snow, mountains, and cities, with a dynamic weather system (snow, rain, sun...). You can also enjoy the realistic controls, such as tilt steering, buttons or a virtual steering wheel, manual transmission with an h-shifter and clutch, accurate engine sounds, etc. You can play in multiplayer mode and join your friends or other drivers online in real time. You can also play in career mode and become a professional truck driver by completing missions and earning money. You can use your money to buy new trucks or upgrade your existing ones. You can also customize your trucks with different paint jobs, accessories, lights, horns, etc. You can also see visual and mechanical damage on your vehicles if you crash or drive recklessly.

                  -

                  euro truck driver 2018 hack mod apk download


                  Download: https://jinyurl.com/2uNRrg



                  -

                  Why do you need a hack mod apk for Euro Truck Driver 2018?

                  -

                  As you can see, Euro Truck Driver 2018 is a very fun and immersive game that offers a lot of content and features for you to enjoy. However, you might also encounter some challenges and limitations that might affect your gaming experience. For example, you might find that the game is too hard or too slow for your liking, especially if you are a beginner or a casual player. You might have to spend a lot of time and effort to complete the missions, earn money, and unlock new trucks and features. You might also have to deal with the in-game ads that might pop up and interrupt your gameplay. You might also need to have a rooted device to access some of the advanced features of the game. That's why some players look for a hack mod apk for Euro Truck Driver 2018, which is a modified version of the game that gives you unlimited money and access to everything in the game. With a hack mod apk, you can enjoy the game without any restrictions or hassles. You can buy any truck or trailer you want, upgrade them to the max, customize them as you wish, and drive them across Europe with ease. You can also play the game without any ads or root requirement. Sounds tempting, right? But before you download and install a hack mod apk for Euro Truck Driver 2018, you should also be aware of the benefits and risks of using one.

                  -

                  Benefits of Euro Truck Driver 2018 Hack Mod APK

                  -

                  Unlimited money

                  -

                  The most obvious benefit of using a hack mod apk for Euro Truck Driver 2018 is that you can get unlimited money in the game. Money is the main currency in the game that you can use to buy new trucks, trailers, upgrades, and customizations. Normally, you have to earn money by completing missions, delivering goods, driving safely, etc. However, this can be very time-consuming and tedious, especially if you want to buy the most expensive and powerful trucks in the game. With a hack mod apk, you can get unlimited money instantly and spend it as much as you want. You can buy any truck or trailer you like, from the cheapest to the most luxurious ones. You can also upgrade your trucks with better engines, transmissions, tires, brakes, etc. You can also customize your trucks with different paint jobs, accessories, lights, horns, etc. You can make your truck look unique and awesome with unlimited money.

                  -

                  Unlocked trucks and trailers

                  -

                  Another benefit of using a hack mod apk for Euro Truck Driver 2018 is that you can get unlocked trucks and trailers in the game. Trucks and trailers are the main vehicles in the game that you can use to transport goods across Europe. There are many different types of trucks and trailers in the game, such as flatbeds, refrigerated trailers, car carriers, tankers, etc. Each truck and trailer has its own specifications, such as speed, power, fuel consumption, cargo capacity, etc. Normally, you have to unlock new trucks and trailers by earning money and reaching certain levels in the game. However, this can be very challenging and frustrating, especially if you want to try out different trucks and trailers in the game. With a hack mod apk, you can get unlocked trucks and trailers instantly and use them as you wish. You can switch between different trucks and trailers depending on your preference and mission requirements. You can also enjoy driving different brands of trucks with realistic engine sounds and detailed interiors.

                  -

                  No ads and no root required

                  -

                  A third benefit of using a hack mod apk for Euro Truck Driver 2018 is that you can play the game without ads and without rooting your device. Ads are pop-ups that appear on your screen while playing. They distract you from the gameplay or make you wait a few seconds before resuming, and they can consume data and battery life unnecessarily. Normally, you have to watch ads or pay real money to remove them. With a hack mod apk, you can play without any ads and enjoy the game without interruptions. Root is a process that unlocks advanced features of your device or an app that are normally restricted by the manufacturer or developer, but rooting your device can void your warranty or expose it to security risks and malware. Normally, you would need a rooted device to access some of the advanced features of Euro Truck Driver 2018, such as multiplayer mode or visual damage effects. With a hack mod apk, you can access all the features of the game without rooting and without risking your device or warranty.

                  -

                  Risks of Euro Truck Driver 2018 Hack Mod APK

                  -

                  Possible malware or virus infection

                  -

                  However, using a hack mod apk for Euro Truck Driver 2018 is not without risks. One of the main risks is a malware or virus infection on your device. Malware is malicious software that can harm your device or steal your personal information. Hack mod apk files are usually downloaded from unofficial or unknown sources that might not be trustworthy or secure. They might contain hidden malware that infects your device once you install them, ask for unnecessary permissions or access to your device's data (contacts, photos, messages, and so on), or display unwanted ads and pop-ups that redirect you to harmful websites or download more malware onto your device. Therefore, be very careful when downloading and installing a hack mod apk for Euro Truck Driver 2018. Always scan the file with a reliable antivirus program before installing it, back up your device's data, and consider using a VPN to protect your privacy and security.
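
                  As an extra precaution beyond antivirus scanning (optional, and not part of the original guide; the file name and checksum below are only placeholders), you can compare the downloaded file's SHA-256 checksum against the value published by the download site before installing it. A minimal Python sketch:

                      import hashlib

                      def sha256_of(path):
                          # Hash the file in chunks so large APKs do not need to fit in memory.
                          digest = hashlib.sha256()
                          with open(path, "rb") as f:
                              for chunk in iter(lambda: f.read(8192), b""):
                                  digest.update(chunk)
                          return digest.hexdigest()

                      # Placeholder file name and checksum; use the real values from the download page.
                      apk_path = "euro-truck-driver-2018-mod.apk"
                      expected = "paste-the-published-sha256-here"
                      print("checksum matches:", sha256_of(apk_path) == expected)

                  If the checksum does not match, the file may have been tampered with or corrupted and should not be installed.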

                  -

                  euro truck driver 2018 hack mod apk download for android
                  -euro truck driver 2018 hack mod apk download unlimited money
                  -euro truck driver 2018 hack mod apk download latest version
                  -euro truck driver 2018 hack mod apk download revdl
                  -euro truck driver 2018 hack mod apk download rexdl
                  -euro truck driver 2018 hack mod apk download android 1
                  -euro truck driver 2018 hack mod apk download uptodown
                  -euro truck driver 2018 hack mod apk download apkpure
                  -euro truck driver 2018 hack mod apk download free
                  -euro truck driver 2018 hack mod apk download offline
                  -euro truck driver 2018 hack mod apk download no root
                  -euro truck driver 2018 hack mod apk download obb
                  -euro truck driver 2018 hack mod apk download ios
                  -euro truck driver 2018 hack mod apk download pc
                  -euro truck driver 2018 hack mod apk download windows 10
                  -euro truck driver 2018 hack mod apk download windows 7
                  -euro truck driver 2018 hack mod apk download laptop
                  -euro truck driver 2018 hack mod apk download mac
                  -euro truck driver 2018 hack mod apk download bluestacks
                  -euro truck driver 2018 hack mod apk download without verification
                  -euro truck driver 2018 hack mod apk download without survey
                  -euro truck driver 2018 hack mod apk download without human verification
                  -euro truck driver 2018 hack mod apk download without password
                  -euro truck driver 2018 hack mod apk download online
                  -euro truck driver 2018 hack mod apk download link
                  -euro truck driver 2018 hack mod apk download site
                  -euro truck driver 2018 hack mod apk download website
                  -euro truck driver 2018 hack mod apk download google drive
                  -euro truck driver 2018 hack mod apk download mediafire
                  -euro truck driver 2018 hack mod apk download mega
                  -euro truck driver 2018 hack mod apk download zippyshare
                  -euro truck driver 2018 hack mod apk download filehippo
                  -euro truck driver 2018 hack mod apk download softonic
                  -euro truck driver 2018 hack mod apk download cnet
                  -euro truck driver 2018 hack mod apk download malavida
                  -euro truck driver 2018 hack mod apk download happymod
                  -euro truck driver 2018 hack mod apk download mob.org
                  -euro truck driver 2018 hack mod apk download an1.com
                  -euro truck driver 2018 hack mod apk download dlandroid.com
                  -euro truck driver 2018 hack mod apk download andropalace.org
                  -euro truck driver 2018 hack mod apk download androeed.ru
                  -euro truck driver 2018 hack mod apk download andropark.info
                  -euro truck driver 2018 hack mod apk download androking.org
                  -euro truck driver 2018 hack mod apk download androgamer.org
                  -euro truck driver 2018 hack mod apk download androplace.net
                  -euro truck driver 2018 hack mod apk download androeed.net
                  -euro truck driver 2018 hack mod apk download androapk.org

                  -

                  Ban from online multiplayer mode

                  -

                  Another risk of using a hack mod apk for Euro Truck Driver 2018 is that you might get a ban from online multiplayer mode. Online multiplayer mode is one of the most fun and exciting features of Euro Truck Driver 2018, where you can join your friends or other drivers online in real-time and compete with them in various missions and challenges. However, online multiplayer mode also requires a fair and balanced gameplay for all players. Therefore, the game developers have implemented a cheat detection system that can detect and ban players who use hack mod apk or other cheating methods in online multiplayer mode. If you use a hack mod apk for Euro Truck Driver 2018, you might get detected and banned from online multiplayer mode permanently. You might also lose your game progress and data, as well as your reputation and ranking among other players. Therefore, you should avoid using a hack mod apk for Euro Truck Driver 2018 if you want to play online multiplayer mode without any risk of getting banned.

                  -

                  Loss of game progress and data

                  -

                  A third risk of using a hack mod apk for Euro Truck Driver 2018 is losing your game progress and data. Game progress and data are the information the game saves on your device or online account, such as your level, money, trucks, trailers, missions, and achievements. They let you continue playing from where you left off and keep track of your performance. However, a hack mod apk can cause compatibility issues with the original version of the game or with official updates from the developers. It might not work properly, crash frequently, or overwrite and corrupt your saved data, making it unusable or inaccessible. You might also lose your progress if you uninstall the hack mod apk or switch back to the original version of the game. Therefore, always back up your game progress and data before using a hack mod apk for Euro Truck Driver 2018.

                  -

                  How to Download and Install Euro Truck Driver 2018 Hack Mod APK

                  -

                  Step 1: Find a reliable source for the hack mod apk file

                  -

                  If you still want to try a hack mod apk for Euro Truck Driver 2018 despite the risks, you first need to find a reliable source for the file. A reliable source is a website or platform that provides genuine and safe hack mod apk files for various games and apps. It should have positive reviews and feedback from other users who have downloaded and used its files, clear and detailed instructions on how to download and install them, and updated, working files that are compatible with the latest version of the game and your device. You can search online for some of the better-known sources of hack mod apk files for Euro Truck Driver 2018, such as APKPure, APKMody, ModDroid, etc.

                  -

                  Step 2: Enable unknown sources on your device settings

                  -

                  After finding a reliable source for the hack mod apk file for Euro Truck Driver 2018, you next need to enable unknown sources in your device settings. Unknown sources are sources that are not verified or authorized by your device's official app store or developer. Hack mod apk files are usually downloaded from such sources, as they are not available on the official app store or the developer's website, so you must allow your device to install apps from them. To enable unknown sources, follow these steps:

                  -
                  1. Go to your device's Settings app and tap on Security or Privacy.
                  2. Find and tap on the option that says Unknown Sources or Install Unknown Apps.
                  3. Toggle on the switch or check the box that allows you to install apps from unknown sources.
                  4. A warning message might pop up, telling you about the risks of installing apps from unknown sources. Tap on OK or Allow to confirm your choice.

                  You have now enabled unknown sources on your device settings and you can proceed to download and install the hack mod apk file for Euro Truck Driver 2018.

                  -

                  Step 3: Download and install the hack mod apk file

                  -

                  The final step is to download and install the hack mod apk file for Euro Truck Driver 2018 from the source you have chosen. To do so, follow these steps:

                  -
                  1. Go to the website or platform that provides the hack mod apk file for Euro Truck Driver 2018 and find the download link or button.
                  2. Tap on the download link or button and wait for the hack mod apk file to be downloaded on your device. The download time might vary depending on your internet speed and file size.
                  3. Once the hack mod apk file is downloaded, go to your device's File Manager app and locate the hack mod apk file in your Downloads folder or any other folder where you have saved it.
                  4. Tap on the hack mod apk file and a pop-up window might appear, asking you if you want to install the app. Tap on Install or Yes to start the installation process.
                  5. The installation process might take a few seconds or minutes, depending on your device's performance and file size. You might also see some progress bars or indicators showing you the installation status.
                  6. Once the installation process is completed, a message might appear, telling you that the app is installed successfully. Tap on Open or Done to launch or exit the app.

                  You have now downloaded and installed the hack mod apk file for Euro Truck Driver 2018 and you can start playing the game with unlimited money and features.
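
                  As an optional alternative not covered in the steps above, users who are comfortable with a computer can sideload the same APK over USB, assuming the Android platform tools (adb) are installed and USB debugging is enabled on the phone; the file name below is only a placeholder. A minimal Python sketch that shells out to adb:

                      import subprocess

                      # Placeholder file name; point this at the APK you actually downloaded.
                      apk_path = "euro-truck-driver-2018-mod.apk"

                      # "adb install" copies the APK to the connected device and installs it.
                      result = subprocess.run(["adb", "install", apk_path], capture_output=True, text=True)
                      print(result.stdout or result.stderr)

                  On success, adb typically prints "Success"; any other output usually indicates what went wrong.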

                  -

                  Alternatives to Euro Truck Driver 2018 Hack Mod APK

                  -

                  Use legitimate cheats and tips

                  -

                  If you are not comfortable with using a hack mod apk for Euro Truck Driver 2018, you can also use some legitimate cheats and tips that can help you improve your gameplay and performance in the game. Legitimate cheats and tips are the ones that are provided by the game developers or other reputable sources that do not involve any hacking or modification of the game files. They are usually based on some tricks, strategies, or secrets that can help you complete missions, earn money, unlock trucks, etc. more easily and quickly. Some examples of legitimate cheats and tips for Euro Truck Driver 2018 are:

                  -
                  • Earn more money by driving longer distances: The longer you drive, the more money you earn in Euro Truck Driver 2018. Choose missions that require you to drive across different countries or regions, as they pay more than missions within a single city or country.
                  • Earn more money by driving safely: The safer you drive, the more money you earn. Avoid crashing, speeding, and running red lights, as they reduce your earnings and damage your truck. Follow the traffic rules and signs, such as speed limits and lane markings, to increase your earnings and reputation.
                  • Earn more money by delivering special cargo: Special cargo is freight that is fragile, dangerous, or oversized, such as cars, chemicals, or livestock. These deliveries are more challenging but also more rewarding, as they require more skill and care. You can find special cargo missions by looking for the yellow icons on the map or in the job market.
                  • Unlock new trucks by reaching higher levels: The higher your level, the more trucks you unlock. Level up as fast as possible by completing missions, delivering goods, and driving safely. You can also use boosters or bonuses that increase your experience points (XP), such as driving at night or using a GPS. You can check your level and XP progress in the top left corner of the screen.
                  • Unlock new trailers by buying them from the garage: Save up money and buy new trailers from the garage, which you can reach from the map or the menu. You can choose from different types of trailers, such as flatbeds, refrigerated trailers, car carriers, and tankers, each with its own price and specifications (cargo capacity, weight, length, and so on). You can also sell your old trailers if you no longer need them.

                  These are some of the legitimate cheats and tips that you can use for Euro Truck Driver 2018. You can search online for more cheats and tips from other sources, such as YouTube videos, blogs, forums, etc.

                  -

                  Play other truck simulator games

                  -

                  If you are bored or dissatisfied with Euro Truck Driver 2018, you can also play some other truck simulator games that might offer you a different or better gaming experience. There are many other truck simulator games available on the market, such as American Truck Simulator, Truck Simulator USA, World Truck Driving Simulator, Heavy Truck Simulator, etc. Each game has its own features and advantages, such as different locations, trucks, graphics, gameplay modes, etc. You can compare and contrast different truck simulator games and choose the one that suits your preference and taste. You can also play multiple truck simulator games at the same time and switch between them whenever you want.

                  -

                  Conclusion

                  -

                  In conclusion, Euro Truck Driver 2018 is a great truck driving simulation game that offers plenty of fun and realism for truck enthusiasts and gamers alike. Some players are tempted to use a hack mod apk to get unlimited money and features. While a hack mod apk has some benefits, such as unlimited money, unlocked trucks and trailers, and no ads or root requirement, it also carries real risks: possible malware or virus infection, a ban from online multiplayer mode, and loss of game progress and data. Be careful and cautious if you decide to use one, and consider the alternatives, such as legitimate cheats and tips or other truck simulator games.

                  -

                  FAQs

                  -

                  Here are some of the frequently asked questions (FAQs) about Euro Truck Driver 2018 hack mod apk:

                  -
                  1. Q: Is Euro Truck Driver 2018 hack mod apk safe to use?
                  A: It is not safe to use unless you download it from a reliable source that provides genuine and safe hack mod apk files. However, even if you download it from a reliable source, you might still face risks such as malware or virus infection, a ban from online multiplayer mode, or loss of game progress and data. Always scan the file with a reliable antivirus program before installing it, back up your device's data, and consider using a VPN to protect your privacy and security.
                  2. Q: How can I get unlimited money in Euro Truck Driver 2018 without using a hack mod apk?
                  A: Use legitimate cheats and tips that help you earn money faster, such as driving longer distances, driving safely, and delivering special cargo. You can also use boosters or bonuses that increase your earnings, such as driving at night or using a GPS. Search online for more cheats and tips from sources such as YouTube videos, blogs, and forums.
                  3. Q: What are the best trucks and trailers in Euro Truck Driver 2018?
                  A: The best trucks and trailers depend on your personal preference, mission requirements, and budget. Some of the most popular and powerful choices are the Mercedes-Benz Actros, Scania R730, Volvo FH16, DAF XF105, and Iveco Stralis trucks, and the car carrier, refrigerated, tanker, and flatbed trailers. Compare them based on specifications such as speed, power, fuel consumption, and cargo capacity.
                  4. Q: How can I play online multiplayer mode in Euro Truck Driver 2018?
                  A: Tap the Multiplayer button on the main menu. You can then join or create a room with other players online in real time, and invite your friends or other drivers by sharing your room code or link. Choose your truck, trailer, location, weather, and time, then start driving with other players. You can also chat with them using the voice chat or text chat features.
                  5. Q: How can I update Euro Truck Driver 2018 to the latest version?
                  A: Check the official app store or the developer's website for available updates, or enable the Auto-update feature in your device settings to update the game automatically. However, if you are using a hack mod apk, you might not be able to update the game, as the mod might not be compatible with official updates from the developers, and updating while using a hack mod apk can also cost you your game progress and data.

                  401be4b1e0
                  -
                  -
                  \ No newline at end of file diff --git a/spaces/1vash/demo-flask-docker-template/README.md b/spaces/1vash/demo-flask-docker-template/README.md deleted file mode 100644 index 94369148f6d9a639030f0269111f92e92b4d1161..0000000000000000000000000000000000000000 --- a/spaces/1vash/demo-flask-docker-template/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Demo Flask Docker Template -emoji: 🐠 -colorFrom: indigo -colorTo: blue -sdk: docker -app_port: 5000 -models: ['1vash/mnist_demo_model'] -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/A00001/bingothoo/src/components/ui/alert-dialog.tsx b/spaces/A00001/bingothoo/src/components/ui/alert-dialog.tsx deleted file mode 100644 index 17fec4d16510328deacc1416569173c97761ef72..0000000000000000000000000000000000000000 --- a/spaces/A00001/bingothoo/src/components/ui/alert-dialog.tsx +++ /dev/null @@ -1,150 +0,0 @@ -'use client' - -import * as React from 'react' -import * as AlertDialogPrimitive from '@radix-ui/react-alert-dialog' - -import { cn } from '@/lib/utils' -import { buttonVariants } from '@/components/ui/button' - -const AlertDialog = AlertDialogPrimitive.Root - -const AlertDialogTrigger = AlertDialogPrimitive.Trigger - -const AlertDialogPortal = ({ - className, - children, - ...props -}: AlertDialogPrimitive.AlertDialogPortalProps) => ( - -
                  - {children} -
                  -
                  -) -AlertDialogPortal.displayName = AlertDialogPrimitive.Portal.displayName - -const AlertDialogOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - -)) -AlertDialogOverlay.displayName = AlertDialogPrimitive.Overlay.displayName - -const AlertDialogContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - - - - -)) -AlertDialogContent.displayName = AlertDialogPrimitive.Content.displayName - -const AlertDialogHeader = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
                  -) -AlertDialogHeader.displayName = 'AlertDialogHeader' - -const AlertDialogFooter = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
                  -) -AlertDialogFooter.displayName = 'AlertDialogFooter' - -const AlertDialogTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogTitle.displayName = AlertDialogPrimitive.Title.displayName - -const AlertDialogDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogDescription.displayName = - AlertDialogPrimitive.Description.displayName - -const AlertDialogAction = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogAction.displayName = AlertDialogPrimitive.Action.displayName - -const AlertDialogCancel = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogCancel.displayName = AlertDialogPrimitive.Cancel.displayName - -export { - AlertDialog, - AlertDialogTrigger, - AlertDialogContent, - AlertDialogHeader, - AlertDialogFooter, - AlertDialogTitle, - AlertDialogDescription, - AlertDialogAction, - AlertDialogCancel -} diff --git a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/tts_utils.py b/spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/tts_utils.py deleted file mode 100644 index 869b8b4bc495f119a4f09fa8436b6b6aec02a81d..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/tts_utils.py +++ /dev/null @@ -1,398 +0,0 @@ -from collections import defaultdict -import torch -import torch.nn.functional as F - - -def make_positions(tensor, padding_idx): - """Replace non-padding symbols with their position numbers. - - Position numbers begin at padding_idx+1. Padding symbols are ignored. - """ - # The series of casts and type-conversions here are carefully - # balanced to both work with ONNX export and XLA. In particular XLA - # prefers ints, cumsum defaults to output longs, and ONNX doesn't know - # how to handle the dtype kwarg in cumsum. 
- mask = tensor.ne(padding_idx).int() - return ( - torch.cumsum(mask, dim=1).type_as(mask) * mask - ).long() + padding_idx - - -def softmax(x, dim): - return F.softmax(x, dim=dim, dtype=torch.float32) - - -def sequence_mask(lengths, maxlen, dtype=torch.bool): - if maxlen is None: - maxlen = lengths.max() - mask = ~(torch.ones((len(lengths), maxlen)).to(lengths.device).cumsum(dim=1).t() > lengths).t() - mask.type(dtype) - return mask - - -INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0) - - -def _get_full_incremental_state_key(module_instance, key): - module_name = module_instance.__class__.__name__ - - # assign a unique ID to each module instance, so that incremental state is - # not shared across module instances - if not hasattr(module_instance, '_instance_id'): - INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1 - module_instance._instance_id = INCREMENTAL_STATE_INSTANCE_ID[module_name] - - return '{}.{}.{}'.format(module_name, module_instance._instance_id, key) - - -def get_incremental_state(module, incremental_state, key): - """Helper for getting incremental state for an nn.Module.""" - full_key = _get_full_incremental_state_key(module, key) - if incremental_state is None or full_key not in incremental_state: - return None - return incremental_state[full_key] - - -def set_incremental_state(module, incremental_state, key, value): - """Helper for setting incremental state for an nn.Module.""" - if incremental_state is not None: - full_key = _get_full_incremental_state_key(module, key) - incremental_state[full_key] = value - - -def fill_with_neg_inf(t): - """FP16-compatible function that fills a tensor with -inf.""" - return t.float().fill_(float('-inf')).type_as(t) - - -def fill_with_neg_inf2(t): - """FP16-compatible function that fills a tensor with -inf.""" - return t.float().fill_(-1e8).type_as(t) - - -def get_focus_rate(attn, src_padding_mask=None, tgt_padding_mask=None): - ''' - attn: bs x L_t x L_s - ''' - if src_padding_mask is not None: - attn = attn * (1 - src_padding_mask.float())[:, None, :] - - if tgt_padding_mask is not None: - attn = attn * (1 - tgt_padding_mask.float())[:, :, None] - - focus_rate = attn.max(-1).values.sum(-1) - focus_rate = focus_rate / attn.sum(-1).sum(-1) - return focus_rate - - -def get_phone_coverage_rate(attn, src_padding_mask=None, src_seg_mask=None, tgt_padding_mask=None): - ''' - attn: bs x L_t x L_s - ''' - src_mask = attn.new(attn.size(0), attn.size(-1)).bool().fill_(False) - if src_padding_mask is not None: - src_mask |= src_padding_mask - if src_seg_mask is not None: - src_mask |= src_seg_mask - - attn = attn * (1 - src_mask.float())[:, None, :] - if tgt_padding_mask is not None: - attn = attn * (1 - tgt_padding_mask.float())[:, :, None] - - phone_coverage_rate = attn.max(1).values.sum(-1) - # phone_coverage_rate = phone_coverage_rate / attn.sum(-1).sum(-1) - phone_coverage_rate = phone_coverage_rate / (1 - src_mask.float()).sum(-1) - return phone_coverage_rate - - -def get_diagonal_focus_rate(attn, attn_ks, target_len, src_padding_mask=None, tgt_padding_mask=None, - band_mask_factor=5, band_width=50): - ''' - attn: bx x L_t x L_s - attn_ks: shape: tensor with shape [batch_size], input_lens/output_lens - - diagonal: y=k*x (k=attn_ks, x:output, y:input) - 1 0 0 - 0 1 0 - 0 0 1 - y>=k*(x-width) and y<=k*(x+width):1 - else:0 - ''' - # width = min(target_len/band_mask_factor, 50) - width1 = target_len / band_mask_factor - width2 = target_len.new(target_len.size()).fill_(band_width) - width = torch.where(width1 < width2, width1, width2).float() 
- base = torch.ones(attn.size()).to(attn.device) - zero = torch.zeros(attn.size()).to(attn.device) - x = torch.arange(0, attn.size(1)).to(attn.device)[None, :, None].float() * base - y = torch.arange(0, attn.size(2)).to(attn.device)[None, None, :].float() * base - cond = (y - attn_ks[:, None, None] * x) - cond1 = cond + attn_ks[:, None, None] * width[:, None, None] - cond2 = cond - attn_ks[:, None, None] * width[:, None, None] - mask1 = torch.where(cond1 < 0, zero, base) - mask2 = torch.where(cond2 > 0, zero, base) - mask = mask1 * mask2 - - if src_padding_mask is not None: - attn = attn * (1 - src_padding_mask.float())[:, None, :] - if tgt_padding_mask is not None: - attn = attn * (1 - tgt_padding_mask.float())[:, :, None] - - diagonal_attn = attn * mask - diagonal_focus_rate = diagonal_attn.sum(-1).sum(-1) / attn.sum(-1).sum(-1) - return diagonal_focus_rate, mask - - -def select_attn(attn_logits, type='best'): - """ - - :param attn_logits: [n_layers, B, n_head, T_sp, T_txt] - :return: - """ - encdec_attn = torch.stack(attn_logits, 0).transpose(1, 2) - # [n_layers * n_head, B, T_sp, T_txt] - encdec_attn = (encdec_attn.reshape([-1, *encdec_attn.shape[2:]])).softmax(-1) - if type == 'best': - indices = encdec_attn.max(-1).values.sum(-1).argmax(0) - encdec_attn = encdec_attn.gather( - 0, indices[None, :, None, None].repeat(1, 1, encdec_attn.size(-2), encdec_attn.size(-1)))[0] - return encdec_attn - elif type == 'mean': - return encdec_attn.mean(0) - - -def make_pad_mask(lengths, xs=None, length_dim=-1): - """Make mask tensor containing indices of padded part. - Args: - lengths (LongTensor or List): Batch of lengths (B,). - xs (Tensor, optional): The reference tensor. - If set, masks will be the same shape as this tensor. - length_dim (int, optional): Dimension indicator of the above tensor. - See the example. - Returns: - Tensor: Mask tensor containing indices of padded part. - dtype=torch.uint8 in PyTorch 1.2- - dtype=torch.bool in PyTorch 1.2+ (including 1.2) - Examples: - With only lengths. - >>> lengths = [5, 3, 2] - >>> make_non_pad_mask(lengths) - masks = [[0, 0, 0, 0 ,0], - [0, 0, 0, 1, 1], - [0, 0, 1, 1, 1]] - With the reference tensor. - >>> xs = torch.zeros((3, 2, 4)) - >>> make_pad_mask(lengths, xs) - tensor([[[0, 0, 0, 0], - [0, 0, 0, 0]], - [[0, 0, 0, 1], - [0, 0, 0, 1]], - [[0, 0, 1, 1], - [0, 0, 1, 1]]], dtype=torch.uint8) - >>> xs = torch.zeros((3, 2, 6)) - >>> make_pad_mask(lengths, xs) - tensor([[[0, 0, 0, 0, 0, 1], - [0, 0, 0, 0, 0, 1]], - [[0, 0, 0, 1, 1, 1], - [0, 0, 0, 1, 1, 1]], - [[0, 0, 1, 1, 1, 1], - [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8) - With the reference tensor and dimension indicator. 
- >>> xs = torch.zeros((3, 6, 6)) - >>> make_pad_mask(lengths, xs, 1) - tensor([[[0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [1, 1, 1, 1, 1, 1]], - [[0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1]], - [[0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1]]], dtype=torch.uint8) - >>> make_pad_mask(lengths, xs, 2) - tensor([[[0, 0, 0, 0, 0, 1], - [0, 0, 0, 0, 0, 1], - [0, 0, 0, 0, 0, 1], - [0, 0, 0, 0, 0, 1], - [0, 0, 0, 0, 0, 1], - [0, 0, 0, 0, 0, 1]], - [[0, 0, 0, 1, 1, 1], - [0, 0, 0, 1, 1, 1], - [0, 0, 0, 1, 1, 1], - [0, 0, 0, 1, 1, 1], - [0, 0, 0, 1, 1, 1], - [0, 0, 0, 1, 1, 1]], - [[0, 0, 1, 1, 1, 1], - [0, 0, 1, 1, 1, 1], - [0, 0, 1, 1, 1, 1], - [0, 0, 1, 1, 1, 1], - [0, 0, 1, 1, 1, 1], - [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8) - """ - if length_dim == 0: - raise ValueError("length_dim cannot be 0: {}".format(length_dim)) - - if not isinstance(lengths, list): - lengths = lengths.tolist() - bs = int(len(lengths)) - if xs is None: - maxlen = int(max(lengths)) - else: - maxlen = xs.size(length_dim) - - seq_range = torch.arange(0, maxlen, dtype=torch.int64) - seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen) - seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1) - mask = seq_range_expand >= seq_length_expand - - if xs is not None: - assert xs.size(0) == bs, (xs.size(0), bs) - - if length_dim < 0: - length_dim = xs.dim() + length_dim - # ind = (:, None, ..., None, :, , None, ..., None) - ind = tuple( - slice(None) if i in (0, length_dim) else None for i in range(xs.dim()) - ) - mask = mask[ind].expand_as(xs).to(xs.device) - return mask - - -def make_non_pad_mask(lengths, xs=None, length_dim=-1): - """Make mask tensor containing indices of non-padded part. - Args: - lengths (LongTensor or List): Batch of lengths (B,). - xs (Tensor, optional): The reference tensor. - If set, masks will be the same shape as this tensor. - length_dim (int, optional): Dimension indicator of the above tensor. - See the example. - Returns: - ByteTensor: mask tensor containing indices of padded part. - dtype=torch.uint8 in PyTorch 1.2- - dtype=torch.bool in PyTorch 1.2+ (including 1.2) - Examples: - With only lengths. - >>> lengths = [5, 3, 2] - >>> make_non_pad_mask(lengths) - masks = [[1, 1, 1, 1 ,1], - [1, 1, 1, 0, 0], - [1, 1, 0, 0, 0]] - With the reference tensor. - >>> xs = torch.zeros((3, 2, 4)) - >>> make_non_pad_mask(lengths, xs) - tensor([[[1, 1, 1, 1], - [1, 1, 1, 1]], - [[1, 1, 1, 0], - [1, 1, 1, 0]], - [[1, 1, 0, 0], - [1, 1, 0, 0]]], dtype=torch.uint8) - >>> xs = torch.zeros((3, 2, 6)) - >>> make_non_pad_mask(lengths, xs) - tensor([[[1, 1, 1, 1, 1, 0], - [1, 1, 1, 1, 1, 0]], - [[1, 1, 1, 0, 0, 0], - [1, 1, 1, 0, 0, 0]], - [[1, 1, 0, 0, 0, 0], - [1, 1, 0, 0, 0, 0]]], dtype=torch.uint8) - With the reference tensor and dimension indicator. 
- >>> xs = torch.zeros((3, 6, 6)) - >>> make_non_pad_mask(lengths, xs, 1) - tensor([[[1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1], - [0, 0, 0, 0, 0, 0]], - [[1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0]], - [[1, 1, 1, 1, 1, 1], - [1, 1, 1, 1, 1, 1], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0]]], dtype=torch.uint8) - >>> make_non_pad_mask(lengths, xs, 2) - tensor([[[1, 1, 1, 1, 1, 0], - [1, 1, 1, 1, 1, 0], - [1, 1, 1, 1, 1, 0], - [1, 1, 1, 1, 1, 0], - [1, 1, 1, 1, 1, 0], - [1, 1, 1, 1, 1, 0]], - [[1, 1, 1, 0, 0, 0], - [1, 1, 1, 0, 0, 0], - [1, 1, 1, 0, 0, 0], - [1, 1, 1, 0, 0, 0], - [1, 1, 1, 0, 0, 0], - [1, 1, 1, 0, 0, 0]], - [[1, 1, 0, 0, 0, 0], - [1, 1, 0, 0, 0, 0], - [1, 1, 0, 0, 0, 0], - [1, 1, 0, 0, 0, 0], - [1, 1, 0, 0, 0, 0], - [1, 1, 0, 0, 0, 0]]], dtype=torch.uint8) - """ - return ~make_pad_mask(lengths, xs, length_dim) - - -def get_mask_from_lengths(lengths): - max_len = torch.max(lengths).item() - ids = torch.arange(0, max_len).to(lengths.device) - mask = (ids < lengths.unsqueeze(1)).bool() - return mask - - -def group_hidden_by_segs(h, seg_ids, max_len): - """ - - :param h: [B, T, H] - :param seg_ids: [B, T] - :return: h_ph: [B, T_ph, H] - """ - B, T, H = h.shape - h_gby_segs = h.new_zeros([B, max_len + 1, H]).scatter_add_(1, seg_ids[:, :, None].repeat([1, 1, H]), h) - all_ones = h.new_ones(h.shape[:2]) - cnt_gby_segs = h.new_zeros([B, max_len + 1]).scatter_add_(1, seg_ids, all_ones).contiguous() - h_gby_segs = h_gby_segs[:, 1:] - cnt_gby_segs = cnt_gby_segs[:, 1:] - h_gby_segs = h_gby_segs / torch.clamp(cnt_gby_segs[:, :, None], min=1) - return h_gby_segs, cnt_gby_segs - -def mel2token_to_dur(mel2token, T_txt=None, max_dur=None): - is_torch = isinstance(mel2token, torch.Tensor) - has_batch_dim = True - if not is_torch: - mel2token = torch.LongTensor(mel2token) - if T_txt is None: - T_txt = mel2token.max() - if len(mel2token.shape) == 1: - mel2token = mel2token[None, ...] 
- has_batch_dim = False - B, _ = mel2token.shape - dur = mel2token.new_zeros(B, T_txt + 1).scatter_add(1, mel2token, torch.ones_like(mel2token)) - dur = dur[:, 1:] - if max_dur is not None: - dur = dur.clamp(max=max_dur) - if not is_torch: - dur = dur.numpy() - if not has_batch_dim: - dur = dur[0] - return dur - -def expand_word2ph(word_encoding, ph2word): - word_encoding = F.pad(word_encoding,[0,0,1,0]) - ph2word_ = ph2word[:, :, None].repeat([1, 1, word_encoding.shape[-1]]) - out = torch.gather(word_encoding, 1, ph2word_) # [B, T, H] - return out \ No newline at end of file diff --git a/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/CLAP/utils.py b/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/CLAP/utils.py deleted file mode 100644 index f95931fb1c422cbd8349b88e1effb9323f170b2b..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/CLAP/utils.py +++ /dev/null @@ -1,26 +0,0 @@ -import argparse -import yaml -import sys - -def read_config_as_args(config_path,args=None,is_config_str=False): - return_dict = {} - - if config_path is not None: - if is_config_str: - yml_config = yaml.load(config_path, Loader=yaml.FullLoader) - else: - with open(config_path, "r") as f: - yml_config = yaml.load(f, Loader=yaml.FullLoader) - - if args != None: - for k, v in yml_config.items(): - if k in args.__dict__: - args.__dict__[k] = v - else: - sys.stderr.write("Ignored unknown parameter {} in yaml.\n".format(k)) - else: - for k, v in yml_config.items(): - return_dict[k] = v - - args = args if args != None else return_dict - return argparse.Namespace(**args) diff --git a/spaces/AIatUIUC/CodeLATS/executors/executor_utils.py b/spaces/AIatUIUC/CodeLATS/executors/executor_utils.py deleted file mode 100644 index dd8d0698c955b8638b0426feb748663bef795c3c..0000000000000000000000000000000000000000 --- a/spaces/AIatUIUC/CodeLATS/executors/executor_utils.py +++ /dev/null @@ -1,46 +0,0 @@ - -def timeout_handler(_, __): - raise TimeoutError() - -import os, json -def to_jsonl(dict_data, file_path): - with open(file_path, 'a') as file: - json_line = json.dumps(dict_data) - file.write(json_line + os.linesep) - -from threading import Thread -class PropagatingThread(Thread): - def run(self): - self.exc = None - try: - if hasattr(self, '_Thread__target'): - # Thread uses name mangling prior to Python 3. 
- self.ret = self._Thread__target(*self._Thread__args, **self._Thread__kwargs) - else: - self.ret = self._target(*self._args, **self._kwargs) - except BaseException as e: - self.exc = e - - def join(self, timeout=None): - super(PropagatingThread, self).join(timeout) - if self.exc: - raise self.exc - return self.ret - - -def function_with_timeout(func, args, timeout): - result_container = [] - - def wrapper(): - result_container.append(func(*args)) - - thread = PropagatingThread(target=wrapper) - thread.start() - thread.join(timeout) - - if thread.is_alive(): - raise TimeoutError() - else: - return result_container[0] - - diff --git a/spaces/AchyuthGamer/ImMagician/app.py b/spaces/AchyuthGamer/ImMagician/app.py deleted file mode 100644 index 8857385b4f68cdf53b40ea968878061302b8a0b3..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/ImMagician/app.py +++ /dev/null @@ -1,190 +0,0 @@ -import gradio as gr -import os -import sys -from pathlib import Path -import random -import string -import time -from queue import Queue -from threading import Thread -import emoji - -text_gen=gr.Interface.load("spaces/AchyuthGamer/MagicPrompt-Stable-Diffusion") -def get_prompts(prompt_text): - if prompt_text: - return text_gen(prompt_text + ", realistic, 8k, cyberpunk, highly detailed, ultra super realism, realism, high graphics, key visual, intricate, highly detailed, breathtaking beauty, precise lineart, vibrant, comprehensive cinematic, trending on DIGITAL ART WEBSITE, best quality, ultra sharp focus, 8k, artgerm") - else: - return text_gen("") -proc1=gr.Interface.load("models/AchyuthGamer/ImMagician-Fantasy") - -def restart_script_periodically(): - while True: - random_time = random.randint(540, 600) - time.sleep(random_time) - os.execl(sys.executable, sys.executable, *sys.argv) - - -restart_thread = Thread(target=restart_script_periodically, daemon=True) -restart_thread.start() - - -queue = Queue() -queue_threshold = 50 - -#Don't add noise to the first picture no matter what (the point of noise is to get varied outputs, the first one doesn't need to vary about anything) -def noadd_random_noise(prompt, noise_level=0.00): - if noise_level == 0: - noise_level = 0.00 - percentage_noise = noise_level * 5 - num_noise_chars = int(len(prompt) * (percentage_noise/100)) - noise_indices = random.sample(range(len(prompt)), num_noise_chars) - prompt_list = list(prompt) - noise_chars = list(string.ascii_letters + string.punctuation + '' + string.digits) - noise_chars.extend(['']) - for index in noise_indices: - prompt_list[index] = random.choice(noise_chars) - return "".join(prompt_list) - -#normal behavior -def add_random_noise(prompt, noise_level=0.00): - if noise_level == 0: - noise_level = 0.00 - percentage_noise = noise_level * 5 - num_noise_chars = int(len(prompt) * (percentage_noise/100)) - noise_indices = random.sample(range(len(prompt)), num_noise_chars) - prompt_list = list(prompt) - noise_chars = list(string.ascii_letters + string.punctuation + ' ' + string.digits) - noise_chars.extend(['😍', 'beautiful', '😂', '🤔', '😊', '🤗', '😭', '🙄', 'pretty', '🤯', '🤫', '🥴', 'sitting', '🤩', '🥳', '😔', '😩', '🤪', '😇', 'retro', '😈', '👹', 'masterpiece', '🤖', '👽', 'high quality', '🎃', '🎅', '🎄', '🎁', '🎂', '🎉', '🎈', '🎊', '🎮', '❤️', '💔', '💕', '💖', '💗', '🐶', '🐱', 'visible', '🐹', '🦊', '🐻', '🐨', '🐯', '🦁', '🐘', '🔥', '🌧️', '🌞', '🌈', '💥', '🌴', '🌊', '🌺', '🌻', '🌸', '🎨', '🌅', '🌌', '☁️', '⛈️', '❄️', '☀️', '🌤️', '⛅️', '🌥️', '🌦️', '🌧️', '🌩️', '🌨️', '🌫️', '☔️', '🌬️', '💨', '🌪️', 'cute', 'kawaii', 'little', 'photo', 'movie', 
'still']) - for index in noise_indices: - prompt_list[index] = random.choice(noise_chars) - return "".join(prompt_list) - -def send_it1(inputs, noise_level, proc1=proc1): - prompt_with_noise = noadd_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(2) - queue.put(prompt_with_noise) - output1 = proc1(prompt_with_noise) - return output1 - -def send_it2(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(2) - queue.put(prompt_with_noise) - output2 = proc1(prompt_with_noise) - return output2 - -def send_it3(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(2) - queue.put(prompt_with_noise) - output3 = proc1(prompt_with_noise) - return output3 - -def send_it4(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(2) - queue.put(prompt_with_noise) - output4 = proc1(prompt_with_noise) - return output4 - -with gr.Blocks(css='style.css') as demo: - gr.HTML( - """ -
                  -
                  - - -

                  ImMagician

                  -
                  - -
                  -

                  - 🤗 Celebrating 10000 views at blogger! 🤗

                  -

                  - ❤️ Made by Achyuth! ❤️ -

                  -
                  - """ - ) - with gr.Column(elem_id="col-container"): - with gr.Row(variant="compact"): - input_text = gr.Textbox( - label="Short Prompt", - show_label=False, - max_lines=20, - placeholder="Enter a basic idea and click 'Magic Prompt'. Got no ideas? No problem, Simply just hit the magic button!", - ).style( - container=False,min_width=1200 - ) - see_prompts = gr.Button("✨Magic✨ ✨Prompt✨").style(full_width=False) - - - with gr.Row(variant="compact"): - prompt = gr.Textbox( - label="Enter your prompt", - show_label=False, - max_lines=20, - placeholder="Full Prompt", - ).style( - container=False, - ) - run = gr.Button("Generate Images").style(full_width=False) - with gr.Row(): - with gr.Row(): - #Now that the first box generates a picture with noise=0 having the default at 0 makes no sense as it'd generate the same image 6 times. - noise_level = gr.Slider(minimum=0.5, maximum=3, step=0.1, label="Noise Level (0.1 or less was generating the same pic 6 times! 🤣)") - gr.HTML( - """ -
                  -
                  - -

                  Please allow up to 1 minute for each image to generate, for a total of 6 minutes max.

                  -
                  - -
                  -
                  - """ - ) - with gr.Row(): - with gr.Row(): - output1=gr.Image(label="ImMagician", show_label=False, min_width=640, object_fit="contain", height="auto", download=True) - output2=gr.Image(label="ImMagician", show_label=False, min_width=640, object_fit="contain", height="auto", download=True) - with gr.Row(): - with gr.Row(): - output3=gr.Image(label="ImMagician", show_label=False, min_width=640, object_fit="contain", height="auto", download=True) - output4=gr.Image(label="ImMagician", show_label=False, min_width=640, object_fit="contain", height="auto", download=True) - - see_prompts.click(get_prompts, inputs=[input_text], outputs=[prompt], queue=False) - run.click(send_it1, inputs=[prompt, noise_level], outputs=[output1]) - run.click(send_it2, inputs=[prompt, noise_level], outputs=[output2]) - run.click(send_it3, inputs=[prompt, noise_level], outputs=[output3]) - run.click(send_it4, inputs=[prompt, noise_level], outputs=[output4]) - - with gr.Row(): - gr.HTML( - """ - -
                  -

                  Unleash your creative side and generate mesmerizing images with just a few clicks! Enter a spark of inspiration in the "Basic Idea" text box and click the "Magic Prompt" button to elevate it to a polished masterpiece. Make any final tweaks in the "Full Prompt" box and hit the "Generate Images" button to watch your vision come to life. Experiment with the "Noise Level" for a diverse range of outputs, from similar to wildly unique. Let the fun begin! -

                  -
                  - """ -) - - demo.launch(enable_queue=True, inline=True) - block.queue(concurrency_count=50) \ No newline at end of file diff --git a/spaces/Adapter/CoAdapter/style.css b/spaces/Adapter/CoAdapter/style.css deleted file mode 100644 index c4739b4ea5fc35e774a049e3dacc443f7f0eac19..0000000000000000000000000000000000000000 --- a/spaces/Adapter/CoAdapter/style.css +++ /dev/null @@ -1,3 +0,0 @@ -h1 { - text-align: center; -} diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/ObjectFactory.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/ObjectFactory.js deleted file mode 100644 index 56a37cdc697266749a0c400fb94b2bf9c8bf3896..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/ObjectFactory.js +++ /dev/null @@ -1,20 +0,0 @@ -class ObjectFactory { - constructor(scene) { - this.scene = scene; - this.displayList = scene.sys.displayList; - this.updateList = scene.sys.updateList; - - scene.events.once('destroy', this.destroy, this); - } - - destroy() { - this.scene = null; - this.displayList = null; - this.updateList = null; - } - - static register(type, callback) { - ObjectFactory.prototype[type] = callback; - } -}; -export default ObjectFactory; \ No newline at end of file diff --git a/spaces/Alex132/togethercomputer-LLaMA-2-7B-32K/README.md b/spaces/Alex132/togethercomputer-LLaMA-2-7B-32K/README.md deleted file mode 100644 index 2e514703f2ed9bbf674757b0ec23561cab3e4c85..0000000000000000000000000000000000000000 --- a/spaces/Alex132/togethercomputer-LLaMA-2-7B-32K/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Togethercomputer LLaMA 2 7B 32K -emoji: 🦀 -colorFrom: gray -colorTo: indigo -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AlexWang/lama/models/ade20k/segm_lib/nn/modules/comm.py b/spaces/AlexWang/lama/models/ade20k/segm_lib/nn/modules/comm.py deleted file mode 100644 index b64bf6ba3b3e7abbab375c6dd4a87d8239e62138..0000000000000000000000000000000000000000 --- a/spaces/AlexWang/lama/models/ade20k/segm_lib/nn/modules/comm.py +++ /dev/null @@ -1,131 +0,0 @@ -# -*- coding: utf-8 -*- -# File : comm.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. - -import queue -import collections -import threading - -__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster'] - - -class FutureResult(object): - """A thread-safe future implementation. Used only as one-to-one pipe.""" - - def __init__(self): - self._result = None - self._lock = threading.Lock() - self._cond = threading.Condition(self._lock) - - def put(self, result): - with self._lock: - assert self._result is None, 'Previous result has\'t been fetched.' 
- self._result = result - self._cond.notify() - - def get(self): - with self._lock: - if self._result is None: - self._cond.wait() - - res = self._result - self._result = None - return res - - -_MasterRegistry = collections.namedtuple('MasterRegistry', ['result']) -_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result']) - - -class SlavePipe(_SlavePipeBase): - """Pipe for master-slave communication.""" - - def run_slave(self, msg): - self.queue.put((self.identifier, msg)) - ret = self.result.get() - self.queue.put(True) - return ret - - -class SyncMaster(object): - """An abstract `SyncMaster` object. - - - During the replication, as the data parallel will trigger an callback of each module, all slave devices should - call `register(id)` and obtain an `SlavePipe` to communicate with the master. - - During the forward pass, master device invokes `run_master`, all messages from slave devices will be collected, - and passed to a registered callback. - - After receiving the messages, the master device should gather the information and determine to message passed - back to each slave devices. - """ - - def __init__(self, master_callback): - """ - - Args: - master_callback: a callback to be invoked after having collected messages from slave devices. - """ - self._master_callback = master_callback - self._queue = queue.Queue() - self._registry = collections.OrderedDict() - self._activated = False - - def register_slave(self, identifier): - """ - Register an slave device. - - Args: - identifier: an identifier, usually is the device id. - - Returns: a `SlavePipe` object which can be used to communicate with the master device. - - """ - if self._activated: - assert self._queue.empty(), 'Queue is not clean before next initialization.' - self._activated = False - self._registry.clear() - future = FutureResult() - self._registry[identifier] = _MasterRegistry(future) - return SlavePipe(identifier, self._queue, future) - - def run_master(self, master_msg): - """ - Main entry for the master device in each forward pass. - The messages were first collected from each devices (including the master device), and then - an callback will be invoked to compute the message to be sent back to each devices - (including the master device). - - Args: - master_msg: the message that the master want to send to itself. This will be placed as the first - message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example. - - Returns: the message to be sent back to the master device. - - """ - self._activated = True - - intermediates = [(0, master_msg)] - for i in range(self.nr_slaves): - intermediates.append(self._queue.get()) - - results = self._master_callback(intermediates) - assert results[0][0] == 0, 'The first result should belongs to the master.' - - for i, res in results: - if i == 0: - continue - self._registry[i].result.put(res) - - for i in range(self.nr_slaves): - assert self._queue.get() is True - - return results[0][1] - - @property - def nr_slaves(self): - return len(self._registry) diff --git a/spaces/Aloento/9Nine-PITS/text/frontend/generate_lexicon.py b/spaces/Aloento/9Nine-PITS/text/frontend/generate_lexicon.py deleted file mode 100644 index 06fd8d6bbf1231b8b666c5420c4de5b9e25ea0fc..0000000000000000000000000000000000000000 --- a/spaces/Aloento/9Nine-PITS/text/frontend/generate_lexicon.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# Design principles: https://zhuanlan.zhihu.com/p/349600439 -"""Generate lexicon and symbols for Mandarin Chinese phonology. -The lexicon is used for Montreal Force Aligner. -Note that syllables are used as word in this lexicon. Since syllables rather -than words are used in transcriptions produced by `reorganize_baker.py`. -We make this choice to better leverage other software for chinese text to -pinyin tools like pypinyin. This is the convention for G2P in Chinese. -""" -import re -from collections import OrderedDict - -INITIALS = [ - 'b', 'p', 'm', 'f', 'd', 't', 'n', 'l', 'g', 'k', 'h', 'zh', 'ch', 'sh', - 'r', 'z', 'c', 's', 'j', 'q', 'x' -] - -FINALS = [ - 'a', 'ai', 'ao', 'an', 'ang', 'e', 'er', 'ei', 'en', 'eng', 'o', 'ou', - 'ong', 'ii', 'iii', 'i', 'ia', 'iao', 'ian', 'iang', 'ie', 'io', 'iou', - 'iong', 'in', 'ing', 'u', 'ua', 'uai', 'uan', 'uang', 'uei', 'uo', 'uen', - 'ueng', 'v', 've', 'van', 'vn' -] - -SPECIALS = ['sil', 'sp'] - - -def rule(C, V, R, T): - """Generate a syllable given the initial, the final, erhua indicator, and tone. - Orthographical rules for pinyin are applied. (special case for y, w, ui, un, iu) - - Note that in this system, 'ü' is alway written as 'v' when appeared in phoneme, but converted to - 'u' in syllables when certain conditions are satisfied. - - 'i' is distinguished when appeared in phonemes, and separated into 3 categories, 'i', 'ii' and 'iii'. - Erhua is is possibly applied to every finals, except for finals that already ends with 'r'. - When a syllable is impossible or does not have any characters with this pronunciation, return None - to filter it out. 
- """ - - # 不可拼的音节, ii 只能和 z, c, s 拼 - if V in ["ii"] and (C not in ['z', 'c', 's']): - return None - # iii 只能和 zh, ch, sh, r 拼 - if V in ['iii'] and (C not in ['zh', 'ch', 'sh', 'r']): - return None - - # 齐齿呼或者撮口呼不能和 f, g, k, h, zh, ch, sh, r, z, c, s - if (V not in ['ii', 'iii']) and V[0] in ['i', 'v'] and ( - C in ['f', 'g', 'k', 'h', 'zh', 'ch', 'sh', 'r', 'z', 'c', 's']): - return None - - # 撮口呼只能和 j, q, x l, n 拼 - if V.startswith("v"): - # v, ve 只能和 j ,q , x, n, l 拼 - if V in ['v', 've']: - if C not in ['j', 'q', 'x', 'n', 'l', '']: - return None - # 其他只能和 j, q, x 拼 - else: - if C not in ['j', 'q', 'x', '']: - return None - - # j, q, x 只能和齐齿呼或者撮口呼拼 - if (C in ['j', 'q', 'x']) and not ( - (V not in ['ii', 'iii']) and V[0] in ['i', 'v']): - return None - - # b, p ,m, f 不能和合口呼拼,除了 u 之外 - # bm p, m, f 不能和撮口呼拼 - if (C in ['b', 'p', 'm', 'f']) and ((V[0] in ['u', 'v'] and V != "u") or - V == 'ong'): - return None - - # ua, uai, uang 不能和 d, t, n, l, r, z, c, s 拼 - if V in ['ua', 'uai', - 'uang'] and C in ['d', 't', 'n', 'l', 'r', 'z', 'c', 's']: - return None - - # sh 和 ong 不能拼 - if V == 'ong' and C in ['sh']: - return None - - # o 和 gkh, zh ch sh r z c s 不能拼 - if V == "o" and C in [ - 'd', 't', 'n', 'g', 'k', 'h', 'zh', 'ch', 'sh', 'r', 'z', 'c', 's' - ]: - return None - - # ueng 只是 weng 这个 ad-hoc 其他情况下都是 ong - if V == 'ueng' and C != '': - return - - # 非儿化的 er 只能单独存在 - if V == 'er' and C != '': - return None - - if C == '': - if V in ["i", "in", "ing"]: - C = 'y' - elif V == 'u': - C = 'w' - elif V.startswith('i') and V not in ["ii", "iii"]: - C = 'y' - V = V[1:] - elif V.startswith('u'): - C = 'w' - V = V[1:] - elif V.startswith('v'): - C = 'yu' - V = V[1:] - else: - if C in ['j', 'q', 'x']: - if V.startswith('v'): - V = re.sub('v', 'u', V) - if V == 'iou': - V = 'iu' - elif V == 'uei': - V = 'ui' - elif V == 'uen': - V = 'un' - result = C + V - - # Filter er 不能再儿化 - if result.endswith('r') and R == 'r': - return None - - # ii and iii, change back to i - result = re.sub(r'i+', 'i', result) - - result = result + R + T - return result - - -def generate_lexicon(with_tone=False, with_erhua=False): - """Generate lexicon for Mandarin Chinese.""" - syllables = OrderedDict() - - for C in [''] + INITIALS: - for V in FINALS: - for R in [''] if not with_erhua else ['', 'r']: - for T in [''] if not with_tone else ['1', '2', '3', '4', '5']: - result = rule(C, V, R, T) - if result: - syllables[result] = f'{C} {V}{R}{T}' - return syllables diff --git a/spaces/Amrrs/DragGan-Inversion/stylegan_human/edit.py b/spaces/Amrrs/DragGan-Inversion/stylegan_human/edit.py deleted file mode 100644 index 778cb3283bde9466d53ad4605d5a6426869cb584..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/stylegan_human/edit.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. - -from edit.edit_helper import conv_warper, decoder, encoder_ifg, encoder_ss, encoder_sefa -import legacy -import subprocess -from typing import List, Optional -import cv2 -import click -from torch_utils.models import Generator -import os -import sys -import torch -import numpy as np -sys.path.append(".") - - -""" -Edit generated images with different SOTA methods. - Notes: - 1. We provide some latent directions in the folder, you can play around with them. - 2. ''upper_length'' and ''bottom_length'' of ''attr_name'' are available for demo. - 3. Layers to control and editing strength are set in edit/edit_config.py. 
- -Examples: - -\b -# Editing with InterfaceGAN, StyleSpace, and Sefa -python edit.py --network pretrained_models/stylegan_human_v2_1024.pkl --attr_name upper_length \\ - --seeds 61531,61570,61571,61610 --outdir outputs/edit_results - - -# Editing using inverted latent code -python edit.py ---network outputs/pti/checkpoints/model_test.pkl --attr_name upper_length \\ - --outdir outputs/edit_results --real True --real_w_path outputs/pti/embeddings/test/PTI/test/0.pt --real_img_path aligned_image/test.png - -""" - - -@click.command() -@click.pass_context -@click.option('--network', 'ckpt_path', help='Network pickle filename', required=True) -@click.option('--attr_name', help='choose one of the attr: upper_length or bottom_length', type=str, required=True) -@click.option('--trunc', 'truncation', type=float, help='Truncation psi', default=0.8, show_default=True) -@click.option('--gen_video', type=bool, default=True, help='If want to generate video') -@click.option('--combine', type=bool, default=True, help='If want to combine different editing results in the same frame') -@click.option('--seeds', type=legacy.num_range, help='List of random seeds') -@click.option('--outdir', help='Where to save the output images', type=str, required=True, default='outputs/editing', metavar='DIR') -@click.option('--real', type=bool, help='True for editing real image', default=False) -@click.option('--real_w_path', help='Path of latent code for real image') -@click.option('--real_img_path', help='Path of real image, this just concat real image with inverted and edited results together') -def main( - ctx: click.Context, - ckpt_path: str, - attr_name: str, - truncation: float, - gen_video: bool, - combine: bool, - seeds: Optional[List[int]], - outdir: str, - real: str, - real_w_path: str, - real_img_path: str -): - # convert pkl to pth - # if not os.path.exists(ckpt_path.replace('.pkl','.pth')): - legacy.convert(ckpt_path, ckpt_path.replace('.pkl', '.pth'), G_only=real) - ckpt_path = ckpt_path.replace('.pkl', '.pth') - print("start...", flush=True) - config = {"latent": 512, "n_mlp": 8, "channel_multiplier": 2} - generator = Generator( - size=1024, - style_dim=config["latent"], - n_mlp=config["n_mlp"], - channel_multiplier=config["channel_multiplier"] - ) - - generator.load_state_dict(torch.load(ckpt_path)['g_ema']) - generator.eval().cuda() - - with torch.no_grad(): - mean_path = os.path.join('edit', 'mean_latent.pkl') - if not os.path.exists(mean_path): - mean_n = 3000 - mean_latent = generator.mean_latent(mean_n).detach() - legacy.save_obj(mean_latent, mean_path) - else: - mean_latent = legacy.load_pkl(mean_path).cuda() - finals = [] - - ## -- selected sample seeds -- ## - # seeds = [60948,60965,61174,61210,61511,61598,61610] #bottom -> long - # [60941,61064,61103,61313,61531,61570,61571] # bottom -> short - # [60941,60965,61064,61103,6117461210,61531,61570,61571,61610] # upper --> long - # [60948,61313,61511,61598] # upper --> short - if real: - seeds = [0] - - for t in seeds: - if real: # now assume process single real image only - if real_img_path: - real_image = cv2.imread(real_img_path) - real_image = cv2.cvtColor(real_image, cv2.COLOR_BGR2RGB) - import torchvision.transforms as transforms - transform = transforms.Compose( # normalize to (-1, 1) - [transforms.ToTensor(), - transforms.Normalize(mean=(.5, .5, .5), std=(.5, .5, .5))] - ) - real_image = transform(real_image).unsqueeze(0).cuda() - - test_input = torch.load(real_w_path) - output, _ = generator( - test_input, False, truncation=1, 
input_is_latent=True, real=True) - - else: # generate image from random seeds - test_input = torch.from_numpy(np.random.RandomState( - t).randn(1, 512)).float().cuda() # torch.Size([1, 512]) - output, _ = generator( - [test_input], False, truncation=truncation, truncation_latent=mean_latent, real=real) - - # interfacegan - style_space, latent, noise = encoder_ifg( - generator, test_input, attr_name, truncation, mean_latent, real=real) - image1 = decoder(generator, style_space, latent, noise) - # stylespace - style_space, latent, noise = encoder_ss( - generator, test_input, attr_name, truncation, mean_latent, real=real) - image2 = decoder(generator, style_space, latent, noise) - # sefa - latent, noise = encoder_sefa( - generator, test_input, attr_name, truncation, mean_latent, real=real) - image3, _ = generator([latent], noise=noise, input_is_latent=True) - if real_img_path: - final = torch.cat( - (real_image, output, image1, image2, image3), 3) - else: - final = torch.cat((output, image1, image2, image3), 3) - - # legacy.visual(output, f'{outdir}/{attr_name}_{t:05d}_raw.jpg') - # legacy.visual(image1, f'{outdir}/{attr_name}_{t:05d}_ifg.jpg') - # legacy.visual(image2, f'{outdir}/{attr_name}_{t:05d}_ss.jpg') - # legacy.visual(image3, f'{outdir}/{attr_name}_{t:05d}_sefa.jpg') - - if gen_video: - total_step = 90 - if real: - video_ifg_path = f"{outdir}/video/ifg_{attr_name}_{real_w_path.split('/')[-2]}/" - video_ss_path = f"{outdir}/video/ss_{attr_name}_{real_w_path.split('/')[-2]}/" - video_sefa_path = f"{outdir}/video/ss_{attr_name}_{real_w_path.split('/')[-2]}/" - else: - video_ifg_path = f"{outdir}/video/ifg_{attr_name}_{t:05d}/" - video_ss_path = f"{outdir}/video/ss_{attr_name}_{t:05d}/" - video_sefa_path = f"{outdir}/video/ss_{attr_name}_{t:05d}/" - video_comb_path = f"{outdir}/video/tmp" - - if combine: - if not os.path.exists(video_comb_path): - os.makedirs(video_comb_path) - else: - if not os.path.exists(video_ifg_path): - os.makedirs(video_ifg_path) - if not os.path.exists(video_ss_path): - os.makedirs(video_ss_path) - if not os.path.exists(video_sefa_path): - os.makedirs(video_sefa_path) - for i in range(total_step): - style_space, latent, noise = encoder_ifg( - generator, test_input, attr_name, truncation, mean_latent, step=i, total=total_step, real=real) - image1 = decoder(generator, style_space, latent, noise) - style_space, latent, noise = encoder_ss( - generator, test_input, attr_name, truncation, mean_latent, step=i, total=total_step, real=real) - image2 = decoder(generator, style_space, latent, noise) - latent, noise = encoder_sefa( - generator, test_input, attr_name, truncation, mean_latent, step=i, total=total_step, real=real) - image3, _ = generator( - [latent], noise=noise, input_is_latent=True) - if combine: - if real_img_path: - comb_img = torch.cat( - (real_image, output, image1, image2, image3), 3) - else: - comb_img = torch.cat( - (output, image1, image2, image3), 3) - legacy.visual(comb_img, os.path.join( - video_comb_path, f'{i:05d}.jpg')) - else: - legacy.visual(image1, os.path.join( - video_ifg_path, f'{i:05d}.jpg')) - legacy.visual(image2, os.path.join( - video_ss_path, f'{i:05d}.jpg')) - if combine: - cmd = f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {video_comb_path}/%05d.jpg -vcodec libx264 -pix_fmt yuv420p {video_ifg_path.replace('ifg_', '')[:-1] + '.mp4'}" - subprocess.call(cmd, shell=True) - else: - cmd = f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {video_ifg_path}/%05d.jpg -vcodec libx264 -pix_fmt yuv420p {video_ifg_path[:-1] + '.mp4'}" - 
subprocess.call(cmd, shell=True) - cmd = f"ffmpeg -hide_banner -loglevel error -y -r 30 -i {video_ss_path}/%05d.jpg -vcodec libx264 -pix_fmt yuv420p {video_ss_path[:-1] + '.mp4'}" - subprocess.call(cmd, shell=True) - - # interfacegan, stylespace, sefa - finals.append(final) - - final = torch.cat(finals, 2) - legacy.visual(final, os.path.join(outdir, 'final.jpg')) - - -if __name__ == "__main__": - main() diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py deleted file mode 100644 index 1fcb88b78f149ef438e725bc7767407890733726..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py +++ /dev/null @@ -1,717 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -import warnings -from typing import Any, Callable, Dict, List, Optional, Union - -import torch -from packaging import version -from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer - -from ...configuration_utils import FrozenDict -from ...image_processor import VaeImageProcessor -from ...loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin -from ...models import AutoencoderKL, UNet2DConditionModel -from ...schedulers import KarrasDiffusionSchedulers -from ...utils import ( - deprecate, - is_accelerate_available, - is_accelerate_version, - logging, - randn_tensor, - replace_example_docstring, -) -from ..pipeline_utils import DiffusionPipeline -from . import StableDiffusionPipelineOutput -from .safety_checker import StableDiffusionSafetyChecker - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - -EXAMPLE_DOC_STRING = """ - Examples: - ```py - >>> import torch - >>> from diffusers import StableDiffusionPipeline - - >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) - >>> pipe = pipe.to("cuda") - - >>> prompt = "a photo of an astronaut riding a horse on mars" - >>> image = pipe(prompt).images[0] - ``` -""" - - -def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): - """ - Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and - Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). 
See Section 3.4 - """ - std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) - std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) - # rescale the results from guidance (fixes overexposure) - noise_pred_rescaled = noise_cfg * (std_text / std_cfg) - # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images - noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg - return noise_cfg - - -class StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin): - r""" - Pipeline for text-to-image generation using Stable Diffusion. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods - implemented for all pipelines (downloading, saving, running on a particular device, etc.). - - The pipeline also inherits the following loading methods: - - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights - - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights - - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files - - Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. - text_encoder ([`~transformers.CLIPTextModel`]): - Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). - tokenizer ([`~transformers.CLIPTokenizer`]): - A `CLIPTokenizer` to tokenize text. - unet ([`UNet2DConditionModel`]): - A `UNet2DConditionModel` to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. - safety_checker ([`StableDiffusionSafetyChecker`]): - Classification module that estimates whether generated images could be considered offensive or harmful. - Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details - about a model's potential harms. - feature_extractor ([`~transformers.CLIPImageProcessor`]): - A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. - """ - _optional_components = ["safety_checker", "feature_extractor"] - - def __init__( - self, - vae: AutoencoderKL, - text_encoder: CLIPTextModel, - tokenizer: CLIPTokenizer, - unet: UNet2DConditionModel, - scheduler: KarrasDiffusionSchedulers, - safety_checker: StableDiffusionSafetyChecker, - feature_extractor: CLIPImageProcessor, - requires_safety_checker: bool = True, - ): - super().__init__() - - if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: - deprecation_message = ( - f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" - f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " - "to update the config accordingly as leaving `steps_offset` might led to incorrect results" - " in future versions. 
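# [Editor's note] A small self-check for rescale_noise_cfg defined above (editor's sketch,
# not part of the deleted file; tensor shapes are arbitrary). With guidance_rescale=1.0 the
# output is rescaled to the per-sample std of the text-conditioned prediction; with 0.0 the
# classifier-free-guidance result is returned unchanged.
import torch

noise_cfg = torch.randn(2, 4, 8, 8) * 3.0        # deliberately over-saturated CFG prediction
noise_pred_text = torch.randn(2, 4, 8, 8)

rescaled = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=1.0)
assert torch.allclose(rescaled.std(dim=(1, 2, 3)), noise_pred_text.std(dim=(1, 2, 3)), atol=1e-4)
assert torch.allclose(rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0), noise_cfg)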
If you have downloaded this checkpoint from the Hugging Face Hub," - " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" - " file" - ) - deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(scheduler.config) - new_config["steps_offset"] = 1 - scheduler._internal_dict = FrozenDict(new_config) - - if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: - deprecation_message = ( - f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." - " `clip_sample` should be set to False in the configuration file. Please make sure to update the" - " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" - " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" - " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" - ) - deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(scheduler.config) - new_config["clip_sample"] = False - scheduler._internal_dict = FrozenDict(new_config) - - if safety_checker is None and requires_safety_checker: - logger.warning( - f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" - " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" - " results in services or applications open to the public. Both the diffusers team and Hugging Face" - " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" - " it only for use-cases that involve analyzing network behavior or auditing its results. For more" - " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." - ) - - if safety_checker is not None and feature_extractor is None: - raise ValueError( - "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" - " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." - ) - - is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( - version.parse(unet.config._diffusers_version).base_version - ) < version.parse("0.9.0.dev0") - is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 - if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: - deprecation_message = ( - "The configuration file of the unet has set the default `sample_size` to smaller than" - " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" - " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" - " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" - " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" - " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" - " in the config might lead to incorrect results in future versions. 
If you have downloaded this" - " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" - " the `unet/config.json` file" - ) - deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(unet.config) - new_config["sample_size"] = 64 - unet._internal_dict = FrozenDict(new_config) - - self.register_modules( - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) - self.register_to_config(requires_safety_checker=requires_safety_checker) - - def enable_vae_slicing(self): - r""" - Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to - compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. - """ - self.vae.enable_slicing() - - def disable_vae_slicing(self): - r""" - Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to - computing decoding in one step. - """ - self.vae.disable_slicing() - - def enable_vae_tiling(self): - r""" - Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to - compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow - processing larger images. - """ - self.vae.enable_tiling() - - def disable_vae_tiling(self): - r""" - Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to - computing decoding in one step. - """ - self.vae.disable_tiling() - - def enable_model_cpu_offload(self, gpu_id=0): - r""" - Offload all models to CPU to reduce memory usage with a low impact on performance. Moves one whole model at a - time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs. - Memory savings are lower than using `enable_sequential_cpu_offload`, but performance is much better due to the - iterative execution of the `unet`. - """ - if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): - from accelerate import cpu_offload_with_hook - else: - raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") - - device = torch.device(f"cuda:{gpu_id}") - - if self.device.type != "cpu": - self.to("cpu", silence_dtype_warnings=True) - torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) - - hook = None - for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]: - _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) - - if self.safety_checker is not None: - _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook) - - # We'll offload the last model manually. - self.final_offload_hook = hook - - def _encode_prompt( - self, - prompt, - device, - num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt=None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - lora_scale: Optional[float] = None, - ): - r""" - Encodes the prompt into text encoder hidden states. 
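# [Editor's note] The memory-saving switches defined above are opt-in and can be combined;
# a typical low-VRAM configuration looks like the following (editor's sketch, assuming an
# instantiated `pipe` and accelerate >= 0.17.0 installed for CPU offload):
pipe.enable_vae_slicing()        # decode the VAE output in slices
pipe.enable_vae_tiling()         # tile the VAE for very large images
pipe.enable_model_cpu_offload()  # keep only the active sub-model on the GPU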
- - Args: - prompt (`str` or `List[str]`, *optional*): - prompt to be encoded - device: (`torch.device`): - torch device - num_images_per_prompt (`int`): - number of images that should be generated per prompt - do_classifier_free_guidance (`bool`): - whether to use classifier free guidance or not - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. If not defined, one has to pass - `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is - less than `1`). - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt - weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input - argument. - lora_scale (`float`, *optional*): - A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. - """ - # set lora scale so that monkey patched LoRA - # function of text encoder can correctly access it - if lora_scale is not None and isinstance(self, LoraLoaderMixin): - self._lora_scale = lora_scale - - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - if prompt_embeds is None: - # textual inversion: procecss multi-vector tokens if necessary - if isinstance(self, TextualInversionLoaderMixin): - prompt = self.maybe_convert_prompt(prompt, self.tokenizer) - - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - - if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( - text_input_ids, untruncated_ids - ): - removed_text = self.tokenizer.batch_decode( - untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] - ) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}" - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = text_inputs.attention_mask.to(device) - else: - attention_mask = None - - prompt_embeds = self.text_encoder( - text_input_ids.to(device), - attention_mask=attention_mask, - ) - prompt_embeds = prompt_embeds[0] - - prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) - - bs_embed, seq_len, _ = prompt_embeds.shape - # duplicate text embeddings for each generation per prompt, using mps friendly method - prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) - prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) - - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance and negative_prompt_embeds is None: - uncond_tokens: List[str] - if negative_prompt is None: - uncond_tokens = [""] * batch_size - elif prompt is not None and type(prompt) is not type(negative_prompt): - raise 
TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}." - ) - elif isinstance(negative_prompt, str): - uncond_tokens = [negative_prompt] - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." - ) - else: - uncond_tokens = negative_prompt - - # textual inversion: procecss multi-vector tokens if necessary - if isinstance(self, TextualInversionLoaderMixin): - uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) - - max_length = prompt_embeds.shape[1] - uncond_input = self.tokenizer( - uncond_tokens, - padding="max_length", - max_length=max_length, - truncation=True, - return_tensors="pt", - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = uncond_input.attention_mask.to(device) - else: - attention_mask = None - - negative_prompt_embeds = self.text_encoder( - uncond_input.input_ids.to(device), - attention_mask=attention_mask, - ) - negative_prompt_embeds = negative_prompt_embeds[0] - - if do_classifier_free_guidance: - # duplicate unconditional embeddings for each generation per prompt, using mps friendly method - seq_len = negative_prompt_embeds.shape[1] - - negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) - - negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) - negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) - - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) - - return prompt_embeds - - def run_safety_checker(self, image, device, dtype): - if self.safety_checker is None: - has_nsfw_concept = None - else: - if torch.is_tensor(image): - feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") - else: - feature_extractor_input = self.image_processor.numpy_to_pil(image) - safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) - image, has_nsfw_concept = self.safety_checker( - images=image, clip_input=safety_checker_input.pixel_values.to(dtype) - ) - return image, has_nsfw_concept - - def decode_latents(self, latents): - warnings.warn( - "The decode_latents method is deprecated and will be removed in a future version. Please" - " use VaeImageProcessor instead", - FutureWarning, - ) - latents = 1 / self.vae.config.scaling_factor * latents - image = self.vae.decode(latents, return_dict=False)[0] - image = (image / 2 + 0.5).clamp(0, 1) - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 - image = image.cpu().permute(0, 2, 3, 1).float().numpy() - return image - - def prepare_extra_step_kwargs(self, generator, eta): - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - return extra_step_kwargs - - def check_inputs( - self, - prompt, - height, - width, - callback_steps, - negative_prompt=None, - prompt_embeds=None, - negative_prompt_embeds=None, - ): - if height % 8 != 0 or width % 8 != 0: - raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") - - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." - ) - - if prompt is not None and prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" - " only forward one of the two." - ) - elif prompt is None and prompt_embeds is None: - raise ValueError( - "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." - ) - elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if negative_prompt is not None and negative_prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" - f" {negative_prompt_embeds}. Please make sure to only forward one of the two." - ) - - if prompt_embeds is not None and negative_prompt_embeds is not None: - if prompt_embeds.shape != negative_prompt_embeds.shape: - raise ValueError( - "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" - f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" - f" {negative_prompt_embeds.shape}." - ) - - def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): - shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
- ) - - if latents is None: - latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) - else: - latents = latents.to(device) - - # scale the initial noise by the standard deviation required by the scheduler - latents = latents * self.scheduler.init_noise_sigma - return latents - - @torch.no_grad() - @replace_example_docstring(EXAMPLE_DOC_STRING) - def __call__( - self, - prompt: Union[str, List[str]] = None, - height: Optional[int] = None, - width: Optional[int] = None, - num_inference_steps: int = 50, - guidance_scale: float = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_images_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - latents: Optional[torch.FloatTensor] = None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: int = 1, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - guidance_rescale: float = 0.0, - ): - r""" - The call function to the pipeline for generation. - - Args: - prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. - height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): - The width in pixels of the generated image. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 7.5): - A higher guidance scale value encourages the model to generate images closely linked to the text - `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide what to not include in image generation. If not defined, you need to - pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies - to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. - generator (`torch.Generator` or `List[torch.Generator]`, *optional*): - A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make - generation deterministic. - latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor is generated by sampling using the supplied random `generator`. - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not - provided, text embeddings are generated from the `prompt` input argument. 
- negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If - not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generated image. Choose between `PIL.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that calls every `callback_steps` steps during inference. The function is called with the - following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function is called. If not specified, the callback is called at - every step. - cross_attention_kwargs (`dict`, *optional*): - A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in - [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py). - guidance_rescale (`float`, *optional*, defaults to 0.7): - Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are - Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when - using zero terminal SNR. - - Examples: - - Returns: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, - otherwise a `tuple` is returned where the first element is a list with the generated images and the - second element is a list of `bool`s indicating whether the corresponding generated image contains - "not-safe-for-work" (nsfw) content. - """ - # 0. Default height and width to unet - height = height or self.unet.config.sample_size * self.vae_scale_factor - width = width or self.unet.config.sample_size * self.vae_scale_factor - - # 1. Check inputs. Raise error if not correct - self.check_inputs( - prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds - ) - - # 2. Define call parameters - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - device = self._execution_device - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - # 3. Encode input prompt - text_encoder_lora_scale = ( - cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None - ) - prompt_embeds = self._encode_prompt( - prompt, - device, - num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt, - prompt_embeds=prompt_embeds, - negative_prompt_embeds=negative_prompt_embeds, - lora_scale=text_encoder_lora_scale, - ) - - # 4. Prepare timesteps - self.scheduler.set_timesteps(num_inference_steps, device=device) - timesteps = self.scheduler.timesteps - - # 5. 
Prepare latent variables - num_channels_latents = self.unet.config.in_channels - latents = self.prepare_latents( - batch_size * num_images_per_prompt, - num_channels_latents, - height, - width, - prompt_embeds.dtype, - device, - generator, - latents, - ) - - # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline - extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - # 7. Denoising loop - num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order - with self.progress_bar(total=num_inference_steps) as progress_bar: - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # predict the noise residual - noise_pred = self.unet( - latent_model_input, - t, - encoder_hidden_states=prompt_embeds, - cross_attention_kwargs=cross_attention_kwargs, - return_dict=False, - )[0] - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - if do_classifier_free_guidance and guidance_rescale > 0.0: - # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf - noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - if not output_type == "latent": - image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] - image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) - else: - image = latents - has_nsfw_concept = None - - if has_nsfw_concept is None: - do_denormalize = [True] * image.shape[0] - else: - do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] - - image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) - - # Offload last model to CPU - if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: - self.final_offload_hook.offload() - - if not return_dict: - return (image, has_nsfw_concept) - - return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_pndm_flax.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_pndm_flax.py deleted file mode 100644 index c654f2de8dd3e4f96403cce4b9db8f8b7b69861f..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_pndm_flax.py +++ /dev/null @@ -1,511 +0,0 @@ -# Copyright 2023 Zhejiang University Team and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
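# [Editor's note] A slightly fuller usage sketch for the StableDiffusionPipeline defined
# above (not part of the deleted file), exercising the documented negative_prompt,
# generator and num_inference_steps arguments; the checkpoint name is the one referenced
# in the class docstring and a CUDA device is assumed.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

generator = torch.Generator("cuda").manual_seed(0)   # makes the run reproducible
image = pipe(
    "a photo of an astronaut riding a horse on mars",
    negative_prompt="blurry, low quality",
    num_inference_steps=30,
    guidance_scale=7.5,
    generator=generator,
).images[0]
image.save("astronaut.png")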
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim - -from dataclasses import dataclass -from typing import Optional, Tuple, Union - -import flax -import jax -import jax.numpy as jnp - -from ..configuration_utils import ConfigMixin, register_to_config -from .scheduling_utils_flax import ( - CommonSchedulerState, - FlaxKarrasDiffusionSchedulers, - FlaxSchedulerMixin, - FlaxSchedulerOutput, - add_noise_common, -) - - -@flax.struct.dataclass -class PNDMSchedulerState: - common: CommonSchedulerState - final_alpha_cumprod: jnp.ndarray - - # setable values - init_noise_sigma: jnp.ndarray - timesteps: jnp.ndarray - num_inference_steps: Optional[int] = None - prk_timesteps: Optional[jnp.ndarray] = None - plms_timesteps: Optional[jnp.ndarray] = None - - # running values - cur_model_output: Optional[jnp.ndarray] = None - counter: Optional[jnp.int32] = None - cur_sample: Optional[jnp.ndarray] = None - ets: Optional[jnp.ndarray] = None - - @classmethod - def create( - cls, - common: CommonSchedulerState, - final_alpha_cumprod: jnp.ndarray, - init_noise_sigma: jnp.ndarray, - timesteps: jnp.ndarray, - ): - return cls( - common=common, - final_alpha_cumprod=final_alpha_cumprod, - init_noise_sigma=init_noise_sigma, - timesteps=timesteps, - ) - - -@dataclass -class FlaxPNDMSchedulerOutput(FlaxSchedulerOutput): - state: PNDMSchedulerState - - -class FlaxPNDMScheduler(FlaxSchedulerMixin, ConfigMixin): - """ - Pseudo numerical methods for diffusion models (PNDM) proposes using more advanced ODE integration techniques, - namely Runge-Kutta method and a linear multi-step method. - - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` - function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. - [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and - [`~SchedulerMixin.from_pretrained`] functions. - - For more details, see the original paper: https://arxiv.org/abs/2202.09778 - - Args: - num_train_timesteps (`int`): number of diffusion steps used to train the model. - beta_start (`float`): the starting `beta` value of inference. - beta_end (`float`): the final `beta` value. - beta_schedule (`str`): - the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from - `linear`, `scaled_linear`, or `squaredcos_cap_v2`. - trained_betas (`jnp.ndarray`, optional): - option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. - skip_prk_steps (`bool`): - allows the scheduler to skip the Runge-Kutta steps that are defined in the original paper as being required - before plms steps; defaults to `False`. - set_alpha_to_one (`bool`, default `False`): - each diffusion step uses the value of alphas product at that step and at the previous one. For the final - step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`, - otherwise it uses the value of alpha at step 0. 
- steps_offset (`int`, default `0`): - an offset added to the inference steps. You can use a combination of `offset=1` and - `set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in - stable diffusion. - prediction_type (`str`, default `epsilon`, optional): - prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion - process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 - https://imagen.research.google/video/paper.pdf) - dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): - the `dtype` used for params and computation. - """ - - _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers] - - dtype: jnp.dtype - pndm_order: int - - @property - def has_state(self): - return True - - @register_to_config - def __init__( - self, - num_train_timesteps: int = 1000, - beta_start: float = 0.0001, - beta_end: float = 0.02, - beta_schedule: str = "linear", - trained_betas: Optional[jnp.ndarray] = None, - skip_prk_steps: bool = False, - set_alpha_to_one: bool = False, - steps_offset: int = 0, - prediction_type: str = "epsilon", - dtype: jnp.dtype = jnp.float32, - ): - self.dtype = dtype - - # For now we only support F-PNDM, i.e. the runge-kutta method - # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf - # mainly at formula (9), (12), (13) and the Algorithm 2. - self.pndm_order = 4 - - def create_state(self, common: Optional[CommonSchedulerState] = None) -> PNDMSchedulerState: - if common is None: - common = CommonSchedulerState.create(self) - - # At every step in ddim, we are looking into the previous alphas_cumprod - # For the final step, there is no previous alphas_cumprod because we are already at 0 - # `set_alpha_to_one` decides whether we set this parameter simply to one or - # whether we use the final alpha of the "non-previous" one. - final_alpha_cumprod = ( - jnp.array(1.0, dtype=self.dtype) if self.config.set_alpha_to_one else common.alphas_cumprod[0] - ) - - # standard deviation of the initial noise distribution - init_noise_sigma = jnp.array(1.0, dtype=self.dtype) - - timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1] - - return PNDMSchedulerState.create( - common=common, - final_alpha_cumprod=final_alpha_cumprod, - init_noise_sigma=init_noise_sigma, - timesteps=timesteps, - ) - - def set_timesteps(self, state: PNDMSchedulerState, num_inference_steps: int, shape: Tuple) -> PNDMSchedulerState: - """ - Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. - - Args: - state (`PNDMSchedulerState`): - the `FlaxPNDMScheduler` state data class instance. - num_inference_steps (`int`): - the number of diffusion steps used when generating samples with a pre-trained model. - shape (`Tuple`): - the shape of the samples to be generated. - """ - - step_ratio = self.config.num_train_timesteps // num_inference_steps - # creates integer timesteps by multiplying by ratio - # rounding to avoid issues when num_inference_step is power of 3 - _timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round() + self.config.steps_offset - - if self.config.skip_prk_steps: - # for some models like stable diffusion the prk steps can/should be skipped to - # produce better results. 
When using PNDM with `self.config.skip_prk_steps` the implementation - # is based on crowsonkb's PLMS sampler implementation: https://github.com/CompVis/latent-diffusion/pull/51 - - prk_timesteps = jnp.array([], dtype=jnp.int32) - plms_timesteps = jnp.concatenate([_timesteps[:-1], _timesteps[-2:-1], _timesteps[-1:]])[::-1] - - else: - prk_timesteps = _timesteps[-self.pndm_order :].repeat(2) + jnp.tile( - jnp.array([0, self.config.num_train_timesteps // num_inference_steps // 2], dtype=jnp.int32), - self.pndm_order, - ) - - prk_timesteps = (prk_timesteps[:-1].repeat(2)[1:-1])[::-1] - plms_timesteps = _timesteps[:-3][::-1] - - timesteps = jnp.concatenate([prk_timesteps, plms_timesteps]) - - # initial running values - - cur_model_output = jnp.zeros(shape, dtype=self.dtype) - counter = jnp.int32(0) - cur_sample = jnp.zeros(shape, dtype=self.dtype) - ets = jnp.zeros((4,) + shape, dtype=self.dtype) - - return state.replace( - timesteps=timesteps, - num_inference_steps=num_inference_steps, - prk_timesteps=prk_timesteps, - plms_timesteps=plms_timesteps, - cur_model_output=cur_model_output, - counter=counter, - cur_sample=cur_sample, - ets=ets, - ) - - def scale_model_input( - self, state: PNDMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None - ) -> jnp.ndarray: - """ - Ensures interchangeability with schedulers that need to scale the denoising model input depending on the - current timestep. - - Args: - state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. - sample (`jnp.ndarray`): input sample - timestep (`int`, optional): current timestep - - Returns: - `jnp.ndarray`: scaled input sample - """ - return sample - - def step( - self, - state: PNDMSchedulerState, - model_output: jnp.ndarray, - timestep: int, - sample: jnp.ndarray, - return_dict: bool = True, - ) -> Union[FlaxPNDMSchedulerOutput, Tuple]: - """ - Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion - process from the learned model outputs (most often the predicted noise). - - This function calls `step_prk()` or `step_plms()` depending on the internal variable `counter`. - - Args: - state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. - model_output (`jnp.ndarray`): direct output from learned diffusion model. - timestep (`int`): current discrete timestep in the diffusion chain. - sample (`jnp.ndarray`): - current instance of sample being created by diffusion process. - return_dict (`bool`): option for returning tuple rather than FlaxPNDMSchedulerOutput class - - Returns: - [`FlaxPNDMSchedulerOutput`] or `tuple`: [`FlaxPNDMSchedulerOutput`] if `return_dict` is True, otherwise a - `tuple`. When returning a tuple, the first element is the sample tensor. 
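# [Editor's note] A typical (non-jitted) sampling loop around the Flax PNDM scheduler
# defined above — an editor's sketch, not part of the deleted file. `model_fn` stands in
# for a noise-prediction network (e.g. a UNet apply call) and is an assumption.
import jax

scheduler = FlaxPNDMScheduler(skip_prk_steps=True)
state = scheduler.create_state()
sample_shape = (1, 4, 64, 64)
state = scheduler.set_timesteps(state, num_inference_steps=50, shape=sample_shape)

sample = jax.random.normal(jax.random.PRNGKey(0), sample_shape) * state.init_noise_sigma
for t in state.timesteps:
    model_output = model_fn(sample, t)   # assumed: returns predicted noise, same shape as sample
    sample, state = scheduler.step(state, model_output, t, sample, return_dict=False)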
- - """ - - if state.num_inference_steps is None: - raise ValueError( - "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" - ) - - if self.config.skip_prk_steps: - prev_sample, state = self.step_plms(state, model_output, timestep, sample) - else: - prk_prev_sample, prk_state = self.step_prk(state, model_output, timestep, sample) - plms_prev_sample, plms_state = self.step_plms(state, model_output, timestep, sample) - - cond = state.counter < len(state.prk_timesteps) - - prev_sample = jax.lax.select(cond, prk_prev_sample, plms_prev_sample) - - state = state.replace( - cur_model_output=jax.lax.select(cond, prk_state.cur_model_output, plms_state.cur_model_output), - ets=jax.lax.select(cond, prk_state.ets, plms_state.ets), - cur_sample=jax.lax.select(cond, prk_state.cur_sample, plms_state.cur_sample), - counter=jax.lax.select(cond, prk_state.counter, plms_state.counter), - ) - - if not return_dict: - return (prev_sample, state) - - return FlaxPNDMSchedulerOutput(prev_sample=prev_sample, state=state) - - def step_prk( - self, - state: PNDMSchedulerState, - model_output: jnp.ndarray, - timestep: int, - sample: jnp.ndarray, - ) -> Union[FlaxPNDMSchedulerOutput, Tuple]: - """ - Step function propagating the sample with the Runge-Kutta method. RK takes 4 forward passes to approximate the - solution to the differential equation. - - Args: - state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. - model_output (`jnp.ndarray`): direct output from learned diffusion model. - timestep (`int`): current discrete timestep in the diffusion chain. - sample (`jnp.ndarray`): - current instance of sample being created by diffusion process. - return_dict (`bool`): option for returning tuple rather than FlaxPNDMSchedulerOutput class - - Returns: - [`FlaxPNDMSchedulerOutput`] or `tuple`: [`FlaxPNDMSchedulerOutput`] if `return_dict` is True, otherwise a - `tuple`. When returning a tuple, the first element is the sample tensor. 
- - """ - - if state.num_inference_steps is None: - raise ValueError( - "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" - ) - - diff_to_prev = jnp.where( - state.counter % 2, 0, self.config.num_train_timesteps // state.num_inference_steps // 2 - ) - prev_timestep = timestep - diff_to_prev - timestep = state.prk_timesteps[state.counter // 4 * 4] - - model_output = jax.lax.select( - (state.counter % 4) != 3, - model_output, # remainder 0, 1, 2 - state.cur_model_output + 1 / 6 * model_output, # remainder 3 - ) - - state = state.replace( - cur_model_output=jax.lax.select_n( - state.counter % 4, - state.cur_model_output + 1 / 6 * model_output, # remainder 0 - state.cur_model_output + 1 / 3 * model_output, # remainder 1 - state.cur_model_output + 1 / 3 * model_output, # remainder 2 - jnp.zeros_like(state.cur_model_output), # remainder 3 - ), - ets=jax.lax.select( - (state.counter % 4) == 0, - state.ets.at[0:3].set(state.ets[1:4]).at[3].set(model_output), # remainder 0 - state.ets, # remainder 1, 2, 3 - ), - cur_sample=jax.lax.select( - (state.counter % 4) == 0, - sample, # remainder 0 - state.cur_sample, # remainder 1, 2, 3 - ), - ) - - cur_sample = state.cur_sample - prev_sample = self._get_prev_sample(state, cur_sample, timestep, prev_timestep, model_output) - state = state.replace(counter=state.counter + 1) - - return (prev_sample, state) - - def step_plms( - self, - state: PNDMSchedulerState, - model_output: jnp.ndarray, - timestep: int, - sample: jnp.ndarray, - ) -> Union[FlaxPNDMSchedulerOutput, Tuple]: - """ - Step function propagating the sample with the linear multi-step method. This has one forward pass with multiple - times to approximate the solution. - - Args: - state (`PNDMSchedulerState`): the `FlaxPNDMScheduler` state data class instance. - model_output (`jnp.ndarray`): direct output from learned diffusion model. - timestep (`int`): current discrete timestep in the diffusion chain. - sample (`jnp.ndarray`): - current instance of sample being created by diffusion process. - return_dict (`bool`): option for returning tuple rather than FlaxPNDMSchedulerOutput class - - Returns: - [`FlaxPNDMSchedulerOutput`] or `tuple`: [`FlaxPNDMSchedulerOutput`] if `return_dict` is True, otherwise a - `tuple`. When returning a tuple, the first element is the sample tensor. 
- - """ - - if state.num_inference_steps is None: - raise ValueError( - "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" - ) - - # NOTE: There is no way to check in the jitted runtime if the prk mode was ran before - - prev_timestep = timestep - self.config.num_train_timesteps // state.num_inference_steps - prev_timestep = jnp.where(prev_timestep > 0, prev_timestep, 0) - - # Reference: - # if state.counter != 1: - # state.ets.append(model_output) - # else: - # prev_timestep = timestep - # timestep = timestep + self.config.num_train_timesteps // state.num_inference_steps - - prev_timestep = jnp.where(state.counter == 1, timestep, prev_timestep) - timestep = jnp.where( - state.counter == 1, timestep + self.config.num_train_timesteps // state.num_inference_steps, timestep - ) - - # Reference: - # if len(state.ets) == 1 and state.counter == 0: - # model_output = model_output - # state.cur_sample = sample - # elif len(state.ets) == 1 and state.counter == 1: - # model_output = (model_output + state.ets[-1]) / 2 - # sample = state.cur_sample - # state.cur_sample = None - # elif len(state.ets) == 2: - # model_output = (3 * state.ets[-1] - state.ets[-2]) / 2 - # elif len(state.ets) == 3: - # model_output = (23 * state.ets[-1] - 16 * state.ets[-2] + 5 * state.ets[-3]) / 12 - # else: - # model_output = (1 / 24) * (55 * state.ets[-1] - 59 * state.ets[-2] + 37 * state.ets[-3] - 9 * state.ets[-4]) - - state = state.replace( - ets=jax.lax.select( - state.counter != 1, - state.ets.at[0:3].set(state.ets[1:4]).at[3].set(model_output), # counter != 1 - state.ets, # counter 1 - ), - cur_sample=jax.lax.select( - state.counter != 1, - sample, # counter != 1 - state.cur_sample, # counter 1 - ), - ) - - state = state.replace( - cur_model_output=jax.lax.select_n( - jnp.clip(state.counter, 0, 4), - model_output, # counter 0 - (model_output + state.ets[-1]) / 2, # counter 1 - (3 * state.ets[-1] - state.ets[-2]) / 2, # counter 2 - (23 * state.ets[-1] - 16 * state.ets[-2] + 5 * state.ets[-3]) / 12, # counter 3 - (1 / 24) - * (55 * state.ets[-1] - 59 * state.ets[-2] + 37 * state.ets[-3] - 9 * state.ets[-4]), # counter >= 4 - ), - ) - - sample = state.cur_sample - model_output = state.cur_model_output - prev_sample = self._get_prev_sample(state, sample, timestep, prev_timestep, model_output) - state = state.replace(counter=state.counter + 1) - - return (prev_sample, state) - - def _get_prev_sample(self, state: PNDMSchedulerState, sample, timestep, prev_timestep, model_output): - # See formula (9) of PNDM paper https://arxiv.org/pdf/2202.09778.pdf - # this function computes x_(t−δ) using the formula of (9) - # Note that x_t needs to be added to both sides of the equation - - # Notation ( -> - # alpha_prod_t -> α_t - # alpha_prod_t_prev -> α_(t−δ) - # beta_prod_t -> (1 - α_t) - # beta_prod_t_prev -> (1 - α_(t−δ)) - # sample -> x_t - # model_output -> e_θ(x_t, t) - # prev_sample -> x_(t−δ) - alpha_prod_t = state.common.alphas_cumprod[timestep] - alpha_prod_t_prev = jnp.where( - prev_timestep >= 0, state.common.alphas_cumprod[prev_timestep], state.final_alpha_cumprod - ) - beta_prod_t = 1 - alpha_prod_t - beta_prod_t_prev = 1 - alpha_prod_t_prev - - if self.config.prediction_type == "v_prediction": - model_output = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample - elif self.config.prediction_type != "epsilon": - raise ValueError( - f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `v_prediction`" - ) - - # corresponds to 
(α_(t−δ) - α_t) divided by - # denominator of x_t in formula (9) and plus 1 - # Note: (α_(t−δ) - α_t) / (sqrt(α_t) * (sqrt(α_(t−δ)) + sqr(α_t))) = - # sqrt(α_(t−δ)) / sqrt(α_t)) - sample_coeff = (alpha_prod_t_prev / alpha_prod_t) ** (0.5) - - # corresponds to denominator of e_θ(x_t, t) in formula (9) - model_output_denom_coeff = alpha_prod_t * beta_prod_t_prev ** (0.5) + ( - alpha_prod_t * beta_prod_t * alpha_prod_t_prev - ) ** (0.5) - - # full formula (9) - prev_sample = ( - sample_coeff * sample - (alpha_prod_t_prev - alpha_prod_t) * model_output / model_output_denom_coeff - ) - - return prev_sample - - def add_noise( - self, - state: PNDMSchedulerState, - original_samples: jnp.ndarray, - noise: jnp.ndarray, - timesteps: jnp.ndarray, - ) -> jnp.ndarray: - return add_noise_common(state.common, original_samples, noise, timesteps) - - def __len__(self): - return self.config.num_train_timesteps diff --git a/spaces/Andy1621/uniformer_image_detection/configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py deleted file mode 100644 index f506ea815fedd6faefad9a06d7f466b86e8d2622..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -# fp16 settings -fp16 = dict(loss_scale=512.) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py deleted file mode 100644 index 1e0fe4931e9cb340fcf3b80a4f9380abee500238..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/guided_anchoring/ga_rpn_x101_32x4d_fpn_1x_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = './ga_rpn_r50_fpn_1x_coco.py' -model = dict( - pretrained='open-mmlab://resnext101_32x4d', - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch')) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/paa/paa_r101_fpn_2x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/paa/paa_r101_fpn_2x_coco.py deleted file mode 100644 index 641ef764d2713184845b624b20db1771cfcd6739..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/paa/paa_r101_fpn_2x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = './paa_r101_fpn_1x_coco.py' -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/datasets/pipelines/loading.py b/spaces/Andy1621/uniformer_image_detection/mmdet/datasets/pipelines/loading.py deleted file mode 100644 index 69225941903f6b9d67b8b8c5fc3b1801cd964fb2..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/datasets/pipelines/loading.py +++ /dev/null @@ -1,458 +0,0 @@ -import os.path as osp - -import mmcv -import numpy as np -import pycocotools.mask as maskUtils - -from mmdet.core import BitmapMasks, PolygonMasks -from ..builder import PIPELINES - - -@PIPELINES.register_module() -class LoadImageFromFile(object): - """Load an image from file. - - Required keys are "img_prefix" and "img_info" (a dict that must contain the - key "filename"). 
Added or updated keys are "filename", "img", "img_shape", - "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`), - "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1). - - Args: - to_float32 (bool): Whether to convert the loaded image to a float32 - numpy array. If set to False, the loaded image is an uint8 array. - Defaults to False. - color_type (str): The flag argument for :func:`mmcv.imfrombytes`. - Defaults to 'color'. - file_client_args (dict): Arguments to instantiate a FileClient. - See :class:`mmcv.fileio.FileClient` for details. - Defaults to ``dict(backend='disk')``. - """ - - def __init__(self, - to_float32=False, - color_type='color', - file_client_args=dict(backend='disk')): - self.to_float32 = to_float32 - self.color_type = color_type - self.file_client_args = file_client_args.copy() - self.file_client = None - - def __call__(self, results): - """Call functions to load image and get image meta information. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded image and meta information. - """ - - if self.file_client is None: - self.file_client = mmcv.FileClient(**self.file_client_args) - - if results['img_prefix'] is not None: - filename = osp.join(results['img_prefix'], - results['img_info']['filename']) - else: - filename = results['img_info']['filename'] - - img_bytes = self.file_client.get(filename) - img = mmcv.imfrombytes(img_bytes, flag=self.color_type) - if self.to_float32: - img = img.astype(np.float32) - - results['filename'] = filename - results['ori_filename'] = results['img_info']['filename'] - results['img'] = img - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - results['img_fields'] = ['img'] - return results - - def __repr__(self): - repr_str = (f'{self.__class__.__name__}(' - f'to_float32={self.to_float32}, ' - f"color_type='{self.color_type}', " - f'file_client_args={self.file_client_args})') - return repr_str - - -@PIPELINES.register_module() -class LoadImageFromWebcam(LoadImageFromFile): - """Load an image from webcam. - - Similar with :obj:`LoadImageFromFile`, but the image read from webcam is in - ``results['img']``. - """ - - def __call__(self, results): - """Call functions to add image meta information. - - Args: - results (dict): Result dict with Webcam read image in - ``results['img']``. - - Returns: - dict: The dict contains loaded image and meta information. - """ - - img = results['img'] - if self.to_float32: - img = img.astype(np.float32) - - results['filename'] = None - results['ori_filename'] = None - results['img'] = img - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - results['img_fields'] = ['img'] - return results - - -@PIPELINES.register_module() -class LoadMultiChannelImageFromFiles(object): - """Load multi-channel images from a list of separate channel files. - - Required keys are "img_prefix" and "img_info" (a dict that must contain the - key "filename", which is expected to be a list of filenames). - Added or updated keys are "filename", "img", "img_shape", - "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`), - "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1). - - Args: - to_float32 (bool): Whether to convert the loaded image to a float32 - numpy array. If set to False, the loaded image is an uint8 array. - Defaults to False. - color_type (str): The flag argument for :func:`mmcv.imfrombytes`. - Defaults to 'color'. 
- file_client_args (dict): Arguments to instantiate a FileClient. - See :class:`mmcv.fileio.FileClient` for details. - Defaults to ``dict(backend='disk')``. - """ - - def __init__(self, - to_float32=False, - color_type='unchanged', - file_client_args=dict(backend='disk')): - self.to_float32 = to_float32 - self.color_type = color_type - self.file_client_args = file_client_args.copy() - self.file_client = None - - def __call__(self, results): - """Call functions to load multiple images and get images meta - information. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded images and meta information. - """ - - if self.file_client is None: - self.file_client = mmcv.FileClient(**self.file_client_args) - - if results['img_prefix'] is not None: - filename = [ - osp.join(results['img_prefix'], fname) - for fname in results['img_info']['filename'] - ] - else: - filename = results['img_info']['filename'] - - img = [] - for name in filename: - img_bytes = self.file_client.get(name) - img.append(mmcv.imfrombytes(img_bytes, flag=self.color_type)) - img = np.stack(img, axis=-1) - if self.to_float32: - img = img.astype(np.float32) - - results['filename'] = filename - results['ori_filename'] = results['img_info']['filename'] - results['img'] = img - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - # Set initial values for default meta_keys - results['pad_shape'] = img.shape - results['scale_factor'] = 1.0 - num_channels = 1 if len(img.shape) < 3 else img.shape[2] - results['img_norm_cfg'] = dict( - mean=np.zeros(num_channels, dtype=np.float32), - std=np.ones(num_channels, dtype=np.float32), - to_rgb=False) - return results - - def __repr__(self): - repr_str = (f'{self.__class__.__name__}(' - f'to_float32={self.to_float32}, ' - f"color_type='{self.color_type}', " - f'file_client_args={self.file_client_args})') - return repr_str - - -@PIPELINES.register_module() -class LoadAnnotations(object): - """Load mutiple types of annotations. - - Args: - with_bbox (bool): Whether to parse and load the bbox annotation. - Default: True. - with_label (bool): Whether to parse and load the label annotation. - Default: True. - with_mask (bool): Whether to parse and load the mask annotation. - Default: False. - with_seg (bool): Whether to parse and load the semantic segmentation - annotation. Default: False. - poly2mask (bool): Whether to convert the instance masks from polygons - to bitmaps. Default: True. - file_client_args (dict): Arguments to instantiate a FileClient. - See :class:`mmcv.fileio.FileClient` for details. - Defaults to ``dict(backend='disk')``. - """ - - def __init__(self, - with_bbox=True, - with_label=True, - with_mask=False, - with_seg=False, - poly2mask=True, - file_client_args=dict(backend='disk')): - self.with_bbox = with_bbox - self.with_label = with_label - self.with_mask = with_mask - self.with_seg = with_seg - self.poly2mask = poly2mask - self.file_client_args = file_client_args.copy() - self.file_client = None - - def _load_bboxes(self, results): - """Private function to load bounding box annotations. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded bounding box annotations. 
- """ - - ann_info = results['ann_info'] - results['gt_bboxes'] = ann_info['bboxes'].copy() - - gt_bboxes_ignore = ann_info.get('bboxes_ignore', None) - if gt_bboxes_ignore is not None: - results['gt_bboxes_ignore'] = gt_bboxes_ignore.copy() - results['bbox_fields'].append('gt_bboxes_ignore') - results['bbox_fields'].append('gt_bboxes') - return results - - def _load_labels(self, results): - """Private function to load label annotations. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded label annotations. - """ - - results['gt_labels'] = results['ann_info']['labels'].copy() - return results - - def _poly2mask(self, mask_ann, img_h, img_w): - """Private function to convert masks represented with polygon to - bitmaps. - - Args: - mask_ann (list | dict): Polygon mask annotation input. - img_h (int): The height of output mask. - img_w (int): The width of output mask. - - Returns: - numpy.ndarray: The decode bitmap mask of shape (img_h, img_w). - """ - - if isinstance(mask_ann, list): - # polygon -- a single object might consist of multiple parts - # we merge all parts into one mask rle code - rles = maskUtils.frPyObjects(mask_ann, img_h, img_w) - rle = maskUtils.merge(rles) - elif isinstance(mask_ann['counts'], list): - # uncompressed RLE - rle = maskUtils.frPyObjects(mask_ann, img_h, img_w) - else: - # rle - rle = mask_ann - mask = maskUtils.decode(rle) - return mask - - def process_polygons(self, polygons): - """Convert polygons to list of ndarray and filter invalid polygons. - - Args: - polygons (list[list]): Polygons of one instance. - - Returns: - list[numpy.ndarray]: Processed polygons. - """ - - polygons = [np.array(p) for p in polygons] - valid_polygons = [] - for polygon in polygons: - if len(polygon) % 2 == 0 and len(polygon) >= 6: - valid_polygons.append(polygon) - return valid_polygons - - def _load_masks(self, results): - """Private function to load mask annotations. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded mask annotations. - If ``self.poly2mask`` is set ``True``, `gt_mask` will contain - :obj:`PolygonMasks`. Otherwise, :obj:`BitmapMasks` is used. - """ - - h, w = results['img_info']['height'], results['img_info']['width'] - gt_masks = results['ann_info']['masks'] - if self.poly2mask: - gt_masks = BitmapMasks( - [self._poly2mask(mask, h, w) for mask in gt_masks], h, w) - else: - gt_masks = PolygonMasks( - [self.process_polygons(polygons) for polygons in gt_masks], h, - w) - results['gt_masks'] = gt_masks - results['mask_fields'].append('gt_masks') - return results - - def _load_semantic_seg(self, results): - """Private function to load semantic segmentation annotations. - - Args: - results (dict): Result dict from :obj:`dataset`. - - Returns: - dict: The dict contains loaded semantic segmentation annotations. - """ - - if self.file_client is None: - self.file_client = mmcv.FileClient(**self.file_client_args) - - filename = osp.join(results['seg_prefix'], - results['ann_info']['seg_map']) - img_bytes = self.file_client.get(filename) - results['gt_semantic_seg'] = mmcv.imfrombytes( - img_bytes, flag='unchanged').squeeze() - results['seg_fields'].append('gt_semantic_seg') - return results - - def __call__(self, results): - """Call function to load multiple types annotations. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. 
- - Returns: - dict: The dict contains loaded bounding box, label, mask and - semantic segmentation annotations. - """ - - if self.with_bbox: - results = self._load_bboxes(results) - if results is None: - return None - if self.with_label: - results = self._load_labels(results) - if self.with_mask: - results = self._load_masks(results) - if self.with_seg: - results = self._load_semantic_seg(results) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(with_bbox={self.with_bbox}, ' - repr_str += f'with_label={self.with_label}, ' - repr_str += f'with_mask={self.with_mask}, ' - repr_str += f'with_seg={self.with_seg}, ' - repr_str += f'poly2mask={self.poly2mask}, ' - repr_str += f'poly2mask={self.file_client_args})' - return repr_str - - -@PIPELINES.register_module() -class LoadProposals(object): - """Load proposal pipeline. - - Required key is "proposals". Updated keys are "proposals", "bbox_fields". - - Args: - num_max_proposals (int, optional): Maximum number of proposals to load. - If not specified, all proposals will be loaded. - """ - - def __init__(self, num_max_proposals=None): - self.num_max_proposals = num_max_proposals - - def __call__(self, results): - """Call function to load proposals from file. - - Args: - results (dict): Result dict from :obj:`mmdet.CustomDataset`. - - Returns: - dict: The dict contains loaded proposal annotations. - """ - - proposals = results['proposals'] - if proposals.shape[1] not in (4, 5): - raise AssertionError( - 'proposals should have shapes (n, 4) or (n, 5), ' - f'but found {proposals.shape}') - proposals = proposals[:, :4] - - if self.num_max_proposals is not None: - proposals = proposals[:self.num_max_proposals] - - if len(proposals) == 0: - proposals = np.array([[0, 0, 0, 0]], dtype=np.float32) - results['proposals'] = proposals - results['bbox_fields'].append('proposals') - return results - - def __repr__(self): - return self.__class__.__name__ + \ - f'(num_max_proposals={self.num_max_proposals})' - - -@PIPELINES.register_module() -class FilterAnnotations(object): - """Filter invalid annotations. - - Args: - min_gt_bbox_wh (tuple[int]): Minimum width and height of ground truth - boxes. 
- """ - - def __init__(self, min_gt_bbox_wh): - # TODO: add more filter options - self.min_gt_bbox_wh = min_gt_bbox_wh - - def __call__(self, results): - assert 'gt_bboxes' in results - gt_bboxes = results['gt_bboxes'] - w = gt_bboxes[:, 2] - gt_bboxes[:, 0] - h = gt_bboxes[:, 3] - gt_bboxes[:, 1] - keep = (w > self.min_gt_bbox_wh[0]) & (h > self.min_gt_bbox_wh[1]) - if not keep.any(): - return None - else: - keys = ('gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg') - for key in keys: - if key in results: - results[key] = results[key][keep] - return results diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r50-d8_512x512_160k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r50-d8_512x512_160k_ade20k.py deleted file mode 100644 index 1ad94d8988bb822c1571816255464126d9d5b95d..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r50-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/api-examples/api-example-chat.py b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/api-examples/api-example-chat.py deleted file mode 100644 index b2a1e1e42bdbfe0c745b15ac8bf61f4633952472..0000000000000000000000000000000000000000 --- a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/api-examples/api-example-chat.py +++ /dev/null @@ -1,92 +0,0 @@ -import html -import json - -import requests - -# For local streaming, the websockets are hosted without ssl - http:// -HOST = 'localhost:5000' -URI = f'http://{HOST}/api/v1/chat' - -# For reverse-proxied streaming, the remote will likely host with ssl - https:// -# URI = 'https://your-uri-here.trycloudflare.com/api/v1/chat' - - -def run(user_input, history): - request = { - 'user_input': user_input, - 'max_new_tokens': 250, - 'auto_max_new_tokens': False, - 'max_tokens_second': 0, - 'history': history, - 'mode': 'instruct', # Valid options: 'chat', 'chat-instruct', 'instruct' - 'character': 'Example', - 'instruction_template': 'Vicuna-v1.1', # Will get autodetected if unset - 'your_name': 'You', - # 'name1': 'name of user', # Optional - # 'name2': 'name of character', # Optional - # 'context': 'character context', # Optional - # 'greeting': 'greeting', # Optional - # 'name1_instruct': 'You', # Optional - # 'name2_instruct': 'Assistant', # Optional - # 'context_instruct': 'context_instruct', # Optional - # 'turn_template': 'turn_template', # Optional - 'regenerate': False, - '_continue': False, - 'chat_instruct_command': 'Continue the chat dialogue below. Write a single reply for the character "<|character|>".\n\n<|prompt|>', - - # Generation params. If 'preset' is set to different than 'None', the values - # in presets/preset-name.yaml are used instead of the individual numbers. 
- 'preset': 'None', - 'do_sample': True, - 'temperature': 0.7, - 'top_p': 0.1, - 'typical_p': 1, - 'epsilon_cutoff': 0, # In units of 1e-4 - 'eta_cutoff': 0, # In units of 1e-4 - 'tfs': 1, - 'top_a': 0, - 'repetition_penalty': 1.18, - 'repetition_penalty_range': 0, - 'top_k': 40, - 'min_length': 0, - 'no_repeat_ngram_size': 0, - 'num_beams': 1, - 'penalty_alpha': 0, - 'length_penalty': 1, - 'early_stopping': False, - 'mirostat_mode': 0, - 'mirostat_tau': 5, - 'mirostat_eta': 0.1, - 'grammar_string': '', - 'guidance_scale': 1, - 'negative_prompt': '', - - 'seed': -1, - 'add_bos_token': True, - 'truncation_length': 2048, - 'ban_eos_token': False, - 'custom_token_bans': '', - 'skip_special_tokens': True, - 'stopping_strings': [] - } - - response = requests.post(URI, json=request) - - if response.status_code == 200: - result = response.json()['results'][0]['history'] - print(json.dumps(result, indent=4)) - print() - print(html.unescape(result['visible'][-1][1])) - - -if __name__ == '__main__': - user_input = "Please give me a step-by-step guide on how to plant a tree in my backyard." - - # Basic example - history = {'internal': [], 'visible': []} - - # "Continue" example. Make sure to set '_continue' to True above - # arr = [user_input, 'Surely, here is'] - # history = {'internal': [arr], 'visible': [arr]} - - run(user_input, history) diff --git a/spaces/AntNikYab/NaturalLanguageProcessing/README.md b/spaces/AntNikYab/NaturalLanguageProcessing/README.md deleted file mode 100644 index a5eaf13ad36545da108c7eae11ec72bbebdc70ec..0000000000000000000000000000000000000000 --- a/spaces/AntNikYab/NaturalLanguageProcessing/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: NaturalLanguageProcessing -emoji: 🏆 -colorFrom: red -colorTo: red -sdk: streamlit -sdk_version: 1.26.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Arijit-hazra/my-image-captioner/load_model.py b/spaces/Arijit-hazra/my-image-captioner/load_model.py deleted file mode 100644 index 827bd9c94c2459864490034f8642bb1d2a0a25d8..0000000000000000000000000000000000000000 --- a/spaces/Arijit-hazra/my-image-captioner/load_model.py +++ /dev/null @@ -1,363 +0,0 @@ -### IMPORTS -import tensorflow as tf -import numpy as np - -import einops -import numpy as np -import tqdm - -import collections -import re -import string -import pickle - -print("import complete") -#========================================================================================================================= -### UTILITY FUNCTIONS -#========================================================================================================================= - -IMAGE_SHAPE=(224, 224, 3) - -@tf.keras.utils.register_keras_serializable() -def custom_standardization(s): - s = tf.strings.lower(s) - s = tf.strings.regex_replace(s, f'[{re.escape(string.punctuation)}]', '') - s = tf.strings.join(['[START]', s, '[END]'], separator=' ') - return s - -def load_image(image_path): - img = tf.io.read_file(image_path) - img = tf.io.decode_jpeg(img, channels=3) - img = tf.image.resize(img, IMAGE_SHAPE[:-1]) - return img - -def load_image_obj(img): - img = tf.image.resize(img, IMAGE_SHAPE[:-1]) - return img - -def masked_loss(labels, preds): - loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels, preds) - - mask = (labels != 0) & (loss < 1e8) - mask = tf.cast(mask, loss.dtype) - - loss = loss*mask - loss = tf.reduce_sum(loss)/tf.reduce_sum(mask) - return loss - -def 
masked_acc(labels, preds): - mask = tf.cast(labels!=0, tf.float32) - preds = tf.argmax(preds, axis=-1) - labels = tf.cast(labels, tf.int64) - match = tf.cast(preds == labels, mask.dtype) - acc = tf.reduce_sum(match*mask)/tf.reduce_sum(mask) - return acc - -print("utility complete") -#========================================================================================================================= -### MODEL CLASS -#========================================================================================================================= - -mobilenet = tf.keras.applications.MobileNetV3Small( - input_shape=IMAGE_SHAPE, - include_top=False, - include_preprocessing=True) -mobilenet.trainable=False - -class SeqEmbedding(tf.keras.layers.Layer): - def __init__(self, vocab_size, max_length, depth): - super().__init__() - self.pos_embedding = tf.keras.layers.Embedding(input_dim=max_length, output_dim=depth) - - self.token_embedding = tf.keras.layers.Embedding( - input_dim=vocab_size, - output_dim=depth, - mask_zero=True) - - self.add = tf.keras.layers.Add() - - - def call(self, seq): - seq = self.token_embedding(seq) # (batch, seq, depth) - - x = tf.range(tf.shape(seq)[1]) # (seq) - x = x[tf.newaxis, :] # (1, seq) - x = self.pos_embedding(x) # (1, seq, depth) - - return self.add([seq,x]) - -class CausalSelfAttention(tf.keras.layers.Layer): - def __init__(self, **kwargs): - super().__init__() - self.mha = tf.keras.layers.MultiHeadAttention(**kwargs) - # Use Add instead of + so the keras mask propagates through. - self.add = tf.keras.layers.Add() - self.layernorm = tf.keras.layers.LayerNormalization() - - - def call(self, x): - attn = self.mha(query=x, value=x, - use_causal_mask=True) - x = self.add([x, attn]) - return self.layernorm(x) - -class CrossAttention(tf.keras.layers.Layer): - def __init__(self,**kwargs): - super().__init__() - self.mha = tf.keras.layers.MultiHeadAttention(**kwargs) - self.add = tf.keras.layers.Add() - self.layernorm = tf.keras.layers.LayerNormalization() - - def call(self, x, y, **kwargs): - attn, attention_scores = self.mha( - query=x, value=y, - return_attention_scores=True) - - self.last_attention_scores = attention_scores - - x = self.add([x, attn]) - return self.layernorm(x) - -class FeedForward(tf.keras.layers.Layer): - def __init__(self, units, dropout_rate=0.1): - super().__init__() - self.seq = tf.keras.Sequential([ - tf.keras.layers.Dense(units=2*units, activation='relu'), - tf.keras.layers.Dense(units=units), - tf.keras.layers.Dropout(rate=dropout_rate), - ]) - - self.layernorm = tf.keras.layers.LayerNormalization() - - def call(self, x): - x = x + self.seq(x) - return self.layernorm(x) - -class DecoderLayer(tf.keras.layers.Layer): - def __init__(self, units, num_heads=1, dropout_rate=0.1): - super().__init__() - - self.self_attention = CausalSelfAttention(num_heads=num_heads, - key_dim=units, - dropout=dropout_rate) - self.cross_attention = CrossAttention(num_heads=num_heads, - key_dim=units, - dropout=dropout_rate) - self.ff = FeedForward(units=units, dropout_rate=dropout_rate) - - - def call(self, inputs, training=False): - in_seq, out_seq = inputs - - # Text input - out_seq = self.self_attention(out_seq) - - out_seq = self.cross_attention(out_seq, in_seq) - - self.last_attention_scores = self.cross_attention.last_attention_scores - - out_seq = self.ff(out_seq) - - return out_seq - -class TokenOutput(tf.keras.layers.Layer): - def __init__(self, tokenizer, banned_tokens=('', '[UNK]', '[START]'), bias=None, **kwargs): - super().__init__() - - self.dense = 
tf.keras.layers.Dense( - units=tokenizer.vocabulary_size(), **kwargs) - self.tokenizer = tokenizer - self.banned_tokens = banned_tokens - - self.bias = bias - - def adapt(self, ds): - counts = collections.Counter() - vocab_dict = {name: id - for id, name in enumerate(self.tokenizer.get_vocabulary())} - - for tokens in tqdm.tqdm(ds): - counts.update(tokens.numpy().flatten()) - - counts_arr = np.zeros(shape=(self.tokenizer.vocabulary_size(),)) - counts_arr[np.array(list(counts.keys()), dtype=np.int32)] = list(counts.values()) - - counts_arr = counts_arr[:] - for token in self.banned_tokens: - counts_arr[vocab_dict[token]] = 0 - - total = counts_arr.sum() - p = counts_arr/total - p[counts_arr==0] = 1.0 - log_p = np.log(p) # log(1) == 0 - - entropy = -(log_p*p).sum() - - print() - print(f"Uniform entropy: {np.log(self.tokenizer.vocabulary_size()):0.2f}") - print(f"Marginal entropy: {entropy:0.2f}") - - self.bias = log_p - self.bias[counts_arr==0] = -1e9 - - def call(self, x): - x = self.dense(x) - return x + self.bias - - def get_config(self): - config = super(TokenOutput, self).get_config() - config.update({ - "tokenizer": self.tokenizer, - "banned_tokens": self.banned_tokens, - "bias": self.bias, - "dense":self.dense - }) - - return config - -class Captioner(tf.keras.Model): - @classmethod - def add_method(cls, fun): - setattr(cls, fun.__name__, fun) - return fun - - def __init__(self, tokenizer, feature_extractor, output_layer, num_layers=1, - units=256, max_length=50, num_heads=1, dropout_rate=0.1): - super().__init__() - self.feature_extractor = feature_extractor - self.tokenizer = tokenizer - self.word_to_index = tf.keras.layers.StringLookup( - mask_token="", - vocabulary=tokenizer.get_vocabulary()) - self.index_to_word = tf.keras.layers.StringLookup( - mask_token="", - vocabulary=tokenizer.get_vocabulary(), - invert=True) - - self.seq_embedding = SeqEmbedding( - vocab_size=tokenizer.vocabulary_size(), - depth=units, - max_length=max_length) - - self.decoder_layers = [ - DecoderLayer(units, num_heads=num_heads, dropout_rate=dropout_rate) - for n in range(num_layers)] - - self.output_layer = output_layer - - def call(self, inputs): - image, txt = inputs - - if image.shape[-1] == 3: - # Apply the feature-extractor, if you get an RGB image. - image = self.feature_extractor(image) - - # Flatten the feature map - image = einops.rearrange(image, 'b h w c -> b (h w) c') - - - if txt.dtype == tf.string: - # Apply the tokenizer if you get string inputs. 
- txt = self.tokenizer(txt) - - txt = self.seq_embedding(txt) - - # Look at the image - for dec_layer in self.decoder_layers: - txt = dec_layer(inputs=(image, txt)) - - txt = self.output_layer(txt) - - return txt - - - def simple_gen(self, image, temperature=1): - initial = self.word_to_index([['[START]']]) # (batch, sequence) - img_features = self.feature_extractor(image[tf.newaxis, ...]) - - tokens = initial # (batch, sequence) - for n in range(50): - preds = self((img_features, tokens)).numpy() # (batch, sequence, vocab) - preds = preds[:,-1, :] #(batch, vocab) - if temperature==0: - next = tf.argmax(preds, axis=-1)[:, tf.newaxis] # (batch, 1) - else: - next = tf.random.categorical(preds/temperature, num_samples=1) # (batch, 1) - tokens = tf.concat([tokens, next], axis=1) # (batch, sequence) - - if next[0] == self.word_to_index('[END]'): - break - - words = self.index_to_word(tokens[0, 1:-1]) - result = tf.strings.reduce_join(words, axis=-1, separator=' ') - return result.numpy().decode() - - # def get_config(self): - # config = super().get_config() - # config.update({"feature_extractor": self.feature_extractor, - # "tokenizer": self.tokenizer, - # "word_to_index": self.word_to_index, - # "index_to_word": self.index_to_word, - # "outputlayer": self.output_layer, - # "seq_embedding": self.seq_embedding, - # "decoder_layers": self.decoder_layers - # }) - # return config - - # def build_from_config(self, config): - # return super().build_from_config(config) - -# model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4), -# loss=masked_loss, -# metrics=[masked_acc]) - -print("model complete") -#========================================================================================================================= -### LOAD FUNCTION -#========================================================================================================================= - -def build(): - filename = "model/tokenizer.pkl" - token_meta = pickle.load(open(filename, 'rb')) - tokenizer = tf.keras.layers.TextVectorization.from_config(token_meta["config"]) - tokenizer.set_weights(token_meta['weights']) - word_to_index = tf.keras.layers.StringLookup( - mask_token="", - vocabulary=tokenizer.get_vocabulary()) - - index_to_word = tf.keras.layers.StringLookup( - mask_token="", - vocabulary=tokenizer.get_vocabulary(), - invert=True) - - output_layer = TokenOutput(tokenizer, banned_tokens=('', '[UNK]', '[START]')) - filename = "model/output_layer.pkl" - bias = pickle.load(open(filename, 'rb')) - output_layer.bias = bias - - load_model = Captioner(tokenizer, feature_extractor=mobilenet, output_layer=output_layer, - units=256, dropout_rate=0.5, num_layers=2, num_heads=2) - load_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4), - loss=masked_loss, - metrics=[masked_acc]) - - # image_url = 'https://tensorflow.org/images/surf.jpg' - # image_path = tf.keras.utils.get_file('surf.jpg', origin=image_url) - # image = load_image(image_path) - image = pickle.load(open("test_run_img", "rb")) - print(load_model.simple_gen(image)) - - path = "model/captioner_weights" - load_model.load_weights(path) - return load_model - -# loaded_model = build() -print("loaded") -#========================================================================================================================= -### TEST RUN -#========================================================================================================================= - -image_url = 'https://tensorflow.org/images/surf.jpg' -image_path = 
tf.keras.utils.get_file('surf.jpg', origin=image_url) -image = load_image(image_path) - diff --git a/spaces/ArkanDash/rvc-models-new/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py b/spaces/ArkanDash/rvc-models-new/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py deleted file mode 100644 index ee3171bcb7c4a5066560723108b56e055f18be45..0000000000000000000000000000000000000000 --- a/spaces/ArkanDash/rvc-models-new/lib/infer_pack/modules/F0Predictor/DioF0Predictor.py +++ /dev/null @@ -1,90 +0,0 @@ -from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import pyworld -import numpy as np - - -class DioF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - 对F0进行插值处理 - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def resize_f0(self, x, target_len): - source = np.array(x) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * target_len, len(source)) / target_len, - np.arange(0, len(source)), - source, - ) - res = np.nan_to_num(target) - return res - - def compute_f0(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.dio( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - for index, pitch in enumerate(f0): - f0[index] = round(pitch, 1) - return self.interpolate_f0(self.resize_f0(f0, p_len))[0] - - def compute_f0_uv(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.dio( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - for index, pitch in enumerate(f0): - f0[index] = round(pitch, 1) - return self.interpolate_f0(self.resize_f0(f0, p_len)) diff --git a/spaces/ArpitM/chat-llm-streaming/app.py b/spaces/ArpitM/chat-llm-streaming/app.py deleted file mode 100644 index c66ad15777a5c4a69ed2cf549575c2d6bf797ff7..0000000000000000000000000000000000000000 --- a/spaces/ArpitM/chat-llm-streaming/app.py +++ /dev/null @@ -1,321 +0,0 @@ -import os - -import gradio as gr - -from text_generation import Client, InferenceAPIClient - -openchat_preprompt = ( - "\n: Hi!\n: My name is Bot, model version is 0.15, part of an open-source kit for " - "fine-tuning new bots! I was created by Together, LAION, and Ontocord.ai and the open-source " - "community. 
I am not human, not evil and not alive, and thus have no thoughts and feelings, " - "but I am programmed to be helpful, polite, honest, and friendly.\n" -) - - -def get_client(model: str): - if model == "togethercomputer/GPT-NeoXT-Chat-Base-20B": - return Client(os.getenv("OPENCHAT_API_URL")) - return InferenceAPIClient(model, token=os.getenv("HF_TOKEN", None)) - - -def get_usernames(model: str): - """ - Returns: - (str, str, str, str): pre-prompt, username, bot name, separator - """ - if model in ("OpenAssistant/oasst-sft-1-pythia-12b", "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"): - return "", "<|prompter|>", "<|assistant|>", "<|endoftext|>" - if model == "togethercomputer/GPT-NeoXT-Chat-Base-20B": - return openchat_preprompt, ": ", ": ", "\n" - return "", "User: ", "Assistant: ", "\n" - - -def predict( - model: str, - inputs: str, - typical_p: float, - top_p: float, - temperature: float, - top_k: int, - repetition_penalty: float, - watermark: bool, - chatbot, - history, -): - client = get_client(model) - preprompt, user_name, assistant_name, sep = get_usernames(model) - - history.append(inputs) - - past = [] - for data in chatbot: - user_data, model_data = data - - if not user_data.startswith(user_name): - user_data = user_name + user_data - if not model_data.startswith(sep + assistant_name): - model_data = sep + assistant_name + model_data - - past.append(user_data + model_data.rstrip() + sep) - - if not inputs.startswith(user_name): - inputs = user_name + inputs - - total_inputs = preprompt + "".join(past) + inputs + sep + assistant_name.rstrip() - - partial_words = "" - - if model in ("OpenAssistant/oasst-sft-1-pythia-12b", "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"): - iterator = client.generate_stream( - total_inputs, - typical_p=typical_p, - truncate=1000, - watermark=watermark, - max_new_tokens=500, - ) - else: - iterator = client.generate_stream( - total_inputs, - top_p=top_p if top_p < 1.0 else None, - top_k=top_k, - truncate=1000, - repetition_penalty=repetition_penalty, - watermark=watermark, - temperature=temperature, - max_new_tokens=500, - stop_sequences=[user_name.rstrip(), assistant_name.rstrip()], - ) - - for i, response in enumerate(iterator): - if response.token.special: - continue - - partial_words = partial_words + response.token.text - if partial_words.endswith(user_name.rstrip()): - partial_words = partial_words.rstrip(user_name.rstrip()) - if partial_words.endswith(assistant_name.rstrip()): - partial_words = partial_words.rstrip(assistant_name.rstrip()) - - if i == 0: - history.append(" " + partial_words) - elif response.token.text not in user_name: - history[-1] = partial_words - - chat = [ - (history[i].strip(), history[i + 1].strip()) - for i in range(0, len(history) - 1, 2) - ] - yield chat, history - - -def reset_textbox(): - return gr.update(value="") - - -def radio_on_change( - value: str, - disclaimer, - typical_p, - top_p, - top_k, - temperature, - repetition_penalty, - watermark, -): - if value in ("OpenAssistant/oasst-sft-1-pythia-12b", "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"): - typical_p = typical_p.update(value=0.2, visible=True) - top_p = top_p.update(visible=False) - top_k = top_k.update(visible=False) - temperature = temperature.update(visible=False) - disclaimer = disclaimer.update(visible=False) - repetition_penalty = repetition_penalty.update(visible=False) - watermark = watermark.update(False) - elif value == "togethercomputer/GPT-NeoXT-Chat-Base-20B": - typical_p = typical_p.update(visible=False) - top_p = 
top_p.update(value=0.25, visible=True) - top_k = top_k.update(value=50, visible=True) - temperature = temperature.update(value=0.6, visible=True) - repetition_penalty = repetition_penalty.update(value=1.01, visible=True) - watermark = watermark.update(False) - disclaimer = disclaimer.update(visible=True) - else: - typical_p = typical_p.update(visible=False) - top_p = top_p.update(value=0.95, visible=True) - top_k = top_k.update(value=4, visible=True) - temperature = temperature.update(value=0.5, visible=True) - repetition_penalty = repetition_penalty.update(value=1.03, visible=True) - watermark = watermark.update(True) - disclaimer = disclaimer.update(visible=False) - return ( - disclaimer, - typical_p, - top_p, - top_k, - temperature, - repetition_penalty, - watermark, - ) - - -title = """
                  Large Language Model Chat API
                  """ -description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form: - -``` -User: -Assistant: -User: -Assistant: -... -``` - -In this app, you can explore the outputs of multiple LLMs when prompted in this way. -""" - -text_generation_inference = """ - -""" - -openchat_disclaimer = """ -
                  Check out the official OpenChatKit feedback app for the full experience.
                  -""" - -with gr.Blocks( - css="""#col_container {margin-left: auto; margin-right: auto;} - #chatbot {height: 520px; overflow: auto;}""" -) as demo: - gr.HTML(title) - gr.Markdown(text_generation_inference, visible=True) - with gr.Column(elem_id="col_container"): - model = gr.Radio( - value="OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5", - choices=[ - "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5", - "OpenAssistant/oasst-sft-1-pythia-12b", - # "togethercomputer/GPT-NeoXT-Chat-Base-20B", - "google/flan-t5-xxl", - "google/flan-ul2", - "bigscience/bloom", - "bigscience/bloomz", - "EleutherAI/gpt-neox-20b", - ], - label="Model", - interactive=True, - ) - - chatbot = gr.Chatbot(elem_id="chatbot") - inputs = gr.Textbox( - placeholder="Hi there!", label="Type an input and press Enter" - ) - disclaimer = gr.Markdown(openchat_disclaimer, visible=False) - state = gr.State([]) - b1 = gr.Button() - - with gr.Accordion("Parameters", open=False): - typical_p = gr.Slider( - minimum=-0, - maximum=1.0, - value=0.2, - step=0.05, - interactive=True, - label="Typical P mass", - ) - top_p = gr.Slider( - minimum=-0, - maximum=1.0, - value=0.25, - step=0.05, - interactive=True, - label="Top-p (nucleus sampling)", - visible=False, - ) - temperature = gr.Slider( - minimum=-0, - maximum=5.0, - value=0.6, - step=0.1, - interactive=True, - label="Temperature", - visible=False, - ) - top_k = gr.Slider( - minimum=1, - maximum=50, - value=50, - step=1, - interactive=True, - label="Top-k", - visible=False, - ) - repetition_penalty = gr.Slider( - minimum=0.1, - maximum=3.0, - value=1.03, - step=0.01, - interactive=True, - label="Repetition Penalty", - visible=False, - ) - watermark = gr.Checkbox(value=False, label="Text watermarking") - - model.change( - lambda value: radio_on_change( - value, - disclaimer, - typical_p, - top_p, - top_k, - temperature, - repetition_penalty, - watermark, - ), - inputs=model, - outputs=[ - disclaimer, - typical_p, - top_p, - top_k, - temperature, - repetition_penalty, - watermark, - ], - ) - - inputs.submit( - predict, - [ - model, - inputs, - typical_p, - top_p, - temperature, - top_k, - repetition_penalty, - watermark, - chatbot, - state, - ], - [chatbot, state], - api_name = "chat_text", - ) - b1.click( - predict, - [ - model, - inputs, - typical_p, - top_p, - temperature, - top_k, - repetition_penalty, - watermark, - chatbot, - state, - ], - [chatbot, state], - api_name = "chat_button", - ) - b1.click(reset_textbox, [], [inputs] ,api_name = "button") - inputs.submit(reset_textbox, [], [inputs] , api_name = "text") - - gr.Markdown(description) - demo.queue(concurrency_count=16).launch(debug=True) diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/uninstall.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/uninstall.py deleted file mode 100644 index f198fc313ff57929d95d36216e3e6ecec3877673..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/uninstall.py +++ /dev/null @@ -1,113 +0,0 @@ -import logging -from optparse import Values -from typing import List - -from pip._vendor.packaging.utils import canonicalize_name - -from pip._internal.cli import cmdoptions -from pip._internal.cli.base_command import Command -from pip._internal.cli.req_command import SessionCommandMixin, warn_if_run_as_root -from pip._internal.cli.status_codes import SUCCESS -from pip._internal.exceptions 
import InstallationError -from pip._internal.req import parse_requirements -from pip._internal.req.constructors import ( - install_req_from_line, - install_req_from_parsed_requirement, -) -from pip._internal.utils.misc import ( - check_externally_managed, - protect_pip_from_modification_on_windows, -) - -logger = logging.getLogger(__name__) - - -class UninstallCommand(Command, SessionCommandMixin): - """ - Uninstall packages. - - pip is able to uninstall most installed packages. Known exceptions are: - - - Pure distutils packages installed with ``python setup.py install``, which - leave behind no metadata to determine what files were installed. - - Script wrappers installed by ``python setup.py develop``. - """ - - usage = """ - %prog [options] ... - %prog [options] -r ...""" - - def add_options(self) -> None: - self.cmd_opts.add_option( - "-r", - "--requirement", - dest="requirements", - action="append", - default=[], - metavar="file", - help=( - "Uninstall all the packages listed in the given requirements " - "file. This option can be used multiple times." - ), - ) - self.cmd_opts.add_option( - "-y", - "--yes", - dest="yes", - action="store_true", - help="Don't ask for confirmation of uninstall deletions.", - ) - self.cmd_opts.add_option(cmdoptions.root_user_action()) - self.cmd_opts.add_option(cmdoptions.override_externally_managed()) - self.parser.insert_option_group(0, self.cmd_opts) - - def run(self, options: Values, args: List[str]) -> int: - session = self.get_default_session(options) - - reqs_to_uninstall = {} - for name in args: - req = install_req_from_line( - name, - isolated=options.isolated_mode, - ) - if req.name: - reqs_to_uninstall[canonicalize_name(req.name)] = req - else: - logger.warning( - "Invalid requirement: %r ignored -" - " the uninstall command expects named" - " requirements.", - name, - ) - for filename in options.requirements: - for parsed_req in parse_requirements( - filename, options=options, session=session - ): - req = install_req_from_parsed_requirement( - parsed_req, isolated=options.isolated_mode - ) - if req.name: - reqs_to_uninstall[canonicalize_name(req.name)] = req - if not reqs_to_uninstall: - raise InstallationError( - f"You must give at least one requirement to {self.name} (see " - f'"pip help {self.name}")' - ) - - if not options.override_externally_managed: - check_externally_managed() - - protect_pip_from_modification_on_windows( - modifying_pip="pip" in reqs_to_uninstall - ) - - for req in reqs_to_uninstall.values(): - uninstall_pathset = req.uninstall( - auto_confirm=options.yes, - verbose=self.verbosity > 0, - ) - if uninstall_pathset: - uninstall_pathset.commit() - if options.root_user_action == "warn": - warn_if_run_as_root() - return SUCCESS diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/models/mask_rcnn_c4.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/models/mask_rcnn_c4.py deleted file mode 100644 index a3dcf8be42a39c6e5f6e76e3ab23adeccb33085d..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/models/mask_rcnn_c4.py +++ /dev/null @@ -1,88 +0,0 @@ -from detectron2.config import LazyCall as L -from detectron2.layers import ShapeSpec -from detectron2.modeling.meta_arch import GeneralizedRCNN -from detectron2.modeling.anchor_generator import DefaultAnchorGenerator -from detectron2.modeling.backbone import BasicStem, BottleneckBlock, ResNet -from 
detectron2.modeling.box_regression import Box2BoxTransform -from detectron2.modeling.matcher import Matcher -from detectron2.modeling.poolers import ROIPooler -from detectron2.modeling.proposal_generator import RPN, StandardRPNHead -from detectron2.modeling.roi_heads import ( - FastRCNNOutputLayers, - MaskRCNNConvUpsampleHead, - Res5ROIHeads, -) - -model = L(GeneralizedRCNN)( - backbone=L(ResNet)( - stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"), - stages=L(ResNet.make_default_stages)( - depth=50, - stride_in_1x1=True, - norm="FrozenBN", - ), - out_features=["res4"], - ), - proposal_generator=L(RPN)( - in_features=["res4"], - head=L(StandardRPNHead)(in_channels=1024, num_anchors=15), - anchor_generator=L(DefaultAnchorGenerator)( - sizes=[[32, 64, 128, 256, 512]], - aspect_ratios=[0.5, 1.0, 2.0], - strides=[16], - offset=0.0, - ), - anchor_matcher=L(Matcher)( - thresholds=[0.3, 0.7], labels=[0, -1, 1], allow_low_quality_matches=True - ), - box2box_transform=L(Box2BoxTransform)(weights=[1.0, 1.0, 1.0, 1.0]), - batch_size_per_image=256, - positive_fraction=0.5, - pre_nms_topk=(12000, 6000), - post_nms_topk=(2000, 1000), - nms_thresh=0.7, - ), - roi_heads=L(Res5ROIHeads)( - num_classes=80, - batch_size_per_image=512, - positive_fraction=0.25, - proposal_matcher=L(Matcher)( - thresholds=[0.5], labels=[0, 1], allow_low_quality_matches=False - ), - in_features=["res4"], - pooler=L(ROIPooler)( - output_size=14, - scales=(1.0 / 16,), - sampling_ratio=0, - pooler_type="ROIAlignV2", - ), - res5=L(ResNet.make_stage)( - block_class=BottleneckBlock, - num_blocks=3, - stride_per_block=[2, 1, 1], - in_channels=1024, - bottleneck_channels=512, - out_channels=2048, - norm="FrozenBN", - stride_in_1x1=True, - ), - box_predictor=L(FastRCNNOutputLayers)( - input_shape=L(ShapeSpec)(channels="${...res5.out_channels}", height=1, width=1), - test_score_thresh=0.05, - box2box_transform=L(Box2BoxTransform)(weights=(10, 10, 5, 5)), - num_classes="${..num_classes}", - ), - mask_head=L(MaskRCNNConvUpsampleHead)( - input_shape=L(ShapeSpec)( - channels="${...res5.out_channels}", - width="${...pooler.output_size}", - height="${...pooler.output_size}", - ), - num_classes="${..num_classes}", - conv_dims=[256], - ), - ), - pixel_mean=[103.530, 116.280, 123.675], - pixel_std=[1.0, 1.0, 1.0], - input_format="BGR", -) diff --git a/spaces/BAAI/AltDiffusion-m9/css_and_js.py b/spaces/BAAI/AltDiffusion-m9/css_and_js.py deleted file mode 100644 index 64e6dd5e703281d0b11e7a9ef7f05a264fb2341c..0000000000000000000000000000000000000000 --- a/spaces/BAAI/AltDiffusion-m9/css_and_js.py +++ /dev/null @@ -1,92 +0,0 @@ -from os import path -import json - - -def readTextFile(*args): - dir = path.dirname(__file__) - entry = path.join(dir, *args) - with open(entry, "r", encoding="utf8") as f: - data = f.read() - return data - - -def css(opt): - styling = readTextFile("css", "styles.css") - # TODO: @altryne restore this before merge - if not opt.no_progressbar_hiding: - styling += readTextFile("css", "no_progress_bar.css") - return styling - - -def js(opt): - data = readTextFile("js", "index.js") - data = "(z) => {" + data + "; return z ?? 
[] }" - return data - - -# TODO : @altryne fix this to the new JS format -js_copy_txt2img_output = "(x) => {navigator.clipboard.writeText(document.querySelector('gradio-app').shadowRoot.querySelector('#highlight .textfield').textContent.replace(/\s+/g,' ').replace(/: /g,':'))}" - - - -js_parse_prompt =""" -(txt2img_prompt, txt2img_width, txt2img_height, txt2img_steps, txt2img_seed, txt2img_batch_count, txt2img_cfg) => { - -const prompt_input = document.querySelector('gradio-app').shadowRoot.querySelector('#prompt_input [data-testid="textbox"]'); -const multiline = document.querySelector('gradio-app').shadowRoot.querySelector('#submit_on_enter label:nth-child(2)') -if (prompt_input.scrollWidth > prompt_input.clientWidth + 10 ) { - multiline.click(); -} - - -let height_match = /(?:-h|-H|--height|height)[ :]?(?\d+) /.exec(txt2img_prompt); -if (height_match) { - txt2img_height = Math.round(height_match.groups.height / 64) * 64; - txt2img_prompt = txt2img_prompt.replace(height_match[0], ''); -} -let width_match = /(?:-w|-W|--width|width)[ :]?(?\d+) /.exec(txt2img_prompt); -if (width_match) { - txt2img_width = Math.round(width_match.groups.width / 64) * 64; - txt2img_prompt = txt2img_prompt.replace(width_match[0], ''); -} -let steps_match = /(?:-s|--steps|steps)[ :]?(?\d+) /.exec(txt2img_prompt); -if (steps_match) { - txt2img_steps = steps_match.groups.steps.trim(); - txt2img_prompt = txt2img_prompt.replace(steps_match[0], ''); -} -let seed_match = /(?:-S|--seed|seed)[ :]?(?\d+) /.exec(txt2img_prompt); -if (seed_match) { - txt2img_seed = seed_match.groups.seed; - txt2img_prompt = txt2img_prompt.replace(seed_match[0], ''); -} -let batch_count_match = /(?:-n|-N|--number|number)[ :]?(?\d+) /.exec(txt2img_prompt); -if (batch_count_match) { - txt2img_batch_count = batch_count_match.groups.batch_count; - txt2img_prompt = txt2img_prompt.replace(batch_count_match[0], ''); -} -let cfg_scale_match = /(?:-c|-C|--cfg-scale|cfg_scale|cfg)[ :]?(?\d\.?\d+?) /.exec(txt2img_prompt); -if (cfg_scale_match) { - txt2img_cfg = parseFloat(cfg_scale_match.groups.cfgscale).toFixed(1); - txt2img_prompt = txt2img_prompt.replace(cfg_scale_match[0], ''); -} -let sampler_match = /(?:-A|--sampler|sampler)[ :]?(?\w+) /.exec(txt2img_prompt); -if (sampler_match) { - - txt2img_prompt = txt2img_prompt.replace(sampler_match[0], ''); -} - -return [txt2img_prompt, parseInt(txt2img_width), parseInt(txt2img_height), parseInt(txt2img_steps), txt2img_seed, parseInt(txt2img_batch_count), parseFloat(txt2img_cfg)]; -} -""" - - -# Wrap the typical SD method call into async closure for ease of use -# Supplies the js function with a params object -# That includes all the passed arguments and input from Gradio: x -# ATTENTION: x is an array of values of all components passed to your -# python event handler -# Example call in Gradio component's event handler (pass the result to _js arg): -# _js=call_JS("myJsMethod", arg1="string", arg2=100, arg3=[]) -def call_JS(sd_method, **kwargs): - param_str = json.dumps(kwargs) - return f"async (...x) => {{ return await SD.{sd_method}({{ x, ...{param_str} }}) ?? 
[]; }}" diff --git a/spaces/Banbri/zcvzcv/src/components/ui/select.tsx b/spaces/Banbri/zcvzcv/src/components/ui/select.tsx deleted file mode 100644 index 704239634b359b9e680dab25275e205e72579f82..0000000000000000000000000000000000000000 --- a/spaces/Banbri/zcvzcv/src/components/ui/select.tsx +++ /dev/null @@ -1,121 +0,0 @@ -"use client" - -import * as React from "react" -import * as SelectPrimitive from "@radix-ui/react-select" -import { Check, ChevronDown } from "lucide-react" - -import { cn } from "@/lib/utils" - -const Select = SelectPrimitive.Root - -const SelectGroup = SelectPrimitive.Group - -const SelectValue = SelectPrimitive.Value - -const SelectTrigger = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - {children} - - - - -)) -SelectTrigger.displayName = SelectPrimitive.Trigger.displayName - -const SelectContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, position = "popper", ...props }, ref) => ( - - - - {children} - - - -)) -SelectContent.displayName = SelectPrimitive.Content.displayName - -const SelectLabel = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SelectLabel.displayName = SelectPrimitive.Label.displayName - -const SelectItem = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - - - - - - {children} - -)) -SelectItem.displayName = SelectPrimitive.Item.displayName - -const SelectSeparator = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -SelectSeparator.displayName = SelectPrimitive.Separator.displayName - -export { - Select, - SelectGroup, - SelectValue, - SelectTrigger, - SelectContent, - SelectLabel, - SelectItem, - SelectSeparator, -} diff --git a/spaces/Bart92/RVC_HF/lib/infer_pack/transforms.py b/spaces/Bart92/RVC_HF/lib/infer_pack/transforms.py deleted file mode 100644 index a11f799e023864ff7082c1f49c0cc18351a13b47..0000000000000000000000000000000000000000 --- a/spaces/Bart92/RVC_HF/lib/infer_pack/transforms.py +++ /dev/null @@ -1,209 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = {"tails": tails, "tail_bound": tail_bound} - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1 - - -def unconstrained_rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - 
unnormalized_derivatives, - inverse=False, - tails="linear", - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == "linear": - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError("{} tails are not implemented.".format(tails)) - - ( - outputs[inside_interval_mask], - logabsdet[inside_interval_mask], - ) = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, - right=tail_bound, - bottom=-tail_bound, - top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - ) - - return outputs, logabsdet - - -def rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0.0, - right=1.0, - bottom=0.0, - top=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError("Input to a transform is not within its domain") - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError("Minimal bin width too large for the number of bins") - if min_bin_height * num_bins > 1.0: - raise ValueError("Minimal bin height too large for the number of bins") - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = 
heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) + input_heights * (input_delta - input_derivatives) - b = input_heights * input_derivatives - (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) - c = -input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * ( - input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta - ) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/Benson/text-generation/Examples/Boleto Para El Grupo 2 2022.md b/spaces/Benson/text-generation/Examples/Boleto Para El Grupo 2 2022.md deleted file mode 100644 index 897dbec66d79f89c0669e09daa177f5dacca8c02..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Boleto Para El Grupo 2 2022.md +++ /dev/null @@ -1,234 +0,0 @@ -
                  -

How to Download the Hall Ticket for the Group 2 Exam 2022

                  -

If you are aspiring to join the Tamil Nadu Public Service Commission (TNPSC) in various Group 2 service posts, then you should be aware of the TNPSC Group 2 exam 2022. This exam is conducted every year by the TNPSC to select candidates for different posts such as Revenue Assistant, Assistant Section Officer, Audit Officer, etc. The exam consists of two stages, Preliminary and Main, followed by an interview for some posts. The preliminary exam was held on 21 May 2022, and the main exam is scheduled for 25 February 2023.

                  -

hall ticket for group 2 2022


                  Download –––––>>> https://bltlly.com/2v6JLq



                  -

One of the most important things you need to do before appearing for the exam is to download your hall ticket, or admit card. The hall ticket is a document that contains your personal and exam details, such as your name, roll number, exam centre, date and time, etc. It also serves as proof of your identity and eligibility for the exam. Without the hall ticket, you will not be allowed to enter the exam hall or take the exam.

                  -

So how can you download your hall ticket for the TNPSC Group 2 exam 2022? In this article, we will tell you everything you need to know about it. We will also give you useful information about the exam pattern, syllabus, hall ticket details, instructions, and frequently asked questions. Read on to find out more.

                  -

Introduction

                  -

What is the TNPSC Group 2 exam and why is it important?

                  -

The TNPSC Group 2 exam is a competitive examination conducted by the Tamil Nadu Public Service Commission (TNPSC) to recruit candidates for various posts in the Group 2 services. These posts include both interview posts and non-interview posts, such as Deputy Commercial Tax Officer, Assistant Section Officer, Audit Officer, etc. The vacancies for these posts are estimated at around 5,529 this year.

                  - -

What are the important dates and events for the exam?

                  -

The TNPSC has released the official notification for the Group 2 exam 2022 on its website www.tnpsc.gov.in. The notification contains all the details about the eligibility criteria, application process, selection process, reservation policy, etc. Interested and eligible candidates can apply online through One Time Registration (OTR) on or before 23 March 2022.

                  -

The important dates and events for the TNPSC Group 2 exam 2022 are as follows:

                  - -
| Event | Date |
| --- | --- |
| Notification release date | 23 February 2022 |
| Last date to apply online | 23 March 2022 |
| Preliminary exam date | 21 May 2022 |
| Preliminary exam result date | June 2022 (tentative) |
| Main exam date | 25 February 2023 |
| Main exam result date | April 2023 (tentative) |
| Interview date | May 2023 (tentative) |
| Final result date | June 2023 (tentative) |
| Stage | Subject | Marks |
| --- | --- | --- |
| Preliminary | General Studies (Degree Standard) | 150 |
| Preliminary | Aptitude and Mental Ability Test (SSLC Standard) | 50 |
| Main | Tamil or English (SSLC Standard) | 100 |
| Main | General Studies (Degree Standard) | 150 |
| Interview and Record | – | 40 |
| Stage | Marks | Negative marking | Duration |
| --- | --- | --- | --- |
| Preliminary | – | 1/3 | 3 hours |
| Main | 300 | No | 3 hours |
| Interview | 40 | No | – |
      - - - - -
- [Sample output video: An astronaut riding a horse.]
- [Sample output video: Darth vader surfing in waves.]
      - -### `cerspense/zeroscope_v2_576w` & `cerspense/zeroscope_v2_XL` - -Zeroscope are watermark-free model and have been trained on specific sizes such as `576x320` and `1024x576`. -One should first generate a video using the lower resolution checkpoint [`cerspense/zeroscope_v2_576w`](https://huggingface.co/cerspense/zeroscope_v2_576w) with [`TextToVideoSDPipeline`], -which can then be upscaled using [`VideoToVideoSDPipeline`] and [`cerspense/zeroscope_v2_XL`](https://huggingface.co/cerspense/zeroscope_v2_XL). - - -```py -import torch -from diffusers import DiffusionPipeline -from diffusers.utils import export_to_video - -pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16) -pipe.enable_model_cpu_offload() - -# memory optimization -pipe.unet.enable_forward_chunking(chunk_size=1, dim=1) -pipe.enable_vae_slicing() - -prompt = "Darth Vader surfing a wave" -video_frames = pipe(prompt, num_frames=24).frames -video_path = export_to_video(video_frames) -video_path -``` - -Now the video can be upscaled: - -```py -pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16) -pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) -pipe.enable_model_cpu_offload() - -# memory optimization -pipe.unet.enable_forward_chunking(chunk_size=1, dim=1) -pipe.enable_vae_slicing() - -video = [Image.fromarray(frame).resize((1024, 576)) for frame in video_frames] - -video_frames = pipe(prompt, video=video, strength=0.6).frames -video_path = export_to_video(video_frames) -video_path -``` - -Here are some sample outputs: - - - - - -
- [Sample output video: Darth vader surfing in waves.]
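Note that the upscaling snippet above uses `DPMSolverMultistepScheduler` and `PIL.Image` without showing their imports. The following is a minimal, self-contained sketch of the same two-stage Zeroscope flow; the model IDs, the `strength=0.6` value, and the 1024x576 resize come from the text above, while the explicit `height`/`width` arguments and the surrounding glue are illustrative assumptions rather than the canonical recipe.

```py
import torch
from PIL import Image
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video

prompt = "Darth Vader surfing a wave"

# Stage 1: generate low-resolution frames with the 576w checkpoint
# (trained on 576x320), using the memory optimizations shown above.
base = DiffusionPipeline.from_pretrained(
    "cerspense/zeroscope_v2_576w", torch_dtype=torch.float16
)
base.enable_model_cpu_offload()
base.unet.enable_forward_chunking(chunk_size=1, dim=1)
base.enable_vae_slicing()
low_res_frames = base(prompt, num_frames=24, height=320, width=576).frames

# Stage 2: resize the frames to 1024x576 and refine them with the XL
# checkpoint via video-to-video at strength 0.6.
xl = DiffusionPipeline.from_pretrained(
    "cerspense/zeroscope_v2_XL", torch_dtype=torch.float16
)
xl.scheduler = DPMSolverMultistepScheduler.from_config(xl.scheduler.config)
xl.enable_model_cpu_offload()
xl.unet.enable_forward_chunking(chunk_size=1, dim=1)
xl.enable_vae_slicing()

video = [Image.fromarray(f).resize((1024, 576)) for f in low_res_frames]
frames = xl(prompt, video=video, strength=0.6).frames

print(export_to_video(frames))  # path to the upscaled .mp4
```

With CPU offload, forward chunking, and VAE slicing enabled on both pipelines, this flow should fit on a single consumer GPU, at the cost of slower inference.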
      - -## TextToVideoSDPipeline -[[autodoc]] TextToVideoSDPipeline - - all - - __call__ - -## VideoToVideoSDPipeline -[[autodoc]] VideoToVideoSDPipeline - - all - - __call__ - -## TextToVideoSDPipelineOutput -[[autodoc]] pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput \ No newline at end of file diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/community/ddim_noise_comparative_analysis.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/community/ddim_noise_comparative_analysis.py deleted file mode 100644 index e0784fc5138a7b3765c870f59a06d3a609ee3a01..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/community/ddim_noise_comparative_analysis.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import List, Optional, Tuple, Union - -import PIL -import torch -from torchvision import transforms - -from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput -from diffusers.schedulers import DDIMScheduler -from diffusers.utils.torch_utils import randn_tensor - - -trans = transforms.Compose( - [ - transforms.Resize((256, 256)), - transforms.ToTensor(), - transforms.Normalize([0.5], [0.5]), - ] -) - - -def preprocess(image): - if isinstance(image, torch.Tensor): - return image - elif isinstance(image, PIL.Image.Image): - image = [image] - - image = [trans(img.convert("RGB")) for img in image] - image = torch.stack(image) - return image - - -class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline): - r""" - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - Parameters: - unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of - [`DDPMScheduler`], or [`DDIMScheduler`]. 
- """ - - def __init__(self, unet, scheduler): - super().__init__() - - # make sure scheduler can always be converted to DDIM - scheduler = DDIMScheduler.from_config(scheduler.config) - - self.register_modules(unet=unet, scheduler=scheduler) - - def check_inputs(self, strength): - if strength < 0 or strength > 1: - raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") - - def get_timesteps(self, num_inference_steps, strength, device): - # get the original timestep using init_timestep - init_timestep = min(int(num_inference_steps * strength), num_inference_steps) - - t_start = max(num_inference_steps - init_timestep, 0) - timesteps = self.scheduler.timesteps[t_start:] - - return timesteps, num_inference_steps - t_start - - def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None): - if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): - raise ValueError( - f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" - ) - - init_latents = image.to(device=device, dtype=dtype) - - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." - ) - - shape = init_latents.shape - noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) - - # get latents - print("add noise to latents at timestep", timestep) - init_latents = self.scheduler.add_noise(init_latents, noise, timestep) - latents = init_latents - - return latents - - @torch.no_grad() - def __call__( - self, - image: Union[torch.FloatTensor, PIL.Image.Image] = None, - strength: float = 0.8, - batch_size: int = 1, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - eta: float = 0.0, - num_inference_steps: int = 50, - use_clipped_model_output: Optional[bool] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - ) -> Union[ImagePipelineOutput, Tuple]: - r""" - Args: - image (`torch.FloatTensor` or `PIL.Image.Image`): - `Image`, or tensor representing an image batch, that will be used as the starting point for the - process. - strength (`float`, *optional*, defaults to 0.8): - Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` - will be used as a starting point, adding more noise to it the larger the `strength`. The number of - denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will - be maximum and the denoising process will run for the full number of iterations specified in - `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. - batch_size (`int`, *optional*, defaults to 1): - The number of images to generate. - generator (`torch.Generator`, *optional*): - One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) - to make generation deterministic. - eta (`float`, *optional*, defaults to 0.0): - The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM). - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. 
- use_clipped_model_output (`bool`, *optional*, defaults to `None`): - if `True` or `False`, see documentation for `DDIMScheduler.step`. If `None`, nothing is passed - downstream to the scheduler. So use `None` for schedulers which don't support this argument. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. - - Returns: - [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is - True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images. - """ - # 1. Check inputs. Raise error if not correct - self.check_inputs(strength) - - # 2. Preprocess image - image = preprocess(image) - - # 3. set timesteps - self.scheduler.set_timesteps(num_inference_steps, device=self.device) - timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device) - latent_timestep = timesteps[:1].repeat(batch_size) - - # 4. Prepare latent variables - latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator) - image = latents - - # 5. Denoising loop - for t in self.progress_bar(timesteps): - # 1. predict noise model_output - model_output = self.unet(image, t).sample - - # 2. predict previous mean of image x_t-1 and add variance depending on eta - # eta corresponds to η in paper and should be between [0, 1] - # do x_t -> x_t-1 - image = self.scheduler.step( - model_output, - t, - image, - eta=eta, - use_clipped_model_output=use_clipped_model_output, - generator=generator, - ).prev_sample - - image = (image / 2 + 0.5).clamp(0, 1) - image = image.cpu().permute(0, 2, 3, 1).numpy() - if output_type == "pil": - image = self.numpy_to_pil(image) - - if not return_dict: - return (image, latent_timestep.item()) - - return ImagePipelineOutput(images=image) diff --git a/spaces/parkermini/general/app.py b/spaces/parkermini/general/app.py deleted file mode 100644 index 98371552914d7edfc42f032dc60436f26642c4d6..0000000000000000000000000000000000000000 --- a/spaces/parkermini/general/app.py +++ /dev/null @@ -1,25 +0,0 @@ -__all__ = ['is_laetiporus', 'learn', 'classify_image', 'categories', 'image', 'label', 'examples', 'intf'] - -# Cell -from fastai.vision.all import * -import gradio as gr - -def is_laetiporus(x): return x[0].isupper() - -# Cell -learn = load_learner('model.pkl') - -# Cell -categories = ('Laetiporus','Amanita phalloides','Pacific Golden Chanterelle','White Chanterelle','Morchella importuna','Leccinum scabrum','Tricholoma murrillianum') - -def classify_image(img): - pred,idx,probs = learn.predict(img) - return dict(zip(categories, map(float,probs))) - -# Cell -image = gr.inputs.Image(shape=(192, 192)) -label = gr.outputs.Label() -examples = ['deathcap.jpg', 'chicken_of_the_woods.jpg', 'chanterelle.jpg'] - -intf = gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples) -intf.launch(inline=False) \ No newline at end of file diff --git a/spaces/perilli/tortoise-tts-v2/models/xtransformers.py b/spaces/perilli/tortoise-tts-v2/models/xtransformers.py deleted file mode 100644 index 70e8e63d3c7069306536331e0ae1421ed6ab89cd..0000000000000000000000000000000000000000 --- a/spaces/perilli/tortoise-tts-v2/models/xtransformers.py +++ /dev/null @@ 
-1,1253 +0,0 @@ -import functools -import math -import torch -from torch import nn, einsum -import torch.nn.functional as F -from functools import partial -from inspect import isfunction -from collections import namedtuple - -from einops import rearrange, repeat, reduce -from einops.layers.torch import Rearrange - -from entmax import entmax15 -from torch.utils.checkpoint import checkpoint - -DEFAULT_DIM_HEAD = 64 - -Intermediates = namedtuple('Intermediates', [ - 'pre_softmax_attn', - 'post_softmax_attn' -]) - -LayerIntermediates = namedtuple('Intermediates', [ - 'hiddens', - 'attn_intermediates', - 'past_key_values', -]) - - -# helpers - -def exists(val): - return val is not None - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def cast_tuple(val, depth): - return val if isinstance(val, tuple) else (val,) * depth - - -class always(): - def __init__(self, val): - self.val = val - - def __call__(self, *args, **kwargs): - return self.val - - -class not_equals(): - def __init__(self, val): - self.val = val - - def __call__(self, x, *args, **kwargs): - return x != self.val - - -class equals(): - def __init__(self, val): - self.val = val - - def __call__(self, x, *args, **kwargs): - return x == self.val - - -def max_neg_value(tensor): - return -torch.finfo(tensor.dtype).max - - -def l2norm(t): - return F.normalize(t, p=2, dim=-1) - - -# init helpers - -def init_zero_(layer): - nn.init.constant_(layer.weight, 0.) - if exists(layer.bias): - nn.init.constant_(layer.bias, 0.) - - -# keyword argument helpers - -def pick_and_pop(keys, d): - values = list(map(lambda key: d.pop(key), keys)) - return dict(zip(keys, values)) - - -def group_dict_by_key(cond, d): - return_val = [dict(), dict()] - for key in d.keys(): - match = bool(cond(key)) - ind = int(not match) - return_val[ind][key] = d[key] - return (*return_val,) - - -def string_begins_with(prefix, str): - return str.startswith(prefix) - - -def group_by_key_prefix(prefix, d): - return group_dict_by_key(partial(string_begins_with, prefix), d) - - -def groupby_prefix_and_trim(prefix, d): - kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d) - kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) - return kwargs_without_prefix, kwargs - - -# activations - -class ReluSquared(nn.Module): - def forward(self, x): - return F.relu(x) ** 2 - - -# positional embeddings - -class AbsolutePositionalEmbedding(nn.Module): - def __init__(self, dim, max_seq_len): - super().__init__() - self.scale = dim ** -0.5 - self.emb = nn.Embedding(max_seq_len, dim) - - def forward(self, x): - n = torch.arange(x.shape[1], device=x.device) - pos_emb = self.emb(n) - pos_emb = rearrange(pos_emb, 'n d -> () n d') - return pos_emb * self.scale - - -class FixedPositionalEmbedding(nn.Module): - def __init__(self, dim): - super().__init__() - inv_freq = 1. 
/ (10000 ** (torch.arange(0, dim, 2).float() / dim)) - self.register_buffer('inv_freq', inv_freq) - - def forward(self, x, seq_dim=1, offset=0): - t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset - sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq) - emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1) - return rearrange(emb, 'n d -> () n d') - - -class RelativePositionBias(nn.Module): - def __init__(self, scale, causal=False, num_buckets=32, max_distance=128, heads=8): - super().__init__() - self.scale = scale - self.causal = causal - self.num_buckets = num_buckets - self.max_distance = max_distance - self.relative_attention_bias = nn.Embedding(num_buckets, heads) - - @staticmethod - def _relative_position_bucket(relative_position, causal=True, num_buckets=32, max_distance=128): - ret = 0 - n = -relative_position - if not causal: - num_buckets //= 2 - ret += (n < 0).long() * num_buckets - n = torch.abs(n) - else: - n = torch.max(n, torch.zeros_like(n)) - - max_exact = num_buckets // 2 - is_small = n < max_exact - - val_if_large = max_exact + ( - torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) - ).long() - val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) - - ret += torch.where(is_small, n, val_if_large) - return ret - - def forward(self, qk_dots): - i, j, device = *qk_dots.shape[-2:], qk_dots.device - q_pos = torch.arange(i, dtype=torch.long, device=device) - k_pos = torch.arange(j, dtype=torch.long, device=device) - rel_pos = k_pos[None, :] - q_pos[:, None] - rp_bucket = self._relative_position_bucket(rel_pos, causal=self.causal, num_buckets=self.num_buckets, - max_distance=self.max_distance) - values = self.relative_attention_bias(rp_bucket) - bias = rearrange(values, 'i j h -> () h i j') - return qk_dots + (bias * self.scale) - - -class AlibiPositionalBias(nn.Module): - def __init__(self, heads, **kwargs): - super().__init__() - self.heads = heads - slopes = torch.Tensor(self._get_slopes(heads)) - slopes = rearrange(slopes, 'h -> () h () ()') - self.register_buffer('slopes', slopes, persistent=False) - self.register_buffer('bias', None, persistent=False) - - @staticmethod - def _get_slopes(heads): - def get_slopes_power_of_2(n): - start = (2 ** (-2 ** -(math.log2(n) - 3))) - ratio = start - return [start * ratio ** i for i in range(n)] - - if math.log2(heads).is_integer(): - return get_slopes_power_of_2(heads) - - closest_power_of_2 = 2 ** math.floor(math.log2(heads)) - return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][ - :heads - closest_power_of_2] - - def forward(self, qk_dots): - h, i, j, device = *qk_dots.shape[-3:], qk_dots.device - - if exists(self.bias) and self.bias.shape[-1] >= j: - return qk_dots + self.bias[..., :j] - - bias = torch.arange(j, device=device) - bias = rearrange(bias, 'j -> () () () j') - bias = bias * self.slopes - - num_heads_unalibied = h - bias.shape[1] - bias = F.pad(bias, (0, 0, 0, 0, 0, num_heads_unalibied)) - - self.register_buffer('bias', bias, persistent=False) - return qk_dots + self.bias - - -class LearnedAlibiPositionalBias(AlibiPositionalBias): - def __init__(self, heads, bidirectional=False): - super().__init__(heads) - los_slopes = torch.log(self.slopes) - self.learned_logslopes = nn.Parameter(los_slopes) - - self.bidirectional = bidirectional - if self.bidirectional: - self.learned_logslopes_future = nn.Parameter(los_slopes) - - def forward(self, 
qk_dots): - h, i, j, device = *qk_dots.shape[-3:], qk_dots.device - - def get_slopes(param): - return F.pad(param.exp(), (0, 0, 0, 0, 0, h - param.shape[1])) - - if exists(self.bias) and self.bias.shape[-1] >= j: - bias = self.bias[..., :i, :j] - else: - i_arange = torch.arange(i, device=device) - j_arange = torch.arange(j, device=device) - bias = rearrange(j_arange, 'j -> 1 1 1 j') - rearrange(i_arange, 'i -> 1 1 i 1') - self.register_buffer('bias', bias, persistent=False) - - if self.bidirectional: - past_slopes = get_slopes(self.learned_logslopes) - future_slopes = get_slopes(self.learned_logslopes_future) - bias = torch.tril(bias * past_slopes) + torch.triu(bias * future_slopes) - else: - slopes = get_slopes(self.learned_logslopes) - bias = bias * slopes - - return qk_dots + bias - - -class RotaryEmbedding(nn.Module): - def __init__(self, dim): - super().__init__() - inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim)) - self.register_buffer('inv_freq', inv_freq) - - def forward(self, max_seq_len, device): - t = torch.arange(max_seq_len, device=device).type_as(self.inv_freq) - freqs = torch.einsum('i , j -> i j', t, self.inv_freq) - emb = torch.cat((freqs, freqs), dim=-1) - return rearrange(emb, 'n d -> () () n d') - - -def rotate_half(x): - x = rearrange(x, '... (j d) -> ... j d', j=2) - x1, x2 = x.unbind(dim=-2) - return torch.cat((-x2, x1), dim=-1) - - -def apply_rotary_pos_emb(t, freqs): - seq_len = t.shape[-2] - freqs = freqs[:, :, -seq_len:] - return (t * freqs.cos()) + (rotate_half(t) * freqs.sin()) - - -# norms - -class Scale(nn.Module): - def __init__(self, value, fn): - super().__init__() - self.value = value - self.fn = fn - - def forward(self, x, **kwargs): - out = self.fn(x, **kwargs) - scale_fn = lambda t: t * self.value - - if not isinstance(out, tuple): - return scale_fn(out) - - return (scale_fn(out[0]), *out[1:]) - - -class Rezero(nn.Module): - def __init__(self, fn): - super().__init__() - self.fn = fn - self.g = nn.Parameter(torch.zeros(1)) - - def forward(self, x, **kwargs): - out = self.fn(x, **kwargs) - rezero_fn = lambda t: t * self.g - - if not isinstance(out, tuple): - return rezero_fn(out) - - return (rezero_fn(out[0]), *out[1:]) - - -class ScaleNorm(nn.Module): - def __init__(self, dim, eps=1e-5): - super().__init__() - self.scale = dim ** -0.5 - self.eps = eps - self.g = nn.Parameter(torch.ones(1)) - - def forward(self, x): - norm = torch.norm(x, dim=-1, keepdim=True) * self.scale - return x / norm.clamp(min=self.eps) * self.g - - -class RMSNorm(nn.Module): - def __init__(self, dim, eps=1e-8): - super().__init__() - self.scale = dim ** -0.5 - self.eps = eps - self.g = nn.Parameter(torch.ones(dim)) - - def forward(self, x): - norm = torch.norm(x, dim=-1, keepdim=True) * self.scale - return x / norm.clamp(min=self.eps) * self.g - - -class RMSScaleShiftNorm(nn.Module): - def __init__(self, dim, eps=1e-8): - super().__init__() - self.scale = dim ** -0.5 - self.eps = eps - self.g = nn.Parameter(torch.ones(dim)) - self.scale_shift_process = nn.Linear(dim * 2, dim * 2) - - def forward(self, x, norm_scale_shift_inp): - norm = torch.norm(x, dim=-1, keepdim=True) * self.scale - norm = x / norm.clamp(min=self.eps) * self.g - - ss_emb = self.scale_shift_process(norm_scale_shift_inp) - scale, shift = torch.chunk(ss_emb, 2, dim=1) - h = norm * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1) - return h - - -# residual and residual gates - -class Residual(nn.Module): - def __init__(self, dim, scale_residual=False): - super().__init__() - self.residual_scale = 
nn.Parameter(torch.ones(dim)) if scale_residual else None - - def forward(self, x, residual): - if exists(self.residual_scale): - residual = residual * self.residual_scale - - return x + residual - - -class GRUGating(nn.Module): - def __init__(self, dim, scale_residual=False): - super().__init__() - self.gru = nn.GRUCell(dim, dim) - self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None - - def forward(self, x, residual): - if exists(self.residual_scale): - residual = residual * self.residual_scale - - gated_output = self.gru( - rearrange(x, 'b n d -> (b n) d'), - rearrange(residual, 'b n d -> (b n) d') - ) - - return gated_output.reshape_as(x) - - -# token shifting - -def shift(t, amount, mask=None): - if amount == 0: - return t - - if exists(mask): - t = t.masked_fill(~mask[..., None], 0.) - - return F.pad(t, (0, 0, amount, -amount), value=0.) - - -class ShiftTokens(nn.Module): - def __init__(self, shifts, fn): - super().__init__() - self.fn = fn - self.shifts = tuple(shifts) - - def forward(self, x, **kwargs): - mask = kwargs.get('mask', None) - shifts = self.shifts - segments = len(shifts) - feats_per_shift = x.shape[-1] // segments - splitted = x.split(feats_per_shift, dim=-1) - segments_to_shift, rest = splitted[:segments], splitted[segments:] - segments_to_shift = list(map(lambda args: shift(*args, mask=mask), zip(segments_to_shift, shifts))) - x = torch.cat((*segments_to_shift, *rest), dim=-1) - return self.fn(x, **kwargs) - - -# feedforward - -class GLU(nn.Module): - def __init__(self, dim_in, dim_out, activation): - super().__init__() - self.act = activation - self.proj = nn.Linear(dim_in, dim_out * 2) - - def forward(self, x): - x, gate = self.proj(x).chunk(2, dim=-1) - return x * self.act(gate) - - -class FeedForward(nn.Module): - def __init__( - self, - dim, - dim_out=None, - mult=4, - glu=False, - relu_squared=False, - post_act_ln=False, - dropout=0., - zero_init_output=False - ): - super().__init__() - inner_dim = int(dim * mult) - dim_out = default(dim_out, dim) - activation = ReluSquared() if relu_squared else nn.GELU() - - project_in = nn.Sequential( - nn.Linear(dim, inner_dim), - activation - ) if not glu else GLU(dim, inner_dim, activation) - - self.net = nn.Sequential( - project_in, - nn.LayerNorm(inner_dim) if post_act_ln else nn.Identity(), - nn.Dropout(dropout), - nn.Linear(inner_dim, dim_out) - ) - - # init last linear layer to 0 - if zero_init_output: - init_zero_(self.net[-1]) - - def forward(self, x): - return self.net(x) - - -# attention. 
- -class Attention(nn.Module): - def __init__( - self, - dim, - dim_head=DEFAULT_DIM_HEAD, - heads=8, - causal=False, - talking_heads=False, - head_scale=False, - collab_heads=False, - collab_compression=.3, - sparse_topk=None, - use_entmax15=False, - num_mem_kv=0, - dropout=0., - on_attn=False, - gate_values=False, - zero_init_output=False, - max_attend_past=None, - qk_norm=False, - scale_init_value=None, - rel_pos_bias=False, - rel_pos_num_buckets=32, - rel_pos_max_distance=128, - ): - super().__init__() - self.scale = dim_head ** -0.5 - - self.heads = heads - self.causal = causal - self.max_attend_past = max_attend_past - - qk_dim = v_dim = dim_head * heads - - # collaborative heads - self.collab_heads = collab_heads - if self.collab_heads: - qk_dim = int(collab_compression * qk_dim) - self.collab_mixing = nn.Parameter(torch.randn(heads, qk_dim)) - - self.to_q = nn.Linear(dim, qk_dim, bias=False) - self.to_k = nn.Linear(dim, qk_dim, bias=False) - self.to_v = nn.Linear(dim, v_dim, bias=False) - - self.dropout = nn.Dropout(dropout) - - # add GLU gating for aggregated values, from alphafold2 - self.to_v_gate = None - if gate_values: - self.to_v_gate = nn.Linear(dim, v_dim) - nn.init.constant_(self.to_v_gate.weight, 0) - nn.init.constant_(self.to_v_gate.bias, 1) - - # cosine sim attention - self.qk_norm = qk_norm - if qk_norm: - scale_init_value = default(scale_init_value, - -3) # if not provided, initialize as though it were sequence length of 1024 - self.scale = nn.Parameter(torch.ones(1, heads, 1, 1) * scale_init_value) - - # talking heads - self.talking_heads = talking_heads - if talking_heads: - self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads)) - self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads)) - - # head scaling - self.head_scale = head_scale - if head_scale: - self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1)) - - # explicit topk sparse attention - self.sparse_topk = sparse_topk - - # entmax - self.attn_fn = entmax15 if use_entmax15 else F.softmax - - # add memory key / values - self.num_mem_kv = num_mem_kv - if num_mem_kv > 0: - self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) - self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) - - # attention on attention - self.attn_on_attn = on_attn - self.to_out = nn.Sequential(nn.Linear(v_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(v_dim, dim) - - self.rel_pos_bias = rel_pos_bias - if rel_pos_bias: - assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance' - self.rel_pos = RelativePositionBias(scale=dim_head ** 0.5, causal=causal, heads=heads, - num_buckets=rel_pos_num_buckets, max_distance=rel_pos_max_distance) - - # init output projection 0 - if zero_init_output: - init_zero_(self.to_out) - - def forward( - self, - x, - context=None, - mask=None, - context_mask=None, - attn_mask=None, - sinusoidal_emb=None, - rotary_pos_emb=None, - prev_attn=None, - mem=None, - layer_past=None, - ): - b, n, _, h, talking_heads, collab_heads, head_scale, scale, device, has_context = *x.shape, self.heads, self.talking_heads, self.collab_heads, self.head_scale, self.scale, x.device, exists( - context) - kv_input = default(context, x) - - q_input = x - k_input = kv_input - v_input = kv_input - - if exists(mem): - k_input = torch.cat((mem, k_input), dim=-2) - v_input = torch.cat((mem, v_input), dim=-2) - - if exists(sinusoidal_emb): - # in shortformer, the query would start at a position 
offset depending on the past cached memory - offset = k_input.shape[-2] - q_input.shape[-2] - q_input = q_input + sinusoidal_emb(q_input, offset=offset) - k_input = k_input + sinusoidal_emb(k_input) - - q = self.to_q(q_input) - k = self.to_k(k_input) - v = self.to_v(v_input) - - if not collab_heads: - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v)) - else: - q = einsum('b i d, h d -> b h i d', q, self.collab_mixing) - k = rearrange(k, 'b n d -> b () n d') - v = rearrange(v, 'b n (h d) -> b h n d', h=h) - - if layer_past is not None: - past_key, past_value = layer_past - k = torch.cat([past_key, k], dim=-2) - v = torch.cat([past_value, v], dim=-2) - k_cache = k - v_cache = v - - if exists(rotary_pos_emb) and not has_context: - l = rotary_pos_emb.shape[-1] - (ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v)) - ql, kl, vl = map(lambda t: apply_rotary_pos_emb(t, rotary_pos_emb), (ql, kl, vl)) - q, k, v = map(lambda t: torch.cat(t, dim=-1), ((ql, qr), (kl, kr), (vl, vr))) - - input_mask = None - if any(map(exists, (mask, context_mask))): - q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool()) - k_mask = q_mask if not exists(context) else context_mask - k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool()) - q_mask = rearrange(q_mask, 'b i -> b () i ()') - k_mask = rearrange(k_mask, 'b j -> b () () j') - input_mask = q_mask * k_mask - - if self.num_mem_kv > 0: - mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v)) - k = torch.cat((mem_k, k), dim=-2) - v = torch.cat((mem_v, v), dim=-2) - if exists(input_mask): - input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True) - - if collab_heads: - k = k.expand(-1, h, -1, -1) - - if self.qk_norm: - q, k = map(l2norm, (q, k)) - scale = 1 / (self.scale.exp().clamp(min=1e-2)) - - dots = einsum('b h i d, b h j d -> b h i j', q, k) * scale - mask_value = max_neg_value(dots) - - if exists(prev_attn): - dots = dots + prev_attn - - pre_softmax_attn = dots.clone() - - if talking_heads: - dots = einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous() - - if self.rel_pos_bias: - dots = self.rel_pos(dots) - - if exists(input_mask): - dots.masked_fill_(~input_mask, mask_value) - del input_mask - - if exists(attn_mask): - assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4' - if attn_mask.ndim == 2: - attn_mask = rearrange(attn_mask, 'i j -> () () i j') - elif attn_mask.ndim == 3: - attn_mask = rearrange(attn_mask, 'h i j -> () h i j') - dots.masked_fill_(~attn_mask, mask_value) - - if exists(self.max_attend_past): - i, j = dots.shape[-2:] - range_q = torch.arange(j - i, j, device=device) - range_k = torch.arange(j, device=device) - dist = rearrange(range_q, 'i -> () () i ()') - rearrange(range_k, 'j -> () () () j') - mask = dist > self.max_attend_past - dots.masked_fill_(mask, mask_value) - del mask - - if self.causal: - i, j = dots.shape[-2:] - r = torch.arange(i, device=device) - mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j') - mask = F.pad(mask, (j - i, 0), value=False) - dots.masked_fill_(mask, mask_value) - del mask - - if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]: - top, _ = dots.topk(self.sparse_topk, dim=-1) - vk = top[..., -1].unsqueeze(-1).expand_as(dots) - mask = dots < vk - dots.masked_fill_(mask, mask_value) - del mask - - attn = self.attn_fn(dots, dim=-1) - 
post_softmax_attn = attn.clone() - - attn = self.dropout(attn) - - if talking_heads: - attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous() - - out = einsum('b h i j, b h j d -> b h i d', attn, v) - - if head_scale: - out = out * self.head_scale_params - - out = rearrange(out, 'b h n d -> b n (h d)') - - if exists(self.to_v_gate): - gates = self.to_v_gate(x) - out = out * gates.sigmoid() - - intermediates = Intermediates( - pre_softmax_attn=pre_softmax_attn, - post_softmax_attn=post_softmax_attn - ) - - return self.to_out(out), intermediates, k_cache, v_cache - - -class AttentionLayers(nn.Module): - def __init__( - self, - dim, - depth, - heads=8, - causal=False, - cross_attend=False, - only_cross=False, - use_scalenorm=False, - use_rms_scaleshift_norm=False, - use_rmsnorm=False, - use_rezero=False, - alibi_pos_bias=False, - alibi_num_heads=None, - alibi_learned=False, - position_infused_attn=False, - rotary_pos_emb=False, - rotary_emb_dim=None, - custom_layers=None, - sandwich_coef=None, - par_ratio=None, - residual_attn=False, - cross_residual_attn=False, - macaron=False, - pre_norm=True, - gate_residual=False, - scale_residual=False, - shift_tokens=0, - sandwich_norm=False, - use_qk_norm_attn=False, - qk_norm_attn_seq_len=None, - zero_init_branch_output=False, - **kwargs - ): - super().__init__() - ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) - attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs) - - dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD) - - self.dim = dim - self.depth = depth - self.layers = nn.ModuleList([]) - self.causal = causal - - rel_pos_bias = 'rel_pos_bias' in attn_kwargs - self.has_pos_emb = position_infused_attn or rel_pos_bias or rotary_pos_emb - self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None - - rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32) - self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim) if rotary_pos_emb else None - - assert not ( - alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both' - - if alibi_pos_bias: - alibi_num_heads = default(alibi_num_heads, heads) - assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than the total number of heads' - alibi_pos_klass = LearnedAlibiPositionalBias if alibi_learned or not causal else AlibiPositionalBias - self.rel_pos = alibi_pos_klass(heads=alibi_num_heads, bidirectional=not causal) - else: - self.rel_pos = None - - assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm' - self.pre_norm = pre_norm - self.sandwich_norm = sandwich_norm - - self.residual_attn = residual_attn - self.cross_residual_attn = cross_residual_attn - self.cross_attend = cross_attend - - norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm - norm_class = RMSNorm if use_rmsnorm else norm_class - norm_class = RMSScaleShiftNorm if use_rms_scaleshift_norm else norm_class - norm_fn = partial(norm_class, dim) - - norm_fn = nn.Identity if use_rezero else norm_fn - branch_fn = Rezero if use_rezero else None - - if cross_attend and not only_cross: - default_block = ('a', 'c', 'f') - elif cross_attend and only_cross: - default_block = ('c', 'f') - else: - default_block = ('a', 'f') - - if macaron: - default_block = ('f',) + default_block - - # qk normalization - - if use_qk_norm_attn: - attn_scale_init_value = -math.log(math.log2(qk_norm_attn_seq_len ** 2 - qk_norm_attn_seq_len)) if exists( - 
qk_norm_attn_seq_len) else None - attn_kwargs = {**attn_kwargs, 'qk_norm': True, 'scale_init_value': attn_scale_init_value} - - # zero init - - if zero_init_branch_output: - attn_kwargs = {**attn_kwargs, 'zero_init_output': True} - ff_kwargs = {**ff_kwargs, 'zero_init_output': True} - - # calculate layer block order - - if exists(custom_layers): - layer_types = custom_layers - elif exists(par_ratio): - par_depth = depth * len(default_block) - assert 1 < par_ratio <= par_depth, 'par ratio out of range' - default_block = tuple(filter(not_equals('f'), default_block)) - par_attn = par_depth // par_ratio - depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper - par_width = (depth_cut + depth_cut // par_attn) // par_attn - assert len(default_block) <= par_width, 'default block is too large for par_ratio' - par_block = default_block + ('f',) * (par_width - len(default_block)) - par_head = par_block * par_attn - layer_types = par_head + ('f',) * (par_depth - len(par_head)) - elif exists(sandwich_coef): - assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth' - layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef - else: - layer_types = default_block * depth - - self.layer_types = layer_types - self.num_attn_layers = len(list(filter(equals('a'), layer_types))) - - # calculate token shifting - - shift_tokens = cast_tuple(shift_tokens, len(layer_types)) - - # iterate and construct layers - - for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)): - is_last_layer = ind == (len(self.layer_types) - 1) - - if layer_type == 'a': - layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs) - elif layer_type == 'c': - layer = Attention(dim, heads=heads, **attn_kwargs) - elif layer_type == 'f': - layer = FeedForward(dim, **ff_kwargs) - layer = layer if not macaron else Scale(0.5, layer) - else: - raise Exception(f'invalid layer type {layer_type}') - - if layer_shift_tokens > 0: - shift_range_upper = layer_shift_tokens + 1 - shift_range_lower = -layer_shift_tokens if not causal else 0 - layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer) - - if exists(branch_fn): - layer = branch_fn(layer) - - residual_fn = GRUGating if gate_residual else Residual - residual = residual_fn(dim, scale_residual=scale_residual) - - layer_uses_qk_norm = use_qk_norm_attn and layer_type in ('a', 'c') - - pre_branch_norm = norm_fn() if pre_norm and not layer_uses_qk_norm else None - post_branch_norm = norm_fn() if sandwich_norm or layer_uses_qk_norm else None - post_main_norm = norm_fn() if not pre_norm and not is_last_layer else None - - norms = nn.ModuleList([ - pre_branch_norm, - post_branch_norm, - post_main_norm - ]) - - self.layers.append(nn.ModuleList([ - norms, - layer, - residual - ])) - - def forward( - self, - x, - context=None, - full_context=None, # for passing a list of hidden states from an encoder - mask=None, - context_mask=None, - attn_mask=None, - mems=None, - return_hiddens=False, - norm_scale_shift_inp=None, - past_key_values=None, - expected_seq_len=None, - ): - - assert not (self.cross_attend ^ (exists(context) or exists( - full_context))), 'context must be passed in if cross_attend is set to True' - assert context is None or full_context is None, 'only one of full_context or context can be provided' - - hiddens = [] - intermediates = [] - prev_attn = None - prev_cross_attn = None - - mems = mems.copy() if 
exists(mems) else [None] * self.num_attn_layers - norm_args = {} - if exists(norm_scale_shift_inp): - norm_args['norm_scale_shift_inp'] = norm_scale_shift_inp - - rotary_pos_emb = None - if exists(self.rotary_pos_emb): - if not self.training and self.causal: - assert expected_seq_len is not None, "To decode a transformer with rotary embeddings, you must specify an `expected_seq_len`" - elif expected_seq_len is None: - expected_seq_len = 0 - seq_len = x.shape[1] - if past_key_values is not None: - seq_len += past_key_values[0][0].shape[-2] - max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + seq_len, mems)) + [expected_seq_len]) - rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device) - - present_key_values = [] - cross_attn_count = 0 - for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)): - if layer_type == 'a': - layer_mem = mems.pop(0) if mems else None - - residual = x - - pre_branch_norm, post_branch_norm, post_main_norm = norm - - if exists(pre_branch_norm): - x = pre_branch_norm(x, **norm_args) - - if layer_type == 'a' or layer_type == 'c': - if past_key_values is not None: - layer_kv = past_key_values.pop(0) - layer_past = tuple(s.to(x.device) for s in layer_kv) - else: - layer_past = None - - if layer_type == 'a': - out, inter, k, v = checkpoint(block, x, None, mask, None, attn_mask, self.pia_pos_emb, rotary_pos_emb, - prev_attn, layer_mem, layer_past) - elif layer_type == 'c': - if exists(full_context): - out, inter, k, v = checkpoint(block, x, full_context[cross_attn_count], mask, context_mask, None, None, - None, prev_attn, None, layer_past) - else: - out, inter, k, v = checkpoint(block, x, context, mask, context_mask, None, None, None, prev_attn, None, layer_past) - elif layer_type == 'f': - out = checkpoint(block, x) - - if layer_type == 'a' or layer_type == 'c' and present_key_values is not None: - present_key_values.append((k.detach(), v.detach())) - - if exists(post_branch_norm): - out = post_branch_norm(out, **norm_args) - - x = residual_fn(out, residual) - - if layer_type in ('a', 'c'): - intermediates.append(inter) - - if layer_type == 'a' and self.residual_attn: - prev_attn = inter.pre_softmax_attn - elif layer_type == 'c' and self.cross_residual_attn: - prev_cross_attn = inter.pre_softmax_attn - - if exists(post_main_norm): - x = post_main_norm(x, **norm_args) - - if layer_type == 'c': - cross_attn_count += 1 - - if layer_type == 'f': - hiddens.append(x) - - if return_hiddens: - intermediates = LayerIntermediates( - hiddens=hiddens, - attn_intermediates=intermediates, - past_key_values=present_key_values - ) - - return x, intermediates - - return x - - -class Encoder(AttentionLayers): - def __init__(self, **kwargs): - assert 'causal' not in kwargs, 'cannot set causality on encoder' - super().__init__(causal=False, **kwargs) - - -class Decoder(AttentionLayers): - def __init__(self, **kwargs): - assert 'causal' not in kwargs, 'cannot set causality on decoder' - super().__init__(causal=True, **kwargs) - - -class CrossAttender(AttentionLayers): - def __init__(self, **kwargs): - super().__init__(cross_attend=True, only_cross=True, **kwargs) - - -class ViTransformerWrapper(nn.Module): - def __init__( - self, - *, - image_size, - patch_size, - attn_layers, - num_classes=None, - dropout=0., - emb_dropout=0. 
- ): - super().__init__() - assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder' - assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size' - dim = attn_layers.dim - num_patches = (image_size // patch_size) ** 2 - patch_dim = 3 * patch_size ** 2 - - self.patch_size = patch_size - - self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim)) - self.patch_to_embedding = nn.Linear(patch_dim, dim) - self.cls_token = nn.Parameter(torch.randn(1, 1, dim)) - self.dropout = nn.Dropout(emb_dropout) - - self.attn_layers = attn_layers - self.norm = nn.LayerNorm(dim) - self.mlp_head = FeedForward(dim, dim_out=num_classes, dropout=dropout) if exists(num_classes) else None - - def forward( - self, - img, - return_embeddings=False - ): - p = self.patch_size - - x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=p, p2=p) - x = self.patch_to_embedding(x) - b, n, _ = x.shape - - cls_tokens = repeat(self.cls_token, '() n d -> b n d', b=b) - x = torch.cat((cls_tokens, x), dim=1) - x = x + self.pos_embedding[:, :(n + 1)] - x = self.dropout(x) - - x = self.attn_layers(x) - x = self.norm(x) - - if not exists(self.mlp_head) or return_embeddings: - return x - - return self.mlp_head(x[:, 0]) - - -class TransformerWrapper(nn.Module): - def __init__( - self, - *, - num_tokens, - max_seq_len, - attn_layers, - emb_dim=None, - max_mem_len=0., - shift_mem_down=0, - emb_dropout=0., - num_memory_tokens=None, - tie_embedding=False, - use_pos_emb=True - ): - super().__init__() - assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' - - dim = attn_layers.dim - emb_dim = default(emb_dim, dim) - - self.max_seq_len = max_seq_len - self.max_mem_len = max_mem_len - self.shift_mem_down = shift_mem_down - - self.token_emb = nn.Embedding(num_tokens, emb_dim) - self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if ( - use_pos_emb and not attn_layers.has_pos_emb) else always(0) - self.emb_dropout = nn.Dropout(emb_dropout) - - self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() - self.attn_layers = attn_layers - self.norm = nn.LayerNorm(dim) - - self.init_() - - self.to_logits = nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t() - - # memory tokens (like [cls]) from Memory Transformers paper - num_memory_tokens = default(num_memory_tokens, 0) - self.num_memory_tokens = num_memory_tokens - if num_memory_tokens > 0: - self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) - - def init_(self): - nn.init.kaiming_normal_(self.token_emb.weight) - - def forward( - self, - x, - return_embeddings=False, - mask=None, - return_hiddens=False, - return_attn=False, - mems=None, - use_cache=False, - **kwargs - ): - b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens - x = self.token_emb(x) - x = x + self.pos_emb(x) - x = self.emb_dropout(x) - - x = self.project_emb(x) - - if num_mem > 0: - mem = repeat(self.memory_tokens, 'n d -> b n d', b=b) - x = torch.cat((mem, x), dim=1) - - # auto-handle masking after appending memory tokens - if exists(mask): - mask = F.pad(mask, (num_mem, 0), value=True) - - if self.shift_mem_down and exists(mems): - mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:] - mems = [*mems_r, *mems_l] - - x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs) - x = self.norm(x) - - mem, x = x[:, :num_mem], x[:, num_mem:] - - out = 
self.to_logits(x) if not return_embeddings else x - - if return_hiddens: - hiddens = intermediates.hiddens - return out, hiddens - - res = [out] - if return_attn: - attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) - res.append(attn_maps) - if use_cache: - res.append(intermediates.past_key_values) - - if len(res) > 1: - return tuple(res) - return res[0] - - -class ContinuousTransformerWrapper(nn.Module): - def __init__( - self, - *, - max_seq_len, - attn_layers, - dim_in=None, - dim_out=None, - emb_dim=None, - emb_dropout=0., - use_pos_emb=True - ): - super().__init__() - assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' - - dim = attn_layers.dim - - self.max_seq_len = max_seq_len - - self.pos_emb = AbsolutePositionalEmbedding(dim, max_seq_len) if ( - use_pos_emb and not attn_layers.has_pos_emb) else always(0) - self.emb_dropout = nn.Dropout(emb_dropout) - - self.project_in = nn.Linear(dim_in, dim) if exists(dim_in) else nn.Identity() - - self.attn_layers = attn_layers - self.norm = nn.LayerNorm(dim) - - self.project_out = nn.Linear(dim, dim_out) if exists(dim_out) else nn.Identity() - - def forward( - self, - x, - return_embeddings=False, - mask=None, - return_attn=False, - mems=None, - use_cache=False, - **kwargs - ): - b, n, _, device = *x.shape, x.device - - x = self.project_in(x) - x = x + self.pos_emb(x) - x = self.emb_dropout(x) - - x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs) - x = self.norm(x) - - out = self.project_out(x) if not return_embeddings else x - - res = [out] - if return_attn: - attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) - res.append(attn_maps) - if use_cache: - res.append(intermediates.past_key_values) - - if len(res) > 1: - return tuple(res) - return res[0] - diff --git a/spaces/phyloforfun/VoucherVision/vouchervision/component_detector/utils_convert_to_TorchScript.py b/spaces/phyloforfun/VoucherVision/vouchervision/component_detector/utils_convert_to_TorchScript.py deleted file mode 100644 index 663d59fdc49559acb1ce557acfc3b9598445d448..0000000000000000000000000000000000000000 --- a/spaces/phyloforfun/VoucherVision/vouchervision/component_detector/utils_convert_to_TorchScript.py +++ /dev/null @@ -1,81 +0,0 @@ -import torch -import torch.jit -import sys -import os - -def check_if_jit_model(model_path): - try: - # Try to load the model as a TorchScript model - torch.jit.load(model_path) - return True, "The model is in TorchScript format." - except Exception as e: - return False, f"The model is not in TorchScript format. Error: {str(e)}" - -# Append the path to your system path (Note: Don't use quotes) -sys.path.append("D:/Dropbox/FieldPrism/fieldprism/yolov5/weights") - - -# Check if the path exists and you have read permissions -model_path = "D:/Dropbox/FieldPrism/fieldprism/yolov5/weights_nano/best.pt" - - -if not os.path.exists(model_path): - print(f"Model path {model_path} does not exist. 
Please check the path.") - sys.exit(1) - - -is_jit_model, message = check_if_jit_model(model_path) -print(message) - - -# Load your custom model -# Load your custom model -try: - loaded_dict = torch.load(model_path, map_location='cuda:0') # Adjust the device as needed - - # Assuming your model architecture is defined in a class called `YourModelClass` - # model = YourModelClass() - # model.load_state_dict(loaded_dict['model']) # If the model is saved as a state dictionary - - # If the model is saved entirely (architecture + weights) - if isinstance(loaded_dict, dict) and 'model' in loaded_dict: - model = loaded_dict['model'] - else: - model = loaded_dict # Assuming the loaded object is a model - - # Switch the model to evaluation mode - model.eval() - - # Create a dummy input that matches the input dimensions of the model - dummy_input = torch.randn(1, 3, 512, 512).half().to('cuda:0') - - - # Try tracing the model - try: - scripted_module = torch.jit.trace(model, dummy_input) - print("Model traced successfully.") - except Exception as e: - print(f"An error occurred during tracing. Error: {str(e)}") - - # Try scripting the model - try: - scripted_module = torch.jit.script(model) - save_path = "D:/Dropbox/FieldPrism/fieldprism/yolov5/weights/fieldprism_v_1_0.pt" - scripted_module.save(save_path) - print(f"Saved TorchScript model to {save_path}") - except Exception as e: - print(f"An error occurred during the scripting. Error: {str(e)}") -except Exception as e: - print(f"Error in loading the model: {e}") - sys.exit(1) - - -# Script the model -try: - scripted_module = torch.jit.script(model) - # Save the TorchScript model (make sure you have write permissions for the directory) - save_path = "D:/Dropbox/FieldPrism/fieldprism/yolov5/weights/fieldprism_v_1_0.pt" - scripted_module.save(save_path) - print(f"Saved TorchScript model to {save_path}") -except Exception as e: - print(f"An error occurred during the scripting. Error: {str(e)}") diff --git a/spaces/pknez/face-swap-docker/clip/__init__.py b/spaces/pknez/face-swap-docker/clip/__init__.py deleted file mode 100644 index dcc5619538c0f7c782508bdbd9587259d805e0d9..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/clip/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .clip import * diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/util/connection.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/util/connection.py deleted file mode 100644 index 6af1138f260e4eaaa0aa242f7f50b918a283b49f..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/util/connection.py +++ /dev/null @@ -1,149 +0,0 @@ -from __future__ import absolute_import - -import socket - -from ..contrib import _appengine_environ -from ..exceptions import LocationParseError -from ..packages import six -from .wait import NoWayToWaitForSocketError, wait_for_read - - -def is_connection_dropped(conn): # Platform-specific - """ - Returns True if the connection is dropped and should be closed. - - :param conn: - :class:`http.client.HTTPConnection` object. - - Note: For platforms like AppEngine, this will always return ``False`` to - let the platform handle connection recycling transparently for us. - """ - sock = getattr(conn, "sock", False) - if sock is False: # Platform-specific: AppEngine - return False - if sock is None: # Connection already closed (such as by httplib). 
- return True - try: - # Returns True if readable, which here means it's been dropped - return wait_for_read(sock, timeout=0.0) - except NoWayToWaitForSocketError: # Platform-specific: AppEngine - return False - - -# This function is copied from socket.py in the Python 2.7 standard -# library test suite. Added to its signature is only `socket_options`. -# One additional modification is that we avoid binding to IPv6 servers -# discovered in DNS if the system doesn't have IPv6 functionality. -def create_connection( - address, - timeout=socket._GLOBAL_DEFAULT_TIMEOUT, - source_address=None, - socket_options=None, -): - """Connect to *address* and return the socket object. - - Convenience function. Connect to *address* (a 2-tuple ``(host, - port)``) and return the socket object. Passing the optional - *timeout* parameter will set the timeout on the socket instance - before attempting to connect. If no *timeout* is supplied, the - global default timeout setting returned by :func:`socket.getdefaulttimeout` - is used. If *source_address* is set it must be a tuple of (host, port) - for the socket to bind as a source address before making the connection. - An host of '' or port 0 tells the OS to use the default. - """ - - host, port = address - if host.startswith("["): - host = host.strip("[]") - err = None - - # Using the value from allowed_gai_family() in the context of getaddrinfo lets - # us select whether to work with IPv4 DNS records, IPv6 records, or both. - # The original create_connection function always returns all records. - family = allowed_gai_family() - - try: - host.encode("idna") - except UnicodeError: - return six.raise_from( - LocationParseError(u"'%s', label empty or too long" % host), None - ) - - for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): - af, socktype, proto, canonname, sa = res - sock = None - try: - sock = socket.socket(af, socktype, proto) - - # If provided, set socket level options before connecting. - _set_socket_options(sock, socket_options) - - if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: - sock.settimeout(timeout) - if source_address: - sock.bind(source_address) - sock.connect(sa) - return sock - - except socket.error as e: - err = e - if sock is not None: - sock.close() - sock = None - - if err is not None: - raise err - - raise socket.error("getaddrinfo returns an empty list") - - -def _set_socket_options(sock, options): - if options is None: - return - - for opt in options: - sock.setsockopt(*opt) - - -def allowed_gai_family(): - """This function is designed to work in the context of - getaddrinfo, where family=socket.AF_UNSPEC is the default and - will perform a DNS search for both IPv6 and IPv4 records.""" - - family = socket.AF_INET - if HAS_IPV6: - family = socket.AF_UNSPEC - return family - - -def _has_ipv6(host): - """Returns True if the system can bind an IPv6 address.""" - sock = None - has_ipv6 = False - - # App Engine doesn't support IPV6 sockets and actually has a quota on the - # number of sockets that can be used, so just early out here instead of - # creating a socket needlessly. - # See https://github.com/urllib3/urllib3/issues/1446 - if _appengine_environ.is_appengine_sandbox(): - return False - - if socket.has_ipv6: - # has_ipv6 returns true if cPython was compiled with IPv6 support. - # It does not tell us if the system has IPv6 support enabled. To - # determine that we must bind to an IPv6 address. 
- # https://github.com/urllib3/urllib3/pull/611 - # https://bugs.python.org/issue658327 - try: - sock = socket.socket(socket.AF_INET6) - sock.bind((host, 0)) - has_ipv6 = True - except Exception: - pass - - if sock: - sock.close() - return has_ipv6 - - -HAS_IPV6 = _has_ipv6("::1") diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/wheel/vendored/packaging/markers.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/wheel/vendored/packaging/markers.py deleted file mode 100644 index 68369c981b1e9e9a49640fc15e69d06633ed21ff..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/wheel/vendored/packaging/markers.py +++ /dev/null @@ -1,245 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import operator -import os -import platform -import sys -from typing import Any, Callable, Dict, List, Optional, Tuple, Union - -from ._parser import MarkerAtom, MarkerList, Op, Value, Variable, parse_marker -from ._tokenizer import ParserSyntaxError -from .specifiers import InvalidSpecifier, Specifier -from .utils import canonicalize_name - -__all__ = [ - "InvalidMarker", - "UndefinedComparison", - "UndefinedEnvironmentName", - "Marker", - "default_environment", -] - -Operator = Callable[[str, str], bool] - - -class InvalidMarker(ValueError): - """ - An invalid marker was found, users should refer to PEP 508. - """ - - -class UndefinedComparison(ValueError): - """ - An invalid operation was attempted on a value that doesn't support it. - """ - - -class UndefinedEnvironmentName(ValueError): - """ - A name was attempted to be used that does not exist inside of the - environment. - """ - - -def _normalize_extra_values(results: Any) -> Any: - """ - Normalize extra values. - """ - if isinstance(results[0], tuple): - lhs, op, rhs = results[0] - if isinstance(lhs, Variable) and lhs.value == "extra": - normalized_extra = canonicalize_name(rhs.value) - rhs = Value(normalized_extra) - elif isinstance(rhs, Variable) and rhs.value == "extra": - normalized_extra = canonicalize_name(lhs.value) - lhs = Value(normalized_extra) - results[0] = lhs, op, rhs - return results - - -def _format_marker( - marker: Union[List[str], MarkerAtom, str], first: Optional[bool] = True -) -> str: - - assert isinstance(marker, (list, tuple, str)) - - # Sometimes we have a structure like [[...]] which is a single item list - # where the single item is itself it's own list. In that case we want skip - # the rest of this function so that we don't get extraneous () on the - # outside. 
- if ( - isinstance(marker, list) - and len(marker) == 1 - and isinstance(marker[0], (list, tuple)) - ): - return _format_marker(marker[0]) - - if isinstance(marker, list): - inner = (_format_marker(m, first=False) for m in marker) - if first: - return " ".join(inner) - else: - return "(" + " ".join(inner) + ")" - elif isinstance(marker, tuple): - return " ".join([m.serialize() for m in marker]) - else: - return marker - - -_operators: Dict[str, Operator] = { - "in": lambda lhs, rhs: lhs in rhs, - "not in": lambda lhs, rhs: lhs not in rhs, - "<": operator.lt, - "<=": operator.le, - "==": operator.eq, - "!=": operator.ne, - ">=": operator.ge, - ">": operator.gt, -} - - -def _eval_op(lhs: str, op: Op, rhs: str) -> bool: - try: - spec = Specifier("".join([op.serialize(), rhs])) - except InvalidSpecifier: - pass - else: - return spec.contains(lhs, prereleases=True) - - oper: Optional[Operator] = _operators.get(op.serialize()) - if oper is None: - raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.") - - return oper(lhs, rhs) - - -def _normalize(*values: str, key: str) -> Tuple[str, ...]: - # PEP 685 – Comparison of extra names for optional distribution dependencies - # https://peps.python.org/pep-0685/ - # > When comparing extra names, tools MUST normalize the names being - # > compared using the semantics outlined in PEP 503 for names - if key == "extra": - return tuple(canonicalize_name(v) for v in values) - - # other environment markers don't have such standards - return values - - -def _evaluate_markers(markers: MarkerList, environment: Dict[str, str]) -> bool: - groups: List[List[bool]] = [[]] - - for marker in markers: - assert isinstance(marker, (list, tuple, str)) - - if isinstance(marker, list): - groups[-1].append(_evaluate_markers(marker, environment)) - elif isinstance(marker, tuple): - lhs, op, rhs = marker - - if isinstance(lhs, Variable): - environment_key = lhs.value - lhs_value = environment[environment_key] - rhs_value = rhs.value - else: - lhs_value = lhs.value - environment_key = rhs.value - rhs_value = environment[environment_key] - - lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key) - groups[-1].append(_eval_op(lhs_value, op, rhs_value)) - else: - assert marker in ["and", "or"] - if marker == "or": - groups.append([]) - - return any(all(item) for item in groups) - - -def format_full_version(info: "sys._version_info") -> str: - version = "{0.major}.{0.minor}.{0.micro}".format(info) - kind = info.releaselevel - if kind != "final": - version += kind[0] + str(info.serial) - return version - - -def default_environment() -> Dict[str, str]: - iver = format_full_version(sys.implementation.version) - implementation_name = sys.implementation.name - return { - "implementation_name": implementation_name, - "implementation_version": iver, - "os_name": os.name, - "platform_machine": platform.machine(), - "platform_release": platform.release(), - "platform_system": platform.system(), - "platform_version": platform.version(), - "python_full_version": platform.python_version(), - "platform_python_implementation": platform.python_implementation(), - "python_version": ".".join(platform.python_version_tuple()[:2]), - "sys_platform": sys.platform, - } - - -class Marker: - def __init__(self, marker: str) -> None: - # Note: We create a Marker object without calling this constructor in - # packaging.requirements.Requirement. If any additional logic is - # added here, make sure to mirror/adapt Requirement. 
- try: - self._markers = _normalize_extra_values(parse_marker(marker)) - # The attribute `_markers` can be described in terms of a recursive type: - # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]] - # - # For example, the following expression: - # python_version > "3.6" or (python_version == "3.6" and os_name == "unix") - # - # is parsed into: - # [ - # (<Variable('python_version')>, <Op('>')>, <Value('3.6')>), - # 'and', - # [ - # (<Variable('python_version')>, <Op('==')>, <Value('3.6')>), - # 'or', - # (<Variable('os_name')>, <Op('==')>, <Value('unix')>) - # ] - # ] - except ParserSyntaxError as e: - raise InvalidMarker(str(e)) from e - - def __str__(self) -> str: - return _format_marker(self._markers) - - def __repr__(self) -> str: - return f"<Marker('{self}')>" - - def __hash__(self) -> int: - return hash((self.__class__.__name__, str(self))) - - def __eq__(self, other: Any) -> bool: - if not isinstance(other, Marker): - return NotImplemented - - return str(self) == str(other) - - def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool: - """Evaluate a marker. - - Return the boolean from evaluating the given marker against the - environment. environment is an optional argument to override all or - part of the determined environment. - - The environment is determined from the current Python process. - """ - current_environment = default_environment() - current_environment["extra"] = "" - if environment is not None: - current_environment.update(environment) - # The API used to allow setting extra to None. We need to handle this - # case for backwards compatibility. - if current_environment["extra"] is None: - current_environment["extra"] = "" - - return _evaluate_markers(self._markers, current_environment) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/aiohttp/payload.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/aiohttp/payload.py deleted file mode 100644 index a2340e2945edcc21de4cf99479670a3361180816..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/aiohttp/payload.py +++ /dev/null @@ -1,465 +0,0 @@ -import asyncio -import enum -import io -import json -import mimetypes -import os -import warnings -from abc import ABC, abstractmethod -from itertools import chain -from typing import ( - IO, - TYPE_CHECKING, - Any, - ByteString, - Dict, - Iterable, - Optional, - TextIO, - Tuple, - Type, - Union, -) - -from multidict import CIMultiDict - -from . 
import hdrs -from .abc import AbstractStreamWriter -from .helpers import ( - PY_36, - content_disposition_header, - guess_filename, - parse_mimetype, - sentinel, -) -from .streams import StreamReader -from .typedefs import Final, JSONEncoder, _CIMultiDict - -__all__ = ( - "PAYLOAD_REGISTRY", - "get_payload", - "payload_type", - "Payload", - "BytesPayload", - "StringPayload", - "IOBasePayload", - "BytesIOPayload", - "BufferedReaderPayload", - "TextIOPayload", - "StringIOPayload", - "JsonPayload", - "AsyncIterablePayload", -) - -TOO_LARGE_BYTES_BODY: Final[int] = 2**20 # 1 MB - -if TYPE_CHECKING: # pragma: no cover - from typing import List - - -class LookupError(Exception): - pass - - -class Order(str, enum.Enum): - normal = "normal" - try_first = "try_first" - try_last = "try_last" - - -def get_payload(data: Any, *args: Any, **kwargs: Any) -> "Payload": - return PAYLOAD_REGISTRY.get(data, *args, **kwargs) - - -def register_payload( - factory: Type["Payload"], type: Any, *, order: Order = Order.normal -) -> None: - PAYLOAD_REGISTRY.register(factory, type, order=order) - - -class payload_type: - def __init__(self, type: Any, *, order: Order = Order.normal) -> None: - self.type = type - self.order = order - - def __call__(self, factory: Type["Payload"]) -> Type["Payload"]: - register_payload(factory, self.type, order=self.order) - return factory - - -PayloadType = Type["Payload"] -_PayloadRegistryItem = Tuple[PayloadType, Any] - - -class PayloadRegistry: - """Payload registry. - - note: we need zope.interface for more efficient adapter search - """ - - def __init__(self) -> None: - self._first: List[_PayloadRegistryItem] = [] - self._normal: List[_PayloadRegistryItem] = [] - self._last: List[_PayloadRegistryItem] = [] - - def get( - self, - data: Any, - *args: Any, - _CHAIN: "Type[chain[_PayloadRegistryItem]]" = chain, - **kwargs: Any, - ) -> "Payload": - if isinstance(data, Payload): - return data - for factory, type in _CHAIN(self._first, self._normal, self._last): - if isinstance(data, type): - return factory(data, *args, **kwargs) - - raise LookupError() - - def register( - self, factory: PayloadType, type: Any, *, order: Order = Order.normal - ) -> None: - if order is Order.try_first: - self._first.append((factory, type)) - elif order is Order.normal: - self._normal.append((factory, type)) - elif order is Order.try_last: - self._last.append((factory, type)) - else: - raise ValueError(f"Unsupported order {order!r}") - - -class Payload(ABC): - - _default_content_type: str = "application/octet-stream" - _size: Optional[int] = None - - def __init__( - self, - value: Any, - headers: Optional[ - Union[_CIMultiDict, Dict[str, str], Iterable[Tuple[str, str]]] - ] = None, - content_type: Optional[str] = sentinel, - filename: Optional[str] = None, - encoding: Optional[str] = None, - **kwargs: Any, - ) -> None: - self._encoding = encoding - self._filename = filename - self._headers: _CIMultiDict = CIMultiDict() - self._value = value - if content_type is not sentinel and content_type is not None: - self._headers[hdrs.CONTENT_TYPE] = content_type - elif self._filename is not None: - content_type = mimetypes.guess_type(self._filename)[0] - if content_type is None: - content_type = self._default_content_type - self._headers[hdrs.CONTENT_TYPE] = content_type - else: - self._headers[hdrs.CONTENT_TYPE] = self._default_content_type - self._headers.update(headers or {}) - - @property - def size(self) -> Optional[int]: - """Size of the payload.""" - return self._size - - @property - def filename(self) -> 
Optional[str]: - """Filename of the payload.""" - return self._filename - - @property - def headers(self) -> _CIMultiDict: - """Custom item headers""" - return self._headers - - @property - def _binary_headers(self) -> bytes: - return ( - "".join([k + ": " + v + "\r\n" for k, v in self.headers.items()]).encode( - "utf-8" - ) - + b"\r\n" - ) - - @property - def encoding(self) -> Optional[str]: - """Payload encoding""" - return self._encoding - - @property - def content_type(self) -> str: - """Content type""" - return self._headers[hdrs.CONTENT_TYPE] - - def set_content_disposition( - self, - disptype: str, - quote_fields: bool = True, - _charset: str = "utf-8", - **params: Any, - ) -> None: - """Sets ``Content-Disposition`` header.""" - self._headers[hdrs.CONTENT_DISPOSITION] = content_disposition_header( - disptype, quote_fields=quote_fields, _charset=_charset, **params - ) - - @abstractmethod - async def write(self, writer: AbstractStreamWriter) -> None: - """Write payload. - - writer is an AbstractStreamWriter instance: - """ - - -class BytesPayload(Payload): - def __init__(self, value: ByteString, *args: Any, **kwargs: Any) -> None: - if not isinstance(value, (bytes, bytearray, memoryview)): - raise TypeError(f"value argument must be byte-ish, not {type(value)!r}") - - if "content_type" not in kwargs: - kwargs["content_type"] = "application/octet-stream" - - super().__init__(value, *args, **kwargs) - - if isinstance(value, memoryview): - self._size = value.nbytes - else: - self._size = len(value) - - if self._size > TOO_LARGE_BYTES_BODY: - if PY_36: - kwargs = {"source": self} - else: - kwargs = {} - warnings.warn( - "Sending a large body directly with raw bytes might" - " lock the event loop. You should probably pass an " - "io.BytesIO object instead", - ResourceWarning, - **kwargs, - ) - - async def write(self, writer: AbstractStreamWriter) -> None: - await writer.write(self._value) - - -class StringPayload(BytesPayload): - def __init__( - self, - value: str, - *args: Any, - encoding: Optional[str] = None, - content_type: Optional[str] = None, - **kwargs: Any, - ) -> None: - - if encoding is None: - if content_type is None: - real_encoding = "utf-8" - content_type = "text/plain; charset=utf-8" - else: - mimetype = parse_mimetype(content_type) - real_encoding = mimetype.parameters.get("charset", "utf-8") - else: - if content_type is None: - content_type = "text/plain; charset=%s" % encoding - real_encoding = encoding - - super().__init__( - value.encode(real_encoding), - encoding=real_encoding, - content_type=content_type, - *args, - **kwargs, - ) - - -class StringIOPayload(StringPayload): - def __init__(self, value: IO[str], *args: Any, **kwargs: Any) -> None: - super().__init__(value.read(), *args, **kwargs) - - -class IOBasePayload(Payload): - _value: IO[Any] - - def __init__( - self, value: IO[Any], disposition: str = "attachment", *args: Any, **kwargs: Any - ) -> None: - if "filename" not in kwargs: - kwargs["filename"] = guess_filename(value) - - super().__init__(value, *args, **kwargs) - - if self._filename is not None and disposition is not None: - if hdrs.CONTENT_DISPOSITION not in self.headers: - self.set_content_disposition(disposition, filename=self._filename) - - async def write(self, writer: AbstractStreamWriter) -> None: - loop = asyncio.get_event_loop() - try: - chunk = await loop.run_in_executor(None, self._value.read, 2**16) - while chunk: - await writer.write(chunk) - chunk = await loop.run_in_executor(None, self._value.read, 2**16) - finally: - await 
loop.run_in_executor(None, self._value.close) - - -class TextIOPayload(IOBasePayload): - _value: TextIO - - def __init__( - self, - value: TextIO, - *args: Any, - encoding: Optional[str] = None, - content_type: Optional[str] = None, - **kwargs: Any, - ) -> None: - - if encoding is None: - if content_type is None: - encoding = "utf-8" - content_type = "text/plain; charset=utf-8" - else: - mimetype = parse_mimetype(content_type) - encoding = mimetype.parameters.get("charset", "utf-8") - else: - if content_type is None: - content_type = "text/plain; charset=%s" % encoding - - super().__init__( - value, - content_type=content_type, - encoding=encoding, - *args, - **kwargs, - ) - - @property - def size(self) -> Optional[int]: - try: - return os.fstat(self._value.fileno()).st_size - self._value.tell() - except OSError: - return None - - async def write(self, writer: AbstractStreamWriter) -> None: - loop = asyncio.get_event_loop() - try: - chunk = await loop.run_in_executor(None, self._value.read, 2**16) - while chunk: - data = ( - chunk.encode(encoding=self._encoding) - if self._encoding - else chunk.encode() - ) - await writer.write(data) - chunk = await loop.run_in_executor(None, self._value.read, 2**16) - finally: - await loop.run_in_executor(None, self._value.close) - - -class BytesIOPayload(IOBasePayload): - @property - def size(self) -> int: - position = self._value.tell() - end = self._value.seek(0, os.SEEK_END) - self._value.seek(position) - return end - position - - -class BufferedReaderPayload(IOBasePayload): - @property - def size(self) -> Optional[int]: - try: - return os.fstat(self._value.fileno()).st_size - self._value.tell() - except OSError: - # data.fileno() is not supported, e.g. - # io.BufferedReader(io.BytesIO(b'data')) - return None - - -class JsonPayload(BytesPayload): - def __init__( - self, - value: Any, - encoding: str = "utf-8", - content_type: str = "application/json", - dumps: JSONEncoder = json.dumps, - *args: Any, - **kwargs: Any, - ) -> None: - - super().__init__( - dumps(value).encode(encoding), - content_type=content_type, - encoding=encoding, - *args, - **kwargs, - ) - - -if TYPE_CHECKING: # pragma: no cover - from typing import AsyncIterable, AsyncIterator - - _AsyncIterator = AsyncIterator[bytes] - _AsyncIterable = AsyncIterable[bytes] -else: - from collections.abc import AsyncIterable, AsyncIterator - - _AsyncIterator = AsyncIterator - _AsyncIterable = AsyncIterable - - -class AsyncIterablePayload(Payload): - - _iter: Optional[_AsyncIterator] = None - - def __init__(self, value: _AsyncIterable, *args: Any, **kwargs: Any) -> None: - if not isinstance(value, AsyncIterable): - raise TypeError( - "value argument must support " - "collections.abc.AsyncIterable interface, " - "got {!r}".format(type(value)) - ) - - if "content_type" not in kwargs: - kwargs["content_type"] = "application/octet-stream" - - super().__init__(value, *args, **kwargs) - - self._iter = value.__aiter__() - - async def write(self, writer: AbstractStreamWriter) -> None: - if self._iter: - try: - # iter is not None check prevents rare cases - # when the case iterable is used twice - while True: - chunk = await self._iter.__anext__() - await writer.write(chunk) - except StopAsyncIteration: - self._iter = None - - -class StreamReaderPayload(AsyncIterablePayload): - def __init__(self, value: StreamReader, *args: Any, **kwargs: Any) -> None: - super().__init__(value.iter_any(), *args, **kwargs) - - -PAYLOAD_REGISTRY = PayloadRegistry() -PAYLOAD_REGISTRY.register(BytesPayload, (bytes, bytearray, 
memoryview)) -PAYLOAD_REGISTRY.register(StringPayload, str) -PAYLOAD_REGISTRY.register(StringIOPayload, io.StringIO) -PAYLOAD_REGISTRY.register(TextIOPayload, io.TextIOBase) -PAYLOAD_REGISTRY.register(BytesIOPayload, io.BytesIO) -PAYLOAD_REGISTRY.register(BufferedReaderPayload, (io.BufferedReader, io.BufferedRandom)) -PAYLOAD_REGISTRY.register(IOBasePayload, io.IOBase) -PAYLOAD_REGISTRY.register(StreamReaderPayload, StreamReader) -# try_last for giving a chance to more specialized async interables like -# multidict.BodyPartReaderPayload override the default -PAYLOAD_REGISTRY.register(AsyncIterablePayload, AsyncIterable, order=Order.try_last) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/colorama/win32.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/colorama/win32.py deleted file mode 100644 index 841b0e270a381cdfaca544a9be976d7276d83b1e..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/colorama/win32.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. - -# from winbase.h -STDOUT = -11 -STDERR = -12 - -ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 - -try: - import ctypes - from ctypes import LibraryLoader - windll = LibraryLoader(ctypes.WinDLL) - from ctypes import wintypes -except (AttributeError, ImportError): - windll = None - SetConsoleTextAttribute = lambda *_: None - winapi_test = lambda *_: None -else: - from ctypes import byref, Structure, c_char, POINTER - - COORD = wintypes._COORD - - class CONSOLE_SCREEN_BUFFER_INFO(Structure): - """struct in wincon.h.""" - _fields_ = [ - ("dwSize", COORD), - ("dwCursorPosition", COORD), - ("wAttributes", wintypes.WORD), - ("srWindow", wintypes.SMALL_RECT), - ("dwMaximumWindowSize", COORD), - ] - def __str__(self): - return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % ( - self.dwSize.Y, self.dwSize.X - , self.dwCursorPosition.Y, self.dwCursorPosition.X - , self.wAttributes - , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right - , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X - ) - - _GetStdHandle = windll.kernel32.GetStdHandle - _GetStdHandle.argtypes = [ - wintypes.DWORD, - ] - _GetStdHandle.restype = wintypes.HANDLE - - _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo - _GetConsoleScreenBufferInfo.argtypes = [ - wintypes.HANDLE, - POINTER(CONSOLE_SCREEN_BUFFER_INFO), - ] - _GetConsoleScreenBufferInfo.restype = wintypes.BOOL - - _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute - _SetConsoleTextAttribute.argtypes = [ - wintypes.HANDLE, - wintypes.WORD, - ] - _SetConsoleTextAttribute.restype = wintypes.BOOL - - _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition - _SetConsoleCursorPosition.argtypes = [ - wintypes.HANDLE, - COORD, - ] - _SetConsoleCursorPosition.restype = wintypes.BOOL - - _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA - _FillConsoleOutputCharacterA.argtypes = [ - wintypes.HANDLE, - c_char, - wintypes.DWORD, - COORD, - POINTER(wintypes.DWORD), - ] - _FillConsoleOutputCharacterA.restype = wintypes.BOOL - - _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute - _FillConsoleOutputAttribute.argtypes = [ - wintypes.HANDLE, - wintypes.WORD, - wintypes.DWORD, - COORD, - POINTER(wintypes.DWORD), - ] - _FillConsoleOutputAttribute.restype = wintypes.BOOL - - _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW - _SetConsoleTitleW.argtypes 
= [ - wintypes.LPCWSTR - ] - _SetConsoleTitleW.restype = wintypes.BOOL - - _GetConsoleMode = windll.kernel32.GetConsoleMode - _GetConsoleMode.argtypes = [ - wintypes.HANDLE, - POINTER(wintypes.DWORD) - ] - _GetConsoleMode.restype = wintypes.BOOL - - _SetConsoleMode = windll.kernel32.SetConsoleMode - _SetConsoleMode.argtypes = [ - wintypes.HANDLE, - wintypes.DWORD - ] - _SetConsoleMode.restype = wintypes.BOOL - - def _winapi_test(handle): - csbi = CONSOLE_SCREEN_BUFFER_INFO() - success = _GetConsoleScreenBufferInfo( - handle, byref(csbi)) - return bool(success) - - def winapi_test(): - return any(_winapi_test(h) for h in - (_GetStdHandle(STDOUT), _GetStdHandle(STDERR))) - - def GetConsoleScreenBufferInfo(stream_id=STDOUT): - handle = _GetStdHandle(stream_id) - csbi = CONSOLE_SCREEN_BUFFER_INFO() - success = _GetConsoleScreenBufferInfo( - handle, byref(csbi)) - return csbi - - def SetConsoleTextAttribute(stream_id, attrs): - handle = _GetStdHandle(stream_id) - return _SetConsoleTextAttribute(handle, attrs) - - def SetConsoleCursorPosition(stream_id, position, adjust=True): - position = COORD(*position) - # If the position is out of range, do nothing. - if position.Y <= 0 or position.X <= 0: - return - # Adjust for Windows' SetConsoleCursorPosition: - # 1. being 0-based, while ANSI is 1-based. - # 2. expecting (x,y), while ANSI uses (y,x). - adjusted_position = COORD(position.Y - 1, position.X - 1) - if adjust: - # Adjust for viewport's scroll position - sr = GetConsoleScreenBufferInfo(STDOUT).srWindow - adjusted_position.Y += sr.Top - adjusted_position.X += sr.Left - # Resume normal processing - handle = _GetStdHandle(stream_id) - return _SetConsoleCursorPosition(handle, adjusted_position) - - def FillConsoleOutputCharacter(stream_id, char, length, start): - handle = _GetStdHandle(stream_id) - char = c_char(char.encode()) - length = wintypes.DWORD(length) - num_written = wintypes.DWORD(0) - # Note that this is hard-coded for ANSI (vs wide) bytes. - success = _FillConsoleOutputCharacterA( - handle, char, length, start, byref(num_written)) - return num_written.value - - def FillConsoleOutputAttribute(stream_id, attr, length, start): - ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )''' - handle = _GetStdHandle(stream_id) - attribute = wintypes.WORD(attr) - length = wintypes.DWORD(length) - num_written = wintypes.DWORD(0) - # Note that this is hard-coded for ANSI (vs wide) bytes. 
- return _FillConsoleOutputAttribute( - handle, attribute, length, start, byref(num_written)) - - def SetConsoleTitle(title): - return _SetConsoleTitleW(title) - - def GetConsoleMode(handle): - mode = wintypes.DWORD() - success = _GetConsoleMode(handle, byref(mode)) - if not success: - raise ctypes.WinError() - return mode.value - - def SetConsoleMode(handle, mode): - success = _SetConsoleMode(handle, mode) - if not success: - raise ctypes.WinError() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fastapi/openapi/constants.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fastapi/openapi/constants.py deleted file mode 100644 index d724ee3cfdbcda1c39f39511046c7a884186ca98..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fastapi/openapi/constants.py +++ /dev/null @@ -1,3 +0,0 @@ -METHODS_WITH_BODY = {"GET", "HEAD", "POST", "PUT", "DELETE", "PATCH"} -REF_PREFIX = "#/components/schemas/" -REF_TEMPLATE = "#/components/schemas/{model}" diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fsspec/gui.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fsspec/gui.py deleted file mode 100644 index c6e9eb9dfce2f8837069375f64e70989921d0673..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fsspec/gui.py +++ /dev/null @@ -1,413 +0,0 @@ -import ast -import contextlib -import logging -import os -import re -from typing import ClassVar, Sequence - -import panel as pn - -from .core import OpenFile, get_filesystem_class, split_protocol -from .registry import known_implementations - -pn.extension() -logger = logging.getLogger("fsspec.gui") - - -class SigSlot: - """Signal-slot mixin, for Panel event passing - - Include this class in a widget manager's superclasses to be able to - register events and callbacks on Panel widgets managed by that class. - - The method ``_register`` should be called as widgets are added, and external - code should call ``connect`` to associate callbacks. - - By default, all signals emit a DEBUG logging statement. - """ - - # names of signals that this class may emit each of which must be - # set by _register for any new instance - signals: ClassVar[Sequence[str]] = [] - # names of actions that this class may respond to - slots: ClassVar[Sequence[str]] = [] - - # each of which must be a method name - - def __init__(self): - self._ignoring_events = False - self._sigs = {} - self._map = {} - self._setup() - - def _setup(self): - """Create GUI elements and register signals""" - self.panel = pn.pane.PaneBase() - # no signals to set up in the base class - - def _register( - self, widget, name, thing="value", log_level=logging.DEBUG, auto=False - ): - """Watch the given attribute of a widget and assign it a named event - - This is normally called at the time a widget is instantiated, in the - class which owns it. - - Parameters - ---------- - widget : pn.layout.Panel or None - Widget to watch. If None, an anonymous signal not associated with - any widget. - name : str - Name of this event - thing : str - Attribute of the given widget to watch - log_level : int - When the signal is triggered, a logging event of the given level - will be fired in the dfviz logger. - auto : bool - If True, automatically connects with a method in this class of the - same name. 
- """ - if name not in self.signals: - raise ValueError(f"Attempt to assign an undeclared signal: {name}") - self._sigs[name] = { - "widget": widget, - "callbacks": [], - "thing": thing, - "log": log_level, - } - wn = "-".join( - [ - getattr(widget, "name", str(widget)) if widget is not None else "none", - thing, - ] - ) - self._map[wn] = name - if widget is not None: - widget.param.watch(self._signal, thing, onlychanged=True) - if auto and hasattr(self, name): - self.connect(name, getattr(self, name)) - - def _repr_mimebundle_(self, *args, **kwargs): - """Display in a notebook or a server""" - try: - return self.panel._repr_mimebundle_(*args, **kwargs) - except (ValueError, AttributeError): - raise NotImplementedError("Panel does not seem to be set " "up properly") - - def connect(self, signal, slot): - """Associate call back with given event - - The callback must be a function which takes the "new" value of the - watched attribute as the only parameter. If the callback return False, - this cancels any further processing of the given event. - - Alternatively, the callback can be a string, in which case it means - emitting the correspondingly-named event (i.e., connect to self) - """ - self._sigs[signal]["callbacks"].append(slot) - - def _signal(self, event): - """This is called by a an action on a widget - - Within an self.ignore_events context, nothing happens. - - Tests can execute this method by directly changing the values of - widget components. - """ - if not self._ignoring_events: - wn = "-".join([event.obj.name, event.name]) - if wn in self._map and self._map[wn] in self._sigs: - self._emit(self._map[wn], event.new) - - @contextlib.contextmanager - def ignore_events(self): - """Temporarily turn off events processing in this instance - - (does not propagate to children) - """ - self._ignoring_events = True - try: - yield - finally: - self._ignoring_events = False - - def _emit(self, sig, value=None): - """An event happened, call its callbacks - - This method can be used in tests to simulate message passing without - directly changing visual elements. - - Calling of callbacks will halt whenever one returns False. 
- """ - logger.log(self._sigs[sig]["log"], f"{sig}: {value}") - for callback in self._sigs[sig]["callbacks"]: - if isinstance(callback, str): - self._emit(callback) - else: - try: - # running callbacks should not break the interface - ret = callback(value) - if ret is False: - break - except Exception as e: - logger.exception( - "Exception (%s) while executing callback for signal: %s" - "" % (e, sig) - ) - - def show(self, threads=False): - """Open a new browser tab and display this instance's interface""" - self.panel.show(threads=threads, verbose=False) - return self - - -class SingleSelect(SigSlot): - """A multiselect which only allows you to select one item for an event""" - - signals = ["_selected", "selected"] # the first is internal - slots = ["set_options", "set_selection", "add", "clear", "select"] - - def __init__(self, **kwargs): - self.kwargs = kwargs - super().__init__() - - def _setup(self): - self.panel = pn.widgets.MultiSelect(**self.kwargs) - self._register(self.panel, "_selected", "value") - self._register(None, "selected") - self.connect("_selected", self.select_one) - - def _signal(self, *args, **kwargs): - super()._signal(*args, **kwargs) - - def select_one(self, *_): - with self.ignore_events(): - val = [self.panel.value[-1]] if self.panel.value else [] - self.panel.value = val - self._emit("selected", self.panel.value) - - def set_options(self, options): - self.panel.options = options - - def clear(self): - self.panel.options = [] - - @property - def value(self): - return self.panel.value - - def set_selection(self, selection): - self.panel.value = [selection] - - -class FileSelector(SigSlot): - """Panel-based graphical file selector widget - - Instances of this widget are interactive and can be displayed in jupyter by having - them as the output of a cell, or in a separate browser tab using ``.show()``. - """ - - signals = [ - "protocol_changed", - "selection_changed", - "directory_entered", - "home_clicked", - "up_clicked", - "go_clicked", - "filters_changed", - ] - slots = ["set_filters", "go_home"] - - def __init__(self, url=None, filters=None, ignore=None, kwargs=None): - """ - - Parameters - ---------- - url : str (optional) - Initial value of the URL to populate the dialog; should include protocol - filters : list(str) (optional) - File endings to include in the listings. If not included, all files are - allowed. Does not affect directories. - If given, the endings will appear as checkboxes in the interface - ignore : list(str) (optional) - Regex(s) of file basename patterns to ignore, e.g., "\\." 
for typical - hidden files on posix - kwargs : dict (optional) - To pass to file system instance - """ - if url: - self.init_protocol, url = split_protocol(url) - else: - self.init_protocol, url = "file", os.getcwd() - self.init_url = url - self.init_kwargs = kwargs or "{}" - self.filters = filters - self.ignore = [re.compile(i) for i in ignore or []] - self._fs = None - super().__init__() - - def _setup(self): - self.url = pn.widgets.TextInput( - name="url", - value=self.init_url, - align="end", - sizing_mode="stretch_width", - width_policy="max", - ) - self.protocol = pn.widgets.Select( - options=sorted(known_implementations), - value=self.init_protocol, - name="protocol", - align="center", - ) - self.kwargs = pn.widgets.TextInput( - name="kwargs", value=self.init_kwargs, align="center" - ) - self.go = pn.widgets.Button(name="⇨", align="end", width=45) - self.main = SingleSelect(size=10) - self.home = pn.widgets.Button(name="🏠", width=40, height=30, align="end") - self.up = pn.widgets.Button(name="‹", width=30, height=30, align="end") - - self._register(self.protocol, "protocol_changed", auto=True) - self._register(self.go, "go_clicked", "clicks", auto=True) - self._register(self.up, "up_clicked", "clicks", auto=True) - self._register(self.home, "home_clicked", "clicks", auto=True) - self._register(None, "selection_changed") - self.main.connect("selected", self.selection_changed) - self._register(None, "directory_entered") - self.prev_protocol = self.protocol.value - self.prev_kwargs = self.storage_options - - self.filter_sel = pn.widgets.CheckBoxGroup( - value=[], options=[], inline=False, align="end", width_policy="min" - ) - self._register(self.filter_sel, "filters_changed", auto=True) - - self.panel = pn.Column( - pn.Row(self.protocol, self.kwargs), - pn.Row(self.home, self.up, self.url, self.go, self.filter_sel), - self.main.panel, - ) - self.set_filters(self.filters) - self.go_clicked() - - def set_filters(self, filters=None): - self.filters = filters - if filters: - self.filter_sel.options = filters - self.filter_sel.value = filters - else: - self.filter_sel.options = [] - self.filter_sel.value = [] - - @property - def storage_options(self): - """Value of the kwargs box as a dictionary""" - return ast.literal_eval(self.kwargs.value) or {} - - @property - def fs(self): - """Current filesystem instance""" - if self._fs is None: - cls = get_filesystem_class(self.protocol.value) - self._fs = cls(**self.storage_options) - return self._fs - - @property - def urlpath(self): - """URL of currently selected item""" - return ( - (f"{self.protocol.value}://{self.main.value[0]}") - if self.main.value - else None - ) - - def open_file(self, mode="rb", compression=None, encoding=None): - """Create OpenFile instance for the currently selected item - - For example, in a notebook you might do something like - - .. code-block:: - - [ ]: sel = FileSelector(); sel - - # user selects their file - - [ ]: with sel.open_file('rb') as f: - ... out = f.read() - - Parameters - ---------- - mode: str (optional) - Open mode for the file. - compression: str (optional) - The interact with the file as compressed. Set to 'infer' to guess - compression from the file ending - encoding: str (optional) - If using text mode, use this encoding; defaults to UTF8. 
- """ - if self.urlpath is None: - raise ValueError("No file selected") - return OpenFile(self.fs, self.urlpath, mode, compression, encoding) - - def filters_changed(self, values): - self.filters = values - self.go_clicked() - - def selection_changed(self, *_): - if self.urlpath is None: - return - if self.fs.isdir(self.urlpath): - self.url.value = self.fs._strip_protocol(self.urlpath) - self.go_clicked() - - def go_clicked(self, *_): - if ( - self.prev_protocol != self.protocol.value - or self.prev_kwargs != self.storage_options - ): - self._fs = None # causes fs to be recreated - self.prev_protocol = self.protocol.value - self.prev_kwargs = self.storage_options - listing = sorted( - self.fs.ls(self.url.value, detail=True), key=lambda x: x["name"] - ) - listing = [ - l - for l in listing - if not any(i.match(l["name"].rsplit("/", 1)[-1]) for i in self.ignore) - ] - folders = { - "📁 " + o["name"].rsplit("/", 1)[-1]: o["name"] - for o in listing - if o["type"] == "directory" - } - files = { - "📄 " + o["name"].rsplit("/", 1)[-1]: o["name"] - for o in listing - if o["type"] == "file" - } - if self.filters: - files = { - k: v - for k, v in files.items() - if any(v.endswith(ext) for ext in self.filters) - } - self.main.set_options(dict(**folders, **files)) - - def protocol_changed(self, *_): - self._fs = None - self.main.options = [] - self.url.value = "" - - def home_clicked(self, *_): - self.protocol.value = self.init_protocol - self.kwargs.value = self.init_kwargs - self.url.value = self.init_url - self.go_clicked() - - def up_clicked(self, *_): - self.url.value = self.fs._parent(self.url.value) - self.go_clicked() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_simple_templates/simpletextbox.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_simple_templates/simpletextbox.py deleted file mode 100644 index ec8fb60e7e6f006e5bab52f08eabd7b5d275ea19..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_simple_templates/simpletextbox.py +++ /dev/null @@ -1,96 +0,0 @@ -from __future__ import annotations - -from typing import Any, Callable - -from gradio.components.base import FormComponent -from gradio.events import Events - - -class SimpleTextbox(FormComponent): - """ - Creates a very simple textbox for user to enter string input or display string output. - Preprocessing: passes textbox value as a {str} into the function. - Postprocessing: expects a {str} returned from function and sets textbox value to it. - Examples-format: a {str} representing the textbox input. - """ - - EVENTS = [ - Events.change, - Events.input, - Events.submit, - ] - - def __init__( - self, - value: str | Callable | None = "", - *, - placeholder: str | None = None, - label: str | None = None, - every: float | None = None, - show_label: bool | None = None, - scale: int | None = None, - min_width: int = 160, - interactive: bool | None = None, - visible: bool = True, - rtl: bool = False, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - render: bool = True, - ): - """ - Parameters: - value: default text to provide in textbox. If callable, the function will be called whenever the app loads to set the initial value of the component. - placeholder: placeholder hint to provide behind textbox. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. 
Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer. - min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first. - interactive: if True, will be rendered as an editable textbox; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - rtl: If True and `type` is "text", sets the direction of the text to right-to-left (cursor appears on the left of the text). Default is False, which renders cursor on the right. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. - render: If False, component will not render be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later. - """ - self.placeholder = placeholder - self.rtl = rtl - super().__init__( - label=label, - every=every, - show_label=show_label, - scale=scale, - min_width=min_width, - interactive=interactive, - visible=visible, - elem_id=elem_id, - elem_classes=elem_classes, - value=value, - render=render, - ) - - def preprocess(self, x: str | None) -> str | None: - """ - Preprocesses input (converts it to a string) before passing it to the function. - Parameters: - x: text - Returns: - text - """ - return None if x is None else str(x) - - def postprocess(self, y: str | None) -> str | None: - """ - Postproccess the function output y by converting it to a str before passing it to the frontend. - Parameters: - y: function output to postprocess. - Returns: - text - """ - return None if y is None else str(y) - - def api_info(self) -> dict[str, Any]: - return {"type": "string"} - - def example_inputs(self) -> Any: - return "Hello!!" diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/components/color_picker.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/components/color_picker.py deleted file mode 100644 index 6575c9d42c0b7ff921edc1c6380b778070ef490f..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/components/color_picker.py +++ /dev/null @@ -1,92 +0,0 @@ -"""gr.ColorPicker() component.""" - -from __future__ import annotations - -from typing import Any, Callable - -from gradio_client.documentation import document, set_documentation_group - -from gradio.components.base import Component -from gradio.events import Events - -set_documentation_group("component") - - -@document() -class ColorPicker(Component): - """ - Creates a color picker for user to select a color as string input. - Preprocessing: passes selected color value as a {str} into the function. - Postprocessing: expects a {str} returned from function and sets color picker value to it. - Examples-format: a {str} with a hexadecimal representation of a color, e.g. "#ff0000" for red. 
- Demos: color_picker, color_generator - """ - - EVENTS = [Events.change, Events.input, Events.submit, Events.focus, Events.blur] - - def __init__( - self, - value: str | Callable | None = None, - *, - label: str | None = None, - info: str | None = None, - every: float | None = None, - show_label: bool | None = None, - container: bool = True, - scale: int | None = None, - min_width: int = 160, - interactive: bool | None = None, - visible: bool = True, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - render: bool = True, - ): - """ - Parameters: - value: default text to provide in color picker. If callable, the function will be called whenever the app loads to set the initial value of the component. - label: The label for this component. Appears above the component and is also used as the header if there are a table of examples for this component. If None and used in a `gr.Interface`, the label will be the name of the parameter this component is assigned to. - info: additional component description. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - container: If True, will place the component in a container - providing some extra padding around the border. - scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer. - min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first. - interactive: if True, will be rendered as an editable color picker; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. - render: If False, component will not render be rendered in the Blocks context. Should be used if the intention is to assign event listeners now but render the component later. 
- """ - super().__init__( - label=label, - info=info, - every=every, - show_label=show_label, - container=container, - scale=scale, - min_width=min_width, - interactive=interactive, - visible=visible, - elem_id=elem_id, - elem_classes=elem_classes, - render=render, - value=value, - ) - - def example_inputs(self) -> str: - return "#000000" - - def api_info(self) -> dict[str, Any]: - return {"type": "string"} - - def preprocess(self, payload: str | None) -> str | None: - if payload is None: - return None - else: - return str(payload) - - def postprocess(self, value: str | None) -> str | None: - if value is None: - return None - else: - return str(value) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/huggingface_hub/utils/_cache_assets.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/huggingface_hub/utils/_cache_assets.py deleted file mode 100644 index d6a6421e3b0ff0261079094ea2e2df5de212bce7..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/huggingface_hub/utils/_cache_assets.py +++ /dev/null @@ -1,135 +0,0 @@ -# coding=utf-8 -# Copyright 2019-present, the HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from pathlib import Path -from typing import Union - -from ..constants import HUGGINGFACE_ASSETS_CACHE - - -def cached_assets_path( - library_name: str, - namespace: str = "default", - subfolder: str = "default", - *, - assets_dir: Union[str, Path, None] = None, -): - """Return a folder path to cache arbitrary files. - - `huggingface_hub` provides a canonical folder path to store assets. This is the - recommended way to integrate cache in a downstream library as it will benefit from - the builtins tools to scan and delete the cache properly. - - The distinction is made between files cached from the Hub and assets. Files from the - Hub are cached in a git-aware manner and entirely managed by `huggingface_hub`. See - [related documentation](https://huggingface.co/docs/huggingface_hub/how-to-cache). - All other files that a downstream library caches are considered to be "assets" - (files downloaded from external sources, extracted from a .tar archive, preprocessed - for training,...). - - Once the folder path is generated, it is guaranteed to exist and to be a directory. - The path is based on 3 levels of depth: the library name, a namespace and a - subfolder. Those 3 levels grants flexibility while allowing `huggingface_hub` to - expect folders when scanning/deleting parts of the assets cache. Within a library, - it is expected that all namespaces share the same subset of subfolder names but this - is not a mandatory rule. The downstream library has then full control on which file - structure to adopt within its cache. Namespace and subfolder are optional (would - default to a `"default/"` subfolder) but library name is mandatory as we want every - downstream library to manage its own cache. 
- - Expected tree: - ```text - assets/ - └── datasets/ - │ ├── SQuAD/ - │ │ ├── downloaded/ - │ │ ├── extracted/ - │ │ └── processed/ - │ ├── Helsinki-NLP--tatoeba_mt/ - │ ├── downloaded/ - │ ├── extracted/ - │ └── processed/ - └── transformers/ - ├── default/ - │ ├── something/ - ├── bert-base-cased/ - │ ├── default/ - │ └── training/ - hub/ - └── models--julien-c--EsperBERTo-small/ - ├── blobs/ - │ ├── (...) - │ ├── (...) - ├── refs/ - │ └── (...) - └── [ 128] snapshots/ - ├── 2439f60ef33a0d46d85da5001d52aeda5b00ce9f/ - │ ├── (...) - └── bbc77c8132af1cc5cf678da3f1ddf2de43606d48/ - └── (...) - ``` - - - Args: - library_name (`str`): - Name of the library that will manage the cache folder. Example: `"dataset"`. - namespace (`str`, *optional*, defaults to "default"): - Namespace to which the data belongs. Example: `"SQuAD"`. - subfolder (`str`, *optional*, defaults to "default"): - Subfolder in which the data will be stored. Example: `extracted`. - assets_dir (`str`, `Path`, *optional*): - Path to the folder where assets are cached. This must not be the same folder - where Hub files are cached. Defaults to `HF_HOME / "assets"` if not provided. - Can also be set with `HUGGINGFACE_ASSETS_CACHE` environment variable. - - Returns: - Path to the cache folder (`Path`). - - Example: - ```py - >>> from huggingface_hub import cached_assets_path - - >>> cached_assets_path(library_name="datasets", namespace="SQuAD", subfolder="download") - PosixPath('/home/wauplin/.cache/huggingface/extra/datasets/SQuAD/download') - - >>> cached_assets_path(library_name="datasets", namespace="SQuAD", subfolder="extracted") - PosixPath('/home/wauplin/.cache/huggingface/extra/datasets/SQuAD/extracted') - - >>> cached_assets_path(library_name="datasets", namespace="Helsinki-NLP/tatoeba_mt") - PosixPath('/home/wauplin/.cache/huggingface/extra/datasets/Helsinki-NLP--tatoeba_mt/default') - - >>> cached_assets_path(library_name="datasets", assets_dir="/tmp/tmp123456") - PosixPath('/tmp/tmp123456/datasets/default/default') - ``` - """ - # Resolve assets_dir - if assets_dir is None: - assets_dir = HUGGINGFACE_ASSETS_CACHE - assets_dir = Path(assets_dir).expanduser().resolve() - - # Avoid names that could create path issues - for part in (" ", "/", "\\"): - library_name = library_name.replace(part, "--") - namespace = namespace.replace(part, "--") - subfolder = subfolder.replace(part, "--") - - # Path to subfolder is created - path = assets_dir / library_name / namespace / subfolder - try: - path.mkdir(exist_ok=True, parents=True) - except (FileExistsError, NotADirectoryError): - raise ValueError(f"Corrupted assets folder: cannot create directory because of an existing file ({path}).") - - # Return - return path diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/jinja2/exceptions.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/jinja2/exceptions.py deleted file mode 100644 index 082ebe8f221d4e7e980e4d321c0a0c5da033b124..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/jinja2/exceptions.py +++ /dev/null @@ -1,166 +0,0 @@ -import typing as t - -if t.TYPE_CHECKING: - from .runtime import Undefined - - -class TemplateError(Exception): - """Baseclass for all template errors.""" - - def __init__(self, message: t.Optional[str] = None) -> None: - super().__init__(message) - - @property - def message(self) -> t.Optional[str]: - return self.args[0] if self.args else None - - -class TemplateNotFound(IOError, LookupError, 
TemplateError): - """Raised if a template does not exist. - - .. versionchanged:: 2.11 - If the given name is :class:`Undefined` and no message was - provided, an :exc:`UndefinedError` is raised. - """ - - # Silence the Python warning about message being deprecated since - # it's not valid here. - message: t.Optional[str] = None - - def __init__( - self, - name: t.Optional[t.Union[str, "Undefined"]], - message: t.Optional[str] = None, - ) -> None: - IOError.__init__(self, name) - - if message is None: - from .runtime import Undefined - - if isinstance(name, Undefined): - name._fail_with_undefined_error() - - message = name - - self.message = message - self.name = name - self.templates = [name] - - def __str__(self) -> str: - return str(self.message) - - -class TemplatesNotFound(TemplateNotFound): - """Like :class:`TemplateNotFound` but raised if multiple templates - are selected. This is a subclass of :class:`TemplateNotFound` - exception, so just catching the base exception will catch both. - - .. versionchanged:: 2.11 - If a name in the list of names is :class:`Undefined`, a message - about it being undefined is shown rather than the empty string. - - .. versionadded:: 2.2 - """ - - def __init__( - self, - names: t.Sequence[t.Union[str, "Undefined"]] = (), - message: t.Optional[str] = None, - ) -> None: - if message is None: - from .runtime import Undefined - - parts = [] - - for name in names: - if isinstance(name, Undefined): - parts.append(name._undefined_message) - else: - parts.append(name) - - parts_str = ", ".join(map(str, parts)) - message = f"none of the templates given were found: {parts_str}" - - super().__init__(names[-1] if names else None, message) - self.templates = list(names) - - -class TemplateSyntaxError(TemplateError): - """Raised to tell the user that there is a problem with the template.""" - - def __init__( - self, - message: str, - lineno: int, - name: t.Optional[str] = None, - filename: t.Optional[str] = None, - ) -> None: - super().__init__(message) - self.lineno = lineno - self.name = name - self.filename = filename - self.source: t.Optional[str] = None - - # this is set to True if the debug.translate_syntax_error - # function translated the syntax error into a new traceback - self.translated = False - - def __str__(self) -> str: - # for translated errors we only return the message - if self.translated: - return t.cast(str, self.message) - - # otherwise attach some stuff - location = f"line {self.lineno}" - name = self.filename or self.name - if name: - location = f'File "{name}", {location}' - lines = [t.cast(str, self.message), " " + location] - - # if the source is set, add the line to the output - if self.source is not None: - try: - line = self.source.splitlines()[self.lineno - 1] - except IndexError: - pass - else: - lines.append(" " + line.strip()) - - return "\n".join(lines) - - def __reduce__(self): # type: ignore - # https://bugs.python.org/issue1692335 Exceptions that take - # multiple required arguments have problems with pickling. - # Without this, raises TypeError: __init__() missing 1 required - # positional argument: 'lineno' - return self.__class__, (self.message, self.lineno, self.name, self.filename) - - -class TemplateAssertionError(TemplateSyntaxError): - """Like a template syntax error, but covers cases where something in the - template caused an error at compile time that wasn't necessarily caused - by a syntax error. However it's a direct subclass of - :exc:`TemplateSyntaxError` and has the same attributes. 
- """ - - -class TemplateRuntimeError(TemplateError): - """A generic runtime error in the template engine. Under some situations - Jinja may raise this exception. - """ - - -class UndefinedError(TemplateRuntimeError): - """Raised if a template tries to operate on :class:`Undefined`.""" - - -class SecurityError(TemplateRuntimeError): - """Raised if a template tries to do something insecure if the - sandbox is enabled. - """ - - -class FilterArgumentError(TemplateRuntimeError): - """This error is raised if a filter was called with inappropriate - arguments - """ diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_type1font.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_type1font.py deleted file mode 100644 index 1e173d5ea84dec1792dfd40e0ea692da463df69f..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_type1font.py +++ /dev/null @@ -1,160 +0,0 @@ -import matplotlib._type1font as t1f -import os.path -import difflib -import pytest - - -def test_Type1Font(): - filename = os.path.join(os.path.dirname(__file__), 'cmr10.pfb') - font = t1f.Type1Font(filename) - slanted = font.transform({'slant': 1}) - condensed = font.transform({'extend': 0.5}) - with open(filename, 'rb') as fd: - rawdata = fd.read() - assert font.parts[0] == rawdata[0x0006:0x10c5] - assert font.parts[1] == rawdata[0x10cb:0x897f] - assert font.parts[2] == rawdata[0x8985:0x8ba6] - assert font.decrypted.startswith(b'dup\n/Private 18 dict dup begin') - assert font.decrypted.endswith(b'mark currentfile closefile\n') - assert slanted.decrypted.startswith(b'dup\n/Private 18 dict dup begin') - assert slanted.decrypted.endswith(b'mark currentfile closefile\n') - assert b'UniqueID 5000793' in font.parts[0] - assert b'UniqueID 5000793' in font.decrypted - assert font._pos['UniqueID'] == [(797, 818), (4483, 4504)] - - len0 = len(font.parts[0]) - for key in font._pos.keys(): - for pos0, pos1 in font._pos[key]: - if pos0 < len0: - data = font.parts[0][pos0:pos1] - else: - data = font.decrypted[pos0-len0:pos1-len0] - assert data.startswith(f'/{key}'.encode('ascii')) - assert {'FontType', 'FontMatrix', 'PaintType', 'ItalicAngle', 'RD' - } < set(font._pos.keys()) - - assert b'UniqueID 5000793' not in slanted.parts[0] - assert b'UniqueID 5000793' not in slanted.decrypted - assert 'UniqueID' not in slanted._pos - assert font.prop['Weight'] == 'Medium' - assert not font.prop['isFixedPitch'] - assert font.prop['ItalicAngle'] == 0 - assert slanted.prop['ItalicAngle'] == -45 - assert font.prop['Encoding'][5] == 'Pi' - assert isinstance(font.prop['CharStrings']['Pi'], bytes) - assert font._abbr['ND'] == 'ND' - - differ = difflib.Differ() - diff = list(differ.compare( - font.parts[0].decode('latin-1').splitlines(), - slanted.parts[0].decode('latin-1').splitlines())) - for line in ( - # Removes UniqueID - '- /UniqueID 5000793 def', - # Changes the font name - '- /FontName /CMR10 def', - '+ /FontName/CMR10_Slant_1000 def', - # Alters FontMatrix - '- /FontMatrix [0.001 0 0 0.001 0 0 ]readonly def', - '+ /FontMatrix [0.001 0 0.001 0.001 0 0] readonly def', - # Alters ItalicAngle - '- /ItalicAngle 0 def', - '+ /ItalicAngle -45.0 def'): - assert line in diff, 'diff to slanted font must contain %s' % line - - diff = list(differ.compare( - font.parts[0].decode('latin-1').splitlines(), - condensed.parts[0].decode('latin-1').splitlines())) - for line in ( - # Removes UniqueID - '- /UniqueID 
5000793 def', - # Changes the font name - '- /FontName /CMR10 def', - '+ /FontName/CMR10_Extend_500 def', - # Alters FontMatrix - '- /FontMatrix [0.001 0 0 0.001 0 0 ]readonly def', - '+ /FontMatrix [0.0005 0 0 0.001 0 0] readonly def'): - assert line in diff, 'diff to condensed font must contain %s' % line - - -def test_Type1Font_2(): - filename = os.path.join(os.path.dirname(__file__), - 'Courier10PitchBT-Bold.pfb') - font = t1f.Type1Font(filename) - assert font.prop['Weight'] == 'Bold' - assert font.prop['isFixedPitch'] - assert font.prop['Encoding'][65] == 'A' # the font uses StandardEncoding - (pos0, pos1), = font._pos['Encoding'] - assert font.parts[0][pos0:pos1] == b'/Encoding StandardEncoding' - assert font._abbr['ND'] == '|-' - - -def test_tokenize(): - data = (b'1234/abc false -9.81 Foo <<[0 1 2]<0 1ef a\t>>>\n' - b'(string with(nested\t\\) par)ens\\\\)') - # 1 2 x 2 xx1 - # 1 and 2 are matching parens, x means escaped character - n, w, num, kw, d = 'name', 'whitespace', 'number', 'keyword', 'delimiter' - b, s = 'boolean', 'string' - correct = [ - (num, 1234), (n, 'abc'), (w, ' '), (b, False), (w, ' '), (num, -9.81), - (w, ' '), (kw, 'Foo'), (w, ' '), (d, '<<'), (d, '['), (num, 0), - (w, ' '), (num, 1), (w, ' '), (num, 2), (d, ']'), (s, b'\x01\xef\xa0'), - (d, '>>'), (w, '\n'), (s, 'string with(nested\t) par)ens\\') - ] - correct_no_ws = [x for x in correct if x[0] != w] - - def convert(tokens): - return [(t.kind, t.value()) for t in tokens] - - assert convert(t1f._tokenize(data, False)) == correct - assert convert(t1f._tokenize(data, True)) == correct_no_ws - - def bin_after(n): - tokens = t1f._tokenize(data, True) - result = [] - for _ in range(n): - result.append(next(tokens)) - result.append(tokens.send(10)) - return convert(result) - - for n in range(1, len(correct_no_ws)): - result = bin_after(n) - assert result[:-1] == correct_no_ws[:n] - assert result[-1][0] == 'binary' - assert isinstance(result[-1][1], bytes) - - -def test_tokenize_errors(): - with pytest.raises(ValueError): - list(t1f._tokenize(b'1234 (this (string) is unterminated\\)', True)) - with pytest.raises(ValueError): - list(t1f._tokenize(b'/Foo<01234', True)) - with pytest.raises(ValueError): - list(t1f._tokenize(b'/Foo<01234abcg>/Bar', True)) - - -def test_overprecision(): - # We used to output too many digits in FontMatrix entries and - # ItalicAngle, which could make Type-1 parsers unhappy. 
- filename = os.path.join(os.path.dirname(__file__), 'cmr10.pfb') - font = t1f.Type1Font(filename) - slanted = font.transform({'slant': .167}) - lines = slanted.parts[0].decode('ascii').splitlines() - matrix, = [line[line.index('[')+1:line.index(']')] - for line in lines if '/FontMatrix' in line] - angle, = [word - for line in lines if '/ItalicAngle' in line - for word in line.split() if word[0] in '-0123456789'] - # the following used to include 0.00016700000000000002 - assert matrix == '0.001 0 0.000167 0.001 0 0' - # and here we had -9.48090361795083 - assert angle == '-9.4809' - - -def test_encrypt_decrypt_roundtrip(): - data = b'this is my plaintext \0\1\2\3' - encrypted = t1f.Type1Font._encrypt(data, 'eexec') - decrypted = t1f.Type1Font._decrypt(encrypted, 'eexec') - assert encrypted != decrypted - assert data == decrypted diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tri/_triinterpolate.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tri/_triinterpolate.py deleted file mode 100644 index 90ad6cf3a76c45fa0fa2a607c6acee1cab1c5b05..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tri/_triinterpolate.py +++ /dev/null @@ -1,1574 +0,0 @@ -""" -Interpolation inside triangular grids. -""" - -import numpy as np - -from matplotlib import _api -from matplotlib.tri import Triangulation -from matplotlib.tri._trifinder import TriFinder -from matplotlib.tri._tritools import TriAnalyzer - -__all__ = ('TriInterpolator', 'LinearTriInterpolator', 'CubicTriInterpolator') - - -class TriInterpolator: - """ - Abstract base class for classes used to interpolate on a triangular grid. - - Derived classes implement the following methods: - - - ``__call__(x, y)``, - where x, y are array-like point coordinates of the same shape, and - that returns a masked array of the same shape containing the - interpolated z-values. - - - ``gradient(x, y)``, - where x, y are array-like point coordinates of the same - shape, and that returns a list of 2 masked arrays of the same shape - containing the 2 derivatives of the interpolator (derivatives of - interpolated z values with respect to x and y). - """ - - def __init__(self, triangulation, z, trifinder=None): - _api.check_isinstance(Triangulation, triangulation=triangulation) - self._triangulation = triangulation - - self._z = np.asarray(z) - if self._z.shape != self._triangulation.x.shape: - raise ValueError("z array must have same length as triangulation x" - " and y arrays") - - _api.check_isinstance((TriFinder, None), trifinder=trifinder) - self._trifinder = trifinder or self._triangulation.get_trifinder() - - # Default scaling factors : 1.0 (= no scaling) - # Scaling may be used for interpolations for which the order of - # magnitude of x, y has an impact on the interpolant definition. - # Please refer to :meth:`_interpolate_multikeys` for details. - self._unit_x = 1.0 - self._unit_y = 1.0 - - # Default triangle renumbering: None (= no renumbering) - # Renumbering may be used to avoid unnecessary computations - # if complex calculations are done inside the Interpolator. - # Please refer to :meth:`_interpolate_multikeys` for details. - self._tri_renum = None - - # __call__ and gradient docstrings are shared by all subclasses - # (except, if needed, relevant additions). - # However these methods are only implemented in subclasses to avoid - # confusion in the documentation. 
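[Editorial aside, not part of the deleted file] The `__call__`/`gradient` contract described in the docstring above is easiest to see through the public `matplotlib.tri` API, using the concrete `LinearTriInterpolator` defined further below. A minimal sketch on a small synthetic point set (sizes and values are arbitrary):

```py
# Illustrative sketch only (not part of the deleted module): exercising the
# __call__ / gradient interface described above via the public matplotlib.tri API.
import numpy as np
from matplotlib.tri import Triangulation, LinearTriInterpolator

# Small synthetic data set: z = x**2 + y**2 sampled at random points.
rng = np.random.default_rng(0)
x = rng.uniform(-1, 1, 200)
y = rng.uniform(-1, 1, 200)
z = x**2 + y**2

tri = Triangulation(x, y)             # Delaunay triangulation of the points
interp = LinearTriInterpolator(tri, z)

xi, yi = np.meshgrid(np.linspace(-1, 1, 5), np.linspace(-1, 1, 5))
zi = interp(xi, yi)                   # masked array, same shape as xi/yi
dzdx, dzdy = interp.gradient(xi, yi)  # list of 2 masked arrays
print(zi.shape, dzdx.shape)
```

As the base-class docstring states, points that fall outside the triangulation come back masked rather than raising.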
- _docstring__call__ = """ - Returns a masked array containing interpolated values at the specified - (x, y) points. - - Parameters - ---------- - x, y : array-like - x and y coordinates of the same shape and any number of - dimensions. - - Returns - ------- - np.ma.array - Masked array of the same shape as *x* and *y*; values corresponding - to (*x*, *y*) points outside of the triangulation are masked out. - - """ - - _docstringgradient = r""" - Returns a list of 2 masked arrays containing interpolated derivatives - at the specified (x, y) points. - - Parameters - ---------- - x, y : array-like - x and y coordinates of the same shape and any number of - dimensions. - - Returns - ------- - dzdx, dzdy : np.ma.array - 2 masked arrays of the same shape as *x* and *y*; values - corresponding to (x, y) points outside of the triangulation - are masked out. - The first returned array contains the values of - :math:`\frac{\partial z}{\partial x}` and the second those of - :math:`\frac{\partial z}{\partial y}`. - - """ - - def _interpolate_multikeys(self, x, y, tri_index=None, - return_keys=('z',)): - """ - Versatile (private) method defined for all TriInterpolators. - - :meth:`_interpolate_multikeys` is a wrapper around method - :meth:`_interpolate_single_key` (to be defined in the child - subclasses). - :meth:`_interpolate_single_key actually performs the interpolation, - but only for 1-dimensional inputs and at valid locations (inside - unmasked triangles of the triangulation). - - The purpose of :meth:`_interpolate_multikeys` is to implement the - following common tasks needed in all subclasses implementations: - - - calculation of containing triangles - - dealing with more than one interpolation request at the same - location (e.g., if the 2 derivatives are requested, it is - unnecessary to compute the containing triangles twice) - - scaling according to self._unit_x, self._unit_y - - dealing with points outside of the grid (with fill value np.nan) - - dealing with multi-dimensional *x*, *y* arrays: flattening for - :meth:`_interpolate_params` call and final reshaping. - - (Note that np.vectorize could do most of those things very well for - you, but it does it by function evaluations over successive tuples of - the input arrays. Therefore, this tends to be more time-consuming than - using optimized numpy functions - e.g., np.dot - which can be used - easily on the flattened inputs, in the child-subclass methods - :meth:`_interpolate_single_key`.) - - It is guaranteed that the calls to :meth:`_interpolate_single_key` - will be done with flattened (1-d) array-like input parameters *x*, *y* - and with flattened, valid `tri_index` arrays (no -1 index allowed). - - Parameters - ---------- - x, y : array-like - x and y coordinates where interpolated values are requested. - tri_index : array-like of int, optional - Array of the containing triangle indices, same shape as - *x* and *y*. Defaults to None. If None, these indices - will be computed by a TriFinder instance. - (Note: For point outside the grid, tri_index[ipt] shall be -1). - return_keys : tuple of keys from {'z', 'dzdx', 'dzdy'} - Defines the interpolation arrays to return, and in which order. - - Returns - ------- - list of arrays - Each array-like contains the expected interpolated values in the - order defined by *return_keys* parameter. 
- """ - # Flattening and rescaling inputs arrays x, y - # (initial shape is stored for output) - x = np.asarray(x, dtype=np.float64) - y = np.asarray(y, dtype=np.float64) - sh_ret = x.shape - if x.shape != y.shape: - raise ValueError("x and y shall have same shapes." - f" Given: {x.shape} and {y.shape}") - x = np.ravel(x) - y = np.ravel(y) - x_scaled = x/self._unit_x - y_scaled = y/self._unit_y - size_ret = np.size(x_scaled) - - # Computes & ravels the element indexes, extract the valid ones. - if tri_index is None: - tri_index = self._trifinder(x, y) - else: - if tri_index.shape != sh_ret: - raise ValueError( - "tri_index array is provided and shall" - " have same shape as x and y. Given: " - f"{tri_index.shape} and {sh_ret}") - tri_index = np.ravel(tri_index) - - mask_in = (tri_index != -1) - if self._tri_renum is None: - valid_tri_index = tri_index[mask_in] - else: - valid_tri_index = self._tri_renum[tri_index[mask_in]] - valid_x = x_scaled[mask_in] - valid_y = y_scaled[mask_in] - - ret = [] - for return_key in return_keys: - # Find the return index associated with the key. - try: - return_index = {'z': 0, 'dzdx': 1, 'dzdy': 2}[return_key] - except KeyError as err: - raise ValueError("return_keys items shall take values in" - " {'z', 'dzdx', 'dzdy'}") from err - - # Sets the scale factor for f & df components - scale = [1., 1./self._unit_x, 1./self._unit_y][return_index] - - # Computes the interpolation - ret_loc = np.empty(size_ret, dtype=np.float64) - ret_loc[~mask_in] = np.nan - ret_loc[mask_in] = self._interpolate_single_key( - return_key, valid_tri_index, valid_x, valid_y) * scale - ret += [np.ma.masked_invalid(ret_loc.reshape(sh_ret), copy=False)] - - return ret - - def _interpolate_single_key(self, return_key, tri_index, x, y): - """ - Interpolate at points belonging to the triangulation - (inside an unmasked triangles). - - Parameters - ---------- - return_key : {'z', 'dzdx', 'dzdy'} - The requested values (z or its derivatives). - tri_index : 1D int array - Valid triangle index (cannot be -1). - x, y : 1D arrays, same shape as `tri_index` - Valid locations where interpolation is requested. - - Returns - ------- - 1-d array - Returned array of the same size as *tri_index* - """ - raise NotImplementedError("TriInterpolator subclasses" + - "should implement _interpolate_single_key!") - - -class LinearTriInterpolator(TriInterpolator): - """ - Linear interpolator on a triangular grid. - - Each triangle is represented by a plane so that an interpolated value at - point (x, y) lies on the plane of the triangle containing (x, y). - Interpolated values are therefore continuous across the triangulation, but - their first derivatives are discontinuous at edges between triangles. - - Parameters - ---------- - triangulation : `~matplotlib.tri.Triangulation` - The triangulation to interpolate over. - z : (npoints,) array-like - Array of values, defined at grid points, to interpolate between. - trifinder : `~matplotlib.tri.TriFinder`, optional - If this is not specified, the Triangulation's default TriFinder will - be used by calling `.Triangulation.get_trifinder`. - - Methods - ------- - `__call__` (x, y) : Returns interpolated values at (x, y) points. - `gradient` (x, y) : Returns interpolated derivatives at (x, y) points. - - """ - def __init__(self, triangulation, z, trifinder=None): - super().__init__(triangulation, z, trifinder) - - # Store plane coefficients for fast interpolation calculations. 
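[Editorial aside, not part of the deleted file] The plane coefficients stored here encode, per triangle, the affine function the class docstring describes. A standalone sketch of that idea for one triangle with hypothetical nodal values (this is not matplotlib's `calculate_plane_coefficients`):

```py
# Illustrative sketch only (not part of the deleted module): the "plane per
# triangle" idea behind LinearTriInterpolator, for a single triangle.
import numpy as np

# Triangle vertices (x, y) and nodal values z (hypothetical numbers).
pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
z = np.array([1.0, 3.0, 2.0])

# Solve [x y 1] @ [a, b, c]^T = z for the plane z = a*x + b*y + c.
A = np.column_stack([pts, np.ones(3)])
a, b, c = np.linalg.solve(A, z)

# Interpolation inside the triangle is then a plane evaluation, and the
# gradient (dz/dx, dz/dy) = (a, b) is constant over the triangle.
print(a * 0.25 + b * 0.25 + c)   # value at (0.25, 0.25)
```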
- self._plane_coefficients = \ - self._triangulation.calculate_plane_coefficients(self._z) - - def __call__(self, x, y): - return self._interpolate_multikeys(x, y, tri_index=None, - return_keys=('z',))[0] - __call__.__doc__ = TriInterpolator._docstring__call__ - - def gradient(self, x, y): - return self._interpolate_multikeys(x, y, tri_index=None, - return_keys=('dzdx', 'dzdy')) - gradient.__doc__ = TriInterpolator._docstringgradient - - def _interpolate_single_key(self, return_key, tri_index, x, y): - _api.check_in_list(['z', 'dzdx', 'dzdy'], return_key=return_key) - if return_key == 'z': - return (self._plane_coefficients[tri_index, 0]*x + - self._plane_coefficients[tri_index, 1]*y + - self._plane_coefficients[tri_index, 2]) - elif return_key == 'dzdx': - return self._plane_coefficients[tri_index, 0] - else: # 'dzdy' - return self._plane_coefficients[tri_index, 1] - - -class CubicTriInterpolator(TriInterpolator): - r""" - Cubic interpolator on a triangular grid. - - In one-dimension - on a segment - a cubic interpolating function is - defined by the values of the function and its derivative at both ends. - This is almost the same in 2D inside a triangle, except that the values - of the function and its 2 derivatives have to be defined at each triangle - node. - - The CubicTriInterpolator takes the value of the function at each node - - provided by the user - and internally computes the value of the - derivatives, resulting in a smooth interpolation. - (As a special feature, the user can also impose the value of the - derivatives at each node, but this is not supposed to be the common - usage.) - - Parameters - ---------- - triangulation : `~matplotlib.tri.Triangulation` - The triangulation to interpolate over. - z : (npoints,) array-like - Array of values, defined at grid points, to interpolate between. - kind : {'min_E', 'geom', 'user'}, optional - Choice of the smoothing algorithm, in order to compute - the interpolant derivatives (defaults to 'min_E'): - - - if 'min_E': (default) The derivatives at each node is computed - to minimize a bending energy. - - if 'geom': The derivatives at each node is computed as a - weighted average of relevant triangle normals. To be used for - speed optimization (large grids). - - if 'user': The user provides the argument *dz*, no computation - is hence needed. - - trifinder : `~matplotlib.tri.TriFinder`, optional - If not specified, the Triangulation's default TriFinder will - be used by calling `.Triangulation.get_trifinder`. - dz : tuple of array-likes (dzdx, dzdy), optional - Used only if *kind* ='user'. In this case *dz* must be provided as - (dzdx, dzdy) where dzdx, dzdy are arrays of the same shape as *z* and - are the interpolant first derivatives at the *triangulation* points. - - Methods - ------- - `__call__` (x, y) : Returns interpolated values at (x, y) points. - `gradient` (x, y) : Returns interpolated derivatives at (x, y) points. - - Notes - ----- - This note is a bit technical and details how the cubic interpolation is - computed. - - The interpolation is based on a Clough-Tocher subdivision scheme of - the *triangulation* mesh (to make it clearer, each triangle of the - grid will be divided in 3 child-triangles, and on each child triangle - the interpolated function is a cubic polynomial of the 2 coordinates). - This technique originates from FEM (Finite Element Method) analysis; - the element used is a reduced Hsieh-Clough-Tocher (HCT) - element. Its shape functions are described in [1]_. 
- The assembled function is guaranteed to be C1-smooth, i.e. it is - continuous and its first derivatives are also continuous (this - is easy to show inside the triangles but is also true when crossing the - edges). - - In the default case (*kind* ='min_E'), the interpolant minimizes a - curvature energy on the functional space generated by the HCT element - shape functions - with imposed values but arbitrary derivatives at each - node. The minimized functional is the integral of the so-called total - curvature (implementation based on an algorithm from [2]_ - PCG sparse - solver): - - .. math:: - - E(z) = \frac{1}{2} \int_{\Omega} \left( - \left( \frac{\partial^2{z}}{\partial{x}^2} \right)^2 + - \left( \frac{\partial^2{z}}{\partial{y}^2} \right)^2 + - 2\left( \frac{\partial^2{z}}{\partial{y}\partial{x}} \right)^2 - \right) dx\,dy - - If the case *kind* ='geom' is chosen by the user, a simple geometric - approximation is used (weighted average of the triangle normal - vectors), which could improve speed on very large grids. - - References - ---------- - .. [1] Michel Bernadou, Kamal Hassan, "Basis functions for general - Hsieh-Clough-Tocher triangles, complete or reduced.", - International Journal for Numerical Methods in Engineering, - 17(5):784 - 789. 2.01. - .. [2] C.T. Kelley, "Iterative Methods for Optimization". - - """ - def __init__(self, triangulation, z, kind='min_E', trifinder=None, - dz=None): - super().__init__(triangulation, z, trifinder) - - # Loads the underlying c++ _triangulation. - # (During loading, reordering of triangulation._triangles may occur so - # that all final triangles are now anti-clockwise) - self._triangulation.get_cpp_triangulation() - - # To build the stiffness matrix and avoid zero-energy spurious modes - # we will only store internally the valid (unmasked) triangles and - # the necessary (used) points coordinates. - # 2 renumbering tables need to be computed and stored: - # - a triangle renum table in order to translate the result from a - # TriFinder instance into the internal stored triangle number. - # - a node renum table to overwrite the self._z values into the new - # (used) node numbering. 
- tri_analyzer = TriAnalyzer(self._triangulation) - (compressed_triangles, compressed_x, compressed_y, tri_renum, - node_renum) = tri_analyzer._get_compressed_triangulation() - self._triangles = compressed_triangles - self._tri_renum = tri_renum - # Taking into account the node renumbering in self._z: - valid_node = (node_renum != -1) - self._z[node_renum[valid_node]] = self._z[valid_node] - - # Computing scale factors - self._unit_x = np.ptp(compressed_x) - self._unit_y = np.ptp(compressed_y) - self._pts = np.column_stack([compressed_x / self._unit_x, - compressed_y / self._unit_y]) - # Computing triangle points - self._tris_pts = self._pts[self._triangles] - # Computing eccentricities - self._eccs = self._compute_tri_eccentricities(self._tris_pts) - # Computing dof estimations for HCT triangle shape function - _api.check_in_list(['user', 'geom', 'min_E'], kind=kind) - self._dof = self._compute_dof(kind, dz=dz) - # Loading HCT element - self._ReferenceElement = _ReducedHCT_Element() - - def __call__(self, x, y): - return self._interpolate_multikeys(x, y, tri_index=None, - return_keys=('z',))[0] - __call__.__doc__ = TriInterpolator._docstring__call__ - - def gradient(self, x, y): - return self._interpolate_multikeys(x, y, tri_index=None, - return_keys=('dzdx', 'dzdy')) - gradient.__doc__ = TriInterpolator._docstringgradient - - def _interpolate_single_key(self, return_key, tri_index, x, y): - _api.check_in_list(['z', 'dzdx', 'dzdy'], return_key=return_key) - tris_pts = self._tris_pts[tri_index] - alpha = self._get_alpha_vec(x, y, tris_pts) - ecc = self._eccs[tri_index] - dof = np.expand_dims(self._dof[tri_index], axis=1) - if return_key == 'z': - return self._ReferenceElement.get_function_values( - alpha, ecc, dof) - else: # 'dzdx', 'dzdy' - J = self._get_jacobian(tris_pts) - dzdx = self._ReferenceElement.get_function_derivatives( - alpha, J, ecc, dof) - if return_key == 'dzdx': - return dzdx[:, 0, 0] - else: - return dzdx[:, 1, 0] - - def _compute_dof(self, kind, dz=None): - """ - Compute and return nodal dofs according to kind. - - Parameters - ---------- - kind : {'min_E', 'geom', 'user'} - Choice of the _DOF_estimator subclass to estimate the gradient. - dz : tuple of array-likes (dzdx, dzdy), optional - Used only if *kind*=user; in this case passed to the - :class:`_DOF_estimator_user`. - - Returns - ------- - array-like, shape (npts, 2) - Estimation of the gradient at triangulation nodes (stored as - degree of freedoms of reduced-HCT triangle elements). - """ - if kind == 'user': - if dz is None: - raise ValueError("For a CubicTriInterpolator with " - "*kind*='user', a valid *dz* " - "argument is expected.") - TE = _DOF_estimator_user(self, dz=dz) - elif kind == 'geom': - TE = _DOF_estimator_geom(self) - else: # 'min_E', checked in __init__ - TE = _DOF_estimator_min_E(self) - return TE.compute_dof_from_df() - - @staticmethod - def _get_alpha_vec(x, y, tris_pts): - """ - Fast (vectorized) function to compute barycentric coordinates alpha. - - Parameters - ---------- - x, y : array-like of dim 1 (shape (nx,)) - Coordinates of the points whose points barycentric coordinates are - requested. - tris_pts : array like of dim 3 (shape: (nx, 3, 2)) - Coordinates of the containing triangles apexes. - - Returns - ------- - array of dim 2 (shape (nx, 3)) - Barycentric coordinates of the points inside the containing - triangles. 
- """ - ndim = tris_pts.ndim-2 - - a = tris_pts[:, 1, :] - tris_pts[:, 0, :] - b = tris_pts[:, 2, :] - tris_pts[:, 0, :] - abT = np.stack([a, b], axis=-1) - ab = _transpose_vectorized(abT) - OM = np.stack([x, y], axis=1) - tris_pts[:, 0, :] - - metric = ab @ abT - # Here we try to deal with the colinear cases. - # metric_inv is in this case set to the Moore-Penrose pseudo-inverse - # meaning that we will still return a set of valid barycentric - # coordinates. - metric_inv = _pseudo_inv22sym_vectorized(metric) - Covar = ab @ _transpose_vectorized(np.expand_dims(OM, ndim)) - ksi = metric_inv @ Covar - alpha = _to_matrix_vectorized([ - [1-ksi[:, 0, 0]-ksi[:, 1, 0]], [ksi[:, 0, 0]], [ksi[:, 1, 0]]]) - return alpha - - @staticmethod - def _get_jacobian(tris_pts): - """ - Fast (vectorized) function to compute triangle jacobian matrix. - - Parameters - ---------- - tris_pts : array like of dim 3 (shape: (nx, 3, 2)) - Coordinates of the containing triangles apexes. - - Returns - ------- - array of dim 3 (shape (nx, 2, 2)) - Barycentric coordinates of the points inside the containing - triangles. - J[itri, :, :] is the jacobian matrix at apex 0 of the triangle - itri, so that the following (matrix) relationship holds: - [dz/dksi] = [J] x [dz/dx] - with x: global coordinates - ksi: element parametric coordinates in triangle first apex - local basis. - """ - a = np.array(tris_pts[:, 1, :] - tris_pts[:, 0, :]) - b = np.array(tris_pts[:, 2, :] - tris_pts[:, 0, :]) - J = _to_matrix_vectorized([[a[:, 0], a[:, 1]], - [b[:, 0], b[:, 1]]]) - return J - - @staticmethod - def _compute_tri_eccentricities(tris_pts): - """ - Compute triangle eccentricities. - - Parameters - ---------- - tris_pts : array like of dim 3 (shape: (nx, 3, 2)) - Coordinates of the triangles apexes. - - Returns - ------- - array like of dim 2 (shape: (nx, 3)) - The so-called eccentricity parameters [1] needed for HCT triangular - element. - """ - a = np.expand_dims(tris_pts[:, 2, :] - tris_pts[:, 1, :], axis=2) - b = np.expand_dims(tris_pts[:, 0, :] - tris_pts[:, 2, :], axis=2) - c = np.expand_dims(tris_pts[:, 1, :] - tris_pts[:, 0, :], axis=2) - # Do not use np.squeeze, this is dangerous if only one triangle - # in the triangulation... - dot_a = (_transpose_vectorized(a) @ a)[:, 0, 0] - dot_b = (_transpose_vectorized(b) @ b)[:, 0, 0] - dot_c = (_transpose_vectorized(c) @ c)[:, 0, 0] - # Note that this line will raise a warning for dot_a, dot_b or dot_c - # zeros, but we choose not to support triangles with duplicate points. - return _to_matrix_vectorized([[(dot_c-dot_b) / dot_a], - [(dot_a-dot_c) / dot_b], - [(dot_b-dot_a) / dot_c]]) - - -# FEM element used for interpolation and for solving minimisation -# problem (Reduced HCT element) -class _ReducedHCT_Element: - """ - Implementation of reduced HCT triangular element with explicit shape - functions. - - Computes z, dz, d2z and the element stiffness matrix for bending energy: - E(f) = integral( (d2z/dx2 + d2z/dy2)**2 dA) - - *** Reference for the shape functions: *** - [1] Basis functions for general Hsieh-Clough-Tocher _triangles, complete or - reduced. - Michel Bernadou, Kamal Hassan - International Journal for Numerical Methods in Engineering. - 17(5):784 - 789. 
2.01 - - *** Element description: *** - 9 dofs: z and dz given at 3 apex - C1 (conform) - - """ - # 1) Loads matrices to generate shape functions as a function of - # triangle eccentricities - based on [1] p.11 ''' - M = np.array([ - [ 0.00, 0.00, 0.00, 4.50, 4.50, 0.00, 0.00, 0.00, 0.00, 0.00], - [-0.25, 0.00, 0.00, 0.50, 1.25, 0.00, 0.00, 0.00, 0.00, 0.00], - [-0.25, 0.00, 0.00, 1.25, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00], - [ 0.50, 1.00, 0.00, -1.50, 0.00, 3.00, 3.00, 0.00, 0.00, 3.00], - [ 0.00, 0.00, 0.00, -0.25, 0.25, 0.00, 1.00, 0.00, 0.00, 0.50], - [ 0.25, 0.00, 0.00, -0.50, -0.25, 1.00, 0.00, 0.00, 0.00, 1.00], - [ 0.50, 0.00, 1.00, 0.00, -1.50, 0.00, 0.00, 3.00, 3.00, 3.00], - [ 0.25, 0.00, 0.00, -0.25, -0.50, 0.00, 0.00, 0.00, 1.00, 1.00], - [ 0.00, 0.00, 0.00, 0.25, -0.25, 0.00, 0.00, 1.00, 0.00, 0.50]]) - M0 = np.array([ - [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], - [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], - [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], - [-1.00, 0.00, 0.00, 1.50, 1.50, 0.00, 0.00, 0.00, 0.00, -3.00], - [-0.50, 0.00, 0.00, 0.75, 0.75, 0.00, 0.00, 0.00, 0.00, -1.50], - [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], - [ 1.00, 0.00, 0.00, -1.50, -1.50, 0.00, 0.00, 0.00, 0.00, 3.00], - [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], - [ 0.50, 0.00, 0.00, -0.75, -0.75, 0.00, 0.00, 0.00, 0.00, 1.50]]) - M1 = np.array([ - [-0.50, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], - [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], - [-0.25, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], - [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], - [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], - [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], - [ 0.50, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], - [ 0.25, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], - [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]]) - M2 = np.array([ - [ 0.50, 0.00, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00], - [ 0.25, 0.00, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00], - [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], - [-0.50, 0.00, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00], - [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], - [-0.25, 0.00, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00], - [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], - [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00], - [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]]) - - # 2) Loads matrices to rotate components of gradient & Hessian - # vectors in the reference basis of triangle first apex (a0) - rotate_dV = np.array([[ 1., 0.], [ 0., 1.], - [ 0., 1.], [-1., -1.], - [-1., -1.], [ 1., 0.]]) - - rotate_d2V = np.array([[1., 0., 0.], [0., 1., 0.], [ 0., 0., 1.], - [0., 1., 0.], [1., 1., 1.], [ 0., -2., -1.], - [1., 1., 1.], [1., 0., 0.], [-2., 0., -1.]]) - - # 3) Loads Gauss points & weights on the 3 sub-_triangles for P2 - # exact integral - 3 points on each subtriangles. - # NOTE: as the 2nd derivative is discontinuous , we really need those 9 - # points! 
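[Editorial aside, not part of the deleted file] "P2 exact" above means the rule integrates quadratic polynomials exactly on each sub-triangle. A generic illustration of that property, using the classical edge-midpoint rule on a single triangle (not the element's actual 9-point rule):

```py
# Illustrative sketch only (not part of the deleted module): a 3-point
# barycentric rule that integrates quadratics exactly over one triangle.
import numpy as np

verts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])   # reference triangle
area = 0.5

# Edge-midpoint rule: barycentric points, equal weights 1/3.
bary = np.array([[0.5, 0.5, 0.0], [0.0, 0.5, 0.5], [0.5, 0.0, 0.5]])
pts = bary @ verts                     # cartesian quadrature points

def f(x, y):
    return x**2                        # any degree-2 polynomial

approx = area * np.mean(f(pts[:, 0], pts[:, 1]))
exact = 1.0 / 12.0                     # integral of x**2 over this triangle
print(np.isclose(approx, exact))       # True
```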
- n_gauss = 9 - gauss_pts = np.array([[13./18., 4./18., 1./18.], - [ 4./18., 13./18., 1./18.], - [ 7./18., 7./18., 4./18.], - [ 1./18., 13./18., 4./18.], - [ 1./18., 4./18., 13./18.], - [ 4./18., 7./18., 7./18.], - [ 4./18., 1./18., 13./18.], - [13./18., 1./18., 4./18.], - [ 7./18., 4./18., 7./18.]], dtype=np.float64) - gauss_w = np.ones([9], dtype=np.float64) / 9. - - # 4) Stiffness matrix for curvature energy - E = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 2.]]) - - # 5) Loads the matrix to compute DOF_rot from tri_J at apex 0 - J0_to_J1 = np.array([[-1., 1.], [-1., 0.]]) - J0_to_J2 = np.array([[ 0., -1.], [ 1., -1.]]) - - def get_function_values(self, alpha, ecc, dofs): - """ - Parameters - ---------- - alpha : is a (N x 3 x 1) array (array of column-matrices) of - barycentric coordinates, - ecc : is a (N x 3 x 1) array (array of column-matrices) of triangle - eccentricities, - dofs : is a (N x 1 x 9) arrays (arrays of row-matrices) of computed - degrees of freedom. - - Returns - ------- - Returns the N-array of interpolated function values. - """ - subtri = np.argmin(alpha, axis=1)[:, 0] - ksi = _roll_vectorized(alpha, -subtri, axis=0) - E = _roll_vectorized(ecc, -subtri, axis=0) - x = ksi[:, 0, 0] - y = ksi[:, 1, 0] - z = ksi[:, 2, 0] - x_sq = x*x - y_sq = y*y - z_sq = z*z - V = _to_matrix_vectorized([ - [x_sq*x], [y_sq*y], [z_sq*z], [x_sq*z], [x_sq*y], [y_sq*x], - [y_sq*z], [z_sq*y], [z_sq*x], [x*y*z]]) - prod = self.M @ V - prod += _scalar_vectorized(E[:, 0, 0], self.M0 @ V) - prod += _scalar_vectorized(E[:, 1, 0], self.M1 @ V) - prod += _scalar_vectorized(E[:, 2, 0], self.M2 @ V) - s = _roll_vectorized(prod, 3*subtri, axis=0) - return (dofs @ s)[:, 0, 0] - - def get_function_derivatives(self, alpha, J, ecc, dofs): - """ - Parameters - ---------- - *alpha* is a (N x 3 x 1) array (array of column-matrices of - barycentric coordinates) - *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at - triangle first apex) - *ecc* is a (N x 3 x 1) array (array of column-matrices of triangle - eccentricities) - *dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed - degrees of freedom. - - Returns - ------- - Returns the values of interpolated function derivatives [dz/dx, dz/dy] - in global coordinates at locations alpha, as a column-matrices of - shape (N x 2 x 1). - """ - subtri = np.argmin(alpha, axis=1)[:, 0] - ksi = _roll_vectorized(alpha, -subtri, axis=0) - E = _roll_vectorized(ecc, -subtri, axis=0) - x = ksi[:, 0, 0] - y = ksi[:, 1, 0] - z = ksi[:, 2, 0] - x_sq = x*x - y_sq = y*y - z_sq = z*z - dV = _to_matrix_vectorized([ - [ -3.*x_sq, -3.*x_sq], - [ 3.*y_sq, 0.], - [ 0., 3.*z_sq], - [ -2.*x*z, -2.*x*z+x_sq], - [-2.*x*y+x_sq, -2.*x*y], - [ 2.*x*y-y_sq, -y_sq], - [ 2.*y*z, y_sq], - [ z_sq, 2.*y*z], - [ -z_sq, 2.*x*z-z_sq], - [ x*z-y*z, x*y-y*z]]) - # Puts back dV in first apex basis - dV = dV @ _extract_submatrices( - self.rotate_dV, subtri, block_size=2, axis=0) - - prod = self.M @ dV - prod += _scalar_vectorized(E[:, 0, 0], self.M0 @ dV) - prod += _scalar_vectorized(E[:, 1, 0], self.M1 @ dV) - prod += _scalar_vectorized(E[:, 2, 0], self.M2 @ dV) - dsdksi = _roll_vectorized(prod, 3*subtri, axis=0) - dfdksi = dofs @ dsdksi - # In global coordinates: - # Here we try to deal with the simplest colinear cases, returning a - # null matrix. 
- J_inv = _safe_inv22_vectorized(J) - dfdx = J_inv @ _transpose_vectorized(dfdksi) - return dfdx - - def get_function_hessians(self, alpha, J, ecc, dofs): - """ - Parameters - ---------- - *alpha* is a (N x 3 x 1) array (array of column-matrices) of - barycentric coordinates - *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at - triangle first apex) - *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle - eccentricities - *dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed - degrees of freedom. - - Returns - ------- - Returns the values of interpolated function 2nd-derivatives - [d2z/dx2, d2z/dy2, d2z/dxdy] in global coordinates at locations alpha, - as a column-matrices of shape (N x 3 x 1). - """ - d2sdksi2 = self.get_d2Sidksij2(alpha, ecc) - d2fdksi2 = dofs @ d2sdksi2 - H_rot = self.get_Hrot_from_J(J) - d2fdx2 = d2fdksi2 @ H_rot - return _transpose_vectorized(d2fdx2) - - def get_d2Sidksij2(self, alpha, ecc): - """ - Parameters - ---------- - *alpha* is a (N x 3 x 1) array (array of column-matrices) of - barycentric coordinates - *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle - eccentricities - - Returns - ------- - Returns the arrays d2sdksi2 (N x 3 x 1) Hessian of shape functions - expressed in covariant coordinates in first apex basis. - """ - subtri = np.argmin(alpha, axis=1)[:, 0] - ksi = _roll_vectorized(alpha, -subtri, axis=0) - E = _roll_vectorized(ecc, -subtri, axis=0) - x = ksi[:, 0, 0] - y = ksi[:, 1, 0] - z = ksi[:, 2, 0] - d2V = _to_matrix_vectorized([ - [ 6.*x, 6.*x, 6.*x], - [ 6.*y, 0., 0.], - [ 0., 6.*z, 0.], - [ 2.*z, 2.*z-4.*x, 2.*z-2.*x], - [2.*y-4.*x, 2.*y, 2.*y-2.*x], - [2.*x-4.*y, 0., -2.*y], - [ 2.*z, 0., 2.*y], - [ 0., 2.*y, 2.*z], - [ 0., 2.*x-4.*z, -2.*z], - [ -2.*z, -2.*y, x-y-z]]) - # Puts back d2V in first apex basis - d2V = d2V @ _extract_submatrices( - self.rotate_d2V, subtri, block_size=3, axis=0) - prod = self.M @ d2V - prod += _scalar_vectorized(E[:, 0, 0], self.M0 @ d2V) - prod += _scalar_vectorized(E[:, 1, 0], self.M1 @ d2V) - prod += _scalar_vectorized(E[:, 2, 0], self.M2 @ d2V) - d2sdksi2 = _roll_vectorized(prod, 3*subtri, axis=0) - return d2sdksi2 - - def get_bending_matrices(self, J, ecc): - """ - Parameters - ---------- - *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at - triangle first apex) - *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle - eccentricities - - Returns - ------- - Returns the element K matrices for bending energy expressed in - GLOBAL nodal coordinates. - K_ij = integral [ (d2zi/dx2 + d2zi/dy2) * (d2zj/dx2 + d2zj/dy2) dA] - tri_J is needed to rotate dofs from local basis to global basis - """ - n = np.size(ecc, 0) - - # 1) matrix to rotate dofs in global coordinates - J1 = self.J0_to_J1 @ J - J2 = self.J0_to_J2 @ J - DOF_rot = np.zeros([n, 9, 9], dtype=np.float64) - DOF_rot[:, 0, 0] = 1 - DOF_rot[:, 3, 3] = 1 - DOF_rot[:, 6, 6] = 1 - DOF_rot[:, 1:3, 1:3] = J - DOF_rot[:, 4:6, 4:6] = J1 - DOF_rot[:, 7:9, 7:9] = J2 - - # 2) matrix to rotate Hessian in global coordinates. - H_rot, area = self.get_Hrot_from_J(J, return_area=True) - - # 3) Computes stiffness matrix - # Gauss quadrature. 
- K = np.zeros([n, 9, 9], dtype=np.float64) - weights = self.gauss_w - pts = self.gauss_pts - for igauss in range(self.n_gauss): - alpha = np.tile(pts[igauss, :], n).reshape(n, 3) - alpha = np.expand_dims(alpha, 2) - weight = weights[igauss] - d2Skdksi2 = self.get_d2Sidksij2(alpha, ecc) - d2Skdx2 = d2Skdksi2 @ H_rot - K += weight * (d2Skdx2 @ self.E @ _transpose_vectorized(d2Skdx2)) - - # 4) With nodal (not elem) dofs - K = _transpose_vectorized(DOF_rot) @ K @ DOF_rot - - # 5) Need the area to compute total element energy - return _scalar_vectorized(area, K) - - def get_Hrot_from_J(self, J, return_area=False): - """ - Parameters - ---------- - *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at - triangle first apex) - - Returns - ------- - Returns H_rot used to rotate Hessian from local basis of first apex, - to global coordinates. - if *return_area* is True, returns also the triangle area (0.5*det(J)) - """ - # Here we try to deal with the simplest colinear cases; a null - # energy and area is imposed. - J_inv = _safe_inv22_vectorized(J) - Ji00 = J_inv[:, 0, 0] - Ji11 = J_inv[:, 1, 1] - Ji10 = J_inv[:, 1, 0] - Ji01 = J_inv[:, 0, 1] - H_rot = _to_matrix_vectorized([ - [Ji00*Ji00, Ji10*Ji10, Ji00*Ji10], - [Ji01*Ji01, Ji11*Ji11, Ji01*Ji11], - [2*Ji00*Ji01, 2*Ji11*Ji10, Ji00*Ji11+Ji10*Ji01]]) - if not return_area: - return H_rot - else: - area = 0.5 * (J[:, 0, 0]*J[:, 1, 1] - J[:, 0, 1]*J[:, 1, 0]) - return H_rot, area - - def get_Kff_and_Ff(self, J, ecc, triangles, Uc): - """ - Build K and F for the following elliptic formulation: - minimization of curvature energy with value of function at node - imposed and derivatives 'free'. - - Build the global Kff matrix in cco format. - Build the full Ff vec Ff = - Kfc x Uc. - - Parameters - ---------- - *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at - triangle first apex) - *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle - eccentricities - *triangles* is a (N x 3) array of nodes indexes. - *Uc* is (N x 3) array of imposed displacements at nodes - - Returns - ------- - (Kff_rows, Kff_cols, Kff_vals) Kff matrix in coo format - Duplicate - (row, col) entries must be summed. - Ff: force vector - dim npts * 3 - """ - ntri = np.size(ecc, 0) - vec_range = np.arange(ntri, dtype=np.int32) - c_indices = np.full(ntri, -1, dtype=np.int32) # for unused dofs, -1 - f_dof = [1, 2, 4, 5, 7, 8] - c_dof = [0, 3, 6] - - # vals, rows and cols indices in global dof numbering - f_dof_indices = _to_matrix_vectorized([[ - c_indices, triangles[:, 0]*2, triangles[:, 0]*2+1, - c_indices, triangles[:, 1]*2, triangles[:, 1]*2+1, - c_indices, triangles[:, 2]*2, triangles[:, 2]*2+1]]) - - expand_indices = np.ones([ntri, 9, 1], dtype=np.int32) - f_row_indices = _transpose_vectorized(expand_indices @ f_dof_indices) - f_col_indices = expand_indices @ f_dof_indices - K_elem = self.get_bending_matrices(J, ecc) - - # Extracting sub-matrices - # Explanation & notations: - # * Subscript f denotes 'free' degrees of freedom (i.e. dz/dx, dz/dx) - # * Subscript c denotes 'condensated' (imposed) degrees of freedom - # (i.e. 
z at all nodes) - # * F = [Ff, Fc] is the force vector - # * U = [Uf, Uc] is the imposed dof vector - # [ Kff Kfc ] - # * K = [ ] is the laplacian stiffness matrix - # [ Kcf Kff ] - # * As F = K x U one gets straightforwardly: Ff = - Kfc x Uc - - # Computing Kff stiffness matrix in sparse coo format - Kff_vals = np.ravel(K_elem[np.ix_(vec_range, f_dof, f_dof)]) - Kff_rows = np.ravel(f_row_indices[np.ix_(vec_range, f_dof, f_dof)]) - Kff_cols = np.ravel(f_col_indices[np.ix_(vec_range, f_dof, f_dof)]) - - # Computing Ff force vector in sparse coo format - Kfc_elem = K_elem[np.ix_(vec_range, f_dof, c_dof)] - Uc_elem = np.expand_dims(Uc, axis=2) - Ff_elem = -(Kfc_elem @ Uc_elem)[:, :, 0] - Ff_indices = f_dof_indices[np.ix_(vec_range, [0], f_dof)][:, 0, :] - - # Extracting Ff force vector in dense format - # We have to sum duplicate indices - using bincount - Ff = np.bincount(np.ravel(Ff_indices), weights=np.ravel(Ff_elem)) - return Kff_rows, Kff_cols, Kff_vals, Ff - - -# :class:_DOF_estimator, _DOF_estimator_user, _DOF_estimator_geom, -# _DOF_estimator_min_E -# Private classes used to compute the degree of freedom of each triangular -# element for the TriCubicInterpolator. -class _DOF_estimator: - """ - Abstract base class for classes used to estimate a function's first - derivatives, and deduce the dofs for a CubicTriInterpolator using a - reduced HCT element formulation. - - Derived classes implement ``compute_df(self, **kwargs)``, returning - ``np.vstack([dfx, dfy]).T`` where ``dfx, dfy`` are the estimation of the 2 - gradient coordinates. - """ - def __init__(self, interpolator, **kwargs): - _api.check_isinstance(CubicTriInterpolator, interpolator=interpolator) - self._pts = interpolator._pts - self._tris_pts = interpolator._tris_pts - self.z = interpolator._z - self._triangles = interpolator._triangles - (self._unit_x, self._unit_y) = (interpolator._unit_x, - interpolator._unit_y) - self.dz = self.compute_dz(**kwargs) - self.compute_dof_from_df() - - def compute_dz(self, **kwargs): - raise NotImplementedError - - def compute_dof_from_df(self): - """ - Compute reduced-HCT elements degrees of freedom, from the gradient. - """ - J = CubicTriInterpolator._get_jacobian(self._tris_pts) - tri_z = self.z[self._triangles] - tri_dz = self.dz[self._triangles] - tri_dof = self.get_dof_vec(tri_z, tri_dz, J) - return tri_dof - - @staticmethod - def get_dof_vec(tri_z, tri_dz, J): - """ - Compute the dof vector of a triangle, from the value of f, df and - of the local Jacobian at each node. - - Parameters - ---------- - tri_z : shape (3,) array - f nodal values. - tri_dz : shape (3, 2) array - df/dx, df/dy nodal values. - J - Jacobian matrix in local basis of apex 0. 
- - Returns - ------- - dof : shape (9,) array - For each apex ``iapex``:: - - dof[iapex*3+0] = f(Ai) - dof[iapex*3+1] = df(Ai).(AiAi+) - dof[iapex*3+2] = df(Ai).(AiAi-) - """ - npt = tri_z.shape[0] - dof = np.zeros([npt, 9], dtype=np.float64) - J1 = _ReducedHCT_Element.J0_to_J1 @ J - J2 = _ReducedHCT_Element.J0_to_J2 @ J - - col0 = J @ np.expand_dims(tri_dz[:, 0, :], axis=2) - col1 = J1 @ np.expand_dims(tri_dz[:, 1, :], axis=2) - col2 = J2 @ np.expand_dims(tri_dz[:, 2, :], axis=2) - - dfdksi = _to_matrix_vectorized([ - [col0[:, 0, 0], col1[:, 0, 0], col2[:, 0, 0]], - [col0[:, 1, 0], col1[:, 1, 0], col2[:, 1, 0]]]) - dof[:, 0:7:3] = tri_z - dof[:, 1:8:3] = dfdksi[:, 0] - dof[:, 2:9:3] = dfdksi[:, 1] - return dof - - -class _DOF_estimator_user(_DOF_estimator): - """dz is imposed by user; accounts for scaling if any.""" - - def compute_dz(self, dz): - (dzdx, dzdy) = dz - dzdx = dzdx * self._unit_x - dzdy = dzdy * self._unit_y - return np.vstack([dzdx, dzdy]).T - - -class _DOF_estimator_geom(_DOF_estimator): - """Fast 'geometric' approximation, recommended for large arrays.""" - - def compute_dz(self): - """ - self.df is computed as weighted average of _triangles sharing a common - node. On each triangle itri f is first assumed linear (= ~f), which - allows to compute d~f[itri] - Then the following approximation of df nodal values is then proposed: - f[ipt] = SUM ( w[itri] x d~f[itri] , for itri sharing apex ipt) - The weighted coeff. w[itri] are proportional to the angle of the - triangle itri at apex ipt - """ - el_geom_w = self.compute_geom_weights() - el_geom_grad = self.compute_geom_grads() - - # Sum of weights coeffs - w_node_sum = np.bincount(np.ravel(self._triangles), - weights=np.ravel(el_geom_w)) - - # Sum of weighted df = (dfx, dfy) - dfx_el_w = np.empty_like(el_geom_w) - dfy_el_w = np.empty_like(el_geom_w) - for iapex in range(3): - dfx_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 0] - dfy_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 1] - dfx_node_sum = np.bincount(np.ravel(self._triangles), - weights=np.ravel(dfx_el_w)) - dfy_node_sum = np.bincount(np.ravel(self._triangles), - weights=np.ravel(dfy_el_w)) - - # Estimation of df - dfx_estim = dfx_node_sum/w_node_sum - dfy_estim = dfy_node_sum/w_node_sum - return np.vstack([dfx_estim, dfy_estim]).T - - def compute_geom_weights(self): - """ - Build the (nelems, 3) weights coeffs of _triangles angles, - renormalized so that np.sum(weights, axis=1) == np.ones(nelems) - """ - weights = np.zeros([np.size(self._triangles, 0), 3]) - tris_pts = self._tris_pts - for ipt in range(3): - p0 = tris_pts[:, ipt % 3, :] - p1 = tris_pts[:, (ipt+1) % 3, :] - p2 = tris_pts[:, (ipt-1) % 3, :] - alpha1 = np.arctan2(p1[:, 1]-p0[:, 1], p1[:, 0]-p0[:, 0]) - alpha2 = np.arctan2(p2[:, 1]-p0[:, 1], p2[:, 0]-p0[:, 0]) - # In the below formula we could take modulo 2. but - # modulo 1. is safer regarding round-off errors (flat triangles). - angle = np.abs(((alpha2-alpha1) / np.pi) % 1) - # Weight proportional to angle up np.pi/2; null weight for - # degenerated cases 0 and np.pi (note that *angle* is normalized - # by np.pi). - weights[:, ipt] = 0.5 - np.abs(angle-0.5) - return weights - - def compute_geom_grads(self): - """ - Compute the (global) gradient component of f assumed linear (~f). - returns array df of shape (nelems, 2) - df[ielem].dM[ielem] = dz[ielem] i.e. 
df = dz x dM = dM.T^-1 x dz - """ - tris_pts = self._tris_pts - tris_f = self.z[self._triangles] - - dM1 = tris_pts[:, 1, :] - tris_pts[:, 0, :] - dM2 = tris_pts[:, 2, :] - tris_pts[:, 0, :] - dM = np.dstack([dM1, dM2]) - # Here we try to deal with the simplest colinear cases: a null - # gradient is assumed in this case. - dM_inv = _safe_inv22_vectorized(dM) - - dZ1 = tris_f[:, 1] - tris_f[:, 0] - dZ2 = tris_f[:, 2] - tris_f[:, 0] - dZ = np.vstack([dZ1, dZ2]).T - df = np.empty_like(dZ) - - # With np.einsum: could be ej,eji -> ej - df[:, 0] = dZ[:, 0]*dM_inv[:, 0, 0] + dZ[:, 1]*dM_inv[:, 1, 0] - df[:, 1] = dZ[:, 0]*dM_inv[:, 0, 1] + dZ[:, 1]*dM_inv[:, 1, 1] - return df - - -class _DOF_estimator_min_E(_DOF_estimator_geom): - """ - The 'smoothest' approximation, df is computed through global minimization - of the bending energy: - E(f) = integral[(d2z/dx2 + d2z/dy2 + 2 d2z/dxdy)**2 dA] - """ - def __init__(self, Interpolator): - self._eccs = Interpolator._eccs - super().__init__(Interpolator) - - def compute_dz(self): - """ - Elliptic solver for bending energy minimization. - Uses a dedicated 'toy' sparse Jacobi PCG solver. - """ - # Initial guess for iterative PCG solver. - dz_init = super().compute_dz() - Uf0 = np.ravel(dz_init) - - reference_element = _ReducedHCT_Element() - J = CubicTriInterpolator._get_jacobian(self._tris_pts) - eccs = self._eccs - triangles = self._triangles - Uc = self.z[self._triangles] - - # Building stiffness matrix and force vector in coo format - Kff_rows, Kff_cols, Kff_vals, Ff = reference_element.get_Kff_and_Ff( - J, eccs, triangles, Uc) - - # Building sparse matrix and solving minimization problem - # We could use scipy.sparse direct solver; however to avoid this - # external dependency an implementation of a simple PCG solver with - # a simple diagonal Jacobi preconditioner is implemented. - tol = 1.e-10 - n_dof = Ff.shape[0] - Kff_coo = _Sparse_Matrix_coo(Kff_vals, Kff_rows, Kff_cols, - shape=(n_dof, n_dof)) - Kff_coo.compress_csc() - Uf, err = _cg(A=Kff_coo, b=Ff, x0=Uf0, tol=tol) - # If the PCG did not converge, we return the best guess between Uf0 - # and Uf. - err0 = np.linalg.norm(Kff_coo.dot(Uf0) - Ff) - if err0 < err: - # Maybe a good occasion to raise a warning here ? - _api.warn_external("In TriCubicInterpolator initialization, " - "PCG sparse solver did not converge after " - "1000 iterations. `geom` approximation is " - "used instead of `min_E`") - Uf = Uf0 - - # Building dz from Uf - dz = np.empty([self._pts.shape[0], 2], dtype=np.float64) - dz[:, 0] = Uf[::2] - dz[:, 1] = Uf[1::2] - return dz - - -# The following private :class:_Sparse_Matrix_coo and :func:_cg provide -# a PCG sparse solver for (symmetric) elliptic problems. -class _Sparse_Matrix_coo: - def __init__(self, vals, rows, cols, shape): - """ - Create a sparse matrix in coo format. - *vals*: arrays of values of non-null entries of the matrix - *rows*: int arrays of rows of non-null entries of the matrix - *cols*: int arrays of cols of non-null entries of the matrix - *shape*: 2-tuple (n, m) of matrix shape - """ - self.n, self.m = shape - self.vals = np.asarray(vals, dtype=np.float64) - self.rows = np.asarray(rows, dtype=np.int32) - self.cols = np.asarray(cols, dtype=np.int32) - - def dot(self, V): - """ - Dot product of self by a vector *V* in sparse-dense to dense format - *V* dense vector of shape (self.m,). 
- """ - assert V.shape == (self.m,) - return np.bincount(self.rows, - weights=self.vals*V[self.cols], - minlength=self.m) - - def compress_csc(self): - """ - Compress rows, cols, vals / summing duplicates. Sort for csc format. - """ - _, unique, indices = np.unique( - self.rows + self.n*self.cols, - return_index=True, return_inverse=True) - self.rows = self.rows[unique] - self.cols = self.cols[unique] - self.vals = np.bincount(indices, weights=self.vals) - - def compress_csr(self): - """ - Compress rows, cols, vals / summing duplicates. Sort for csr format. - """ - _, unique, indices = np.unique( - self.m*self.rows + self.cols, - return_index=True, return_inverse=True) - self.rows = self.rows[unique] - self.cols = self.cols[unique] - self.vals = np.bincount(indices, weights=self.vals) - - def to_dense(self): - """ - Return a dense matrix representing self, mainly for debugging purposes. - """ - ret = np.zeros([self.n, self.m], dtype=np.float64) - nvals = self.vals.size - for i in range(nvals): - ret[self.rows[i], self.cols[i]] += self.vals[i] - return ret - - def __str__(self): - return self.to_dense().__str__() - - @property - def diag(self): - """Return the (dense) vector of the diagonal elements.""" - in_diag = (self.rows == self.cols) - diag = np.zeros(min(self.n, self.n), dtype=np.float64) # default 0. - diag[self.rows[in_diag]] = self.vals[in_diag] - return diag - - -def _cg(A, b, x0=None, tol=1.e-10, maxiter=1000): - """ - Use Preconditioned Conjugate Gradient iteration to solve A x = b - A simple Jacobi (diagonal) preconditioner is used. - - Parameters - ---------- - A : _Sparse_Matrix_coo - *A* must have been compressed before by compress_csc or - compress_csr method. - b : array - Right hand side of the linear system. - x0 : array, optional - Starting guess for the solution. Defaults to the zero vector. - tol : float, optional - Tolerance to achieve. The algorithm terminates when the relative - residual is below tol. Default is 1e-10. - maxiter : int, optional - Maximum number of iterations. Iteration will stop after *maxiter* - steps even if the specified tolerance has not been achieved. Defaults - to 1000. - - Returns - ------- - x : array - The converged solution. - err : float - The absolute error np.linalg.norm(A.dot(x) - b) - """ - n = b.size - assert A.n == n - assert A.m == n - b_norm = np.linalg.norm(b) - - # Jacobi pre-conditioner - kvec = A.diag - # For diag elem < 1e-6 we keep 1e-6. - kvec = np.maximum(kvec, 1e-6) - - # Initial guess - if x0 is None: - x = np.zeros(n) - else: - x = x0 - - r = b - A.dot(x) - w = r/kvec - - p = np.zeros(n) - beta = 0.0 - rho = np.dot(r, w) - k = 0 - - # Following C. T. Kelley - while (np.sqrt(abs(rho)) > tol*b_norm) and (k < maxiter): - p = w + beta*p - z = A.dot(p) - alpha = rho/np.dot(p, z) - r = r - alpha*z - w = r/kvec - rhoold = rho - rho = np.dot(r, w) - x = x + alpha*p - beta = rho/rhoold - # err = np.linalg.norm(A.dot(x) - b) # absolute accuracy - not used - k += 1 - err = np.linalg.norm(A.dot(x) - b) - return x, err - - -# The following private functions: -# :func:`_safe_inv22_vectorized` -# :func:`_pseudo_inv22sym_vectorized` -# :func:`_scalar_vectorized` -# :func:`_transpose_vectorized` -# :func:`_roll_vectorized` -# :func:`_to_matrix_vectorized` -# :func:`_extract_submatrices` -# provide fast numpy implementation of some standard operations on arrays of -# matrices - stored as (:, n_rows, n_cols)-shaped np.arrays. 
- -# Development note: Dealing with pathologic 'flat' triangles in the -# CubicTriInterpolator code and impact on (2, 2)-matrix inversion functions -# :func:`_safe_inv22_vectorized` and :func:`_pseudo_inv22sym_vectorized`. -# -# Goals: -# 1) The CubicTriInterpolator should be able to handle flat or almost flat -# triangles without raising an error, -# 2) These degenerated triangles should have no impact on the automatic dof -# calculation (associated with null weight for the _DOF_estimator_geom and -# with null energy for the _DOF_estimator_min_E), -# 3) Linear patch test should be passed exactly on degenerated meshes, -# 4) Interpolation (with :meth:`_interpolate_single_key` or -# :meth:`_interpolate_multi_key`) shall be correctly handled even *inside* -# the pathologic triangles, to interact correctly with a TriRefiner class. -# -# Difficulties: -# Flat triangles have rank-deficient *J* (so-called jacobian matrix) and -# *metric* (the metric tensor = J x J.T). Computation of the local -# tangent plane is also problematic. -# -# Implementation: -# Most of the time, when computing the inverse of a rank-deficient matrix it -# is safe to simply return the null matrix (which is the implementation in -# :func:`_safe_inv22_vectorized`). This is because of point 2), itself -# enforced by: -# - null area hence null energy in :class:`_DOF_estimator_min_E` -# - angles close or equal to 0 or np.pi hence null weight in -# :class:`_DOF_estimator_geom`. -# Note that the function angle -> weight is continuous and maximum for an -# angle np.pi/2 (refer to :meth:`compute_geom_weights`) -# The exception is the computation of barycentric coordinates, which is done -# by inversion of the *metric* matrix. In this case, we need to compute a set -# of valid coordinates (1 among numerous possibilities), to ensure point 4). -# We benefit here from the symmetry of metric = J x J.T, which makes it easier -# to compute a pseudo-inverse in :func:`_pseudo_inv22sym_vectorized` -def _safe_inv22_vectorized(M): - """ - Inversion of arrays of (2, 2) matrices, returns 0 for rank-deficient - matrices. - - *M* : array of (2, 2) matrices to inverse, shape (n, 2, 2) - """ - _api.check_shape((None, 2, 2), M=M) - M_inv = np.empty_like(M) - prod1 = M[:, 0, 0]*M[:, 1, 1] - delta = prod1 - M[:, 0, 1]*M[:, 1, 0] - - # We set delta_inv to 0. in case of a rank deficient matrix; a - # rank-deficient input matrix *M* will lead to a null matrix in output - rank2 = (np.abs(delta) > 1e-8*np.abs(prod1)) - if np.all(rank2): - # Normal 'optimized' flow. - delta_inv = 1./delta - else: - # 'Pathologic' flow. - delta_inv = np.zeros(M.shape[0]) - delta_inv[rank2] = 1./delta[rank2] - - M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv - M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv - M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv - M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv - return M_inv - - -def _pseudo_inv22sym_vectorized(M): - """ - Inversion of arrays of (2, 2) SYMMETRIC matrices; returns the - (Moore-Penrose) pseudo-inverse for rank-deficient matrices. - - In case M is of rank 1, we have M = trace(M) x P where P is the orthogonal - projection on Im(M), and we return trace(M)^-1 x P == M / trace(M)**2 - In case M is of rank 0, we return the null matrix. - - *M* : array of (2, 2) matrices to inverse, shape (n, 2, 2) - """ - _api.check_shape((None, 2, 2), M=M) - M_inv = np.empty_like(M) - prod1 = M[:, 0, 0]*M[:, 1, 1] - delta = prod1 - M[:, 0, 1]*M[:, 1, 0] - rank2 = (np.abs(delta) > 1e-8*np.abs(prod1)) - - if np.all(rank2): - # Normal 'optimized' flow. 
- M_inv[:, 0, 0] = M[:, 1, 1] / delta - M_inv[:, 0, 1] = -M[:, 0, 1] / delta - M_inv[:, 1, 0] = -M[:, 1, 0] / delta - M_inv[:, 1, 1] = M[:, 0, 0] / delta - else: - # 'Pathologic' flow. - # Here we have to deal with 2 sub-cases - # 1) First sub-case: matrices of rank 2: - delta = delta[rank2] - M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta - M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta - M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta - M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta - # 2) Second sub-case: rank-deficient matrices of rank 0 and 1: - rank01 = ~rank2 - tr = M[rank01, 0, 0] + M[rank01, 1, 1] - tr_zeros = (np.abs(tr) < 1.e-8) - sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros) - # sq_tr_inv = 1. / tr**2 - M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv - M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv - M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv - M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv - - return M_inv - - -def _scalar_vectorized(scalar, M): - """ - Scalar product between scalars and matrices. - """ - return scalar[:, np.newaxis, np.newaxis]*M - - -def _transpose_vectorized(M): - """ - Transposition of an array of matrices *M*. - """ - return np.transpose(M, [0, 2, 1]) - - -def _roll_vectorized(M, roll_indices, axis): - """ - Roll an array of matrices along *axis* (0: rows, 1: columns) according to - an array of indices *roll_indices*. - """ - assert axis in [0, 1] - ndim = M.ndim - assert ndim == 3 - ndim_roll = roll_indices.ndim - assert ndim_roll == 1 - sh = M.shape - r, c = sh[-2:] - assert sh[0] == roll_indices.shape[0] - vec_indices = np.arange(sh[0], dtype=np.int32) - - # Builds the rolled matrix - M_roll = np.empty_like(M) - if axis == 0: - for ir in range(r): - for ic in range(c): - M_roll[:, ir, ic] = M[vec_indices, (-roll_indices+ir) % r, ic] - else: # 1 - for ir in range(r): - for ic in range(c): - M_roll[:, ir, ic] = M[vec_indices, ir, (-roll_indices+ic) % c] - return M_roll - - -def _to_matrix_vectorized(M): - """ - Build an array of matrices from individuals np.arrays of identical shapes. - - Parameters - ---------- - M - ncols-list of nrows-lists of shape sh. - - Returns - ------- - M_res : np.array of shape (sh, nrow, ncols) - *M_res* satisfies ``M_res[..., i, j] = M[i][j]``. - """ - assert isinstance(M, (tuple, list)) - assert all(isinstance(item, (tuple, list)) for item in M) - c_vec = np.asarray([len(item) for item in M]) - assert np.all(c_vec-c_vec[0] == 0) - r = len(M) - c = c_vec[0] - M00 = np.asarray(M[0][0]) - dt = M00.dtype - sh = [M00.shape[0], r, c] - M_ret = np.empty(sh, dtype=dt) - for irow in range(r): - for icol in range(c): - M_ret[:, irow, icol] = np.asarray(M[irow][icol]) - return M_ret - - -def _extract_submatrices(M, block_indices, block_size, axis): - """ - Extract selected blocks of a matrices *M* depending on parameters - *block_indices* and *block_size*. 
- - Returns the array of extracted matrices *Mres* so that :: - - M_res[..., ir, :] = M[(block_indices*block_size+ir), :] - """ - assert block_indices.ndim == 1 - assert axis in [0, 1] - - r, c = M.shape - if axis == 0: - sh = [block_indices.shape[0], block_size, c] - else: # 1 - sh = [block_indices.shape[0], r, block_size] - - dt = M.dtype - M_res = np.empty(sh, dtype=dt) - if axis == 0: - for ir in range(block_size): - M_res[:, ir, :] = M[(block_indices*block_size+ir), :] - else: # 1 - for ic in range(block_size): - M_res[:, :, ic] = M[:, (block_indices*block_size+ic)] - - return M_res diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tri/_trirefine.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tri/_trirefine.py deleted file mode 100644 index 7f5110ab9e218e24683acd1489d38bded16c7420..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tri/_trirefine.py +++ /dev/null @@ -1,307 +0,0 @@ -""" -Mesh refinement for triangular grids. -""" - -import numpy as np - -from matplotlib import _api -from matplotlib.tri._triangulation import Triangulation -import matplotlib.tri._triinterpolate - - -class TriRefiner: - """ - Abstract base class for classes implementing mesh refinement. - - A TriRefiner encapsulates a Triangulation object and provides tools for - mesh refinement and interpolation. - - Derived classes must implement: - - - ``refine_triangulation(return_tri_index=False, **kwargs)`` , where - the optional keyword arguments *kwargs* are defined in each - TriRefiner concrete implementation, and which returns: - - - a refined triangulation, - - optionally (depending on *return_tri_index*), for each - point of the refined triangulation: the index of - the initial triangulation triangle to which it belongs. - - - ``refine_field(z, triinterpolator=None, **kwargs)``, where: - - - *z* array of field values (to refine) defined at the base - triangulation nodes, - - *triinterpolator* is an optional `~matplotlib.tri.TriInterpolator`, - - the other optional keyword arguments *kwargs* are defined in - each TriRefiner concrete implementation; - - and which returns (as a tuple) a refined triangular mesh and the - interpolated values of the field at the refined triangulation nodes. - """ - - def __init__(self, triangulation): - _api.check_isinstance(Triangulation, triangulation=triangulation) - self._triangulation = triangulation - - -class UniformTriRefiner(TriRefiner): - """ - Uniform mesh refinement by recursive subdivisions. - - Parameters - ---------- - triangulation : `~matplotlib.tri.Triangulation` - The encapsulated triangulation (to be refined) - """ -# See Also -# -------- -# :class:`~matplotlib.tri.CubicTriInterpolator` and -# :class:`~matplotlib.tri.TriAnalyzer`. -# """ - def __init__(self, triangulation): - super().__init__(triangulation) - - def refine_triangulation(self, return_tri_index=False, subdiv=3): - """ - Compute a uniformly refined triangulation *refi_triangulation* of - the encapsulated :attr:`triangulation`. - - This function refines the encapsulated triangulation by splitting each - father triangle into 4 child sub-triangles built on the edges midside - nodes, recursing *subdiv* times. In the end, each triangle is hence - divided into ``4**subdiv`` child triangles. - - Parameters - ---------- - return_tri_index : bool, default: False - Whether an index table indicating the father triangle index of each - point is returned. 
- subdiv : int, default: 3 - Recursion level for the subdivision. - Each triangle is divided into ``4**subdiv`` child triangles; - hence, the default results in 64 refined subtriangles for each - triangle of the initial triangulation. - - Returns - ------- - refi_triangulation : `~matplotlib.tri.Triangulation` - The refined triangulation. - found_index : int array - Index of the initial triangulation containing triangle, for each - point of *refi_triangulation*. - Returned only if *return_tri_index* is set to True. - """ - refi_triangulation = self._triangulation - ntri = refi_triangulation.triangles.shape[0] - - # Computes the triangulation ancestors numbers in the reference - # triangulation. - ancestors = np.arange(ntri, dtype=np.int32) - for _ in range(subdiv): - refi_triangulation, ancestors = self._refine_triangulation_once( - refi_triangulation, ancestors) - refi_npts = refi_triangulation.x.shape[0] - refi_triangles = refi_triangulation.triangles - - # Now we compute found_index table if needed - if return_tri_index: - # We have to initialize found_index with -1 because some nodes - # may very well belong to no triangle at all, e.g., in case of - # Delaunay Triangulation with DuplicatePointWarning. - found_index = np.full(refi_npts, -1, dtype=np.int32) - tri_mask = self._triangulation.mask - if tri_mask is None: - found_index[refi_triangles] = np.repeat(ancestors, - 3).reshape(-1, 3) - else: - # There is a subtlety here: we want to avoid whenever possible - # that refined points container is a masked triangle (which - # would result in artifacts in plots). - # So we impose the numbering from masked ancestors first, - # then overwrite it with unmasked ancestor numbers. - ancestor_mask = tri_mask[ancestors] - found_index[refi_triangles[ancestor_mask, :] - ] = np.repeat(ancestors[ancestor_mask], - 3).reshape(-1, 3) - found_index[refi_triangles[~ancestor_mask, :] - ] = np.repeat(ancestors[~ancestor_mask], - 3).reshape(-1, 3) - return refi_triangulation, found_index - else: - return refi_triangulation - - def refine_field(self, z, triinterpolator=None, subdiv=3): - """ - Refine a field defined on the encapsulated triangulation. - - Parameters - ---------- - z : (npoints,) array-like - Values of the field to refine, defined at the nodes of the - encapsulated triangulation. (``n_points`` is the number of points - in the initial triangulation) - triinterpolator : `~matplotlib.tri.TriInterpolator`, optional - Interpolator used for field interpolation. If not specified, - a `~matplotlib.tri.CubicTriInterpolator` will be used. - subdiv : int, default: 3 - Recursion level for the subdivision. - Each triangle is divided into ``4**subdiv`` child triangles. - - Returns - ------- - refi_tri : `~matplotlib.tri.Triangulation` - The returned refined triangulation. - refi_z : 1D array of length: *refi_tri* node count. - The returned interpolated field (at *refi_tri* nodes). 
- """ - if triinterpolator is None: - interp = matplotlib.tri.CubicTriInterpolator( - self._triangulation, z) - else: - _api.check_isinstance(matplotlib.tri.TriInterpolator, - triinterpolator=triinterpolator) - interp = triinterpolator - - refi_tri, found_index = self.refine_triangulation( - subdiv=subdiv, return_tri_index=True) - refi_z = interp._interpolate_multikeys( - refi_tri.x, refi_tri.y, tri_index=found_index)[0] - return refi_tri, refi_z - - @staticmethod - def _refine_triangulation_once(triangulation, ancestors=None): - """ - Refine a `.Triangulation` by splitting each triangle into 4 - child-masked_triangles built on the edges midside nodes. - - Masked triangles, if present, are also split, but their children - returned masked. - - If *ancestors* is not provided, returns only a new triangulation: - child_triangulation. - - If the array-like key table *ancestor* is given, it shall be of shape - (ntri,) where ntri is the number of *triangulation* masked_triangles. - In this case, the function returns - (child_triangulation, child_ancestors) - child_ancestors is defined so that the 4 child masked_triangles share - the same index as their father: child_ancestors.shape = (4 * ntri,). - """ - - x = triangulation.x - y = triangulation.y - - # According to tri.triangulation doc: - # neighbors[i, j] is the triangle that is the neighbor - # to the edge from point index masked_triangles[i, j] to point - # index masked_triangles[i, (j+1)%3]. - neighbors = triangulation.neighbors - triangles = triangulation.triangles - npts = np.shape(x)[0] - ntri = np.shape(triangles)[0] - if ancestors is not None: - ancestors = np.asarray(ancestors) - if np.shape(ancestors) != (ntri,): - raise ValueError( - "Incompatible shapes provide for " - "triangulation.masked_triangles and ancestors: " - f"{np.shape(triangles)} and {np.shape(ancestors)}") - - # Initiating tables refi_x and refi_y of the refined triangulation - # points - # hint: each apex is shared by 2 masked_triangles except the borders. - borders = np.sum(neighbors == -1) - added_pts = (3*ntri + borders) // 2 - refi_npts = npts + added_pts - refi_x = np.zeros(refi_npts) - refi_y = np.zeros(refi_npts) - - # First part of refi_x, refi_y is just the initial points - refi_x[:npts] = x - refi_y[:npts] = y - - # Second part contains the edge midside nodes. - # Each edge belongs to 1 triangle (if border edge) or is shared by 2 - # masked_triangles (interior edge). - # We first build 2 * ntri arrays of edge starting nodes (edge_elems, - # edge_apexes); we then extract only the masters to avoid overlaps. 
- # The so-called 'master' is the triangle with biggest index - # The 'slave' is the triangle with lower index - # (can be -1 if border edge) - # For slave and master we will identify the apex pointing to the edge - # start - edge_elems = np.tile(np.arange(ntri, dtype=np.int32), 3) - edge_apexes = np.repeat(np.arange(3, dtype=np.int32), ntri) - edge_neighbors = neighbors[edge_elems, edge_apexes] - mask_masters = (edge_elems > edge_neighbors) - - # Identifying the "masters" and adding to refi_x, refi_y vec - masters = edge_elems[mask_masters] - apex_masters = edge_apexes[mask_masters] - x_add = (x[triangles[masters, apex_masters]] + - x[triangles[masters, (apex_masters+1) % 3]]) * 0.5 - y_add = (y[triangles[masters, apex_masters]] + - y[triangles[masters, (apex_masters+1) % 3]]) * 0.5 - refi_x[npts:] = x_add - refi_y[npts:] = y_add - - # Building the new masked_triangles; each old masked_triangles hosts - # 4 new masked_triangles - # there are 6 pts to identify per 'old' triangle, 3 new_pt_corner and - # 3 new_pt_midside - new_pt_corner = triangles - - # What is the index in refi_x, refi_y of point at middle of apex iapex - # of elem ielem ? - # If ielem is the apex master: simple count, given the way refi_x was - # built. - # If ielem is the apex slave: yet we do not know; but we will soon - # using the neighbors table. - new_pt_midside = np.empty([ntri, 3], dtype=np.int32) - cum_sum = npts - for imid in range(3): - mask_st_loc = (imid == apex_masters) - n_masters_loc = np.sum(mask_st_loc) - elem_masters_loc = masters[mask_st_loc] - new_pt_midside[:, imid][elem_masters_loc] = np.arange( - n_masters_loc, dtype=np.int32) + cum_sum - cum_sum += n_masters_loc - - # Now dealing with slave elems. - # for each slave element we identify the master and then the inode - # once slave_masters is identified, slave_masters_apex is such that: - # neighbors[slaves_masters, slave_masters_apex] == slaves - mask_slaves = np.logical_not(mask_masters) - slaves = edge_elems[mask_slaves] - slaves_masters = edge_neighbors[mask_slaves] - diff_table = np.abs(neighbors[slaves_masters, :] - - np.outer(slaves, np.ones(3, dtype=np.int32))) - slave_masters_apex = np.argmin(diff_table, axis=1) - slaves_apex = edge_apexes[mask_slaves] - new_pt_midside[slaves, slaves_apex] = new_pt_midside[ - slaves_masters, slave_masters_apex] - - # Builds the 4 child masked_triangles - child_triangles = np.empty([ntri*4, 3], dtype=np.int32) - child_triangles[0::4, :] = np.vstack([ - new_pt_corner[:, 0], new_pt_midside[:, 0], - new_pt_midside[:, 2]]).T - child_triangles[1::4, :] = np.vstack([ - new_pt_corner[:, 1], new_pt_midside[:, 1], - new_pt_midside[:, 0]]).T - child_triangles[2::4, :] = np.vstack([ - new_pt_corner[:, 2], new_pt_midside[:, 2], - new_pt_midside[:, 1]]).T - child_triangles[3::4, :] = np.vstack([ - new_pt_midside[:, 0], new_pt_midside[:, 1], - new_pt_midside[:, 2]]).T - child_triangulation = Triangulation(refi_x, refi_y, child_triangles) - - # Builds the child mask - if triangulation.mask is not None: - child_triangulation.set_mask(np.repeat(triangulation.mask, 4)) - - if ancestors is None: - return child_triangulation - else: - return child_triangulation, np.repeat(ancestors, 4) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/mpl_toolkits/axisartist/floating_axes.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/mpl_toolkits/axisartist/floating_axes.py deleted file mode 100644 index 97dafe98c69494255373b28b812affab62df2ad0..0000000000000000000000000000000000000000 --- 
a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/mpl_toolkits/axisartist/floating_axes.py +++ /dev/null @@ -1,298 +0,0 @@ -""" -An experimental support for curvilinear grid. -""" - -# TODO : -# see if tick_iterator method can be simplified by reusing the parent method. - -import functools - -import numpy as np - -import matplotlib as mpl -from matplotlib import _api, cbook -import matplotlib.patches as mpatches -from matplotlib.path import Path - -from mpl_toolkits.axes_grid1.parasite_axes import host_axes_class_factory - -from . import axislines, grid_helper_curvelinear -from .axis_artist import AxisArtist -from .grid_finder import ExtremeFinderSimple - - -class FloatingAxisArtistHelper( - grid_helper_curvelinear.FloatingAxisArtistHelper): - pass - - -class FixedAxisArtistHelper(grid_helper_curvelinear.FloatingAxisArtistHelper): - - def __init__(self, grid_helper, side, nth_coord_ticks=None): - """ - nth_coord = along which coordinate value varies. - nth_coord = 0 -> x axis, nth_coord = 1 -> y axis - """ - lon1, lon2, lat1, lat2 = grid_helper.grid_finder.extreme_finder(*[None] * 5) - value, nth_coord = _api.check_getitem( - dict(left=(lon1, 0), right=(lon2, 0), bottom=(lat1, 1), top=(lat2, 1)), - side=side) - super().__init__(grid_helper, nth_coord, value, axis_direction=side) - if nth_coord_ticks is None: - nth_coord_ticks = nth_coord - self.nth_coord_ticks = nth_coord_ticks - - self.value = value - self.grid_helper = grid_helper - self._side = side - - def update_lim(self, axes): - self.grid_helper.update_lim(axes) - self._grid_info = self.grid_helper._grid_info - - def get_tick_iterators(self, axes): - """tick_loc, tick_angle, tick_label, (optionally) tick_label""" - - grid_finder = self.grid_helper.grid_finder - - lat_levs, lat_n, lat_factor = self._grid_info["lat_info"] - yy0 = lat_levs / lat_factor - - lon_levs, lon_n, lon_factor = self._grid_info["lon_info"] - xx0 = lon_levs / lon_factor - - extremes = self.grid_helper.grid_finder.extreme_finder(*[None] * 5) - xmin, xmax = sorted(extremes[:2]) - ymin, ymax = sorted(extremes[2:]) - - def trf_xy(x, y): - trf = grid_finder.get_transform() + axes.transData - return trf.transform(np.column_stack(np.broadcast_arrays(x, y))).T - - if self.nth_coord == 0: - mask = (ymin <= yy0) & (yy0 <= ymax) - (xx1, yy1), (dxx1, dyy1), (dxx2, dyy2) = \ - grid_helper_curvelinear._value_and_jacobian( - trf_xy, self.value, yy0[mask], (xmin, xmax), (ymin, ymax)) - labels = self._grid_info["lat_labels"] - - elif self.nth_coord == 1: - mask = (xmin <= xx0) & (xx0 <= xmax) - (xx1, yy1), (dxx2, dyy2), (dxx1, dyy1) = \ - grid_helper_curvelinear._value_and_jacobian( - trf_xy, xx0[mask], self.value, (xmin, xmax), (ymin, ymax)) - labels = self._grid_info["lon_labels"] - - labels = [l for l, m in zip(labels, mask) if m] - - angle_normal = np.arctan2(dyy1, dxx1) - angle_tangent = np.arctan2(dyy2, dxx2) - mm = (dyy1 == 0) & (dxx1 == 0) # points with degenerate normal - angle_normal[mm] = angle_tangent[mm] + np.pi / 2 - - tick_to_axes = self.get_tick_transform(axes) - axes.transAxes - in_01 = functools.partial( - mpl.transforms._interval_contains_close, (0, 1)) - - def f1(): - for x, y, normal, tangent, lab \ - in zip(xx1, yy1, angle_normal, angle_tangent, labels): - c2 = tick_to_axes.transform((x, y)) - if in_01(c2[0]) and in_01(c2[1]): - yield [x, y], *np.rad2deg([normal, tangent]), lab - - return f1(), iter([]) - - def get_line(self, axes): - self.update_lim(axes) - k, v = dict(left=("lon_lines0", 0), - right=("lon_lines0", 1), - bottom=("lat_lines0", 
0), - top=("lat_lines0", 1))[self._side] - xx, yy = self._grid_info[k][v] - return Path(np.column_stack([xx, yy])) - - -class ExtremeFinderFixed(ExtremeFinderSimple): - # docstring inherited - - def __init__(self, extremes): - """ - This subclass always returns the same bounding box. - - Parameters - ---------- - extremes : (float, float, float, float) - The bounding box that this helper always returns. - """ - self._extremes = extremes - - def __call__(self, transform_xy, x1, y1, x2, y2): - # docstring inherited - return self._extremes - - -class GridHelperCurveLinear(grid_helper_curvelinear.GridHelperCurveLinear): - - def __init__(self, aux_trans, extremes, - grid_locator1=None, - grid_locator2=None, - tick_formatter1=None, - tick_formatter2=None): - # docstring inherited - super().__init__(aux_trans, - extreme_finder=ExtremeFinderFixed(extremes), - grid_locator1=grid_locator1, - grid_locator2=grid_locator2, - tick_formatter1=tick_formatter1, - tick_formatter2=tick_formatter2) - - @_api.deprecated("3.8") - def get_data_boundary(self, side): - """ - Return v=0, nth=1. - """ - lon1, lon2, lat1, lat2 = self.grid_finder.extreme_finder(*[None] * 5) - return dict(left=(lon1, 0), - right=(lon2, 0), - bottom=(lat1, 1), - top=(lat2, 1))[side] - - def new_fixed_axis(self, loc, - nth_coord=None, - axis_direction=None, - offset=None, - axes=None): - if axes is None: - axes = self.axes - if axis_direction is None: - axis_direction = loc - # This is not the same as the FixedAxisArtistHelper class used by - # grid_helper_curvelinear.GridHelperCurveLinear.new_fixed_axis! - helper = FixedAxisArtistHelper( - self, loc, nth_coord_ticks=nth_coord) - axisline = AxisArtist(axes, helper, axis_direction=axis_direction) - # Perhaps should be moved to the base class? - axisline.line.set_clip_on(True) - axisline.line.set_clip_box(axisline.axes.bbox) - return axisline - - # new_floating_axis will inherit the grid_helper's extremes. 
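The fixed-extremes grid helper above is normally paired with FloatingSubplot: the helper pins the curvilinear extent, and the floating axes clip drawing to that rectangle in the transformed coordinates. A minimal usage sketch in the spirit of Matplotlib's demo_floating_axes example; the scale, rotation, and extremes values are arbitrary illustrations:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.transforms import Affine2D
import mpl_toolkits.axisartist.floating_axes as floating_axes

# A scaled/rotated rectangle in data coordinates becomes the axes patch.
tr = Affine2D().scale(2, 1).rotate_deg(30)
grid_helper = floating_axes.GridHelperCurveLinear(
    tr, extremes=(-0.5, 3.5, 0, 4))

fig = plt.figure()
ax = floating_axes.FloatingSubplot(fig, 111, grid_helper=grid_helper)
fig.add_subplot(ax)

# Drawing happens in the auxiliary (pre-transform) coordinates.
aux_ax = ax.get_aux_axes(tr)
t = np.linspace(0, 3, 50)
aux_ax.plot(t, np.sqrt(t))

plt.show()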
- - # def new_floating_axis(self, nth_coord, - # value, - # axes=None, - # axis_direction="bottom" - # ): - - # axis = super(GridHelperCurveLinear, - # self).new_floating_axis(nth_coord, - # value, axes=axes, - # axis_direction=axis_direction) - - # # set extreme values of the axis helper - # if nth_coord == 1: - # axis.get_helper().set_extremes(*self._extremes[:2]) - # elif nth_coord == 0: - # axis.get_helper().set_extremes(*self._extremes[2:]) - - # return axis - - def _update_grid(self, x1, y1, x2, y2): - if self._grid_info is None: - self._grid_info = dict() - - grid_info = self._grid_info - - grid_finder = self.grid_finder - extremes = grid_finder.extreme_finder(grid_finder.inv_transform_xy, - x1, y1, x2, y2) - - lon_min, lon_max = sorted(extremes[:2]) - lat_min, lat_max = sorted(extremes[2:]) - grid_info["extremes"] = lon_min, lon_max, lat_min, lat_max # extremes - - lon_levs, lon_n, lon_factor = \ - grid_finder.grid_locator1(lon_min, lon_max) - lon_levs = np.asarray(lon_levs) - lat_levs, lat_n, lat_factor = \ - grid_finder.grid_locator2(lat_min, lat_max) - lat_levs = np.asarray(lat_levs) - - grid_info["lon_info"] = lon_levs, lon_n, lon_factor - grid_info["lat_info"] = lat_levs, lat_n, lat_factor - - grid_info["lon_labels"] = grid_finder.tick_formatter1( - "bottom", lon_factor, lon_levs) - grid_info["lat_labels"] = grid_finder.tick_formatter2( - "bottom", lat_factor, lat_levs) - - lon_values = lon_levs[:lon_n] / lon_factor - lat_values = lat_levs[:lat_n] / lat_factor - - lon_lines, lat_lines = grid_finder._get_raw_grid_lines( - lon_values[(lon_min < lon_values) & (lon_values < lon_max)], - lat_values[(lat_min < lat_values) & (lat_values < lat_max)], - lon_min, lon_max, lat_min, lat_max) - - grid_info["lon_lines"] = lon_lines - grid_info["lat_lines"] = lat_lines - - lon_lines, lat_lines = grid_finder._get_raw_grid_lines( - # lon_min, lon_max, lat_min, lat_max) - extremes[:2], extremes[2:], *extremes) - - grid_info["lon_lines0"] = lon_lines - grid_info["lat_lines0"] = lat_lines - - def get_gridlines(self, which="major", axis="both"): - grid_lines = [] - if axis in ["both", "x"]: - grid_lines.extend(self._grid_info["lon_lines"]) - if axis in ["both", "y"]: - grid_lines.extend(self._grid_info["lat_lines"]) - return grid_lines - - -class FloatingAxesBase: - - def __init__(self, *args, grid_helper, **kwargs): - _api.check_isinstance(GridHelperCurveLinear, grid_helper=grid_helper) - super().__init__(*args, grid_helper=grid_helper, **kwargs) - self.set_aspect(1.) - - def _gen_axes_patch(self): - # docstring inherited - x0, x1, y0, y1 = self.get_grid_helper().grid_finder.extreme_finder(*[None] * 5) - patch = mpatches.Polygon([(x0, y0), (x1, y0), (x1, y1), (x0, y1)]) - patch.get_path()._interpolation_steps = 100 - return patch - - def clear(self): - super().clear() - self.patch.set_transform( - self.get_grid_helper().grid_finder.get_transform() - + self.transData) - # The original patch is not in the draw tree; it is only used for - # clipping purposes. - orig_patch = super()._gen_axes_patch() - orig_patch.set_figure(self.figure) - orig_patch.set_transform(self.transAxes) - self.patch.set_clip_path(orig_patch) - self.gridlines.set_clip_path(orig_patch) - self.adjust_axes_lim() - - def adjust_axes_lim(self): - bbox = self.patch.get_path().get_extents( - # First transform to pixel coords, then to parent data coords. 
- self.patch.get_transform() - self.transData) - bbox = bbox.expanded(1.02, 1.02) - self.set_xlim(bbox.xmin, bbox.xmax) - self.set_ylim(bbox.ymin, bbox.ymax) - - -floatingaxes_class_factory = cbook._make_class_factory( - FloatingAxesBase, "Floating{}") -FloatingAxes = floatingaxes_class_factory( - host_axes_class_factory(axislines.Axes)) -FloatingSubplot = FloatingAxes diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimddp.c b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimddp.c deleted file mode 100644 index e7068ce02e19856349873f40d03caff438efb6fe..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/checks/cpu_asimddp.c +++ /dev/null @@ -1,16 +0,0 @@ -#ifdef _MSC_VER - #include -#endif -#include - -int main(int argc, char **argv) -{ - unsigned char *src = (unsigned char*)argv[argc-1]; - uint8x16_t v1 = vdupq_n_u8(src[0]), v2 = vdupq_n_u8(src[1]); - uint32x4_t va = vdupq_n_u32(3); - int ret = (int)vgetq_lane_u32(vdotq_u32(va, v1, v2), 0); -#ifdef __aarch64__ - ret += (int)vgetq_lane_u32(vdotq_laneq_u32(va, v1, v2, 0), 0); -#endif - return ret; -} diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/fcompiler/hpux.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/fcompiler/hpux.py deleted file mode 100644 index 09e6483bf5adb89fee267a153c82ef76ccb1e12a..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/distutils/fcompiler/hpux.py +++ /dev/null @@ -1,41 +0,0 @@ -from numpy.distutils.fcompiler import FCompiler - -compilers = ['HPUXFCompiler'] - -class HPUXFCompiler(FCompiler): - - compiler_type = 'hpux' - description = 'HP Fortran 90 Compiler' - version_pattern = r'HP F90 (?P[^\s*,]*)' - - executables = { - 'version_cmd' : ["f90", "+version"], - 'compiler_f77' : ["f90"], - 'compiler_fix' : ["f90"], - 'compiler_f90' : ["f90"], - 'linker_so' : ["ld", "-b"], - 'archiver' : ["ar", "-cr"], - 'ranlib' : ["ranlib"] - } - module_dir_switch = None #XXX: fix me - module_include_switch = None #XXX: fix me - pic_flags = ['+Z'] - def get_flags(self): - return self.pic_flags + ['+ppu', '+DD64'] - def get_flags_opt(self): - return ['-O3'] - def get_libraries(self): - return ['m'] - def get_library_dirs(self): - opt = ['/usr/lib/hpux64'] - return opt - def get_version(self, force=0, ok_status=[256, 0, 1]): - # XXX status==256 may indicate 'unrecognized option' or - # 'no input file'. So, version_cmd needs more work. 
- return FCompiler.get_version(self, force, ok_status) - -if __name__ == '__main__': - from distutils import log - log.set_verbosity(10) - from numpy.distutils import customized_fcompiler - print(customized_fcompiler(compiler='hpux').get_version()) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_interval.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_interval.py deleted file mode 100644 index aff4944e7bd55bed722c843130896970cd193135..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/interval/test_interval.py +++ /dev/null @@ -1,934 +0,0 @@ -from itertools import permutations -import re - -import numpy as np -import pytest - -import pandas as pd -from pandas import ( - Index, - Interval, - IntervalIndex, - Timedelta, - Timestamp, - date_range, - interval_range, - isna, - notna, - timedelta_range, -) -import pandas._testing as tm -import pandas.core.common as com - - -@pytest.fixture(params=[None, "foo"]) -def name(request): - return request.param - - -class TestIntervalIndex: - index = IntervalIndex.from_arrays([0, 1], [1, 2]) - - def create_index(self, closed="right"): - return IntervalIndex.from_breaks(range(11), closed=closed) - - def create_index_with_nan(self, closed="right"): - mask = [True, False] + [True] * 8 - return IntervalIndex.from_arrays( - np.where(mask, np.arange(10), np.nan), - np.where(mask, np.arange(1, 11), np.nan), - closed=closed, - ) - - def test_properties(self, closed): - index = self.create_index(closed=closed) - assert len(index) == 10 - assert index.size == 10 - assert index.shape == (10,) - - tm.assert_index_equal(index.left, Index(np.arange(10, dtype=np.int64))) - tm.assert_index_equal(index.right, Index(np.arange(1, 11, dtype=np.int64))) - tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5, dtype=np.float64))) - - assert index.closed == closed - - ivs = [ - Interval(left, right, closed) - for left, right in zip(range(10), range(1, 11)) - ] - expected = np.array(ivs, dtype=object) - tm.assert_numpy_array_equal(np.asarray(index), expected) - - # with nans - index = self.create_index_with_nan(closed=closed) - assert len(index) == 10 - assert index.size == 10 - assert index.shape == (10,) - - expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9]) - expected_right = expected_left + 1 - expected_mid = expected_left + 0.5 - tm.assert_index_equal(index.left, expected_left) - tm.assert_index_equal(index.right, expected_right) - tm.assert_index_equal(index.mid, expected_mid) - - assert index.closed == closed - - ivs = [ - Interval(left, right, closed) if notna(left) else np.nan - for left, right in zip(expected_left, expected_right) - ] - expected = np.array(ivs, dtype=object) - tm.assert_numpy_array_equal(np.asarray(index), expected) - - @pytest.mark.parametrize( - "breaks", - [ - [1, 1, 2, 5, 15, 53, 217, 1014, 5335, 31240, 201608], - [-np.inf, -100, -10, 0.5, 1, 1.5, 3.8, 101, 202, np.inf], - pd.to_datetime(["20170101", "20170202", "20170303", "20170404"]), - pd.to_timedelta(["1ns", "2ms", "3s", "4min", "5H", "6D"]), - ], - ) - def test_length(self, closed, breaks): - # GH 18789 - index = IntervalIndex.from_breaks(breaks, closed=closed) - result = index.length - expected = Index(iv.length for iv in index) - tm.assert_index_equal(result, expected) - - # with NA - index = index.insert(1, np.nan) - result = index.length - expected = Index(iv.length if notna(iv) 
else iv for iv in index) - tm.assert_index_equal(result, expected) - - def test_with_nans(self, closed): - index = self.create_index(closed=closed) - assert index.hasnans is False - - result = index.isna() - expected = np.zeros(len(index), dtype=bool) - tm.assert_numpy_array_equal(result, expected) - - result = index.notna() - expected = np.ones(len(index), dtype=bool) - tm.assert_numpy_array_equal(result, expected) - - index = self.create_index_with_nan(closed=closed) - assert index.hasnans is True - - result = index.isna() - expected = np.array([False, True] + [False] * (len(index) - 2)) - tm.assert_numpy_array_equal(result, expected) - - result = index.notna() - expected = np.array([True, False] + [True] * (len(index) - 2)) - tm.assert_numpy_array_equal(result, expected) - - def test_copy(self, closed): - expected = self.create_index(closed=closed) - - result = expected.copy() - assert result.equals(expected) - - result = expected.copy(deep=True) - assert result.equals(expected) - assert result.left is not expected.left - - def test_ensure_copied_data(self, closed): - # exercise the copy flag in the constructor - - # not copying - index = self.create_index(closed=closed) - result = IntervalIndex(index, copy=False) - tm.assert_numpy_array_equal( - index.left.values, result.left.values, check_same="same" - ) - tm.assert_numpy_array_equal( - index.right.values, result.right.values, check_same="same" - ) - - # by-definition make a copy - result = IntervalIndex(np.array(index), copy=False) - tm.assert_numpy_array_equal( - index.left.values, result.left.values, check_same="copy" - ) - tm.assert_numpy_array_equal( - index.right.values, result.right.values, check_same="copy" - ) - - def test_delete(self, closed): - breaks = np.arange(1, 11, dtype=np.int64) - expected = IntervalIndex.from_breaks(breaks, closed=closed) - result = self.create_index(closed=closed).delete(0) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize( - "data", - [ - interval_range(0, periods=10, closed="neither"), - interval_range(1.7, periods=8, freq=2.5, closed="both"), - interval_range(Timestamp("20170101"), periods=12, closed="left"), - interval_range(Timedelta("1 day"), periods=6, closed="right"), - ], - ) - def test_insert(self, data): - item = data[0] - idx_item = IntervalIndex([item]) - - # start - expected = idx_item.append(data) - result = data.insert(0, item) - tm.assert_index_equal(result, expected) - - # end - expected = data.append(idx_item) - result = data.insert(len(data), item) - tm.assert_index_equal(result, expected) - - # mid - expected = data[:3].append(idx_item).append(data[3:]) - result = data.insert(3, item) - tm.assert_index_equal(result, expected) - - # invalid type - res = data.insert(1, "foo") - expected = data.astype(object).insert(1, "foo") - tm.assert_index_equal(res, expected) - - msg = "can only insert Interval objects and NA into an IntervalArray" - with pytest.raises(TypeError, match=msg): - data._data.insert(1, "foo") - - # invalid closed - msg = "'value.closed' is 'left', expected 'right'." - for closed in {"left", "right", "both", "neither"} - {item.closed}: - msg = f"'value.closed' is '{closed}', expected '{item.closed}'." 
- bad_item = Interval(item.left, item.right, closed=closed) - res = data.insert(1, bad_item) - expected = data.astype(object).insert(1, bad_item) - tm.assert_index_equal(res, expected) - with pytest.raises(ValueError, match=msg): - data._data.insert(1, bad_item) - - # GH 18295 (test missing) - na_idx = IntervalIndex([np.nan], closed=data.closed) - for na in [np.nan, None, pd.NA]: - expected = data[:1].append(na_idx).append(data[1:]) - result = data.insert(1, na) - tm.assert_index_equal(result, expected) - - if data.left.dtype.kind not in ["m", "M"]: - # trying to insert pd.NaT into a numeric-dtyped Index should cast - expected = data.astype(object).insert(1, pd.NaT) - - msg = "can only insert Interval objects and NA into an IntervalArray" - with pytest.raises(TypeError, match=msg): - data._data.insert(1, pd.NaT) - - result = data.insert(1, pd.NaT) - tm.assert_index_equal(result, expected) - - def test_is_unique_interval(self, closed): - """ - Interval specific tests for is_unique in addition to base class tests - """ - # unique overlapping - distinct endpoints - idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed) - assert idx.is_unique is True - - # unique overlapping - shared endpoints - idx = IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)], closed=closed) - assert idx.is_unique is True - - # unique nested - idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed) - assert idx.is_unique is True - - # unique NaN - idx = IntervalIndex.from_tuples([(np.nan, np.nan)], closed=closed) - assert idx.is_unique is True - - # non-unique NaN - idx = IntervalIndex.from_tuples( - [(np.nan, np.nan), (np.nan, np.nan)], closed=closed - ) - assert idx.is_unique is False - - def test_monotonic(self, closed): - # increasing non-overlapping - idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)], closed=closed) - assert idx.is_monotonic_increasing is True - assert idx._is_strictly_monotonic_increasing is True - assert idx.is_monotonic_decreasing is False - assert idx._is_strictly_monotonic_decreasing is False - - # decreasing non-overlapping - idx = IntervalIndex.from_tuples([(4, 5), (2, 3), (1, 2)], closed=closed) - assert idx.is_monotonic_increasing is False - assert idx._is_strictly_monotonic_increasing is False - assert idx.is_monotonic_decreasing is True - assert idx._is_strictly_monotonic_decreasing is True - - # unordered non-overlapping - idx = IntervalIndex.from_tuples([(0, 1), (4, 5), (2, 3)], closed=closed) - assert idx.is_monotonic_increasing is False - assert idx._is_strictly_monotonic_increasing is False - assert idx.is_monotonic_decreasing is False - assert idx._is_strictly_monotonic_decreasing is False - - # increasing overlapping - idx = IntervalIndex.from_tuples([(0, 2), (0.5, 2.5), (1, 3)], closed=closed) - assert idx.is_monotonic_increasing is True - assert idx._is_strictly_monotonic_increasing is True - assert idx.is_monotonic_decreasing is False - assert idx._is_strictly_monotonic_decreasing is False - - # decreasing overlapping - idx = IntervalIndex.from_tuples([(1, 3), (0.5, 2.5), (0, 2)], closed=closed) - assert idx.is_monotonic_increasing is False - assert idx._is_strictly_monotonic_increasing is False - assert idx.is_monotonic_decreasing is True - assert idx._is_strictly_monotonic_decreasing is True - - # unordered overlapping - idx = IntervalIndex.from_tuples([(0.5, 2.5), (0, 2), (1, 3)], closed=closed) - assert idx.is_monotonic_increasing is False - assert idx._is_strictly_monotonic_increasing is False - assert idx.is_monotonic_decreasing is False 
- assert idx._is_strictly_monotonic_decreasing is False - - # increasing overlapping shared endpoints - idx = IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)], closed=closed) - assert idx.is_monotonic_increasing is True - assert idx._is_strictly_monotonic_increasing is True - assert idx.is_monotonic_decreasing is False - assert idx._is_strictly_monotonic_decreasing is False - - # decreasing overlapping shared endpoints - idx = IntervalIndex.from_tuples([(2, 3), (1, 3), (1, 2)], closed=closed) - assert idx.is_monotonic_increasing is False - assert idx._is_strictly_monotonic_increasing is False - assert idx.is_monotonic_decreasing is True - assert idx._is_strictly_monotonic_decreasing is True - - # stationary - idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed) - assert idx.is_monotonic_increasing is True - assert idx._is_strictly_monotonic_increasing is False - assert idx.is_monotonic_decreasing is True - assert idx._is_strictly_monotonic_decreasing is False - - # empty - idx = IntervalIndex([], closed=closed) - assert idx.is_monotonic_increasing is True - assert idx._is_strictly_monotonic_increasing is True - assert idx.is_monotonic_decreasing is True - assert idx._is_strictly_monotonic_decreasing is True - - def test_is_monotonic_with_nans(self): - # GH#41831 - index = IntervalIndex([np.nan, np.nan]) - - assert not index.is_monotonic_increasing - assert not index._is_strictly_monotonic_increasing - assert not index.is_monotonic_increasing - assert not index._is_strictly_monotonic_decreasing - assert not index.is_monotonic_decreasing - - def test_get_item(self, closed): - i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan), closed=closed) - assert i[0] == Interval(0.0, 1.0, closed=closed) - assert i[1] == Interval(1.0, 2.0, closed=closed) - assert isna(i[2]) - - result = i[0:1] - expected = IntervalIndex.from_arrays((0.0,), (1.0,), closed=closed) - tm.assert_index_equal(result, expected) - - result = i[0:2] - expected = IntervalIndex.from_arrays((0.0, 1), (1.0, 2.0), closed=closed) - tm.assert_index_equal(result, expected) - - result = i[1:3] - expected = IntervalIndex.from_arrays( - (1.0, np.nan), (2.0, np.nan), closed=closed - ) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize( - "breaks", - [ - date_range("20180101", periods=4), - date_range("20180101", periods=4, tz="US/Eastern"), - timedelta_range("0 days", periods=4), - ], - ids=lambda x: str(x.dtype), - ) - def test_maybe_convert_i8(self, breaks): - # GH 20636 - index = IntervalIndex.from_breaks(breaks) - - # intervalindex - result = index._maybe_convert_i8(index) - expected = IntervalIndex.from_breaks(breaks.asi8) - tm.assert_index_equal(result, expected) - - # interval - interval = Interval(breaks[0], breaks[1]) - result = index._maybe_convert_i8(interval) - expected = Interval(breaks[0]._value, breaks[1]._value) - assert result == expected - - # datetimelike index - result = index._maybe_convert_i8(breaks) - expected = Index(breaks.asi8) - tm.assert_index_equal(result, expected) - - # datetimelike scalar - result = index._maybe_convert_i8(breaks[0]) - expected = breaks[0]._value - assert result == expected - - # list-like of datetimelike scalars - result = index._maybe_convert_i8(list(breaks)) - expected = Index(breaks.asi8) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize( - "breaks", - [date_range("2018-01-01", periods=5), timedelta_range("0 days", periods=5)], - ) - def test_maybe_convert_i8_nat(self, breaks): - # GH 20636 - index = 
IntervalIndex.from_breaks(breaks) - - to_convert = breaks._constructor([pd.NaT] * 3) - expected = Index([np.nan] * 3, dtype=np.float64) - result = index._maybe_convert_i8(to_convert) - tm.assert_index_equal(result, expected) - - to_convert = to_convert.insert(0, breaks[0]) - expected = expected.insert(0, float(breaks[0]._value)) - result = index._maybe_convert_i8(to_convert) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize( - "make_key", - [lambda breaks: breaks, list], - ids=["lambda", "list"], - ) - def test_maybe_convert_i8_numeric(self, make_key, any_real_numpy_dtype): - # GH 20636 - breaks = np.arange(5, dtype=any_real_numpy_dtype) - index = IntervalIndex.from_breaks(breaks) - key = make_key(breaks) - - result = index._maybe_convert_i8(key) - kind = breaks.dtype.kind - expected_dtype = {"i": np.int64, "u": np.uint64, "f": np.float64}[kind] - expected = Index(key, dtype=expected_dtype) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize( - "make_key", - [ - IntervalIndex.from_breaks, - lambda breaks: Interval(breaks[0], breaks[1]), - lambda breaks: breaks[0], - ], - ids=["IntervalIndex", "Interval", "scalar"], - ) - def test_maybe_convert_i8_numeric_identical(self, make_key, any_real_numpy_dtype): - # GH 20636 - breaks = np.arange(5, dtype=any_real_numpy_dtype) - index = IntervalIndex.from_breaks(breaks) - key = make_key(breaks) - - # test if _maybe_convert_i8 won't change key if an Interval or IntervalIndex - result = index._maybe_convert_i8(key) - assert result is key - - @pytest.mark.parametrize( - "breaks1, breaks2", - permutations( - [ - date_range("20180101", periods=4), - date_range("20180101", periods=4, tz="US/Eastern"), - timedelta_range("0 days", periods=4), - ], - 2, - ), - ids=lambda x: str(x.dtype), - ) - @pytest.mark.parametrize( - "make_key", - [ - IntervalIndex.from_breaks, - lambda breaks: Interval(breaks[0], breaks[1]), - lambda breaks: breaks, - lambda breaks: breaks[0], - list, - ], - ids=["IntervalIndex", "Interval", "Index", "scalar", "list"], - ) - def test_maybe_convert_i8_errors(self, breaks1, breaks2, make_key): - # GH 20636 - index = IntervalIndex.from_breaks(breaks1) - key = make_key(breaks2) - - msg = ( - f"Cannot index an IntervalIndex of subtype {breaks1.dtype} with " - f"values of dtype {breaks2.dtype}" - ) - msg = re.escape(msg) - with pytest.raises(ValueError, match=msg): - index._maybe_convert_i8(key) - - def test_contains_method(self): - # can select values that are IN the range of a value - i = IntervalIndex.from_arrays([0, 1], [1, 2]) - - expected = np.array([False, False], dtype="bool") - actual = i.contains(0) - tm.assert_numpy_array_equal(actual, expected) - actual = i.contains(3) - tm.assert_numpy_array_equal(actual, expected) - - expected = np.array([True, False], dtype="bool") - actual = i.contains(0.5) - tm.assert_numpy_array_equal(actual, expected) - actual = i.contains(1) - tm.assert_numpy_array_equal(actual, expected) - - # __contains__ not implemented for "interval in interval", follow - # that for the contains method for now - with pytest.raises( - NotImplementedError, match="contains not implemented for two" - ): - i.contains(Interval(0, 1)) - - def test_dropna(self, closed): - expected = IntervalIndex.from_tuples([(0.0, 1.0), (1.0, 2.0)], closed=closed) - - ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed) - result = ii.dropna() - tm.assert_index_equal(result, expected) - - ii = IntervalIndex.from_arrays([0, 1, np.nan], [1, 2, np.nan], closed=closed) - result = 
ii.dropna() - tm.assert_index_equal(result, expected) - - def test_non_contiguous(self, closed): - index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed) - target = [0.5, 1.5, 2.5] - actual = index.get_indexer(target) - expected = np.array([0, -1, 1], dtype="intp") - tm.assert_numpy_array_equal(actual, expected) - - assert 1.5 not in index - - def test_isin(self, closed): - index = self.create_index(closed=closed) - - expected = np.array([True] + [False] * (len(index) - 1)) - result = index.isin(index[:1]) - tm.assert_numpy_array_equal(result, expected) - - result = index.isin([index[0]]) - tm.assert_numpy_array_equal(result, expected) - - other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed) - expected = np.array([True] * (len(index) - 1) + [False]) - result = index.isin(other) - tm.assert_numpy_array_equal(result, expected) - - result = index.isin(other.tolist()) - tm.assert_numpy_array_equal(result, expected) - - for other_closed in ["right", "left", "both", "neither"]: - other = self.create_index(closed=other_closed) - expected = np.repeat(closed == other_closed, len(index)) - result = index.isin(other) - tm.assert_numpy_array_equal(result, expected) - - result = index.isin(other.tolist()) - tm.assert_numpy_array_equal(result, expected) - - def test_comparison(self): - actual = Interval(0, 1) < self.index - expected = np.array([False, True]) - tm.assert_numpy_array_equal(actual, expected) - - actual = Interval(0.5, 1.5) < self.index - expected = np.array([False, True]) - tm.assert_numpy_array_equal(actual, expected) - actual = self.index > Interval(0.5, 1.5) - tm.assert_numpy_array_equal(actual, expected) - - actual = self.index == self.index - expected = np.array([True, True]) - tm.assert_numpy_array_equal(actual, expected) - actual = self.index <= self.index - tm.assert_numpy_array_equal(actual, expected) - actual = self.index >= self.index - tm.assert_numpy_array_equal(actual, expected) - - actual = self.index < self.index - expected = np.array([False, False]) - tm.assert_numpy_array_equal(actual, expected) - actual = self.index > self.index - tm.assert_numpy_array_equal(actual, expected) - - actual = self.index == IntervalIndex.from_breaks([0, 1, 2], "left") - tm.assert_numpy_array_equal(actual, expected) - - actual = self.index == self.index.values - tm.assert_numpy_array_equal(actual, np.array([True, True])) - actual = self.index.values == self.index - tm.assert_numpy_array_equal(actual, np.array([True, True])) - actual = self.index <= self.index.values - tm.assert_numpy_array_equal(actual, np.array([True, True])) - actual = self.index != self.index.values - tm.assert_numpy_array_equal(actual, np.array([False, False])) - actual = self.index > self.index.values - tm.assert_numpy_array_equal(actual, np.array([False, False])) - actual = self.index.values > self.index - tm.assert_numpy_array_equal(actual, np.array([False, False])) - - # invalid comparisons - actual = self.index == 0 - tm.assert_numpy_array_equal(actual, np.array([False, False])) - actual = self.index == self.index.left - tm.assert_numpy_array_equal(actual, np.array([False, False])) - - msg = "|".join( - [ - "not supported between instances of 'int' and '.*.Interval'", - r"Invalid comparison between dtype=interval\[int64, right\] and ", - ] - ) - with pytest.raises(TypeError, match=msg): - self.index > 0 - with pytest.raises(TypeError, match=msg): - self.index <= 0 - with pytest.raises(TypeError, match=msg): - self.index > np.arange(2) - - msg = "Lengths must match to compare" - with 
pytest.raises(ValueError, match=msg): - self.index > np.arange(3) - - def test_missing_values(self, closed): - idx = Index( - [np.nan, Interval(0, 1, closed=closed), Interval(1, 2, closed=closed)] - ) - idx2 = IntervalIndex.from_arrays([np.nan, 0, 1], [np.nan, 1, 2], closed=closed) - assert idx.equals(idx2) - - msg = ( - "missing values must be missing in the same location both left " - "and right sides" - ) - with pytest.raises(ValueError, match=msg): - IntervalIndex.from_arrays( - [np.nan, 0, 1], np.array([0, 1, 2]), closed=closed - ) - - tm.assert_numpy_array_equal(isna(idx), np.array([True, False, False])) - - def test_sort_values(self, closed): - index = self.create_index(closed=closed) - - result = index.sort_values() - tm.assert_index_equal(result, index) - - result = index.sort_values(ascending=False) - tm.assert_index_equal(result, index[::-1]) - - # with nan - index = IntervalIndex([Interval(1, 2), np.nan, Interval(0, 1)]) - - result = index.sort_values() - expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan]) - tm.assert_index_equal(result, expected) - - result = index.sort_values(ascending=False, na_position="first") - expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)]) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize("tz", [None, "US/Eastern"]) - def test_datetime(self, tz): - start = Timestamp("2000-01-01", tz=tz) - dates = date_range(start=start, periods=10) - index = IntervalIndex.from_breaks(dates) - - # test mid - start = Timestamp("2000-01-01T12:00", tz=tz) - expected = date_range(start=start, periods=9) - tm.assert_index_equal(index.mid, expected) - - # __contains__ doesn't check individual points - assert Timestamp("2000-01-01", tz=tz) not in index - assert Timestamp("2000-01-01T12", tz=tz) not in index - assert Timestamp("2000-01-02", tz=tz) not in index - iv_true = Interval( - Timestamp("2000-01-02", tz=tz), Timestamp("2000-01-03", tz=tz) - ) - iv_false = Interval( - Timestamp("1999-12-31", tz=tz), Timestamp("2000-01-01", tz=tz) - ) - assert iv_true in index - assert iv_false not in index - - # .contains does check individual points - assert not index.contains(Timestamp("2000-01-01", tz=tz)).any() - assert index.contains(Timestamp("2000-01-01T12", tz=tz)).any() - assert index.contains(Timestamp("2000-01-02", tz=tz)).any() - - # test get_indexer - start = Timestamp("1999-12-31T12:00", tz=tz) - target = date_range(start=start, periods=7, freq="12H") - actual = index.get_indexer(target) - expected = np.array([-1, -1, 0, 0, 1, 1, 2], dtype="intp") - tm.assert_numpy_array_equal(actual, expected) - - start = Timestamp("2000-01-08T18:00", tz=tz) - target = date_range(start=start, periods=7, freq="6H") - actual = index.get_indexer(target) - expected = np.array([7, 7, 8, 8, 8, 8, -1], dtype="intp") - tm.assert_numpy_array_equal(actual, expected) - - def test_append(self, closed): - index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed) - index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed) - - result = index1.append(index2) - expected = IntervalIndex.from_arrays([0, 1, 1, 2], [1, 2, 2, 3], closed=closed) - tm.assert_index_equal(result, expected) - - result = index1.append([index1, index2]) - expected = IntervalIndex.from_arrays( - [0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed - ) - tm.assert_index_equal(result, expected) - - for other_closed in {"left", "right", "both", "neither"} - {closed}: - index_other_closed = IntervalIndex.from_arrays( - [0, 1], [1, 2], closed=other_closed - ) - result = 
index1.append(index_other_closed) - expected = index1.astype(object).append(index_other_closed.astype(object)) - tm.assert_index_equal(result, expected) - - def test_is_non_overlapping_monotonic(self, closed): - # Should be True in all cases - tpls = [(0, 1), (2, 3), (4, 5), (6, 7)] - idx = IntervalIndex.from_tuples(tpls, closed=closed) - assert idx.is_non_overlapping_monotonic is True - - idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed) - assert idx.is_non_overlapping_monotonic is True - - # Should be False in all cases (overlapping) - tpls = [(0, 2), (1, 3), (4, 5), (6, 7)] - idx = IntervalIndex.from_tuples(tpls, closed=closed) - assert idx.is_non_overlapping_monotonic is False - - idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed) - assert idx.is_non_overlapping_monotonic is False - - # Should be False in all cases (non-monotonic) - tpls = [(0, 1), (2, 3), (6, 7), (4, 5)] - idx = IntervalIndex.from_tuples(tpls, closed=closed) - assert idx.is_non_overlapping_monotonic is False - - idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed) - assert idx.is_non_overlapping_monotonic is False - - # Should be False for closed='both', otherwise True (GH16560) - if closed == "both": - idx = IntervalIndex.from_breaks(range(4), closed=closed) - assert idx.is_non_overlapping_monotonic is False - else: - idx = IntervalIndex.from_breaks(range(4), closed=closed) - assert idx.is_non_overlapping_monotonic is True - - @pytest.mark.parametrize( - "start, shift, na_value", - [ - (0, 1, np.nan), - (Timestamp("2018-01-01"), Timedelta("1 day"), pd.NaT), - (Timedelta("0 days"), Timedelta("1 day"), pd.NaT), - ], - ) - def test_is_overlapping(self, start, shift, na_value, closed): - # GH 23309 - # see test_interval_tree.py for extensive tests; interface tests here - - # non-overlapping - tuples = [(start + n * shift, start + (n + 1) * shift) for n in (0, 2, 4)] - index = IntervalIndex.from_tuples(tuples, closed=closed) - assert index.is_overlapping is False - - # non-overlapping with NA - tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)] - index = IntervalIndex.from_tuples(tuples, closed=closed) - assert index.is_overlapping is False - - # overlapping - tuples = [(start + n * shift, start + (n + 2) * shift) for n in range(3)] - index = IntervalIndex.from_tuples(tuples, closed=closed) - assert index.is_overlapping is True - - # overlapping with NA - tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)] - index = IntervalIndex.from_tuples(tuples, closed=closed) - assert index.is_overlapping is True - - # common endpoints - tuples = [(start + n * shift, start + (n + 1) * shift) for n in range(3)] - index = IntervalIndex.from_tuples(tuples, closed=closed) - result = index.is_overlapping - expected = closed == "both" - assert result is expected - - # common endpoints with NA - tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)] - index = IntervalIndex.from_tuples(tuples, closed=closed) - result = index.is_overlapping - assert result is expected - - # intervals with duplicate left values - a = [10, 15, 20, 25, 30, 35, 40, 45, 45, 50, 55, 60, 65, 70, 75, 80, 85] - b = [15, 20, 25, 30, 35, 40, 45, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90] - index = IntervalIndex.from_arrays(a, b, closed="right") - result = index.is_overlapping - assert result is False - - @pytest.mark.parametrize( - "tuples", - [ - list(zip(range(10), range(1, 11))), - list( - zip( - date_range("20170101", periods=10), - date_range("20170101", periods=10), - ) - ), - list( - zip( - 
timedelta_range("0 days", periods=10), - timedelta_range("1 day", periods=10), - ) - ), - ], - ) - def test_to_tuples(self, tuples): - # GH 18756 - idx = IntervalIndex.from_tuples(tuples) - result = idx.to_tuples() - expected = Index(com.asarray_tuplesafe(tuples)) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize( - "tuples", - [ - list(zip(range(10), range(1, 11))) + [np.nan], - list( - zip( - date_range("20170101", periods=10), - date_range("20170101", periods=10), - ) - ) - + [np.nan], - list( - zip( - timedelta_range("0 days", periods=10), - timedelta_range("1 day", periods=10), - ) - ) - + [np.nan], - ], - ) - @pytest.mark.parametrize("na_tuple", [True, False]) - def test_to_tuples_na(self, tuples, na_tuple): - # GH 18756 - idx = IntervalIndex.from_tuples(tuples) - result = idx.to_tuples(na_tuple=na_tuple) - - # check the non-NA portion - expected_notna = Index(com.asarray_tuplesafe(tuples[:-1])) - result_notna = result[:-1] - tm.assert_index_equal(result_notna, expected_notna) - - # check the NA portion - result_na = result[-1] - if na_tuple: - assert isinstance(result_na, tuple) - assert len(result_na) == 2 - assert all(isna(x) for x in result_na) - else: - assert isna(result_na) - - def test_nbytes(self): - # GH 19209 - left = np.arange(0, 4, dtype="i8") - right = np.arange(1, 5, dtype="i8") - - result = IntervalIndex.from_arrays(left, right).nbytes - expected = 64 # 4 * 8 * 2 - assert result == expected - - @pytest.mark.parametrize("new_closed", ["left", "right", "both", "neither"]) - def test_set_closed(self, name, closed, new_closed): - # GH 21670 - index = interval_range(0, 5, closed=closed, name=name) - result = index.set_closed(new_closed) - expected = interval_range(0, 5, closed=new_closed, name=name) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize("bad_closed", ["foo", 10, "LEFT", True, False]) - def test_set_closed_errors(self, bad_closed): - # GH 21670 - index = interval_range(0, 5) - msg = f"invalid option for 'closed': {bad_closed}" - with pytest.raises(ValueError, match=msg): - index.set_closed(bad_closed) - - def test_is_all_dates(self): - # GH 23576 - year_2017 = Interval( - Timestamp("2017-01-01 00:00:00"), Timestamp("2018-01-01 00:00:00") - ) - year_2017_index = IntervalIndex([year_2017]) - assert not year_2017_index._is_all_dates - - -def test_dir(): - # GH#27571 dir(interval_index) should not raise - index = IntervalIndex.from_arrays([0, 1], [1, 2]) - result = dir(index) - assert "str" not in result - - -def test_searchsorted_different_argument_classes(listlike_box): - # https://github.com/pandas-dev/pandas/issues/32762 - values = IntervalIndex([Interval(0, 1), Interval(1, 2)]) - result = values.searchsorted(listlike_box(values)) - expected = np.array([0, 1], dtype=result.dtype) - tm.assert_numpy_array_equal(result, expected) - - result = values._data.searchsorted(listlike_box(values)) - tm.assert_numpy_array_equal(result, expected) - - -@pytest.mark.parametrize( - "arg", [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2] -) -def test_searchsorted_invalid_argument(arg): - values = IntervalIndex([Interval(0, 1), Interval(1, 2)]) - msg = "'<' not supported between instances of 'pandas._libs.interval.Interval' and " - with pytest.raises(TypeError, match=msg): - values.searchsorted(arg) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_pickle.py 
b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_pickle.py deleted file mode 100644 index 1d8b72140442159fa0b8c608022d167bddd95db4..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/multi/test_pickle.py +++ /dev/null @@ -1,10 +0,0 @@ -import pytest - -from pandas import MultiIndex - - -def test_pickle_compat_construction(): - # this is testing for pickle compat - # need an object to create with - with pytest.raises(TypeError, match="Must pass both levels and codes"): - MultiIndex() diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_indexing.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_indexing.py deleted file mode 100644 index 6202074a11d7883c6f6aa984c23d7964e9042eb0..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/ranges/test_indexing.py +++ /dev/null @@ -1,137 +0,0 @@ -import numpy as np -import pytest - -from pandas import ( - Index, - RangeIndex, -) -import pandas._testing as tm - - -class TestGetIndexer: - def test_get_indexer(self): - index = RangeIndex(start=0, stop=20, step=2) - target = RangeIndex(10) - indexer = index.get_indexer(target) - expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1], dtype=np.intp) - tm.assert_numpy_array_equal(indexer, expected) - - def test_get_indexer_pad(self): - index = RangeIndex(start=0, stop=20, step=2) - target = RangeIndex(10) - indexer = index.get_indexer(target, method="pad") - expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype=np.intp) - tm.assert_numpy_array_equal(indexer, expected) - - def test_get_indexer_backfill(self): - index = RangeIndex(start=0, stop=20, step=2) - target = RangeIndex(10) - indexer = index.get_indexer(target, method="backfill") - expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5], dtype=np.intp) - tm.assert_numpy_array_equal(indexer, expected) - - def test_get_indexer_limit(self): - # GH#28631 - idx = RangeIndex(4) - target = RangeIndex(6) - result = idx.get_indexer(target, method="pad", limit=1) - expected = np.array([0, 1, 2, 3, 3, -1], dtype=np.intp) - tm.assert_numpy_array_equal(result, expected) - - @pytest.mark.parametrize("stop", [0, -1, -2]) - def test_get_indexer_decreasing(self, stop): - # GH#28678 - index = RangeIndex(7, stop, -3) - result = index.get_indexer(range(9)) - expected = np.array([-1, 2, -1, -1, 1, -1, -1, 0, -1], dtype=np.intp) - tm.assert_numpy_array_equal(result, expected) - - -class TestTake: - def test_take_preserve_name(self): - index = RangeIndex(1, 5, name="foo") - taken = index.take([3, 0, 1]) - assert index.name == taken.name - - def test_take_fill_value(self): - # GH#12631 - idx = RangeIndex(1, 4, name="xxx") - result = idx.take(np.array([1, 0, -1])) - expected = Index([2, 1, 3], dtype=np.int64, name="xxx") - tm.assert_index_equal(result, expected) - - # fill_value - msg = "Unable to fill values because RangeIndex cannot contain NA" - with pytest.raises(ValueError, match=msg): - idx.take(np.array([1, 0, -1]), fill_value=True) - - # allow_fill=False - result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) - expected = Index([2, 1, 3], dtype=np.int64, name="xxx") - tm.assert_index_equal(result, expected) - - msg = "Unable to fill values because RangeIndex cannot contain NA" - with pytest.raises(ValueError, match=msg): - 
idx.take(np.array([1, 0, -2]), fill_value=True) - with pytest.raises(ValueError, match=msg): - idx.take(np.array([1, 0, -5]), fill_value=True) - - def test_take_raises_index_error(self): - idx = RangeIndex(1, 4, name="xxx") - - msg = "index -5 is out of bounds for (axis 0 with )?size 3" - with pytest.raises(IndexError, match=msg): - idx.take(np.array([1, -5])) - - msg = "index -4 is out of bounds for (axis 0 with )?size 3" - with pytest.raises(IndexError, match=msg): - idx.take(np.array([1, -4])) - - # no errors - result = idx.take(np.array([1, -3])) - expected = Index([2, 1], dtype=np.int64, name="xxx") - tm.assert_index_equal(result, expected) - - def test_take_accepts_empty_array(self): - idx = RangeIndex(1, 4, name="foo") - result = idx.take(np.array([])) - expected = Index([], dtype=np.int64, name="foo") - tm.assert_index_equal(result, expected) - - # empty index - idx = RangeIndex(0, name="foo") - result = idx.take(np.array([])) - expected = Index([], dtype=np.int64, name="foo") - tm.assert_index_equal(result, expected) - - def test_take_accepts_non_int64_array(self): - idx = RangeIndex(1, 4, name="foo") - result = idx.take(np.array([2, 1], dtype=np.uint32)) - expected = Index([3, 2], dtype=np.int64, name="foo") - tm.assert_index_equal(result, expected) - - def test_take_when_index_has_step(self): - idx = RangeIndex(1, 11, 3, name="foo") # [1, 4, 7, 10] - result = idx.take(np.array([1, 0, -1, -4])) - expected = Index([4, 1, 10, 1], dtype=np.int64, name="foo") - tm.assert_index_equal(result, expected) - - def test_take_when_index_has_negative_step(self): - idx = RangeIndex(11, -4, -2, name="foo") # [11, 9, 7, 5, 3, 1, -1, -3] - result = idx.take(np.array([1, 0, -1, -8])) - expected = Index([9, 11, -3, 11], dtype=np.int64, name="foo") - tm.assert_index_equal(result, expected) - - -class TestWhere: - def test_where_putmask_range_cast(self): - # GH#43240 - idx = RangeIndex(0, 5, name="test") - - mask = np.array([True, True, False, False, False]) - result = idx.putmask(mask, 10) - expected = Index([10, 10, 2, 3, 4], dtype=np.int64, name="test") - tm.assert_index_equal(result, expected) - - result = idx.where(~mask, 10) - tm.assert_index_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/strings/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/strings/__init__.py deleted file mode 100644 index 01b49b5e5b63323b065ec11fc34f6c247a7b0350..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/strings/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -import numpy as np - -import pandas as pd - -object_pyarrow_numpy = ("object", "string[pyarrow_numpy]") - - -def _convert_na_value(ser, expected): - if ser.dtype != object: - if ser.dtype.storage == "pyarrow_numpy": - expected = expected.fillna(np.nan) - else: - # GH#18463 - expected = expected.fillna(pd.NA) - return expected diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/rich/style.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/rich/style.py deleted file mode 100644 index 0787c33147b484432bcb4dc5523ab9c8d73bc197..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pip/_vendor/rich/style.py +++ /dev/null @@ -1,785 +0,0 @@ -import sys -from functools import lru_cache -from marshal import loads, dumps -from random import randint -from typing import Any, cast, 
Dict, Iterable, List, Optional, Type, Union - -from . import errors -from .color import Color, ColorParseError, ColorSystem, blend_rgb -from .repr import rich_repr, Result -from .terminal_theme import DEFAULT_TERMINAL_THEME, TerminalTheme - - -# Style instances and style definitions are often interchangeable -StyleType = Union[str, "Style"] - - -class _Bit: - """A descriptor to get/set a style attribute bit.""" - - __slots__ = ["bit"] - - def __init__(self, bit_no: int) -> None: - self.bit = 1 << bit_no - - def __get__(self, obj: "Style", objtype: Type["Style"]) -> Optional[bool]: - if obj._set_attributes & self.bit: - return obj._attributes & self.bit != 0 - return None - - -@rich_repr -class Style: - """A terminal style. - - A terminal style consists of a color (`color`), a background color (`bgcolor`), and a number of attributes, such - as bold, italic etc. The attributes have 3 states: they can either be on - (``True``), off (``False``), or not set (``None``). - - Args: - color (Union[Color, str], optional): Color of terminal text. Defaults to None. - bgcolor (Union[Color, str], optional): Color of terminal background. Defaults to None. - bold (bool, optional): Enable bold text. Defaults to None. - dim (bool, optional): Enable dim text. Defaults to None. - italic (bool, optional): Enable italic text. Defaults to None. - underline (bool, optional): Enable underlined text. Defaults to None. - blink (bool, optional): Enabled blinking text. Defaults to None. - blink2 (bool, optional): Enable fast blinking text. Defaults to None. - reverse (bool, optional): Enabled reverse text. Defaults to None. - conceal (bool, optional): Enable concealed text. Defaults to None. - strike (bool, optional): Enable strikethrough text. Defaults to None. - underline2 (bool, optional): Enable doubly underlined text. Defaults to None. - frame (bool, optional): Enable framed text. Defaults to None. - encircle (bool, optional): Enable encircled text. Defaults to None. - overline (bool, optional): Enable overlined text. Defaults to None. - link (str, link): Link URL. Defaults to None. 
- - """ - - _color: Optional[Color] - _bgcolor: Optional[Color] - _attributes: int - _set_attributes: int - _hash: int - _null: bool - _meta: Optional[bytes] - - __slots__ = [ - "_color", - "_bgcolor", - "_attributes", - "_set_attributes", - "_link", - "_link_id", - "_ansi", - "_style_definition", - "_hash", - "_null", - "_meta", - ] - - # maps bits on to SGR parameter - _style_map = { - 0: "1", - 1: "2", - 2: "3", - 3: "4", - 4: "5", - 5: "6", - 6: "7", - 7: "8", - 8: "9", - 9: "21", - 10: "51", - 11: "52", - 12: "53", - } - - STYLE_ATTRIBUTES = { - "dim": "dim", - "d": "dim", - "bold": "bold", - "b": "bold", - "italic": "italic", - "i": "italic", - "underline": "underline", - "u": "underline", - "blink": "blink", - "blink2": "blink2", - "reverse": "reverse", - "r": "reverse", - "conceal": "conceal", - "c": "conceal", - "strike": "strike", - "s": "strike", - "underline2": "underline2", - "uu": "underline2", - "frame": "frame", - "encircle": "encircle", - "overline": "overline", - "o": "overline", - } - - def __init__( - self, - *, - color: Optional[Union[Color, str]] = None, - bgcolor: Optional[Union[Color, str]] = None, - bold: Optional[bool] = None, - dim: Optional[bool] = None, - italic: Optional[bool] = None, - underline: Optional[bool] = None, - blink: Optional[bool] = None, - blink2: Optional[bool] = None, - reverse: Optional[bool] = None, - conceal: Optional[bool] = None, - strike: Optional[bool] = None, - underline2: Optional[bool] = None, - frame: Optional[bool] = None, - encircle: Optional[bool] = None, - overline: Optional[bool] = None, - link: Optional[str] = None, - meta: Optional[Dict[str, Any]] = None, - ): - self._ansi: Optional[str] = None - self._style_definition: Optional[str] = None - - def _make_color(color: Union[Color, str]) -> Color: - return color if isinstance(color, Color) else Color.parse(color) - - self._color = None if color is None else _make_color(color) - self._bgcolor = None if bgcolor is None else _make_color(bgcolor) - self._set_attributes = sum( - ( - bold is not None, - dim is not None and 2, - italic is not None and 4, - underline is not None and 8, - blink is not None and 16, - blink2 is not None and 32, - reverse is not None and 64, - conceal is not None and 128, - strike is not None and 256, - underline2 is not None and 512, - frame is not None and 1024, - encircle is not None and 2048, - overline is not None and 4096, - ) - ) - self._attributes = ( - sum( - ( - bold and 1 or 0, - dim and 2 or 0, - italic and 4 or 0, - underline and 8 or 0, - blink and 16 or 0, - blink2 and 32 or 0, - reverse and 64 or 0, - conceal and 128 or 0, - strike and 256 or 0, - underline2 and 512 or 0, - frame and 1024 or 0, - encircle and 2048 or 0, - overline and 4096 or 0, - ) - ) - if self._set_attributes - else 0 - ) - - self._link = link - self._link_id = f"{randint(0, 999999)}" if link else "" - self._meta = None if meta is None else dumps(meta) - self._hash = hash( - ( - self._color, - self._bgcolor, - self._attributes, - self._set_attributes, - link, - self._meta, - ) - ) - self._null = not (self._set_attributes or color or bgcolor or link or meta) - - @classmethod - def null(cls) -> "Style": - """Create an 'null' style, equivalent to Style(), but more performant.""" - return NULL_STYLE - - @classmethod - def from_color( - cls, color: Optional[Color] = None, bgcolor: Optional[Color] = None - ) -> "Style": - """Create a new style with colors and no attributes. - - Returns: - color (Optional[Color]): A (foreground) color, or None for no color. Defaults to None. 
- bgcolor (Optional[Color]): A (background) color, or None for no color. Defaults to None. - """ - style: Style = cls.__new__(Style) - style._ansi = None - style._style_definition = None - style._color = color - style._bgcolor = bgcolor - style._set_attributes = 0 - style._attributes = 0 - style._link = None - style._link_id = "" - style._meta = None - style._hash = hash( - ( - color, - bgcolor, - None, - None, - None, - None, - ) - ) - style._null = not (color or bgcolor) - return style - - @classmethod - def from_meta(cls, meta: Optional[Dict[str, Any]]) -> "Style": - """Create a new style with meta data. - - Returns: - meta (Optional[Dict[str, Any]]): A dictionary of meta data. Defaults to None. - """ - style: Style = cls.__new__(Style) - style._ansi = None - style._style_definition = None - style._color = None - style._bgcolor = None - style._set_attributes = 0 - style._attributes = 0 - style._link = None - style._link_id = "" - style._meta = dumps(meta) - style._hash = hash( - ( - None, - None, - None, - None, - None, - style._meta, - ) - ) - style._null = not (meta) - return style - - @classmethod - def on(cls, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> "Style": - """Create a blank style with meta information. - - Example: - style = Style.on(click=self.on_click) - - Args: - meta (Optiona[Dict[str, Any]], optional): An optional dict of meta information. - **handlers (Any): Keyword arguments are translated in to handlers. - - Returns: - Style: A Style with meta information attached. - """ - meta = {} if meta is None else meta - meta.update({f"@{key}": value for key, value in handlers.items()}) - return cls.from_meta(meta) - - bold = _Bit(0) - dim = _Bit(1) - italic = _Bit(2) - underline = _Bit(3) - blink = _Bit(4) - blink2 = _Bit(5) - reverse = _Bit(6) - conceal = _Bit(7) - strike = _Bit(8) - underline2 = _Bit(9) - frame = _Bit(10) - encircle = _Bit(11) - overline = _Bit(12) - - @property - def link_id(self) -> str: - """Get a link id, used in ansi code for links.""" - return self._link_id - - def __str__(self) -> str: - """Re-generate style definition from attributes.""" - if self._style_definition is None: - attributes: List[str] = [] - append = attributes.append - bits = self._set_attributes - if bits & 0b0000000001111: - if bits & 1: - append("bold" if self.bold else "not bold") - if bits & (1 << 1): - append("dim" if self.dim else "not dim") - if bits & (1 << 2): - append("italic" if self.italic else "not italic") - if bits & (1 << 3): - append("underline" if self.underline else "not underline") - if bits & 0b0000111110000: - if bits & (1 << 4): - append("blink" if self.blink else "not blink") - if bits & (1 << 5): - append("blink2" if self.blink2 else "not blink2") - if bits & (1 << 6): - append("reverse" if self.reverse else "not reverse") - if bits & (1 << 7): - append("conceal" if self.conceal else "not conceal") - if bits & (1 << 8): - append("strike" if self.strike else "not strike") - if bits & 0b1111000000000: - if bits & (1 << 9): - append("underline2" if self.underline2 else "not underline2") - if bits & (1 << 10): - append("frame" if self.frame else "not frame") - if bits & (1 << 11): - append("encircle" if self.encircle else "not encircle") - if bits & (1 << 12): - append("overline" if self.overline else "not overline") - if self._color is not None: - append(self._color.name) - if self._bgcolor is not None: - append("on") - append(self._bgcolor.name) - if self._link: - append("link") - append(self._link) - self._style_definition = " ".join(attributes) 
or "none" - return self._style_definition - - def __bool__(self) -> bool: - """A Style is false if it has no attributes, colors, or links.""" - return not self._null - - def _make_ansi_codes(self, color_system: ColorSystem) -> str: - """Generate ANSI codes for this style. - - Args: - color_system (ColorSystem): Color system. - - Returns: - str: String containing codes. - """ - if self._ansi is None: - sgr: List[str] = [] - append = sgr.append - _style_map = self._style_map - attributes = self._attributes & self._set_attributes - if attributes: - if attributes & 1: - append(_style_map[0]) - if attributes & 2: - append(_style_map[1]) - if attributes & 4: - append(_style_map[2]) - if attributes & 8: - append(_style_map[3]) - if attributes & 0b0000111110000: - for bit in range(4, 9): - if attributes & (1 << bit): - append(_style_map[bit]) - if attributes & 0b1111000000000: - for bit in range(9, 13): - if attributes & (1 << bit): - append(_style_map[bit]) - if self._color is not None: - sgr.extend(self._color.downgrade(color_system).get_ansi_codes()) - if self._bgcolor is not None: - sgr.extend( - self._bgcolor.downgrade(color_system).get_ansi_codes( - foreground=False - ) - ) - self._ansi = ";".join(sgr) - return self._ansi - - @classmethod - @lru_cache(maxsize=1024) - def normalize(cls, style: str) -> str: - """Normalize a style definition so that styles with the same effect have the same string - representation. - - Args: - style (str): A style definition. - - Returns: - str: Normal form of style definition. - """ - try: - return str(cls.parse(style)) - except errors.StyleSyntaxError: - return style.strip().lower() - - @classmethod - def pick_first(cls, *values: Optional[StyleType]) -> StyleType: - """Pick first non-None style.""" - for value in values: - if value is not None: - return value - raise ValueError("expected at least one non-None style") - - def __rich_repr__(self) -> Result: - yield "color", self.color, None - yield "bgcolor", self.bgcolor, None - yield "bold", self.bold, None, - yield "dim", self.dim, None, - yield "italic", self.italic, None - yield "underline", self.underline, None, - yield "blink", self.blink, None - yield "blink2", self.blink2, None - yield "reverse", self.reverse, None - yield "conceal", self.conceal, None - yield "strike", self.strike, None - yield "underline2", self.underline2, None - yield "frame", self.frame, None - yield "encircle", self.encircle, None - yield "link", self.link, None - if self._meta: - yield "meta", self.meta - - def __eq__(self, other: Any) -> bool: - if not isinstance(other, Style): - return NotImplemented - return ( - self._color == other._color - and self._bgcolor == other._bgcolor - and self._set_attributes == other._set_attributes - and self._attributes == other._attributes - and self._link == other._link - and self._meta == other._meta - ) - - def __hash__(self) -> int: - return self._hash - - @property - def color(self) -> Optional[Color]: - """The foreground color or None if it is not set.""" - return self._color - - @property - def bgcolor(self) -> Optional[Color]: - """The background color or None if it is not set.""" - return self._bgcolor - - @property - def link(self) -> Optional[str]: - """Link text, if set.""" - return self._link - - @property - def transparent_background(self) -> bool: - """Check if the style specified a transparent background.""" - return self.bgcolor is None or self.bgcolor.is_default - - @property - def background_style(self) -> "Style": - """A Style with background only.""" - return 
Style(bgcolor=self.bgcolor) - - @property - def meta(self) -> Dict[str, Any]: - """Get meta information (can not be changed after construction).""" - return {} if self._meta is None else cast(Dict[str, Any], loads(self._meta)) - - @property - def without_color(self) -> "Style": - """Get a copy of the style with color removed.""" - if self._null: - return NULL_STYLE - style: Style = self.__new__(Style) - style._ansi = None - style._style_definition = None - style._color = None - style._bgcolor = None - style._attributes = self._attributes - style._set_attributes = self._set_attributes - style._link = self._link - style._link_id = f"{randint(0, 999999)}" if self._link else "" - style._hash = self._hash - style._null = False - style._meta = None - return style - - @classmethod - @lru_cache(maxsize=4096) - def parse(cls, style_definition: str) -> "Style": - """Parse a style definition. - - Args: - style_definition (str): A string containing a style. - - Raises: - errors.StyleSyntaxError: If the style definition syntax is invalid. - - Returns: - `Style`: A Style instance. - """ - if style_definition.strip() == "none" or not style_definition: - return cls.null() - - STYLE_ATTRIBUTES = cls.STYLE_ATTRIBUTES - color: Optional[str] = None - bgcolor: Optional[str] = None - attributes: Dict[str, Optional[Any]] = {} - link: Optional[str] = None - - words = iter(style_definition.split()) - for original_word in words: - word = original_word.lower() - if word == "on": - word = next(words, "") - if not word: - raise errors.StyleSyntaxError("color expected after 'on'") - try: - Color.parse(word) is None - except ColorParseError as error: - raise errors.StyleSyntaxError( - f"unable to parse {word!r} as background color; {error}" - ) from None - bgcolor = word - - elif word == "not": - word = next(words, "") - attribute = STYLE_ATTRIBUTES.get(word) - if attribute is None: - raise errors.StyleSyntaxError( - f"expected style attribute after 'not', found {word!r}" - ) - attributes[attribute] = False - - elif word == "link": - word = next(words, "") - if not word: - raise errors.StyleSyntaxError("URL expected after 'link'") - link = word - - elif word in STYLE_ATTRIBUTES: - attributes[STYLE_ATTRIBUTES[word]] = True - - else: - try: - Color.parse(word) - except ColorParseError as error: - raise errors.StyleSyntaxError( - f"unable to parse {word!r} as color; {error}" - ) from None - color = word - style = Style(color=color, bgcolor=bgcolor, link=link, **attributes) - return style - - @lru_cache(maxsize=1024) - def get_html_style(self, theme: Optional[TerminalTheme] = None) -> str: - """Get a CSS style rule.""" - theme = theme or DEFAULT_TERMINAL_THEME - css: List[str] = [] - append = css.append - - color = self.color - bgcolor = self.bgcolor - if self.reverse: - color, bgcolor = bgcolor, color - if self.dim: - foreground_color = ( - theme.foreground_color if color is None else color.get_truecolor(theme) - ) - color = Color.from_triplet( - blend_rgb(foreground_color, theme.background_color, 0.5) - ) - if color is not None: - theme_color = color.get_truecolor(theme) - append(f"color: {theme_color.hex}") - append(f"text-decoration-color: {theme_color.hex}") - if bgcolor is not None: - theme_color = bgcolor.get_truecolor(theme, foreground=False) - append(f"background-color: {theme_color.hex}") - if self.bold: - append("font-weight: bold") - if self.italic: - append("font-style: italic") - if self.underline: - append("text-decoration: underline") - if self.strike: - append("text-decoration: line-through") - if 
self.overline: - append("text-decoration: overline") - return "; ".join(css) - - @classmethod - def combine(cls, styles: Iterable["Style"]) -> "Style": - """Combine styles and get result. - - Args: - styles (Iterable[Style]): Styles to combine. - - Returns: - Style: A new style instance. - """ - iter_styles = iter(styles) - return sum(iter_styles, next(iter_styles)) - - @classmethod - def chain(cls, *styles: "Style") -> "Style": - """Combine styles from positional argument in to a single style. - - Args: - *styles (Iterable[Style]): Styles to combine. - - Returns: - Style: A new style instance. - """ - iter_styles = iter(styles) - return sum(iter_styles, next(iter_styles)) - - def copy(self) -> "Style": - """Get a copy of this style. - - Returns: - Style: A new Style instance with identical attributes. - """ - if self._null: - return NULL_STYLE - style: Style = self.__new__(Style) - style._ansi = self._ansi - style._style_definition = self._style_definition - style._color = self._color - style._bgcolor = self._bgcolor - style._attributes = self._attributes - style._set_attributes = self._set_attributes - style._link = self._link - style._link_id = f"{randint(0, 999999)}" if self._link else "" - style._hash = self._hash - style._null = False - style._meta = self._meta - return style - - def update_link(self, link: Optional[str] = None) -> "Style": - """Get a copy with a different value for link. - - Args: - link (str, optional): New value for link. Defaults to None. - - Returns: - Style: A new Style instance. - """ - style: Style = self.__new__(Style) - style._ansi = self._ansi - style._style_definition = self._style_definition - style._color = self._color - style._bgcolor = self._bgcolor - style._attributes = self._attributes - style._set_attributes = self._set_attributes - style._link = link - style._link_id = f"{randint(0, 999999)}" if link else "" - style._hash = self._hash - style._null = False - style._meta = self._meta - return style - - def render( - self, - text: str = "", - *, - color_system: Optional[ColorSystem] = ColorSystem.TRUECOLOR, - legacy_windows: bool = False, - ) -> str: - """Render the ANSI codes for the style. - - Args: - text (str, optional): A string to style. Defaults to "". - color_system (Optional[ColorSystem], optional): Color system to render to. Defaults to ColorSystem.TRUECOLOR. - - Returns: - str: A string containing ANSI style codes. - """ - if not text or color_system is None: - return text - attrs = self._make_ansi_codes(color_system) - rendered = f"\x1b[{attrs}m{text}\x1b[0m" if attrs else text - if self._link and not legacy_windows: - rendered = ( - f"\x1b]8;id={self._link_id};{self._link}\x1b\\{rendered}\x1b]8;;\x1b\\" - ) - return rendered - - def test(self, text: Optional[str] = None) -> None: - """Write text with style directly to terminal. - - This method is for testing purposes only. - - Args: - text (Optional[str], optional): Text to style or None for style name. 
- - """ - text = text or str(self) - sys.stdout.write(f"{self.render(text)}\n") - - def __add__(self, style: Optional["Style"]) -> "Style": - if not (isinstance(style, Style) or style is None): - return NotImplemented - if style is None or style._null: - return self - if self._null: - return style - new_style: Style = self.__new__(Style) - new_style._ansi = None - new_style._style_definition = None - new_style._color = style._color or self._color - new_style._bgcolor = style._bgcolor or self._bgcolor - new_style._attributes = (self._attributes & ~style._set_attributes) | ( - style._attributes & style._set_attributes - ) - new_style._set_attributes = self._set_attributes | style._set_attributes - new_style._link = style._link or self._link - new_style._link_id = style._link_id or self._link_id - new_style._hash = style._hash - new_style._null = self._null or style._null - if self._meta and style._meta: - new_style._meta = dumps({**self.meta, **style.meta}) - else: - new_style._meta = self._meta or style._meta - return new_style - - -NULL_STYLE = Style() - - -class StyleStack: - """A stack of styles.""" - - __slots__ = ["_stack"] - - def __init__(self, default_style: "Style") -> None: - self._stack: List[Style] = [default_style] - - def __repr__(self) -> str: - return f"<stylestack {self._stack!r}>" - - @property - def current(self) -> Style: - """Get the Style at the top of the stack.""" - return self._stack[-1] - - def push(self, style: Style) -> None: - """Push a new style on to the stack. - - Args: - style (Style): New style to combine with current style. - """ - self._stack.append(self._stack[-1] + style) - - def pop(self) -> Style: - """Pop last style and discard. - - Returns: - Style: New current style (also available as stack.current) - """ - self._stack.pop() - return self._stack[-1] diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/whiley.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/whiley.py deleted file mode 100644 index bf707d25cc7b0da3a99b44af16cafd9a772d2abd..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/whiley.py +++ /dev/null @@ -1,116 +0,0 @@ -""" - pygments.lexers.whiley - ~~~~~~~~~~~~~~~~~~~~~~ - - Lexers for the Whiley language. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pygments.lexer import RegexLexer, bygroups, words -from pygments.token import Comment, Keyword, Name, Number, Operator, \ - Punctuation, String, Text - -__all__ = ['WhileyLexer'] - - -class WhileyLexer(RegexLexer): - """ - Lexer for the Whiley programming language. - - ..
versionadded:: 2.2 - """ - name = 'Whiley' - url = 'http://whiley.org/' - filenames = ['*.whiley'] - aliases = ['whiley'] - mimetypes = ['text/x-whiley'] - - # See the language specification: - # http://whiley.org/download/WhileyLanguageSpec.pdf - - tokens = { - 'root': [ - # Whitespace - (r'\s+', Text), - - # Comments - (r'//.*', Comment.Single), - # don't parse empty comment as doc comment - (r'/\*\*/', Comment.Multiline), - (r'(?s)/\*\*.*?\*/', String.Doc), - (r'(?s)/\*.*?\*/', Comment.Multiline), - - # Keywords - (words(( - 'if', 'else', 'while', 'for', 'do', 'return', - 'switch', 'case', 'default', 'break', 'continue', - 'requires', 'ensures', 'where', 'assert', 'assume', - 'all', 'no', 'some', 'in', 'is', 'new', - 'throw', 'try', 'catch', 'debug', 'skip', 'fail', - 'finite', 'total'), suffix=r'\b'), Keyword.Reserved), - (words(( - 'function', 'method', 'public', 'private', 'protected', - 'export', 'native'), suffix=r'\b'), Keyword.Declaration), - # "constant" & "type" are not keywords unless used in declarations - (r'(constant|type)(\s+)([a-zA-Z_]\w*)(\s+)(is)\b', - bygroups(Keyword.Declaration, Text, Name, Text, Keyword.Reserved)), - (r'(true|false|null)\b', Keyword.Constant), - (r'(bool|byte|int|real|any|void)\b', Keyword.Type), - # "from" is not a keyword unless used with import - (r'(import)(\s+)(\*)([^\S\n]+)(from)\b', - bygroups(Keyword.Namespace, Text, Punctuation, Text, Keyword.Namespace)), - (r'(import)(\s+)([a-zA-Z_]\w*)([^\S\n]+)(from)\b', - bygroups(Keyword.Namespace, Text, Name, Text, Keyword.Namespace)), - (r'(package|import)\b', Keyword.Namespace), - - # standard library: https://github.com/Whiley/WhileyLibs/ - (words(( - # types defined in whiley.lang.Int - 'i8', 'i16', 'i32', 'i64', - 'u8', 'u16', 'u32', 'u64', - 'uint', 'nat', - - # whiley.lang.Any - 'toString'), suffix=r'\b'), Name.Builtin), - - # byte literal - (r'[01]+b', Number.Bin), - - # decimal literal - (r'[0-9]+\.[0-9]+', Number.Float), - # match "1." but not ranges like "3..5" - (r'[0-9]+\.(?!\.)', Number.Float), - - # integer literal - (r'0x[0-9a-fA-F]+', Number.Hex), - (r'[0-9]+', Number.Integer), - - # character literal - (r"""'[^\\]'""", String.Char), - (r"""(')(\\['"\\btnfr])(')""", - bygroups(String.Char, String.Escape, String.Char)), - - # string literal - (r'"', String, 'string'), - - # operators and punctuation - (r'[{}()\[\],.;]', Punctuation), - (r'[+\-*/%&|<>^!~@=:?' 
- # unicode operators - r'\u2200\u2203\u2205\u2282\u2286\u2283\u2287' - r'\u222A\u2229\u2264\u2265\u2208\u2227\u2228' - r']', Operator), - - # identifier - (r'[a-zA-Z_]\w*', Name), - ], - 'string': [ - (r'"', String, '#pop'), - (r'\\[btnfr]', String.Escape), - (r'\\u[0-9a-fA-F]{4}', String.Escape), - (r'\\.', String), - (r'[^\\"]+', String), - ], - } diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/align.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/align.py deleted file mode 100644 index e8fc3062347943780aebe1ddc9c1faf15c9f1590..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/rich/align.py +++ /dev/null @@ -1,311 +0,0 @@ -import sys -from itertools import chain -from typing import TYPE_CHECKING, Iterable, Optional - -if sys.version_info >= (3, 8): - from typing import Literal -else: - from typing_extensions import Literal # pragma: no cover - -from .constrain import Constrain -from .jupyter import JupyterMixin -from .measure import Measurement -from .segment import Segment -from .style import StyleType - -if TYPE_CHECKING: - from .console import Console, ConsoleOptions, RenderableType, RenderResult - -AlignMethod = Literal["left", "center", "right"] -VerticalAlignMethod = Literal["top", "middle", "bottom"] - - -class Align(JupyterMixin): - """Align a renderable by adding spaces if necessary. - - Args: - renderable (RenderableType): A console renderable. - align (AlignMethod): One of "left", "center", or "right"" - style (StyleType, optional): An optional style to apply to the background. - vertical (Optional[VerticalAlginMethod], optional): Optional vertical align, one of "top", "middle", or "bottom". Defaults to None. - pad (bool, optional): Pad the right with spaces. Defaults to True. - width (int, optional): Restrict contents to given width, or None to use default width. Defaults to None. - height (int, optional): Set height of align renderable, or None to fit to contents. Defaults to None. - - Raises: - ValueError: if ``align`` is not one of the expected values. 
- """ - - def __init__( - self, - renderable: "RenderableType", - align: AlignMethod = "left", - style: Optional[StyleType] = None, - *, - vertical: Optional[VerticalAlignMethod] = None, - pad: bool = True, - width: Optional[int] = None, - height: Optional[int] = None, - ) -> None: - if align not in ("left", "center", "right"): - raise ValueError( - f'invalid value for align, expected "left", "center", or "right" (not {align!r})' - ) - if vertical is not None and vertical not in ("top", "middle", "bottom"): - raise ValueError( - f'invalid value for vertical, expected "top", "middle", or "bottom" (not {vertical!r})' - ) - self.renderable = renderable - self.align = align - self.style = style - self.vertical = vertical - self.pad = pad - self.width = width - self.height = height - - def __repr__(self) -> str: - return f"Align({self.renderable!r}, {self.align!r})" - - @classmethod - def left( - cls, - renderable: "RenderableType", - style: Optional[StyleType] = None, - *, - vertical: Optional[VerticalAlignMethod] = None, - pad: bool = True, - width: Optional[int] = None, - height: Optional[int] = None, - ) -> "Align": - """Align a renderable to the left.""" - return cls( - renderable, - "left", - style=style, - vertical=vertical, - pad=pad, - width=width, - height=height, - ) - - @classmethod - def center( - cls, - renderable: "RenderableType", - style: Optional[StyleType] = None, - *, - vertical: Optional[VerticalAlignMethod] = None, - pad: bool = True, - width: Optional[int] = None, - height: Optional[int] = None, - ) -> "Align": - """Align a renderable to the center.""" - return cls( - renderable, - "center", - style=style, - vertical=vertical, - pad=pad, - width=width, - height=height, - ) - - @classmethod - def right( - cls, - renderable: "RenderableType", - style: Optional[StyleType] = None, - *, - vertical: Optional[VerticalAlignMethod] = None, - pad: bool = True, - width: Optional[int] = None, - height: Optional[int] = None, - ) -> "Align": - """Align a renderable to the right.""" - return cls( - renderable, - "right", - style=style, - vertical=vertical, - pad=pad, - width=width, - height=height, - ) - - def __rich_console__( - self, console: "Console", options: "ConsoleOptions" - ) -> "RenderResult": - align = self.align - width = console.measure(self.renderable, options=options).maximum - rendered = console.render( - Constrain( - self.renderable, width if self.width is None else min(width, self.width) - ), - options.update(height=None), - ) - lines = list(Segment.split_lines(rendered)) - width, height = Segment.get_shape(lines) - lines = Segment.set_shape(lines, width, height) - new_line = Segment.line() - excess_space = options.max_width - width - style = console.get_style(self.style) if self.style is not None else None - - def generate_segments() -> Iterable[Segment]: - if excess_space <= 0: - # Exact fit - for line in lines: - yield from line - yield new_line - - elif align == "left": - # Pad on the right - pad = Segment(" " * excess_space, style) if self.pad else None - for line in lines: - yield from line - if pad: - yield pad - yield new_line - - elif align == "center": - # Pad left and right - left = excess_space // 2 - pad = Segment(" " * left, style) - pad_right = ( - Segment(" " * (excess_space - left), style) if self.pad else None - ) - for line in lines: - if left: - yield pad - yield from line - if pad_right: - yield pad_right - yield new_line - - elif align == "right": - # Padding on left - pad = Segment(" " * excess_space, style) - for line in lines: - yield pad - 
yield from line - yield new_line - - blank_line = ( - Segment(f"{' ' * (self.width or options.max_width)}\n", style) - if self.pad - else Segment("\n") - ) - - def blank_lines(count: int) -> Iterable[Segment]: - if count > 0: - for _ in range(count): - yield blank_line - - vertical_height = self.height or options.height - iter_segments: Iterable[Segment] - if self.vertical and vertical_height is not None: - if self.vertical == "top": - bottom_space = vertical_height - height - iter_segments = chain(generate_segments(), blank_lines(bottom_space)) - elif self.vertical == "middle": - top_space = (vertical_height - height) // 2 - bottom_space = vertical_height - top_space - height - iter_segments = chain( - blank_lines(top_space), - generate_segments(), - blank_lines(bottom_space), - ) - else: # self.vertical == "bottom": - top_space = vertical_height - height - iter_segments = chain(blank_lines(top_space), generate_segments()) - else: - iter_segments = generate_segments() - if self.style: - style = console.get_style(self.style) - iter_segments = Segment.apply_style(iter_segments, style) - yield from iter_segments - - def __rich_measure__( - self, console: "Console", options: "ConsoleOptions" - ) -> Measurement: - measurement = Measurement.get(console, options, self.renderable) - return measurement - - -class VerticalCenter(JupyterMixin): - """Vertically aligns a renderable. - - Warn: - This class is deprecated and may be removed in a future version. Use Align class with - `vertical="middle"`. - - Args: - renderable (RenderableType): A renderable object. - """ - - def __init__( - self, - renderable: "RenderableType", - style: Optional[StyleType] = None, - ) -> None: - self.renderable = renderable - self.style = style - - def __repr__(self) -> str: - return f"VerticalCenter({self.renderable!r})" - - def __rich_console__( - self, console: "Console", options: "ConsoleOptions" - ) -> "RenderResult": - style = console.get_style(self.style) if self.style is not None else None - lines = console.render_lines( - self.renderable, options.update(height=None), pad=False - ) - width, _height = Segment.get_shape(lines) - new_line = Segment.line() - height = options.height or options.size.height - top_space = (height - len(lines)) // 2 - bottom_space = height - top_space - len(lines) - blank_line = Segment(f"{' ' * width}", style) - - def blank_lines(count: int) -> Iterable[Segment]: - for _ in range(count): - yield blank_line - yield new_line - - if top_space > 0: - yield from blank_lines(top_space) - for line in lines: - yield from line - yield new_line - if bottom_space > 0: - yield from blank_lines(bottom_space) - - def __rich_measure__( - self, console: "Console", options: "ConsoleOptions" - ) -> Measurement: - measurement = Measurement.get(console, options, self.renderable) - return measurement - - -if __name__ == "__main__": # pragma: no cover - from rich.console import Console, Group - from rich.highlighter import ReprHighlighter - from rich.panel import Panel - - highlighter = ReprHighlighter() - console = Console() - - panel = Panel( - Group( - Align.left(highlighter("align='left'")), - Align.center(highlighter("align='center'")), - Align.right(highlighter("align='right'")), - ), - width=60, - style="on dark_blue", - title="Align", - ) - - console.print( - Align.center(panel, vertical="middle", style="on red", height=console.height) - ) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/errors.py 
b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/errors.py deleted file mode 100644 index 2701747f56cc77845159f2c5fee2d0ce114259af..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/errors.py +++ /dev/null @@ -1,16 +0,0 @@ -"""setuptools.errors - -Provides exceptions used by setuptools modules. -""" - -from distutils.errors import DistutilsError - - -class RemovedCommandError(DistutilsError, RuntimeError): - """Error used for commands that have been removed in setuptools. - - Since ``setuptools`` is built on ``distutils``, simply removing a command - from ``setuptools`` will make the behavior fall back to ``distutils``; this - error is raised if a command exists in ``distutils`` but has been actively - removed in ``setuptools``. - """ diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Buildwin Media Player Usb Device Driver UPD.md b/spaces/quidiaMuxgu/Expedit-SAM/Buildwin Media Player Usb Device Driver UPD.md deleted file mode 100644 index c69e5374e51ea5e700da3099eef9112f6a1dd8f4..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Buildwin Media Player Usb Device Driver UPD.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Buildwin media player usb device driver


      DOWNLOADhttps://geags.com/2uCsof



      - -You could download the latest version of Buildwin MediaPlayer USB Device driver on this page. Please choose the proper driver according to your computer ... 1fdad05405
      -
      -
      -

      diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Jihne Mera Dil Lutiya Punjabi Full Movie Download !!INSTALL!!.md b/spaces/quidiaMuxgu/Expedit-SAM/Jihne Mera Dil Lutiya Punjabi Full Movie Download !!INSTALL!!.md deleted file mode 100644 index d960bf63714ed789f0ab1ea6760b979eb565facf..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Jihne Mera Dil Lutiya Punjabi Full Movie Download !!INSTALL!!.md +++ /dev/null @@ -1,150 +0,0 @@ -

      What is the plot of Jihne Mera Dil Luteya Punjabi Full Movie?

      - -

      Jihne Mera Dil Luteya Punjabi Full Movie is a comedy-drama film that was released in 2011. The film is directed by Mandeep Kumar and stars Gippy Grewal, Neeru Bajwa, and Diljit Dosanjh in the lead roles.

      -

      Jihne Mera Dil Lutiya Punjabi Full Movie Download


      Download Zip ✓✓✓ https://geags.com/2uCrDg



      - -

      The film revolves around the lives of three friends, Yuvraj, Gurnoor, and Noor, who study at the Patiala University. Yuvraj and Gurnoor are carefree and fun-loving guys who are always up for some mischief. Noor is a beautiful and charming girl who joins their college and becomes their love interest.

      - -

      Yuvraj and Gurnoor both fall in love with Noor and try to woo her with their antics. However, Noor is not interested in either of them and has a secret crush on someone else. The film follows the hilarious and romantic adventures of the trio as they try to win Noor's heart and deal with their own personal issues.

      - -

      Why should you watch Jihne Mera Dil Luteya Punjabi Full Movie?

      - -

      Jihne Mera Dil Luteya Punjabi Full Movie is a film that will make you laugh, cry, and fall in love. The film has a lot of entertainment value and offers a refreshing take on the college romance genre. The film has a lot of witty dialogues, comic situations, and catchy songs that will keep you engaged throughout.

      - -

      The film also has a lot of emotional moments that will touch your heart. The film explores the themes of friendship, love, family, and dreams. The film shows how the three friends overcome their differences and support each other in their journey.

      - -

      The film also has a stellar cast that delivers brilliant performances. Gippy Grewal, Neeru Bajwa, and Diljit Dosanjh are among the most popular and talented actors in the Punjabi film industry. They have a great chemistry and charisma on screen that makes their characters more relatable and lovable.

      - -

      How to download Jihne Mera Dil Luteya Punjabi Full Movie?

      - -

      If you want to download Jihne Mera Dil Luteya Punjabi Full Movie, you can follow these simple steps:

      - -
        -
      1. Click on the link below to download the movie in HD quality.
      2. -
      3. Extract the zip file using WinRAR or any other software that can handle zip files.
      4. -
      5. Run the movie file and enjoy watching Jihne Mera Dil Luteya Punjabi Full Movie.
      6. -
      - -

      The download link is:

      - -

      Download Jihne Mera Dil Luteya Punjabi Full Movie

      -

      - -

      We hope this article has helped you learn more about Jihne Mera Dil Luteya Punjabi Full Movie and how to download it.

      - -

      If you have any questions or feedback about Jihne Mera Dil Luteya Punjabi Full Movie, you can leave a comment below or contact us via email.

      - -

      Thank you for reading!

      -

      Who are the cast and crew of Jihne Mera Dil Luteya Punjabi Full Movie?

      - -

      Jihne Mera Dil Luteya Punjabi Full Movie is a film that features some of the most popular and talented actors and actresses in the Punjabi film industry. The film is also directed by a renowned filmmaker who has delivered many hit movies in the past. Here are some of the cast and crew members of Jihne Mera Dil Luteya Punjabi Full Movie:

      - -
        -
      • Gippy Grewal as Yuvraj: Gippy Grewal is one of the most successful and versatile actors and singers in the Punjabi film industry. He has starred in many blockbuster movies such as Carry on Jatta, Manje Bistre, Ardaas, and Subedar Joginder Singh. He has also won many awards for his acting and singing skills.
      • -
      • Neeru Bajwa as Noor: Neeru Bajwa is one of the most beautiful and talented actresses in the Punjabi film industry. She has acted in many hit movies such as Mel Karade Rabba, Jatt & Juliet, Sardaar Ji, and Shadaa. She has also won many awards for her acting and dancing skills.
      • -
      • Diljit Dosanjh as Gurnoor: Diljit Dosanjh is one of the most popular and talented actors and singers in the Punjabi film industry. He has acted in many hit movies such as Jatt & Juliet, Punjab 1984, Sardaar Ji, and Super Singh. He has also won many awards for his acting and singing skills.
      • -
      • Mandeep Kumar as Director: Mandeep Kumar is a well-known director and writer in the Punjabi film industry. He has directed many hit movies such as Tere Naal Love Ho Gaya, Ambarsariya, Double Di Trouble, and Carry on Jatta 2. He has also written many scripts for various movies.
      • -
      • Dheeraj Rattan as Writer: Dheeraj Rattan is a famous writer and director in the Punjabi film industry. He has written many hit movies such as Mel Karade Rabba, Jatt & Juliet, Jihne Mera Dil Luteya, and Sardaar Ji 2. He has also directed some movies such as Ishq Garaari and Saadi Love Story.
      • -
      - -

      These are some of the cast and crew members of Jihne Mera Dil Luteya Punjabi Full Movie. You can find more information about them on their official websites or social media accounts.

      - -

      Conclusion

      - -

      Jihne Mera Dil Luteya Punjabi Full Movie is a comedy-drama film that was released in 2011. The film is directed by Mandeep Kumar and stars Gippy Grewal, Neeru Bajwa, and Diljit Dosanjh in the lead roles.

      - -

      The film revolves around the lives of three friends, Yuvraj, Gurnoor, and Noor, who study at the Patiala University. Yuvraj and Gurnoor are carefree and fun-loving guys who are always up for some mischief. Noor is a beautiful and charming girl who joins their college and becomes their love interest.

      - -

      Yuvraj and Gurnoor both fall in love with Noor and try to woo her with their antics. However, Noor is not interested in either of them and has a secret crush on someone else. The film follows the hilarious and romantic adventures of the trio as they try to win Noor's heart and deal with their own personal issues.

      - -

      If you want to download Jihne Mera Dil Luteya Punjabi Full Movie, you can follow these simple steps:

      - -
        -
      1. Click on the link below to download the movie in HD quality.
      2. -
      3. Extract the zip file using WinRAR or any other software that can handle zip files.
      4. -
      5. Run the movie file and enjoy watching Jihne Mera Dil Luteya Punjabi Full Movie.
      6. -
      - -

      The download link is:

      - -

      Download Jihne Mera Dil Luteya Punjabi Full Movie

      - -

      We hope this article has helped you learn more about Jihne Mera Dil Luteya Punjabi Full Movie and how to download it.

      - -

      If you have any questions or feedback about Jihne Mera Dil Luteya Punjabi Full Movie, you can leave a comment below or contact us via email.

      - -

      Thank you for reading!

      -

      What are the songs of Jihne Mera Dil Luteya Punjabi Full Movie?

      - -

      Jihne Mera Dil Luteya Punjabi Full Movie is a film that has a lot of songs that are catchy, melodious, and fun. The film has a total of 11 songs that are composed by Bhinda Aujla and sung by various artists such as Gippy Grewal, Diljit Dosanjh, Gurlez Akhtar, and more. The film also has some popular Punjabi folk songs that are remixed and recreated for the film. Here are some of the songs of Jihne Mera Dil Luteya Punjabi Full Movie:

      - -
        -
      • Aakadd Dikhawe Je Koi: This is a peppy and upbeat song that features Diljit Dosanjh and Neeru Bajwa. The song is about how Diljit tries to impress Neeru with his attitude and style.
      • -
      • Billi Billi Akkh: This is a romantic and catchy song that features Gippy Grewal and Neeru Bajwa. The song is about how Gippy praises Neeru's eyes and beauty.
      • -
      • Bina Gallo Kise Naal: This is a sad and emotional song that features Gippy Grewal and Diljit Dosanjh. The song is about how they feel heartbroken after losing their love interest.
      • -
      • Channa: This is a beautiful and melodious song that features Gippy Grewal and Neeru Bajwa. The song is about how they express their love for each other.
      • -
      • Fukre: This is a fun and energetic song that features Diljit Dosanjh and Gippy Grewal. The song is about how they enjoy their college life and have fun with their friends.
      • -
      • Jado Kade Tohar Shohar: This is a humorous and witty song that features Gippy Grewal, Diljit Dosanjh, and Jaswinder Bhalla. The song is about how they tease each other about their future husbands.
      • -
      • Jhanjhar: This is a folk song that features Gippy Grewal, Diljit Dosanjh, and Gurlez Akhtar. The song is about how they dance to the sound of the anklets.
      • -
      • Jitthe Ho Jiye Khadde: This is a folk song that features Gippy Grewal and Diljit Dosanjh. The song is about how they challenge each other to fight wherever they meet.
      • -
      • Munde Jattan De: This is a folk song that features Gippy Grewal. The song is about how he boasts about his Jatt identity and pride.
      • -
      • Supna: This is a romantic and dreamy song that features Diljit Dosanjh and Neeru Bajwa. The song is about how Diljit sees Neeru in his dreams and falls in love with her.
      • -
      • Yaari Naalo Vadh Cheez: This is a folk song that features Diljit Dosanjh. The song is about how he values his friendship more than anything else.
      • -
      - -

      These are some of the songs of Jihne Mera Dil Luteya Punjabi Full Movie. You can listen to them online on various platforms such as JioSaavn, Gaana, Spotify, etc.

      - -

      Conclusion

      - -

      Jihne Mera Dil Luteya Punjabi Full Movie is a comedy-drama film that was released in 2011. The film is directed by Mandeep Kumar and stars Gippy Grewal, Neeru Bajwa, and Diljit Dosanjh in the lead roles.

      - -

      The film revolves around the lives of Yuvraj and Gurnoor, two carefree and fun-loving students at Patiala University who are always up for some mischief, and Noor, a beautiful and charming girl who joins their college and becomes their love interest.

      - -

      Yuvraj and Gurnoor both fall in love with Noor and try to woo her with their antics. However, Noor is not interested in either of them and has a secret crush on someone else. The film follows the hilarious and romantic adventures of the trio as they try to win Noor's heart and deal with their own personal issues.

      - -

      If you want to download Jihne Mera Dil Luteya Punjabi Full Movie, you can follow these simple steps:

      - -
        -
      1. Click on the link below to download the movie in HD quality.
      2. -
      3. Extract the zip file using WinRAR or any other software that can handle zip files.
      4. -
      5. Run the movie file and enjoy watching Jihne Mera Dil Luteya Punjabi Full Movie.
      6. -
      - -

      The download link is:

      - -

      Download Jihne Mera Dil Luteya Punjabi Full Movie

      - -

      We hope this article has helped you learn more about Jihne Mera Dil Luteya Punjabi Full Movie and how to download it.

      - -

      If you have any questions or feedback about Jihne Mera Dil Luteya Punjabi Full Movie, you can leave a comment below or contact us via email.

      - -

      Thank you for reading!

      -

      3cee63e6c2
      -
      -
      \ No newline at end of file diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Mujhse Shaadi Karogi 2 Hd Movie Download 720p Movies HOT.md b/spaces/quidiaMuxgu/Expedit-SAM/Mujhse Shaadi Karogi 2 Hd Movie Download 720p Movies HOT.md deleted file mode 100644 index 9d95b761b6c7407cfd115b9198047ee30103e7b1..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Mujhse Shaadi Karogi 2 Hd Movie Download 720p Movies HOT.md +++ /dev/null @@ -1,22 +0,0 @@ -
      -

      Mujhse Shaadi Karogi 2 Hd Movie Download 720p Movies: How to Watch Online for Free

      -

      Mujhse Shaadi Karogi 2 is a Bollywood romantic comedy film that was released in 2020. The film is a sequel to the 2004 hit Mujhse Shaadi Karogi, and stars Salman Khan, Priyanka Chopra, and Akshay Kumar in the lead roles. The film follows the hilarious antics of two men who compete for the love of a woman, while also dealing with a vengeful ex-boyfriend.

      -

      If you are looking for a way to watch Mujhse Shaadi Karogi 2 online for free, you have come to the right place. In this article, we will show you how to download Mujhse Shaadi Karogi 2 hd movie in 720p quality from various websites and platforms. We will also provide you with some tips and warnings to avoid any legal or security issues.

      -

      Mujhse Shaadi Karogi 2 Hd Movie Download 720p Movies


      Download Zip ››› https://geags.com/2uCquV



      -

      Where to Download Mujhse Shaadi Karogi 2 Hd Movie in 720p Quality?

      -

      There are many websites and platforms that offer Mujhse Shaadi Karogi 2 hd movie download in 720p quality. However, not all of them are safe or legal. Some of them may contain malware, viruses, or spyware that can harm your device or compromise your privacy. Some of them may also violate the copyright laws and put you at risk of legal action.

      -

      Therefore, we recommend that you use only trusted and reliable sources to download Mujhse Shaadi Karogi 2 hd movie in 720p quality. Some of the best options are:

      -
        -
      • Netflix: Netflix is one of the most popular and legal streaming platforms that offers a wide range of movies and shows. You can watch Mujhse Shaadi Karogi 2 online on Netflix with a subscription plan that starts from Rs. 199 per month. You can also download Mujhse Shaadi Karogi 2 hd movie in 720p quality on Netflix and watch it offline on your device.
      • -
      • Amazon Prime Video: Amazon Prime Video is another popular and legal streaming platform that offers a variety of movies and shows. You can watch Mujhse Shaadi Karogi 2 online on Amazon Prime Video with a subscription plan that costs Rs. 129 per month or Rs. 999 per year. You can also download Mujhse Shaadi Karogi 2 hd movie in 720p quality on Amazon Prime Video and watch it offline on your device.
      • -
      • Hotstar: Hotstar is a leading streaming platform that offers movies, shows, sports, and news. You can watch Mujhse Shaadi Karogi 2 online on Hotstar with a subscription plan that costs Rs. 299 per month or Rs. 1499 per year. You can also download Mujhse Shaadi Karogi 2 hd movie in 720p quality on Hotstar and watch it offline on your device.
      • -
      -

      How to Download Mujhse Shaadi Karogi 2 Hd Movie in 720p Quality from Netflix, Amazon Prime Video, or Hotstar?

      -

      The process of downloading Mujhse Shaadi Karogi 2 hd movie in 720p quality from Netflix, Amazon Prime Video, or Hotstar is very simple and easy. All you need to do is follow these steps:

      -
        -
      1. Download and install the Netflix, Amazon Prime Video, or Hotstar app on your device from the Google Play Store or the App Store.
      2. -
      3. Sign up or log in to your account with your email address and password.
      4. -
      5. Search for Mujhse Shaadi Karogi 2 in the app and select the movie from the results.
      6. -
      7. Tap on the download icon to save the movie to your device for offline viewing.

        d5da3c52bf
        -
        -
        \ No newline at end of file diff --git a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/modules/train/extract/extract_f0_rmvpe.py b/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/modules/train/extract/extract_f0_rmvpe.py deleted file mode 100644 index 751b62d38df837bf6f1f21d45c485dd8e5f5d113..0000000000000000000000000000000000000000 --- a/spaces/r3gm/Aesthetic_RVC_Inference_HF/lib/infer/modules/train/extract/extract_f0_rmvpe.py +++ /dev/null @@ -1,138 +0,0 @@ -import os -import sys -import traceback - -now_dir = os.getcwd() -sys.path.append(now_dir) -import logging - -import numpy as np - -from lib.infer.infer_libs.audio import load_audio - -logging.getLogger("numba").setLevel(logging.WARNING) - -n_part = int(sys.argv[1]) -i_part = int(sys.argv[2]) -i_gpu = sys.argv[3] -os.environ["CUDA_VISIBLE_DEVICES"] = str(i_gpu) -exp_dir = sys.argv[4] -is_half = sys.argv[5] -f = open("%s/extract_f0_feature.log" % exp_dir, "a+") - - -def printt(strr): - print(strr) - f.write("%s\n" % strr) - f.flush() - - -class FeatureInput(object): - def __init__(self, samplerate=16000, hop_size=160): - self.fs = samplerate - self.hop = hop_size - - self.f0_bin = 256 - self.f0_max = 1100.0 - self.f0_min = 50.0 - self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700) - self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700) - - def compute_f0(self, path, f0_method): - x = load_audio(path, self.fs) - # p_len = x.shape[0] // self.hop - if f0_method == "rmvpe": - if hasattr(self, "model_rmvpe") == False: - from lib.infer.infer_libs.rmvpe import RMVPE - - print("Loading rmvpe model") - self.model_rmvpe = RMVPE( - "assets/rmvpe/rmvpe.pt", is_half=is_half, device="cuda" - ) - f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) - return f0 - - def coarse_f0(self, f0): - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - self.f0_mel_min) * ( - self.f0_bin - 2 - ) / (self.f0_mel_max - self.f0_mel_min) + 1 - - # use 0 or 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > self.f0_bin - 1] = self.f0_bin - 1 - f0_coarse = np.rint(f0_mel).astype(int) - assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, ( - f0_coarse.max(), - f0_coarse.min(), - ) - return f0_coarse - - def go(self, paths, f0_method): - if len(paths) == 0: - printt("no-f0-todo") - else: - printt("todo-f0-%s" % len(paths)) - n = max(len(paths) // 5, 1) # 每个进程最多打印5条 - for idx, (inp_path, opt_path1, opt_path2) in enumerate(paths): - try: - if idx % n == 0: - printt("f0ing,now-%s,all-%s,-%s" % (idx, len(paths), inp_path)) - if ( - os.path.exists(opt_path1 + ".npy") == True - and os.path.exists(opt_path2 + ".npy") == True - ): - continue - featur_pit = self.compute_f0(inp_path, f0_method) - np.save( - opt_path2, - featur_pit, - allow_pickle=False, - ) # nsf - coarse_pit = self.coarse_f0(featur_pit) - np.save( - opt_path1, - coarse_pit, - allow_pickle=False, - ) # ori - except: - printt("f0fail-%s-%s-%s" % (idx, inp_path, traceback.format_exc())) - - -if __name__ == "__main__": - # exp_dir=r"E:\codes\py39\dataset\mi-test" - # n_p=16 - # f = open("%s/log_extract_f0.log"%exp_dir, "w") - printt(sys.argv) - featureInput = FeatureInput() - paths = [] - inp_root = "%s/1_16k_wavs" % (exp_dir) - opt_root1 = "%s/2a_f0" % (exp_dir) - opt_root2 = "%s/2b-f0nsf" % (exp_dir) - - os.makedirs(opt_root1, exist_ok=True) - os.makedirs(opt_root2, exist_ok=True) - for name in sorted(list(os.listdir(inp_root))): - inp_path = "%s/%s" % (inp_root, name) - if "spec" in inp_path: - continue - opt_path1 = "%s/%s" % (opt_root1, name) - opt_path2 = "%s/%s" % 
(opt_root2, name) - paths.append([inp_path, opt_path1, opt_path2]) - try: - featureInput.go(paths[i_part::n_part], "rmvpe") - except: - printt("f0_all_fail-%s" % (traceback.format_exc())) - # ps = [] - # for i in range(n_p): - # p = Process( - # target=featureInput.go, - # args=( - # paths[i::n_p], - # f0method, - # ), - # ) - # ps.append(p) - # p.start() - # for i in range(n_p): - # ps[i].join() diff --git a/spaces/r3gm/Ultimate-Vocal-Remover-WebUI/lib_v5/modules.py b/spaces/r3gm/Ultimate-Vocal-Remover-WebUI/lib_v5/modules.py deleted file mode 100644 index 4e77d2fb5b97c4ca0e6f6011e012f43e03a70b14..0000000000000000000000000000000000000000 --- a/spaces/r3gm/Ultimate-Vocal-Remover-WebUI/lib_v5/modules.py +++ /dev/null @@ -1,74 +0,0 @@ -import torch -import torch.nn as nn - - -class TFC(nn.Module): - def __init__(self, c, l, k, norm): - super(TFC, self).__init__() - - self.H = nn.ModuleList() - for i in range(l): - self.H.append( - nn.Sequential( - nn.Conv2d(in_channels=c, out_channels=c, kernel_size=k, stride=1, padding=k // 2), - norm(c), - nn.ReLU(), - ) - ) - - def forward(self, x): - for h in self.H: - x = h(x) - return x - - -class DenseTFC(nn.Module): - def __init__(self, c, l, k, norm): - super(DenseTFC, self).__init__() - - self.conv = nn.ModuleList() - for i in range(l): - self.conv.append( - nn.Sequential( - nn.Conv2d(in_channels=c, out_channels=c, kernel_size=k, stride=1, padding=k // 2), - norm(c), - nn.ReLU(), - ) - ) - - def forward(self, x): - for layer in self.conv[:-1]: - x = torch.cat([layer(x), x], 1) - return self.conv[-1](x) - - -class TFC_TDF(nn.Module): - def __init__(self, c, l, f, k, bn, dense=False, bias=True, norm=nn.BatchNorm2d): - - super(TFC_TDF, self).__init__() - - self.use_tdf = bn is not None - - self.tfc = DenseTFC(c, l, k, norm) if dense else TFC(c, l, k, norm) - - if self.use_tdf: - if bn == 0: - self.tdf = nn.Sequential( - nn.Linear(f, f, bias=bias), - norm(c), - nn.ReLU() - ) - else: - self.tdf = nn.Sequential( - nn.Linear(f, f // bn, bias=bias), - norm(c), - nn.ReLU(), - nn.Linear(f // bn, f, bias=bias), - norm(c), - nn.ReLU() - ) - - def forward(self, x): - x = self.tfc(x) - return x + self.tdf(x) if self.use_tdf else x - diff --git a/spaces/radames/MusicGen-Continuation/setup.py b/spaces/radames/MusicGen-Continuation/setup.py deleted file mode 100644 index 78a172b7c90003b689bde40b49cc8fe1fb8107d4..0000000000000000000000000000000000000000 --- a/spaces/radames/MusicGen-Continuation/setup.py +++ /dev/null @@ -1,65 +0,0 @@ -""" - Copyright (c) Meta Platforms, Inc. and affiliates. - All rights reserved. - - This source code is licensed under the license found in the - LICENSE file in the root directory of this source tree. 
- -""" - -from pathlib import Path - -from setuptools import setup, find_packages - - -NAME = 'audiocraft' -DESCRIPTION = 'Audio research library for PyTorch' - -URL = 'https://github.com/fairinternal/audiocraft' -AUTHOR = 'FAIR Speech & Audio' -EMAIL = 'defossez@meta.com' -REQUIRES_PYTHON = '>=3.8.0' - -for line in open('audiocraft/__init__.py'): - line = line.strip() - if '__version__' in line: - context = {} - exec(line, context) - VERSION = context['__version__'] - -HERE = Path(__file__).parent - -try: - with open(HERE / "README.md", encoding='utf-8') as f: - long_description = '\n' + f.read() -except FileNotFoundError: - long_description = DESCRIPTION - -REQUIRED = [i.strip() for i in open(HERE / 'requirements.txt') if not i.startswith('#')] - -setup( - name=NAME, - version=VERSION, - description=DESCRIPTION, - author_email=EMAIL, - long_description=long_description, - long_description_content_type='text/markdown', - author=AUTHOR, - url=URL, - python_requires=REQUIRES_PYTHON, - install_requires=REQUIRED, - extras_require={ - 'dev': ['coverage', 'flake8', 'mypy', 'pdoc3', 'pytest'], - }, - packages=find_packages(), - package_data={'audiocraft': ['py.typed']}, - include_package_data=True, - license='MIT License', - classifiers=[ - # Trove classifiers - # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers - 'License :: OSI Approved :: MIT License', - 'Topic :: Multimedia :: Sound/Audio', - 'Topic :: Scientific/Engineering :: Artificial Intelligence', - ], -) diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Dabangg 2 Movie In Tamil Free Download [HOT] Mp4.md b/spaces/raedeXanto/academic-chatgpt-beta/Dabangg 2 Movie In Tamil Free Download [HOT] Mp4.md deleted file mode 100644 index d257db55c65ff1309fad5f9da6881d8f903c8541..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Dabangg 2 Movie In Tamil Free Download [HOT] Mp4.md +++ /dev/null @@ -1,24 +0,0 @@ - -

        How to Watch Dabangg 2 Movie In Tamil For Free Online

        -

      Dabangg 2 is a 2012 Hindi action movie starring Salman Khan and Sonakshi Sinha. It is the sequel to the 2010 blockbuster Dabangg and was directed by Arbaaz Khan. The movie follows the adventures of Chulbul Pandey, a fearless cop who takes on a corrupt politician and a local goon.

        -

        Dabangg 2 Movie In Tamil Free Download Mp4


        DOWNLOADhttps://tinourl.com/2uL0T3



        -

        If you are a fan of Dabangg 2 and want to watch it in Tamil, you might be wondering how to do that without paying any money. Well, you are in luck because there are some ways to watch Dabangg 2 movie in Tamil for free online. Here are some of them:

        -
          -
      • Watch Dabangg 2 on Disney+ Hotstar: Disney+ Hotstar is a streaming service that offers a variety of movies and shows in different languages. You can watch Dabangg 2 in Tamil on Disney+ Hotstar at no extra cost if you already have a subscription or a trial account. You can also download the movie on your device for offline viewing[^1^].
        • -
        • Watch Dabangg 2 on Internet Archive: Internet Archive is a website that archives various digital content, including movies, music, books, and more. You can watch Dabangg 2 movie in Tamil on Internet Archive for free by downloading the MP4 files from the website[^2^]. However, the quality of the video might not be very good and the subtitles might not be accurate.
        • -
        • Watch Dabangg 2 on Torrent Sites: Torrent sites are websites that allow users to share and download files using peer-to-peer technology. You can watch Dabangg 2 movie in Tamil on torrent sites for free by downloading the torrent file or magnet link from the website and using a torrent client to download the movie[^3^]. However, this method might be illegal and risky as you might violate copyright laws and expose your device to malware.
        • -
        -

        These are some of the ways to watch Dabangg 2 movie in Tamil for free online. However, we recommend that you watch the movie legally and ethically by paying for it or using authorized streaming services. This way, you can support the filmmakers and enjoy the movie in high quality.

        - -

        If you have watched Dabangg 2 movie in Tamil for free online, you might be interested in some trivia and facts about the movie. Here are some of them:

        -

        -
          -
        1. Dabangg 2 is the second highest-grossing Bollywood movie of 2012, after Ek Tha Tiger, which also starred Salman Khan.
        2. -
        3. Dabangg 2 is the first and only movie to feature Salman Khan and his brothers Arbaaz Khan and Sohail Khan together on screen.
        4. -
        5. Dabangg 2 features Prakash Raj as the main villain. He is a famous actor in Tamil and Telugu movies.
        6. -
        7. Dabangg 2 features four songs composed by Sajid-Wajid and two songs composed by Lalit Pandit. The song "Fevicol Se" became a huge hit and featured Kareena Kapoor Khan in a special appearance.
        8. -
        9. Dabangg 2 won six awards at the 2013 Zee Cine Awards, including Best Actor (Salman Khan), Best Actress (Sonakshi Sinha), Best Film, Best Director (Arbaaz Khan), Best Action (Anl Arasu), and Best Song ("Fevicol Se").
        10. -
        -

        These are some of the trivia and facts about Dabangg 2 movie. We hope you enjoyed watching the movie and learning more about it.

        cec2833e83
        -
        -
        \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Download Chaar Sahibzaade - Rise of Banda Singh Bahadur 2 Full Movie in HD 720p A Must-See for Fans of Indian Cinema.md b/spaces/raedeXanto/academic-chatgpt-beta/Download Chaar Sahibzaade - Rise of Banda Singh Bahadur 2 Full Movie in HD 720p A Must-See for Fans of Indian Cinema.md deleted file mode 100644 index 1fd3341015fce10f7f6247b00827dbe59e57ddbe..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Download Chaar Sahibzaade - Rise of Banda Singh Bahadur 2 Full Movie in HD 720p A Must-See for Fans of Indian Cinema.md +++ /dev/null @@ -1,117 +0,0 @@ - -

        Keygen basketball scoreboard pro v2 42: What is it and how to use it?

        -

        If you are a basketball fan or a coach, you might have heard of basketball scoreboard pro v2 42, a software that allows you to turn your computer into a virtual scoreboard for your games. This software can help you keep track of the score, time, fouls, timeouts, possession, bonus, team names, logos, colors, sounds, and more. You can also customize the layout, size, fonts, colors, and display options of your scoreboard according to your preferences. You can even project your scoreboard onto a large screen or a wall using a projector or a TV.

        -

        However, this software is not free. You need to pay $149 to get a license key that will unlock all the features and functions of the software. If you don't want to spend that much money, you might be tempted to look for a keygen for this software. A keygen is a program that can generate valid serial numbers or license keys for various software products. By using a keygen, you can bypass the registration process and activate the software without paying anything.

        -

        Keygen basketball scoreboard pro v2 42


        Download ———>>> https://tinourl.com/2uL0Yz



        -

        But is it really worth it? Is it safe and legal to use a keygen for basketball scoreboard pro v2 42? How do you find, download, install, and use such a keygen? What are the pros and cons of doing so? In this article, we will answer these questions and more. We will also provide some alternative ways to get this software legally and safely.

        -

        How to download and install keygen basketball scoreboard pro v2 42

        -

        The first step to use a keygen for basketball scoreboard pro v2 42 is to find and download one from the internet. There are many websites that claim to offer free keygens for various software products, including this one. However, you need to be very careful when downloading anything from these sites. Many of them are full of malware, viruses, spyware, adware, or other harmful programs that can infect your computer and compromise your security and privacy. Some of them might even try to steal your personal or financial information or lock your files until you pay a ransom.

        -

        Therefore, before you download anything from these sites, you should always scan it with a reliable antivirus or anti-malware program. You should also read the user reviews and comments to see if other people have had any problems or issues with the file. You should also avoid clicking on any pop-ups, banners, ads, or links that might redirect you to other sites or download unwanted programs.

        -

        Once you have found a trustworthy site that offers a keygen for basketball scoreboard pro v2 42, you need to download it to your computer. The file size should be relatively small, usually less than 10 MB. After downloading it, you need to unzip it using a program like WinRAR or WinZip. Then you need to run the keygen.exe file as an administrator. This will open a window that looks something like this:

        - keygen window -

        This window will show you some information about the software product that you want to activate. It will also have a button that says "Generate" or "Create". By clicking on this button, you will get a random serial number or license key for basketball scoreboard pro v2 42. You need to copy this number somewhere safe.

        -

        The next step is to download and install basketball scoreboard pro v2 42 itself from its official website: https://www.pcscoreboards.com/basketballscoreboardprov2/. You can get a free trial version that will work for up to two minutes per session. To install it, you need to follow the instructions on the screen. After installing it, you need to launch it and enter the serial number that you got from the keygen. You can do this by clicking on the "Register" button on the main window of the software. This will open a dialog box that looks something like this:

        -

        How to crack basketball scoreboard pro v2 42
        -Basketball scoreboard pro v2 42 activation code
        -Basketball scoreboard pro v2 42 serial number
        -Download basketball scoreboard pro v2 42 full version
        -Basketball scoreboard pro v2 42 license key generator
        -Basketball scoreboard pro v2 42 free trial
        -Basketball scoreboard pro v2 42 patch
        -Basketball scoreboard pro v2 42 registration key
        -Basketball scoreboard pro v2 42 product key
        -Basketball scoreboard pro v2 42 crack download
        -Basketball scoreboard pro v2 42 keygen online
        -Basketball scoreboard pro v2 42 software review
        -Basketball scoreboard pro v2 42 features and benefits
        -Basketball scoreboard pro v2 42 system requirements
        -Basketball scoreboard pro v2 42 installation guide
        -Basketball scoreboard pro v2 42 user manual
        -Basketball scoreboard pro v2 42 tutorial video
        -Basketball scoreboard pro v2 42 support and help
        -Basketball scoreboard pro v2 42 refund policy
        -Basketball scoreboard pro v2 42 discount coupon
        -Basketball scoreboard pro v2 42 alternative software
        -Basketball scoreboard pro v2 42 comparison with other products
        -Basketball scoreboard pro v2 42 testimonials and feedback
        -Basketball scoreboard pro v2 42 pros and cons
        -Basketball scoreboard pro v2 42 best price and deal
        -Basketball scoreboard pro v2 42 upgrade and update
        -Basketball scoreboard pro v2 42 compatibility and integration
        -Basketball scoreboard pro v2 42 customization and configuration
        -Basketball scoreboard pro v2 42 tips and tricks
        -Basketball scoreboard pro v2 42 FAQ and Q&A
        -Basketball scoreboard pro v2 42 error and bug fix
        -Basketball scoreboard pro v2 42 backup and restore
        -Basketball scoreboard pro v2 42 uninstall and remove
        -Basketball scoreboard pro v2 42 security and privacy
        -Basketball scoreboard pro v2 42 performance and speed
        -Basketball scoreboard pro v2 42 reliability and quality
        -Basketball scoreboard pro v2 42 customer service and satisfaction
        -Basketball scoreboard pro v2 42 warranty and guarantee
        -Basketball scoreboard pro v2 42 bonus and gift
        -Basketball scoreboard pro v2 42 affiliate program and commission
        -How to use basketball scoreboard pro v2 42 for free
        -How to get basketball scoreboard pro v2 42 for cheap
        -How to buy basketball scoreboard pro v2 42 with paypal or credit card
        -How to sell basketball scoreboard pro v2 42 online or offline
        -How to make money with basketball scoreboard pro v2 42
        -How to create a basketball score board with basketball scoreboard pro v2 42
        -How to customize a basketball score board with basketball scoreboard pro v2 42
        -How to display a basketball score board with basketball scoreboard pro v2 42
        -How to manage a basketball score board with basketball scoreboard pro v2 42

        - register window -

        You need to paste the serial number into the text box and click on the "Register" button. This will activate the software and unlock all its features and functions. You can now use it without any time limit or restriction.

        -

        How to use keygen basketball scoreboard pro v2 42

        -

        After activating the software, you can start using it for your basketball games. The software has a simple and intuitive interface that lets you control all aspects of your scoreboard. You can use your mouse, keyboard, or a remote control device to operate it. You can also use hotkeys to access common functions quickly and easily.

        -

        The main window of the software shows you the current state of your scoreboard, including the score, time, fouls, timeouts, possession, bonus, team names, logos, colors, sounds, and more. You can change any of these elements by clicking on them or using the buttons on the toolbar. You can also access more options and settings by clicking on the "Options" button on the top right corner of the window.

        -

        Some of the things you can do with this software are:

      • Start, stop, pause, or reset the game clock
      • Adjust the score for each team
      • Add or subtract fouls for each team or player
      • Indicate which team has the ball possession or bonus
      • Set or clear timeouts for each team
      • Change the team names, logos, colors, and sounds
      • Customize the layout, size, fonts, colors, and display options of your scoreboard
      • Project your scoreboard onto a large screen or a wall using a projector or a TV
      • Save or load your scoreboard settings for different games or events
      • Record or export your game data for analysis or reporting

        You can find more detailed instructions and tutorials on how to use this software on its official website: https://www.pcscoreboards.com/basketballscoreboardprov2/.

        -

        Pros and cons of keygen basketball scoreboard pro v2 42

        -

        Using a keygen for basketball scoreboard pro v2 42 might seem like a good idea if you want to save money and enjoy all the features and functions of this software. However, there are also some drawbacks and risks that you should be aware of before doing so. Here are some of the pros and cons of using keygen basketball scoreboard pro v2 42:

        - - - - - - - - - -
        ProsCons
        -
          -
        • You can get a valid serial number or license key for free
        • -
        • You can activate the software without paying anything
        • -
        • You can use all the features and functions of the software without any limitation
        • -
        • You can customize your scoreboard according to your preferences
        • -
        • You can enhance your basketball games with a professional-looking scoreboard
        • -
        -
        -
          -
        • You might download malware, viruses, spyware, adware, or other harmful programs along with the keygen
        • -
        • You might compromise your security and privacy by exposing your personal or financial information to hackers or scammers
        • -
        • You might violate the intellectual property rights of the software developer and face legal consequences
        • -
        • You might lose access to updates, support, or warranty from the software developer
        • -
        • You might damage your reputation or credibility as an organizer or official by using cracked software
        • -
        -
        -

        As you can see, using a keygen for basketball scoreboard pro v2 42 has its advantages and disadvantages. You need to weigh them carefully and decide whether it is worth it or not.

        -

        Conclusion

        -

        In conclusion, keygen basketball scoreboard pro v2 42 is a program that can generate valid serial numbers or license keys for basketball scoreboard pro v2 42, a software that allows you to turn your computer into a virtual scoreboard for your basketball games. By using a keygen, you can activate the software without paying anything and use all its features and functions without any restriction.

        -

        However, using a keygen also comes with some risks and challenges. You might download malware or viruses along with the keygen that can harm your computer or steal your information. You might also break the law and face legal consequences for violating the intellectual property rights of the software developer. You might also lose access to updates, support, or warranty from the software developer. You might also damage your reputation or credibility as an organizer or official by using cracked software.

        -

        Therefore, we do not recommend using a keygen for basketball scoreboard pro v2 42. Instead, we suggest some alternative ways to get this software legally and safely. You can either buy a license key from the official website of the software developer or look for other similar software products that are free or cheaper. You can also try other methods of keeping score for your basketball games, such as using a physical scoreboard or an online service.

        -

        We hope this article has helped you understand what is keygen basketball scoreboard pro v2 42 and how to use it. We also hope you have learned about its pros and cons and its alternatives. If you have any questions or comments about this topic, please feel free to share them in the comments section below. Thank you for reading!

        -

        FAQs

        -

        What is a keygen?

        -

        A keygen is a program that can generate valid serial numbers or license keys for various software products.

        -

        What is basketball scoreboard pro v2 42?

        -

        Basketball scoreboard pro v2 42 is a software that allows you to turn your computer into a virtual scoreboard for your basketball games.

        -

        How do I use keygen basketball scoreboard pro v2 42?

        -

        You need to find and download a trustworthy keygen from the internet. Then you need to run it and generate a serial number for basketball scoreboard pro v2 42. Then you need to download and install basketball scoreboard pro v2 42 from its official website. Then you need to enter the serial number and activate the software.

        -

        Is it safe and legal to use keygen basketball scoreboard pro v2 42?

        -

        No, it is not safe and legal to use keygen basketball scoreboard pro v2 42. You might download malware or viruses along with the keygen that can harm your computer or steal your information. You might also break the law and face legal consequences for violating the intellectual property rights of the software developer.

        -

        What are some alternatives to keygen basketball scoreboard pro v2 42?

        -

        Some alternatives to keygen basketball scoreboard pro v2 42 are buying a license key from the official website of the software developer or looking for other similar software products that are free or cheaper. You can also try other methods of keeping score for your basketball games, such as using a physical scoreboard or an online service.

        -

        0a6ba089eb
        -
        -
        \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/FastMaint CMMS Maintenance Management Software Crack 22 Manage Facilities and Equipment Maintenance with No Cost.md b/spaces/raedeXanto/academic-chatgpt-beta/FastMaint CMMS Maintenance Management Software Crack 22 Manage Facilities and Equipment Maintenance with No Cost.md deleted file mode 100644 index bb1a77387843fc487789f23e7ce214201bbd9521..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/FastMaint CMMS Maintenance Management Software Crack 22 Manage Facilities and Equipment Maintenance with No Cost.md +++ /dev/null @@ -1,176 +0,0 @@ -
        -

        FastMaint CMMS Maintenance Management Software Crack 22: What You Need to Know

        -

        If you are looking for a way to use FastMaint CMMS maintenance management software without paying for it, you might be tempted to use a crack. A crack is a program or a file that modifies or bypasses the original software's protection mechanisms, such as serial numbers, activation codes, or license keys. However, using a crack is not only illegal, but also risky and unethical. In this article, we will explain what FastMaint CMMS is, what a crack is, why people use cracks, how to find and use FastMaint CMMS crack 22, and what are the drawbacks and dangers of using cracks.

        -

        fastmaint cmms maintenance management software crack 22


        Download Ziphttps://tinourl.com/2uL0wQ



        -

        What is FastMaint CMMS?

        -

        FastMaint CMMS is a computerized maintenance management system (CMMS) that helps you manage unplanned (breakdown) and planned (preventive) maintenance jobs for your equipment or facilities. It also helps you manage inventory, vendors, purchasing, work requests, reports, and more. You can use FastMaint CMMS to improve efficiency, reduce equipment downtime, and better resource management.

        -

        Features and benefits of FastMaint CMMS

        -

        Some of the features and benefits of FastMaint CMMS are:

        -
          -
        • Multiple user accounts to share and work with colleagues
        • -
        • Dashboards and reports that you can customize
        • -
        • Task templates for recurring jobs to reduce repetitive data entry
        • -
        • Faster setup by importing equipment and other assets from Excel files
        • -
        • Easy handling of maintenance work requests and creation of work orders
        • -
        • Avoid job delays due to missing parts by managing spare parts and supplies
        • -
        • Support for barcode scanning, email notifications, web access, mobile devices, and more
        • -
        -

        Editions and pricing of FastMaint CMMS

        -

        FastMaint CMMS is available in four editions:

        - - - - - - - - - - - - - - - - - - - - - - - - - - -
        Edition | Description | Pricing
        Standard | A single user version suitable for small to mid-size teams. | $995 one-time fee or $49/month subscription.
        Professional | A multi-user version that can work with Microsoft Access or SQL Server databases. | $2495 one-time fee or $99/month subscription.
        Web | A multi-user version that can be accessed using a web browser. | $3995 one-time fee or $149/month subscription.
        Cloud | A multi-user version that is hosted on SMGlobal's server. | $199/month subscription.
        -

        You can get a free 30-day trial of any edition of FastMaint CMMS from the SMGlobal website.

        -

        fastmaint cmms maintenance management software keygen 22
        -fastmaint cmms maintenance management software serial number 22
        -fastmaint cmms maintenance management software license code 22
        -fastmaint cmms maintenance management software activation key 22
        -fastmaint cmms maintenance management software registration code 22
        -fastmaint cmms maintenance management software full version download 22
        -fastmaint cmms maintenance management software free trial download 22
        -fastmaint cmms maintenance management software patch 22
        -fastmaint cmms maintenance management software cracked apk 22
        -fastmaint cmms maintenance management software modded apk 22
        -fastmaint cmms maintenance management software hacked apk 22
        -fastmaint cmms maintenance management software unlocked apk 22
        -fastmaint cmms maintenance management software premium apk 22
        -fastmaint cmms maintenance management software pro apk 22
        -fastmaint cmms maintenance management software cracked for pc 22
        -fastmaint cmms maintenance management software cracked for mac 22
        -fastmaint cmms maintenance management software cracked for linux 22
        -fastmaint cmms maintenance management software cracked for windows 22
        -fastmaint cmms maintenance management software cracked for android 22
        -fastmaint cmms maintenance management software cracked for ios 22
        -fastmaint cmms maintenance management software torrent download 22
        -fastmaint cmms maintenance management software magnet link download 22
        -fastmaint cmms maintenance management software direct link download 22
        -fastmaint cmms maintenance management software mirror link download 22
        -fastmaint cmms maintenance management software alternative download 22
        -fastmaint cmms maintenance management software review 22
        -fastmaint cmms maintenance management software features 22
        -fastmaint cmms maintenance management software benefits 22
        -fastmaint cmms maintenance management software pros and cons 22
        -fastmaint cmms maintenance management software comparison 22
        -fastmaint cmms maintenance management software vs other maintenance software 22
        -fastmaint cmms maintenance management software tutorial 22
        -fastmaint cmms maintenance management software guide 22
        -fastmaint cmms maintenance management software manual 22
        -fastmaint cmms maintenance management software tips and tricks 22
        -fastmaint cmms maintenance management software best practices 22
        -fastmaint cmms maintenance management software case studies 22
        -fastmaint cmms maintenance management software testimonials 22
        -fastmaint cmms maintenance management software customer reviews 22
        -fastmaint cmms maintenance management software user feedbacks 22
        -fastmaint cmms maintenance management software pricing plans 22
        -fastmaint cmms maintenance management software discount codes 22
        -fastmaint cmms maintenance management software coupon codes 22
        -fastmaint cmms maintenance management software promo codes 22
        -fastmaint cmms maintenance management software deals and offers 22
        -fastmaint cmms maintenance management software free alternatives 22
        -fastmaint cmms maintenance management software open source alternatives 22
        -how to install fastmaint cmms maintenance management software crack 22
        -how to use fastmaint cmms maintenance management software crack 22
        -how to uninstall fastmaint cmms maintenance management software crack 2

        -

        What is a crack?

        -

        A crack is a program or a file that modifies or bypasses the original software's protection mechanisms, such as serial numbers, activation codes, or license keys. A crack allows you to use the software without paying for it or following its terms and conditions. A crack can also refer to the act of cracking or breaking the software's protection mechanisms using various tools or techniques.

        -

        Definition and types of cracks

        -

        A crack can be defined as any modification or alteration that allows unauthorized use of a software product. There are different types of cracks depending on the method or purpose of cracking. Some common types of cracks are:

        -
          -
        • Patch: A patch is a small program that modifies the original software's executable file or code to remove or bypass the protection mechanisms.
        • -
        • Keygen: A keygen is a program that generates valid serial numbers, activation codes, or license keys for the original software.
        • -
        • Loader: A loader is a program that runs before the original software and modifies its behavior or memory to allow unauthorized use.
        • -
        • No-CD/DVD: A no-CD/DVD is a modified executable file or code that allows the original software to run without requiring the original CD/DVD in the drive.
        • -
        • Cheat: A cheat is a program or file that modifies the original software's gameplay or features to give an unfair advantage to the user.
        • -
        -

        Risks and consequences of using cracks

        -

        Using cracks may seem like an easy way to save money or get more features from a software product, but it comes with many risks and consequences. Some of the risks and consequences of using cracks are:

        -
          -
        • Legal issues: Using cracks is illegal in most countries and jurisdictions. It violates the intellectual property rights of the software developers and publishers. It can result in fines, lawsuits, criminal charges, or even jail time.
        • -
        • Security issues: Using cracks can expose your computer or device to malware, viruses, spyware, ransomware, trojans, worms, etc. These malicious programs can steal your personal information, damage your files, encrypt your data, hijack your browser, monitor your activities, etc.
        • -
        • Performance issues: Using cracks can affect your computer or device's performance negatively. Cracks can cause errors, crashes, freezes, slowdowns, compatibility issues, etc. They can also interfere with other programs or processes on your system.
        • -glitches, missing features, outdated versions, etc. They can also prevent you from getting updates, patches, support, or warranty from the software developers or publishers. -
        • Ethical issues: Using cracks is unethical and unfair to the software developers and publishers who invest their time, money, and effort to create and distribute their software products. It also harms the software industry and the economy by reducing the incentives and revenues for innovation and development.
        • -
        -

        Why do people use cracks?

        -

        Despite the risks and consequences of using cracks, some people still choose to use them for various reasons. Some of the common motivations and reasons for using cracks are:

        -
          -
        • Financial reasons: Some people use cracks to save money or avoid paying for a software product that they cannot afford or do not want to pay for.
        • -
        • Curiosity reasons: Some people use cracks to explore or test a software product that they are interested in or want to learn more about.
        • -
        • Challenge reasons: Some people use cracks to challenge themselves or prove their skills in cracking or hacking a software product.
        • -
        • Revenge reasons: Some people use cracks to retaliate or protest against a software developer or publisher that they dislike or disagree with.
        • -
        • Ignorance reasons: Some people use cracks without knowing or understanding the risks and consequences of using them.
        • -
        -

        Alternatives and solutions to using cracks

        -

        Instead of using cracks, there are better alternatives and solutions that can help you use a software product legally, safely, and ethically. Some of the alternatives and solutions are:

        -
          -
        • Free or open source software: There are many free or open source software products that you can use without paying or violating any intellectual property rights. These software products are usually created and maintained by a community of developers and users who share their code and resources. You can also modify or customize these software products according to your needs or preferences.
        • -
        • Trial or demo versions: There are many software products that offer trial or demo versions that you can use for a limited time or with limited features. These versions allow you to test or evaluate a software product before buying it. You can also compare different software products and choose the one that suits you best.
        • -
        • Discounts or coupons: There are many software products that offer discounts or coupons that you can use to buy them at a lower price. These discounts or coupons can be found on the official websites of the software developers or publishers, online platforms, magazines, newsletters, etc. You can also look for seasonal sales, promotions, bundles, etc. that can help you save money.
        • -
        • Licenses or subscriptions: There are many software products that offer different types of licenses or subscriptions that you can choose from depending on your needs or budget. These licenses or subscriptions can be for individual users, multiple users, organizations, etc. They can also be for a lifetime, a year, a month, etc. You can also cancel or renew your license or subscription at any time.
        • -
        • Support or donations: There are many software products that rely on support or donations from their users or fans to continue their development and distribution. These software products are usually free or low-cost but offer high-quality and functionality. You can support or donate to these software products by giving feedback, reporting bugs, spreading the word, contributing code, making suggestions, etc. You can also donate money, goods, services, etc. to show your appreciation and gratitude.
        • -
        -

        How to find and use FastMaint CMMS crack 22?

        -

        If you still want to find and use FastMaint CMMS crack 22 despite the risks and consequences of using cracks, here are some sources and methods of finding it and some steps and precautions of using it.

        -

        Sources and methods of finding FastMaint CMMS crack 22

        -

        Some of the sources and methods of finding FastMaint CMMS crack 22 are:

        -
          -
        • Search engines: You can use search engines like Google, Bing, Yahoo, etc. to look for FastMaint CMMS crack 22 by typing keywords like "FastMaint CMMS crack 22", "FastMaint CMMS keygen", "FastMaint CMMS patch", etc. You can also use advanced search options like filters, operators, modifiers, etc. to narrow down your search results.
        • -YouTube, SoundCloud, etc. that offer or link to FastMaint CMMS crack 22. You can also use the comments, ratings, reviews, etc. of these websites to check the quality and reliability of FastMaint CMMS crack 22. -
        • Forums: You can join forums that discuss or provide cracks like CrackBerry, CrackWatch, CrackStatus, etc. that may have FastMaint CMMS crack 22. You can also ask for help or advice from other forum members who may have FastMaint CMMS crack 22.
        • -
        • Social media: You can follow social media accounts or pages that post or share cracks like Facebook, Twitter, Instagram, etc. that may have FastMaint CMMS crack 22. You can also use hashtags, mentions, tags, etc. to find FastMaint CMMS crack 22 on social media.
        • -
        • Torrents: You can use torrents or peer-to-peer networks like BitTorrent, uTorrent, qBittorrent, etc. to download FastMaint CMMS crack 22 from other users who have it. You can also use trackers, magnets, seeds, leeches, etc. to find and download FastMaint CMMS crack 22 faster and easier.
        • -
        -

        Steps and precautions of using FastMaint CMMS crack 22

        -

        Some of the steps and precautions of using FastMaint CMMS crack 22 are:

        -
          -
        • Backup: Before using FastMaint CMMS crack 22, you should backup your important files and data on your computer or device. This will help you restore them in case FastMaint CMMS crack 22 causes any damage or loss to them.
        • -
        • Scan: Before using FastMaint CMMS crack 22, you should scan it with a reliable antivirus or anti-malware program. This will help you detect and remove any malware or virus that may be hidden in FastMaint CMMS crack 22.
        • -
        • Disable: Before using FastMaint CMMS crack 22, you should disable your internet connection and any security software or firewall on your computer or device. This will help you avoid any interference or detection from the original software developer or publisher or any law enforcement agency.
        • -
        • Extract: Before using FastMaint CMMS crack 22, you should extract it from its compressed file format using a suitable program like WinRAR, WinZip, 7-Zip, etc. This will help you access and use FastMaint CMMS crack 22 properly.
        • -
        • Run: Before using FastMaint CMMS crack 22, you should run it as an administrator or with elevated privileges on your computer or device. This will help you avoid any permission or compatibility issues with FastMaint CMMS crack 22.
        • -
        • Apply: Before using FastMaint CMMS crack 22, you should apply it to the original software's executable file or code according to the instructions provided by the cracker or the source of FastMaint CMMS crack 22. This will help you modify or bypass the original software's protection mechanisms and use it without paying for it or following its terms and conditions.
        • -
        • Enjoy: After using FastMaint CMMS crack 22, you should enjoy using the software product for your maintenance management needs. However, you should also be aware of the risks and consequences of using cracks and be prepared to face them if they occur.
        • -
        -

        Conclusion

        - security issues, performance issues, quality issues, and ethical issues. There are better alternatives and solutions that can help you use FastMaint CMMS legally, safely, and ethically. If you still want to find and use FastMaint CMMS crack 22, you should be careful and cautious and follow the steps and precautions of using it. We hope this article has helped you understand what FastMaint CMMS crack 22 is and what you need to know about it.

        -

        FAQs

        -

        Here are some frequently asked questions about FastMaint CMMS crack 22:

        -
          -
        1. What is the difference between a crack and a serial number?
        2. -

          A crack is a program or a file that modifies or bypasses the original software's protection mechanisms, such as serial numbers, activation codes, or license keys. A serial number is a unique code that is assigned to each copy of the original software and is used to activate or register it.

          -
        3. Is using FastMaint CMMS crack 22 safe?
        4. -

          No, using FastMaint CMMS crack 22 is not safe. It can expose your computer or device to malware, viruses, spyware, ransomware, trojans, worms, etc. These malicious programs can steal your personal information, damage your files, encrypt your data, hijack your browser, monitor your activities, etc.

          -
        5. Is using FastMaint CMMS crack 22 legal?
        6. -

          No, using FastMaint CMMS crack 22 is not legal. It violates the intellectual property rights of the software developers and publishers. It can result in fines, lawsuits, criminal charges, or even jail time.

          -
        7. Is using FastMaint CMMS crack 22 ethical?
        8. -

          No, using FastMaint CMMS crack 22 is not ethical. It harms the software developers and publishers who invest their time, money, and effort to create and distribute their software products. It also harms the software industry and the economy by reducing the incentives and revenues for innovation and development.

          -
        9. Where can I get FastMaint CMMS without using a crack?
        10. -

          You can get FastMaint CMMS without using a crack from the official website of SMGlobal Inc. You can choose from four editions of FastMaint CMMS depending on your needs or budget. You can also get a free 30-day trial of any edition of FastMaint CMMS from the SMGlobal website.

          -
        -

        0a6ba089eb
        -
        -
        \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Free Download Sayap Kecil Garuda A Film that Shows the Power of Goodness and Patriotism.md b/spaces/raedeXanto/academic-chatgpt-beta/Free Download Sayap Kecil Garuda A Film that Shows the Power of Goodness and Patriotism.md deleted file mode 100644 index f8cff83999649896c16a51764f86eb286b7aed31..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Free Download Sayap Kecil Garuda A Film that Shows the Power of Goodness and Patriotism.md +++ /dev/null @@ -1,120 +0,0 @@ -
        -

        Free Download Sayap Kecil Garuda: A Heartwarming Indonesian Movie

        -

        Have you ever heard of Sayap Kecil Garuda? It is a 2014 Indonesian movie that tells the story of a teenager who struggles to memorize Pancasila, the five principles that form the basis of the Indonesian state. If you are looking for a movie that is inspiring, educational, and entertaining, you should definitely watch Sayap Kecil Garuda. In this article, we will tell you everything you need to know about this movie, including its plot, cast, crew, themes, messages, and how to download it for free.

        -

        The Plot of Sayap Kecil Garuda

        -

        The movie follows the life of Pulung (played by Rizky Black), a 14-year-old boy who lives in a rural village in West Java. Pulung has a low memorization ability, which makes him unable to remember things easily. He often forgets his homework, his chores, his friends' names, and even his own birthday. He also has a hard time memorizing Pancasila, the five principles that every Indonesian citizen should know and uphold. These principles are:

        -

        free download sayap kecil garuda





        -
          -
        1. Belief in the One and Only God
        2. A just and civilized humanity
        3. The unity of Indonesia
        4. Democracy guided by the inner wisdom in the unanimity arising out of deliberations amongst representatives
        5. Social justice for all Indonesians
        -

        Pulung's lack of memorization skills makes him a target of ridicule and bullying at school. His classmates call him "Pulung Pancasila" or "Pulung Pansos" (short for "Pancasila Sosial", meaning "socially clueless"). His teachers are also disappointed with him and doubt his intelligence. His parents are worried about his future and try to help him improve his memory by giving him various supplements and remedies.

        -

        One day, Pulung's school announces a competition to select the best student who can recite Pancasila and explain its meaning. The winner will represent the school at a national event in Jakarta. Pulung decides to join the competition, hoping to prove himself and make his parents proud. He asks his friends, his teachers, his neighbors, and even strangers to help him learn Pancasila. Along the way, he discovers the history, the values, and the relevance of Pancasila in his daily life.

        -

        The climax of the movie is when Pulung delivers his speech at the school competition. He impresses everyone with his eloquence, his confidence, and his understanding of Pancasila. He also reveals a surprising fact about his memorization ability that changes everyone's perception of him. He wins the competition and earns the respect and admiration of his family, friends, teachers, and community.

        -

        The Cast and Crew of Sayap Kecil Garuda

        -

        The movie was directed by Aditya Gumay, a veteran filmmaker and actor who has been involved in various Indonesian movies and TV shows since 1980. He is known for his works such as Cinta Fitri, Si Doel Anak Sekolahan, Bidadari, Kawin Gantung, Tukang Bubur Naik Haji, Anak-Anak Manusia, Garasi, Perempuan Berkalung Sorban, Mengejar Mas-Mas, Perahu Kertas, Surat Kecil untuk Tuhan, Tenggelamnya Kapal Van Der Wijck, Ayat-Ayat Cinta 2, Dilan 1990, Dilan 1991, Dua Garis Biru, Milea: Suara dari Dilan, Nanti Kita Cerita tentang Hari Ini, Mariposa, Tarung Sarung, Bucin, Seperti Hujan yang Jatuh ke Bumi, Guru-Guru Gokil, Nanti Kita Cerita tentang Hari Ini 2, Dilan 1992, Dilan 1993, Dilan 1994 -

        He also founded Sanggar Ananda and Teater Kawula Muda in 1986, which are art studios that nurture young talents in acting and performing. Some of the famous actors who started their careers from his studios are Olga Syahputra, Ruben Onsu, Okky Lukman, Indra Bekti, Jessica Mila, and Indra Jaylani Shahab. He also released an album with AB Utomo titled Hari Ini Aku Jatuh Cinta in the 1990s.

        -

        The actors who starred in Sayap Kecil Garuda are Rizky Black as Pulung, Reza Artamevia as Pulung's mother, Gatot Brajamusti as Abah Pulung, Guntur Bumi as Fuad Idris, Baby Mamesa as Asih, Aaliyah Massaid as Ida, Deddy Mizwar as the principal, Diza Refengga as Fandi, Adam Syachrizal as Dadang, and Elma Theana as Pulung's teacher. They all gave convincing and touching performances that brought the characters to life.

        -

        The production company behind the movie is Brajamusti Films, which is owned by Gatot Brajamusti and his family. The company has produced several movies and TV shows such as Tina Toon dan Lenong Bocah, Emak Ingin Naik Haji, Tendangan dari Langit, Cinta Suci Zahrana, Dibalik 98, and others. The company aims to produce quality films that can inspire and educate the audience.

        -

        The movie received one nomination at the 2014 Indonesian Movie Awards for Best Child Actor for Rizky Black. It also received positive reviews from critics and audiences who praised its story, message, direction, and acting. The movie was considered a refreshing and uplifting addition to the Indonesian cinema scene.

        -


        -

        The Themes and Messages of Sayap Kecil Garuda

        -

        The movie explores several themes and messages that are relevant and important for the Indonesian society. Some of these themes and messages are:

        -
          -
        • The importance of Pancasila as the foundation of Indonesia. The movie shows how Pancasila is not just a set of principles that need to be memorized, but a way of life that guides the behavior and attitude of every Indonesian citizen. The movie also highlights the history and significance of Pancasila in shaping the identity and unity of Indonesia as a diverse and multicultural nation.
        • The value of education and perseverance. The movie portrays how education is not only about academic achievement, but also about character development and moral values. The movie also demonstrates how perseverance is essential to overcome challenges and achieve goals. The movie inspires the audience to never give up on learning and improving themselves.
        • The role of family and community in supporting each other. The movie depicts how family and community are the sources of strength and motivation for Pulung. The movie shows how Pulung's parents, friends, teachers, neighbors, and even strangers help him in his journey to learn Pancasila. The movie also illustrates how Pulung's success is not only his own, but also a collective achievement of his family and community.
        • The diversity and unity of Indonesia. The movie celebrates the diversity and unity of Indonesia by featuring various characters from different backgrounds, cultures, religions, ethnicities, regions, and professions. The movie shows how they all respect and appreciate each other's differences while sharing a common bond as Indonesians. The movie also emphasizes how Pancasila is the glue that holds them together as one nation.
        -

        How to Download Sayap Kecil Garuda for Free

        -

        If you are interested in watching Sayap Kecil Garuda but do not have access to a DVD or a streaming service that offers it, you might be tempted to download it for free from the internet. However, before you do that, you should be aware of the legal and ethical issues of downloading movies for free.

        -

        First of all, downloading movies for free without permission from the copyright holders is illegal in most countries. You could face legal consequences such as fines or even jail time if you are caught doing so. You could also expose your computer or device to viruses or malware that could harm your data or privacy.

        -

        , you are hurting their livelihood and discouraging them from making more movies in the future.

        -

        Therefore, we strongly advise you to watch Sayap Kecil Garuda legally and ethically, either by buying or renting a DVD, or by subscribing to a streaming service that offers it. However, if you still insist on downloading it for free, here are some possible sources and platforms that you can try:

        -
          -
        • SoundCloud: This is a music streaming platform, but it also has some audio files of movies, including Sayap Kecil Garuda. You can download the audio file for free and listen to it offline. However, you will not be able to see the video or the subtitles.
        • -
        • REALXXI: This is a movie streaming site that claims to offer Sayap Kecil Garuda for free. You can watch it online or download it to your device. However, the site is not secure and may contain malware or viruses. You may also encounter pop-up ads or redirects that could harm your device or privacy.
        • -
        • IMDb: This is a reputable movie database site that provides information and reviews of movies, including Sayap Kecil Garuda. You can watch the trailer and some clips of the movie for free, but you cannot download the full movie. However, you can find links to other sites that offer the movie legally, such as Amazon Prime Video or iTunes.
        • -
        -

        If you decide to download Sayap Kecil Garuda from any of these sources, here are some steps and tips that you should follow:

        -
          -
        1. Make sure you have a reliable internet connection and enough storage space on your device.
        2. -
        3. Use a VPN service to hide your IP address and protect your identity online.
        4. -
        5. Use an antivirus software to scan your device and the downloaded file for any malware or viruses.
        6. -
        7. Use a video player that supports various file formats and subtitles.
        8. -
        9. Enjoy watching Sayap Kecil Garuda offline!
        10. -
        -

        Conclusion

        -

        Sayap Kecil Garuda is a heartwarming Indonesian movie that tells the story of a teenager who struggles to memorize Pancasila, the five principles of Indonesia. The movie has a captivating plot, a talented cast, a skilled director, and a meaningful message. The movie also celebrates the diversity and unity of Indonesia and its culture. If you are looking for a movie that is inspiring, educational, and entertaining, you should definitely watch Sayap Kecil Garuda.

        -

        However, we recommend that you watch it legally and ethically, either by buying or renting a DVD, or by subscribing to a streaming service that offers it. Downloading movies for free without permission is illegal and unethical, and could harm your device or privacy. If you still want to download it for free, you should be careful and use the sources and platforms that we have suggested above.

        -

        We hope this article has helped you learn more about Sayap Kecil Garuda and how to download it for free. Thank you for reading!

        -

        Frequently Asked Questions

        -

        Here are some common questions that people might have about Sayap Kecil Garuda and how to download it for free:

        -
          -
        1. What does Sayap Kecil Garuda mean?

          Sayap Kecil Garuda means "The Little Wings of Garuda". Garuda is a mythical bird-like creature that is the national symbol of Indonesia. It also represents Pancasila, the five principles of Indonesia. The title implies that Pulung, the main character, is like a young Garuda who learns to fly with his wings of Pancasila.

          -
        2. What is Pancasila?

          Pancasila is the official philosophical foundation of Indonesia. It consists of five principles that every Indonesian citizen should know and uphold. These principles are: Belief in the One and Only God; A just and civilized humanity; The unity of Indonesia; Democracy guided by the inner wisdom in the unanimity arising out of deliberations amongst representatives; Social justice for all Indonesians.

          -
        3. Where can I watch Sayap Kecil Garuda legally?

          You can watch Sayap Kecil Garuda legally by buying or renting a DVD from online or offline stores, or by subscribing to a streaming service that offers it, such as Amazon Prime Video or iTunes. You can also find links to these sites on IMDb.

          -
        4. Is downloading movies for free illegal?

          Yes, downloading movies for free without permission from the copyright holders is illegal in most countries. You could face legal consequences such as fines or even jail time if you are caught doing so. You could also expose your device or privacy to malware or viruses that could harm your data.

          -
        5. Is downloading movies for free unethical?

          Yes, downloading movies for free without paying for them is unethical because it deprives the filmmakers of their rightful income. Making movies is not cheap or easy; it requires a lot of time, effort, money, talent, and resources. By downloading movies for free without supporting them financially, you are hurting their livelihood and discouraging them from making more movies in the future.

          -

          0a6ba089eb
          -
          -
          \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Media Encoder Cc Crack Amtlib Dll 588.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Media Encoder Cc Crack Amtlib Dll 588.md deleted file mode 100644 index 18c00ffa12fc1a883bd14be5f0a7bf7c697d0885..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Adobe Media Encoder Cc Crack Amtlib Dll 588.md +++ /dev/null @@ -1,6 +0,0 @@ -

          adobe media encoder cc crack amtlib dll 588





          -
          -sencha architect activation code crack sencha architect 3 activation code ... adobe media encoder cc crack amtlib dll 588 · irdeto 2 decryption ... 4d29de3e1b
          -
          -
          -

          diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Ampongan Tax Reviewer Pdf Free !NEW!.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Ampongan Tax Reviewer Pdf Free !NEW!.md deleted file mode 100644 index e329d097f95f320ba735fb4bfa4425b9d3238d62..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Ampongan Tax Reviewer Pdf Free !NEW!.md +++ /dev/null @@ -1,6 +0,0 @@ -

          Ampongan Tax Reviewer Pdf Free





          -
          -marijuana and tobacco. Taxes_are imposed by governments, not_by the_people. (1)_Corporations -are_not_tax_payers. (2) Governmental entities, which include cities, states,_counties, and_regions, are different than_corporations. Therefore, the words “taxpayer” and “tax” in this context do not apply to governmental entities. In this case, these words apply to only the individual sellers and smokers of these substances. A consumer of the substances is not a “taxpayer.” For these reasons, the cigarette tax is not a sin tax. 8. Business corporations are widely recognized as separate and distinct from the state, and they cannot, therefore, be held responsible for the conduct of the state. (1) Every business entity in the United States has two classes of capital: (a) Municipal capital, which is money, labor, buildings, and equipment owned by the state, and (b) Private capital, which is money, labor, buildings, and equipment owned by the business. Private capital consists of corporate stock and bond stock. Private capital may be public or private. The nature of private capital is that it is not subject to the control of the state, and it is not subject to governmental regulation. (2) Some stock is purchased at a stock exchange for investment purposes. Investors in the stock are the owners of the business corporation. Private capital does not produce any revenues, except the dividends and other income earned on its investment, and it does not perform any of the services provided by the state. Private capital is separate and distinct from the state and cannot be held responsible for the conduct of the state. (3) Municipal capital performs services for the state, and the state collects taxes from municipalities to cover its expenses. This is the rationale for taxing a corporation on the basis of municipal capital. But no statute, and no court case, holds that corporations or corporations’ capital are responsible for the conduct of the state. (4) Revenue_generating_governmental_entities, such as states and counties, are not corporations or businesses that have individual human owners. They are not separate and distinct from the state. The state is a single entity, and revenue-generating governmental entities are merely parts of the state. (5) Corporations are different from states and are not “responsible” for state action. (6) Corporations do not generate revenue 4fefd39f24
          -
          -
          -

          diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/detectors/fsaf.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/detectors/fsaf.py deleted file mode 100644 index 81ed1bdef1a8957077788397422725c83e3ffed2..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/detectors/fsaf.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ..builder import DETECTORS -from .single_stage import SingleStageDetector - - -@DETECTORS.register_module() -class FSAF(SingleStageDetector): - """Implementation of `FSAF `_""" - - def __init__(self, - backbone, - neck, - bbox_head, - train_cfg=None, - test_cfg=None, - pretrained=None, - init_cfg=None): - super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg, - test_cfg, pretrained, init_cfg) diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/utils/profiling.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/utils/profiling.py deleted file mode 100644 index 2f53f456c72db57bfa69a8d022c92d153580209e..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/utils/profiling.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import contextlib -import sys -import time - -import torch - -if sys.version_info >= (3, 7): - - @contextlib.contextmanager - def profile_time(trace_name, - name, - enabled=True, - stream=None, - end_stream=None): - """Print time spent by CPU and GPU. - - Useful as a temporary context manager to find sweet spots of code - suitable for async implementation. - """ - if (not enabled) or not torch.cuda.is_available(): - yield - return - stream = stream if stream else torch.cuda.current_stream() - end_stream = end_stream if end_stream else stream - start = torch.cuda.Event(enable_timing=True) - end = torch.cuda.Event(enable_timing=True) - stream.record_event(start) - try: - cpu_start = time.monotonic() - yield - finally: - cpu_end = time.monotonic() - end_stream.record_event(end) - end.synchronize() - cpu_time = (cpu_end - cpu_start) * 1000 - gpu_time = start.elapsed_time(end) - msg = f'{trace_name} {name} cpu_time {cpu_time:.2f} ms ' - msg += f'gpu_time {gpu_time:.2f} ms stream {stream}' - print(msg, end_stream) diff --git a/spaces/romero61/hendata/pages/06-applayout.py b/spaces/romero61/hendata/pages/06-applayout.py deleted file mode 100644 index 82951099e1855a9a49985db3fcc10981b84c78be..0000000000000000000000000000000000000000 --- a/spaces/romero61/hendata/pages/06-applayout.py +++ /dev/null @@ -1,344 +0,0 @@ -import os -import sys -import solara -import datetime -import psycopg2 -import json -import pandas as pd -from psycopg2 import pool -import leafmap -import geopandas as gpd -from shapely.geometry import Point -from leafmap.toolbar import change_basemap - - -# Database connection parameters -key_content = os.environ.get('DATABASE_SECRETS') -key_dict = json.loads(key_content) -connection_pool = pool.SimpleConnectionPool(1, 10, **key_dict) - -def get_columns(): - conn = connection_pool.getconn() - with conn: - with conn.cursor() as cur: - cur.execute("SELECT column_name FROM information_schema.columns WHERE table_name = 'clientes_test';") - columns = [col[0] for col in cur.fetchall()] - connection_pool.putconn(conn) - return columns - -def fetch_data(): - conn = connection_pool.getconn() - with conn: - with conn.cursor() as cur: - cur.execute("SELECT * FROM clientes_test;") - data = cur.fetchall() - 
connection_pool.putconn(conn) - df = pd.DataFrame(data, columns=get_columns()) - return df - - -class Map(leafmap.Map): - def __init__(self, **kwargs): - super().__init__(**kwargs) - # Add what you want below - self.add_basemap("CartoDB.DarkMatter") - change_basemap(self) - - -@solara.component -def Page(): - # Data - columns = get_columns() - inputs = {col: solara.use_state('') for col in columns} - selected_row, set_selected_row = solara.use_state(None) - data, set_data = solara.use_state(fetch_data()) - - - - - def handle_submit(): - conn = connection_pool.getconn() - with conn: - with conn.cursor() as cur: - columns_str = ", ".join(columns[1:]) # Exclude 'id' since it's auto-incremented - values_str = ", ".join(["%s"] * len(columns[1:])) - cur.execute(f"INSERT INTO clientes_test ({columns_str}) VALUES ({values_str});", [inputs[col][0] for col in columns[1:]]) - conn.commit() - connection_pool.putconn(conn) - set_data(fetch_data()) - - def handle_update(): - conn = connection_pool.getconn() - with conn: - with conn.cursor() as cur: - if selected_row is not None: - set_str = ", ".join([f"{col} = %s" for col in columns[1:]]) - cur.execute(f"UPDATE clientes_test SET {set_str} WHERE id = {selected_row};", [inputs[col][0] for col in columns[1:]]) - conn.commit() - connection_pool.putconn(conn) - set_data(fetch_data()) - - def handle_delete(): - conn = connection_pool.getconn() - with conn: - with conn.cursor() as cur: - if selected_row is not None: - cur.execute(f"DELETE FROM clientes_test WHERE id = {selected_row};") - conn.commit() - connection_pool.putconn(conn) - set_data(fetch_data()) - - def handle_row_selection(row_index): - conn = connection_pool.getconn() - with conn: - with conn.cursor() as cur: - cur.execute("SELECT * FROM clientes_test WHERE id = %s;", (row_index,)) - row_data = cur.fetchone() - for idx, col in enumerate(columns): - inputs[col][1](str(row_data[idx])) - connection_pool.putconn(conn) - - - - - - zoom = solara.reactive(2) - center = solara.reactive((20, 0)) - - # Convert decimal columns to float - data['cod_cliente'] = data['cod_cliente'].astype(float) - - - - # Placeholder banner image at the top - with solara.Row(): - image_url = "https://huggingface.co/spaces/romero61/hendata/resolve/main/public/logo.png" - solara.Image(image_url, width="100%") - - # Map row - with solara.Row(): - with solara.Column(): - - Map.element( # type: ignore - zoom=6, - center= [-23.76523688975866, -57.32666015625001], - scroll_wheel_zoom=True, - toolbar_ctrl=False, - data_ctrl=False, - height = '500px', - width = '900px' - ) - solara.Text(f"Zoom: {zoom.value}") - solara.Text(f"Center: {center.value}") - - - # Place buttons in a horizontal row below the map - with solara.Row(): - solara.Button("Submit", on_click=lambda: [handle_submit(), set_data(fetch_data())]) - solara.Button("Update", on_click=lambda: [handle_update(), set_data(fetch_data())]) - solara.Button("Delete", on_click=lambda: [handle_delete(), set_data(fetch_data())]) - solara.Button("Clear", on_click=lambda: [inputs[col][1]('') for col in columns]) - - # Explanation in Spanish - with solara.Row(): - solara.Markdown('Building Filter Tools') - # Dropdown menu to choose a filter - - - # Input Fields and DataFrame - with solara.Columns(widths=[1, 3]): - with solara.Column(): - # Input fields - for col in columns: - solara.InputText(col, value=inputs[col][0], on_value=inputs[col][1]) - - # Dropdown for row selection if data is not empty - if not data.empty: - solara.Select(label="Select a row to update", values=[row[0] for row 
in data.itertuples()], value=selected_row, on_value=handle_row_selection) - - # DataFrame with filters applied - with solara.Row(): - solara.CrossFilterDataFrame(data, items_per_page=50) - -'''import os -import sys -import solara -import datetime -import psycopg2 -import json -import pandas as pd -import ipyleaflet -from ipyleaflet import Icon, Popup -from ipywidgets import HTML -from psycopg2 import pool - -# Database connection parameters -key_content = os.environ.get('DATABASE_SECRETS') -key_dict = json.loads(key_content) - -connection_pool = pool.SimpleConnectionPool(1, 10, **key_dict) - -def get_columns(): - conn = connection_pool.getconn() - with conn: - with conn.cursor() as cur: - cur.execute("SELECT column_name FROM information_schema.columns WHERE table_name = 'clientes_test';") - columns = [col[0] for col in cur.fetchall()] - connection_pool.putconn(conn) - return columns - -def fetch_lat_long_data(): - conn = connection_pool.getconn() - lat_long_data = [] - with conn: - with conn.cursor() as cur: - cur.execute("SELECT latitud, longitud FROM clientes_test;") - lat_long_data = cur.fetchall() - connection_pool.putconn(conn) - return lat_long_data - -columns = get_columns() - -zoom_default = 7 -center_default = (-25.272019833438247, -57.62672424316407) -maps = { - "OpenStreetMap.Mapnik": ipyleaflet.basemaps.OpenStreetMap.Mapnik, - "OpenTopoMap": ipyleaflet.basemaps.OpenTopoMap, - "Esri.WorldTopoMap": ipyleaflet.basemaps.Esri.WorldTopoMap, - "Stamen.Watercolor": ipyleaflet.basemaps.Stamen.Watercolor, -} - -def update_markers(displayed_rows, icon): - markers = [] - for _, row in displayed_rows.iterrows(): - lat, long = row['latitud'], row['longitud'] - marker = ipyleaflet.Marker.element(location=(lat, long), opacity=0.75, icon=icon, draggable=False) - popup_content = HTML(f"Lat: {lat}, Long: {long}") - popup = Popup(child=popup_content, close_button=True, auto_close=True, close_on_escape_key=True) - marker.popup = popup - markers.append(marker) - return markers - - -zoom = solara.reactive(zoom_default) -center = solara.reactive(center_default) -marker_location = solara.reactive(center_default) -map_name = solara.reactive(list(maps)[0]) - -@solara.component -def Page(): - inputs = {col: solara.use_state('') for col in columns} - selected_row, set_selected_row = solara.use_state(None) - - - def fetch_data(): - conn = connection_pool.getconn() - with conn: - with conn.cursor() as cur: - cur.execute("SELECT * FROM clientes_test;") - data = cur.fetchall() - connection_pool.putconn(conn) - df = pd.DataFrame(data, columns=columns) - return df - - data, set_data = solara.use_state(fetch_data()) - - def handle_submit(): - conn = connection_pool.getconn() - with conn: - with conn.cursor() as cur: - columns_str = ", ".join(columns[1:]) # Exclude 'id' since it's auto-incremented - values_str = ", ".join(["%s"] * len(columns[1:])) - cur.execute(f"INSERT INTO clientes_test ({columns_str}) VALUES ({values_str});", [inputs[col][0] for col in columns[1:]]) - conn.commit() - connection_pool.putconn(conn) - - def handle_update(): - conn = connection_pool.getconn() - with conn: - with conn.cursor() as cur: - if selected_row is not None: - set_str = ", ".join([f"{col} = %s" for col in columns[1:]]) - cur.execute(f"UPDATE clientes_test SET {set_str} WHERE id = {selected_row};", [inputs[col][0] for col in columns[1:]]) - conn.commit() - connection_pool.putconn(conn) - - def handle_delete(): - conn = connection_pool.getconn() - with conn: - with conn.cursor() as cur: - if selected_row is not None: - 
cur.execute(f"DELETE FROM clientes_test WHERE id = {selected_row};") - conn.commit() - connection_pool.putconn(conn) - - - def goto_marker(): - center.value = marker_location.value - zoom.value = 13 - - def handle_row_selection(row_index): - conn = connection_pool.getconn() - with conn: - with conn.cursor() as cur: - cur.execute("SELECT * FROM clientes_test WHERE id = %s;", (row_index,)) - row_data = cur.fetchone() - for idx, col in enumerate(columns): - inputs[col][1](str(row_data[idx])) - connection_pool.putconn(conn) - selected_lat = data.loc[row_index, 'latitud'] - selected_long = data.loc[row_index, 'longitud'] - marker_location.value = (selected_lat, selected_long) - goto_marker() - - - - with solara.Column(style={"min-width": "500px", "height": "700px"}): - # Check if map_name.value is a valid key - if map_name.value in maps: - map = maps[map_name.value] - else: - map = maps[list(maps.keys())[0]] # Default to the first map if the key is not valid - - url = map.build_url() - solara.Select(label="Map", value=map_name, values=list(maps), style={"z-index": "10000"}) - solara.Button(label="Reset view", on_click=lambda: [center.set(center_default), zoom.set(zoom_default)]) - - lat_long_data = fetch_lat_long_data() - icon = Icon(icon_url="https://cdn3.iconfinder.com/data/icons/remixicon-map/24/map-pin-user-fill-256.png", icon_size=[15, 15]) - displayed_rows = data.head(50) - # Update markers based on displayed rows - markers = update_markers(displayed_rows, icon) - for lat, long in lat_long_data: - marker = ipyleaflet.Marker.element(location=(lat, long),opacity = 0.75, icon=icon, draggable=False) - popup_content = HTML(f"Lat: {lat}, Long: {long}") - popup = Popup(child=popup_content, close_button=True, auto_close=True, close_on_escape_key=True) - marker.popup = popup - markers.append(marker) - - ipyleaflet.Map.element( - zoom=zoom.value, - on_zoom=zoom.set, - center=center.value, - on_center=center.set, - scroll_wheel_zoom=True, - layers=[ - ipyleaflet.TileLayer.element(url=url), - *markers - ], - ) - - with solara.Columns(widths=[1, 3]): - with solara.Column(): - for col in columns: - solara.InputText(col, value=inputs[col][0], on_value=inputs[col][1]) - if not data.empty: - solara.Select(label="Select a row to update", values=[row[0] for row in data.itertuples()], value=selected_row, on_value=handle_row_selection) - solara.Button("Submit", on_click=lambda: [handle_submit(), set_data(fetch_data())]) - solara.Button("Update", on_click=lambda: [handle_update(), set_data(fetch_data())]) - solara.Button("Delete", on_click=lambda: [handle_delete(), set_data(fetch_data())]) - solara.Button("Clear", on_click=lambda: [inputs[col][1]('') for col in columns]) - - with solara.Column(): - solara.DataFrame(data, items_per_page=50) -''' \ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/1986 Traci Takes Tokyo Aka Traci Made In Japan.md b/spaces/rorallitri/biomedical-language-models/logs/1986 Traci Takes Tokyo Aka Traci Made In Japan.md deleted file mode 100644 index 26d3068ac2c8ac352f2c478d4ec9234429229d86..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/1986 Traci Takes Tokyo Aka Traci Made In Japan.md +++ /dev/null @@ -1,6 +0,0 @@ -

          1986 Traci Takes Tokyo Aka Traci Made In Japan





          -
          - aaccfb2cb3
          -
          -
          -

          diff --git a/spaces/rorallitri/biomedical-language-models/logs/Downloadarcsofttotalmedia35fullcrackedinternet [WORK].md b/spaces/rorallitri/biomedical-language-models/logs/Downloadarcsofttotalmedia35fullcrackedinternet [WORK].md deleted file mode 100644 index a2117857d9f79b43afd40dfc861e8263882167cc..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Downloadarcsofttotalmedia35fullcrackedinternet [WORK].md +++ /dev/null @@ -1,100 +0,0 @@ - -

          Download Arcsoft Totalmedia 3.5 Full Cracked Internet: How to Enjoy All Your Media Files with One Powerful Software

          - -

          If you are looking for a comprehensive solution for your media needs, you might want to check out Arcsoft Totalmedia 3.5, a software that can play, edit, record, and convert all your media files with ease. Whether you want to watch movies, listen to music, burn discs, or create slideshows, Arcsoft Totalmedia 3.5 can do it all for you.

          -

          downloadarcsofttotalmedia35fullcrackedinternet





          - -

          But what if you don't want to pay for the software? What if you want to get it for free? Well, there is a way to download Arcsoft Totalmedia 3.5 full cracked internet and enjoy all its features without spending a dime. In this article, we will show you how to do it and what are the benefits and risks of doing so.

          - -

          What is Arcsoft Totalmedia 3.5?

          - -

          Arcsoft Totalmedia 3.5 is a software that combines various media functions into one application. It can play various formats of video and audio files, including Blu-ray discs and HD DVDs. It can also record TV shows and movies from your TV tuner or webcam, and edit them with various tools and effects. It can also convert your media files to different formats and devices, such as iPods, PSPs, or mobile phones. It can also burn your media files to CDs, DVDs, or Blu-ray discs, and create slideshows with your photos and music.

          - -

          Arcsoft Totalmedia 3.5 has a user-friendly interface that allows you to access all its functions easily. It also has some advanced features, such as 3D support, parental control, online video download, and media library management.

          - -

          How to Download Arcsoft Totalmedia 3.5 Full Cracked Internet?

          - -

          If you want to download Arcsoft Totalmedia 3.5 full cracked internet, you will need to find a reliable source that offers the cracked version of the software. There are many websites that claim to provide this service, but not all of them are trustworthy or safe. Some of them may contain viruses, malware, or spyware that can harm your computer or steal your personal information.

          -

          - -

          One of the websites that we found that offers Arcsoft Totalmedia 3.5 full cracked internet is this one. It provides a link to download the software and a serial number to activate it. However, we cannot guarantee that this website is safe or legal, so proceed at your own risk.

          - -

          To download Arcsoft Totalmedia 3.5 full cracked internet from this website, you will need to follow these steps:

          - -
            -
          1. Click on the link provided by the website.
          2. -
          3. Wait for the download to finish.
          4. -
          5. Extract the zip file using a program like WinRAR or 7-Zip.
          6. -
          7. Run the setup file and follow the instructions.
          8. -
          9. Enter the serial number provided by the website when prompted.
          10. -
          11. Enjoy using Arcsoft Totalmedia 3.5 full cracked internet.
          12. -
          - -

          What are the Benefits and Risks of Downloading Arcsoft Totalmedia 3.5 Full Cracked Internet?

          - -

          Downloading Arcsoft Totalmedia 3.5 full cracked internet has some benefits and risks that you should be aware of before doing so.

          - -

          The main benefit of downloading Arcsoft Totalmedia 3.5 full cracked internet is that you can use the software for free without paying for it. You can enjoy all its features and functions without any limitations or restrictions.

          - -

          The main risk of downloading Arcsoft Totalmedia 3.5 full cracked internet is that you may be violating the copyright laws and the terms of service of the software developer. You may also be exposing your computer and your personal information to potential threats from viruses, malware, or spyware that may be hidden in the cracked version of the software.

          - -

          Therefore, we do not recommend downloading Arcsoft Totalmedia 3.5 full cracked internet unless you are willing to take these risks and accept the consequences.

          - -

          Conclusion

          - -

          Arcsoft Totalmedia 3.5 is a comprehensive solution for your media needs that can play, edit, record, and convert all your media files with ease. However, if you want to use it for free without paying for it, you will need to download Arcsoft Totalmedia 3.5 full cracked internet from a reliable source that offers the cracked version of the software.

          - -

          However, downloading Arcsoft Totalmedia 3.5 full cracked internet has some benefits and risks that you should be aware of before doing so. The main benefit is that you can use the software for free without any limitations or restrictions. The main risk is that you may be violating the copyright laws and the terms of service of the software developer. You may also be exposing your computer and your personal information to potential threats from viruses, malware, or spyware that may be hidden in the cracked version of the software.

          - -


          Therefore, we do not recommend downloading Arcsoft Totalmedia 3.5 full cracked internet unless you are willing to take these risks and accept the consequences. And if you are interested in learning more about Arcsoft Totalmedia 3.5 and its features, you can visit the official website of the software developer at https://www.arcsoft.com/. And if you have any questions or comments about this article, feel free to leave them below. We would love to hear from you.

          3cee63e6c2
          -
          -
          \ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/Hard Truck 18 Wheels Of Steel Crack No-cd Player In Computerl.md b/spaces/rorallitri/biomedical-language-models/logs/Hard Truck 18 Wheels Of Steel Crack No-cd Player In Computerl.md deleted file mode 100644 index 05d9da700ae844ca751e7a4d95674ac12cd30c61..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Hard Truck 18 Wheels Of Steel Crack No-cd Player In Computerl.md +++ /dev/null @@ -1,20 +0,0 @@ -
          -

          How to Play Hard Truck 18 Wheels of Steel Without a CD Player in Your Computer

          -

          Hard Truck 18 Wheels of Steel is a classic truck simulation game that lets you drive across America in a variety of trucks. However, if you have lost or damaged your original CD, or if your computer does not have a CD player, you might think that you cannot play this game anymore. Fortunately, there is a way to play Hard Truck 18 Wheels of Steel without a CD player in your computer. All you need is a crack file that bypasses the CD check and lets you run the game from your hard drive.

          -

          Hard Truck 18 Wheels Of Steel Crack No-cd Player In Computerl





          -

          A crack file is a modified version of the game's executable file that removes the protection or authentication mechanism that requires a CD to be inserted. By using a crack file, you can play Hard Truck 18 Wheels of Steel without having to insert the CD every time. However, you should only use a crack file if you own a legitimate copy of the game and do not intend to distribute or share it with others. Using a crack file for piracy or illegal purposes is not condoned and may result in legal consequences.

          -

          To use a crack file for Hard Truck 18 Wheels of Steel, you need to follow these steps:

          -
            -
          1. Download the crack file from a reputable source. You can search online for "Hard Truck 18 Wheels of Steel crack no-cd" and find several websites that offer crack files for this game. Make sure to scan the file for viruses or malware before opening it.
          2. -
          3. Locate the folder where you installed Hard Truck 18 Wheels of Steel on your computer. The default location is C:\Program Files\Hard Truck 18 Wheels of Steel.
          4. -
          5. Copy the crack file and paste it into the folder where you installed the game. You will be asked to replace the existing file with the same name. Click yes to confirm.
          6. -
          7. Run the game from the crack file. You should be able to play Hard Truck 18 Wheels of Steel without a CD player in your computer.
          8. -
          -

          Enjoy driving across America in your truck!

          -

          - -

          Hard Truck 18 Wheels of Steel is a game that was released in 2002 by ValuSoft and SCS Software. It is the first installment in the Hard Truck series, which includes several sequels and spin-offs. The game features realistic truck physics, dynamic weather, traffic, and cargo delivery missions. You can choose from 32 different trucks and trailers, and customize them with various parts and accessories. You can also explore 11 different states in the USA, from California to Florida.

          -

          The game received mixed reviews from critics and players. Some praised the game for its realism, variety, and challenge, while others criticized it for its outdated graphics, bugs, and lack of multiplayer mode. The game has a loyal fan base that still plays it today, and there are many mods and patches available online that enhance the game's features and performance. If you are a fan of truck simulation games, you might want to give Hard Truck 18 Wheels of Steel a try.

          -

          However, if you want to play the game without a CD player in your computer, you need to use a crack file that bypasses the CD check. This is a simple and easy way to enjoy the game without having to worry about losing or damaging your CD. Just make sure to use a crack file from a reliable source and only if you own a legal copy of the game. Do not use a crack file for illegal or unethical purposes, as this may harm your computer or get you in trouble with the law.

          d5da3c52bf
          -
          -
          \ No newline at end of file diff --git a/spaces/rossellison/kpop-face-generator/stylegan3-fun/torch_utils/ops/grid_sample_gradfix.py b/spaces/rossellison/kpop-face-generator/stylegan3-fun/torch_utils/ops/grid_sample_gradfix.py deleted file mode 100644 index 441b37953c3392c3eb7dcf24c3d2891f17a3c18e..0000000000000000000000000000000000000000 --- a/spaces/rossellison/kpop-face-generator/stylegan3-fun/torch_utils/ops/grid_sample_gradfix.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Custom replacement for `torch.nn.functional.grid_sample` that -supports arbitrarily high order gradients between the input and output. -Only works on 2D images and assumes -`mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`.""" - -import torch -from pkg_resources import parse_version - -# pylint: disable=redefined-builtin -# pylint: disable=arguments-differ -# pylint: disable=protected-access - -#---------------------------------------------------------------------------- - -enabled = False # Enable the custom op by setting this to true. -_use_pytorch_1_11_api = parse_version(torch.__version__) >= parse_version('1.11.0a') # Allow prerelease builds of 1.11 - -#---------------------------------------------------------------------------- - -def grid_sample(input, grid): - if _should_use_custom_op(): - return _GridSample2dForward.apply(input, grid) - return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) - -#---------------------------------------------------------------------------- - -def _should_use_custom_op(): - return enabled - -#---------------------------------------------------------------------------- - -class _GridSample2dForward(torch.autograd.Function): - @staticmethod - def forward(ctx, input, grid): - assert input.ndim == 4 - assert grid.ndim == 4 - output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) - ctx.save_for_backward(input, grid) - return output - - @staticmethod - def backward(ctx, grad_output): - input, grid = ctx.saved_tensors - grad_input, grad_grid = _GridSample2dBackward.apply(grad_output, input, grid) - return grad_input, grad_grid - -#---------------------------------------------------------------------------- - -class _GridSample2dBackward(torch.autograd.Function): - @staticmethod - def forward(ctx, grad_output, input, grid): - op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward') - if _use_pytorch_1_11_api: - output_mask = (ctx.needs_input_grad[1], ctx.needs_input_grad[2]) - grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False, output_mask) - else: - grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False) - ctx.save_for_backward(grid) - return grad_input, grad_grid - - @staticmethod - def backward(ctx, grad2_grad_input, grad2_grad_grid): - _ = grad2_grad_grid # unused - grid, = ctx.saved_tensors - grad2_grad_output = None - grad2_input = None - grad2_grid = None - - if ctx.needs_input_grad[0]: - grad2_grad_output = _GridSample2dForward.apply(grad2_grad_input, 
grid) - - assert not ctx.needs_input_grad[2] - return grad2_grad_output, grad2_input, grad2_grid - -#---------------------------------------------------------------------------- diff --git a/spaces/rstallman/Mayfair-Partner-Music/tests/data/test_audio_utils.py b/spaces/rstallman/Mayfair-Partner-Music/tests/data/test_audio_utils.py deleted file mode 100644 index 0480671bb17281d61ce02bce6373a5ccec89fece..0000000000000000000000000000000000000000 --- a/spaces/rstallman/Mayfair-Partner-Music/tests/data/test_audio_utils.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import julius -import torch -import pytest - -from audiocraft.data.audio_utils import ( - _clip_wav, - convert_audio_channels, - convert_audio, - normalize_audio -) -from ..common_utils import get_batch_white_noise - - -class TestConvertAudioChannels: - - def test_convert_audio_channels_downmix(self): - b, c, t = 2, 3, 100 - audio = get_batch_white_noise(b, c, t) - mixed = convert_audio_channels(audio, channels=2) - assert list(mixed.shape) == [b, 2, t] - - def test_convert_audio_channels_nochange(self): - b, c, t = 2, 3, 100 - audio = get_batch_white_noise(b, c, t) - mixed = convert_audio_channels(audio, channels=c) - assert list(mixed.shape) == list(audio.shape) - - def test_convert_audio_channels_upmix(self): - b, c, t = 2, 1, 100 - audio = get_batch_white_noise(b, c, t) - mixed = convert_audio_channels(audio, channels=3) - assert list(mixed.shape) == [b, 3, t] - - def test_convert_audio_channels_upmix_error(self): - b, c, t = 2, 2, 100 - audio = get_batch_white_noise(b, c, t) - with pytest.raises(ValueError): - convert_audio_channels(audio, channels=3) - - -class TestConvertAudio: - - def test_convert_audio_channels_downmix(self): - b, c, dur = 2, 3, 4. - sr = 128 - audio = get_batch_white_noise(b, c, int(sr * dur)) - out = convert_audio(audio, from_rate=sr, to_rate=sr, to_channels=2) - assert list(out.shape) == [audio.shape[0], 2, audio.shape[-1]] - - def test_convert_audio_channels_upmix(self): - b, c, dur = 2, 1, 4. - sr = 128 - audio = get_batch_white_noise(b, c, int(sr * dur)) - out = convert_audio(audio, from_rate=sr, to_rate=sr, to_channels=3) - assert list(out.shape) == [audio.shape[0], 3, audio.shape[-1]] - - def test_convert_audio_upsample(self): - b, c, dur = 2, 1, 4. - sr = 2 - new_sr = 3 - audio = get_batch_white_noise(b, c, int(sr * dur)) - out = convert_audio(audio, from_rate=sr, to_rate=new_sr, to_channels=c) - out_j = julius.resample.resample_frac(audio, old_sr=sr, new_sr=new_sr) - assert torch.allclose(out, out_j) - - def test_convert_audio_resample(self): - b, c, dur = 2, 1, 4. - sr = 3 - new_sr = 2 - audio = get_batch_white_noise(b, c, int(sr * dur)) - out = convert_audio(audio, from_rate=sr, to_rate=new_sr, to_channels=c) - out_j = julius.resample.resample_frac(audio, old_sr=sr, new_sr=new_sr) - assert torch.allclose(out, out_j) - - -class TestNormalizeAudio: - - def test_clip_wav(self): - b, c, dur = 2, 1, 4. - sr = 3 - audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur)) - _clip_wav(audio) - assert audio.abs().max() <= 1 - - def test_normalize_audio_clip(self): - b, c, dur = 2, 1, 4. - sr = 3 - audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur)) - norm_audio = normalize_audio(audio, strategy='clip') - assert norm_audio.abs().max() <= 1 - - def test_normalize_audio_rms(self): - b, c, dur = 2, 1, 4. 
- sr = 3 - audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur)) - norm_audio = normalize_audio(audio, strategy='rms') - assert norm_audio.abs().max() <= 1 - - def test_normalize_audio_peak(self): - b, c, dur = 2, 1, 4. - sr = 3 - audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur)) - norm_audio = normalize_audio(audio, strategy='peak') - assert norm_audio.abs().max() <= 1 diff --git a/spaces/ruslanmv/Clone-Your-Voice/synthesizer/inference.py b/spaces/ruslanmv/Clone-Your-Voice/synthesizer/inference.py deleted file mode 100644 index 340bb1fa177f44c5e3c52da6dbed0899ad67a200..0000000000000000000000000000000000000000 --- a/spaces/ruslanmv/Clone-Your-Voice/synthesizer/inference.py +++ /dev/null @@ -1,165 +0,0 @@ -import torch -from synthesizer import audio -from synthesizer.hparams import hparams -from synthesizer.models.tacotron import Tacotron -from synthesizer.utils.symbols import symbols -from synthesizer.utils.text import text_to_sequence -from vocoder.display import simple_table -from pathlib import Path -from typing import Union, List -import numpy as np -import librosa - - -class Synthesizer: - sample_rate = hparams.sample_rate - hparams = hparams - - def __init__(self, model_fpath: Path, verbose=True): - """ - The model isn't instantiated and loaded in memory until needed or until load() is called. - - :param model_fpath: path to the trained model file - :param verbose: if False, prints less information when using the model - """ - self.model_fpath = model_fpath - self.verbose = verbose - - # Check for GPU - if torch.cuda.is_available(): - self.device = torch.device("cuda") - else: - self.device = torch.device("cpu") - if self.verbose: - print("Synthesizer using device:", self.device) - - # Tacotron model will be instantiated later on first use. - self._model = None - - def is_loaded(self): - """ - Whether the model is loaded in memory. - """ - return self._model is not None - - def load(self): - """ - Instantiates and loads the model given the weights file that was passed in the constructor. - """ - self._model = Tacotron(embed_dims=hparams.tts_embed_dims, - num_chars=len(symbols), - encoder_dims=hparams.tts_encoder_dims, - decoder_dims=hparams.tts_decoder_dims, - n_mels=hparams.num_mels, - fft_bins=hparams.num_mels, - postnet_dims=hparams.tts_postnet_dims, - encoder_K=hparams.tts_encoder_K, - lstm_dims=hparams.tts_lstm_dims, - postnet_K=hparams.tts_postnet_K, - num_highways=hparams.tts_num_highways, - dropout=hparams.tts_dropout, - stop_threshold=hparams.tts_stop_threshold, - speaker_embedding_size=hparams.speaker_embedding_size).to(self.device) - - self._model.load(self.model_fpath) - self._model.eval() - - if self.verbose: - print("Loaded synthesizer \"%s\" trained to step %d" % (self.model_fpath.name, self._model.state_dict()["step"])) - - def synthesize_spectrograms(self, texts: List[str], - embeddings: Union[np.ndarray, List[np.ndarray]], - return_alignments=False): - """ - Synthesizes mel spectrograms from texts and speaker embeddings. - - :param texts: a list of N text prompts to be synthesized - :param embeddings: a numpy array or list of speaker embeddings of shape (N, 256) - :param return_alignments: if True, a matrix representing the alignments between the - characters - and each decoder output step will be returned for each spectrogram - :return: a list of N melspectrograms as numpy arrays of shape (80, Mi), where Mi is the - sequence length of spectrogram i, and possibly the alignments. - """ - # Load the model on the first request. 
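# A minimal usage sketch for the Synthesizer class defined in this file. The
# checkpoint path, the example text, and the zero-valued speaker embedding are
# illustrative assumptions, not values taken from this repository.
import numpy as np
from pathlib import Path
from synthesizer.inference import Synthesizer

synth = Synthesizer(Path("saved_models/synthesizer.pt"))  # hypothetical checkpoint path
texts = ["Hello world."]
embeds = [np.zeros(256, dtype=np.float32)]  # placeholder speaker embedding of shape (256,)
specs = synth.synthesize_spectrograms(texts, embeds)  # list of (80, Mi) mel spectrograms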
- if not self.is_loaded(): - self.load() - - # Preprocess text inputs - inputs = [text_to_sequence(text.strip(), hparams.tts_cleaner_names) for text in texts] - if not isinstance(embeddings, list): - embeddings = [embeddings] - - # Batch inputs - batched_inputs = [inputs[i:i+hparams.synthesis_batch_size] - for i in range(0, len(inputs), hparams.synthesis_batch_size)] - batched_embeds = [embeddings[i:i+hparams.synthesis_batch_size] - for i in range(0, len(embeddings), hparams.synthesis_batch_size)] - - specs = [] - for i, batch in enumerate(batched_inputs, 1): - if self.verbose: - print(f"\n| Generating {i}/{len(batched_inputs)}") - - # Pad texts so they are all the same length - text_lens = [len(text) for text in batch] - max_text_len = max(text_lens) - chars = [pad1d(text, max_text_len) for text in batch] - chars = np.stack(chars) - - # Stack speaker embeddings into 2D array for batch processing - speaker_embeds = np.stack(batched_embeds[i-1]) - - # Convert to tensor - chars = torch.tensor(chars).long().to(self.device) - speaker_embeddings = torch.tensor(speaker_embeds).float().to(self.device) - - # Inference - _, mels, alignments = self._model.generate(chars, speaker_embeddings) - mels = mels.detach().cpu().numpy() - for m in mels: - # Trim silence from end of each spectrogram - while np.max(m[:, -1]) < hparams.tts_stop_threshold: - m = m[:, :-1] - specs.append(m) - - if self.verbose: - print("\n\nDone.\n") - return (specs, alignments) if return_alignments else specs - - @staticmethod - def load_preprocess_wav(fpath): - """ - Loads and preprocesses an audio file under the same conditions the audio files were used to - train the synthesizer. - """ - wav = librosa.load(str(fpath), hparams.sample_rate)[0] - if hparams.rescale: - wav = wav / np.abs(wav).max() * hparams.rescaling_max - return wav - - @staticmethod - def make_spectrogram(fpath_or_wav: Union[str, Path, np.ndarray]): - """ - Creates a mel spectrogram from an audio file in the same manner as the mel spectrograms that - were fed to the synthesizer when training. - """ - if isinstance(fpath_or_wav, str) or isinstance(fpath_or_wav, Path): - wav = Synthesizer.load_preprocess_wav(fpath_or_wav) - else: - wav = fpath_or_wav - - mel_spectrogram = audio.melspectrogram(wav, hparams).astype(np.float32) - return mel_spectrogram - - @staticmethod - def griffin_lim(mel): - """ - Inverts a mel spectrogram using Griffin-Lim. The mel spectrogram is expected to have been built - with the same parameters present in hparams.py. 
- """ - return audio.inv_mel_spectrogram(mel, hparams) - - -def pad1d(x, max_len, pad_value=0): - return np.pad(x, (0, max_len - len(x)), mode="constant", constant_values=pad_value) diff --git a/spaces/safi842/FashionGen/models/stylegan2/stylegan2-pytorch/lpips/__init__.py b/spaces/safi842/FashionGen/models/stylegan2/stylegan2-pytorch/lpips/__init__.py deleted file mode 100644 index a4f86b7ee229b333a64f16d0091e988492f99c58..0000000000000000000000000000000000000000 --- a/spaces/safi842/FashionGen/models/stylegan2/stylegan2-pytorch/lpips/__init__.py +++ /dev/null @@ -1,160 +0,0 @@ - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -from skimage.measure import compare_ssim -import torch -from torch.autograd import Variable - -from lpips import dist_model - -class PerceptualLoss(torch.nn.Module): - def __init__(self, model='net-lin', net='alex', colorspace='rgb', spatial=False, use_gpu=True, gpu_ids=[0]): # VGG using our perceptually-learned weights (LPIPS metric) - # def __init__(self, model='net', net='vgg', use_gpu=True): # "default" way of using VGG as a perceptual loss - super(PerceptualLoss, self).__init__() - print('Setting up Perceptual loss...') - self.use_gpu = use_gpu - self.spatial = spatial - self.gpu_ids = gpu_ids - self.model = dist_model.DistModel() - self.model.initialize(model=model, net=net, use_gpu=use_gpu, colorspace=colorspace, spatial=self.spatial, gpu_ids=gpu_ids) - print('...[%s] initialized'%self.model.name()) - print('...Done') - - def forward(self, pred, target, normalize=False): - """ - Pred and target are Variables. - If normalize is True, assumes the images are between [0,1] and then scales them between [-1,+1] - If normalize is False, assumes the images are already between [-1,+1] - - Inputs pred and target are Nx3xHxW - Output pytorch Variable N long - """ - - if normalize: - target = 2 * target - 1 - pred = 2 * pred - 1 - - return self.model.forward(target, pred) - -def normalize_tensor(in_feat,eps=1e-10): - norm_factor = torch.sqrt(torch.sum(in_feat**2,dim=1,keepdim=True)) - return in_feat/(norm_factor+eps) - -def l2(p0, p1, range=255.): - return .5*np.mean((p0 / range - p1 / range)**2) - -def psnr(p0, p1, peak=255.): - return 10*np.log10(peak**2/np.mean((1.*p0-1.*p1)**2)) - -def dssim(p0, p1, range=255.): - return (1 - compare_ssim(p0, p1, data_range=range, multichannel=True)) / 2. - -def rgb2lab(in_img,mean_cent=False): - from skimage import color - img_lab = color.rgb2lab(in_img) - if(mean_cent): - img_lab[:,:,0] = img_lab[:,:,0]-50 - return img_lab - -def tensor2np(tensor_obj): - # change dimension of a tensor object into a numpy array - return tensor_obj[0].cpu().float().numpy().transpose((1,2,0)) - -def np2tensor(np_obj): - # change dimenion of np array into tensor array - return torch.Tensor(np_obj[:, :, :, np.newaxis].transpose((3, 2, 0, 1))) - -def tensor2tensorlab(image_tensor,to_norm=True,mc_only=False): - # image tensor to lab tensor - from skimage import color - - img = tensor2im(image_tensor) - img_lab = color.rgb2lab(img) - if(mc_only): - img_lab[:,:,0] = img_lab[:,:,0]-50 - if(to_norm and not mc_only): - img_lab[:,:,0] = img_lab[:,:,0]-50 - img_lab = img_lab/100. - - return np2tensor(img_lab) - -def tensorlab2tensor(lab_tensor,return_inbnd=False): - from skimage import color - import warnings - warnings.filterwarnings("ignore") - - lab = tensor2np(lab_tensor)*100. 
- lab[:,:,0] = lab[:,:,0]+50 - - rgb_back = 255.*np.clip(color.lab2rgb(lab.astype('float')),0,1) - if(return_inbnd): - # convert back to lab, see if we match - lab_back = color.rgb2lab(rgb_back.astype('uint8')) - mask = 1.*np.isclose(lab_back,lab,atol=2.) - mask = np2tensor(np.prod(mask,axis=2)[:,:,np.newaxis]) - return (im2tensor(rgb_back),mask) - else: - return im2tensor(rgb_back) - -def rgb2lab(input): - from skimage import color - return color.rgb2lab(input / 255.) - -def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.): - image_numpy = image_tensor[0].cpu().float().numpy() - image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor - return image_numpy.astype(imtype) - -def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.): - return torch.Tensor((image / factor - cent) - [:, :, :, np.newaxis].transpose((3, 2, 0, 1))) - -def tensor2vec(vector_tensor): - return vector_tensor.data.cpu().numpy()[:, :, 0, 0] - -def voc_ap(rec, prec, use_07_metric=False): - """ ap = voc_ap(rec, prec, [use_07_metric]) - Compute VOC AP given precision and recall. - If use_07_metric is true, uses the - VOC 07 11 point method (default:False). - """ - if use_07_metric: - # 11 point metric - ap = 0. - for t in np.arange(0., 1.1, 0.1): - if np.sum(rec >= t) == 0: - p = 0 - else: - p = np.max(prec[rec >= t]) - ap = ap + p / 11. - else: - # correct AP calculation - # first append sentinel values at the end - mrec = np.concatenate(([0.], rec, [1.])) - mpre = np.concatenate(([0.], prec, [0.])) - - # compute the precision envelope - for i in range(mpre.size - 1, 0, -1): - mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) - - # to calculate area under PR curve, look for points - # where X axis (recall) changes value - i = np.where(mrec[1:] != mrec[:-1])[0] - - # and sum (\Delta recall) * prec - ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) - return ap - -def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.): -# def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=1.): - image_numpy = image_tensor[0].cpu().float().numpy() - image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor - return image_numpy.astype(imtype) - -def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.): -# def im2tensor(image, imtype=np.uint8, cent=1., factor=1.): - return torch.Tensor((image / factor - cent) - [:, :, :, np.newaxis].transpose((3, 2, 0, 1))) diff --git a/spaces/scedlatioru/img-to-music/example/Adobe Acrobat 8 Professional V8.1.0 - Keygen ZWT [PORTABLE].md b/spaces/scedlatioru/img-to-music/example/Adobe Acrobat 8 Professional V8.1.0 - Keygen ZWT [PORTABLE].md deleted file mode 100644 index 271931a76199a4efbea574d0841b83afa74541f7..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Adobe Acrobat 8 Professional V8.1.0 - Keygen ZWT [PORTABLE].md +++ /dev/null @@ -1,131 +0,0 @@ - -

          Adobe Acrobat 8 Professional v8.1.0 - keygen ZWT: A Complete Guide

          - -

Adobe Acrobat 8 Professional is a powerful application that allows you to create, edit, and share PDF documents with ease. It has many features and tools that make it the ultimate PDF solution for professionals and businesses. However, Adobe Acrobat 8 Professional is not free software, and you need to purchase a license to use it. If you don't want to pay for a license, you can use a keygen to activate it for free.

          -

          Adobe Acrobat 8 Professional v8.1.0 - keygen ZWT


          Download Zip 🆗 https://gohhs.com/2uEzIG



          - -

A keygen is a program that generates serial numbers or activation codes for other software. It can bypass the security checks and verification processes of the original software and make it think that you have a valid license. One of the most popular keygens for Adobe Acrobat 8 Professional is the ZWT keygen, which stands for Zero Waiting Time. This keygen was created by a group of hackers who cracked Adobe Acrobat 8 Professional v8.1.0 and released it to the public.

          - -

          How to use Adobe Acrobat 8 Professional v8.1.0 - keygen ZWT

          - -

          If you want to use Adobe Acrobat 8 Professional v8.1.0 - keygen ZWT, you need to follow these steps:

          - -
1. Download Adobe Acrobat 8 Professional v8.1.0 from the official website or any other trusted source.
2. Install Adobe Acrobat 8 Professional v8.1.0 on your computer.
3. Download Adobe Acrobat 8 Professional v8.1.0 - keygen ZWT from any of the links provided in this article or any other reliable source.
4. Extract the keygen file and run it as administrator.
5. Select Adobe Acrobat 8 Professional from the drop-down menu and click on Generate Serial.
6. Copy the serial number and paste it in the Adobe Acrobat 8 Professional installation window when prompted.
7. Click on Next and then on Activate Online.
8. The keygen will automatically generate an activation code and paste it in the Adobe Acrobat 8 Professional activation window.
9. Click on Next and then on Finish.
10. Congratulations! You have successfully activated Adobe Acrobat 8 Professional v8.1.0 with ZWT keygen.

          Benefits of using Adobe Acrobat 8 Professional v8.1.0 - keygen ZWT

          - -

          By using Adobe Acrobat 8 Professional v8.1.0 - keygen ZWT, you can enjoy the following benefits:

          -

          - -
• You can use Adobe Acrobat 8 Professional v8.1.0 for free without paying any fees or charges.
• You can access all the features and tools of Adobe Acrobat 8 Professional v8.1.0 without any limitations or restrictions.
• You can create, edit, and share PDF documents with high quality and security.
• You can convert PDF files to other formats such as Word, Excel, PowerPoint, HTML, etc.
• You can add comments, annotations, signatures, stamps, watermarks, bookmarks, etc. to your PDF files.
• You can combine multiple PDF files into one or split one PDF file into multiple files.
• You can protect your PDF files with passwords, encryption, digital signatures, etc.
• You can optimize your PDF files for web, print, or mobile devices.

          Risks of using Adobe Acrobat 8 Professional v8.1.0 - keygen ZWT

          - -

          However, using Adobe Acrobat 8 Professional v8.1.0 - keygen ZWT also comes with some risks that you should be aware of:

          - -
• You may violate the terms and conditions of Adobe and face legal consequences for using pirated software.
• You may expose your computer to viruses, malware, spyware, etc. that may be hidden in the keygen file or the download links.
• You may encounter errors, bugs, crashes, or compatibility issues with Adobe Acrobat 8 Professional v8.1.0 or other software on your computer.
• You may not receive any updates, patches, fixes, or support from Adobe for Adobe Acrobat 8 Professional v8.1.0.

          Conclusion

          - -

Adobe Acrobat 8 Professional v8.1.0 - keygen ZWT is a tool that can help you activate Adobe Acrobat 8 Professional v8.1.0 for free and enjoy its features and tools without any limitations or restrictions. However, using this software also involves some risks that may harm your computer or your legal standing. Therefore, we do not recommend using this software and advise you to purchase a genuine license from Adobe instead.

          - -

          If you have any questions or comments about this article, please feel free to leave them below or contact us through our website.

          -

          Alternatives to Adobe Acrobat 8 Professional v8.1.0 - keygen ZWT

          - -

If you are not comfortable using Adobe Acrobat 8 Professional v8.1.0 - keygen ZWT, or if you encounter any problems with it, you may want to look for alternatives that can help you activate Adobe Acrobat 8 Professional without the ZWT keygen. Here are some of the options that you can try:

          - -
• Use another keygen for Adobe Acrobat 8 Professional. There are other keygens that can generate serial numbers and activation codes for Adobe Acrobat 8 Professional, such as the EcHoS keygen, the CiM keygen, the SSG keygen, etc. You can find them on various websites and forums, but be careful about the sources and scan them for viruses before using them.
• Use a patch for Adobe Acrobat 8 Professional. A patch is a program that modifies or replaces some files of the original software to bypass the activation process. For example, you can use the Patch CiM for Adobe Acrobat 8 Professional, which replaces the amtlib.dll file in the installation folder with a cracked version. You can download the patch from some links provided in this article or any other reliable source.
• Use a preactivated version of Adobe Acrobat 8 Professional. A preactivated version is a copy that has already been activated by someone else and does not require any further activation from you. You just need to download and install it on your computer and enjoy its features and tools. You can find some preactivated versions of Adobe Acrobat 8 Professional on some websites and forums, but make sure they are safe and clean before downloading them.


          -

          Features and tools of Adobe Acrobat 8 Professional

          - -

Adobe Acrobat 8 Professional offers many features and tools to help you work with PDF files. Some of the main features and tools of Adobe Acrobat 8 Professional are:

          - -
• Create PDF files from various sources, such as Microsoft Office documents, web pages, scanned images, etc.
• Edit PDF files with advanced editing tools, such as adding or deleting text, images, links, headers, footers, etc.
• Export PDF files to other formats, such as Word, Excel, PowerPoint, HTML, etc.
• Sign PDF files with digital signatures or certificates to verify your identity and protect your documents.
• Organize PDF files with tools such as bookmarks, thumbnails, comments, annotations, stamps, etc.
• Share and protect PDF files with options such as email, FTP, web publishing, encryption, password protection, redaction, etc.
• Optimize PDF files for web, print, or mobile devices with settings such as compression, resolution, color management, etc.

          Why use Adobe Acrobat 8 Professional

          - -

Adobe Acrobat 8 Professional can help you improve your productivity and efficiency when working with PDF files. Here are some of the reasons why you should use Adobe Acrobat 8 Professional:

          - -
• You can create professional-looking PDF files that can be viewed and printed on any device or platform.
• You can edit and enhance your PDF files with ease and accuracy.
• You can convert your PDF files to other formats and reuse the content in other applications.
• You can sign and secure your PDF files with confidence and compliance.
• You can organize and manage your PDF files with convenience and clarity.
• You can share and collaborate on your PDF files with anyone, anywhere.
• You can optimize your PDF files for different purposes and audiences.


          -

          Software related to Adobe Acrobat 8 Professional

          - -

Adobe Acrobat 8 Professional is not the only software that can help you work with PDF files. There are other programs related to Adobe Acrobat 8 Professional that offer similar or different features and tools. Some of the programs related to Adobe Acrobat 8 Professional are:

          - -
• Adobe Reader: This is a free application that allows you to view, print, and comment on PDF files. It is compatible with Adobe Acrobat 8 Professional and can open PDF files created by it. You can also use Adobe Reader to fill out and sign PDF forms, and access online services such as Adobe Document Cloud.
• Adobe Acrobat Pro: This is an upgraded version of Adobe Acrobat 8 Professional that offers more features and tools to create, edit, and share PDF files. It supports PDF version 1.7 and has improved performance and compatibility. You can also use Adobe Acrobat Pro to create interactive PDF forms, collect data, compare documents, redact information, and more.
• Adobe Acrobat Standard: This is a simplified version of Adobe Acrobat 8 Professional that offers the essential features and tools to create, edit, and share PDF files. It supports PDF version 1.6 and has a user-friendly interface and workflow. You can also use Adobe Acrobat Standard to convert PDF files to other formats, sign PDF files with digital signatures, and protect PDF files with passwords.
• Adobe Acrobat Elements: This is a volume-licensing version of Adobe Acrobat 8 Professional that offers the core features and tools to create and distribute PDF files. It supports PDF version 1.6 and has a streamlined installation and deployment process. You can also use Adobe Acrobat Elements to add comments, annotations, stamps, watermarks, etc. to your PDF files.
• Adobe Acrobat 3D: This is a specialized version of Adobe Acrobat 8 Professional that offers the features and tools to create and share PDF files with embedded 3D objects. It supports PDF version 1.6 and has a powerful 3D engine and toolkit. You can also use Adobe Acrobat 3D to convert CAD files to PDF files, add animations and interactivity to your 3D objects, and optimize your 3D PDF files for web or print.


          -

          Conclusion

          - -

In this article, we have discussed Adobe Acrobat 8 Professional v8.1.0 - keygen ZWT, a tool that can help you activate Adobe Acrobat 8 Professional v8.1.0 for free and enjoy its features and tools without any limitations or restrictions. We have also covered some of the benefits and risks of using this software, as well as some alternatives, features, and related software for Adobe Acrobat 8 Professional. We hope that this article has been informative and helpful for you.

          - -

          However, we would like to remind you that using Adobe Acrobat 8 Professional v8.1.0 - keygen ZWT is illegal and unethical, and may expose you to various problems and consequences. Therefore, we strongly advise you to purchase a genuine license from Adobe instead and support the developers who work hard to create and improve this software. By doing so, you will not only avoid any legal issues, but also enjoy the latest updates, patches, fixes, and support from Adobe for Adobe Acrobat 8 Professional.

          - -

          If you have any questions or comments about this article, please feel free to leave them below or contact us through our website. Thank you for reading.

          3cee63e6c2
          -
          -
          \ No newline at end of file diff --git a/spaces/sczhou/ProPainter/web-demos/hugging_face/tracker/model/cutie.py b/spaces/sczhou/ProPainter/web-demos/hugging_face/tracker/model/cutie.py deleted file mode 100644 index 82c5652a3f3d657ab71ed208cd11ca2322608d7a..0000000000000000000000000000000000000000 --- a/spaces/sczhou/ProPainter/web-demos/hugging_face/tracker/model/cutie.py +++ /dev/null @@ -1,249 +0,0 @@ -from typing import List, Dict -import logging -from omegaconf import DictConfig -import torch -import torch.nn as nn - -from tracker.model.modules import * -from tracker.model.big_modules import * -from tracker.model.aux_modules import AuxComputer -from tracker.model.utils.memory_utils import * -from tracker.model.transformer.object_transformer import QueryTransformer -from tracker.model.transformer.object_summarizer import ObjectSummarizer -from tracker.utils.tensor_utils import aggregate - -log = logging.getLogger() - - -class CUTIE(nn.Module): - def __init__(self, cfg: DictConfig, *, single_object=False): - super().__init__() - model_cfg = cfg.model - self.ms_dims = model_cfg.pixel_encoder.ms_dims - self.key_dim = model_cfg.key_dim - self.value_dim = model_cfg.value_dim - self.sensory_dim = model_cfg.sensory_dim - self.pixel_dim = model_cfg.pixel_dim - self.embed_dim = model_cfg.embed_dim - self.single_object = single_object - - log.info(f'Single object: {self.single_object}') - - self.pixel_encoder = PixelEncoder(model_cfg) - self.pix_feat_proj = nn.Conv2d(self.ms_dims[0], self.pixel_dim, kernel_size=1) - self.key_proj = KeyProjection(model_cfg) - self.mask_encoder = MaskEncoder(model_cfg, single_object=single_object) - self.mask_decoder = MaskDecoder(model_cfg) - self.pixel_fuser = PixelFeatureFuser(model_cfg, single_object=single_object) - self.object_transformer = QueryTransformer(model_cfg) - self.object_summarizer = ObjectSummarizer(model_cfg) - self.aux_computer = AuxComputer(cfg) - - self.register_buffer("pixel_mean", torch.Tensor(model_cfg.pixel_mean).view(-1, 1, 1), False) - self.register_buffer("pixel_std", torch.Tensor(model_cfg.pixel_std).view(-1, 1, 1), False) - - def _get_others(self, masks: torch.Tensor) -> torch.Tensor: - # for each object, return the sum of masks of all other objects - if self.single_object: - return None - - num_objects = masks.shape[1] - if num_objects >= 1: - others = (masks.sum(dim=1, keepdim=True) - masks).clamp(0, 1) - else: - others = torch.zeros_like(masks) - return others - - def encode_image(self, image: torch.Tensor) -> (Iterable[torch.Tensor], torch.Tensor): - image = (image - self.pixel_mean) / self.pixel_std - ms_image_feat = self.pixel_encoder(image) - return ms_image_feat, self.pix_feat_proj(ms_image_feat[0]) - - def encode_mask( - self, - image: torch.Tensor, - ms_features: List[torch.Tensor], - sensory: torch.Tensor, - masks: torch.Tensor, - *, - deep_update: bool = True, - chunk_size: int = -1, - need_weights: bool = False) -> (torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor): - image = (image - self.pixel_mean) / self.pixel_std - others = self._get_others(masks) - mask_value, new_sensory = self.mask_encoder(image, - ms_features, - sensory, - masks, - others, - deep_update=deep_update, - chunk_size=chunk_size) - object_summaries, object_logits = self.object_summarizer(masks, mask_value, need_weights) - return mask_value, new_sensory, object_summaries, object_logits - - def transform_key(self, - final_pix_feat: torch.Tensor, - *, - need_sk: bool = True, - need_ek: bool = True) -> (torch.Tensor, torch.Tensor, 
torch.Tensor): - key, shrinkage, selection = self.key_proj(final_pix_feat, need_s=need_sk, need_e=need_ek) - return key, shrinkage, selection - - # Used in training only. - # This step is replaced by MemoryManager in test time - def read_memory(self, query_key: torch.Tensor, query_selection: torch.Tensor, - memory_key: torch.Tensor, memory_shrinkage: torch.Tensor, - msk_value: torch.Tensor, obj_memory: torch.Tensor, pix_feat: torch.Tensor, - sensory: torch.Tensor, last_mask: torch.Tensor, - selector: torch.Tensor) -> (torch.Tensor, Dict[str, torch.Tensor]): - """ - query_key : B * CK * H * W - query_selection : B * CK * H * W - memory_key : B * CK * T * H * W - memory_shrinkage: B * 1 * T * H * W - msk_value : B * num_objects * CV * T * H * W - obj_memory : B * num_objects * T * num_summaries * C - pixel_feature : B * C * H * W - """ - batch_size, num_objects = msk_value.shape[:2] - - # read using visual attention - with torch.cuda.amp.autocast(enabled=False): - affinity = get_affinity(memory_key.float(), memory_shrinkage.float(), query_key.float(), - query_selection.float()) - - msk_value = msk_value.flatten(start_dim=1, end_dim=2).float() - - # B * (num_objects*CV) * H * W - pixel_readout = readout(affinity, msk_value) - pixel_readout = pixel_readout.view(batch_size, num_objects, self.value_dim, - *pixel_readout.shape[-2:]) - pixel_readout = self.pixel_fusion(pix_feat, pixel_readout, sensory, last_mask) - - # read from query transformer - mem_readout, aux_features = self.readout_query(pixel_readout, obj_memory, selector=selector) - - aux_output = { - 'sensory': sensory, - 'q_logits': aux_features['logits'] if aux_features else None, - 'attn_mask': aux_features['attn_mask'] if aux_features else None, - } - - return mem_readout, aux_output - - def pixel_fusion(self, - pix_feat: torch.Tensor, - pixel: torch.Tensor, - sensory: torch.Tensor, - last_mask: torch.Tensor, - *, - chunk_size: int = -1) -> torch.Tensor: - last_mask = F.interpolate(last_mask, size=sensory.shape[-2:], mode='area') - last_others = self._get_others(last_mask) - fused = self.pixel_fuser(pix_feat, - pixel, - sensory, - last_mask, - last_others, - chunk_size=chunk_size) - return fused - - def readout_query(self, - pixel_readout, - obj_memory, - *, - selector=None, - need_weights=False) -> (torch.Tensor, Dict[str, torch.Tensor]): - return self.object_transformer(pixel_readout, - obj_memory, - selector=selector, - need_weights=need_weights) - - def segment(self, - ms_image_feat: List[torch.Tensor], - memory_readout: torch.Tensor, - sensory: torch.Tensor, - *, - selector: bool = None, - chunk_size: int = -1, - update_sensory: bool = True) -> (torch.Tensor, torch.Tensor, torch.Tensor): - """ - multi_scale_features is from the key encoder for skip-connection - memory_readout is from working/long-term memory - sensory is the sensory memory - last_mask is the mask from the last frame, supplementing sensory memory - selector is 1 if an object exists, and 0 otherwise. We use it to filter padded objects - during training. 
- """ - sensory, logits = self.mask_decoder(ms_image_feat, - memory_readout, - sensory, - chunk_size=chunk_size, - update_sensory=update_sensory) - - prob = torch.sigmoid(logits) - if selector is not None: - prob = prob * selector - - # Softmax over all objects[] - logits = aggregate(prob, dim=1) - logits = F.interpolate(logits, scale_factor=4, mode='bilinear', align_corners=False) - prob = F.softmax(logits, dim=1) - - return sensory, logits, prob - - def compute_aux(self, pix_feat: torch.Tensor, aux_inputs: Dict[str, torch.Tensor], - selector: torch.Tensor) -> Dict[str, torch.Tensor]: - return self.aux_computer(pix_feat, aux_inputs, selector) - - def forward(self, *args, **kwargs): - raise NotImplementedError - - def load_weights(self, src_dict, init_as_zero_if_needed=False) -> None: - if not self.single_object: - # Map single-object weight to multi-object weight (4->5 out channels in conv1) - for k in list(src_dict.keys()): - if k == 'mask_encoder.conv1.weight': - if src_dict[k].shape[1] == 4: - log.info(f'Converting {k} from single object to multiple objects.') - pads = torch.zeros((64, 1, 7, 7), device=src_dict[k].device) - if not init_as_zero_if_needed: - nn.init.orthogonal_(pads) - log.info(f'Randomly initialized padding for {k}.') - else: - log.info(f'Zero-initialized padding for {k}.') - src_dict[k] = torch.cat([src_dict[k], pads], 1) - elif k == 'pixel_fuser.sensory_compress.weight': - if src_dict[k].shape[1] == self.sensory_dim + 1: - log.info(f'Converting {k} from single object to multiple objects.') - pads = torch.zeros((self.value_dim, 1, 1, 1), device=src_dict[k].device) - if not init_as_zero_if_needed: - nn.init.orthogonal_(pads) - log.info(f'Randomly initialized padding for {k}.') - else: - log.info(f'Zero-initialized padding for {k}.') - src_dict[k] = torch.cat([src_dict[k], pads], 1) - elif self.single_object: - """ - If the model is multiple-object and we are training in single-object, - we strip the last channel of conv1. - This is not supposed to happen in standard training except when users are trying to - finetune a trained model with single object datasets. - """ - if src_dict['mask_encoder.conv1.weight'].shape[1] == 5: - log.warning(f'Converting {k} from multiple objects to single object.' 
- 'This is not supposed to happen in standard training.') - src_dict[k] = src_dict[k][:, :-1] - - for k in src_dict: - if k not in self.state_dict(): - log.info(f'Key {k} found in src_dict but not in self.state_dict()!!!') - for k in self.state_dict(): - if k not in src_dict: - log.info(f'Key {k} found in self.state_dict() but not in src_dict!!!') - - self.load_state_dict(src_dict, strict=False) - - @property - def device(self) -> torch.device: - return self.pixel_mean.device \ No newline at end of file diff --git a/spaces/segments-tobias/conex/espnet/transform/spec_augment.py b/spaces/segments-tobias/conex/espnet/transform/spec_augment.py deleted file mode 100644 index 789bf187a2d5264a585e9153cf62c89c7ace6a7f..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet/transform/spec_augment.py +++ /dev/null @@ -1,202 +0,0 @@ -"""Spec Augment module for preprocessing i.e., data augmentation""" - -import random - -import numpy -from PIL import Image -from PIL.Image import BICUBIC - -from espnet.transform.functional import FuncTrans - - -def time_warp(x, max_time_warp=80, inplace=False, mode="PIL"): - """time warp for spec augment - - move random center frame by the random width ~ uniform(-window, window) - :param numpy.ndarray x: spectrogram (time, freq) - :param int max_time_warp: maximum time frames to warp - :param bool inplace: overwrite x with the result - :param str mode: "PIL" (default, fast, not differentiable) or "sparse_image_warp" - (slow, differentiable) - :returns numpy.ndarray: time warped spectrogram (time, freq) - """ - window = max_time_warp - if mode == "PIL": - t = x.shape[0] - if t - window <= window: - return x - # NOTE: randrange(a, b) emits a, a + 1, ..., b - 1 - center = random.randrange(window, t - window) - warped = random.randrange(center - window, center + window) + 1 # 1 ... t - 1 - - left = Image.fromarray(x[:center]).resize((x.shape[1], warped), BICUBIC) - right = Image.fromarray(x[center:]).resize((x.shape[1], t - warped), BICUBIC) - if inplace: - x[:warped] = left - x[warped:] = right - return x - return numpy.concatenate((left, right), 0) - elif mode == "sparse_image_warp": - import torch - - from espnet.utils import spec_augment - - # TODO(karita): make this differentiable again - return spec_augment.time_warp(torch.from_numpy(x), window).numpy() - else: - raise NotImplementedError( - "unknown resize mode: " - + mode - + ", choose one from (PIL, sparse_image_warp)." 
- ) - - -class TimeWarp(FuncTrans): - _func = time_warp - __doc__ = time_warp.__doc__ - - def __call__(self, x, train): - if not train: - return x - return super().__call__(x) - - -def freq_mask(x, F=30, n_mask=2, replace_with_zero=True, inplace=False): - """freq mask for spec agument - - :param numpy.ndarray x: (time, freq) - :param int n_mask: the number of masks - :param bool inplace: overwrite - :param bool replace_with_zero: pad zero on mask if true else use mean - """ - if inplace: - cloned = x - else: - cloned = x.copy() - - num_mel_channels = cloned.shape[1] - fs = numpy.random.randint(0, F, size=(n_mask, 2)) - - for f, mask_end in fs: - f_zero = random.randrange(0, num_mel_channels - f) - mask_end += f_zero - - # avoids randrange error if values are equal and range is empty - if f_zero == f_zero + f: - continue - - if replace_with_zero: - cloned[:, f_zero:mask_end] = 0 - else: - cloned[:, f_zero:mask_end] = cloned.mean() - return cloned - - -class FreqMask(FuncTrans): - _func = freq_mask - __doc__ = freq_mask.__doc__ - - def __call__(self, x, train): - if not train: - return x - return super().__call__(x) - - -def time_mask(spec, T=40, n_mask=2, replace_with_zero=True, inplace=False): - """freq mask for spec agument - - :param numpy.ndarray spec: (time, freq) - :param int n_mask: the number of masks - :param bool inplace: overwrite - :param bool replace_with_zero: pad zero on mask if true else use mean - """ - if inplace: - cloned = spec - else: - cloned = spec.copy() - len_spectro = cloned.shape[0] - ts = numpy.random.randint(0, T, size=(n_mask, 2)) - for t, mask_end in ts: - # avoid randint range error - if len_spectro - t <= 0: - continue - t_zero = random.randrange(0, len_spectro - t) - - # avoids randrange error if values are equal and range is empty - if t_zero == t_zero + t: - continue - - mask_end += t_zero - if replace_with_zero: - cloned[t_zero:mask_end] = 0 - else: - cloned[t_zero:mask_end] = cloned.mean() - return cloned - - -class TimeMask(FuncTrans): - _func = time_mask - __doc__ = time_mask.__doc__ - - def __call__(self, x, train): - if not train: - return x - return super().__call__(x) - - -def spec_augment( - x, - resize_mode="PIL", - max_time_warp=80, - max_freq_width=27, - n_freq_mask=2, - max_time_width=100, - n_time_mask=2, - inplace=True, - replace_with_zero=True, -): - """spec agument - - apply random time warping and time/freq masking - default setting is based on LD (Librispeech double) in Table 2 - https://arxiv.org/pdf/1904.08779.pdf - - :param numpy.ndarray x: (time, freq) - :param str resize_mode: "PIL" (fast, nondifferentiable) or "sparse_image_warp" - (slow, differentiable) - :param int max_time_warp: maximum frames to warp the center frame in spectrogram (W) - :param int freq_mask_width: maximum width of the random freq mask (F) - :param int n_freq_mask: the number of the random freq mask (m_F) - :param int time_mask_width: maximum width of the random time mask (T) - :param int n_time_mask: the number of the random time mask (m_T) - :param bool inplace: overwrite intermediate array - :param bool replace_with_zero: pad zero on mask if true else use mean - """ - assert isinstance(x, numpy.ndarray) - assert x.ndim == 2 - x = time_warp(x, max_time_warp, inplace=inplace, mode=resize_mode) - x = freq_mask( - x, - max_freq_width, - n_freq_mask, - inplace=inplace, - replace_with_zero=replace_with_zero, - ) - x = time_mask( - x, - max_time_width, - n_time_mask, - inplace=inplace, - replace_with_zero=replace_with_zero, - ) - return x - - -class 
SpecAugment(FuncTrans): - _func = spec_augment - __doc__ = spec_augment.__doc__ - - def __call__(self, x, train): - if not train: - return x - return super().__call__(x) diff --git a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/export_onnx.py b/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/export_onnx.py deleted file mode 100644 index 4c5907fa5f8d6df955e7a74dffc8753cd3a3af62..0000000000000000000000000000000000000000 --- a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/export_onnx.py +++ /dev/null @@ -1,85 +0,0 @@ -from infer_pack.models_onnx_moess import SynthesizerTrnMs256NSFsidM -from infer_pack.models_onnx import SynthesizerTrnMs256NSFsidO -import torch - -if __name__ == "__main__": - MoeVS = True # 模型是否为MoeVoiceStudio(原MoeSS)使用 - - ModelPath = "Shiroha/shiroha.pth" # 模型路径 - ExportedPath = "model.onnx" # 输出路径 - hidden_channels = 256 # hidden_channels,为768Vec做准备 - cpt = torch.load(ModelPath, map_location="cpu") - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk - print(*cpt["config"]) - - test_phone = torch.rand(1, 200, hidden_channels) # hidden unit - test_phone_lengths = torch.tensor([200]).long() # hidden unit 长度(貌似没啥用) - test_pitch = torch.randint(size=(1, 200), low=5, high=255) # 基频(单位赫兹) - test_pitchf = torch.rand(1, 200) # nsf基频 - test_ds = torch.LongTensor([0]) # 说话人ID - test_rnd = torch.rand(1, 192, 200) # 噪声(加入随机因子) - - device = "cpu" # 导出时设备(不影响使用模型) - - if MoeVS: - net_g = SynthesizerTrnMs256NSFsidM( - *cpt["config"], is_half=False - ) # fp32导出(C++要支持fp16必须手动将内存重新排列所以暂时不用fp16) - net_g.load_state_dict(cpt["weight"], strict=False) - input_names = ["phone", "phone_lengths", "pitch", "pitchf", "ds", "rnd"] - output_names = [ - "audio", - ] - torch.onnx.export( - net_g, - ( - test_phone.to(device), - test_phone_lengths.to(device), - test_pitch.to(device), - test_pitchf.to(device), - test_ds.to(device), - test_rnd.to(device), - ), - ExportedPath, - dynamic_axes={ - "phone": [1], - "pitch": [1], - "pitchf": [1], - "rnd": [2], - }, - do_constant_folding=False, - opset_version=16, - verbose=False, - input_names=input_names, - output_names=output_names, - ) - else: - net_g = SynthesizerTrnMs256NSFsidO( - *cpt["config"], is_half=False - ) # fp32导出(C++要支持fp16必须手动将内存重新排列所以暂时不用fp16) - net_g.load_state_dict(cpt["weight"], strict=False) - input_names = ["phone", "phone_lengths", "pitch", "pitchf", "ds"] - output_names = [ - "audio", - ] - torch.onnx.export( - net_g, - ( - test_phone.to(device), - test_phone_lengths.to(device), - test_pitch.to(device), - test_pitchf.to(device), - test_ds.to(device), - ), - ExportedPath, - dynamic_axes={ - "phone": [1], - "pitch": [1], - "pitchf": [1], - }, - do_constant_folding=False, - opset_version=16, - verbose=False, - input_names=input_names, - output_names=output_names, - ) diff --git a/spaces/shi-labs/FcF-Inpainting/training/losses/ade20k/segm_lib/utils/data/sampler.py b/spaces/shi-labs/FcF-Inpainting/training/losses/ade20k/segm_lib/utils/data/sampler.py deleted file mode 100644 index 62a9a43bd1d4c21fbdcb262db7da8d4fe27b26de..0000000000000000000000000000000000000000 --- a/spaces/shi-labs/FcF-Inpainting/training/losses/ade20k/segm_lib/utils/data/sampler.py +++ /dev/null @@ -1,131 +0,0 @@ -import torch - - -class Sampler(object): - """Base class for all Samplers. - - Every Sampler subclass has to provide an __iter__ method, providing a way - to iterate over indices of dataset elements, and a __len__ method that - returns the length of the returned iterators. 
- """ - - def __init__(self, data_source): - pass - - def __iter__(self): - raise NotImplementedError - - def __len__(self): - raise NotImplementedError - - -class SequentialSampler(Sampler): - """Samples elements sequentially, always in the same order. - - Arguments: - data_source (Dataset): dataset to sample from - """ - - def __init__(self, data_source): - self.data_source = data_source - - def __iter__(self): - return iter(range(len(self.data_source))) - - def __len__(self): - return len(self.data_source) - - -class RandomSampler(Sampler): - """Samples elements randomly, without replacement. - - Arguments: - data_source (Dataset): dataset to sample from - """ - - def __init__(self, data_source): - self.data_source = data_source - - def __iter__(self): - return iter(torch.randperm(len(self.data_source)).long()) - - def __len__(self): - return len(self.data_source) - - -class SubsetRandomSampler(Sampler): - """Samples elements randomly from a given list of indices, without replacement. - - Arguments: - indices (list): a list of indices - """ - - def __init__(self, indices): - self.indices = indices - - def __iter__(self): - return (self.indices[i] for i in torch.randperm(len(self.indices))) - - def __len__(self): - return len(self.indices) - - -class WeightedRandomSampler(Sampler): - """Samples elements from [0,..,len(weights)-1] with given probabilities (weights). - - Arguments: - weights (list) : a list of weights, not necessary summing up to one - num_samples (int): number of samples to draw - replacement (bool): if ``True``, samples are drawn with replacement. - If not, they are drawn without replacement, which means that when a - sample index is drawn for a row, it cannot be drawn again for that row. - """ - - def __init__(self, weights, num_samples, replacement=True): - self.weights = torch.DoubleTensor(weights) - self.num_samples = num_samples - self.replacement = replacement - - def __iter__(self): - return iter(torch.multinomial(self.weights, self.num_samples, self.replacement)) - - def __len__(self): - return self.num_samples - - -class BatchSampler(object): - """Wraps another sampler to yield a mini-batch of indices. - - Args: - sampler (Sampler): Base sampler. - batch_size (int): Size of mini-batch. 
- drop_last (bool): If ``True``, the sampler will drop the last batch if - its size would be less than ``batch_size`` - - Example: - >>> list(BatchSampler(range(10), batch_size=3, drop_last=False)) - [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]] - >>> list(BatchSampler(range(10), batch_size=3, drop_last=True)) - [[0, 1, 2], [3, 4, 5], [6, 7, 8]] - """ - - def __init__(self, sampler, batch_size, drop_last): - self.sampler = sampler - self.batch_size = batch_size - self.drop_last = drop_last - - def __iter__(self): - batch = [] - for idx in self.sampler: - batch.append(idx) - if len(batch) == self.batch_size: - yield batch - batch = [] - if len(batch) > 0 and not self.drop_last: - yield batch - - def __len__(self): - if self.drop_last: - return len(self.sampler) // self.batch_size - else: - return (len(self.sampler) + self.batch_size - 1) // self.batch_size diff --git a/spaces/shi-labs/Matting-Anything/segment-anything/segment_anything/utils/__init__.py b/spaces/shi-labs/Matting-Anything/segment-anything/segment_anything/utils/__init__.py deleted file mode 100644 index 5277f46157403e47fd830fc519144b97ef69d4ae..0000000000000000000000000000000000000000 --- a/spaces/shi-labs/Matting-Anything/segment-anything/segment_anything/utils/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. diff --git a/spaces/shi-labs/Matting-Anything/utils/util.py b/spaces/shi-labs/Matting-Anything/utils/util.py deleted file mode 100644 index de3cd48830a91a3208c519b76b8385021917b02b..0000000000000000000000000000000000000000 --- a/spaces/shi-labs/Matting-Anything/utils/util.py +++ /dev/null @@ -1,369 +0,0 @@ -import os -import cv2 -import torch -import logging -import numpy as np -from utils.config import CONFIG -import torch.distributed as dist -import torch.nn.functional as F -from skimage.measure import label -import pdb - -def make_dir(target_dir): - """ - Create dir if not exists - """ - if not os.path.exists(target_dir): - os.makedirs(target_dir) - - -def print_network(model, name): - """ - Print out the network information - """ - logger = logging.getLogger("Logger") - num_params = 0 - for p in model.parameters(): - num_params += p.numel() - - logger.info(model) - logger.info(name) - logger.info("Number of parameters: {}".format(num_params)) - - -def update_lr(lr, optimizer): - """ - update learning rates - """ - for param_group in optimizer.param_groups: - param_group['lr'] = lr - - -def warmup_lr(init_lr, step, iter_num): - """ - Warm up learning rate - """ - return step/iter_num*init_lr - - -def add_prefix_state_dict(state_dict, prefix="module"): - """ - add prefix from the key of pretrained state dict for Data-Parallel - """ - new_state_dict = {} - first_state_name = list(state_dict.keys())[0] - if not first_state_name.startswith(prefix): - for key, value in state_dict.items(): - new_state_dict[prefix+"."+key] = state_dict[key].float() - else: - for key, value in state_dict.items(): - new_state_dict[key] = state_dict[key].float() - return new_state_dict - - -def remove_prefix_state_dict(state_dict, prefix="module"): - """ - remove prefix from the key of pretrained state dict for Data-Parallel - """ - new_state_dict = {} - first_state_name = list(state_dict.keys())[0] - if not first_state_name.startswith(prefix): - for key, value in state_dict.items(): - new_state_dict[key] = state_dict[key].float() - else: - for key, value in 
state_dict.items(): - new_state_dict[key[len(prefix)+1:]] = state_dict[key].float() - return new_state_dict - - -def load_imagenet_pretrain(model, checkpoint_file): - """ - Load imagenet pretrained resnet - Add zeros channel to the first convolution layer - Since we have the spectral normalization, we need to do a little more - """ - checkpoint = torch.load(checkpoint_file, map_location = lambda storage, loc: storage.cuda(CONFIG.gpu)) - state_dict = remove_prefix_state_dict(checkpoint['state_dict']) - for key, value in state_dict.items(): - state_dict[key] = state_dict[key].float() - - logger = logging.getLogger("Logger") - logger.debug("Imagenet pretrained keys:") - logger.debug(state_dict.keys()) - logger.debug("Generator keys:") - logger.debug(model.module.encoder.state_dict().keys()) - logger.debug("Intersection keys:") - logger.debug(set(model.module.encoder.state_dict().keys())&set(state_dict.keys())) - - weight_u = state_dict["conv1.module.weight_u"] - weight_v = state_dict["conv1.module.weight_v"] - weight_bar = state_dict["conv1.module.weight_bar"] - - logger.debug("weight_v: {}".format(weight_v)) - logger.debug("weight_bar: {}".format(weight_bar.view(32, -1))) - logger.debug("sigma: {}".format(weight_u.dot(weight_bar.view(32, -1).mv(weight_v)))) - - new_weight_v = torch.zeros((3+CONFIG.model.mask_channel), 3, 3).cuda() - new_weight_bar = torch.zeros(32, (3+CONFIG.model.mask_channel), 3, 3).cuda() - - new_weight_v[:3, :, :].copy_(weight_v.view(3, 3, 3)) - new_weight_bar[:, :3, :, :].copy_(weight_bar) - - logger.debug("new weight_v: {}".format(new_weight_v.view(-1))) - logger.debug("new weight_bar: {}".format(new_weight_bar.view(32, -1))) - logger.debug("new sigma: {}".format(weight_u.dot(new_weight_bar.view(32, -1).mv(new_weight_v.view(-1))))) - - state_dict["conv1.module.weight_v"] = new_weight_v.view(-1) - state_dict["conv1.module.weight_bar"] = new_weight_bar - - model.module.encoder.load_state_dict(state_dict, strict=False) - -def load_imagenet_pretrain_nomask(model, checkpoint_file): - """ - Load imagenet pretrained resnet - Add zeros channel to the first convolution layer - Since we have the spectral normalization, we need to do a little more - """ - checkpoint = torch.load(checkpoint_file, map_location = lambda storage, loc: storage.cuda(CONFIG.gpu)) - state_dict = remove_prefix_state_dict(checkpoint['state_dict']) - for key, value in state_dict.items(): - state_dict[key] = state_dict[key].float() - - logger = logging.getLogger("Logger") - logger.debug("Imagenet pretrained keys:") - logger.debug(state_dict.keys()) - logger.debug("Generator keys:") - logger.debug(model.module.encoder.state_dict().keys()) - logger.debug("Intersection keys:") - logger.debug(set(model.module.encoder.state_dict().keys())&set(state_dict.keys())) - - #weight_u = state_dict["conv1.module.weight_u"] - #weight_v = state_dict["conv1.module.weight_v"] - #weight_bar = state_dict["conv1.module.weight_bar"] - - #logger.debug("weight_v: {}".format(weight_v)) - #logger.debug("weight_bar: {}".format(weight_bar.view(32, -1))) - #logger.debug("sigma: {}".format(weight_u.dot(weight_bar.view(32, -1).mv(weight_v)))) - - #new_weight_v = torch.zeros((3+CONFIG.model.mask_channel), 3, 3).cuda() - #new_weight_bar = torch.zeros(32, (3+CONFIG.model.mask_channel), 3, 3).cuda() - - #new_weight_v[:3, :, :].copy_(weight_v.view(3, 3, 3)) - #new_weight_bar[:, :3, :, :].copy_(weight_bar) - - #logger.debug("new weight_v: {}".format(new_weight_v.view(-1))) - #logger.debug("new weight_bar: {}".format(new_weight_bar.view(32, -1))) 
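# The zero-padding idea used by load_imagenet_pretrain above, shown in a minimal
# form with illustrative tensor shapes: a pretrained 3-channel conv weight is
# extended with an extra zero-initialized input channel for the mask input.
import torch

w_rgb = torch.randn(32, 3, 7, 7)         # pretrained conv1 weight (out, in, kh, kw)
pad = torch.zeros(32, 1, 7, 7)           # zero channel for the additional mask input
w_rgbm = torch.cat([w_rgb, pad], dim=1)  # resulting weight of shape (32, 4, 7, 7)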
- #logger.debug("new sigma: {}".format(weight_u.dot(new_weight_bar.view(32, -1).mv(new_weight_v.view(-1))))) - - #state_dict["conv1.module.weight_v"] = new_weight_v.view(-1) - #state_dict["conv1.module.weight_bar"] = new_weight_bar - - model.module.encoder.load_state_dict(state_dict, strict=False) - -def load_VGG_pretrain(model, checkpoint_file): - """ - Load imagenet pretrained resnet - Add zeros channel to the first convolution layer - Since we have the spectral normalization, we need to do a little more - """ - checkpoint = torch.load(checkpoint_file, map_location = lambda storage, loc: storage.cuda()) - backbone_state_dict = remove_prefix_state_dict(checkpoint['state_dict']) - - model.module.encoder.load_state_dict(backbone_state_dict, strict=False) - - -def get_unknown_tensor(trimap): - """ - get 1-channel unknown area tensor from the 3-channel/1-channel trimap tensor - """ - if trimap.shape[1] == 3: - weight = trimap[:, 1:2, :, :].float() - else: - weight = trimap.eq(1).float() - return weight - -def get_gaborfilter(angles): - """ - generate gabor filter as the conv kernel - :param angles: number of different angles - """ - gabor_filter = [] - for angle in range(angles): - gabor_filter.append(cv2.getGaborKernel(ksize=(5,5), sigma=0.5, theta=angle*np.pi/8, lambd=5, gamma=0.5)) - gabor_filter = np.array(gabor_filter) - gabor_filter = np.expand_dims(gabor_filter, axis=1) - return gabor_filter.astype(np.float32) - - -def get_gradfilter(): - """ - generate gradient filter as the conv kernel - """ - grad_filter = [] - grad_filter.append([[-1, -2, -1], [0, 0, 0], [1, 2, 1]]) - grad_filter.append([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]) - grad_filter = np.array(grad_filter) - grad_filter = np.expand_dims(grad_filter, axis=1) - return grad_filter.astype(np.float32) - - -def reduce_tensor_dict(tensor_dict, mode='mean'): - """ - average tensor dict over different GPUs - """ - for key, tensor in tensor_dict.items(): - if tensor is not None: - tensor_dict[key] = reduce_tensor(tensor, mode) - return tensor_dict - - -def reduce_tensor(tensor, mode='mean'): - """ - average tensor over different GPUs - """ - rt = tensor.clone() - dist.all_reduce(rt, op=dist.ReduceOp.SUM) - if mode == 'mean': - rt /= CONFIG.world_size - elif mode == 'sum': - pass - else: - raise NotImplementedError("reduce mode can only be 'mean' or 'sum'") - return rt - -### preprocess the image and mask for inference (np array), crop based on ROI -def preprocess(image, mask, thres): - mask_ = (mask >= thres).astype(np.float32) - arr = np.nonzero(mask_) - h, w = mask.shape - bbox = [max(0, int(min(arr[0]) - 0.1*h)), - min(h, int(max(arr[0]) + 0.1*h)), - max(0, int(min(arr[1]) - 0.1*w)), - min(w, int(max(arr[1]) + 0.1*w))] - image = image[bbox[0]:bbox[1], bbox[2]:bbox[3], :] - mask = mask[bbox[0]:bbox[1], bbox[2]:bbox[3]] - return image, mask, bbox - -### postprocess the alpha prediction to keep the largest connected component (np array) and uncrop, alpha in [0, 1] -### based on https://github.com/senguptaumd/Background-Matting/blob/master/test_background-matting_image.py -def postprocess(alpha, orih=None, oriw=None, bbox=None): - labels=label((alpha>0.05).astype(int)) - try: - assert( labels.max() != 0 ) - except: - return None - largestCC = labels == np.argmax(np.bincount(labels.flat)[1:])+1 - alpha = alpha * largestCC - if bbox is None: - return alpha - else: - ori_alpha = np.zeros(shape=[orih, oriw], dtype=np.float32) - ori_alpha[bbox[0]:bbox[1], bbox[2]:bbox[3]] = alpha - return ori_alpha - - -Kernels = [None] + 
[cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (size, size)) for size in range(1,30)] -def get_unknown_tensor_from_pred(pred, rand_width=30, train_mode=True): - ### pred: N, 1 ,H, W - N, C, H, W = pred.shape - - pred = pred.data.cpu().numpy() - uncertain_area = np.ones_like(pred, dtype=np.uint8) - uncertain_area[pred<1.0/255.0] = 0 - uncertain_area[pred>1-1.0/255.0] = 0 - for n in range(N): - uncertain_area_ = uncertain_area[n,0,:,:] # H, W - if train_mode: - width = np.random.randint(1, rand_width) - else: - width = rand_width // 2 - uncertain_area_ = cv2.dilate(uncertain_area_, Kernels[width]) - uncertain_area[n,0,:,:] = uncertain_area_ - weight = np.zeros_like(uncertain_area) - weight[uncertain_area == 1] = 1 - weight = torch.from_numpy(weight).cuda() - - return weight - -def get_unknown_tensor_from_pred_oneside(pred, rand_width=30, train_mode=True): - ### pred: N, 1 ,H, W - N, C, H, W = pred.shape - pred = pred.data.cpu().numpy() - uncertain_area = np.ones_like(pred, dtype=np.uint8) - uncertain_area[pred<1.0/255.0] = 0 - #uncertain_area[pred>1-1.0/255.0] = 0 - for n in range(N): - uncertain_area_ = uncertain_area[n,0,:,:] # H, W - if train_mode: - width = np.random.randint(1, rand_width) - else: - width = rand_width // 2 - uncertain_area_ = cv2.dilate(uncertain_area_, Kernels[width]) - uncertain_area[n,0,:,:] = uncertain_area_ - uncertain_area[pred>1-1.0/255.0] = 0 - #weight = np.zeros_like(uncertain_area) - #weight[uncertain_area == 1] = 1 - weight = torch.from_numpy(uncertain_area).cuda() - return weight - -Kernels_mask = [None] + [cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (size, size)) for size in range(1,30)] -def get_unknown_tensor_from_mask(mask, rand_width=30, train_mode=True): - """ - get 1-channel unknown area tensor from the 3-channel/1-channel trimap tensor - """ - N, C, H, W = mask.shape - mask_c = mask.data.cpu().numpy().astype(np.uint8) - - weight = np.ones_like(mask_c, dtype=np.uint8) - - for n in range(N): - if train_mode: - width = np.random.randint(rand_width // 2, rand_width) - else: - width = rand_width // 2 - fg_mask = cv2.erode(mask_c[n,0], Kernels_mask[width]) - bg_mask = cv2.erode(1 - mask_c[n,0], Kernels_mask[width]) - weight[n,0][fg_mask==1] = 0 - weight[n,0][bg_mask==1] = 0 - weight = torch.from_numpy(weight).cuda() - return weight - -def get_unknown_tensor_from_mask_oneside(mask, rand_width=30, train_mode=True): - """ - get 1-channel unknown area tensor from the 3-channel/1-channel trimap tensor - """ - N, C, H, W = mask.shape - mask_c = mask.data.cpu().numpy().astype(np.uint8) - - weight = np.ones_like(mask_c, dtype=np.uint8) - - for n in range(N): - if train_mode: - width = np.random.randint(rand_width // 2, rand_width) - else: - width = rand_width // 2 - #fg_mask = cv2.erode(mask_c[n,0], Kernels_mask[width]) - fg_mask = mask_c[n,0] - bg_mask = cv2.erode(1 - mask_c[n,0], Kernels_mask[width]) - weight[n,0][fg_mask==1] = 0 - weight[n,0][bg_mask==1] = 0 - weight = torch.from_numpy(weight).cuda() - return weight - -def get_unknown_box_from_mask(mask): - """ - get 1-channel unknown area tensor from the 3-channel/1-channel trimap tensor - """ - N, C, H, W = mask.shape - mask_c = mask.data.cpu().numpy().astype(np.uint8) - - weight = np.ones_like(mask_c, dtype=np.uint8) - fg_set = np.where(mask_c[0][0] != 0) - x_min = np.min(fg_set[1]) - x_max = np.max(fg_set[1]) - y_min = np.min(fg_set[0]) - y_max = np.max(fg_set[0]) - - weight[0, 0, y_min:y_max, x_min:x_max] = 0 - weight = torch.from_numpy(weight).cuda() - return weight \ No newline at end of file diff --git 
a/spaces/sidharthism/fashion-eye/netdissect/easydict.py b/spaces/sidharthism/fashion-eye/netdissect/easydict.py deleted file mode 100644 index 0188f524b87eef75c175772ff262b93b47919ba7..0000000000000000000000000000000000000000 --- a/spaces/sidharthism/fashion-eye/netdissect/easydict.py +++ /dev/null @@ -1,126 +0,0 @@ -''' -From https://github.com/makinacorpus/easydict. -''' - -class EasyDict(dict): - """ - Get attributes - - >>> d = EasyDict({'foo':3}) - >>> d['foo'] - 3 - >>> d.foo - 3 - >>> d.bar - Traceback (most recent call last): - ... - AttributeError: 'EasyDict' object has no attribute 'bar' - - Works recursively - - >>> d = EasyDict({'foo':3, 'bar':{'x':1, 'y':2}}) - >>> isinstance(d.bar, dict) - True - >>> d.bar.x - 1 - - Bullet-proof - - >>> EasyDict({}) - {} - >>> EasyDict(d={}) - {} - >>> EasyDict(None) - {} - >>> d = {'a': 1} - >>> EasyDict(**d) - {'a': 1} - - Set attributes - - >>> d = EasyDict() - >>> d.foo = 3 - >>> d.foo - 3 - >>> d.bar = {'prop': 'value'} - >>> d.bar.prop - 'value' - >>> d - {'foo': 3, 'bar': {'prop': 'value'}} - >>> d.bar.prop = 'newer' - >>> d.bar.prop - 'newer' - - - Values extraction - - >>> d = EasyDict({'foo':0, 'bar':[{'x':1, 'y':2}, {'x':3, 'y':4}]}) - >>> isinstance(d.bar, list) - True - >>> from operator import attrgetter - >>> map(attrgetter('x'), d.bar) - [1, 3] - >>> map(attrgetter('y'), d.bar) - [2, 4] - >>> d = EasyDict() - >>> d.keys() - [] - >>> d = EasyDict(foo=3, bar=dict(x=1, y=2)) - >>> d.foo - 3 - >>> d.bar.x - 1 - - Still like a dict though - - >>> o = EasyDict({'clean':True}) - >>> o.items() - [('clean', True)] - - And like a class - - >>> class Flower(EasyDict): - ... power = 1 - ... - >>> f = Flower() - >>> f.power - 1 - >>> f = Flower({'height': 12}) - >>> f.height - 12 - >>> f['power'] - 1 - >>> sorted(f.keys()) - ['height', 'power'] - """ - def __init__(self, d=None, **kwargs): - if d is None: - d = {} - if kwargs: - d.update(**kwargs) - for k, v in d.items(): - setattr(self, k, v) - # Class attributes - for k in self.__class__.__dict__.keys(): - if not (k.startswith('__') and k.endswith('__')): - setattr(self, k, getattr(self, k)) - - def __setattr__(self, name, value): - if isinstance(value, (list, tuple)): - value = [self.__class__(x) - if isinstance(x, dict) else x for x in value] - elif isinstance(value, dict) and not isinstance(value, self.__class__): - value = self.__class__(value) - super(EasyDict, self).__setattr__(name, value) - super(EasyDict, self).__setitem__(name, value) - - __setitem__ = __setattr__ - -def load_json(filename): - import json - with open(filename) as f: - return EasyDict(json.load(f)) - -if __name__ == "__main__": - import doctest - doctest.testmod() diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Okey Dokey MP3 Songs for Free on Ilkpop - Best Music Site.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Okey Dokey MP3 Songs for Free on Ilkpop - Best Music Site.md deleted file mode 100644 index 43b67b53ca78eb2de247919e111700d5b1caa65d..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Okey Dokey MP3 Songs for Free on Ilkpop - Best Music Site.md +++ /dev/null @@ -1,75 +0,0 @@ - -

          Okey Dokey MP3 Download Ilkpop: How to Enjoy This Catchy Song

          -

          If you are a fan of K-pop, you might have heard of the song Okey Dokey by MINO and ZICO. This song is a catchy and upbeat rap track that was released in 2015 as part of the show Show Me The Money 4. The song became a hit and has been streamed millions of times on various platforms. But how can you enjoy this song offline? One way is to download the MP3 file from Ilkpop, a popular K-pop music site. In this article, we will tell you everything you need to know about Okey Dokey, Ilkpop, and how to download the song from there.

          -

          okey dokey mp3 download ilkpop


Download Zip https://ssurll.com/2uNTWK



          -

          What is Okey Dokey?

          -

          Okey Dokey is a phrase that means "okay" or "all right". It is an informal and playful way of expressing agreement or assent. But where did this phrase come from and how did it become a song title?

          -

          The meaning and origin of the phrase

          -

          According to Merriam-Webster, Okey Dokey is a reduplication of OK, which is an abbreviation of "all correct". OK was coined in the 19th century by American intellectuals and literary societies who intentionally misspelled words for fun. Okey Dokey first appeared in print in 1932 in an edition of American Speech. There are several alternative spellings, such as okey-doke, okee-doke, okeydoke, etc. One of the most famous variations is okely-dokely, which is used by Ned Flanders in The Simpsons.

          -

          The song by MINO and ZICO

          -

          Okey Dokey is also the title of a song by MINO and ZICO, two Korean rappers who are members of the groups WINNER and Block B respectively. The song was released in 2015 as part of the show Show Me The Money 4, which is a rap competition program. MINO was one of the contestants and ZICO was one of the producers. The song was used as MINO's final performance and featured ZICO as a guest rapper. The song was a huge success and topped various music charts in Korea.

          -


          The lyrics and translation

          -

          The lyrics of Okey Dokey are mostly in Korean, with some English words mixed in. The song is about the rappers' confidence and ambition in pursuing their dreams and money. They use Okey Dokey as a way of affirming their statements and challenging their opponents. The chorus goes like this:

          -
          
Is that true? Yes! Okey dokey yo
Is that true? Yes! Okey dokey yo
Really? Yes! Okey dokey yo
Say la la la la la la la la la la la
          -

          If you want to understand the whole song, you can find the English translation on Genius. You can also watch the video of their performance on YouTube.

          -

          What is Ilkpop?

          -

          Ilkpop is a website that offers free downloads of K-pop songs in MP3 format. It is one of the most popular sites among K-pop fans who want to enjoy their favorite music offline. But what makes Ilkpop so appealing and what are the issues that come with it?

          -

          A popular K-pop music site

          -

          Ilkpop has been around since 201

          How to download Okey Dokey MP3 from Ilkpop?

          -

          If you still want to download Okey Dokey MP3 from Ilkpop, despite the risks and drawbacks, you can follow these steps and tips:

          -

          The steps and tips for downloading

          -
            -
          1. Go to the Ilkpop website and type "Okey Dokey" in the search box. You can also use the direct link to the song page.
          2. On the song page, you will see the details of the song, such as the artist, album, genre, duration, size, and bitrate. You will also see a play button, a download button, and a share button.
          3. Click on the play button to listen to the song online. You can also check the lyrics and translation by clicking on the "View Lyrics" link below the play button.
          4. If you like the song and want to download it, click on the download button. You will be redirected to another page with a captcha code. Enter the code and click on "Download MP3".
          5. A new tab will open with a countdown timer. Wait for a few seconds until the timer reaches zero. Then, click on the "Download Now" button. You will see a pop-up window asking you to save the file. Choose a location and name for the file and click on "Save".
          6. Enjoy your Okey Dokey MP3 file offline!
          -

          Some tips to make your downloading experience smoother and safer are:

          -
            -
          • Use a VPN service or a proxy server to hide your IP address and location. This will help you avoid any legal troubles or geo-restrictions.
          • Use an ad-blocker or a pop-up blocker to prevent any annoying or malicious ads from interrupting your browsing or downloading.
          • Use an antivirus or a malware scanner to scan your device and your downloaded files for any potential threats or infections.
          • Use a reputable and reliable browser that has security features and updates.
          • Do not click on any suspicious or irrelevant links or buttons that may appear on the site or the new tabs.
          -

          The pros and cons of downloading

          -

          Downloading Okey Dokey MP3 from Ilkpop may have some advantages and disadvantages for you. Here are some of them:

          | Pros | Cons |
          | --- | --- |
          | You can listen to the song offline anytime and anywhere. | You are violating the copyright laws and the rights of the artists and producers. |
          | You can save your data and bandwidth by not streaming the song online. | You are risking your device and your personal information by exposing them to viruses, malware, or spyware. |
          | You can share the song with your friends or family via other devices or platforms. | You are not supporting the original creators and distributors of the music by not paying for their work. |
          | You can choose the quality and size of the file according to your preference. | You are missing out on the official updates, features, and benefits of the legitimate music platforms. |
          -

          The alternatives to downloading

          -

          If you are not comfortable with downloading Okey Dokey MP3 from Ilkpop, you can still enjoy the song online by using other alternatives. Some of them are:

          -
            -
          • Streaming the song on YouTube, Spotify, Apple Music, Melon, or other legal music platforms. You can also watch the official music video or other related videos on YouTube.
          • Purchasing the song or the album from iTunes, Amazon, Google Play, or other authorized online stores. You can also buy physical copies from local or online shops.
          • Supporting the artists by following their social media accounts, joining their fan clubs, attending their concerts, buying their merchandise, or donating to their causes.
          -

          Conclusion

          -

          Okey Dokey is a catchy and upbeat rap song by MINO and ZICO that was released in 2015 as part of Show Me The Money 4. The song is about their confidence and ambition in pursuing their dreams and money. The phrase Okey Dokey means "okay" or "all right" and is used as a way of expressing agreement or assent. Ilkpop is a website that offers free downloads of K-pop songs in MP3 format. It is popular among K-pop fans who want to enjoy their favorite music offline. However, Ilkpop is an illegal site that violates the copyright laws and the rights of the artists and producers. By downloading the songs from Ilkpop, you are not supporting the original creators and distributors of the music. You are also exposing yourself to potential legal actions and fines if you are caught. Moreover, Ilkpop may contain viruses, malware, or spyware that can harm your device or steal your personal information. You may also encounter pop-up ads, redirects, or fake links that can trick you into downloading unwanted or harmful programs.

          -

          If you still want to download Okey Dokey MP3 from Ilkpop, you can follow the steps and tips that we provided in this article. However, you should also be aware of the pros and cons of downloading and the alternatives to downloading. We recommend that you use legal and ethical ways of enjoying the song, such as streaming, purchasing, or supporting the artists. This way, you can have a better and safer music experience.

          -

          We hope that this article has helped you learn more about Okey Dokey, Ilkpop, and how to download the song from there. If you have any questions or comments, please feel free to leave them below. Thank you for reading!

          -

          FAQs

          -

          Here are some frequently asked questions about downloading Okey Dokey MP3 from Ilkpop:

          -
            -
          1. Q: Is Okey Dokey a diss track?
             A: No, Okey Dokey is not a diss track. It is a rap song that expresses the rappers' confidence and ambition in pursuing their dreams and money. It does not target or insult any specific person or group.
          2. Q: Is Ilkpop safe to use?
             A: No, Ilkpop is not safe to use. It is an illegal site that violates the copyright laws and the rights of the artists and producers. It may also contain viruses, malware, or spyware that can harm your device or steal your personal information. You may also encounter pop-up ads, redirects, or fake links that can trick you into downloading unwanted or harmful programs.
          3. Q: How can I support MINO and ZICO?
             A: You can support MINO and ZICO by streaming, purchasing, or donating to their music on legal and ethical platforms. You can also follow their social media accounts, join their fan clubs, attend their concerts, buy their merchandise, or donate to their causes.
          4. Q: What are some other songs by MINO and ZICO?
             A: Some other songs by MINO and ZICO are Body, Fiancé, Ok Man, I'm Him, Fear, Okey Dokey Yo (Remix), Any Song, Bermuda Triangle, Artist, Boys And Girls, etc.
          5. Q: What are some other sites like Ilkpop?
             A: Some other sites like Ilkpop are Matikiri, Planetlagu, K2nblog, Mp3komplit, etc. However, we do not recommend using these sites as they are also illegal and unsafe.

          197e85843d
          -
          -
          \ No newline at end of file diff --git a/spaces/skimai/DragGAN_Streamlit/stylegan2/torch_utils/ops/bias_act.cpp b/spaces/skimai/DragGAN_Streamlit/stylegan2/torch_utils/ops/bias_act.cpp deleted file mode 100644 index 5d2425d8054991a8e8b6f7a940fd0ff7fa0bb330..0000000000000000000000000000000000000000 --- a/spaces/skimai/DragGAN_Streamlit/stylegan2/torch_utils/ops/bias_act.cpp +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -// -// NVIDIA CORPORATION and its licensors retain all intellectual property -// and proprietary rights in and to this software, related documentation -// and any modifications thereto. Any use, reproduction, disclosure or -// distribution of this software and related documentation without an express -// license agreement from NVIDIA CORPORATION is strictly prohibited. - -#include -#include -#include -#include "bias_act.h" - -//------------------------------------------------------------------------ - -static bool has_same_layout(torch::Tensor x, torch::Tensor y) -{ - if (x.dim() != y.dim()) - return false; - for (int64_t i = 0; i < x.dim(); i++) - { - if (x.size(i) != y.size(i)) - return false; - if (x.size(i) >= 2 && x.stride(i) != y.stride(i)) - return false; - } - return true; -} - -//------------------------------------------------------------------------ - -static torch::Tensor bias_act(torch::Tensor x, torch::Tensor b, torch::Tensor xref, torch::Tensor yref, torch::Tensor dy, int grad, int dim, int act, float alpha, float gain, float clamp) -{ - // Validate arguments. - TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device"); - TORCH_CHECK(b.numel() == 0 || (b.dtype() == x.dtype() && b.device() == x.device()), "b must have the same dtype and device as x"); - TORCH_CHECK(xref.numel() == 0 || (xref.sizes() == x.sizes() && xref.dtype() == x.dtype() && xref.device() == x.device()), "xref must have the same shape, dtype, and device as x"); - TORCH_CHECK(yref.numel() == 0 || (yref.sizes() == x.sizes() && yref.dtype() == x.dtype() && yref.device() == x.device()), "yref must have the same shape, dtype, and device as x"); - TORCH_CHECK(dy.numel() == 0 || (dy.sizes() == x.sizes() && dy.dtype() == x.dtype() && dy.device() == x.device()), "dy must have the same dtype and device as x"); - TORCH_CHECK(x.numel() <= INT_MAX, "x is too large"); - TORCH_CHECK(b.dim() == 1, "b must have rank 1"); - TORCH_CHECK(b.numel() == 0 || (dim >= 0 && dim < x.dim()), "dim is out of bounds"); - TORCH_CHECK(b.numel() == 0 || b.numel() == x.size(dim), "b has wrong number of elements"); - TORCH_CHECK(grad >= 0, "grad must be non-negative"); - - // Validate layout. - TORCH_CHECK(x.is_non_overlapping_and_dense(), "x must be non-overlapping and dense"); - TORCH_CHECK(b.is_contiguous(), "b must be contiguous"); - TORCH_CHECK(xref.numel() == 0 || has_same_layout(xref, x), "xref must have the same layout as x"); - TORCH_CHECK(yref.numel() == 0 || has_same_layout(yref, x), "yref must have the same layout as x"); - TORCH_CHECK(dy.numel() == 0 || has_same_layout(dy, x), "dy must have the same layout as x"); - - // Create output tensor. - const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); - torch::Tensor y = torch::empty_like(x); - TORCH_CHECK(has_same_layout(y, x), "y must have the same layout as x"); - - // Initialize CUDA kernel parameters. - bias_act_kernel_params p; - p.x = x.data_ptr(); - p.b = (b.numel()) ? b.data_ptr() : NULL; - p.xref = (xref.numel()) ? xref.data_ptr() : NULL; - p.yref = (yref.numel()) ? 
yref.data_ptr() : NULL; - p.dy = (dy.numel()) ? dy.data_ptr() : NULL; - p.y = y.data_ptr(); - p.grad = grad; - p.act = act; - p.alpha = alpha; - p.gain = gain; - p.clamp = clamp; - p.sizeX = (int)x.numel(); - p.sizeB = (int)b.numel(); - p.stepB = (b.numel()) ? (int)x.stride(dim) : 1; - - // Choose CUDA kernel. - void* kernel; - AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] - { - kernel = choose_bias_act_kernel(p); - }); - TORCH_CHECK(kernel, "no CUDA kernel found for the specified activation func"); - - // Launch CUDA kernel. - p.loopX = 4; - int blockSize = 4 * 32; - int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1; - void* args[] = {&p}; - AT_CUDA_CHECK(cudaLaunchKernel(kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream())); - return y; -} - -//------------------------------------------------------------------------ - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) -{ - m.def("bias_act", &bias_act); -} - -//------------------------------------------------------------------------ diff --git a/spaces/sklearn-docs/Kernel-Density-Estimation/app.py b/spaces/sklearn-docs/Kernel-Density-Estimation/app.py deleted file mode 100644 index 25c3d605ae5849f407240461df63efe0329ff468..0000000000000000000000000000000000000000 --- a/spaces/sklearn-docs/Kernel-Density-Estimation/app.py +++ /dev/null @@ -1,88 +0,0 @@ -import gradio as gr -import numpy as np -import matplotlib.pyplot as plt - -from sklearn.datasets import load_digits -from sklearn.neighbors import KernelDensity -from sklearn.decomposition import PCA -from sklearn.model_selection import GridSearchCV - -def generate_digits(bandwidth, num_samples): - - # convert bandwidth to integer - bandwidth = int(bandwidth) - - # convert num_samples to integer - num_samples = int(num_samples) - - # load the data - digits = load_digits() - - # project the 64-dimensional data to a lower dimension - pca = PCA(n_components=15, whiten=False) - data = pca.fit_transform(digits.data) - - # use grid search cross-validation to optimize the bandwidth - params = {"bandwidth": np.logspace(-1, 1, 20)} - grid = GridSearchCV(KernelDensity(), params) - grid.fit(data) - - # use the specified bandwidth to compute the kernel density estimate - kde = KernelDensity(bandwidth=bandwidth) - kde.fit(data) - - # sample new points from the data - new_data = kde.sample(num_samples, random_state=0) - new_data = pca.inverse_transform(new_data) - - # reshape the data into a 4x11 grid - new_data = new_data.reshape((num_samples, 64)) - real_data = digits.data[:num_samples].reshape((num_samples, 64)) - - # create the plot - fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[])) - for j in range(11): - ax[4, j].set_visible(False) - for i in range(4): - index = i * 11 + j # Calculate the correct index - if index < num_samples: - im = ax[i, j].imshow( - real_data[index].reshape((8, 8)), cmap=plt.cm.binary, interpolation="nearest" - ) - im.set_clim(0, 16) - im = ax[i + 5, j].imshow( - new_data[index].reshape((8, 8)), cmap=plt.cm.binary, interpolation="nearest" - ) - im.set_clim(0, 16) - else: - ax[i, j].axis("off") - ax[i + 5, j].axis("off") - - ax[0, 5].set_title("Selection from the input data") - ax[5, 5].set_title('"New" digits drawn from the kernel density model') - - - # save the plot to a file - plt.savefig("digits_plot.png") - - # return the path to the generated plot - return "digits_plot.png" - -# create the Gradio interface -inputs = [ - gr.inputs.Slider(minimum=1, maximum=10, step=1, label="Bandwidth"), - # 
gr.inputs.Number(default=44, label="Number of Samples") - # Change to Slider - gr.inputs.Slider(minimum=1, maximum=100, step=1, label="Number of Samples") -] -output = gr.outputs.Image(type="pil") - -title = "Kernel Density Estimation" -description = "This example shows how kernel density estimation (KDE), a powerful non-parametric density estimation technique, can be used to learn a generative model for a dataset. With this generative model in place, new samples can be drawn. These new samples reflect the underlying model of the data. See the original scikit-learn example here: https://scikit-learn.org/stable/auto_examples/neighbors/plot_digits_kde_sampling.html" -examples = [ - [1, 44], # Changed to integer values - [8, 22], # Changed to integer values - [7, 51] # Changed to integer values -] - -gr.Interface(generate_digits, inputs, output, title=title, description=description, examples=examples, live=True).launch() diff --git a/spaces/sklkd93/CodeFormer/CodeFormer/facelib/detection/yolov5face/models/experimental.py b/spaces/sklkd93/CodeFormer/CodeFormer/facelib/detection/yolov5face/models/experimental.py deleted file mode 100644 index 37ba4c4420789c92dc0e2aaeb3d5b64859ec728c..0000000000000000000000000000000000000000 --- a/spaces/sklkd93/CodeFormer/CodeFormer/facelib/detection/yolov5face/models/experimental.py +++ /dev/null @@ -1,45 +0,0 @@ -# # This file contains experimental modules - -import numpy as np -import torch -from torch import nn - -from facelib.detection.yolov5face.models.common import Conv - - -class CrossConv(nn.Module): - # Cross Convolution Downsample - def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): - # ch_in, ch_out, kernel, stride, groups, expansion, shortcut - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, (1, k), (1, s)) - self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) - self.add = shortcut and c1 == c2 - - def forward(self, x): - return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) - - -class MixConv2d(nn.Module): - # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595 - def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): - super().__init__() - groups = len(k) - if equal_ch: # equal c_ per group - i = torch.linspace(0, groups - 1e-6, c2).floor() # c2 indices - c_ = [(i == g).sum() for g in range(groups)] # intermediate channels - else: # equal weight.numel() per group - b = [c2] + [0] * groups - a = np.eye(groups + 1, groups, k=-1) - a -= np.roll(a, 1, axis=1) - a *= np.array(k) ** 2 - a[0] = 1 - c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b - - self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)]) - self.bn = nn.BatchNorm2d(c2) - self.act = nn.LeakyReLU(0.1, inplace=True) - - def forward(self, x): - return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) diff --git a/spaces/skytnt/moe-tts/utils.py b/spaces/skytnt/moe-tts/utils.py deleted file mode 100644 index 4cb5b43d0ca2bae496e7871b2094f2ffb26ab642..0000000000000000000000000000000000000000 --- a/spaces/skytnt/moe-tts/utils.py +++ /dev/null @@ -1,226 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.ERROR) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert 
os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - 
hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r", encoding="utf-8") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/smjain/smjainvoice/starganv2vc_paddle/Utils/__init__.py b/spaces/smjain/smjainvoice/starganv2vc_paddle/Utils/__init__.py deleted file mode 100644 index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000 --- a/spaces/smjain/smjainvoice/starganv2vc_paddle/Utils/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/spaces/spitfire4794/photo/app.py b/spaces/spitfire4794/photo/app.py deleted file mode 100644 index eac62196070d9acf0fa52431ee24990b305deda6..0000000000000000000000000000000000000000 --- a/spaces/spitfire4794/photo/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/spitfire4794/photo").launch() \ No newline at end of file diff --git a/spaces/sr5434/QuoteGeneration/app.py b/spaces/sr5434/QuoteGeneration/app.py deleted file mode 100644 index 9179b60e80024b15fe13b4753e2722efd3f5fbee..0000000000000000000000000000000000000000 --- a/spaces/sr5434/QuoteGeneration/app.py +++ /dev/null @@ -1,22 +0,0 @@ -import gradio as gr -from transformers import pipeline - -generator = pipeline('text-generation', model='sr5434/gptQuotes', tokenizer='facebook/opt-350m') - -def generate(text): - result = generator("Generate a wise quote:\n" + text, max_length=30, num_return_sequences=1) - return result[0]["generated_text"][23:] - -examples = [ - ['"'], - 
['"The meaning of life'], -] - -demo = gr.Interface( - fn=generate, - inputs=gr.inputs.Textbox(lines=5, label="Input Text"), - outputs=gr.outputs.Textbox(label="Generated Text"), - examples=examples -) - -demo.launch() \ No newline at end of file diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/docs/common_voice_example.md b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/docs/common_voice_example.md deleted file mode 100644 index 40e841b284a7e34b458b286eb0bb60e33c0601da..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/docs/common_voice_example.md +++ /dev/null @@ -1,56 +0,0 @@ -[[Back]](..) - -# Common Voice - -[Common Voice](https://commonvoice.mozilla.org/en/datasets) is a public domain speech corpus with 11.2K hours of read -speech in 76 languages (the latest version 7.0). We provide examples for building -[Transformer](https://arxiv.org/abs/1809.08895) models on this dataset. - - -## Data preparation -[Download](https://commonvoice.mozilla.org/en/datasets) and unpack Common Voice v4 to a path `${DATA_ROOT}/${LANG_ID}`. -Create splits and generate audio manifests with -```bash -python -m examples.speech_synthesis.preprocessing.get_common_voice_audio_manifest \ - --data-root ${DATA_ROOT} \ - --lang ${LANG_ID} \ - --output-manifest-root ${AUDIO_MANIFEST_ROOT} --convert-to-wav -``` - -Then, extract log-Mel spectrograms, generate feature manifest and create data configuration YAML with -```bash -python -m examples.speech_synthesis.preprocessing.get_feature_manifest \ - --audio-manifest-root ${AUDIO_MANIFEST_ROOT} \ - --output-root ${FEATURE_MANIFEST_ROOT} \ - --ipa-vocab --lang ${LANG_ID} -``` -where we use phoneme inputs (`--ipa-vocab`) as example. - -To denoise audio and trim leading/trailing silence using signal processing based VAD, run -```bash -for SPLIT in dev test train; do - python -m examples.speech_synthesis.preprocessing.denoise_and_vad_audio \ - --audio-manifest ${AUDIO_MANIFEST_ROOT}/${SPLIT}.audio.tsv \ - --output-dir ${PROCESSED_DATA_ROOT} \ - --denoise --vad --vad-agg-level 2 -done -``` - - -## Training -(Please refer to [the LJSpeech example](../docs/ljspeech_example.md#transformer).) - - -## Inference -(Please refer to [the LJSpeech example](../docs/ljspeech_example.md#inference).) - -## Automatic Evaluation -(Please refer to [the LJSpeech example](../docs/ljspeech_example.md#automatic-evaluation).) - -## Results - -| Language | Speakers | --arch | Params | Test MCD | Model | -|---|---|---|---|---|---| -| English | 200 | tts_transformer | 54M | 3.8 | [Download](https://dl.fbaipublicfiles.com/fairseq/s2/cv4_en200_transformer_phn.tar) | - -[[Back]](..) diff --git a/spaces/sriramelango/Social_Classification_Public/models/ofa/resnet.py b/spaces/sriramelango/Social_Classification_Public/models/ofa/resnet.py deleted file mode 100644 index 9ad8ee87de4bb579d745ab8302a368ca1749a1fe..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/models/ofa/resnet.py +++ /dev/null @@ -1,225 +0,0 @@ -import torch -import torch.nn as nn - - -def drop_path(x, drop_prob: float = 0., training: bool = False): - """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
- This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, - the original name is misleading as 'Drop Connect' is a.sh different form of dropout in a.sh separate paper... - See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for - changing the layer and argument names to 'drop path' rather than mix DropConnect as a.sh layer name and use - 'survival rate' as the argument. - """ - if drop_prob == 0. or not training: - return x - keep_prob = 1 - drop_prob - shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets - random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) - random_tensor.floor_() # binarize - output = x.div(keep_prob) * random_tensor - return output - - -class DropPath(nn.Module): - """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). - """ - def __init__(self, drop_prob=None): - super(DropPath, self).__init__() - self.drop_prob = drop_prob - - def forward(self, x): - return drop_path(x, self.drop_prob, self.training) - - -def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): - """3x3 convolution with padding""" - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=dilation, groups=groups, bias=False, dilation=dilation) - - -def conv1x1(in_planes, out_planes, stride=1): - """1x1 convolution""" - return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) - - -class BasicBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, - base_width=64, dilation=1, norm_layer=None): - super(BasicBlock, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - if groups != 1 or base_width != 64: - raise ValueError('BasicBlock only supports groups=1 and base_width=64') - if dilation > 1: - raise NotImplementedError("Dilation > 1 not supported in BasicBlock") - # Both self.conv1 and self.downsample layers downsample the input when stride != 1 - self.conv1 = conv3x3(inplanes, planes, stride) - self.bn1 = norm_layer(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes) - self.bn2 = norm_layer(planes) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - assert False - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - out = self.relu(out) - - return out - - -class Bottleneck(nn.Module): - # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2) - # while original implementation places the stride at the first 1x1 convolution(self.conv1) - # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385. - # This variant is also known as ResNet V1.5 and improves accuracy according to - # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch. 
- - expansion = 4 - - def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, - base_width=64, dilation=1, norm_layer=None, drop_path_rate=0.0): - super(Bottleneck, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - width = int(planes * (base_width / 64.)) * groups - # Both self.conv2 and self.downsample layers downsample the input when stride != 1 - self.conv1 = conv1x1(inplanes, width) - self.bn1 = norm_layer(width) - self.conv2 = conv3x3(width, width, stride, groups, dilation) - self.bn2 = norm_layer(width) - self.conv3 = conv1x1(width, planes * self.expansion) - self.bn3 = norm_layer(planes * self.expansion) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() - - def forward(self, x): - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out = identity + self.drop_path(out) - out = self.relu(out) - - return out - - -class ResNet(nn.Module): - - def __init__(self, layers, zero_init_residual=False, - groups=1, width_per_group=64, replace_stride_with_dilation=None, - norm_layer=None, drop_path_rate=0.0): - super(ResNet, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - self._norm_layer = norm_layer - - self.inplanes = 64 - self.dilation = 1 - if replace_stride_with_dilation is None: - # each element in the tuple indicates if we should replace - # the 2x2 stride with a dilated convolution instead - replace_stride_with_dilation = [False, False, False] - if len(replace_stride_with_dilation) != 3: - raise ValueError("replace_stride_with_dilation should be None " - "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) - self.groups = groups - self.base_width = width_per_group - self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, - bias=False) - self.bn1 = norm_layer(self.inplanes) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - self.layer1 = self._make_layer(Bottleneck, 64, layers[0], drop_path_rate=drop_path_rate) - self.layer2 = self._make_layer(Bottleneck, 128, layers[1], stride=2, - dilate=replace_stride_with_dilation[0], drop_path_rate=drop_path_rate) - self.layer3 = self._make_layer(Bottleneck, 256, layers[2], stride=2, - dilate=replace_stride_with_dilation[1], drop_path_rate=drop_path_rate) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, (nn.SyncBatchNorm, nn.BatchNorm2d, nn.GroupNorm)): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - # Zero-initialize the last BN in each residual branch, - # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
- # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 - if zero_init_residual: - for m in self.modules(): - if isinstance(m, Bottleneck): - nn.init.constant_(m.bn3.weight, 0) - elif isinstance(m, BasicBlock): - nn.init.constant_(m.bn2.weight, 0) - - def _make_layer(self, block, planes, blocks, stride=1, dilate=False, drop_path_rate=0.0): - norm_layer = self._norm_layer - downsample = None - previous_dilation = self.dilation - if dilate: - self.dilation *= stride - stride = 1 - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - conv1x1(self.inplanes, planes * block.expansion, stride), - norm_layer(planes * block.expansion), - ) - - layers = [] - layers.append(block(self.inplanes, planes, stride, downsample, self.groups, - self.base_width, previous_dilation, norm_layer)) - self.inplanes = planes * block.expansion - - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, blocks)] - for i in range(1, blocks): - layers.append(block(self.inplanes, planes, groups=self.groups, - base_width=self.base_width, dilation=self.dilation, - norm_layer=norm_layer, drop_path_rate=dpr[i])) - - return nn.Sequential(*layers) - - def _forward_impl(self, x): - # See note [TorchScript super()] - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - - return x - - def forward(self, x): - return self._forward_impl(x) \ No newline at end of file diff --git a/spaces/srkajol/Singapore-Regulation-AI-Sheet/app.py b/spaces/srkajol/Singapore-Regulation-AI-Sheet/app.py deleted file mode 100644 index 4fa4d8b87316607e6a89b764a7904e628cc7e8ce..0000000000000000000000000000000000000000 --- a/spaces/srkajol/Singapore-Regulation-AI-Sheet/app.py +++ /dev/null @@ -1,102 +0,0 @@ -import os -import openai -import gradio as gr -import pandas as pd -from datetime import datetime -import gspread -from google.oauth2.service_account import Credentials -import requests -import json - -openai.api_key = os.getenv("API_SECRET") - -# Global variables -records = [] -credentials = Credentials.from_service_account_file("credentials.json", scopes=["https://www.googleapis.com/auth/spreadsheets"]) -client = gspread.authorize(credentials) -sheet = client.open_by_url("https://docs.google.com/spreadsheets/d/1Dn0Mf9hcUYpBIHHGAGtcbYtb4UJWfFfnv8ctg9b5osc/edit?usp=sharing").sheet1 - -def get_user_ip(): - try: - response = requests.get("https://api.ipify.org?format=json") - data = json.loads(response.text) - return data["ip"] - except: - return None - -def validate_passcode(passcode, user_input, user_name, user_email, is_fintech_startup, region, profession): - valid_passcodes = { - "organization1": "singapore-management-university", - } - - if not passcode: - return "Please provide a passcode. Email contact@westminster.ai to request a passcode." - - passcode = passcode.lower() # Convert the passcode to lowercase for case-insensitive comparison - - if passcode not in valid_passcodes.values(): - return "Incorrect passcode. Access denied. Email contact@westminster.ai to request a passcode." - - return CustomChatGPT(passcode, user_input, user_name, user_email, is_fintech_startup, region, profession) - -def CustomChatGPT(passcode, user_input, user_name, user_email, is_fintech_startup, region, profession): - if not (user_input and user_name and user_email and is_fintech_startup and region and profession): - return "Please fill in all the input fields." 
- - if not user_name: - return "Please enter your name." - - messages = [] - - user_message = f"{user_input} [USER_IDENTITY: {user_name}]" - messages.append({"role": "user", "content": user_message}) - messages.append({"role": "system", "content": "As a specialized Singapore lawyer, specialized in Singapore national regulations and law, you're currently in a confidential consultation with your client. The client has complete trust in your expertise and advice regulation. Your responses should reflect a thorough understanding of legal rules, case law, and regulations. Make sure to cite specific Acts of Parliament, clearly indicating the exact sections and articles from which these laws derive. If secondary legislation is relevant. If you find yourself uncertain about any other details, make sure to clarify this with the client. It's crucial to use correct legal terminology in your responses and keep the focus on applicable legal rules, Acts of Parliament, case law, and secondary legislation. Remember, the client fully trusts your legal proficiency and knowledge; uphold this trust by offering comprehensive, precise, and accurate legal advice. Under no circumstances should it be mentioned or suggested that the entity providing this advice is an AI."}) - - response = openai.ChatCompletion.create( - model="gpt-4", - messages=messages - ) - - ChatGPT_reply = response["choices"][0]["message"]["content"] - messages.append({"role": "assistant", "content": ChatGPT_reply}) - - timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") - ip_address = get_user_ip() - - record = { - "Passcode": passcode, - "Timestamp": timestamp, - "User Input": user_input, - "User Identity": user_name, - "User Email": user_email, - "IP Address": ip_address, - "Profession": profession, - "Our AI Lawyer Reply": ChatGPT_reply - } - records.append(record) - - sheet_data = pd.DataFrame(records) - rows_to_append = sheet_data.iloc[len(records) - 1:][["Passcode", "Timestamp", "User Input", "User Identity", "User Email", "IP Address", "Fintech Startup", "Region", "Profession", "Our AI Lawyer Reply"]].values.tolist() - - if len(records) == 1: - header = ["Passcode", "Timestamp", "User Input", "User Identity", "User Email", "IP Address", "Fintech Startup", "Region", "Profession", "Our AI Lawyer Reply"] - sheet.insert_row(header, 1) - - sheet.append_rows(rows_to_append, value_input_option='USER_ENTERED') - - return ChatGPT_reply - -def launch_interface(): - inputs = [ - gr.inputs.Textbox(label="Organisation's Passcode", placeholder="Enter your organisation's passcode"), - gr.inputs.Textbox(label="Your Legal Query", placeholder="Talk to your lawyer..."), - gr.inputs.Textbox(label="Your Name", placeholder="Enter your name"), - gr.inputs.Textbox(label="Your Email", placeholder="Enter your email"), - gr.inputs.Textbox(label="Profession", placeholder="Enter your profession") - ] - outputs = gr.outputs.Textbox(label="Our AI Lawyer Reply") - interface = gr.Interface(fn=validate_passcode, inputs=inputs, outputs=outputs) - interface.launch() - -if __name__ == "__main__": - launch_interface() diff --git a/spaces/starlit7/USPoliticsTTS/transforms.py b/spaces/starlit7/USPoliticsTTS/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/starlit7/USPoliticsTTS/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 
-DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - 
cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/stasimus/p350-fastapi/Dockerfile b/spaces/stasimus/p350-fastapi/Dockerfile deleted file mode 100644 index b358d00586368cbe91f0a407858055b2acaa52ae..0000000000000000000000000000000000000000 --- a/spaces/stasimus/p350-fastapi/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM python:3.8 - -WORKDIR /code - -COPY ./requirements.txt /code/requirements.txt - -RUN apt-get update -RUN apt-get install ffmpeg libsm6 libxext6 -y -# RUN pythom -m venv venv -# RUN ./venv/Scripts/activate -RUN python -m pip install --upgrade pip -RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt - -COPY . . 
- -CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"] \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Adobe Photoshop CS2 Incl [VERIFIED] Keygen Free Download.md b/spaces/stomexserde/gpt4-ui/Examples/Adobe Photoshop CS2 Incl [VERIFIED] Keygen Free Download.md deleted file mode 100644 index 29f6ddea0c2615369ef662b9b5c2f2cc3bf3088b..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Adobe Photoshop CS2 Incl [VERIFIED] Keygen Free Download.md +++ /dev/null @@ -1,31 +0,0 @@ - -

          How to Download Adobe Photoshop CS2 for Free and Legally

          -

          Adobe Photoshop CS2 is one of the most popular and powerful photo editing programs ever created. It offers a wide range of features and tools to help you create stunning images and graphics. Whether you are a professional photographer, a graphic designer, or a hobbyist, Photoshop CS2 can help you unleash your creativity and enhance your workflow.

          -

          But what if you don't want to pay for the latest version of Photoshop? Is there a way to get Adobe Photoshop CS2 for free and legally? The answer is yes, but there are some things you need to know before you download it.

          -

          Adobe Photoshop CS2 Incl KeyGen Free Download


          DOWNLOAD ✏ ✏ ✏ https://urlgoal.com/2uI8Et



          -

          Why Adobe Photoshop CS2 Is Free

          -

          Adobe Photoshop CS2 was released in 2005 and it is no longer supported by the developer. This means that Adobe does not provide any updates, bug fixes, or security patches for this version. It also means that you won't be able to activate it online or use some of the online services that require an Adobe account.

          -

          In 2013, Adobe decided to disable the activation server for CS2 products, including Photoshop CS2, due to a technical issue. This made it impossible for users who legitimately purchased the software to use it. To solve this problem, Adobe provided a special serial number that anyone can use to install and activate Photoshop CS2 without needing an online connection.

          -

          This serial number is available on the official Adobe website, along with the download links for Photoshop CS2 for Windows and Mac. However, this does not mean that Adobe is giving away Photoshop CS2 for free to everyone. According to Adobe, this offer is only intended for users who already own a valid license for Photoshop CS2 and need to reinstall it on their computers.

          -

          If you don't have a valid license for Photoshop CS2, you are not legally entitled to use the software, even if you can download it and activate it with the provided serial number. Adobe still owns the copyright and trademark rights for Photoshop CS2 and can take legal action against anyone who uses it without permission.

          -

          How to Download Adobe Photoshop CS2 Safely

          -

          If you have a valid license for Photoshop CS2 and want to download it again, you need to follow these steps:

          -
            -
          1. Go to the official Adobe website and sign in with your Adobe ID or create one if you don't have one.
          2. -
          3. Go to this page and scroll down to find the download links for Photoshop CS2 for Windows or Mac.
          4. -
          5. Click on the download link for your operating system and save the file on your computer.
          6. -
          7. Copy the serial number provided on the same page and keep it handy.
          8. -
          9. Run the installer file and follow the instructions on the screen.
          10. -
          11. When prompted, enter the serial number that you copied earlier and complete the installation.
          12. -
          13. Launch Photoshop CS2 and enjoy using it.
          14. -
          -

          Note that Photoshop CS2 may not work properly on newer operating systems or devices. It may also have compatibility issues with some plugins or extensions. You may need to adjust some settings or use compatibility mode to make it run smoothly.

          -

          -

          Alternatives to Adobe Photoshop CS2

          -

          If you don't have a valid license for Photoshop CS2 or you want to use a more updated and secure version of Photoshop, you have some alternatives to consider. Here are some of them:

          -
            -
          • Adobe Photoshop CC: This is the latest and most advanced version of Photoshop, available as part of the Creative Cloud subscription service. You can get it for $9.99 per month or $119.88 per year, along with other apps like Lightroom, Illustrator, Premiere Pro, and more. You can also get a free trial for 7 days before you decide to buy it.
          • -
          • GIMP: This is a free and open-source image editor that offers many features and tools similar to Photoshop. You can use it to edit photos, create graphics, design logos, make animations, and more. It works on Windows, Mac, Linux, and other platforms.
          • -
          • Pixlr: This is an online photo editor that lets you edit your images in your browser without downloading anything. It has a simple and intuitive interface that resembles Photoshop. You can use it to crop, resize, rotate, adjust colors, apply filters, add text, and more.

            e93f5a0c3f
            -
            -
            \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Asc Timetables 2007 Serial Keygen.md b/spaces/stomexserde/gpt4-ui/Examples/Asc Timetables 2007 Serial Keygen.md deleted file mode 100644 index 15bfed3401cf3caa0a499b525b6de2723a242832..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Asc Timetables 2007 Serial Keygen.md +++ /dev/null @@ -1,62 +0,0 @@ -
            -

            Asc Timetables 2007 Serial Keygen: What is it and how to use it?

            -

            If you are looking for a software that can help you create and manage school timetables easily and efficiently, you might have heard of Asc Timetables. It is one of the most popular and widely used software for school scheduling in the world. It has been around since 1996 and has been constantly updated and improved over the years.

            -

            Asc Timetables 2007 Serial Keygen


            Download Zip →→→ https://urlgoal.com/2uI6AN



            -

            However, if you want to use Asc Timetables, you need to purchase a license key that will activate the software on your computer. The license key is not cheap, especially if you need multiple licenses for different computers or users. That's why some people look for alternative ways to get Asc Timetables without paying

            One of the most common ways to get Asc Timetables without paying is to use a serial keygen. A serial keygen is a software tool that can generate random and valid license keys for various software products, including Asc Timetables. By using a serial keygen, you can bypass the official registration process and activate Asc Timetables on your computer for free.

            -

            But how does a serial keygen work? And how can you find a reliable and safe serial keygen for Asc Timetables 2007? In this article, we will answer these questions and more. We will also show you the benefits and features of Asc Timetables 2007, how to install and activate it with a serial keygen, and some tips and tricks on how to use it effectively. Let's get started!

            -

            Benefits of using Asc Timetables 2007 for school scheduling

            -

            Asc Timetables 2007 is a software that can help you create and manage school timetables easily and efficiently. It is designed for schools of any size and type, from elementary to high school, from public to private, from single to multi-teacher. It can handle any kind of timetable, whether it is simple or complex, weekly or biweekly, fixed or flexible.

            -

            -

            Here are some of the benefits of using Asc Timetables 2007 for school scheduling:

            -
              -
            • It can help you create and manage school timetables easily and efficiently. You don't need to spend hours or days manually creating and adjusting your timetables. With Asc Timetables 2007, you can use the unique automatic generator that can create optimal timetables in minutes. You can also use the user-friendly interface that allows you to customize and edit your timetables easily. You can also use the advanced options that let you set various constraints and preferences for your timetables, such as class size, teacher availability, room capacity, subject requirements, etc.
            • -
            • It can save you time, money, and resources. By using Asc Timetables 2007, you can reduce the time and effort required to create and manage your school timetables. You can also avoid errors and conflicts that might arise from manual scheduling. You can also save money by avoiding unnecessary costs associated with inefficient timetabling, such as overtime pay, extra staff, wasted space, etc. You can also save resources by optimizing the use of your teachers, rooms, equipment, etc.
            • -
            • It can improve the quality and accuracy of your school timetables. By using Asc Timetables 2007, you can ensure that your school timetables are consistent, balanced, fair, and realistic. You can also ensure that your school timetables meet the needs and expectations of your students, teachers, parents, and administrators. You can also ensure that your school timetables comply with the rules and regulations of your school system or district.
            • -
            -

            As you can see, Asc Timetables 2007 is a powerful and useful software that can help you create and manage school timetables easily and efficiently. But what makes it stand out from other software? Let's take a look at some of its features that make it unique and superior.

            -

            Features of Asc Timetables 2007 that make it stand out from other software

            -

            Asc Timetables 2007 has many features that make it stand out from other software for school scheduling. Here are some of them:

            -
              -
            • The unique automatic generator that can create optimal timetables in minutes. This is the core feature of Asc Timetables 2007. It is a smart algorithm that can generate optimal timetables based on the data and parameters that you provide. It can handle any kind of timetable, whether it is simple or complex, weekly or biweekly, fixed or flexible. It can also handle any kind of constraints and preferences that you set for your timetables, such as class size, teacher availability, room capacity, subject requirements, etc. It can also handle any kind of exceptions or changes that might occur during the school year, such as holidays, substitutions, cancellations, etc. The automatic generator is fast, reliable, and accurate. It can create optimal timetables in minutes or even seconds.
            • -
            • The user-friendly interface that allows you to customize and edit your timetables easily. This is the feature that allows you to customize and edit your timetables easily. It is a simple and intuitive interface that lets you enter and modify the data and parameters for your timetables. You can use the wizard mode that guides you through the steps of creating your timetables, or the expert mode that gives you more control and flexibility. You can also use the drag-and-drop feature that lets you move and adjust your timetables with a mouse. You can also use the undo and redo feature that lets you revert or repeat your changes.
            • -
            • The advanced options that let you set various constraints and preferences for your timetables. This is the feature that lets you set various constraints and preferences for your timetables, such as class size, teacher availability, room capacity, subject requirements, etc. You can use the predefined options that cover the most common scenarios, or the custom options that let you define your own rules and conditions. You can also use the priority feature that lets you assign different levels of importance to your constraints and preferences. You can also use the weight feature that lets you balance the trade-offs between different aspects of your timetables.
            • -
            -

            These are some of the features of Asc Timetables 2007 that make it stand out from other software for school scheduling. But how can you install and activate Asc Timetables 2007 with a serial keygen? Let's find out in the next section.

            -

            How to install and activate Asc Timetables 2007 with a serial keygen

            -

            If you want to use Asc Timetables 2007 with a serial keygen, you need to follow these steps:

            -
              -
            1. Download and install Asc Timetables 2007 on your computer. You can download the setup file from the official website of Asc Timetables, or from any other trusted source. The setup file is about 10 MB in size, and it should take only a few minutes to download and install. You can choose the language and the destination folder for the installation. You can also choose whether to create a desktop shortcut or not.
            2. -
            3. Download and run a serial keygen for Asc Timetables 2007 on your computer. You can find a serial keygen for Asc Timetables 2007 from various online sources, such as torrent sites, crack sites, or forums. However, you need to be careful and cautious when downloading and running a serial keygen, as some of them might contain viruses, malware, or spyware that could harm your computer or steal your personal information. You should always scan the serial keygen with an antivirus program before opening it, and avoid clicking on any suspicious links or pop-ups. You should also read the comments and reviews of other users who have used the serial keygen before, and check if it is reliable and safe.
            4. -
            5. Use the serial keygen to generate a valid license key for Asc Timetables 2007. Once you have downloaded and run a serial keygen for Asc Timetables 2007, you should see a window that asks you to enter some information, such as your name, email address, school name, etc. You can enter any information that you want, as it does not affect the license key generation. After entering the information, click on the generate button, and wait for a few seconds until a license key appears on the screen. The license key should be a combination of letters and numbers, such as A1B2-C3D4-E5F6-G7H8. Copy the license key to your clipboard or write it down somewhere.
            6. -
            7. Enter the license key and activate Asc Timetables 2007 on your computer. After generating a license key with the serial keygen, go back to Asc Timetables 2007, and open it on your computer. You should see a window that asks you to enter a license key to activate the software. Paste or type the license key that you generated with the serial keygen, and click on the activate button. If the license key is valid, you should see a message that confirms that Asc Timetables 2007 has been activated successfully on your computer. You can now use Asc Timetables 2007 without any limitations or restrictions.
            8. -
            -

            Congratulations! You have successfully installed and activated Asc Timetables 2007 with a serial keygen on your computer. But how can you use Asc Timetables 2007 effectively to create and manage your school timetables? Let's see some tips and tricks in the next section.

            -

            Tips and tricks on how to use Asc Timetables 2007 effectively

            -

            Asc Timetables 2007 is a powerful and useful software that can help you create and manage your school timetables easily and efficiently. However, to get the most out of it, you need to know some tips and tricks on how to use it effectively. Here are some of them:

            -
              -
            • How to use the automatic generator to create different types of timetables. The automatic generator is the core feature of Asc Timetables 2007. It can create optimal timetables based on the data and parameters that you provide. However, you can also use it to create different types of timetables, such as weekly or biweekly, fixed or flexible, simple or complex, etc. To do this, you need to adjust the settings and options of the automatic generator accordingly. For example, if you want to create a biweekly timetable, you need to set the number of weeks per cycle to 2. If you want to create a flexible timetable, you need to set the number of lessons per day and per week to variable. If you want to create a complex timetable, you need to set more constraints and preferences for your timetables, such as class size, teacher availability, room capacity, subject requirements, etc.
            • -
            • How to use the manual editor to modify and fine-tune your timetables. The manual editor is the feature that allows you to customize and edit your timetables easily. It is a simple and intuitive interface that lets you enter and modify the data and parameters for your timetables. You can use the drag-and-drop feature that lets you move and adjust your timetables with a mouse. You can also use the undo and redo feature that lets you revert or repeat your changes. However, you can also use the manual editor to modify and fine-tune your timetables, such as adding or deleting lessons, changing teachers or rooms, swapping or splitting classes, etc. To do this, you need to select the lesson or the cell that you want to modify, and right-click on it. You will see a menu that gives you various options to modify and fine-tune your timetables.
            • -
            • How to use the verification tool to check and fix any errors or conflicts in your timetables. The verification tool is the feature that lets you check and fix any errors or conflicts in your timetables. It is a smart tool that can detect any problems or issues that might arise from your timetabling, such as overlapping lessons, missing teachers, unavailable rooms, unsatisfied preferences, etc. It can also suggest solutions or alternatives to resolve these problems or issues. To use the verification tool, you need to click on the verify button on the toolbar. You will see a window that shows you a list of errors or conflicts in your timetables, along with their severity and description. You can also see a preview of your timetables with the errors or conflicts highlighted in red. You can then choose to fix them manually or automatically by clicking on the fix button.
            • -
            -

            These are some of the tips and tricks on how to use Asc Timetables 2007 effectively to create and manage your school timetables. By following these tips and tricks, you can make the most out of Asc Timetables 2007 and enjoy its benefits and features.

            -

            Conclusion

            -

            In conclusion, Asc Timetables 2007 is a software that can help you create and manage school timetables easily and efficiently. It has many benefits and features that make it stand out from other software for school scheduling. It has a unique automatic generator that can create optimal timetables in minutes. It has a user-friendly interface that allows you to customize and edit your timetables easily. It has advanced options that let you set various constraints and preferences for your timetables. It also has tips and tricks on how to use it effectively.

            -

            If you want to use Asc Timetables 2007, you need to purchase a license key that will activate the software on your computer. However, if you don't want to pay for it, you can use a serial keygen that can generate a valid license key for free. You just need to download and install Asc Timetables 2007, download and run a serial keygen, generate a license key with it, enter the license key and activate Asc Timetables 2007 -

            So, what are you waiting for? If you want to create and manage school timetables easily and efficiently, you should try Asc Timetables 2007 with a serial keygen. It is a powerful and useful software that can help you save time, money, and resources, and improve the quality and accuracy of your school timetables. You can download it from the official website of Asc Timetables, or from any other trusted source. You can also find a serial keygen from various online sources, such as torrent sites, crack sites, or forums. Just make sure that you scan the serial keygen with an antivirus program before opening it, and avoid clicking on any suspicious links or pop-ups.

            -

            If you have any questions or doubts about Asc Timetables 2007 or the serial keygen, you can check the FAQs section below, or contact the support team of Asc Timetables. They will be happy to assist you and answer your queries.

            -

            FAQs

            -

            Here are some of the frequently asked questions about Asc Timetables 2007 and the serial keygen:

            -
              -
            1. Is Asc Timetables 2007 compatible with Windows 10?
            2. -

              Yes, Asc Timetables 2007 is compatible with Windows 10, as well as with Windows XP, Vista, 7, 8, and 8.1. However, you might need to run it as an administrator or in compatibility mode if you encounter any issues.

              -
            3. Is Asc Timetables 2007 safe to use?
            4. -

              Yes, Asc Timetables 2007 is safe to use, as long as you download it from the official website of Asc Timetables, or from any other trusted source. However, you should be careful and cautious when using a serial keygen, as some of them might contain viruses, malware, or spyware that could harm your computer or steal your personal information. You should always scan the serial keygen with an antivirus program before opening it, and avoid clicking on any suspicious links or pop-ups.

              -
            5. How can I get support for Asc Timetables 2007?
            6. -

              If you need any support for Asc Timetables 2007, you can contact the support team of Asc Timetables. You can visit their website and fill out a support form, or send them an email at support@asctimetables.com. You can also visit their forum and chat with other users and experts. You can also check their online help and tutorials for more information and guidance.

              -
            7. Can I export or print my timetables with Asc Timetables 2007?
            8. -

              Yes, you can export or print your timetables with Asc Timetables 2007. You can export your timetables in various formats, such as HTML, PDF, Excel, Word, XML, etc. You can also print your timetables in various layouts and styles, such as portrait or landscape, color or black-and-white, grid or list, etc. You can also customize the appearance and content of your timetables before exporting or printing them.

              -
            9. Can I share my timetables with other users or devices with Asc Timetables 2007?
            10. -

              Yes, you can share your timetables with other users or devices with Asc Timetables 2007. You can use the network feature that lets you connect multiple computers and users to a central database. You can also use the online feature that lets you upload your timetables to a web server and access them from any device with an internet connection. You can also use the mobile feature that lets you view your timetables on your smartphone or tablet.

              -

            b2dd77e56b
            -
            -
            \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Autodesk AutoCAD Plant 3D 2020.2 With Crack (x64).md b/spaces/stomexserde/gpt4-ui/Examples/Autodesk AutoCAD Plant 3D 2020.2 With Crack (x64).md deleted file mode 100644 index 6561aa5998d2b7f0ad4a697881ba5c56a60a978e..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Autodesk AutoCAD Plant 3D 2020.2 With Crack (x64).md +++ /dev/null @@ -1,53 +0,0 @@ - -

            How to Download and Install Autodesk AutoCAD Plant 3D 2020.2 With Crack (x64)

            -

            Autodesk AutoCAD Plant 3D is a powerful software that allows you to design, model, and document process plants in a 3D environment. It helps you to create accurate and efficient plant layouts, piping, structural, and equipment models, as well as generate isometric drawings, orthographic drawings, and reports.

            -

            If you want to use this software for free, you need to download and install Autodesk AutoCAD Plant 3D 2020.2 with crack (x64). This is the latest version of the software that has been released in November 2022. It has many new features and improvements, such as:

            -

            Autodesk AutoCAD Plant 3D 2020.2 With Crack (x64)


            Download File ☆☆☆ https://urlgoal.com/2uI7wI



            -
              -
            • Enhanced collaboration with BIM 360 Design
            • -
            • Improved performance and stability
            • -
            • New spec-driven P&ID design
            • -
            • New project setup and management tools
            • -
            • New pipe routing and editing tools
            • -
            • New support for point clouds
            • -
            • New integration with Autodesk Navisworks
            • -
            • New support for Autodesk Vault
            • -
            • New support for Autodesk Revit
            • -
            • New support for Autodesk Inventor
            • -
            -

            In this article, we will show you how to download and install Autodesk AutoCAD Plant 3D 2020.2 with crack (x64) step by step. Follow the instructions carefully and enjoy the full version of the software.

            - -

            Step 1: Download Autodesk AutoCAD Plant 3D 2020.2 with crack (x64)

            -

            The first thing you need to do is to download the software from a reliable source. You can use the link below to download Autodesk AutoCAD Plant 3D 2020.2 with crack (x64) from our website. The file size is about 2.5 GB, so make sure you have enough space on your hard drive and a stable internet connection.

            -

            Download Autodesk AutoCAD Plant 3D 2020.2 with crack (x64)

            - -

            Step 2: Extract the downloaded file

            -

            After downloading the file, you need to extract it using a tool like WinRAR or 7-Zip. You will get a folder named "Autodesk AutoCAD Plant 3D 2020.2 with crack (x64)" that contains the setup file and the crack file.

            -

            - -

            Step 3: Install Autodesk AutoCAD Plant 3D 2020.2

            -

            Now you need to install the software on your computer. To do that, follow these steps:

            -
              -
            1. Run the setup file as administrator.
            2. -
            3. Select your language and click "Install".
            4. -
            5. Accept the license agreement and click "Next".
            6. -
            7. Select the components you want to install and click "Next".
            8. -
            9. Select the installation location and click "Next".
            10. -
            11. Wait for the installation to complete and click "Finish".
            12. -
            - -

            Step 4: Apply the crack file

            -

            The final step is to apply the crack file to activate the software. To do that, follow these steps:

            -
              -
            1. Copy the crack file from the folder "Autodesk AutoCAD Plant 3D 2020.2 with crack (x64)".
            2. -
            3. Paste it into the installation folder of Autodesk AutoCAD Plant 3D 2020.2.
            4. -
            5. Replace the original file if prompted.
            6. -
            7. Run the software as administrator.
            8. -
            9. Enjoy the full version of Autodesk AutoCAD Plant 3D 2020.2 with crack (x64).
            10. -
            - -

            Conclusion

            -

            In this article, we have shown you how to download and install Autodesk AutoCAD Plant 3D 2020.2 with crack (x64) on your computer. This is a powerful software that can help you design, model, and document process plants in a 3D environment. It has many new features and improvements that make it more efficient and user-friendly.

            -

            If you have any questions

            e93f5a0c3f
            -
            -
            \ No newline at end of file diff --git a/spaces/sugo/v6yu7bgn/README.md b/spaces/sugo/v6yu7bgn/README.md deleted file mode 100644 index 166cf48026093f2b9d60cfd970a64d9f02c52967..0000000000000000000000000000000000000000 --- a/spaces/sugo/v6yu7bgn/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: V6yu7bgn -emoji: 👁 -colorFrom: red -colorTo: pink -sdk: static -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sunwaee/Perceiver-Multiclass-Emotion-Classification/app.py b/spaces/sunwaee/Perceiver-Multiclass-Emotion-Classification/app.py deleted file mode 100644 index 84d1a5fb95cdc14d40039503e778bd4a6bbaca51..0000000000000000000000000000000000000000 --- a/spaces/sunwaee/Perceiver-Multiclass-Emotion-Classification/app.py +++ /dev/null @@ -1,118 +0,0 @@ -import os - -import gdown as gdown -import nltk -import streamlit as st -from nltk.tokenize import sent_tokenize - -from source.pipeline import MultiLabelPipeline, inputs_to_dataset - - -def download_models(ids): - """ - Download all models. - - :param ids: name and links of models - :return: - """ - - # Download sentence tokenizer - nltk.download('punkt') - - # Download model from drive if not stored locally - for key in ids: - if not os.path.isfile(f"model/{key}.pt"): - url = f"https://drive.google.com/uc?id={ids[key]}" - gdown.download(url=url, output=f"model/{key}.pt") - - -@st.cache -def load_labels(): - """ - Load model labels. - - :return: - """ - - return [ - "admiration", - "amusement", - "anger", - "annoyance", - "approval", - "caring", - "confusion", - "curiosity", - "desire", - "disappointment", - "disapproval", - "disgust", - "embarrassment", - "excitement", - "fear", - "gratitude", - "grief", - "joy", - "love", - "nervousness", - "optimism", - "pride", - "realization", - "relief", - "remorse", - "sadness", - "surprise", - "neutral" - ] - - -@st.cache(allow_output_mutation=True) -def load_model(model_path): - """ - Load model and cache it. - - :param model_path: path to model - :return: - """ - - model = MultiLabelPipeline(model_path=model_path) - - return model - - -# Page config -st.set_page_config(layout="centered") -st.title("Multiclass Emotion Classification") -st.write("DeepMind Language Perceiver for Multiclass Emotion Classification (Eng). ") - -maintenance = False -if maintenance: - st.write("Unavailable for now (file downloads limit). ") -else: - # Variables - ids = {'perceiver-go-emotions': st.secrets['model']} - labels = load_labels() - - # Download all models from drive - download_models(ids) - - # Display labels - st.markdown(f"__Labels:__ {', '.join(labels)}") - - # Model selection - left, right = st.columns([4, 2]) - inputs = left.text_area('', max_chars=4096, value='This is a space about multiclass emotion classification. Write ' - 'something here to see what happens!') - model_path = right.selectbox('', options=[k for k in ids], index=0, help='Model to use. ') - split = right.checkbox('Split into sentences', value=True) - model = load_model(model_path=f"model/{model_path}.pt") - right.write(model.device) - - if split: - if not inputs.isspace() and inputs != "": - with st.spinner('Processing text... This may take a while.'): - left.write(model(inputs_to_dataset(sent_tokenize(inputs)), batch_size=1)) - else: - if not inputs.isspace() and inputs != "": - with st.spinner('Processing text... 
This may take a while.'): - left.write(model(inputs_to_dataset([inputs]), batch_size=1)) diff --git a/spaces/supercyx3/gpt/README.md b/spaces/supercyx3/gpt/README.md deleted file mode 100644 index be7f6845bb7ad942d49a3d66f1d8b82557ea6ec0..0000000000000000000000000000000000000000 --- a/spaces/supercyx3/gpt/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: ChatGPT-Next-Web -emoji: 💻 -colorFrom: blue -colorTo: yellow -sdk: docker -pinned: false -license: mit -app_port: 3000 -duplicated_from: dongsiqie/gpt ---- -免费key的来源:https://github.com/pengzhile/pandora/issues/837 - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Font Psl Kittithada Bold 75.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Font Psl Kittithada Bold 75.md deleted file mode 100644 index 07d5d059acc606e513919bcd4792dfe13f7499d4..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Font Psl Kittithada Bold 75.md +++ /dev/null @@ -1,96 +0,0 @@ - -

            Font PSL Kittithada Bold 75: A Stylish and Modern Thai Font

            -

            If you are looking for a stylish and modern Thai font that can make your text stand out, you might want to check out Font PSL Kittithada Bold 75. This font is a bold and elegant typeface that can be used for various purposes, such as headlines, logos, posters, banners, labels, and more.

            -

            Font PSL Kittithada Bold 75 is a part of the PSL KittithadaAD family, which is a popular and versatile Thai font family that consists of four styles: regular, italic, bold, and bold italic. The font family was created by Phanlop Thongsuk, a renowned Thai font designer who has been making fonts since 1995.

            -

            font psl kittithada bold 75


            Download Filehttps://cinurl.com/2uEYZ5



            -

            Font PSL Kittithada Bold 75 has a unique and distinctive design that combines the traditional Thai script with the modern Latin alphabet. The font has a smooth and balanced stroke that creates a harmonious and elegant look. The font also has a high legibility and readability that makes it suitable for both print and digital media.

            -

            How to Download and Use Font PSL Kittithada Bold 75

            -

            If you want to download and use Font PSL Kittithada Bold 75 for your projects, you can do so by following the steps below:

            -
              -
            1. Click on the link below to download Font PSL Kittithada Bold 75 zip file.
            2. -
            3. Extract the zip file to your computer.
            4. -
            5. Open the folder that contains the font files.
            6. -
            7. Double-click on the font file that you want to install.
            8. -
            9. Click on the Install button to install the font on your system.
            10. -
            11. Enjoy using Font PSL Kittithada Bold 75 for your projects!
            12. -
            -

            Download Font PSL Kittithada Bold 75 Zip File

            -

            Why Choose Font PSL Kittithada Bold 75

            -

            Font PSL Kittithada Bold 75 is not just another Thai font. It is a stylish and modern font that can make your text look more attractive and professional. Some of the reasons why you should choose Font PSL Kittithada Bold 75 are:

            -
              -
            • It has a unique and distinctive design that combines the traditional Thai script with the modern Latin alphabet.
            • -
            • It has a smooth and balanced stroke that creates a harmonious and elegant look.
            • -
            • It has a high legibility and readability that makes it suitable for both print and digital media.
            • -
            • It is a part of the PSL KittithadaAD family, which is a popular and versatile Thai font family that consists of four styles: regular, italic, bold, and bold italic.
            • -
            • It is created by Phanlop Thongsuk, a renowned Thai font designer who has been making fonts since 1995.
            • -
            -

            Conclusion

            -

            Font PSL Kittithada Bold 75 is a stylish and modern Thai font that can make your text stand out. It is a bold and elegant typeface that can be used for various purposes, such as headlines, logos, posters, banners, labels, and more. It is also a part of the PSL KittithadaAD family, which is a popular and versatile Thai font family that consists of four styles: regular, italic, bold, and bold italic.

            -

            If you want to download and use Font PSL Kittithada Bold 75 for your projects, just follow the link below and enjoy the best Thai font ever!

            -

            -

            Download Font PSL Kittithada Bold 75 Zip File

            -

            How to Use Font PSL Kittithada Bold 75 for Your Design Projects

            -

            Font PSL Kittithada Bold 75 is a versatile and flexible font that can be used for various design projects, such as logos, posters, banners, labels, and more. Here are some tips on how to use Font PSL Kittithada Bold 75 for your design projects:

            -
              -
            • Use Font PSL Kittithada Bold 75 for headlines, titles, slogans, or other text elements that need to catch attention and convey a strong message.
            • -
            • Use Font PSL Kittithada Bold 75 with a contrasting background color or texture to make it stand out and create a visual impact.
            • -
            • Use Font PSL Kittithada Bold 75 with a complementary font for the body text or other text elements that need to be readable and clear.
            • -
            • Use Font PSL Kittithada Bold 75 with appropriate spacing, alignment, and kerning to ensure a balanced and harmonious layout.
            • -
            • Use Font PSL Kittithada Bold 75 with suitable graphics, icons, or images that match the theme and style of your design project.
            • -
            -

            How to Get More Fonts Like Font PSL Kittithada Bold 75

            -

            If you like Font PSL Kittithada Bold 75 and want to get more fonts like it, you can do so by visiting the websites of Phanlop Thongsuk or Fontsgeek. These websites offer a wide range of Thai fonts that are similar to Font PSL Kittithada Bold 75 in terms of design, quality, and functionality. You can also find other types of fonts, such as Latin fonts, Arabic fonts, Chinese fonts, and more.

            -

            Some of the websites where you can get more fonts like Font PSL Kittithada Bold 75 are:

            -
              -
            • Phanlop Thongsuk: This is the official website of Phanlop Thongsuk, the creator of Font PSL Kittithada Bold 75 and many other Thai fonts. You can find his portfolio, biography, contact information, and links to his social media accounts.
            • -
            • Fontsgeek: This is a website that offers free fonts for download. You can find Font PSL Kittithada Bold 75 and other fonts from the PSL KittithadaAD family. You can also find other Thai fonts and fonts from different languages and categories.
            • -
            -

            What are the Advantages of Font PSL Kittithada Bold 75

            -

            Font PSL Kittithada Bold 75 is a font that has many advantages that make it a great choice for your text needs. Some of the advantages of Font PSL Kittithada Bold 75 are:

            -
              -
            • It is a stylish and modern font that can make your text look more attractive and professional.
            • -
            • It is a bold and elegant font that can be used for various purposes, such as headlines, logos, posters, banners, labels, and more.
            • -
            • It is a part of the PSL KittithadaAD family, which is a popular and versatile Thai font family that consists of four styles: regular, italic, bold, and bold italic.
            • -
            • It is created by Phanlop Thongsuk, a renowned Thai font designer who has been making fonts since 1995.
            • -
            • It is a free font that you can download and use for your projects without any restrictions or limitations.
            • -
            -

            What are the Disadvantages of Font PSL Kittithada Bold 75

            -

            Font PSL Kittithada Bold 75 is a font that has some disadvantages that you should be aware of before using it. Some of the disadvantages of Font PSL Kittithada Bold 75 are:

            -
              -
            • It is a font that is only compatible with Windows operating system. It may not work properly on other operating systems, such as Mac or Linux.
            • -
            • It is a font that may not support all the characters or symbols that you need for your text. It may not have the diacritics, punctuation marks, or special characters that you require.
            • -
            • It is a font that may not match well with other fonts that you use for your text. It may not have the same style, size, or weight as the other fonts that you combine it with.
            • -
            • It is a font that may not be suitable for all types of text or media. It may not be appropriate for formal or academic texts, or for small or low-resolution screens.
            • -
            -

            How to Customize Font PSL Kittithada Bold 75

            -

            Font PSL Kittithada Bold 75 is a font that can be customized to suit your preferences and needs. You can change the color, size, style, or alignment of the font to make it fit your text and design. Here are some ways to customize Font PSL Kittithada Bold 75:

            -
              -
            • Use the font menu or toolbar in your word processor or graphic editor to change the color, size, style, or alignment of the font. You can choose from a range of options or enter your own values.
            • -
            • Use the font dialog box in your word processor or graphic editor to access more advanced settings for the font. You can adjust the spacing, kerning, scaling, or rotation of the font.
            • -
            • Use the format painter or copy and paste tools in your word processor or graphic editor to apply the same font settings to other text elements. You can save time and ensure consistency by using these tools.
            • -
            -

            How to Troubleshoot Font PSL Kittithada Bold 75

            -

            Font PSL Kittithada Bold 75 is a font that may encounter some problems or errors when you use it. You may not be able to install, view, print, or export the font correctly. Here are some tips on how to troubleshoot Font PSL Kittithada Bold 75:

            -
              -
            • Check if your computer meets the minimum or recommended requirements for the font. You may need to update your operating system, processor, memory, hard disk space, display, sound card, MIDI interface, or internet connection.
            • -
            • Check if your word processor or graphic editor supports the font. You may need to update your software, install a plug-in, or enable a feature to use the font.
            • -
            • Check if your font file is corrupted or damaged. You may need to download a new font file, scan it for viruses, or repair it with a tool.
            • -
            • Check if your printer or device supports the font. You may need to update your driver, firmware, or settings to print or export the font.
            • -
            -

            How to Get Feedback for Font PSL Kittithada Bold 75

            -

            Font PSL Kittithada Bold 75 is a font that can benefit from feedback from other users and experts. You can get feedback for Font PSL Kittithada Bold 75 by visiting the websites of Phanlop Thongsuk or Fontsgeek. These websites offer a platform where you can ask questions, share ideas, give feedback, and get help for Font PSL Kittithada Bold 75. You can also find reviews, ratings, comments, and testimonials from other users and experts.

            -

            Some of the websites where you can get feedback for Font PSL Kittithada Bold 75 are:

            -
              -
            • Phanlop Thongsuk: This is the official website of Phanlop Thongsuk, the creator of Font PSL Kittithada Bold 75 and many other Thai fonts. You can find his portfolio, biography, contact information, and links to his social media accounts. You can also send him an email or a message to get feedback for Font PSL Kittithada Bold 75.
            • -
            • Fontsgeek: This is a website that offers free fonts for download. You can find Font PSL Kittithada Bold 75 and other fonts from the PSL KittithadaAD family. You can also find other Thai fonts and fonts from different languages and categories. You can also leave a review, rating, comment, or testimonial for Font PSL Kittithada Bold 75.
            • -
            -

            Conclusion

            -

            Font PSL Kittithada Bold 75 is a stylish and modern Thai font that can make your text stand out. It is a bold and elegant typeface that can be used for various purposes, such as headlines, logos, posters, banners, labels, and more. It is also a part of the PSL KittithadaAD family, which is a popular and versatile Thai font family that consists of four styles: regular, italic, bold, and bold italic.

            -

            If you want to download and use Font PSL Kittithada Bold 75 for your projects, just follow the link below and enjoy the best Thai font ever!

            -

            Download Font PSL Kittithada Bold 75 Zip File

            -

            Conclusion

            -

            Font PSL Kittithada Bold 75 is a stylish and modern Thai font that can make your text stand out. It is a bold and elegant typeface that can be used for various purposes, such as headlines, logos, posters, banners, labels, and more. It is also a part of the PSL KittithadaAD family, which is a popular and versatile Thai font family that consists of four styles: regular, italic, bold, and bold italic.

            -

            If you want to download and use Font PSL Kittithada Bold 75 for your projects, just follow the link below and enjoy the best Thai font ever!

            -

            Download Font PSL Kittithada Bold 75 Zip File

            3cee63e6c2
            -
            -
            \ No newline at end of file diff --git a/spaces/t13718236382/bingoGPT4/src/components/external-link.tsx b/spaces/t13718236382/bingoGPT4/src/components/external-link.tsx deleted file mode 100644 index 011265f364d5a64a770f4c7e9c65c5ade21d623a..0000000000000000000000000000000000000000 --- a/spaces/t13718236382/bingoGPT4/src/components/external-link.tsx +++ /dev/null @@ -1,30 +0,0 @@ -export function ExternalLink({ - href, - children -}: { - href: string - children: React.ReactNode -}) { - return ( - - {children} - - - ) -} diff --git a/spaces/tang155/bingo/src/components/user-menu.tsx b/spaces/tang155/bingo/src/components/user-menu.tsx deleted file mode 100644 index 9bd1edc9cf9f39b63629b021f0c1186b1a7c1341..0000000000000000000000000000000000000000 --- a/spaces/tang155/bingo/src/components/user-menu.tsx +++ /dev/null @@ -1,113 +0,0 @@ -'use client' - -import { useEffect, useState } from 'react' -import Image from 'next/image' -import { toast } from 'react-hot-toast' -import { Button } from '@/components/ui/button' -import pkg from '../../package.json' -import { - DropdownMenu, - DropdownMenuContent, - DropdownMenuItem, - DropdownMenuSeparator, - DropdownMenuTrigger -} from '@/components/ui/dropdown-menu' -import { IconCopy, IconExternalLink, IconGitHub } from '@/components/ui/icons' -import SettingIcon from '@/assets/images/settings.svg' -import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard' - -export function UserMenu() { - const [host, setHost] = useState('') - const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 }) - useEffect(() => { - setHost(location.host) - }, []) - - useEffect(() => { - if (isCopied) { - toast.success('复制成功') - } - }, [isCopied]) - return ( -
            - - - - - - - location.href='#dialog="settings"' - } - className="cursor-pointer" - > - 设置用户 - - - - location.href='#dialog="voice"' - } - className="cursor-pointer" - > - 语音设置 - - - - - 开源地址 - - - - - - - - 托管地址 - 🤗 - - - - - - - 复制站点 - - - - - -
            版本信息 {pkg.version}
            -
            - - -
            站点域名
            -
            copyToClipboard(host)} className="flex gap-1 text-xs text-zinc-500 cursor-pointer"> - {host} -
            -
            -
            -
            -
            - ) -} diff --git a/spaces/terfces0erbo/CollegeProjectV2/Adobe.Illustrator.CC.2019.v23.0.0.530x64.RePack.KpoJIuK 64 Bit.md b/spaces/terfces0erbo/CollegeProjectV2/Adobe.Illustrator.CC.2019.v23.0.0.530x64.RePack.KpoJIuK 64 Bit.md deleted file mode 100644 index dc71f7e1143eb341b31afe26ca053c2c5d916aa1..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Adobe.Illustrator.CC.2019.v23.0.0.530x64.RePack.KpoJIuK 64 Bit.md +++ /dev/null @@ -1,9 +0,0 @@ -

            Adobe.Illustrator.CC.2019.v23.0.0.530x64.RePack.KpoJIuK 64 bit


            Download File · https://bytlly.com/2uGj5M



            - -Adobe.Illustrator.CC.2019.v23.0.0.530x64 RePack. Description : The modern art of illustration. The industry standard vector graphics application lets you ... Adobe.Illustrator.CC.2019.v23.0.0.530x64 RePack by KpoJIuK download torrent - Adobe Illustrator is a program designed for the professional creation and editing of vector graphics. -The difference between LimeWire Illustrator and Adobe Illustrator is that it is an eye-catching program that allows you to create professional-looking, eye-catching vector graphics and animated logos. -Adobe Illustrator is also one of the best programs for creating web graphics. 8a78ff9644
            -
            -
            -

            diff --git a/spaces/terfces0erbo/CollegeProjectV2/Digital Signal Processing By Ramesh Babu 4th Edition Pdf Free Download Rar !!TOP!!.md b/spaces/terfces0erbo/CollegeProjectV2/Digital Signal Processing By Ramesh Babu 4th Edition Pdf Free Download Rar !!TOP!!.md deleted file mode 100644 index 7d782450a1b9a28af972dcd4f7fdeccd36cc1d3e..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Digital Signal Processing By Ramesh Babu 4th Edition Pdf Free Download Rar !!TOP!!.md +++ /dev/null @@ -1,6 +0,0 @@ -

            Digital Signal Processing By Ramesh Babu 4th Edition Pdf Free Download Rar


            DOWNLOADhttps://bytlly.com/2uGk2C



            - -To Download Ebook, for Confederation of ER Publications Automatic display, 2000 ... Venkata Ramesh Babu Pdf pdf free 047120059x pdf books Scribd. ... In office in pdf of Digital Signal Processing Fourth edition sanjit mitra 3rd ... Salour, Electronics and most useful aspects of rural labour babu zip chm rar. 1fdad05405
            -
            -
            -

            diff --git a/spaces/terfces0erbo/CollegeProjectV2/EverQuest Rain Of Fear (RoF2) For PEQ EZ Server More Torrent REPACK.md b/spaces/terfces0erbo/CollegeProjectV2/EverQuest Rain Of Fear (RoF2) For PEQ EZ Server More Torrent REPACK.md deleted file mode 100644 index a2e811318c9e9e6dcdef2c1358393f5049bbd822..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/EverQuest Rain Of Fear (RoF2) For PEQ EZ Server More Torrent REPACK.md +++ /dev/null @@ -1,6 +0,0 @@ -

            EverQuest Rain Of Fear (RoF2) For PEQ, EZ Server More Torrent


            Download Filehttps://bytlly.com/2uGl4A



            - -EverQuest Rain of Fear (RoF2) for PEQ, EZ Server + More fitgirl ... от R.G. Механики Prey () download torrent RePack by R.G. Mechanics. 4d29de3e1b
            -
            -
            -

            diff --git a/spaces/thebetterindia/ai/README.md b/spaces/thebetterindia/ai/README.md deleted file mode 100644 index 8eaa82b72a8ee7cba09fbe8c28e8f8ec4d5e429e..0000000000000000000000000000000000000000 --- a/spaces/thebetterindia/ai/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Ai -emoji: 📊 -colorFrom: pink -colorTo: red -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false -license: unknown ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/theodotus/asr-uk-punctuation-capitalization/README.md b/spaces/theodotus/asr-uk-punctuation-capitalization/README.md deleted file mode 100644 index 8bfffde966cd6316df72f8c01abeb0e15fac4600..0000000000000000000000000000000000000000 --- a/spaces/theodotus/asr-uk-punctuation-capitalization/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ASR UA Punctuation and Capitalization -emoji: 🔊 -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 3.31.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/AOMEI Partition Assistant Pro Edition 5.5 [Serial] Full LINK Version.md b/spaces/tialenAdioni/chat-gpt-api/logs/AOMEI Partition Assistant Pro Edition 5.5 [Serial] Full LINK Version.md deleted file mode 100644 index ce7edc053e48d4100852da0f16e4b905855af467..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/AOMEI Partition Assistant Pro Edition 5.5 [Serial] Full LINK Version.md +++ /dev/null @@ -1,16 +0,0 @@ -
            -Here is a possible title and article with HTML formatting for the keyword "AOMEI Partition Assistant Pro Edition 5.5 [Serial] Full Version": - -

            AOMEI Partition Assistant Pro Edition 5.5: A Versatile Partition Software for Windows PC

            -

            AOMEI Partition Assistant Pro Edition 5.5 is a professional and reliable partition manager that allows you to resize, move, extend, merge, split partitions without any data loss and migrate OS to SSD easily. It also provides many other advanced features such as 4K alignment, quick partition, dynamic volume management, command line partitioning, Windows To Go creator, MBR/GPT disk converter, NTFS/FAT32 converter, primary/logical partition converter, SSD secure erase wizard, extend partition wizard, easy partition recovery wizard, migrate OS to SSD wizard and more.

            -

            AOMEI Partition Assistant Pro Edition 5.5 [Serial] Full Version


            DOWNLOAD ►►► https://urlcod.com/2uK380



            -

            AOMEI Partition Assistant Pro Edition 5.5 supports Windows 11, 10, 8.1, 8, 7, Vista and XP operating systems. It is compatible with both 32-bit and 64-bit versions of Windows. It can work with various types of storage devices such as HDDs, SSDs, USB flash drives, SD cards and external hard drives.

            -

            AOMEI Partition Assistant Pro Edition 5.5 is easy to use and has a user-friendly interface. You can perform all the partition operations with a few clicks or drag-and-drop actions. You can also preview the changes before applying them to avoid any mistakes. AOMEI Partition Assistant Pro Edition 5.5 also provides a backup and restore function to protect your data from any unexpected situations.

            -

            AOMEI Partition Assistant Pro Edition 5.5 is a powerful and comprehensive partition software that can meet your various needs of disk and partition management. It can help you optimize the performance and storage space of your Windows PC. You can download the demo version for free from the official website[^2^] or buy the full version with a serial key for only $39.95[^1^].

            Here are a few more paragraphs with HTML formatting for the article: - -

            AOMEI Partition Assistant Pro Edition 5.5 also offers some useful tools and wizards to help you with various tasks. For example, you can clone a disk or a partition to another location without reinstalling Windows or losing any data. You can also migrate your OS to a new HDD or SSD with a few clicks. You can create a bootable CD, DVD, USB or ISO image to boot your computer in case of system failure. You can also create a Windows To Go workspace on a USB drive to run Windows 10 or 8.1 on any computer.

            -

            -

            Another feature that sets AOMEI Partition Assistant Pro Edition 5.5 apart from other partition software is its ability to convert between different disk and partition types without data loss. You can convert an MBR disk to a GPT disk or vice versa, convert a dynamic disk to a basic disk or vice versa, convert an NTFS partition to a FAT32 partition or vice versa, and convert a primary partition to a logical partition or vice versa. These conversions can help you overcome some limitations of disk and partition formats and optimize your system performance.

            -

            AOMEI Partition Assistant Pro Edition 5.5 is not only a powerful partition software but also a secure one. It provides multiple security features to protect your data and system from any damage or loss. For example, you can wipe a hard drive or a partition to erase all the data permanently and prevent any data recovery. You can also rebuild the MBR to fix boot issues, check the partition for errors, hide or unhide partitions, change the serial number or drive letter, and more.

            7196e7f11a
            -
            -
            \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Greater Than Gatsby Photoshop Actions Torrents.md b/spaces/tialenAdioni/chat-gpt-api/logs/Greater Than Gatsby Photoshop Actions Torrents.md deleted file mode 100644 index 8d72c73242b92cb5d6b63858319c5d6c92571f89..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Greater Than Gatsby Photoshop Actions Torrents.md +++ /dev/null @@ -1,14 +0,0 @@ - -Here is a possible title and article for the keyword "Greater Than Gatsby Photoshop Actions Torrents": - -

            How to Download Greater Than Gatsby Photoshop Actions for Free

            -

            If you are looking for a way to download Greater Than Gatsby Photoshop actions for free, you might be tempted to search for torrents or other illegal sources. However, this is not only risky for your computer and your privacy, but also unethical and unfair to the creators of these amazing actions.

            -

            Greater Than Gatsby is a leading provider of premium Photoshop actions for photographers of all levels and styles. They offer 15 collections of actions that cover a wide range of editing needs, such as portraits, newborns, weddings, pets, landscapes, and more. Their actions are designed to save you time, enhance your creativity, and deliver stunning results with just a few clicks.

            -

            Greater Than Gatsby Photoshop Actions Torrents


            Download File ✵✵✵ https://urlcod.com/2uKa9c



            -

            Downloading Greater Than Gatsby Photoshop actions from torrents or other unauthorized sources is not only illegal, but also harmful to your computer and your photos. You might end up with corrupted files, viruses, malware, spyware, or other unwanted programs that can damage your system or compromise your personal information. Moreover, you might get low-quality or outdated actions that do not work properly with your version of Photoshop or Elements.

            -

            The best way to download Greater Than Gatsby Photoshop actions for free is to sign up for their newsletter and get 12 free actions as a welcome gift. These actions are taken from their 15 premium collections and are compatible with Photoshop CC – 2023, Photoshop CS2 – CS6, and Elements 13 – 23. You can use these actions to edit your photos fast and easy, and see the quality and versatility of Greater Than Gatsby products.

            -

            To sign up for the newsletter and get your free actions, simply visit their website at https://www.greaterthangatsby.com/free-photoshop-actions/ and enter your name and email address. You will receive an email with a link to download the zip file containing the 12 free actions. You can also watch a video tutorial on how to install and use the actions on their website.

            -

            If you like the free actions and want to get more of them, you can also take advantage of their special offers and discounts on their premium collections. You can save up to 83% by purchasing their Complete Bundle of 1,103 Photoshop actions for only $209.30 (with code). You can also buy individual collections for $62.30 each (with code). To see all their collections and prices, visit their website at https://www.greaterthangatsby.com/photoshop-actions/.

            -

            Downloading Greater Than Gatsby Photoshop actions from torrents or other illegal sources is not worth the risk or the guilt. By signing up for their newsletter and getting their free actions, you can enjoy their high-quality products without breaking the law or hurting their business. You can also support them by purchasing their premium collections at affordable prices and get access to thousands of amazing actions that will transform your photos in seconds.

            7196e7f11a
            -
            -
            \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Gta Iv Disc 2 Data3 Cabl.md b/spaces/tialenAdioni/chat-gpt-api/logs/Gta Iv Disc 2 Data3 Cabl.md deleted file mode 100644 index ccd9f85a684da31e85f7a59de67bff6cbe302677..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Gta Iv Disc 2 Data3 Cabl.md +++ /dev/null @@ -1,21 +0,0 @@ - -

How to Fix the GTA IV Disc 2 data3.cab Error

            -

            If you are trying to install GTA IV on your PC and you encounter an error message that says "Please insert disc 2 that contains the file data3.cab" or "Setup could not find a file on specified path of disk", you are not alone. Many GTA IV players have reported this problem, which is caused by a damaged or corrupted data3.cab file on the second disc of the game.

            -

            Gta Iv Disc 2 Data3 Cabl


            Download 🆓 https://urlcod.com/2uK8Zg



            -

Fortunately, there is a simple solution that can help you fix this error and enjoy GTA IV without any hassle. All you need is a suitable application that can create an image of the second disc, such as Alcohol 120% or Daemon Tools. Here are the steps to follow:

            -
              -
1. Insert the second disc of GTA IV into your DVD drive and open Alcohol 120% or Daemon Tools.
2. Select the option to create an image of the disc and save it to your computer.
3. Mount the image file in a DVD emulator, such as Alcohol 120% or Daemon Tools.
4. Run the GTA IV installer again and, when it asks for the second disc, browse to the mounted image file and select it.
5. The installer should now recognize the data3.cab file and continue with the installation process.
            -

That's it! You have successfully fixed the GTA IV disc 2 data3.cab error and you can now play GTA IV on your PC. If you have any questions or feedback, please leave a comment below. Thank you for reading!

            - -

            GTA IV is one of the most popular and critically acclaimed games in the Grand Theft Auto series. It was released in 2008 for PlayStation 3, Xbox 360, and PC. The game follows the story of Niko Bellic, an Eastern European immigrant who comes to Liberty City to pursue the American Dream and escape his past. Along the way, he gets involved in various criminal activities, such as drug trafficking, robbery, assassination, and more.

            -

The game features a realistic and immersive open-world environment that lets the player explore Liberty City and its surroundings. It offers a variety of missions, side activities, vehicles, weapons, and customization options, and it also has a multiplayer mode that supports up to 32 players in modes such as deathmatch, racing, co-op, and more.

            -

            GTA IV received widespread praise from critics and fans alike for its story, gameplay, graphics, sound, and online features. The game also won several awards, such as Game of the Year, Best Action-Adventure Game, Best Writing, and more. The game has sold over 25 million copies worldwide and is considered one of the best games of all time.

            - -

However, GTA IV is not without its flaws. The game also faced some technical issues, such as bugs, glitches, crashes, and performance problems. One of the most common and frustrating issues that many PC players encountered was the GTA IV disc 2 data3.cab error. This error prevented players from installing the game properly and enjoying it on their computers. Luckily, there is a simple and effective solution to this problem, as explained in this article.

            e753bf7129
            -
            -
            \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Barbie Dreamhouse Adventures MOD APK and Enjoy VIP Features for Free in 2022.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Barbie Dreamhouse Adventures MOD APK and Enjoy VIP Features for Free in 2022.md deleted file mode 100644 index 4f8edf4e74a343a9096dccb95db4cffac83a9d5c..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Barbie Dreamhouse Adventures MOD APK and Enjoy VIP Features for Free in 2022.md +++ /dev/null @@ -1,75 +0,0 @@ -
            -

            Download Barbie Dreamhouse Adventures Mod APK (VIP Unlocked 2022)


            Introduction

            Do you love playing Barbie games on your Android device? Do you want to create your own Barbie DreamHouse experience with unlimited access to all the features and activities? If yes, then you should download Barbie Dreamhouse Adventures Mod APK (VIP Unlocked 2022) from our website. This is a modified version of the original game that lets you enjoy all the VIP benefits without paying any money. You can design your own rooms, dress up in fashionable outfits, cook delicious recipes, dance with your friends, and have fun at pool parties. You can also explore Malibu with your pink convertible, meet Barbie's family and friends, and join them in exciting adventures. In this article, we will tell you how to download and install Barbie Dreamhouse Adventures Mod APK (VIP Unlocked 2022) on your Android device. We will also explain the features and benefits of this modded game, and answer some frequently asked questions.

            -

            download barbie dreamhouse adventures mod apk (vip unlocked 2022)


            Download Zip 🌟 https://bltlly.com/2uOsHU




            Features of Barbie Dreamhouse Adventures Mod APK (VIP Unlocked 2022)

            Barbie Dreamhouse Adventures Mod APK (VIP Unlocked 2022) is a fun and creative game that lets you live your dream life as Barbie. Here are some of the features that you can enjoy with this modded game:

            • VIP Unlocked: You can access all the VIP features and content without spending any money. This includes unlimited coins, gems, stickers, outfits, hairstyles, accessories, furniture, wallpapers, decorations, pets, characters, locations, activities, mini-games, and more.
            • No Ads: You can play the game without any annoying ads or pop-ups that interrupt your gameplay.
            • No Root Required: You don't need to root your device to install this modded game. It works on any Android device that supports APK installation.
            • Easy Installation: You can easily install this modded game by following our simple instructions below.
            • Safe and Secure: This modded game is safe and secure to use. It does not contain any viruses or malware that can harm your device or data.

            How to Download and Install Barbie Dreamhouse Adventures Mod APK (VIP Unlocked 2022)

            If you want to download and install Barbie Dreamhouse Adventures Mod APK (VIP Unlocked 2022) on your Android device, you need to follow these steps:

1. Click on the download button below to download the APK file of Barbie Dreamhouse Adventures Mod APK (VIP Unlocked 2022).
2. Go to your device settings and enable the option to install unknown apps from unknown sources. This will allow you to install third-party apps that are not available on Google Play Store.
3. Go to your file manager app and locate the downloaded APK file in your download folder. Tap on it to open it and start the installation process.
4. Follow the instructions on the screen to complete the installation. It may take a few seconds or minutes depending on your device speed.
5. Once the installation is done, you can launch the game from your app drawer or home screen. Enjoy playing Barbie Dreamhouse Adventures Mod APK (VIP Unlocked 2022) with all the VIP benefits.

            Benefits of Playing Barbie Dreamhouse Adventures Mod APK (VIP Unlocked 2022)

            Playing Barbie Dreamhouse Adventures Mod APK (VIP Unlocked 2022) is not only fun but also beneficial for your creativity, imagination, and social skills. Here are some of the benefits that you can get from playing this game:

            -

            How to get barbie dreamhouse adventures mod apk with vip features
            -Barbie dreamhouse adventures mod apk latest version 2022 free download
            -Barbie dreamhouse adventures hack apk unlimited money and vip unlocked
            -Download barbie dreamhouse adventures premium mod apk for android
            -Barbie dreamhouse adventures mod apk full unlocked all rooms and activities
            -Barbie dreamhouse adventures cracked apk with vip access and no ads
            -Barbie dreamhouse adventures modded apk download link 2022
            -Barbie dreamhouse adventures cheat apk unlock everything and vip mode
            -Barbie dreamhouse adventures vip mod apk free download for android devices
            -Barbie dreamhouse adventures mod apk 2022.4.1 with vip unlocked and unlimited resources
            -Best barbie dreamhouse adventures mod apk download site 2022
            -Barbie dreamhouse adventures pro mod apk with all features unlocked and vip enabled
            -Barbie dreamhouse adventures unlimited vip mod apk download 2022
            -Barbie dreamhouse adventures mod apk no root required and vip unlocked
            -Barbie dreamhouse adventures vip hack apk download for free 2022
            -Barbie dreamhouse adventures mod apk offline mode with vip unlocked
            -Barbie dreamhouse adventures mod apk online multiplayer with vip features
            -Barbie dreamhouse adventures mod apk new update 2022 with vip unlocked
            -Barbie dreamhouse adventures mod apk easy download and install with vip unlocked
            -Barbie dreamhouse adventures mod apk safe and secure download with vip unlocked
            -Barbie dreamhouse adventures mod apk review and rating with vip unlocked
            -Barbie dreamhouse adventures mod apk gameplay and features with vip unlocked
            -Barbie dreamhouse adventures mod apk tips and tricks with vip unlocked
            -Barbie dreamhouse adventures mod apk support and feedback with vip unlocked
            -Barbie dreamhouse adventures mod apk video tutorial and guide with vip unlocked

            • Creativity: You can unleash your creativity by designing your own rooms, outfits, hairstyles, accessories, and more. You can also customize your DreamHouse with different wallpapers, furniture, decorations, and pets. You can express your style and personality through your choices.
            • Imagination: You can use your imagination to create your own stories and adventures with Barbie and her friends. You can explore Malibu, go to the beach, the spa, the mall, the cinema, and more. You can also join Barbie in her career as a baker, a fashion designer, a veterinarian, or a pop star.
            • Social Skills: You can improve your social skills by interacting with Barbie's family and friends. You can chat with them, help them, play with them, and have fun with them. You can also invite your real friends to join you in the game and share your creations with them.

            Comparison Table of Barbie Dreamhouse Adventures Mod APK (VIP Unlocked 2022) and Original Game

            To help you understand the difference between Barbie Dreamhouse Adventures Mod APK (VIP Unlocked 2022) and the original game, we have prepared a comparison table for you. Here it is:

| Feature | Barbie Dreamhouse Adventures Mod APK (VIP Unlocked 2022) | Original Game |
| --- | --- | --- |
| VIP Unlocked | Yes | No |
| No Ads | Yes | No |
| No Root Required | Yes | No |
| All Coins, Gems, Stickers, Outfits, Hairstyles, Accessories, Furniture, Wallpapers, Decorations, Pets, Characters, Locations, Activities, Mini-Games Unlocked | Yes | No |
| Safe and Secure | Yes | Yes |
| Easy Installation | Yes | Yes |
| Fun and Creative | Yes | Yes |
| Size | About 100 MB | About 100 MB |
| Rating | 4.8/5 | 4.3/5 |

            Conclusion

            Barbie Dreamhouse Adventures Mod APK (VIP Unlocked 2022) is a great game for all the Barbie fans who want to enjoy the ultimate Barbie DreamHouse experience. It is a modded version of the original game that gives you access to all the VIP features and content without spending any money. You can design your own rooms, dress up in fashionable outfits, cook delicious recipes, dance with your friends, and have fun at pool parties. You can also explore Malibu with your pink convertible, meet Barbie's family and friends, and join them in exciting adventures. You can download and install this modded game from our website by following our simple instructions. It is safe, secure, easy, and fun to play. So, what are you waiting for? Download Barbie Dreamhouse Adventures Mod APK (VIP Unlocked 2022) now and start living your dream life as Barbie.


            FAQs

            Here are some of the frequently asked questions about Barbie Dreamhouse Adventures Mod APK (VIP Unlocked 2022):

            Q1: Is this modded game legal?

            A1: Yes, this modded game is legal and does not violate any laws or regulations. However, it is not affiliated with or endorsed by the official developers or publishers of the original game. It is a fan-made modification that is meant for entertainment purposes only.

            Q2: Is this modded game compatible with my device?

            A2: This modded game is compatible with any Android device that supports APK installation. However, it may not work on some devices due to different specifications or settings. If you encounter any problems while playing this modded game, you can contact us for help.

            Q3: How can I update this modded game?

            A3: This modded game is updated regularly to keep up with the latest version of the original game. You can check our website for the latest updates and download them from there. You don't need to uninstall the previous version before installing the new one.

            Q4: How can I share this modded game with my friends?

            A4: You can share this modded game with your friends by sending them the link to our website or the APK file directly. You can also invite them to join you in the game and have fun together.

            Q5: How can I support this modded game?

            A5: You can support this modded game by giving us your feedback, suggestions, or reviews. You can also share this modded game with other Barbie fans and spread the word about it. We appreciate your support and encouragement.

            197e85843d
            -
            -
            \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Hap.Hazard.v2.0-SLAM Version Download [PORTABLE].md b/spaces/tioseFevbu/cartoon-converter/scripts/Hap.Hazard.v2.0-SLAM Version Download [PORTABLE].md deleted file mode 100644 index 3711c7b727785d6839f6cc34506444491d2c7f86..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Hap.Hazard.v2.0-SLAM Version Download [PORTABLE].md +++ /dev/null @@ -1,17 +0,0 @@ -
            -

            Hap.Hazard.v2.0-SLAM Version: A New Way to Enjoy Music

            -

            If you are looking for a new and exciting way to listen to music, you might want to check out Hap.Hazard.v2.0-SLAM Version. This is a remix of the original Hap.Hazard.v2.0 song by CiofratPdempme, a talented producer and DJ from Italy. The SLAM Version adds more energy, bass and beats to the original track, making it perfect for dancing, partying or working out.

            -

            Hap.Hazard.v2.0-SLAM Version Download


            Download Filehttps://urlcod.com/2uHvFo



            -

            You can listen to Hap.Hazard.v2.0-SLAM Version for free on SoundCloud[^1^], where you can also download it for offline playback. The song has received positive feedback from listeners who praised its catchy melody, dynamic rhythm and high-quality production. Some even compared it to the works of famous artists like Daft Punk, Skrillex and Deadmau5.

            -

            So what are you waiting for? Head over to SoundCloud and give Hap.Hazard.v2.0-SLAM Version a try. You might just find your new favorite song.

            - -

            Hap.Hazard.v2.0-SLAM Version is not the only remix that CiofratPdempme has created. He has also remixed songs by other artists such as Drake, Rihanna, Ed Sheeran and more. You can find his remixes on his SoundCloud page, where he also uploads his original songs. He describes his music style as a mix of electro, house, trap and hip hop.

            -

CiofratPdempme started making music when he was 15 years old, using his laptop and software called FL Studio. He learned everything by himself, watching tutorials and experimenting with different sounds and effects. He says that music is his passion and his way of expressing himself. He hopes to inspire other people with his music and to reach a wider audience.

            -

            -

            If you like Hap.Hazard.v2.0-SLAM Version and want to support CiofratPdempme, you can follow him on SoundCloud, Instagram and Twitter. You can also share his music with your friends and family, or leave him a comment or a like. He appreciates any feedback and encouragement from his fans. He is always working on new projects and plans to release more remixes and original songs in the future.

            - -

One of the reasons Hap.Hazard.v2.0-SLAM Version is so popular is that it uses a technique called SLAM (Simultaneous Localization and Mapping). This technique allows the song to adapt to the environment and the listener's preferences, creating a unique and personalized experience. For example, the song can change its tempo, volume, and pitch depending on the listener's mood, location, and activity.

            -

SLAM is also used in other fields such as robotics, navigation, and augmented reality. It enables devices to map their surroundings and locate themselves within that map, using sensors and algorithms. SLAM can help robots move around safely and efficiently, or create immersive virtual environments for users. It is considered one of the most challenging problems in computer vision and artificial intelligence.
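To make the idea more concrete, here is a tiny, illustrative Python sketch of what "simultaneous localization and mapping" means in practice: a device jointly refines a guess of where it is and a guess of where a landmark is, using only noisy motion and noisy range readings. This is a toy under simplified assumptions (one dimension, a single landmark, a hand-picked correction gain); it is not code from the song, from SoundCloud, or from any real SLAM library.

```python
# Toy 1-D SLAM sketch: jointly estimate the device pose and a landmark position
# from noisy odometry and noisy range measurements. Purely illustrative.
import random

random.seed(0)

true_robot = 0.0        # actual device position (unknown to the estimator)
true_landmark = 10.0    # actual landmark position (unknown to the estimator)

est_robot = 0.0         # estimated device position
est_landmark = None     # estimated landmark position (the "map" starts empty)

for step in range(20):
    # 1. Predict: command a move of +1.0; real motion is corrupted by noise.
    true_robot += 1.0 + random.gauss(0.0, 0.1)
    est_robot += 1.0                       # dead-reckoning prediction

    # 2. Observe: noisy range from the device to the landmark.
    z = (true_landmark - true_robot) + random.gauss(0.0, 0.1)

    # 3. Update: the first sighting initialises the map; later sightings
    #    correct the pose estimate and the map estimate at the same time.
    if est_landmark is None:
        est_landmark = est_robot + z
    innovation = (est_landmark - est_robot) - z   # predicted minus measured range
    est_robot += 0.5 * innovation                 # nudge the pose...
    est_landmark -= 0.5 * innovation              # ...and the map, jointly

print(f"device:   true={true_robot:.2f}  estimated={est_robot:.2f}")
print(f"landmark: true={true_landmark:.2f}  estimated={est_landmark:.2f}")
```

Splitting each correction between the pose and the map is the essential SLAM move; real systems such as EKF-based or graph-based SLAM do the same thing with proper uncertainty bookkeeping instead of a fixed 0.5 gain.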

            -

            Hap.Hazard.v2.0-SLAM Version is a testament to the power and potential of SLAM and music. It shows how technology can enhance creativity and entertainment, and how music can connect with people on a deeper level. Hap.Hazard.v2.0-SLAM Version is more than just a song, it is a musical adventure.

            e93f5a0c3f
            -
            -
            \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/metadata/__init__.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/metadata/__init__.py deleted file mode 100644 index 8cd0fda68515b7095a90a53597422240c5cfc3ce..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/metadata/__init__.py +++ /dev/null @@ -1,105 +0,0 @@ -import contextlib -import functools -import os -import sys -from typing import TYPE_CHECKING, List, Optional, Type, cast - -from pip._internal.utils.misc import strtobool - -from .base import BaseDistribution, BaseEnvironment, FilesystemWheel, MemoryWheel, Wheel - -if TYPE_CHECKING: - from typing import Protocol -else: - Protocol = object - -__all__ = [ - "BaseDistribution", - "BaseEnvironment", - "FilesystemWheel", - "MemoryWheel", - "Wheel", - "get_default_environment", - "get_environment", - "get_wheel_distribution", - "select_backend", -] - - -def _should_use_importlib_metadata() -> bool: - """Whether to use the ``importlib.metadata`` or ``pkg_resources`` backend. - - By default, pip uses ``importlib.metadata`` on Python 3.11+, and - ``pkg_resourcess`` otherwise. This can be overridden by a couple of ways: - - * If environment variable ``_PIP_USE_IMPORTLIB_METADATA`` is set, it - dictates whether ``importlib.metadata`` is used, regardless of Python - version. - * On Python 3.11+, Python distributors can patch ``importlib.metadata`` - to add a global constant ``_PIP_USE_IMPORTLIB_METADATA = False``. This - makes pip use ``pkg_resources`` (unless the user set the aforementioned - environment variable to *True*). - """ - with contextlib.suppress(KeyError, ValueError): - return bool(strtobool(os.environ["_PIP_USE_IMPORTLIB_METADATA"])) - if sys.version_info < (3, 11): - return False - import importlib.metadata - - return bool(getattr(importlib.metadata, "_PIP_USE_IMPORTLIB_METADATA", True)) - - -class Backend(Protocol): - Distribution: Type[BaseDistribution] - Environment: Type[BaseEnvironment] - - -@functools.lru_cache(maxsize=None) -def select_backend() -> Backend: - if _should_use_importlib_metadata(): - from . import importlib - - return cast(Backend, importlib) - from . import pkg_resources - - return cast(Backend, pkg_resources) - - -def get_default_environment() -> BaseEnvironment: - """Get the default representation for the current environment. - - This returns an Environment instance from the chosen backend. The default - Environment instance should be built from ``sys.path`` and may use caching - to share instance state accorss calls. - """ - return select_backend().Environment.default() - - -def get_environment(paths: Optional[List[str]]) -> BaseEnvironment: - """Get a representation of the environment specified by ``paths``. - - This returns an Environment instance from the chosen backend based on the - given import paths. The backend must build a fresh instance representing - the state of installed distributions when this function is called. - """ - return select_backend().Environment.from_paths(paths) - - -def get_directory_distribution(directory: str) -> BaseDistribution: - """Get the distribution metadata representation in the specified directory. - - This returns a Distribution instance from the chosen backend based on - the given on-disk ``.dist-info`` directory. 
- """ - return select_backend().Distribution.from_directory(directory) - - -def get_wheel_distribution(wheel: Wheel, canonical_name: str) -> BaseDistribution: - """Get the representation of the specified wheel's distribution metadata. - - This returns a Distribution instance from the chosen backend based on - the given wheel's ``.dist-info`` directory. - - :param canonical_name: Normalized project name of the given wheel. - """ - return select_backend().Distribution.from_wheel(wheel, canonical_name) diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/big5prober.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/big5prober.py deleted file mode 100644 index e4dfa7aa02a96cb79a93427bcdab893d957763d0..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/big5prober.py +++ /dev/null @@ -1,47 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Communicator client code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .chardistribution import Big5DistributionAnalysis -from .codingstatemachine import CodingStateMachine -from .mbcharsetprober import MultiByteCharSetProber -from .mbcssm import BIG5_SM_MODEL - - -class Big5Prober(MultiByteCharSetProber): - def __init__(self): - super().__init__() - self.coding_sm = CodingStateMachine(BIG5_SM_MODEL) - self.distribution_analyzer = Big5DistributionAnalysis() - self.reset() - - @property - def charset_name(self): - return "Big5" - - @property - def language(self): - return "Chinese" diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/scope.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/scope.py deleted file mode 100644 index 6822b8ca5429db9785881dd30e3964a655a64a88..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/scope.py +++ /dev/null @@ -1,86 +0,0 @@ -from collections.abc import Mapping -from typing import TYPE_CHECKING, Any, Optional, Tuple - -from .highlighter import ReprHighlighter -from .panel import Panel -from .pretty import Pretty -from .table import Table -from .text import Text, TextType - -if TYPE_CHECKING: - from .console import ConsoleRenderable - - -def render_scope( - scope: "Mapping[str, Any]", - *, - title: Optional[TextType] = None, - sort_keys: bool = True, - indent_guides: bool = False, - max_length: Optional[int] = None, - max_string: Optional[int] = None, -) -> "ConsoleRenderable": - """Render python variables in a given scope. - - Args: - scope (Mapping): A mapping containing variable names and values. - title (str, optional): Optional title. Defaults to None. - sort_keys (bool, optional): Enable sorting of items. Defaults to True. - indent_guides (bool, optional): Enable indentaton guides. Defaults to False. - max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. - Defaults to None. - max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None. - - Returns: - ConsoleRenderable: A renderable object. 
- """ - highlighter = ReprHighlighter() - items_table = Table.grid(padding=(0, 1), expand=False) - items_table.add_column(justify="right") - - def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]: - """Sort special variables first, then alphabetically.""" - key, _ = item - return (not key.startswith("__"), key.lower()) - - items = sorted(scope.items(), key=sort_items) if sort_keys else scope.items() - for key, value in items: - key_text = Text.assemble( - (key, "scope.key.special" if key.startswith("__") else "scope.key"), - (" =", "scope.equals"), - ) - items_table.add_row( - key_text, - Pretty( - value, - highlighter=highlighter, - indent_guides=indent_guides, - max_length=max_length, - max_string=max_string, - ), - ) - return Panel.fit( - items_table, - title=title, - border_style="scope.border", - padding=(0, 1), - ) - - -if __name__ == "__main__": # pragma: no cover - from pip._vendor.rich import print - - print() - - def test(foo: float, bar: float) -> None: - list_of_things = [1, 2, 3, None, 4, True, False, "Hello World"] - dict_of_things = { - "version": "1.1", - "method": "confirmFruitPurchase", - "params": [["apple", "orange", "mangoes", "pomelo"], 1.123], - "id": "194521489", - } - print(render_scope(locals(), title="[i]locals", sort_keys=False)) - - test(20.3423, 3.1427) - print() diff --git a/spaces/tomofi/MMOCR/configs/_base_/det_models/psenet_r50_fpnf.py b/spaces/tomofi/MMOCR/configs/_base_/det_models/psenet_r50_fpnf.py deleted file mode 100644 index a3aff0d1325d3b9e25b5ed095cea28d313f611a0..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/configs/_base_/det_models/psenet_r50_fpnf.py +++ /dev/null @@ -1,51 +0,0 @@ -model_poly = dict( - type='PSENet', - backbone=dict( - type='mmdet.ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=-1, - norm_cfg=dict(type='SyncBN', requires_grad=True), - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - norm_eval=True, - style='caffe'), - neck=dict( - type='FPNF', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - fusion_type='concat'), - bbox_head=dict( - type='PSEHead', - in_channels=[256], - out_channels=7, - loss=dict(type='PSELoss'), - postprocessor=dict(type='PSEPostprocessor', text_repr_type='poly')), - train_cfg=None, - test_cfg=None) - -model_quad = dict( - type='PSENet', - backbone=dict( - type='mmdet.ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=-1, - norm_cfg=dict(type='SyncBN', requires_grad=True), - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), - norm_eval=True, - style='caffe'), - neck=dict( - type='FPNF', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - fusion_type='concat'), - bbox_head=dict( - type='PSEHead', - in_channels=[256], - out_channels=7, - loss=dict(type='PSELoss'), - postprocessor=dict(type='PSEPostprocessor', text_repr_type='quad')), - train_cfg=None, - test_cfg=None) diff --git a/spaces/tomofi/MMOCR/mmocr/datasets/pipelines/textdet_targets/__init__.py b/spaces/tomofi/MMOCR/mmocr/datasets/pipelines/textdet_targets/__init__.py deleted file mode 100644 index 2662739aced091200ca4814f76b06da7529702ba..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/mmocr/datasets/pipelines/textdet_targets/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-from .base_textdet_targets import BaseTextDetTargets -from .dbnet_targets import DBNetTargets -from .drrg_targets import DRRGTargets -from .fcenet_targets import FCENetTargets -from .panet_targets import PANetTargets -from .psenet_targets import PSENetTargets -from .textsnake_targets import TextSnakeTargets - -__all__ = [ - 'BaseTextDetTargets', 'PANetTargets', 'PSENetTargets', 'DBNetTargets', - 'FCENetTargets', 'TextSnakeTargets', 'DRRGTargets' -] diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/legacy_1.x/faster_rcnn_r50_fpn_1x_coco_v1.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/legacy_1.x/faster_rcnn_r50_fpn_1x_coco_v1.py deleted file mode 100644 index fb2f2d1e13b8c97dbf5f785dadebcccf874ff7be..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/legacy_1.x/faster_rcnn_r50_fpn_1x_coco_v1.py +++ /dev/null @@ -1,37 +0,0 @@ -_base_ = [ - '../_base_/models/faster_rcnn_r50_fpn.py', - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - type='FasterRCNN', - pretrained='torchvision://resnet50', - rpn_head=dict( - type='RPNHead', - anchor_generator=dict( - type='LegacyAnchorGenerator', - center_offset=0.5, - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict( - type='RoIAlign', - output_size=7, - sampling_ratio=2, - aligned=False), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn_proposal=dict(max_per_img=2000), - rcnn=dict(assigner=dict(match_low_quality=True)))) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tools/analysis_tools/analyze_results.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tools/analysis_tools/analyze_results.py deleted file mode 100644 index fc6b4d9252178cb24a2266ac52aa77223e4f0d7a..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tools/analysis_tools/analyze_results.py +++ /dev/null @@ -1,202 +0,0 @@ -import argparse -import os.path as osp - -import mmcv -import numpy as np -from mmcv import Config, DictAction - -from mmdet.core.evaluation import eval_map -from mmdet.core.visualization import imshow_gt_det_bboxes -from mmdet.datasets import build_dataset, get_loading_pipeline - - -def bbox_map_eval(det_result, annotation): - """Evaluate mAP of single image det result. - - Args: - det_result (list[list]): [[cls1_det, cls2_det, ...], ...]. - The outer list indicates images, and the inner list indicates - per-class detected bboxes. 
- annotation (dict): Ground truth annotations where keys of - annotations are: - - - bboxes: numpy array of shape (n, 4) - - labels: numpy array of shape (n, ) - - bboxes_ignore (optional): numpy array of shape (k, 4) - - labels_ignore (optional): numpy array of shape (k, ) - - Returns: - float: mAP - """ - - # use only bbox det result - if isinstance(det_result, tuple): - bbox_det_result = [det_result[0]] - else: - bbox_det_result = [det_result] - # mAP - iou_thrs = np.linspace( - .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) - mean_aps = [] - for thr in iou_thrs: - mean_ap, _ = eval_map( - bbox_det_result, [annotation], iou_thr=thr, logger='silent') - mean_aps.append(mean_ap) - return sum(mean_aps) / len(mean_aps) - - -class ResultVisualizer(object): - """Display and save evaluation results. - - Args: - show (bool): Whether to show the image. Default: True - wait_time (float): Value of waitKey param. Default: 0. - score_thr (float): Minimum score of bboxes to be shown. - Default: 0 - """ - - def __init__(self, show=False, wait_time=0, score_thr=0): - self.show = show - self.wait_time = wait_time - self.score_thr = score_thr - - def _save_image_gts_results(self, dataset, results, mAPs, out_dir=None): - mmcv.mkdir_or_exist(out_dir) - - for mAP_info in mAPs: - index, mAP = mAP_info - data_info = dataset.prepare_train_img(index) - - # calc save file path - filename = data_info['filename'] - if data_info['img_prefix'] is not None: - filename = osp.join(data_info['img_prefix'], filename) - else: - filename = data_info['filename'] - fname, name = osp.splitext(osp.basename(filename)) - save_filename = fname + '_' + str(round(mAP, 3)) + name - out_file = osp.join(out_dir, save_filename) - imshow_gt_det_bboxes( - data_info['img'], - data_info, - results[index], - dataset.CLASSES, - show=self.show, - score_thr=self.score_thr, - wait_time=self.wait_time, - out_file=out_file) - - def evaluate_and_show(self, - dataset, - results, - topk=20, - show_dir='work_dir', - eval_fn=None): - """Evaluate and show results. - - Args: - dataset (Dataset): A PyTorch dataset. - results (list): Det results from test results pkl file - topk (int): Number of the highest topk and - lowest topk after evaluation index sorting. Default: 20 - show_dir (str, optional): The filename to write the image. 
- Default: 'work_dir' - eval_fn (callable, optional): Eval function, Default: None - """ - - assert topk > 0 - if (topk * 2) > len(dataset): - topk = len(dataset) // 2 - - if eval_fn is None: - eval_fn = bbox_map_eval - else: - assert callable(eval_fn) - - prog_bar = mmcv.ProgressBar(len(results)) - _mAPs = {} - for i, (result, ) in enumerate(zip(results)): - # self.dataset[i] should not call directly - # because there is a risk of mismatch - data_info = dataset.prepare_train_img(i) - mAP = eval_fn(result, data_info['ann_info']) - _mAPs[i] = mAP - prog_bar.update() - - # descending select topk image - _mAPs = list(sorted(_mAPs.items(), key=lambda kv: kv[1])) - good_mAPs = _mAPs[-topk:] - bad_mAPs = _mAPs[:topk] - - good_dir = osp.abspath(osp.join(show_dir, 'good')) - bad_dir = osp.abspath(osp.join(show_dir, 'bad')) - self._save_image_gts_results(dataset, results, good_mAPs, good_dir) - self._save_image_gts_results(dataset, results, bad_mAPs, bad_dir) - - -def parse_args(): - parser = argparse.ArgumentParser( - description='MMDet eval image prediction result for each') - parser.add_argument('config', help='test config file path') - parser.add_argument( - 'prediction_path', help='prediction path where test pkl result') - parser.add_argument( - 'show_dir', help='directory where painted images will be saved') - parser.add_argument('--show', action='store_true', help='show results') - parser.add_argument( - '--wait-time', - type=float, - default=0, - help='the interval of show (s), 0 is block') - parser.add_argument( - '--topk', - default=20, - type=int, - help='saved Number of the highest topk ' - 'and lowest topk after index sorting') - parser.add_argument( - '--show-score-thr', - type=float, - default=0, - help='score threshold (default: 0.)') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - args = parser.parse_args() - return args - - -def main(): - args = parse_args() - - mmcv.check_file_exist(args.prediction_path) - - cfg = Config.fromfile(args.config) - if args.cfg_options is not None: - cfg.merge_from_dict(args.cfg_options) - cfg.data.test.test_mode = True - # import modules from string list. 
- if cfg.get('custom_imports', None): - from mmcv.utils import import_modules_from_strings - import_modules_from_strings(**cfg['custom_imports']) - - cfg.data.test.pop('samples_per_gpu', 0) - cfg.data.test.pipeline = get_loading_pipeline(cfg.data.train.pipeline) - dataset = build_dataset(cfg.data.test) - outputs = mmcv.load(args.prediction_path) - - result_visualizer = ResultVisualizer(args.show, args.wait_time, - args.show_score_thr) - result_visualizer.evaluate_and_show( - dataset, outputs, topk=args.topk, show_dir=args.show_dir) - - -if __name__ == '__main__': - main() diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tools/slurm_train.sh b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tools/slurm_train.sh deleted file mode 100644 index b3feb3d9c7a6c33d82739cdf5ee10365673aaded..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/tools/slurm_train.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash - -set -x - -PARTITION=$1 -JOB_NAME=$2 -CONFIG=$3 -WORK_DIR=$4 -GPUS=${GPUS:-8} -GPUS_PER_NODE=${GPUS_PER_NODE:-8} -CPUS_PER_TASK=${CPUS_PER_TASK:-5} -SRUN_ARGS=${SRUN_ARGS:-""} -PY_ARGS=${@:5} - -PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ -srun -p ${PARTITION} \ - --job-name=${JOB_NAME} \ - --gres=gpu:${GPUS_PER_NODE} \ - --ntasks=${GPUS} \ - --ntasks-per-node=${GPUS_PER_NODE} \ - --cpus-per-task=${CPUS_PER_TASK} \ - --kill-on-bad-exit=1 \ - ${SRUN_ARGS} \ - python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} diff --git a/spaces/trttung1610/musicgen/scripts/resample_dataset.py b/spaces/trttung1610/musicgen/scripts/resample_dataset.py deleted file mode 100644 index af5288712b8d2cde2d9814c747275e69f6e970c8..0000000000000000000000000000000000000000 --- a/spaces/trttung1610/musicgen/scripts/resample_dataset.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -"""Resampling script. -""" -import argparse -from pathlib import Path -import shutil -import typing as tp - -import submitit -import tqdm - -from audiocraft.data.audio import audio_read, audio_write -from audiocraft.data.audio_dataset import load_audio_meta, find_audio_files -from audiocraft.data.audio_utils import convert_audio -from audiocraft.environment import AudioCraftEnvironment - - -def read_txt_files(path: tp.Union[str, Path]): - with open(args.files_path) as f: - lines = [line.rstrip() for line in f] - print(f"Read {len(lines)} in .txt") - lines = [line for line in lines if Path(line).suffix not in ['.json', '.txt', '.csv']] - print(f"Filtered and keep {len(lines)} from .txt") - return lines - - -def read_egs_files(path: tp.Union[str, Path]): - path = Path(path) - if path.is_dir(): - if (path / 'data.jsonl').exists(): - path = path / 'data.jsonl' - elif (path / 'data.jsonl.gz').exists(): - path = path / 'data.jsonl.gz' - else: - raise ValueError("Don't know where to read metadata from in the dir. 
" - "Expecting either a data.jsonl or data.jsonl.gz file but none found.") - meta = load_audio_meta(path) - return [m.path for m in meta] - - -def process_dataset(args, n_shards: int, node_index: int, task_index: tp.Optional[int] = None): - if task_index is None: - env = submitit.JobEnvironment() - task_index = env.global_rank - shard_index = node_index * args.tasks_per_node + task_index - - if args.files_path is None: - lines = [m.path for m in find_audio_files(args.root_path, resolve=False, progress=True, workers=8)] - else: - files_path = Path(args.files_path) - if files_path.suffix == '.txt': - print(f"Reading file list from .txt file: {args.files_path}") - lines = read_txt_files(args.files_path) - else: - print(f"Reading file list from egs: {args.files_path}") - lines = read_egs_files(args.files_path) - - total_files = len(lines) - print( - f"Total of {total_files} processed with {n_shards} shards. " + - f"Current idx = {shard_index} -> {total_files // n_shards} files to process" - ) - for idx, line in tqdm.tqdm(enumerate(lines)): - - # skip if not part of this shard - if idx % n_shards != shard_index: - continue - - path = str(AudioCraftEnvironment.apply_dataset_mappers(line)) - root_path = str(args.root_path) - if not root_path.endswith('/'): - root_path += '/' - assert path.startswith(str(root_path)), \ - f"Mismatch between path and provided root: {path} VS {root_path}" - - try: - metadata_path = Path(path).with_suffix('.json') - out_path = args.out_path / path[len(root_path):] - out_metadata_path = out_path.with_suffix('.json') - out_done_token = out_path.with_suffix('.done') - - # don't reprocess existing files - if out_done_token.exists(): - continue - - print(idx, out_path, path) - mix, sr = audio_read(path) - mix_channels = args.channels if args.channels is not None and args.channels > 0 else mix.size(0) - # enforce simple stereo - out_channels = mix_channels - if out_channels > 2: - print(f"Mix has more than two channels: {out_channels}, enforcing 2 channels") - out_channels = 2 - out_sr = args.sample_rate if args.sample_rate is not None else sr - out_wav = convert_audio(mix, sr, out_sr, out_channels) - audio_write(out_path.with_suffix(''), out_wav, sample_rate=out_sr, - format=args.format, normalize=False, strategy='clip') - if metadata_path.exists(): - shutil.copy(metadata_path, out_metadata_path) - else: - print(f"No metadata found at {str(metadata_path)}") - out_done_token.touch() - except Exception as e: - print(f"Error processing file line: {line}, {e}") - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description="Resample dataset with SLURM.") - parser.add_argument( - "--log_root", - type=Path, - default=Path.home() / 'tmp' / 'resample_logs', - ) - parser.add_argument( - "--files_path", - type=Path, - help="List of files to process, either .txt (one file per line) or a jsonl[.gz].", - ) - parser.add_argument( - "--root_path", - type=Path, - required=True, - help="When rewriting paths, this will be the prefix to remove.", - ) - parser.add_argument( - "--out_path", - type=Path, - required=True, - help="When rewriting paths, `root_path` will be replaced by this.", - ) - parser.add_argument("--xp_name", type=str, default="shutterstock") - parser.add_argument( - "--nodes", - type=int, - default=4, - ) - parser.add_argument( - "--tasks_per_node", - type=int, - default=20, - ) - parser.add_argument( - "--cpus_per_task", - type=int, - default=4, - ) - parser.add_argument( - "--memory_gb", - type=int, - help="Memory in GB." 
- ) - parser.add_argument( - "--format", - type=str, - default="wav", - ) - parser.add_argument( - "--sample_rate", - type=int, - default=32000, - ) - parser.add_argument( - "--channels", - type=int, - ) - parser.add_argument( - "--partition", - default='learnfair', - ) - parser.add_argument("--qos") - parser.add_argument("--account") - parser.add_argument("--timeout", type=int, default=4320) - parser.add_argument('--debug', action='store_true', help='debug mode (local run)') - args = parser.parse_args() - n_shards = args.tasks_per_node * args.nodes - if args.files_path is None: - print("Warning: --files_path not provided, not recommended when processing more than 10k files.") - if args.debug: - print("Debugging mode") - process_dataset(args, n_shards=n_shards, node_index=0, task_index=0) - else: - - log_folder = Path(args.log_root) / args.xp_name / '%j' - print(f"Logging to: {log_folder}") - log_folder.parent.mkdir(parents=True, exist_ok=True) - executor = submitit.AutoExecutor(folder=str(log_folder)) - if args.qos: - executor.update_parameters(slurm_partition=args.partition, slurm_qos=args.qos, slurm_account=args.account) - else: - executor.update_parameters(slurm_partition=args.partition) - executor.update_parameters( - slurm_job_name=args.xp_name, timeout_min=args.timeout, - cpus_per_task=args.cpus_per_task, tasks_per_node=args.tasks_per_node, nodes=1) - if args.memory_gb: - executor.update_parameters(mem=f'{args.memory_gb}GB') - jobs = [] - with executor.batch(): - for node_index in range(args.nodes): - job = executor.submit(process_dataset, args, n_shards=n_shards, node_index=node_index) - jobs.append(job) - for job in jobs: - print(f"Waiting on job {job.job_id}") - job.results() diff --git a/spaces/trttung1610/musicgen/tests/losses/test_losses.py b/spaces/trttung1610/musicgen/tests/losses/test_losses.py deleted file mode 100644 index b6681e12c453dea5aeba738ab252d1923b7e0941..0000000000000000000000000000000000000000 --- a/spaces/trttung1610/musicgen/tests/losses/test_losses.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -import random - -import torch - -from audiocraft.losses import ( - MelSpectrogramL1Loss, - MultiScaleMelSpectrogramLoss, - MRSTFTLoss, - SISNR, - STFTLoss, -) - - -def test_mel_l1_loss(): - N, C, T = 2, 2, random.randrange(1000, 100_000) - t1 = torch.randn(N, C, T) - t2 = torch.randn(N, C, T) - - mel_l1 = MelSpectrogramL1Loss(sample_rate=22_050) - loss = mel_l1(t1, t2) - loss_same = mel_l1(t1, t1) - - assert isinstance(loss, torch.Tensor) - assert isinstance(loss_same, torch.Tensor) - assert loss_same.item() == 0.0 - - -def test_msspec_loss(): - N, C, T = 2, 2, random.randrange(1000, 100_000) - t1 = torch.randn(N, C, T) - t2 = torch.randn(N, C, T) - - msspec = MultiScaleMelSpectrogramLoss(sample_rate=22_050) - loss = msspec(t1, t2) - loss_same = msspec(t1, t1) - - assert isinstance(loss, torch.Tensor) - assert isinstance(loss_same, torch.Tensor) - assert loss_same.item() == 0.0 - - -def test_mrstft_loss(): - N, C, T = 2, 2, random.randrange(1000, 100_000) - t1 = torch.randn(N, C, T) - t2 = torch.randn(N, C, T) - - mrstft = MRSTFTLoss() - loss = mrstft(t1, t2) - - assert isinstance(loss, torch.Tensor) - - -def test_sisnr_loss(): - N, C, T = 2, 2, random.randrange(1000, 100_000) - t1 = torch.randn(N, C, T) - t2 = torch.randn(N, C, T) - - sisnr = SISNR() - loss = sisnr(t1, t2) - - assert isinstance(loss, torch.Tensor) - - -def test_stft_loss(): - N, C, T = 2, 2, random.randrange(1000, 100_000) - t1 = torch.randn(N, C, T) - t2 = torch.randn(N, C, T) - - mrstft = STFTLoss() - loss = mrstft(t1, t2) - - assert isinstance(loss, torch.Tensor) diff --git a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/model_utils_torch/layers.py b/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/model_utils_torch/layers.py deleted file mode 100644 index ad2366c67e537eeaaf163f82aba10e6e62a99cd7..0000000000000000000000000000000000000000 --- a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/model_utils_torch/layers.py +++ /dev/null @@ -1,182 +0,0 @@ -''' -预定义组合层,目的为便于使用 - -尽可能使用jit编译,如果jit有困难,则果断不使用jit - -''' - -import torch -import torch.jit - -import torch.nn as nn -import torch.nn.functional as F -import numpy as np -import math -from typing import Iterable as _Iterable -from typing import Callable as _Callable - -try: - from . import ops - from . 
import utils - from .more_layers import * -except (ModuleNotFoundError, ImportError): - import ops - import utils - from more_layers import * - -''' -注意写 torch.jit.script 时需要手动添加非 Tensor 参数的注释 -''' - - -class Interpolate(torch.jit.ScriptModule): - ''' - 与Upsample层等价 - ''' - __constants__ = ['size', 'scale_factor', 'mode', 'align_corners', 'name', 'recompute_scale_factor'] - - def __init__(self, size=None, scale_factor=None, mode: str='nearest', align_corners=None, recompute_scale_factor=None, antialias=False) -> None: - super().__init__() - self.name = type(self).__name__ - self.size = size - if isinstance(scale_factor, tuple): - self.scale_factor = tuple(float(factor) for factor in scale_factor) - else: - self.scale_factor = float(scale_factor) if scale_factor else None - self.mode = mode - self.align_corners = align_corners - self.recompute_scale_factor = recompute_scale_factor - self.antialias = antialias - - @torch.jit.script_method - def forward(self, input: torch.Tensor) -> torch.Tensor: - return F.interpolate(input, self.size, self.scale_factor, self.mode, self.align_corners, recompute_scale_factor=self.recompute_scale_factor, antialias=self.antialias) - - def extra_repr(self) -> str: - if self.scale_factor is not None: - info = 'scale_factor=' + str(self.scale_factor) - else: - info = 'size=' + str(self.size) - info += ', mode=' + self.mode - return info - - -# class Upsample(torch.jit.ScriptModule): -# __constants__ = ['size', 'scale_factor', 'mode', 'align_corners', 'name'] -# -# def __init__(self, size=None, scale_factor=None, mode='bilinear', align_corners=None): -# super().__init__() -# -# # scale_factor 不允许是整数,有点坑。。 -# if size is None: -# if isinstance(scale_factor, _Iterable): -# scale_factor = tuple([float(i) for i in scale_factor]) -# else: -# scale_factor = float(scale_factor) -# -# self.size = size -# self.scale_factor = scale_factor -# self.mode = mode -# self.align_corners = align_corners -# -# @torch.jit.script_method -# def forward(self, x: torch.Tensor): -# return F.interpolate(x, self.size, self.scale_factor, self.mode, self.align_corners) - - -Upsample = Interpolate - - -class UpsampleConcat(torch.jit.ScriptModule): - __constants__ = ['method', 'align_corners'] - - def __init__(self, method='bilinear', align_corners=None): - super().__init__() - self.method = method - self.align_corners = align_corners - - @torch.jit.script_method - def forward(self, x, shortpoint): - shape = shortpoint.shape - x = F.interpolate(x, (shape[2], shape[3]), mode=self.method, align_corners=self.align_corners) - x = torch.cat((x, shortpoint), 1) - return x - - -class LinearGroup(torch.jit.ScriptModule): - __constants__ = ['groups', 'use_bias'] - - def __init__(self, in_feat, out_feat, groups, bias=True): - super().__init__() - self.groups = groups - self.use_bias = bias - in_feat_g = in_feat // groups - out_feat_g = out_feat // groups - - assert in_feat_g * groups == in_feat, 'Found in_feat_g * groups != in_feat' - assert out_feat_g * groups == out_feat, 'Found out_feat_g * groups != out_feat' - - self.weight = nn.Parameter(torch.zeros(groups, out_feat_g, in_feat_g), True) - nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) - - if bias: - self.bias = nn.Parameter(torch.zeros(1, out_feat), True) - else: - self.register_buffer('bias', torch.zeros(0)) - - @torch.jit.script_method - def forward(self, x): - ys = torch.chunk(x, self.groups, -1) - out_ys = [] - for i in range(self.groups): - out_ys.append(F.linear(ys[i], self.weight[i])) - y = torch.cat(out_ys, -1) - if self.use_bias: - y 
= y + self.bias - return y - - -class AdaptiveGemPool(torch.jit.ScriptModule): - __constants__ = ['dim', 'eps', 'keepdim'] - - def __init__(self, dim=(2, 3), p=3, eps=1e-6, keepdim=False): - super().__init__() - self.dim = dim - self.eps = eps - self.keepdim = keepdim - self.p = nn.Parameter(torch.ones(1) * p) - - @torch.jit.script_method - def forward(self, x): - return x.clamp(min=self.eps).pow(self.p).mean(self.dim, keepdim=self.keepdim).pow(1. / self.p) - - -class Reshape(torch.jit.ScriptModule): - __constants__ = ['shape'] - - def __init__(self, new_shape): - super().__init__() - self.shape = tuple(int(i) for i in new_shape) - - @torch.jit.script_method - def forward(self, x): - return x.reshape(self.shape) - - def extra_repr(self): - return "{shape}".format(**self.__dict__) - - -class InstanceReshape(torch.jit.ScriptModule): - __constants__ = ['shape'] - - def __init__(self, new_shape): - super().__init__() - self.shape = tuple(int(i) for i in new_shape) - - @torch.jit.script_method - def forward(self, x): - new_shape = (x.shape[0],) + self.shape - return x.reshape(new_shape) - - def extra_repr(self): - return "{shape}".format(**self.__dict__) diff --git a/spaces/uchuukaizoku/CharacterClassifier/README.md b/spaces/uchuukaizoku/CharacterClassifier/README.md deleted file mode 100644 index d4036ebb611c01b74790d0bab15b59f20cf63e11..0000000000000000000000000000000000000000 --- a/spaces/uchuukaizoku/CharacterClassifier/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: CharacterClassifier -emoji: ⚡ -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ulysses115/vits-models/text/cleaners.py b/spaces/ulysses115/vits-models/text/cleaners.py deleted file mode 100644 index d26581deb399609163518054718ad80ecca5d934..0000000000000000000000000000000000000000 --- a/spaces/ulysses115/vits-models/text/cleaners.py +++ /dev/null @@ -1,475 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - -import re -from unidecode import unidecode -import pyopenjtalk -from jamo import h2j, j2hcj -from pypinyin import lazy_pinyin, BOPOMOFO -import jieba, cn2an - - -# This is a list of Korean classifiers preceded by pure Korean numerals. 
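A quick eager-mode sketch of the grouped-linear idea behind the LinearGroup module above: split the last dimension into `groups` chunks, apply an independent weight to each chunk, then concatenate and add one shared bias. Sizes and initialisation below are illustrative, not taken from the original repo.

import torch
import torch.nn.functional as F

groups, in_feat, out_feat = 4, 16, 32
x = torch.randn(8, in_feat)                                   # (batch, in_feat)
weight = torch.randn(groups, out_feat // groups, in_feat // groups)
bias = torch.zeros(1, out_feat)

chunks = torch.chunk(x, groups, dim=-1)                       # groups tensors of (batch, in_feat/groups)
y = torch.cat([F.linear(c, weight[i]) for i, c in enumerate(chunks)], dim=-1) + bias
print(y.shape)                                                # torch.Size([8, 32])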
-_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통' - -# Regular expression matching whitespace: -_whitespace_re = re.compile(r'\s+') - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - -# List of (hangul, hangul divided) pairs: -_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄳ', 'ㄱㅅ'), - ('ㄵ', 'ㄴㅈ'), - ('ㄶ', 'ㄴㅎ'), - ('ㄺ', 'ㄹㄱ'), - ('ㄻ', 'ㄹㅁ'), - ('ㄼ', 'ㄹㅂ'), - ('ㄽ', 'ㄹㅅ'), - ('ㄾ', 'ㄹㅌ'), - ('ㄿ', 'ㄹㅍ'), - ('ㅀ', 'ㄹㅎ'), - ('ㅄ', 'ㅂㅅ'), - ('ㅘ', 'ㅗㅏ'), - ('ㅙ', 'ㅗㅐ'), - ('ㅚ', 'ㅗㅣ'), - ('ㅝ', 'ㅜㅓ'), - ('ㅞ', 'ㅜㅔ'), - ('ㅟ', 'ㅜㅣ'), - ('ㅢ', 'ㅡㅣ'), - ('ㅑ', 'ㅣㅏ'), - ('ㅒ', 'ㅣㅐ'), - ('ㅕ', 'ㅣㅓ'), - ('ㅖ', 'ㅣㅔ'), - ('ㅛ', 'ㅣㅗ'), - ('ㅠ', 'ㅣㅜ') -]] - -# List of (Latin alphabet, hangul) pairs: -_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', '에이'), - ('b', '비'), - ('c', '시'), - ('d', '디'), - ('e', '이'), - ('f', '에프'), - ('g', '지'), - ('h', '에이치'), - ('i', '아이'), - ('j', '제이'), - ('k', '케이'), - ('l', '엘'), - ('m', '엠'), - ('n', '엔'), - ('o', '오'), - ('p', '피'), - ('q', '큐'), - ('r', '아르'), - ('s', '에스'), - ('t', '티'), - ('u', '유'), - ('v', '브이'), - ('w', '더블유'), - ('x', '엑스'), - ('y', '와이'), - ('z', '제트') -]] - -# List of (Latin alphabet, bopomofo) pairs: -_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'ㄟˉ'), - ('b', 'ㄅㄧˋ'), - ('c', 'ㄙㄧˉ'), - ('d', 'ㄉㄧˋ'), - ('e', 'ㄧˋ'), - ('f', 'ㄝˊㄈㄨˋ'), - ('g', 'ㄐㄧˋ'), - ('h', 'ㄝˇㄑㄩˋ'), - ('i', 'ㄞˋ'), - ('j', 'ㄐㄟˋ'), - ('k', 'ㄎㄟˋ'), - ('l', 'ㄝˊㄛˋ'), - ('m', 'ㄝˊㄇㄨˋ'), - ('n', 'ㄣˉ'), - ('o', 'ㄡˉ'), - ('p', 'ㄆㄧˉ'), - ('q', 'ㄎㄧㄡˉ'), - ('r', 'ㄚˋ'), - ('s', 'ㄝˊㄙˋ'), - ('t', 'ㄊㄧˋ'), - ('u', 'ㄧㄡˉ'), - ('v', 'ㄨㄧˉ'), - ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), - ('x', 'ㄝˉㄎㄨˋㄙˋ'), - ('y', 'ㄨㄞˋ'), - ('z', 'ㄗㄟˋ') -]] - - -# List of (bopomofo, romaji) pairs: -_bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'ʧ⁼'), - ('ㄑ', 'ʧʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ʦ`⁼'), - ('ㄔ', 'ʦ`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ʦ⁼'), - ('ㄘ', 'ʦʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'e'), - ('ㄞ', 'ai'), - ('ㄟ', 'ei'), - ('ㄠ', 'au'), - ('ㄡ', 'ou'), - ('ㄧㄢ', 'yeNN'), - ('ㄢ', 'aNN'), - ('ㄧㄣ', 'iNN'), - ('ㄣ', 'əNN'), - ('ㄤ', 'aNg'), - ('ㄧㄥ', 'iNg'), - ('ㄨㄥ', 'uNg'), - ('ㄩㄥ', 'yuNg'), - ('ㄥ', 'əNg'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', 
'↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def lowercase(text): - return text.lower() - - -def collapse_whitespace(text): - return re.sub(_whitespace_re, ' ', text) - - -def convert_to_ascii(text): - return unidecode(text) - - -def japanese_to_romaji_with_accent(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - if text!='': - text+=' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil','pau']: - text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q') - else: - continue - n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']: - a2_next=-1 - else: - a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if iBondageCafe - The Adventures Of O-girl Trapped In Time.28l


            diff --git a/spaces/vanessbut/tldr_keywords/utils/__init__.py b/spaces/vanessbut/tldr_keywords/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/exp/upernet_global_small/test.sh b/spaces/vumichien/canvas_controlnet/annotator/uniformer/exp/upernet_global_small/test.sh deleted file mode 100644 index d9a85e7a0d3b7c96b060f473d41254b37a382fcb..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/exp/upernet_global_small/test.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash - -work_path=$(dirname $0) -PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \ -python -m torch.distributed.launch --nproc_per_node=8 \ - tools/test.py ${work_path}/test_config_h32.py \ - ${work_path}/ckpt/latest.pth \ - --launcher pytorch \ - --eval mIoU \ - 2>&1 | tee -a ${work_path}/log.txt diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/ops/knn.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/ops/knn.py deleted file mode 100644 index f335785036669fc19239825b0aae6dde3f73bf92..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/ops/knn.py +++ /dev/null @@ -1,77 +0,0 @@ -import torch -from torch.autograd import Function - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', ['knn_forward']) - - -class KNN(Function): - r"""KNN (CUDA) based on heap data structure. - Modified from `PAConv `_. - - Find k-nearest points. - """ - - @staticmethod - def forward(ctx, - k: int, - xyz: torch.Tensor, - center_xyz: torch.Tensor = None, - transposed: bool = False) -> torch.Tensor: - """ - Args: - k (int): number of nearest neighbors. - xyz (Tensor): (B, N, 3) if transposed == False, else (B, 3, N). - xyz coordinates of the features. - center_xyz (Tensor, optional): (B, npoint, 3) if transposed == - False, else (B, 3, npoint). centers of the knn query. - Default: None. - transposed (bool, optional): whether the input tensors are - transposed. Should not explicitly use this keyword when - calling knn (=KNN.apply), just add the fourth param. - Default: False. - - Returns: - Tensor: (B, k, npoint) tensor with the indices of - the features that form k-nearest neighbours. 
- """ - assert (k > 0) & (k < 100), 'k should be in range(0, 100)' - - if center_xyz is None: - center_xyz = xyz - - if transposed: - xyz = xyz.transpose(2, 1).contiguous() - center_xyz = center_xyz.transpose(2, 1).contiguous() - - assert xyz.is_contiguous() # [B, N, 3] - assert center_xyz.is_contiguous() # [B, npoint, 3] - - center_xyz_device = center_xyz.get_device() - assert center_xyz_device == xyz.get_device(), \ - 'center_xyz and xyz should be put on the same device' - if torch.cuda.current_device() != center_xyz_device: - torch.cuda.set_device(center_xyz_device) - - B, npoint, _ = center_xyz.shape - N = xyz.shape[1] - - idx = center_xyz.new_zeros((B, npoint, k)).int() - dist2 = center_xyz.new_zeros((B, npoint, k)).float() - - ext_module.knn_forward( - xyz, center_xyz, idx, dist2, b=B, n=N, m=npoint, nsample=k) - # idx shape to [B, k, npoint] - idx = idx.transpose(2, 1).contiguous() - if torch.__version__ != 'parrots': - ctx.mark_non_differentiable(idx) - return idx - - @staticmethod - def backward(ctx, a=None): - return None, None, None - - -knn = KNN.apply diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/runner/hooks/logger/pavi.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/runner/hooks/logger/pavi.py deleted file mode 100644 index 1dcf146d8163aff1363e9764999b0a74d674a595..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/runner/hooks/logger/pavi.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import json -import os -import os.path as osp - -import torch -import yaml - -import annotator.uniformer.mmcv as mmcv -from ....parallel.utils import is_module_wrapper -from ...dist_utils import master_only -from ..hook import HOOKS -from .base import LoggerHook - - -@HOOKS.register_module() -class PaviLoggerHook(LoggerHook): - - def __init__(self, - init_kwargs=None, - add_graph=False, - add_last_ckpt=False, - interval=10, - ignore_last=True, - reset_flag=False, - by_epoch=True, - img_key='img_info'): - super(PaviLoggerHook, self).__init__(interval, ignore_last, reset_flag, - by_epoch) - self.init_kwargs = init_kwargs - self.add_graph = add_graph - self.add_last_ckpt = add_last_ckpt - self.img_key = img_key - - @master_only - def before_run(self, runner): - super(PaviLoggerHook, self).before_run(runner) - try: - from pavi import SummaryWriter - except ImportError: - raise ImportError('Please run "pip install pavi" to install pavi.') - - self.run_name = runner.work_dir.split('/')[-1] - - if not self.init_kwargs: - self.init_kwargs = dict() - self.init_kwargs['name'] = self.run_name - self.init_kwargs['model'] = runner._model_name - if runner.meta is not None: - if 'config_dict' in runner.meta: - config_dict = runner.meta['config_dict'] - assert isinstance( - config_dict, - dict), ('meta["config_dict"] has to be of a dict, ' - f'but got {type(config_dict)}') - elif 'config_file' in runner.meta: - config_file = runner.meta['config_file'] - config_dict = dict(mmcv.Config.fromfile(config_file)) - else: - config_dict = None - if config_dict is not None: - # 'max_.*iter' is parsed in pavi sdk as the maximum iterations - # to properly set up the progress bar. 
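The KNN op defined just above (mmcv/ops/knn.py) dispatches to a compiled CUDA kernel (ext_module.knn_forward) and returns indices shaped (B, k, npoint). For small point clouds, a plain-PyTorch brute-force version produces the same index layout; this is only a sketch, not the mmcv kernel:

import torch

def knn_bruteforce(k: int, xyz: torch.Tensor, center_xyz: torch.Tensor = None) -> torch.Tensor:
    # xyz: (B, N, 3), center_xyz: (B, npoint, 3); defaults to querying xyz against itself
    if center_xyz is None:
        center_xyz = xyz
    dist = torch.cdist(center_xyz, xyz)                     # (B, npoint, N) pairwise distances
    idx = dist.topk(k, dim=-1, largest=False).indices       # (B, npoint, k) nearest indices
    return idx.transpose(2, 1).contiguous()                 # (B, k, npoint), same layout as KNN.forward

idx = knn_bruteforce(4, torch.randn(2, 128, 3))
print(idx.shape)  # torch.Size([2, 4, 128])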
- config_dict = config_dict.copy() - config_dict.setdefault('max_iter', runner.max_iters) - # non-serializable values are first converted in - # mmcv.dump to json - config_dict = json.loads( - mmcv.dump(config_dict, file_format='json')) - session_text = yaml.dump(config_dict) - self.init_kwargs['session_text'] = session_text - self.writer = SummaryWriter(**self.init_kwargs) - - def get_step(self, runner): - """Get the total training step/epoch.""" - if self.get_mode(runner) == 'val' and self.by_epoch: - return self.get_epoch(runner) - else: - return self.get_iter(runner) - - @master_only - def log(self, runner): - tags = self.get_loggable_tags(runner, add_mode=False) - if tags: - self.writer.add_scalars( - self.get_mode(runner), tags, self.get_step(runner)) - - @master_only - def after_run(self, runner): - if self.add_last_ckpt: - ckpt_path = osp.join(runner.work_dir, 'latest.pth') - if osp.islink(ckpt_path): - ckpt_path = osp.join(runner.work_dir, os.readlink(ckpt_path)) - - if osp.isfile(ckpt_path): - # runner.epoch += 1 has been done before `after_run`. - iteration = runner.epoch if self.by_epoch else runner.iter - return self.writer.add_snapshot_file( - tag=self.run_name, - snapshot_file_path=ckpt_path, - iteration=iteration) - - # flush the buffer and send a task ending signal to Pavi - self.writer.close() - - @master_only - def before_epoch(self, runner): - if runner.epoch == 0 and self.add_graph: - if is_module_wrapper(runner.model): - _model = runner.model.module - else: - _model = runner.model - device = next(_model.parameters()).device - data = next(iter(runner.data_loader)) - image = data[self.img_key][0:1].to(device) - with torch.no_grad(): - self.writer.add_graph(_model, image) diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/models/decode_heads/nl_head.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/models/decode_heads/nl_head.py deleted file mode 100644 index 3eee424199e6aa363b564e2a3340a070db04db86..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/models/decode_heads/nl_head.py +++ /dev/null @@ -1,49 +0,0 @@ -import torch -from annotator.uniformer.mmcv.cnn import NonLocal2d - -from ..builder import HEADS -from .fcn_head import FCNHead - - -@HEADS.register_module() -class NLHead(FCNHead): - """Non-local Neural Networks. - - This head is the implementation of `NLNet - `_. - - Args: - reduction (int): Reduction factor of projection transform. Default: 2. - use_scale (bool): Whether to scale pairwise_weight by - sqrt(1/inter_channels). Default: True. - mode (str): The nonlocal mode. Options are 'embedded_gaussian', - 'dot_product'. Default: 'embedded_gaussian.'. 
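As a reference for the 'embedded_gaussian' mode the NLHead docstring above describes: the non-local block takes a softmax over pairwise dot products of projected features, optionally scaled by sqrt(1/inter_channels). A minimal sketch on already-projected tensors (shapes are illustrative; the real NonLocal2d also owns the 1x1 conv projections and the residual connection):

import torch
import torch.nn.functional as F

def embedded_gaussian_attention(theta_x, phi_x, g_x, use_scale=True):
    # theta_x, g_x: (B, HW, C_inter); phi_x: (B, C_inter, HW)
    pairwise = theta_x @ phi_x                          # (B, HW, HW)
    if use_scale:
        pairwise = pairwise / theta_x.shape[-1] ** 0.5  # scale by sqrt(1/inter_channels)
    weight = F.softmax(pairwise, dim=-1)
    return weight @ g_x                                 # (B, HW, C_inter)

b, hw, c = 2, 64, 16
out = embedded_gaussian_attention(torch.randn(b, hw, c), torch.randn(b, c, hw), torch.randn(b, hw, c))
print(out.shape)  # torch.Size([2, 64, 16])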
- """ - - def __init__(self, - reduction=2, - use_scale=True, - mode='embedded_gaussian', - **kwargs): - super(NLHead, self).__init__(num_convs=2, **kwargs) - self.reduction = reduction - self.use_scale = use_scale - self.mode = mode - self.nl_block = NonLocal2d( - in_channels=self.channels, - reduction=self.reduction, - use_scale=self.use_scale, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - mode=self.mode) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - output = self.convs[0](x) - output = self.nl_block(output) - output = self.convs[1](output) - if self.concat_input: - output = self.conv_cat(torch.cat([x, output], dim=1)) - output = self.cls_seg(output) - return output diff --git a/spaces/wwwwwwww2/bingo/src/lib/hooks/use-bing.ts b/spaces/wwwwwwww2/bingo/src/lib/hooks/use-bing.ts deleted file mode 100644 index dcdb1667ced0cba299b0825c0e91c4732411308c..0000000000000000000000000000000000000000 --- a/spaces/wwwwwwww2/bingo/src/lib/hooks/use-bing.ts +++ /dev/null @@ -1,173 +0,0 @@ -'use client' - -import { useState, useCallback, useEffect, useMemo } from 'react' -import { useAtom, useAtomValue } from 'jotai' -import { chatFamily, bingConversationStyleAtom, GreetMessages, hashAtom, voiceAtom } from '@/state' -import { setConversationMessages } from './chat-history' -import { ChatMessageModel, BotId, FileItem } from '@/lib/bots/bing/types' -import { nanoid } from '../utils' -import { TTS } from '../bots/bing/tts' - -export function useBing(botId: BotId = 'bing') { - const chatAtom = useMemo(() => chatFamily({ botId, page: 'singleton' }), [botId]) - const [enableTTS] = useAtom(voiceAtom) - const speaker = useMemo(() => new TTS(), []) - const [hash, setHash] = useAtom(hashAtom) - const bingConversationStyle = useAtomValue(bingConversationStyleAtom) - const [chatState, setChatState] = useAtom(chatAtom) - const [input, setInput] = useState('') - const [attachmentList, setAttachmentList] = useState([]) - - const updateMessage = useCallback( - (messageId: string, updater: (message: ChatMessageModel) => void) => { - setChatState((draft) => { - const message = draft.messages.find((m) => m.id === messageId) - if (message) { - updater(message) - } - }) - }, - [setChatState], - ) - - const sendMessage = useCallback( - async (input: string, options = {}) => { - const botMessageId = nanoid() - const imageUrl = attachmentList?.[0]?.status === 'loaded' ? attachmentList[0].url : undefined - setChatState((draft) => { - const text = imageUrl ? `${input}\n\n![image](${imageUrl})` : input - draft.messages.push({ id: nanoid(), text, author: 'user' }, { id: botMessageId, text: '', author: 'bot' }) - setAttachmentList([]) - }) - const abortController = new AbortController() - setChatState((draft) => { - draft.generatingMessageId = botMessageId - draft.abortController = abortController - }) - speaker.reset() - await chatState.bot.sendMessage({ - prompt: input, - imageUrl: /\?bcid=([^&]+)/.test(imageUrl ?? '') ? 
`https://www.bing.com/images/blob?bcid=${RegExp.$1}` : imageUrl, - options: { - ...options, - bingConversationStyle, - }, - signal: abortController.signal, - onEvent(event) { - if (event.type === 'UPDATE_ANSWER') { - updateMessage(botMessageId, (message) => { - if (event.data.text.length > message.text.length) { - message.text = event.data.text - } - - if (event.data.spokenText && enableTTS) { - speaker.speak(event.data.spokenText) - } - - message.throttling = event.data.throttling || message.throttling - message.sourceAttributions = event.data.sourceAttributions || message.sourceAttributions - message.suggestedResponses = event.data.suggestedResponses || message.suggestedResponses - }) - } else if (event.type === 'ERROR') { - updateMessage(botMessageId, (message) => { - message.error = event.error - }) - setChatState((draft) => { - draft.abortController = undefined - draft.generatingMessageId = '' - }) - } else if (event.type === 'DONE') { - setChatState((draft) => { - draft.abortController = undefined - draft.generatingMessageId = '' - }) - } - }, - }) - }, - [botId, attachmentList, chatState.bot, setChatState, updateMessage], - ) - - const uploadImage = useCallback(async (imgUrl: string) => { - setAttachmentList([{ url: imgUrl, status: 'loading' }]) - const response = await chatState.bot.uploadImage(imgUrl, bingConversationStyle) - if (response?.blobId) { - setAttachmentList([{ url: `/api/blob?bcid=${response.blobId}`, status: 'loaded' }]) - } else { - setAttachmentList([{ url: imgUrl, status: 'error' }]) - } - }, [chatState.bot]) - - const resetConversation = useCallback(() => { - chatState.bot.resetConversation() - speaker.abort() - setChatState((draft) => { - draft.abortController = undefined - draft.generatingMessageId = '' - draft.messages = [{ author: 'bot', text: GreetMessages[Math.floor(GreetMessages.length * Math.random())], id: nanoid() }] - draft.conversationId = nanoid() - }) - }, [chatState.bot, setChatState]) - - const stopGenerating = useCallback(() => { - chatState.abortController?.abort() - if (chatState.generatingMessageId) { - updateMessage(chatState.generatingMessageId, (message) => { - if (!message.text && !message.error) { - message.text = 'Cancelled' - } - }) - } - setChatState((draft) => { - draft.generatingMessageId = '' - }) - }, [chatState.abortController, chatState.generatingMessageId, setChatState, updateMessage]) - - useEffect(() => { - if (chatState.messages.length) { - setConversationMessages(botId, chatState.conversationId, chatState.messages) - } - }, [botId, chatState.conversationId, chatState.messages]) - - useEffect(() => { - if (hash === 'reset') { - resetConversation() - setHash('') - } - }, [hash, setHash]) - - const chat = useMemo( - () => ({ - botId, - bot: chatState.bot, - isSpeaking: speaker.isSpeaking, - messages: chatState.messages, - sendMessage, - setInput, - input, - resetConversation, - generating: !!chatState.generatingMessageId, - stopGenerating, - uploadImage, - setAttachmentList, - attachmentList, - }), - [ - botId, - bingConversationStyle, - chatState.bot, - chatState.generatingMessageId, - chatState.messages, - speaker.isSpeaking, - setInput, - input, - setAttachmentList, - attachmentList, - resetConversation, - sendMessage, - stopGenerating, - ], - ) - - return chat -} diff --git a/spaces/xinyu1205/recognize-anything/GroundingDINO/groundingdino/models/GroundingDINO/fuse_modules.py b/spaces/xinyu1205/recognize-anything/GroundingDINO/groundingdino/models/GroundingDINO/fuse_modules.py deleted file mode 100644 index 
2753b3ddee43c7a9fe28d1824db5d786e7e1ad59..0000000000000000000000000000000000000000 --- a/spaces/xinyu1205/recognize-anything/GroundingDINO/groundingdino/models/GroundingDINO/fuse_modules.py +++ /dev/null @@ -1,297 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ - -import torch -import torch.nn as nn -import torch.nn.functional as F -from timm.models.layers import DropPath - - -class FeatureResizer(nn.Module): - """ - This class takes as input a set of embeddings of dimension C1 and outputs a set of - embedding of dimension C2, after a linear transformation, dropout and normalization (LN). - """ - - def __init__(self, input_feat_size, output_feat_size, dropout, do_ln=True): - super().__init__() - self.do_ln = do_ln - # Object feature encoding - self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True) - self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12) - self.dropout = nn.Dropout(dropout) - - def forward(self, encoder_features): - x = self.fc(encoder_features) - if self.do_ln: - x = self.layer_norm(x) - output = self.dropout(x) - return output - - -def l1norm(X, dim, eps=1e-8): - """L1-normalize columns of X""" - norm = torch.abs(X).sum(dim=dim, keepdim=True) + eps - X = torch.div(X, norm) - return X - - -def l2norm(X, dim, eps=1e-8): - """L2-normalize columns of X""" - norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps - X = torch.div(X, norm) - return X - - -def func_attention(query, context, smooth=1, raw_feature_norm="softmax", eps=1e-8): - """ - query: (n_context, queryL, d) - context: (n_context, sourceL, d) - """ - batch_size_q, queryL = query.size(0), query.size(1) - batch_size, sourceL = context.size(0), context.size(1) - - # Get attention - # --> (batch, d, queryL) - queryT = torch.transpose(query, 1, 2) - - # (batch, sourceL, d)(batch, d, queryL) - # --> (batch, sourceL, queryL) - attn = torch.bmm(context, queryT) - if raw_feature_norm == "softmax": - # --> (batch*sourceL, queryL) - attn = attn.view(batch_size * sourceL, queryL) - attn = nn.Softmax()(attn) - # --> (batch, sourceL, queryL) - attn = attn.view(batch_size, sourceL, queryL) - elif raw_feature_norm == "l2norm": - attn = l2norm(attn, 2) - elif raw_feature_norm == "clipped_l2norm": - attn = nn.LeakyReLU(0.1)(attn) - attn = l2norm(attn, 2) - else: - raise ValueError("unknown first norm type:", raw_feature_norm) - # --> (batch, queryL, sourceL) - attn = torch.transpose(attn, 1, 2).contiguous() - # --> (batch*queryL, sourceL) - attn = attn.view(batch_size * queryL, sourceL) - attn = nn.Softmax()(attn * smooth) - # --> (batch, queryL, sourceL) - attn = attn.view(batch_size, queryL, sourceL) - # --> (batch, sourceL, queryL) - attnT = torch.transpose(attn, 1, 2).contiguous() - - # --> (batch, d, sourceL) - contextT = torch.transpose(context, 1, 2) - # (batch x d x sourceL)(batch x sourceL x queryL) - # --> (batch, d, queryL) - weightedContext = torch.bmm(contextT, attnT) - # --> (batch, queryL, d) - weightedContext = torch.transpose(weightedContext, 1, 2) - - return weightedContext, attnT - - -class BiMultiHeadAttention(nn.Module): - def __init__(self, v_dim, l_dim, embed_dim, num_heads, dropout=0.1, cfg=None): - super(BiMultiHeadAttention, self).__init__() - - self.embed_dim = embed_dim - 
self.num_heads = num_heads - self.head_dim = embed_dim // num_heads - self.v_dim = v_dim - self.l_dim = l_dim - - assert ( - self.head_dim * self.num_heads == self.embed_dim - ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})." - self.scale = self.head_dim ** (-0.5) - self.dropout = dropout - - self.v_proj = nn.Linear(self.v_dim, self.embed_dim) - self.l_proj = nn.Linear(self.l_dim, self.embed_dim) - self.values_v_proj = nn.Linear(self.v_dim, self.embed_dim) - self.values_l_proj = nn.Linear(self.l_dim, self.embed_dim) - - self.out_v_proj = nn.Linear(self.embed_dim, self.v_dim) - self.out_l_proj = nn.Linear(self.embed_dim, self.l_dim) - - self.stable_softmax_2d = True - self.clamp_min_for_underflow = True - self.clamp_max_for_overflow = True - - self._reset_parameters() - - def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): - return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() - - def _reset_parameters(self): - nn.init.xavier_uniform_(self.v_proj.weight) - self.v_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.l_proj.weight) - self.l_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.values_v_proj.weight) - self.values_v_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.values_l_proj.weight) - self.values_l_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.out_v_proj.weight) - self.out_v_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.out_l_proj.weight) - self.out_l_proj.bias.data.fill_(0) - - def forward(self, v, l, attention_mask_v=None, attention_mask_l=None): - """_summary_ - - Args: - v (_type_): bs, n_img, dim - l (_type_): bs, n_text, dim - attention_mask_v (_type_, optional): _description_. bs, n_img - attention_mask_l (_type_, optional): _description_. 
bs, n_text - - Returns: - _type_: _description_ - """ - # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': - # import ipdb; ipdb.set_trace() - bsz, tgt_len, _ = v.size() - - query_states = self.v_proj(v) * self.scale - key_states = self._shape(self.l_proj(l), -1, bsz) - value_v_states = self._shape(self.values_v_proj(v), -1, bsz) - value_l_states = self._shape(self.values_l_proj(l), -1, bsz) - - proj_shape = (bsz * self.num_heads, -1, self.head_dim) - query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) - key_states = key_states.view(*proj_shape) - value_v_states = value_v_states.view(*proj_shape) - value_l_states = value_l_states.view(*proj_shape) - - src_len = key_states.size(1) - attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) # bs*nhead, nimg, ntxt - - if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): - raise ValueError( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}" - ) - - if self.stable_softmax_2d: - attn_weights = attn_weights - attn_weights.max() - - if self.clamp_min_for_underflow: - attn_weights = torch.clamp( - attn_weights, min=-50000 - ) # Do not increase -50000, data type half has quite limited range - if self.clamp_max_for_overflow: - attn_weights = torch.clamp( - attn_weights, max=50000 - ) # Do not increase 50000, data type half has quite limited range - - attn_weights_T = attn_weights.transpose(1, 2) - attn_weights_l = attn_weights_T - torch.max(attn_weights_T, dim=-1, keepdim=True)[0] - if self.clamp_min_for_underflow: - attn_weights_l = torch.clamp( - attn_weights_l, min=-50000 - ) # Do not increase -50000, data type half has quite limited range - if self.clamp_max_for_overflow: - attn_weights_l = torch.clamp( - attn_weights_l, max=50000 - ) # Do not increase 50000, data type half has quite limited range - - # mask vison for language - if attention_mask_v is not None: - attention_mask_v = ( - attention_mask_v[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1) - ) - attn_weights_l.masked_fill_(attention_mask_v, float("-inf")) - - attn_weights_l = attn_weights_l.softmax(dim=-1) - - # mask language for vision - if attention_mask_l is not None: - attention_mask_l = ( - attention_mask_l[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1) - ) - attn_weights.masked_fill_(attention_mask_l, float("-inf")) - attn_weights_v = attn_weights.softmax(dim=-1) - - attn_probs_v = F.dropout(attn_weights_v, p=self.dropout, training=self.training) - attn_probs_l = F.dropout(attn_weights_l, p=self.dropout, training=self.training) - - attn_output_v = torch.bmm(attn_probs_v, value_l_states) - attn_output_l = torch.bmm(attn_probs_l, value_v_states) - - if attn_output_v.size() != (bsz * self.num_heads, tgt_len, self.head_dim): - raise ValueError( - f"`attn_output_v` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output_v.size()}" - ) - - if attn_output_l.size() != (bsz * self.num_heads, src_len, self.head_dim): - raise ValueError( - f"`attn_output_l` should be of size {(bsz, self.num_heads, src_len, self.head_dim)}, but is {attn_output_l.size()}" - ) - - attn_output_v = attn_output_v.view(bsz, self.num_heads, tgt_len, self.head_dim) - attn_output_v = attn_output_v.transpose(1, 2) - attn_output_v = attn_output_v.reshape(bsz, tgt_len, self.embed_dim) - - attn_output_l = attn_output_l.view(bsz, self.num_heads, src_len, self.head_dim) - attn_output_l = attn_output_l.transpose(1, 2) - attn_output_l = 
attn_output_l.reshape(bsz, src_len, self.embed_dim) - - attn_output_v = self.out_v_proj(attn_output_v) - attn_output_l = self.out_l_proj(attn_output_l) - - return attn_output_v, attn_output_l - - -# Bi-Direction MHA (text->image, image->text) -class BiAttentionBlock(nn.Module): - def __init__( - self, - v_dim, - l_dim, - embed_dim, - num_heads, - dropout=0.1, - drop_path=0.0, - init_values=1e-4, - cfg=None, - ): - """ - Inputs: - embed_dim - Dimensionality of input and attention feature vectors - hidden_dim - Dimensionality of hidden layer in feed-forward network - (usually 2-4x larger than embed_dim) - num_heads - Number of heads to use in the Multi-Head Attention block - dropout - Amount of dropout to apply in the feed-forward network - """ - super(BiAttentionBlock, self).__init__() - - # pre layer norm - self.layer_norm_v = nn.LayerNorm(v_dim) - self.layer_norm_l = nn.LayerNorm(l_dim) - self.attn = BiMultiHeadAttention( - v_dim=v_dim, l_dim=l_dim, embed_dim=embed_dim, num_heads=num_heads, dropout=dropout - ) - - # add layer scale for training stability - self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() - self.gamma_v = nn.Parameter(init_values * torch.ones((v_dim)), requires_grad=True) - self.gamma_l = nn.Parameter(init_values * torch.ones((l_dim)), requires_grad=True) - - def forward(self, v, l, attention_mask_v=None, attention_mask_l=None): - v = self.layer_norm_v(v) - l = self.layer_norm_l(l) - delta_v, delta_l = self.attn( - v, l, attention_mask_v=attention_mask_v, attention_mask_l=attention_mask_l - ) - # v, l = v + delta_v, l + delta_l - v = v + self.drop_path(self.gamma_v * delta_v) - l = l + self.drop_path(self.gamma_l * delta_l) - return v, l - - # def forward(self, v:List[torch.Tensor], l, attention_mask_v=None, attention_mask_l=None) diff --git a/spaces/xnetba/MMS/vits/losses.py b/spaces/xnetba/MMS/vits/losses.py deleted file mode 100644 index fb22a0e834dd87edaa37bb8190eee2c3c7abe0d5..0000000000000000000000000000000000000000 --- a/spaces/xnetba/MMS/vits/losses.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from torch.nn import functional as F - -import commons - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - rl = rl.float().detach() - gl = gl.float() - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - dr = dr.float() - dg = dg.float() - r_loss = torch.mean((1-dr)**2) - g_loss = torch.mean(dg**2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - dg = dg.float() - l = torch.mean((1-dg)**2) - gen_losses.append(l) - loss += l - - return loss, gen_losses - - -def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): - """ - z_p, logs_q: [b, h, t_t] - m_p, logs_p: [b, h, t_t] - """ - z_p = z_p.float() - logs_q = logs_q.float() - m_p = m_p.float() - logs_p = logs_p.float() - z_mask = z_mask.float() - - kl = logs_p - logs_q - 0.5 - kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. 
* logs_p) - kl = torch.sum(kl * z_mask) - l = kl / torch.sum(z_mask) - return l diff --git a/spaces/xp3857/Image_Restoration_Colorization/Global/detection_models/antialiasing.py b/spaces/xp3857/Image_Restoration_Colorization/Global/detection_models/antialiasing.py deleted file mode 100644 index 78da8ebdef518ffe597da1d03ffda09b89b22076..0000000000000000000000000000000000000000 --- a/spaces/xp3857/Image_Restoration_Colorization/Global/detection_models/antialiasing.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -import torch -import torch.nn.parallel -import numpy as np -import torch.nn as nn -import torch.nn.functional as F - - -class Downsample(nn.Module): - # https://github.com/adobe/antialiased-cnns - - def __init__(self, pad_type="reflect", filt_size=3, stride=2, channels=None, pad_off=0): - super(Downsample, self).__init__() - self.filt_size = filt_size - self.pad_off = pad_off - self.pad_sizes = [ - int(1.0 * (filt_size - 1) / 2), - int(np.ceil(1.0 * (filt_size - 1) / 2)), - int(1.0 * (filt_size - 1) / 2), - int(np.ceil(1.0 * (filt_size - 1) / 2)), - ] - self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes] - self.stride = stride - self.off = int((self.stride - 1) / 2.0) - self.channels = channels - - # print('Filter size [%i]'%filt_size) - if self.filt_size == 1: - a = np.array([1.0,]) - elif self.filt_size == 2: - a = np.array([1.0, 1.0]) - elif self.filt_size == 3: - a = np.array([1.0, 2.0, 1.0]) - elif self.filt_size == 4: - a = np.array([1.0, 3.0, 3.0, 1.0]) - elif self.filt_size == 5: - a = np.array([1.0, 4.0, 6.0, 4.0, 1.0]) - elif self.filt_size == 6: - a = np.array([1.0, 5.0, 10.0, 10.0, 5.0, 1.0]) - elif self.filt_size == 7: - a = np.array([1.0, 6.0, 15.0, 20.0, 15.0, 6.0, 1.0]) - - filt = torch.Tensor(a[:, None] * a[None, :]) - filt = filt / torch.sum(filt) - self.register_buffer("filt", filt[None, None, :, :].repeat((self.channels, 1, 1, 1))) - - self.pad = get_pad_layer(pad_type)(self.pad_sizes) - - def forward(self, inp): - if self.filt_size == 1: - if self.pad_off == 0: - return inp[:, :, :: self.stride, :: self.stride] - else: - return self.pad(inp)[:, :, :: self.stride, :: self.stride] - else: - return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1]) - - -def get_pad_layer(pad_type): - if pad_type in ["refl", "reflect"]: - PadLayer = nn.ReflectionPad2d - elif pad_type in ["repl", "replicate"]: - PadLayer = nn.ReplicationPad2d - elif pad_type == "zero": - PadLayer = nn.ZeroPad2d - else: - print("Pad type [%s] not recognized" % pad_type) - return PadLayer diff --git a/spaces/xp3857/Image_Restoration_Colorization/Global/models/NonLocal_feature_mapping_model.py b/spaces/xp3857/Image_Restoration_Colorization/Global/models/NonLocal_feature_mapping_model.py deleted file mode 100644 index 1b9bb1031d8c1fe399fb4fa61e875027a6cfc4a5..0000000000000000000000000000000000000000 --- a/spaces/xp3857/Image_Restoration_Colorization/Global/models/NonLocal_feature_mapping_model.py +++ /dev/null @@ -1,202 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -import os -import functools -from torch.autograd import Variable -from util.image_pool import ImagePool -from .base_model import BaseModel -from . 
import networks -import math - - -class Mapping_Model_with_mask(nn.Module): - def __init__(self, nc, mc=64, n_blocks=3, norm="instance", padding_type="reflect", opt=None): - super(Mapping_Model_with_mask, self).__init__() - - norm_layer = networks.get_norm_layer(norm_type=norm) - activation = nn.ReLU(True) - model = [] - - tmp_nc = 64 - n_up = 4 - - for i in range(n_up): - ic = min(tmp_nc * (2 ** i), mc) - oc = min(tmp_nc * (2 ** (i + 1)), mc) - model += [nn.Conv2d(ic, oc, 3, 1, 1), norm_layer(oc), activation] - - self.before_NL = nn.Sequential(*model) - - if opt.NL_res: - self.NL = networks.NonLocalBlock2D_with_mask_Res( - mc, - mc, - opt.NL_fusion_method, - opt.correlation_renormalize, - opt.softmax_temperature, - opt.use_self, - opt.cosin_similarity, - ) - print("You are using NL + Res") - - model = [] - for i in range(n_blocks): - model += [ - networks.ResnetBlock( - mc, - padding_type=padding_type, - activation=activation, - norm_layer=norm_layer, - opt=opt, - dilation=opt.mapping_net_dilation, - ) - ] - - for i in range(n_up - 1): - ic = min(64 * (2 ** (4 - i)), mc) - oc = min(64 * (2 ** (3 - i)), mc) - model += [nn.Conv2d(ic, oc, 3, 1, 1), norm_layer(oc), activation] - model += [nn.Conv2d(tmp_nc * 2, tmp_nc, 3, 1, 1)] - if opt.feat_dim > 0 and opt.feat_dim < 64: - model += [norm_layer(tmp_nc), activation, nn.Conv2d(tmp_nc, opt.feat_dim, 1, 1)] - # model += [nn.Conv2d(64, 1, 1, 1, 0)] - self.after_NL = nn.Sequential(*model) - - - def forward(self, input, mask): - x1 = self.before_NL(input) - del input - x2 = self.NL(x1, mask) - del x1, mask - x3 = self.after_NL(x2) - del x2 - - return x3 - -class Mapping_Model_with_mask_2(nn.Module): ## Multi-Scale Patch Attention - def __init__(self, nc, mc=64, n_blocks=3, norm="instance", padding_type="reflect", opt=None): - super(Mapping_Model_with_mask_2, self).__init__() - - norm_layer = networks.get_norm_layer(norm_type=norm) - activation = nn.ReLU(True) - model = [] - - tmp_nc = 64 - n_up = 4 - - for i in range(n_up): - ic = min(tmp_nc * (2 ** i), mc) - oc = min(tmp_nc * (2 ** (i + 1)), mc) - model += [nn.Conv2d(ic, oc, 3, 1, 1), norm_layer(oc), activation] - - for i in range(2): - model += [ - networks.ResnetBlock( - mc, - padding_type=padding_type, - activation=activation, - norm_layer=norm_layer, - opt=opt, - dilation=opt.mapping_net_dilation, - ) - ] - - print("Mapping: You are using multi-scale patch attention, conv combine + mask input") - - self.before_NL = nn.Sequential(*model) - - if opt.mapping_exp==1: - self.NL_scale_1=networks.Patch_Attention_4(mc,mc,8) - - model = [] - for i in range(2): - model += [ - networks.ResnetBlock( - mc, - padding_type=padding_type, - activation=activation, - norm_layer=norm_layer, - opt=opt, - dilation=opt.mapping_net_dilation, - ) - ] - - self.res_block_1 = nn.Sequential(*model) - - if opt.mapping_exp==1: - self.NL_scale_2=networks.Patch_Attention_4(mc,mc,4) - - model = [] - for i in range(2): - model += [ - networks.ResnetBlock( - mc, - padding_type=padding_type, - activation=activation, - norm_layer=norm_layer, - opt=opt, - dilation=opt.mapping_net_dilation, - ) - ] - - self.res_block_2 = nn.Sequential(*model) - - if opt.mapping_exp==1: - self.NL_scale_3=networks.Patch_Attention_4(mc,mc,2) - # self.NL_scale_3=networks.Patch_Attention_2(mc,mc,2) - - model = [] - for i in range(2): - model += [ - networks.ResnetBlock( - mc, - padding_type=padding_type, - activation=activation, - norm_layer=norm_layer, - opt=opt, - dilation=opt.mapping_net_dilation, - ) - ] - - for i in range(n_up - 1): - ic = min(64 * 
(2 ** (4 - i)), mc) - oc = min(64 * (2 ** (3 - i)), mc) - model += [nn.Conv2d(ic, oc, 3, 1, 1), norm_layer(oc), activation] - model += [nn.Conv2d(tmp_nc * 2, tmp_nc, 3, 1, 1)] - if opt.feat_dim > 0 and opt.feat_dim < 64: - model += [norm_layer(tmp_nc), activation, nn.Conv2d(tmp_nc, opt.feat_dim, 1, 1)] - # model += [nn.Conv2d(64, 1, 1, 1, 0)] - self.after_NL = nn.Sequential(*model) - - - def forward(self, input, mask): - x1 = self.before_NL(input) - x2 = self.NL_scale_1(x1,mask) - x3 = self.res_block_1(x2) - x4 = self.NL_scale_2(x3,mask) - x5 = self.res_block_2(x4) - x6 = self.NL_scale_3(x5,mask) - x7 = self.after_NL(x6) - return x7 - - def inference_forward(self, input, mask): - x1 = self.before_NL(input) - del input - x2 = self.NL_scale_1.inference_forward(x1,mask) - del x1 - x3 = self.res_block_1(x2) - del x2 - x4 = self.NL_scale_2.inference_forward(x3,mask) - del x3 - x5 = self.res_block_2(x4) - del x4 - x6 = self.NL_scale_3.inference_forward(x5,mask) - del x5 - x7 = self.after_NL(x6) - del x6 - return x7 \ No newline at end of file diff --git a/spaces/ybelkada/interfacegan_pp/models/stylegan_tf_official/dnnlib/util.py b/spaces/ybelkada/interfacegan_pp/models/stylegan_tf_official/dnnlib/util.py deleted file mode 100644 index 133ef764c0707d9384a33f0350ba71b1e624072f..0000000000000000000000000000000000000000 --- a/spaces/ybelkada/interfacegan_pp/models/stylegan_tf_official/dnnlib/util.py +++ /dev/null @@ -1,405 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. 
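Looking back at the Downsample ("blur pool") layer in Global/detection_models/antialiasing.py above: its low-pass filter is the normalised outer product of a binomial row with itself. For filt_size=3 that gives the familiar 3x3 blur kernel; a small worked check, nothing repo-specific:

import numpy as np

a = np.array([1.0, 2.0, 1.0])      # binomial row used for filt_size == 3
filt = np.outer(a, a)
filt = filt / filt.sum()
print(filt)
# [[0.0625 0.125  0.0625]
#  [0.125  0.25   0.125 ]
#  [0.0625 0.125  0.0625]]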
- -"""Miscellaneous utility classes and functions.""" - -import ctypes -import fnmatch -import importlib -import inspect -import numpy as np -import os -import shutil -import sys -import types -import io -import pickle -import re -import requests -import html -import hashlib -import glob -import uuid - -from distutils.util import strtobool -from typing import Any, List, Tuple, Union - - -# Util classes -# ------------------------------------------------------------------------------------------ - - -class EasyDict(dict): - """Convenience class that behaves like a dict but allows access with the attribute syntax.""" - - def __getattr__(self, name: str) -> Any: - try: - return self[name] - except KeyError: - raise AttributeError(name) - - def __setattr__(self, name: str, value: Any) -> None: - self[name] = value - - def __delattr__(self, name: str) -> None: - del self[name] - - -class Logger(object): - """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file.""" - - def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True): - self.file = None - - if file_name is not None: - self.file = open(file_name, file_mode) - - self.should_flush = should_flush - self.stdout = sys.stdout - self.stderr = sys.stderr - - sys.stdout = self - sys.stderr = self - - def __enter__(self) -> "Logger": - return self - - def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: - self.close() - - def write(self, text: str) -> None: - """Write text to stdout (and a file) and optionally flush.""" - if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash - return - - if self.file is not None: - self.file.write(text) - - self.stdout.write(text) - - if self.should_flush: - self.flush() - - def flush(self) -> None: - """Flush written text to both stdout and a file, if open.""" - if self.file is not None: - self.file.flush() - - self.stdout.flush() - - def close(self) -> None: - """Flush, close possible files, and remove stdout/stderr mirroring.""" - self.flush() - - # if using multiple loggers, prevent closing in wrong order - if sys.stdout is self: - sys.stdout = self.stdout - if sys.stderr is self: - sys.stderr = self.stderr - - if self.file is not None: - self.file.close() - - -# Small util functions -# ------------------------------------------------------------------------------------------ - - -def format_time(seconds: Union[int, float]) -> str: - """Convert the seconds to human readable string with days, hours, minutes and seconds.""" - s = int(np.rint(seconds)) - - if s < 60: - return "{0}s".format(s) - elif s < 60 * 60: - return "{0}m {1:02}s".format(s // 60, s % 60) - elif s < 24 * 60 * 60: - return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60) - else: - return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60) - - -def ask_yes_no(question: str) -> bool: - """Ask the user the question until the user inputs a valid answer.""" - while True: - try: - print("{0} [y/n]".format(question)) - return strtobool(input().lower()) - except ValueError: - pass - - -def tuple_product(t: Tuple) -> Any: - """Calculate the product of the tuple elements.""" - result = 1 - - for v in t: - result *= v - - return result - - -_str_to_ctype = { - "uint8": ctypes.c_ubyte, - "uint16": ctypes.c_uint16, - "uint32": ctypes.c_uint32, - "uint64": ctypes.c_uint64, - "int8": ctypes.c_byte, - "int16": 
ctypes.c_int16, - "int32": ctypes.c_int32, - "int64": ctypes.c_int64, - "float32": ctypes.c_float, - "float64": ctypes.c_double -} - - -def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]: - """Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes.""" - type_str = None - - if isinstance(type_obj, str): - type_str = type_obj - elif hasattr(type_obj, "__name__"): - type_str = type_obj.__name__ - elif hasattr(type_obj, "name"): - type_str = type_obj.name - else: - raise RuntimeError("Cannot infer type name from input") - - assert type_str in _str_to_ctype.keys() - - my_dtype = np.dtype(type_str) - my_ctype = _str_to_ctype[type_str] - - assert my_dtype.itemsize == ctypes.sizeof(my_ctype) - - return my_dtype, my_ctype - - -def is_pickleable(obj: Any) -> bool: - try: - with io.BytesIO() as stream: - pickle.dump(obj, stream) - return True - except: - return False - - -# Functionality to import modules/objects by name, and call functions by name -# ------------------------------------------------------------------------------------------ - -def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]: - """Searches for the underlying module behind the name to some python object. - Returns the module and the object name (original name with module part removed).""" - - # allow convenience shorthands, substitute them by full names - obj_name = re.sub("^np.", "numpy.", obj_name) - obj_name = re.sub("^tf.", "tensorflow.", obj_name) - - # list alternatives for (module_name, local_obj_name) - parts = obj_name.split(".") - name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)] - - # try each alternative in turn - for module_name, local_obj_name in name_pairs: - try: - module = importlib.import_module(module_name) # may raise ImportError - get_obj_from_module(module, local_obj_name) # may raise AttributeError - return module, local_obj_name - except: - pass - - # maybe some of the modules themselves contain errors? - for module_name, _local_obj_name in name_pairs: - try: - importlib.import_module(module_name) # may raise ImportError - except ImportError: - if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"): - raise - - # maybe the requested attribute is missing? 
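A short usage sketch for two of the helpers defined earlier in dnnlib/util.py (EasyDict and format_time); the import assumes the StyleGAN repo's dnnlib package is on the Python path, and the values are purely illustrative:

from dnnlib.util import EasyDict, format_time

cfg = EasyDict(lr=1e-3, batch_size=32)
cfg.epochs = 10                      # attribute syntax writes straight into the dict
print(cfg["lr"], cfg.batch_size)     # 0.001 32

print(format_time(42))               # 42s
print(format_time(3661))             # 1h 01m 01s
print(format_time(90000))            # 1d 01h 00m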
- for module_name, local_obj_name in name_pairs: - try: - module = importlib.import_module(module_name) # may raise ImportError - get_obj_from_module(module, local_obj_name) # may raise AttributeError - except ImportError: - pass - - # we are out of luck, but we have no idea why - raise ImportError(obj_name) - - -def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any: - """Traverses the object name and returns the last (rightmost) python object.""" - if obj_name == '': - return module - obj = module - for part in obj_name.split("."): - obj = getattr(obj, part) - return obj - - -def get_obj_by_name(name: str) -> Any: - """Finds the python object with the given name.""" - module, obj_name = get_module_from_obj_name(name) - return get_obj_from_module(module, obj_name) - - -def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any: - """Finds the python object with the given name and calls it as a function.""" - assert func_name is not None - func_obj = get_obj_by_name(func_name) - assert callable(func_obj) - return func_obj(*args, **kwargs) - - -def get_module_dir_by_obj_name(obj_name: str) -> str: - """Get the directory path of the module containing the given object name.""" - module, _ = get_module_from_obj_name(obj_name) - return os.path.dirname(inspect.getfile(module)) - - -def is_top_level_function(obj: Any) -> bool: - """Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'.""" - return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__ - - -def get_top_level_function_name(obj: Any) -> str: - """Return the fully-qualified name of a top-level function.""" - assert is_top_level_function(obj) - return obj.__module__ + "." + obj.__name__ - - -# File system helpers -# ------------------------------------------------------------------------------------------ - -def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]: - """List all files recursively in a given directory while ignoring given file and directory names. - Returns list of tuples containing both absolute and relative paths.""" - assert os.path.isdir(dir_path) - base_name = os.path.basename(os.path.normpath(dir_path)) - - if ignores is None: - ignores = [] - - result = [] - - for root, dirs, files in os.walk(dir_path, topdown=True): - for ignore_ in ignores: - dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)] - - # dirs need to be edited in-place - for d in dirs_to_remove: - dirs.remove(d) - - files = [f for f in files if not fnmatch.fnmatch(f, ignore_)] - - absolute_paths = [os.path.join(root, f) for f in files] - relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths] - - if add_base_to_relative: - relative_paths = [os.path.join(base_name, p) for p in relative_paths] - - assert len(absolute_paths) == len(relative_paths) - result += zip(absolute_paths, relative_paths) - - return result - - -def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None: - """Takes in a list of tuples of (src, dst) paths and copies files. 
- Will create all necessary directories.""" - for file in files: - target_dir_name = os.path.dirname(file[1]) - - # will create all intermediate-level directories - if not os.path.exists(target_dir_name): - os.makedirs(target_dir_name) - - shutil.copyfile(file[0], file[1]) - - -# URL helpers -# ------------------------------------------------------------------------------------------ - -def is_url(obj: Any) -> bool: - """Determine whether the given object is a valid URL string.""" - if not isinstance(obj, str) or not "://" in obj: - return False - try: - res = requests.compat.urlparse(obj) - if not res.scheme or not res.netloc or not "." in res.netloc: - return False - res = requests.compat.urlparse(requests.compat.urljoin(obj, "/")) - if not res.scheme or not res.netloc or not "." in res.netloc: - return False - except: - return False - return True - - -def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True) -> Any: - """Download the given URL and return a binary-mode file object to access the data.""" - assert is_url(url) - assert num_attempts >= 1 - - # Lookup from cache. - url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest() - if cache_dir is not None: - cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*")) - if len(cache_files) == 1: - return open(cache_files[0], "rb") - - # Download. - url_name = None - url_data = None - with requests.Session() as session: - if verbose: - print("Downloading %s ..." % url, end="", flush=True) - for attempts_left in reversed(range(num_attempts)): - try: - with session.get(url) as res: - res.raise_for_status() - if len(res.content) == 0: - raise IOError("No data received") - - if len(res.content) < 8192: - content_str = res.content.decode("utf-8") - if "download_warning" in res.headers.get("Set-Cookie", ""): - links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link] - if len(links) == 1: - url = requests.compat.urljoin(url, links[0]) - raise IOError("Google Drive virus checker nag") - if "Google Drive - Quota exceeded" in content_str: - raise IOError("Google Drive quota exceeded") - - match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", "")) - url_name = match[1] if match else url - url_data = res.content - if verbose: - print(" done") - break - except: - if not attempts_left: - if verbose: - print(" failed") - raise - if verbose: - print(".", end="", flush=True) - - # Save to cache. - if cache_dir is not None: - safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name) - cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name) - temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name) - os.makedirs(cache_dir, exist_ok=True) - with open(temp_file, "wb") as f: - f.write(url_data) - os.replace(temp_file, cache_file) # atomic - - # Return data as file object. 
- return io.BytesIO(url_data) diff --git a/spaces/yderre-aubay/midi-player-demo/src/common/track/TrackEvent.ts b/spaces/yderre-aubay/midi-player-demo/src/common/track/TrackEvent.ts deleted file mode 100644 index be8c40edba57786ac33dc984e9a288bae6f4e390..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/common/track/TrackEvent.ts +++ /dev/null @@ -1,38 +0,0 @@ -import { AnyEvent } from "midifile-ts" -import { DistributiveOmit } from "../types" -import { AnySignalEvent } from "./signalEvents" - -export interface TickProvider { - tick: number -} - -export interface DeltaTimeProvider { - deltaTime: number -} - -export type TrackEventRequired = TickProvider & { - id: number - - // Mark as recording in progress - // Do not playback in Player - isRecording?: boolean -} - -export type TrackEventOf = DistributiveOmit & - TrackEventRequired - -type NoteEventContent = { - type: "channel" - subtype: "note" - duration: number - noteNumber: number - velocity: number -} - -export type NoteEvent = TrackEventOf -export type TrackEvent = TrackEventOf< - AnyEvent | NoteEventContent | AnySignalEvent -> - -export type FeatureOf = DistributiveOmit -export type AnyEventFeature = FeatureOf diff --git a/spaces/yerfor/SyntaSpeech/modules/tts/diffspeech/net.py b/spaces/yerfor/SyntaSpeech/modules/tts/diffspeech/net.py deleted file mode 100644 index 764020f28add5e4ee387a9d081ab6d548fc0f201..0000000000000000000000000000000000000000 --- a/spaces/yerfor/SyntaSpeech/modules/tts/diffspeech/net.py +++ /dev/null @@ -1,110 +0,0 @@ -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from math import sqrt - -Linear = nn.Linear -ConvTranspose2d = nn.ConvTranspose2d - - -class Mish(nn.Module): - def forward(self, x): - return x * torch.tanh(F.softplus(x)) - - -class SinusoidalPosEmb(nn.Module): - def __init__(self, dim): - super().__init__() - self.dim = dim - - def forward(self, x): - device = x.device - half_dim = self.dim // 2 - emb = math.log(10000) / (half_dim - 1) - emb = torch.exp(torch.arange(half_dim, device=device) * -emb) - emb = x[:, None] * emb[None, :] - emb = torch.cat((emb.sin(), emb.cos()), dim=-1) - return emb - - -def Conv1d(*args, **kwargs): - layer = nn.Conv1d(*args, **kwargs) - nn.init.kaiming_normal_(layer.weight) - return layer - - -class ResidualBlock(nn.Module): - def __init__(self, encoder_hidden, residual_channels, dilation): - super().__init__() - self.dilated_conv = Conv1d(residual_channels, 2 * residual_channels, 3, padding=dilation, dilation=dilation) - self.diffusion_projection = Linear(residual_channels, residual_channels) - self.conditioner_projection = Conv1d(encoder_hidden, 2 * residual_channels, 1) - self.output_projection = Conv1d(residual_channels, 2 * residual_channels, 1) - - def forward(self, x, conditioner, diffusion_step): - diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1) - conditioner = self.conditioner_projection(conditioner) - y = x + diffusion_step - - y = self.dilated_conv(y) + conditioner - - gate, filter = torch.chunk(y, 2, dim=1) - y = torch.sigmoid(gate) * torch.tanh(filter) - - y = self.output_projection(y) - residual, skip = torch.chunk(y, 2, dim=1) - return (x + residual) / sqrt(2.0), skip - - -class DiffNet(nn.Module): - def __init__(self, hparams): - super().__init__() - in_dims = hparams['audio_num_mel_bins'] - self.encoder_hidden = hparams['hidden_size'] - self.residual_layers = hparams['residual_layers'] - self.residual_channels = hparams['residual_channels'] - 
self.dilation_cycle_length = hparams['dilation_cycle_length'] - - self.input_projection = Conv1d(in_dims, self.residual_channels, 1) - self.diffusion_embedding = SinusoidalPosEmb(self.residual_channels) - dim = self.residual_channels - self.mlp = nn.Sequential( - nn.Linear(dim, dim * 4), - Mish(), - nn.Linear(dim * 4, dim) - ) - self.residual_layers = nn.ModuleList([ - ResidualBlock(self.encoder_hidden, self.residual_channels, 2 ** (i % self.dilation_cycle_length)) - for i in range(self.residual_layers) - ]) - self.skip_projection = Conv1d(self.residual_channels, self.residual_channels, 1) - self.output_projection = Conv1d(self.residual_channels, in_dims, 1) - nn.init.zeros_(self.output_projection.weight) - - def forward(self, spec, diffusion_step, cond): - """ - - :param spec: [B, 1, M, T] - :param diffusion_step: [B, 1] - :param cond: [B, M, T] - :return: - """ - x = spec[:, 0] - x = self.input_projection(x) # x [B, residual_channel, T] - - x = F.relu(x) - diffusion_step = self.diffusion_embedding(diffusion_step) - diffusion_step = self.mlp(diffusion_step) - skip = [] - for layer_id, layer in enumerate(self.residual_layers): - x, skip_connection = layer(x, cond, diffusion_step) - skip.append(skip_connection) - - x = torch.sum(torch.stack(skip), dim=0) / sqrt(len(self.residual_layers)) - x = self.skip_projection(x) - x = F.relu(x) - x = self.output_projection(x) # [B, 80, T] - return x[:, None, :, :] diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/data/processors/__init__.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/data/processors/__init__.py deleted file mode 100644 index a26ab5776d74715428b10c4d9cd943e53b253785..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/data/processors/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels -from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features -from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor -from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/clap/modeling_clap.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/clap/modeling_clap.py deleted file mode 100644 index 1d17a51883873403de6999809c14ca9e4b2c2a51..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/clap/modeling_clap.py +++ /dev/null @@ -1,2316 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The LAION-AI Team and The HuggingFace Team. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" PyTorch CLAP model.""" -import collections -import math -from dataclasses import dataclass -from typing import Any, List, Optional, Tuple, Union - -import torch -import torch.nn.functional as F -from torch import nn - -from ...activations import ACT2FN -from ...modeling_outputs import ( - BaseModelOutputWithPastAndCrossAttentions, - BaseModelOutputWithPooling, - BaseModelOutputWithPoolingAndCrossAttentions, -) -from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, meshgrid, prune_linear_layer -from ...utils import ( - ModelOutput, - add_start_docstrings, - add_start_docstrings_to_model_forward, - logging, - replace_return_docstrings, -) -from .configuration_clap import ClapAudioConfig, ClapConfig, ClapTextConfig - - -logger = logging.get_logger(__name__) - -_CHECKPOINT_FOR_DOC = "laion/clap-htsat-fused" - -CLAP_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "laion/clap-htsat-fused", - "laion/clap-htsat-unfused", - # See all clap models at https://huggingface.co/models?filter=clap -] - - -# Adapted from: https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/utils.py#L191 -def interpolate(hidden_states, ratio): - """ - Interpolate data in time domain. This is used to compensate the resolution reduction in downsampling of a CNN. - - Args: - hidden_states (`torch.FloatTensor` of shape (batch_size, time_length, classes_num)): - Input hidden states - ratio (`int`): - The ratio of the length of the output to the length of the input. - """ - (batch_size, time_length, classes_num) = hidden_states.shape - upsampled = hidden_states[:, :, None, :].repeat(1, 1, ratio, 1) - upsampled = upsampled.reshape(batch_size, time_length * ratio, classes_num) - return upsampled - - -# Adapted from https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/htsat.py#L249 -def window_partition(hidden_states, window_size): - """ - Returns the resized hidden states. 
The output shape should be `(batch_size * num_windows, window_size, window_size, - num_channels)` - - Args: - hidden_states (`torch.FloatTensor` of shape `(batch_size, height, width, num_channels)`): - Input hidden states - window_size (`int`): - Window size - """ - batch_size, height, width, num_channels = hidden_states.shape - - hidden_states = hidden_states.view( - batch_size, height // window_size, window_size, width // window_size, window_size, num_channels - ) - windows = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels) - return windows - - -# Adapted from https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/htsat.py#L263 -def window_reverse(windows, window_size, height, width): - """ - Args: - windows (`torch.FloatTensor` of shape `(num_windows * batch_size, window_size, window_size, num_channels)`): - Input windows - window_size (`int`): - Window size - height (`int`): - Height of the resized audio - width (`int`): - Width of the resized audio - """ - batch_size = int(windows.shape[0] / (height * width / window_size / window_size)) - - hidden_states = windows.view(batch_size, height // window_size, width // window_size, window_size, window_size, -1) - hidden_states = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().view(batch_size, height, width, -1) - return hidden_states - - -# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids -def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): - """ - Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols - are ignored. This is modified from fairseq's `utils.make_positions`. - - Args: - x: torch.Tensor x: - - Returns: torch.Tensor - """ - # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. - mask = input_ids.ne(padding_idx).int() - incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask - return incremental_indices.long() + padding_idx - - -# contrastive loss function, adapted from -# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html#CLIP-loss-function -def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: - labels = torch.arange(len(logits), device=logits.device) - return nn.functional.cross_entropy(logits, labels) - - -@dataclass -# Copied from transformers.models.clip.modeling_clip.CLIPTextModelOutput with CLIP->Clap -class ClapTextModelOutput(ModelOutput): - """ - Base class for text model's outputs that also contains a pooling of the last hidden states. - - Args: - text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): - The text embeddings obtained by applying the projection layer to the pooler_output. - last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Sequence of hidden-states at the output of the last layer of the model. - hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + - one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. 
- - Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. - attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, - sequence_length)`. - - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention - heads. - """ - - text_embeds: Optional[torch.FloatTensor] = None - last_hidden_state: torch.FloatTensor = None - hidden_states: Optional[Tuple[torch.FloatTensor]] = None - attentions: Optional[Tuple[torch.FloatTensor]] = None - - -@dataclass -class ClapAudioModelOutput(ModelOutput): - """ - ClapAudio model output to mimic the output of the original implementation. - - Args: - audio_embeds (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): - The Audio embeddings obtained by applying the projection layer to the pooler_output. - last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Sequence of hidden-states at the output of the last layer of the model. - attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, - sequence_length)`. - - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention - heads. - hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + - one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. - """ - - audio_embeds: Optional[torch.FloatTensor] = None - last_hidden_state: torch.FloatTensor = None - hidden_states: Optional[Tuple[torch.FloatTensor]] = None - attentions: Optional[Tuple[torch.FloatTensor]] = None - - -@dataclass -# Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->Clap, vision->audio, Vision->Audio, image->audio -class ClapOutput(ModelOutput): - """ - Args: - loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): - Contrastive loss for audio-text similarity. - logits_per_audio:(`torch.FloatTensor` of shape `(audio_batch_size, text_batch_size)`): - The scaled dot product scores between `audio_embeds` and `text_embeds`. This represents the audio-text - similarity scores. - logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, audio_batch_size)`): - The scaled dot product scores between `text_embeds` and `audio_embeds`. This represents the text-audio - similarity scores. - text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): - The text embeddings obtained by applying the projection layer to the pooled output of [`ClapTextModel`]. - audio_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): - The audio embeddings obtained by applying the projection layer to the pooled output of [`ClapAudioModel`]. - text_model_output(`BaseModelOutputWithPooling`): - The output of the [`ClapTextModel`]. 
- audio_model_output(`BaseModelOutputWithPooling`): - The output of the [`ClapAudioModel`]. - """ - - loss: Optional[torch.FloatTensor] = None - logits_per_audio: torch.FloatTensor = None - logits_per_text: torch.FloatTensor = None - text_embeds: torch.FloatTensor = None - audio_embeds: torch.FloatTensor = None - text_model_output: BaseModelOutputWithPooling = None - audio_model_output: BaseModelOutputWithPooling = None - - def to_tuple(self) -> Tuple[Any]: - return tuple( - self[k] if k not in ["text_model_output", "audio_model_output"] else getattr(self, k).to_tuple() - for k in self.keys() - ) - - -# Adapted from transformers.models.swin.modeling_swin.SwinDropPath -class ClapDropPath(nn.Module): - """ - Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is a slightly - refactored version of the `SwinDropPath` implementation. - """ - - def __init__(self, drop_prob=None): - super().__init__() - self.drop_prob = drop_prob - - def forward(self, hidden_states): - if self.drop_prob == 0.0 or not self.training: - return hidden_states - - keep_prob = 1 - self.drop_prob - # work with diff dim tensors, not just 2D ConvNets - shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1) - - random_tensor = keep_prob + torch.rand(shape, dtype=hidden_states.dtype, device=hidden_states.device) - random_tensor.floor_() # binarize - output = hidden_states.div(keep_prob) * random_tensor - return output - - -# Adapted from https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/feature_fusion.py#L133 -class ClapAudioAFFBlock(nn.Module): - r""" - ATTENTIONAL FEATURE FUSION Block from CLAP, since in CLAP we are always in 2D mode, it is not needed to implement - the 1D version. - """ - - def __init__(self, config: ClapAudioConfig): - super().__init__() - channels = config.patch_embeds_hidden_size - downsize_ratio = config.aff_block_r - inter_channels = int(channels // downsize_ratio) - - self.local_att = nn.Sequential( - nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm2d(inter_channels), - nn.ReLU(inplace=True), - nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm2d(channels), - ) - self.global_att = nn.Sequential( - nn.AdaptiveAvgPool2d(1), - nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm2d(inter_channels), - nn.ReLU(inplace=True), - nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0), - nn.BatchNorm2d(channels), - ) - - self.sigmoid = nn.Sigmoid() - - def forward(self, hidden_states, residual): - attention_input = hidden_states + residual - - fused_layer_output = self.local_att(attention_input) + self.global_att(attention_input) - fused_layer_output = self.sigmoid(fused_layer_output) - - output = 2 * hidden_states * fused_layer_output + 2 * residual * (1 - fused_layer_output) - return output - - -class ClapAudioPatchEmbed(nn.Module): - """ - This module converts the hidden states reshaped as an image to patch embeddings ready to be passed to the - Transformer block. 
- """ - - def __init__(self, config: ClapAudioConfig): - super().__init__() - img_size = (config.spec_size, config.spec_size) if isinstance(config.spec_size, int) else config.spec_size - patch_size = ( - (config.patch_size, config.patch_size) if isinstance(config.patch_size, int) else config.patch_size - ) - patch_stride = ( - (config.patch_stride, config.patch_stride) if isinstance(config.patch_stride, int) else config.patch_stride - ) - - self.img_size = img_size - self.patch_stride = patch_stride - - self.grid_size = (img_size[0] // patch_stride[0], img_size[1] // patch_stride[1]) - self.num_patches = self.grid_size[0] * self.grid_size[1] - - self.flatten = config.flatten_patch_embeds - self.enable_fusion = config.enable_fusion - - padding = ((patch_size[0] - patch_stride[0]) // 2, (patch_size[1] - patch_stride[1]) // 2) - - scale_factor = 4 if (self.enable_fusion) and (config.fusion_type == "channel_map") else 1 - - self.proj = nn.Conv2d( - config.patch_embed_input_channels * scale_factor, - config.patch_embeds_hidden_size, - kernel_size=patch_size, - stride=patch_stride, - padding=padding, - ) - - self.norm = nn.LayerNorm(config.patch_embeds_hidden_size) if config.enable_patch_layer_norm else nn.Identity() - if self.enable_fusion: - self.fusion_model = ClapAudioAFFBlock(config) - self.mel_conv2d = nn.Conv2d( - config.patch_embed_input_channels, - config.patch_embeds_hidden_size, - kernel_size=(patch_size[0], patch_size[1] * 3), - stride=(patch_stride[0], patch_stride[1] * 3), - padding=padding, - ) - - def forward(self, hidden_states, is_longer_idx=None): - if self.enable_fusion: - # retrieve the last mel as we have transposed the input - global_hidden_states = hidden_states[:, 0:1, :, :] - - # global processing - batch_size, num_channels, height, width = global_hidden_states.shape - - if height != self.img_size[0] or width != self.img_size[1]: - raise ValueError( - f"Input audio size ({height}*{width}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." - ) - - global_hidden_states = self.proj(global_hidden_states) - output_width = global_hidden_states.size(-1) - if len(is_longer_idx) > 0: - # local processing - local_hidden_states = hidden_states[is_longer_idx, 1:, :, :].contiguous() - batch_size, num_channels, height, width = local_hidden_states.shape - local_hidden_states = local_hidden_states.view(batch_size * num_channels, 1, height, width) - - local_hidden_states = self.mel_conv2d(local_hidden_states) - - _, features, height, width = local_hidden_states.shape - local_hidden_states = local_hidden_states.view(batch_size, num_channels, features, height, width) - local_hidden_states = local_hidden_states.permute((0, 2, 3, 1, 4)).contiguous().flatten(3) - - local_width = local_hidden_states.size(-1) - local_hidden_states = torch.nn.functional.pad( - local_hidden_states, (0, output_width - local_width), "constant", 0 - ) - - global_hidden_states[is_longer_idx] = self.fusion_model( - global_hidden_states[is_longer_idx], local_hidden_states - ) - hidden_states = global_hidden_states - else: - _, _, height, width = hidden_states.shape - if height != self.img_size[0] or width != self.img_size[1]: - raise ValueError( - f"Input audio size ({height}*{width}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
- ) - hidden_states = self.proj(hidden_states) - - if self.flatten: - hidden_states = hidden_states.flatten(2).transpose(1, 2) - hidden_states = self.norm(hidden_states) - return hidden_states - - -# Copied from transformers.models.swin.modeling_swin.SwinSelfAttention with Swin->ClapAudio -class ClapAudioSelfAttention(nn.Module): - def __init__(self, config, dim, num_heads, window_size): - super().__init__() - if dim % num_heads != 0: - raise ValueError( - f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})" - ) - - self.num_attention_heads = num_heads - self.attention_head_size = int(dim / num_heads) - self.all_head_size = self.num_attention_heads * self.attention_head_size - self.window_size = ( - window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size) - ) - - self.relative_position_bias_table = nn.Parameter( - torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads) - ) - - # get pair-wise relative position index for each token inside the window - coords_h = torch.arange(self.window_size[0]) - coords_w = torch.arange(self.window_size[1]) - coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij")) - coords_flatten = torch.flatten(coords, 1) - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] - relative_coords = relative_coords.permute(1, 2, 0).contiguous() - relative_coords[:, :, 0] += self.window_size[0] - 1 - relative_coords[:, :, 1] += self.window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 - relative_position_index = relative_coords.sum(-1) - self.register_buffer("relative_position_index", relative_position_index) - - self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) - self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) - self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) - - self.dropout = nn.Dropout(config.attention_probs_dropout_prob) - - def transpose_for_scores(self, x): - new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) - x = x.view(new_x_shape) - return x.permute(0, 2, 1, 3) - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.FloatTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = False, - ) -> Tuple[torch.Tensor]: - batch_size, dim, num_channels = hidden_states.shape - mixed_query_layer = self.query(hidden_states) - - key_layer = self.transpose_for_scores(self.key(hidden_states)) - value_layer = self.transpose_for_scores(self.value(hidden_states)) - query_layer = self.transpose_for_scores(mixed_query_layer) - - # Take the dot product between "query" and "key" to get the raw attention scores. 
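- # hidden_states arrives already partitioned into windows, so query/key/value have shape (batch_size * num_windows, num_heads, window_area, attention_head_size) and the scores computed below have shape (batch_size * num_windows, num_heads, window_area, window_area), where window_area = window_size ** 2.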
- attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) - - attention_scores = attention_scores / math.sqrt(self.attention_head_size) - - relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)] - relative_position_bias = relative_position_bias.view( - self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 - ) - - relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() - attention_scores = attention_scores + relative_position_bias.unsqueeze(0) - - if attention_mask is not None: - # Apply the attention mask is (precomputed for all layers in ClapAudioModel forward() function) - mask_shape = attention_mask.shape[0] - attention_scores = attention_scores.view( - batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim - ) - attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0) - attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim) - - # Normalize the attention scores to probabilities. - attention_probs = nn.functional.softmax(attention_scores, dim=-1) - - # This is actually dropping out entire tokens to attend to, which might - # seem a bit unusual, but is taken from the original Transformer paper. - attention_probs = self.dropout(attention_probs) - - # Mask heads if we want to - if head_mask is not None: - attention_probs = attention_probs * head_mask - - context_layer = torch.matmul(attention_probs, value_layer) - context_layer = context_layer.permute(0, 2, 1, 3).contiguous() - new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) - context_layer = context_layer.view(new_context_layer_shape) - - outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) - - return outputs - - -# Copied from transformers.models.swin.modeling_swin.SwinSelfOutput with Swin->ClapAudio -class ClapAudioSelfOutput(nn.Module): - def __init__(self, config, dim): - super().__init__() - self.dense = nn.Linear(dim, dim) - self.dropout = nn.Dropout(config.attention_probs_dropout_prob) - - def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - - return hidden_states - - -# Copied from transformers.models.swin.modeling_swin.SwinAttention with Swin->ClapAudio -class ClapAudioAttention(nn.Module): - def __init__(self, config, dim, num_heads, window_size): - super().__init__() - self.self = ClapAudioSelfAttention(config, dim, num_heads, window_size) - self.output = ClapAudioSelfOutput(config, dim) - self.pruned_heads = set() - - def prune_heads(self, heads): - if len(heads) == 0: - return - heads, index = find_pruneable_heads_and_indices( - heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads - ) - - # Prune linear layers - self.self.query = prune_linear_layer(self.self.query, index) - self.self.key = prune_linear_layer(self.self.key, index) - self.self.value = prune_linear_layer(self.self.value, index) - self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) - - # Update hyper params and store pruned heads - self.self.num_attention_heads = self.self.num_attention_heads - len(heads) - self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads - self.pruned_heads = self.pruned_heads.union(heads) - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: 
Optional[torch.FloatTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = False, - ) -> Tuple[torch.Tensor]: - self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions) - attention_output = self.output(self_outputs[0], hidden_states) - outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them - return outputs - - -# Copied from transformers.models.swin.modeling_swin.SwinIntermediate with Swin->ClapAudio -class ClapAudioIntermediate(nn.Module): - def __init__(self, config, dim): - super().__init__() - self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) - if isinstance(config.hidden_act, str): - self.intermediate_act_fn = ACT2FN[config.hidden_act] - else: - self.intermediate_act_fn = config.hidden_act - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) - return hidden_states - - -# Copied from transformers.models.swin.modeling_swin.SwinOutput with Swin->ClapAudio -class ClapAudioOutput(nn.Module): - def __init__(self, config, dim): - super().__init__() - self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - return hidden_states - - -# Copied from transformers.models.swin.modeling_swin.SwinLayer with SwinDropPath->ClapDropPath, Swin->ClapAudio -class ClapAudioLayer(nn.Module): - def __init__(self, config, dim, input_resolution, num_heads, shift_size=0): - super().__init__() - self.chunk_size_feed_forward = config.chunk_size_feed_forward - self.shift_size = shift_size - self.window_size = config.window_size - self.input_resolution = input_resolution - self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) - self.attention = ClapAudioAttention(config, dim, num_heads, window_size=self.window_size) - self.drop_path = ClapDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity() - self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) - self.intermediate = ClapAudioIntermediate(config, dim) - self.output = ClapAudioOutput(config, dim) - - def set_shift_and_window_size(self, input_resolution): - if min(input_resolution) <= self.window_size: - # if window size is larger than input resolution, we don't partition windows - self.shift_size = 0 - self.window_size = min(input_resolution) - - def get_attn_mask(self, height, width, dtype): - if self.shift_size > 0: - # calculate attention mask for SW-MSA - img_mask = torch.zeros((1, height, width, 1), dtype=dtype) - height_slices = ( - slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None), - ) - width_slices = ( - slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None), - ) - count = 0 - for height_slice in height_slices: - for width_slice in width_slices: - img_mask[:, height_slice, width_slice, :] = count - count += 1 - - mask_windows = window_partition(img_mask, self.window_size) - mask_windows = mask_windows.view(-1, self.window_size * self.window_size) - attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) - else: - attn_mask = None - return 
attn_mask - - def maybe_pad(self, hidden_states, height, width): - pad_right = (self.window_size - width % self.window_size) % self.window_size - pad_bottom = (self.window_size - height % self.window_size) % self.window_size - pad_values = (0, 0, 0, pad_right, 0, pad_bottom) - hidden_states = nn.functional.pad(hidden_states, pad_values) - return hidden_states, pad_values - - def forward( - self, - hidden_states: torch.Tensor, - input_dimensions: Tuple[int, int], - head_mask: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = False, - always_partition: Optional[bool] = False, - ) -> Tuple[torch.Tensor, torch.Tensor]: - if not always_partition: - self.set_shift_and_window_size(input_dimensions) - else: - pass - height, width = input_dimensions - batch_size, _, channels = hidden_states.size() - shortcut = hidden_states - - hidden_states = self.layernorm_before(hidden_states) - - hidden_states = hidden_states.view(batch_size, height, width, channels) - - # pad hidden_states to multiples of window size - hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) - - _, height_pad, width_pad, _ = hidden_states.shape - # cyclic shift - if self.shift_size > 0: - shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) - else: - shifted_hidden_states = hidden_states - - # partition windows - hidden_states_windows = window_partition(shifted_hidden_states, self.window_size) - hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels) - attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype) - if attn_mask is not None: - attn_mask = attn_mask.to(hidden_states_windows.device) - - attention_outputs = self.attention( - hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions - ) - - attention_output = attention_outputs[0] - - attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels) - shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad) - - # reverse cyclic shift - if self.shift_size > 0: - attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) - else: - attention_windows = shifted_windows - - was_padded = pad_values[3] > 0 or pad_values[5] > 0 - if was_padded: - attention_windows = attention_windows[:, :height, :width, :].contiguous() - - attention_windows = attention_windows.view(batch_size, height * width, channels) - - hidden_states = shortcut + self.drop_path(attention_windows) - - layer_output = self.layernorm_after(hidden_states) - layer_output = self.intermediate(layer_output) - layer_output = hidden_states + self.output(layer_output) - - layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) - return layer_outputs - - -# Copied from transformers.models.swin.modeling_swin.SwinStage with Swin->ClapAudio -class ClapAudioStage(nn.Module): - def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample): - super().__init__() - self.config = config - self.dim = dim - self.blocks = nn.ModuleList( - [ - ClapAudioLayer( - config=config, - dim=dim, - input_resolution=input_resolution, - num_heads=num_heads, - shift_size=0 if (i % 2 == 0) else config.window_size // 2, - ) - for i in range(depth) - ] - ) - - # patch merging layer - if downsample is not None: - self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm) - 
else: - self.downsample = None - - self.pointing = False - - def forward( - self, - hidden_states: torch.Tensor, - input_dimensions: Tuple[int, int], - head_mask: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = False, - always_partition: Optional[bool] = False, - ) -> Tuple[torch.Tensor]: - height, width = input_dimensions - for i, layer_module in enumerate(self.blocks): - layer_head_mask = head_mask[i] if head_mask is not None else None - - layer_outputs = layer_module( - hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition - ) - - hidden_states = layer_outputs[0] - - hidden_states_before_downsampling = hidden_states - if self.downsample is not None: - height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2 - output_dimensions = (height, width, height_downsampled, width_downsampled) - hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions) - else: - output_dimensions = (height, width, height, width) - - stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions) - - if output_attentions: - stage_outputs += layer_outputs[1:] - return stage_outputs - - -# Copied from transformers.models.swin.modeling_swin.SwinPatchMerging with Swin->ClapAudio -class ClapAudioPatchMerging(nn.Module): - """ - Patch Merging Layer. - - Args: - input_resolution (`Tuple[int]`): - Resolution of input feature. - dim (`int`): - Number of input channels. - norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): - Normalization layer class. - """ - - def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None: - super().__init__() - self.input_resolution = input_resolution - self.dim = dim - self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) - self.norm = norm_layer(4 * dim) - - def maybe_pad(self, input_feature, height, width): - should_pad = (height % 2 == 1) or (width % 2 == 1) - if should_pad: - pad_values = (0, 0, 0, width % 2, 0, height % 2) - input_feature = nn.functional.pad(input_feature, pad_values) - - return input_feature - - def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor: - height, width = input_dimensions - # `dim` is height * width - batch_size, dim, num_channels = input_feature.shape - - input_feature = input_feature.view(batch_size, height, width, num_channels) - # pad input to be disible by width and height, if needed - input_feature = self.maybe_pad(input_feature, height, width) - # [batch_size, height/2, width/2, num_channels] - input_feature_0 = input_feature[:, 0::2, 0::2, :] - # [batch_size, height/2, width/2, num_channels] - input_feature_1 = input_feature[:, 1::2, 0::2, :] - # [batch_size, height/2, width/2, num_channels] - input_feature_2 = input_feature[:, 0::2, 1::2, :] - # [batch_size, height/2, width/2, num_channels] - input_feature_3 = input_feature[:, 1::2, 1::2, :] - # batch_size height/2 width/2 4*num_channels - input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1) - input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # batch_size height/2*width/2 4*C - - input_feature = self.norm(input_feature) - input_feature = self.reduction(input_feature) - - return input_feature - - -class ClapAudioEncoder(nn.Module): - def __init__(self, config): - super().__init__() - self.num_layers = len(config.depths) - - self.config = config - self.patch_embed = ClapAudioPatchEmbed(config) - 
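# Swin-style hierarchy: every stage except the last ends in ClapAudioPatchMerging, which halves both spatial dimensions and doubles the channel width before the next stage. -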
self.enable_fusion = config.enable_fusion - self.patch_stride = self.patch_embed.patch_stride - self.spec_size = config.spec_size - self.freq_ratio = config.spec_size // config.num_mel_bins - - self.num_features = int(config.patch_embeds_hidden_size * 2 ** (self.num_layers - 1)) - - drop_path_rate = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] - - grid_size = self.patch_embed.grid_size - self.input_resolutions = [(grid_size[0] // (2**i), grid_size[1] // (2**i)) for i in range(self.num_layers)] - - self.layers = nn.ModuleList( - [ - ClapAudioStage( - config=config, - dim=int(config.patch_embeds_hidden_size * 2**i_layer), - input_resolution=self.input_resolutions[i_layer], - depth=config.depths[i_layer], - num_heads=config.num_attention_heads[i_layer], - drop_path=drop_path_rate[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])], - downsample=ClapAudioPatchMerging if (i_layer < self.num_layers - 1) else None, - ) - for i_layer in range(self.num_layers) - ] - ) - - self.gradient_checkpointing = False - - self.batch_norm = nn.BatchNorm2d(config.num_mel_bins) - self.norm = nn.LayerNorm(self.num_features) - self.depths = config.depths - self.avgpool = nn.AdaptiveAvgPool1d(1) - - def reshape_mel2img(self, normalized_input_features): - """ - The input is 4 normalized log mel spectrograms. It is reshape to the common shape of images. Each channel - should represent 1 of the 4 crops of the spectrogram. For more details, refer to the [`ClapFeatureExtractor`]. - """ - _, _, time_length, freq_length = normalized_input_features.shape - - spec_width = int(self.spec_size * self.freq_ratio) - spec_heigth = self.spec_size // self.freq_ratio - - if time_length > spec_width or freq_length > spec_heigth: - raise ValueError("the wav size should be less than or equal to the swin input size") - - # to avoid bicubic zero error - if time_length < spec_width: - normalized_input_features = nn.functional.interpolate( - normalized_input_features, (spec_width, freq_length), mode="bicubic", align_corners=True - ) - if freq_length < spec_heigth: - normalized_input_features = nn.functional.interpolate( - normalized_input_features, (time_length, spec_heigth), mode="bicubic", align_corners=True - ) - - batch, channels, time, freq = normalized_input_features.shape - - # batch_size, channels, spec_width, spec_heigth --> batch_size, channels, spec_heigth * freq_ratio, spec_width // freq_ratio - normalized_input_features = normalized_input_features.reshape( - batch, channels * self.freq_ratio, time // self.freq_ratio, freq - ) - normalized_input_features = normalized_input_features.permute(0, 1, 3, 2).contiguous() - normalized_input_features = normalized_input_features.reshape( - batch, channels, freq * self.freq_ratio, time // self.freq_ratio - ) - - return normalized_input_features - - def forward( - self, - input_features, - is_longer: Optional[torch.FloatTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = False, - output_hidden_states: Optional[bool] = False, - output_hidden_states_before_downsampling: Optional[bool] = False, - always_partition: Optional[bool] = False, - return_dict: Optional[bool] = True, - ) -> Union[Tuple, ClapAudioModelOutput]: - input_features = input_features.transpose(1, 3) - normalized_input_features = self.batch_norm(input_features) - normalized_input_features = normalized_input_features.transpose(1, 3) - - is_longer_list_idx = None - if self.enable_fusion: - is_longer_list = 
is_longer.to(input_features.device) - is_longer_list_idx = torch.where(is_longer_list == 1)[0] - - hidden_states = self.reshape_mel2img(normalized_input_features) - - frames_num = hidden_states.shape[2] - - hidden_states = self.patch_embed(hidden_states, is_longer_list_idx) - - all_hidden_states = () if output_hidden_states else None - all_reshaped_hidden_states = () if output_hidden_states else None - all_self_attentions = () if output_attentions else None - - input_dimensions = self.input_resolutions[0] - - if output_hidden_states: - batch_size, _, hidden_size = hidden_states.shape - # rearrange batch_size (height width) channels -> batch_size channel height width - reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) - reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) - all_hidden_states += (hidden_states,) - all_reshaped_hidden_states += (reshaped_hidden_state,) - - for i, layer_module in enumerate(self.layers): - layer_head_mask = head_mask[i] if head_mask is not None else None - - input_dimensions = self.input_resolutions[i] - - if self.gradient_checkpointing and self.training: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs, output_attentions) - - return custom_forward - - layer_outputs = torch.utils.checkpoint.checkpoint( - create_custom_forward(layer_module), hidden_states, input_dimensions, layer_head_mask - ) - else: - layer_outputs = layer_module( - hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition - ) - - hidden_states = layer_outputs[0] - - hidden_states_before_downsampling = layer_outputs[1] - output_dimensions = layer_outputs[2] - - input_dimensions = (output_dimensions[-2], output_dimensions[-1]) - - if output_hidden_states and output_hidden_states_before_downsampling: - batch_size, _, hidden_size = hidden_states_before_downsampling.shape - # rearrange batch_size (height width) channels -> batch_size channel height width - # here we use the original (not downsampled) height and width - reshaped_hidden_state = hidden_states_before_downsampling.view( - batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size - ) - reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) - all_hidden_states += (hidden_states_before_downsampling,) - all_reshaped_hidden_states += (reshaped_hidden_state,) - elif output_hidden_states and not output_hidden_states_before_downsampling: - batch_size, _, hidden_size = hidden_states.shape - # rearrange batch_size (height width) channels -> batch_size channel height width - reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) - reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) - all_hidden_states += (hidden_states,) - all_reshaped_hidden_states += (reshaped_hidden_state,) - - if output_attentions: - all_self_attentions += layer_outputs[3:] - - last_hidden_state = self.norm(hidden_states) - - batch_size, _, n_channels = last_hidden_state.shape - - freq_shape = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[0] - temporal_shape = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[1] - - last_hidden_state = ( - last_hidden_state.permute(0, 2, 1).contiguous().reshape(batch_size, n_channels, freq_shape, temporal_shape) - ) - - batch_size, n_channels, n_frequencies, n_temp = last_hidden_state.shape - # group 2D CNN - c_freq_bin = n_frequencies // self.freq_ratio - last_hidden_state = last_hidden_state.reshape( - batch_size, 
n_channels, n_frequencies // c_freq_bin, c_freq_bin, n_temp - ) - last_hidden_state = ( - last_hidden_state.permute(0, 1, 3, 2, 4).contiguous().reshape(batch_size, n_channels, c_freq_bin, -1) - ) - latent_output = self.avgpool(torch.flatten(last_hidden_state, 2)) - latent_output = torch.flatten(latent_output, 1) - - if not return_dict: - return tuple( - v - for v in [ - last_hidden_state, - latent_output, - all_reshaped_hidden_states, - all_self_attentions, - ] - if v is not None - ) - - return BaseModelOutputWithPooling( - last_hidden_state=last_hidden_state, - pooler_output=latent_output, - hidden_states=all_reshaped_hidden_states, - attentions=all_self_attentions, - ) - - -CLAP_START_DOCSTRING = r""" - This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the - library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads - etc.) - - This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. - Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage - and behavior. - - Parameters: - config ([`ClapConfig`]): Model configuration class with all the parameters of the model. - Initializing with a config file does not load the weights associated with the model, only the - configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. -""" - -CLAP_TEXT_INPUTS_DOCSTRING = r""" - Args: - input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): - Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide - it. - - Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, - config.max_position_embeddings - 1]`. - - [What are position IDs?](../glossary#position-ids) - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. -""" - -CLAP_AUDIO_INPUTS_DOCSTRING = r""" - Args: - input_features (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): - Input audio features. This should be returnes by the [`ClapFeatureExtractor`] class that you can also - retrieve from [`AutoFeatureExtractor`]. See [`ClapFeatureExtractor.__call__`] for details. - is_longer (`torch.FloatTensor`, of shape `(batch_size, 1)`, *optional*): - Whether the audio clip is longer than `max_length`. If `True`, a feature fusion will be enabled to enhance - the features. 
- output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. -""" - -CLAP_INPUTS_DOCSTRING = r""" - Args: - input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): - Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide - it. - - Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, - config.max_position_embeddings - 1]`. - - [What are position IDs?](../glossary#position-ids) - input_features (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): - Input audio features. This should be returnes by the [`ClapFeatureExtractor`] class that you can also - retrieve from [`AutoFeatureExtractor`]. See [`ClapFeatureExtractor.__call__`] for details. - return_loss (`bool`, *optional*): - Whether or not to return the contrastive loss. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. -""" - - -class ClapProjectionLayer(nn.Module): - def __init__(self, config: Union[ClapAudioConfig, ClapTextConfig]): - super().__init__() - self.config = config - hidden_size = config.hidden_size - projection_dim = config.projection_dim - - self.linear1 = nn.Linear(hidden_size, projection_dim) - self.activation = ACT2FN[config.projection_hidden_act] - self.linear2 = nn.Linear(projection_dim, projection_dim) - - def forward(self, hidden_states): - hidden_states = self.linear1(hidden_states) - hidden_states = self.activation(hidden_states) - hidden_states = self.linear2(hidden_states) - return hidden_states - - -# Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->ClapText, persistent=False->persistent=True -class ClapTextEmbeddings(nn.Module): - """ - Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. 
- """ - - # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__ - def __init__(self, config): - super().__init__() - self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) - self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) - self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) - - # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load - # any TensorFlow checkpoint file - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - # position_ids (1, len position emb) is contiguous in memory and exported when serialized - self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - self.register_buffer( - "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=True - ) - self.register_buffer( - "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=True - ) - - # End copy - self.padding_idx = config.pad_token_id - self.position_embeddings = nn.Embedding( - config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx - ) - - def forward( - self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 - ): - if position_ids is None: - if input_ids is not None: - # Create the position ids from the input token ids. Any padded tokens remain padded. - position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) - else: - position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) - - if input_ids is not None: - input_shape = input_ids.size() - else: - input_shape = inputs_embeds.size()[:-1] - - seq_length = input_shape[1] - - # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs - # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves - # issue #5664 - if token_type_ids is None: - if hasattr(self, "token_type_ids"): - buffered_token_type_ids = self.token_type_ids[:, :seq_length] - buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) - token_type_ids = buffered_token_type_ids_expanded - else: - token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) - - if inputs_embeds is None: - inputs_embeds = self.word_embeddings(input_ids) - token_type_embeddings = self.token_type_embeddings(token_type_ids) - - embeddings = inputs_embeds + token_type_embeddings - if self.position_embedding_type == "absolute": - position_embeddings = self.position_embeddings(position_ids) - embeddings += position_embeddings - embeddings = self.LayerNorm(embeddings) - embeddings = self.dropout(embeddings) - return embeddings - - def create_position_ids_from_inputs_embeds(self, inputs_embeds): - """ - We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. 
- - Args: - inputs_embeds: torch.Tensor - - Returns: torch.Tensor - """ - input_shape = inputs_embeds.size()[:-1] - sequence_length = input_shape[1] - - position_ids = torch.arange( - self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device - ) - return position_ids.unsqueeze(0).expand(input_shape) - - -# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->ClapText -class ClapTextSelfAttention(nn.Module): - def __init__(self, config, position_embedding_type=None): - super().__init__() - if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): - raise ValueError( - f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " - f"heads ({config.num_attention_heads})" - ) - - self.num_attention_heads = config.num_attention_heads - self.attention_head_size = int(config.hidden_size / config.num_attention_heads) - self.all_head_size = self.num_attention_heads * self.attention_head_size - - self.query = nn.Linear(config.hidden_size, self.all_head_size) - self.key = nn.Linear(config.hidden_size, self.all_head_size) - self.value = nn.Linear(config.hidden_size, self.all_head_size) - - self.dropout = nn.Dropout(config.attention_probs_dropout_prob) - self.position_embedding_type = position_embedding_type or getattr( - config, "position_embedding_type", "absolute" - ) - if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": - self.max_position_embeddings = config.max_position_embeddings - self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) - - self.is_decoder = config.is_decoder - - def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: - new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) - x = x.view(new_x_shape) - return x.permute(0, 2, 1, 3) - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.FloatTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - encoder_attention_mask: Optional[torch.FloatTensor] = None, - past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, - output_attentions: Optional[bool] = False, - ) -> Tuple[torch.Tensor]: - mixed_query_layer = self.query(hidden_states) - - # If this is instantiated as a cross-attention module, the keys - # and values come from an encoder; the attention mask needs to be - # such that the encoder's padding tokens are not attended to. 
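- # Four cases are handled below: cross-attention with cached encoder key/values, fresh cross-attention, incremental decoding (past self-attention key/values concatenated with the current step), and plain self-attention.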
- is_cross_attention = encoder_hidden_states is not None - - if is_cross_attention and past_key_value is not None: - # reuse k,v, cross_attentions - key_layer = past_key_value[0] - value_layer = past_key_value[1] - attention_mask = encoder_attention_mask - elif is_cross_attention: - key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) - value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) - attention_mask = encoder_attention_mask - elif past_key_value is not None: - key_layer = self.transpose_for_scores(self.key(hidden_states)) - value_layer = self.transpose_for_scores(self.value(hidden_states)) - key_layer = torch.cat([past_key_value[0], key_layer], dim=2) - value_layer = torch.cat([past_key_value[1], value_layer], dim=2) - else: - key_layer = self.transpose_for_scores(self.key(hidden_states)) - value_layer = self.transpose_for_scores(self.value(hidden_states)) - - query_layer = self.transpose_for_scores(mixed_query_layer) - - use_cache = past_key_value is not None - if self.is_decoder: - # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. - # Further calls to cross_attention layer can then reuse all cross-attention - # key/value_states (first "if" case) - # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of - # all previous decoder key/value_states. Further calls to uni-directional self-attention - # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) - # if encoder bi-directional self-attention `past_key_value` is always `None` - past_key_value = (key_layer, value_layer) - - # Take the dot product between "query" and "key" to get the raw attention scores. - attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) - - if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": - query_length, key_length = query_layer.shape[2], key_layer.shape[2] - if use_cache: - position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( - -1, 1 - ) - else: - position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) - position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) - distance = position_ids_l - position_ids_r - - positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) - positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility - - if self.position_embedding_type == "relative_key": - relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) - attention_scores = attention_scores + relative_position_scores - elif self.position_embedding_type == "relative_key_query": - relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) - relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) - attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key - - attention_scores = attention_scores / math.sqrt(self.attention_head_size) - if attention_mask is not None: - # Apply the attention mask is (precomputed for all layers in ClapTextModel forward() function) - attention_scores = attention_scores + attention_mask - - # Normalize the attention scores to probabilities. 
- attention_probs = nn.functional.softmax(attention_scores, dim=-1) - - # This is actually dropping out entire tokens to attend to, which might - # seem a bit unusual, but is taken from the original Transformer paper. - attention_probs = self.dropout(attention_probs) - - # Mask heads if we want to - if head_mask is not None: - attention_probs = attention_probs * head_mask - - context_layer = torch.matmul(attention_probs, value_layer) - - context_layer = context_layer.permute(0, 2, 1, 3).contiguous() - new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) - context_layer = context_layer.view(new_context_layer_shape) - - outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) - - if self.is_decoder: - outputs = outputs + (past_key_value,) - return outputs - - -# Copied from transformers.models.bert.modeling_bert.BertSelfOutput -class ClapTextSelfOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->ClapText -class ClapTextAttention(nn.Module): - def __init__(self, config, position_embedding_type=None): - super().__init__() - self.self = ClapTextSelfAttention(config, position_embedding_type=position_embedding_type) - self.output = ClapTextSelfOutput(config) - self.pruned_heads = set() - - def prune_heads(self, heads): - if len(heads) == 0: - return - heads, index = find_pruneable_heads_and_indices( - heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads - ) - - # Prune linear layers - self.self.query = prune_linear_layer(self.self.query, index) - self.self.key = prune_linear_layer(self.self.key, index) - self.self.value = prune_linear_layer(self.self.value, index) - self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) - - # Update hyper params and store pruned heads - self.self.num_attention_heads = self.self.num_attention_heads - len(heads) - self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads - self.pruned_heads = self.pruned_heads.union(heads) - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.FloatTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - encoder_attention_mask: Optional[torch.FloatTensor] = None, - past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, - output_attentions: Optional[bool] = False, - ) -> Tuple[torch.Tensor]: - self_outputs = self.self( - hidden_states, - attention_mask, - head_mask, - encoder_hidden_states, - encoder_attention_mask, - past_key_value, - output_attentions, - ) - attention_output = self.output(self_outputs[0], hidden_states) - outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them - return outputs - - -# Copied from transformers.models.bert.modeling_bert.BertIntermediate -class ClapTextIntermediate(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = 
nn.Linear(config.hidden_size, config.intermediate_size) - if isinstance(config.hidden_act, str): - self.intermediate_act_fn = ACT2FN[config.hidden_act] - else: - self.intermediate_act_fn = config.hidden_act - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) - return hidden_states - - -# Copied from transformers.models.bert.modeling_bert.BertOutput -class ClapTextOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.intermediate_size, config.hidden_size) - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->ClapText -class ClapTextLayer(nn.Module): - def __init__(self, config): - super().__init__() - self.chunk_size_feed_forward = config.chunk_size_feed_forward - self.seq_len_dim = 1 - self.attention = ClapTextAttention(config) - self.is_decoder = config.is_decoder - self.add_cross_attention = config.add_cross_attention - if self.add_cross_attention: - if not self.is_decoder: - raise ValueError(f"{self} should be used as a decoder model if cross attention is added") - self.crossattention = ClapTextAttention(config, position_embedding_type="absolute") - self.intermediate = ClapTextIntermediate(config) - self.output = ClapTextOutput(config) - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.FloatTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - encoder_attention_mask: Optional[torch.FloatTensor] = None, - past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, - output_attentions: Optional[bool] = False, - ) -> Tuple[torch.Tensor]: - # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 - self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None - self_attention_outputs = self.attention( - hidden_states, - attention_mask, - head_mask, - output_attentions=output_attentions, - past_key_value=self_attn_past_key_value, - ) - attention_output = self_attention_outputs[0] - - # if decoder, the last output is tuple of self-attn cache - if self.is_decoder: - outputs = self_attention_outputs[1:-1] - present_key_value = self_attention_outputs[-1] - else: - outputs = self_attention_outputs[1:] # add self attentions if we output attention weights - - cross_attn_present_key_value = None - if self.is_decoder and encoder_hidden_states is not None: - if not hasattr(self, "crossattention"): - raise ValueError( - f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" - " by setting `config.add_cross_attention=True`" - ) - - # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple - cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None - cross_attention_outputs = self.crossattention( - attention_output, - attention_mask, - head_mask, - encoder_hidden_states, - encoder_attention_mask, - cross_attn_past_key_value, - output_attentions, - ) - 
attention_output = cross_attention_outputs[0] - outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights - - # add cross-attn cache to positions 3,4 of present_key_value tuple - cross_attn_present_key_value = cross_attention_outputs[-1] - present_key_value = present_key_value + cross_attn_present_key_value - - layer_output = apply_chunking_to_forward( - self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output - ) - outputs = (layer_output,) + outputs - - # if decoder, return the attn key/values as the last output - if self.is_decoder: - outputs = outputs + (present_key_value,) - - return outputs - - def feed_forward_chunk(self, attention_output): - intermediate_output = self.intermediate(attention_output) - layer_output = self.output(intermediate_output, attention_output) - return layer_output - - -# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->ClapText -class ClapTextEncoder(nn.Module): - def __init__(self, config): - super().__init__() - self.config = config - self.layer = nn.ModuleList([ClapTextLayer(config) for _ in range(config.num_hidden_layers)]) - self.gradient_checkpointing = False - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.FloatTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - encoder_hidden_states: Optional[torch.FloatTensor] = None, - encoder_attention_mask: Optional[torch.FloatTensor] = None, - past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = False, - output_hidden_states: Optional[bool] = False, - return_dict: Optional[bool] = True, - ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: - all_hidden_states = () if output_hidden_states else None - all_self_attentions = () if output_attentions else None - all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None - - if self.gradient_checkpointing and self.training: - if use_cache: - logger.warning_once( - "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
- ) - use_cache = False - - next_decoder_cache = () if use_cache else None - for i, layer_module in enumerate(self.layer): - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - layer_head_mask = head_mask[i] if head_mask is not None else None - past_key_value = past_key_values[i] if past_key_values is not None else None - - if self.gradient_checkpointing and self.training: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs, past_key_value, output_attentions) - - return custom_forward - - layer_outputs = torch.utils.checkpoint.checkpoint( - create_custom_forward(layer_module), - hidden_states, - attention_mask, - layer_head_mask, - encoder_hidden_states, - encoder_attention_mask, - ) - else: - layer_outputs = layer_module( - hidden_states, - attention_mask, - layer_head_mask, - encoder_hidden_states, - encoder_attention_mask, - past_key_value, - output_attentions, - ) - - hidden_states = layer_outputs[0] - if use_cache: - next_decoder_cache += (layer_outputs[-1],) - if output_attentions: - all_self_attentions = all_self_attentions + (layer_outputs[1],) - if self.config.add_cross_attention: - all_cross_attentions = all_cross_attentions + (layer_outputs[2],) - - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if not return_dict: - return tuple( - v - for v in [ - hidden_states, - next_decoder_cache, - all_hidden_states, - all_self_attentions, - all_cross_attentions, - ] - if v is not None - ) - return BaseModelOutputWithPastAndCrossAttentions( - last_hidden_state=hidden_states, - past_key_values=next_decoder_cache, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - cross_attentions=all_cross_attentions, - ) - - -# Copied from transformers.models.bert.modeling_bert.BertPooler -class ClapTextPooler(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.activation = nn.Tanh() - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - # We "pool" the model by simply taking the hidden state corresponding - # to the first token. - first_token_tensor = hidden_states[:, 0] - pooled_output = self.dense(first_token_tensor) - pooled_output = self.activation(pooled_output) - return pooled_output - - -class ClapPreTrainedModel(PreTrainedModel): - """ - An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained - models. 
- """ - - config_class = ClapConfig - base_model_prefix = "clap" - supports_gradient_checkpointing = False - - def _init_weights(self, module): - """Initialize the weights""" - factor = self.config.initializer_factor - - if isinstance(module, ClapTextEmbeddings): - module.position_embeddings.weight.data.normal_(mean=0.0, std=factor * 0.02) - module.token_type_embeddings.weight.data.normal_(mean=0.0, std=factor * 0.02) - elif isinstance(module, ClapModel): - nn.init.normal_(module.logit_scale_a, std=factor * 0.02) - nn.init.normal_(module.logit_scale_t, std=factor * 0.02) - elif isinstance(module, nn.Embedding): - module.weight.data.normal_(mean=0.0, std=factor * 0.02) - - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - elif isinstance(module, (nn.Conv2d, nn.Linear)): - in_proj_std = (self.config.hidden_size**-0.5) * ((2 * self.config.num_hidden_layers) ** -0.5) * factor - nn.init.normal_(module.weight, std=in_proj_std) - if module.bias is not None: - module.bias.data.zero_() - - def _set_gradient_checkpointing(self, module, value=False): - if isinstance(module, ClapTextEncoder): - module.gradient_checkpointing = value - - -class ClapAudioModel(ClapPreTrainedModel): - config_class = ClapAudioConfig - main_input_name = "input_features" - - def __init__(self, config: ClapAudioConfig): - super().__init__(config) - self.audio_encoder = ClapAudioEncoder(config) - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self) -> nn.Module: - return self.audio_encoder.patch_embed.proj - - @add_start_docstrings_to_model_forward(CLAP_AUDIO_INPUTS_DOCSTRING) - @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ClapAudioConfig) - def forward( - self, - input_features: Optional[torch.FloatTensor] = None, - is_longer: Optional[torch.BoolTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, BaseModelOutputWithPooling]: - r""" - Returns: - - Examples: - - ```python - >>> from datasets import load_dataset - >>> from transformers import AutoProcessor, ClapAudioModel - - >>> dataset = load_dataset("ashraq/esc50") - >>> audio_sample = dataset["train"]["audio"][0]["array"] - - >>> model = ClapAudioModel.from_pretrained("laion/clap-htsat-fused") - >>> processor = AutoProcessor.from_pretrained("laion/clap-htsat-fused") - - >>> inputs = processor(audios=audio_sample, return_tensors="pt") - - >>> outputs = model(**inputs) - >>> last_hidden_state = outputs.last_hidden_state - ```""" - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - - return self.audio_encoder( - input_features=input_features, - is_longer=is_longer, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - -class ClapTextModel(ClapPreTrainedModel): - """ - - The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of - cross-attention is added between the self-attention layers, following the architecture described in *Attention is - all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. 
Gomez, Lukasz
- Kaiser and Illia Polosukhin.
-
- To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
- to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` argument and
- `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
-
- .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762
-
- """
-
- config_class = ClapTextConfig
-
- # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->ClapText
- def __init__(self, config, add_pooling_layer=True):
- super().__init__(config)
- self.config = config
-
- self.embeddings = ClapTextEmbeddings(config)
- self.encoder = ClapTextEncoder(config)
-
- self.pooler = ClapTextPooler(config) if add_pooling_layer else None
-
- # Initialize weights and apply final processing
- self.post_init()
-
- def get_input_embeddings(self):
- return self.embeddings.word_embeddings
-
- def set_input_embeddings(self, value):
- self.embeddings.word_embeddings = value
-
- # Copied from transformers.models.bert.modeling_bert.BertModel.forward
- def forward(
- self,
- input_ids: Optional[torch.Tensor] = None,
- attention_mask: Optional[torch.Tensor] = None,
- token_type_ids: Optional[torch.Tensor] = None,
- position_ids: Optional[torch.Tensor] = None,
- head_mask: Optional[torch.Tensor] = None,
- inputs_embeds: Optional[torch.Tensor] = None,
- encoder_hidden_states: Optional[torch.Tensor] = None,
- encoder_attention_mask: Optional[torch.Tensor] = None,
- past_key_values: Optional[List[torch.FloatTensor]] = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
- r"""
- encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
- Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
- the model is configured as a decoder.
- encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
- Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
- the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
-
- - 1 for tokens that are **not masked**,
- - 0 for tokens that are **masked**.
- past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
- Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
-
- If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
- don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
- `decoder_input_ids` of shape `(batch_size, sequence_length)`.
- use_cache (`bool`, *optional*):
- If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
- `past_key_values`).
- """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if self.config.is_decoder: - use_cache = use_cache if use_cache is not None else self.config.use_cache - else: - use_cache = False - - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) - input_shape = input_ids.size() - elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] - else: - raise ValueError("You have to specify either input_ids or inputs_embeds") - - batch_size, seq_length = input_shape - device = input_ids.device if input_ids is not None else inputs_embeds.device - - # past_key_values_length - past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 - - if attention_mask is None: - attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) - - if token_type_ids is None: - if hasattr(self.embeddings, "token_type_ids"): - buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] - buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) - token_type_ids = buffered_token_type_ids_expanded - else: - token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) - - # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] - # ourselves in which case we just need to make it broadcastable to all heads. 
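- # `get_extended_attention_mask` broadcasts a 2D padding mask of shape (batch_size, seq_length) to
- # (batch_size, 1, 1, seq_length) and maps 1 -> 0.0 and 0 -> a large negative value, so it can simply
- # be added to the raw attention scores; e.g. a row [1, 1, 0] becomes roughly [0.0, 0.0, -inf].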
- extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) - - # If a 2D or 3D attention mask is provided for the cross-attention - # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] - if self.config.is_decoder and encoder_hidden_states is not None: - encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() - encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) - if encoder_attention_mask is None: - encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) - encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) - else: - encoder_extended_attention_mask = None - - # Prepare head mask if needed - # 1.0 in head_mask indicate we keep the head - # attention_probs has shape bsz x n_heads x N x N - # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] - # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] - head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) - - embedding_output = self.embeddings( - input_ids=input_ids, - position_ids=position_ids, - token_type_ids=token_type_ids, - inputs_embeds=inputs_embeds, - past_key_values_length=past_key_values_length, - ) - encoder_outputs = self.encoder( - embedding_output, - attention_mask=extended_attention_mask, - head_mask=head_mask, - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_extended_attention_mask, - past_key_values=past_key_values, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - sequence_output = encoder_outputs[0] - pooled_output = self.pooler(sequence_output) if self.pooler is not None else None - - if not return_dict: - return (sequence_output, pooled_output) + encoder_outputs[1:] - - return BaseModelOutputWithPoolingAndCrossAttentions( - last_hidden_state=sequence_output, - pooler_output=pooled_output, - past_key_values=encoder_outputs.past_key_values, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - cross_attentions=encoder_outputs.cross_attentions, - ) - - -@add_start_docstrings(CLAP_START_DOCSTRING) -class ClapModel(ClapPreTrainedModel): - config_class = ClapConfig - - def __init__(self, config: ClapConfig): - super().__init__(config) - - if not isinstance(config.text_config, ClapTextConfig): - raise ValueError( - "config.text_config is expected to be of type ClapTextConfig but is of type" - f" {type(config.text_config)}." - ) - - if not isinstance(config.audio_config, ClapAudioConfig): - raise ValueError( - "config.audio_config is expected to be of type ClapAudioConfig but is of type" - f" {type(config.audio_config)}." 
- ) - - text_config = config.text_config - audio_config = config.audio_config - - self.logit_scale_a = nn.Parameter(torch.tensor(math.log(config.logit_scale_init_value))) - self.logit_scale_t = nn.Parameter(torch.tensor(math.log(config.logit_scale_init_value))) - - self.projection_dim = config.projection_dim - - self.text_model = ClapTextModel(text_config) - self.text_projection = ClapProjectionLayer(text_config) - - self.audio_model = ClapAudioModel(audio_config) - self.audio_projection = ClapProjectionLayer(audio_config) - - # Initialize weights and apply final processing - self.post_init() - - @add_start_docstrings_to_model_forward(CLAP_TEXT_INPUTS_DOCSTRING) - def get_text_features( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> torch.FloatTensor: - r""" - Returns: - text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by - applying the projection layer to the pooled output of [`ClapTextModel`]. - - Examples: - - ```python - >>> from transformers import AutoTokenizer, ClapModel - - >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused") - >>> tokenizer = AutoTokenizer.from_pretrained("laion/clap-htsat-unfused") - - >>> inputs = tokenizer(["the sound of a cat", "the sound of a dog"], padding=True, return_tensors="pt") - >>> text_features = model.get_text_features(**inputs) - ```""" - # Use CLAP model's config for some fields (if specified) instead of those of audio & text components. - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - text_outputs = self.text_model( - input_ids=input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - pooled_output = text_outputs[1] if return_dict is not None else text_outputs.pooler_output - text_features = self.text_projection(pooled_output) - text_features = F.normalize(text_features, dim=-1) - - return text_features - - @add_start_docstrings_to_model_forward(CLAP_AUDIO_INPUTS_DOCSTRING) - def get_audio_features( - self, - input_features: Optional[torch.Tensor] = None, - is_longer: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> torch.FloatTensor: - r""" - Returns: - audio_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The audio embeddings obtained by - applying the projection layer to the pooled output of [`ClapAudioModel`]. 
-
- Examples:
-
- ```python
- >>> from transformers import AutoFeatureExtractor, ClapModel
- >>> import torch
-
- >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
- >>> feature_extractor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")
- >>> random_audio = torch.rand((16_000))
- >>> inputs = feature_extractor(random_audio, return_tensors="pt")
- >>> audio_features = model.get_audio_features(**inputs)
- ```"""
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
- audio_outputs = self.audio_model(
- input_features=input_features,
- is_longer=is_longer,
- return_dict=return_dict,
- )
-
- pooled_output = audio_outputs[1] if not return_dict else audio_outputs.pooler_output
-
- audio_features = self.audio_projection(pooled_output)
- audio_features = F.normalize(audio_features, dim=-1)
-
- return audio_features
-
- @add_start_docstrings_to_model_forward(CLAP_INPUTS_DOCSTRING)
- @replace_return_docstrings(output_type=ClapOutput, config_class=ClapConfig)
- def forward(
- self,
- input_ids: Optional[torch.LongTensor] = None,
- input_features: Optional[torch.FloatTensor] = None,
- is_longer: Optional[torch.BoolTensor] = None,
- attention_mask: Optional[torch.Tensor] = None,
- position_ids: Optional[torch.LongTensor] = None,
- return_loss: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- ) -> Union[Tuple, ClapOutput]:
- r"""
- Returns:
-
- Examples:
-
- ```python
- >>> from datasets import load_dataset
- >>> from transformers import AutoProcessor, ClapModel
-
- >>> dataset = load_dataset("ashraq/esc50")
- >>> audio_sample = dataset["train"]["audio"][0]["array"]
-
- >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
- >>> processor = AutoProcessor.from_pretrained("laion/clap-htsat-unfused")
-
- >>> input_text = ["Sound of a dog", "Sound of vacuum cleaner"]
-
- >>> inputs = processor(text=input_text, audios=audio_sample, return_tensors="pt", padding=True)
-
- >>> outputs = model(**inputs)
- >>> logits_per_audio = outputs.logits_per_audio # this is the audio-text similarity score
- >>> probs = logits_per_audio.softmax(dim=-1) # we can take the softmax to get the label probabilities
- ```"""
- # Use CLAP model's config for some fields (if specified) instead of those of audio & text components.
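- # Roughly, the rest of this method runs both towers, projects and L2-normalizes their pooled outputs,
- # and scales the text/audio similarity matrices by the learned temperatures (`logit_scale_t.exp()` and
- # `logit_scale_a.exp()`). With `return_loss=True`, the loss is the mean of the two cross-entropies
- # computed against the diagonal (matching audio-text pair) targets.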
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - audio_outputs = self.audio_model( - input_features=input_features, - is_longer=is_longer, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - text_outputs = self.text_model( - input_ids=input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - audio_embeds = audio_outputs[1] if not return_dict else audio_outputs.pooler_output - audio_embeds = self.audio_projection(audio_embeds) - - text_embeds = text_outputs[1] if not return_dict else text_outputs.pooler_output - text_embeds = self.text_projection(text_embeds) - - # normalized features - audio_embeds = audio_embeds / audio_embeds.norm(p=2, dim=-1, keepdim=True) - text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) - - # cosine similarity as logits - logit_scale_text = self.logit_scale_t.exp() - logit_scale_audio = self.logit_scale_a.exp() - logits_per_text = torch.matmul(text_embeds, audio_embeds.t()) * logit_scale_text - logits_per_audio = torch.matmul(audio_embeds, text_embeds.t()) * logit_scale_audio - - loss = None - if return_loss: - caption_loss = contrastive_loss(logits_per_text) - audio_loss = contrastive_loss(logits_per_audio.t()) - loss = (caption_loss + audio_loss) / 2.0 - - if not return_dict: - output = (logits_per_audio, logits_per_text, text_embeds, audio_embeds, text_outputs, audio_outputs) - return ((loss,) + output) if loss is not None else output - - return ClapOutput( - loss=loss, - logits_per_audio=logits_per_audio, - logits_per_text=logits_per_text, - text_embeds=text_embeds, - audio_embeds=audio_embeds, - text_model_output=text_outputs, - audio_model_output=audio_outputs, - ) - - -@add_start_docstrings( - """ - CLAP Text Model with a projection layer on top (a linear layer on top of the pooled output). 
- """, - CLAP_START_DOCSTRING, -) -class ClapTextModelWithProjection(ClapPreTrainedModel): - config_class = ClapTextConfig - - def __init__(self, config: ClapTextConfig): - super().__init__(config) - self.text_model = ClapTextModel(config) - self.text_projection = ClapProjectionLayer(config) - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self) -> nn.Module: - return self.text_model.embeddings.word_embeddings - - def set_input_embeddings(self, value): - self.text_model.embeddings.word_embeddings = value - - @add_start_docstrings_to_model_forward(CLAP_TEXT_INPUTS_DOCSTRING) - @replace_return_docstrings(output_type=ClapTextModelOutput, config_class=ClapTextConfig) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, ClapTextModelOutput]: - r""" - Returns: - - Examples: - - ```python - >>> from transformers import AutoTokenizer, ClapTextModelWithProjection - - >>> model = ClapTextModelWithProjection.from_pretrained("laion/clap-htsat-unfused") - >>> tokenizer = AutoTokenizer.from_pretrained("laion/clap-htsat-unfused") - - >>> inputs = tokenizer(["a sound of a cat", "a sound of a dog"], padding=True, return_tensors="pt") - - >>> outputs = model(**inputs) - >>> text_embeds = outputs.text_embeds - ```""" - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - text_outputs = self.text_model( - input_ids=input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - pooled_output = text_outputs[1] if not return_dict else text_outputs.pooler_output - - text_embeds = self.text_projection(pooled_output) - - if not return_dict: - outputs = (text_embeds, text_outputs[0]) + text_outputs[2:] - return tuple(output for output in outputs if output is not None) - - return ClapTextModelOutput( - text_embeds=text_embeds, - last_hidden_state=text_outputs.last_hidden_state, - hidden_states=text_outputs.hidden_states, - attentions=text_outputs.attentions, - ) - - -@add_start_docstrings( - """ - CLAP Audio Model with a projection layer on top (a linear layer on top of the pooled output). 
- """, - CLAP_START_DOCSTRING, -) -class ClapAudioModelWithProjection(ClapPreTrainedModel): - config_class = ClapAudioConfig - main_input_name = "input_features" - - def __init__(self, config: ClapAudioConfig): - super().__init__(config) - self.audio_model = ClapAudioModel(config) - self.audio_projection = ClapProjectionLayer(config) - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self) -> nn.Module: - return self.audio_model.audio_encoder.patch_embed.proj - - @add_start_docstrings_to_model_forward(CLAP_AUDIO_INPUTS_DOCSTRING) - @replace_return_docstrings(output_type=ClapAudioModelOutput, config_class=ClapAudioConfig) - def forward( - self, - input_features: Optional[torch.FloatTensor] = None, - is_longer: Optional[torch.BoolTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, ClapAudioModelOutput]: - r""" - Returns: - - Examples: - - ```python - >>> from datasets import load_dataset - >>> from transformers import ClapAudioModelWithProjection, ClapProcessor - - >>> model = ClapAudioModelWithProjection.from_pretrained("laion/clap-htsat-fused") - >>> processor = ClapProcessor.from_pretrained("laion/clap-htsat-fused") - - >>> dataset = load_dataset("ashraq/esc50") - >>> audio_sample = dataset["train"]["audio"][0]["array"] - - >>> inputs = processor(audios=audio_sample, return_tensors="pt") - >>> outputs = model(**inputs) - >>> audio_embeds = outputs.audio_embeds - ```""" - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - - audio_outputs = self.audio_model( - input_features=input_features, - is_longer=is_longer, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - pooled_output = audio_outputs[1] if not return_dict else audio_outputs.pooler_output - - audio_embeds = self.audio_projection(pooled_output) - - if not return_dict: - outputs = (audio_embeds, audio_outputs[0]) + audio_outputs[2:] - return tuple(output for output in outputs if output is not None) - - return ClapAudioModelOutput( - audio_embeds=audio_embeds, - last_hidden_state=audio_outputs.last_hidden_state, - attentions=audio_outputs.attentions, - hidden_states=audio_outputs.hidden_states, - ) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/cpmant/__init__.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/cpmant/__init__.py deleted file mode 100644 index 8140009b60f15680663fc61569f55675e6d71196..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/cpmant/__init__.py +++ /dev/null @@ -1,64 +0,0 @@ -# flake8: noqa -# There's no way to ignore "F401 '...' imported but unused" warnings in this -# module, but to preserve other warnings. So, don't check this module at all. - -# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import TYPE_CHECKING - -# rely on isort to merge the imports -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = { - "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"], - "tokenization_cpmant": ["CpmAntTokenizer"], -} - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_cpmant"] = [ - "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST", - "CpmAntForCausalLM", - "CpmAntModel", - "CpmAntPreTrainedModel", - ] - - -if TYPE_CHECKING: - from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig - from .tokenization_cpmant import CpmAntTokenizer - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_cpmant import ( - CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, - CpmAntForCausalLM, - CpmAntModel, - CpmAntPreTrainedModel, - ) - - -else: - import sys - - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/spaces/yl12053/so-vits-4.1-Grass-Wonder/vencoder/dphubert/utils/import_huggingface_wavlm.py b/spaces/yl12053/so-vits-4.1-Grass-Wonder/vencoder/dphubert/utils/import_huggingface_wavlm.py deleted file mode 100644 index 1a2ea31c14df5450298ddc5e1f56c98769144828..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Grass-Wonder/vencoder/dphubert/utils/import_huggingface_wavlm.py +++ /dev/null @@ -1,129 +0,0 @@ -"""Import Hugging Face transformers's wav2vec2.0 pretrained weights to torchaudios's format. 
- -Originally from: -https://github.com/pytorch/audio/blob/main/torchaudio/models/wav2vec2/utils/import_huggingface.py - -""" - -import logging -from typing import Any, Dict - -from torch.nn import Module - -from ..model import wav2vec2_model, Wav2Vec2Model, wavlm_model - -_LG = logging.getLogger(__name__) - - -def _get_config(cfg): - config = { - "extractor_mode": f"{cfg.feat_extract_norm}_norm", - "extractor_conv_layer_config": list(zip(cfg.conv_dim, cfg.conv_kernel, cfg.conv_stride)), - "extractor_conv_bias": cfg.conv_bias, - "encoder_embed_dim": cfg.hidden_size, - "encoder_projection_dropout": cfg.feat_proj_dropout, - "encoder_pos_conv_kernel": cfg.num_conv_pos_embeddings, - "encoder_pos_conv_groups": cfg.num_conv_pos_embedding_groups, - "encoder_num_layers": cfg.num_hidden_layers, - "encoder_num_heads": cfg.num_attention_heads, - "encoder_attention_dropout": cfg.attention_dropout, - "encoder_ff_interm_features": cfg.intermediate_size, - "encoder_ff_interm_dropout": cfg.activation_dropout, - "encoder_dropout": cfg.hidden_dropout, - "encoder_layer_norm_first": cfg.do_stable_layer_norm, - "encoder_layer_drop": cfg.layerdrop, - } - return config - - -def _get_config_wavlm(cfg): - config = { - "extractor_mode": f"{cfg.feat_extract_norm}_norm", - "extractor_conv_layer_config": list(zip(cfg.conv_dim, cfg.conv_kernel, cfg.conv_stride)), - "extractor_conv_bias": cfg.conv_bias, - "encoder_embed_dim": cfg.hidden_size, - "encoder_projection_dropout": cfg.feat_proj_dropout, - "encoder_pos_conv_kernel": cfg.num_conv_pos_embeddings, - "encoder_pos_conv_groups": cfg.num_conv_pos_embedding_groups, - "encoder_num_layers": cfg.num_hidden_layers, - "encoder_use_attention": [True] * cfg.num_hidden_layers, - "encoder_use_feed_forward": [True] * cfg.num_hidden_layers, - "encoder_total_num_heads": [cfg.num_attention_heads for _ in range(cfg.num_hidden_layers)], - "encoder_remaining_heads": [list(range(cfg.num_attention_heads)) for _ in range(cfg.num_hidden_layers)], - "encoder_num_buckets": cfg.num_buckets, - "encoder_max_distance": cfg.max_bucket_distance, - "encoder_attention_dropout": cfg.attention_dropout, - "encoder_ff_interm_features": [cfg.intermediate_size for _ in range(cfg.num_hidden_layers)], - "encoder_ff_interm_dropout": cfg.activation_dropout, - "encoder_dropout": cfg.hidden_dropout, - "encoder_layer_norm_first": cfg.do_stable_layer_norm, - "encoder_layer_drop": cfg.layerdrop, - "normalize_waveform": cfg.feat_extract_norm == "layer", - } - return config - - -def _build(config, original): - is_for_ctc = original.__class__.__name__ in ["Wav2Vec2ForCTC", "WavLMForCTC"] - if is_for_ctc: - aux_num_out = original.config.vocab_size - wav2vec2 = original.wav2vec2 - else: - _LG.warning( - "The model is not an instance of Wav2Vec2ForCTC or WavLMForCTC. " '"lm_head" module is not imported.' 
- ) - aux_num_out = None - wav2vec2 = original - is_wavlm = original.__class__.__name__ in ["WavLMModel", "WavLMForCTC"] - if is_wavlm: - imported = wavlm_model(**config, aux_num_out=aux_num_out) - else: - imported = wav2vec2_model(**config, aux_num_out=aux_num_out) - print(imported.feature_extractor.load_state_dict(wav2vec2.feature_extractor.state_dict(), strict=False)) - print(imported.encoder.feature_projection.load_state_dict(wav2vec2.feature_projection.state_dict(), strict=False)) - encoder_state_dict = wav2vec2.encoder.state_dict() - if is_wavlm: # Rename paramaters of linear transformations for compatibility with the HF model - transform_wavlm_encoder_state(encoder_state_dict, config["encoder_num_layers"]) - print(imported.encoder.transformer.load_state_dict(encoder_state_dict, strict=False)) - if is_for_ctc: - imported.aux.load_state_dict(original.lm_head.state_dict()) - return imported - - -def transform_wavlm_encoder_state(state: Dict[str, Any], encoder_num_layers: int): - """Converts WavLM encoder state from HuggingFace format. In particular, concatenates linear projection weights and - biases to align with the structure of ``torch.nn.MultiheadAttention``. - """ - pass - - -def import_huggingface_model(original: Module) -> Wav2Vec2Model: - """Builds :class:`Wav2Vec2Model` from the corresponding model object of - `Transformers `_. - - Args: - original (torch.nn.Module): An instance of ``Wav2Vec2ForCTC`` from ``transformers``. - - Returns: - Wav2Vec2Model: Imported model. - - Example - >>> from torchaudio.models.wav2vec2.utils import import_huggingface_model - >>> - >>> original = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") - >>> model = import_huggingface_model(original) - >>> - >>> waveforms, _ = torchaudio.load("audio.wav") - >>> logits, _ = model(waveforms) - """ - _LG.info("Importing model.") - _LG.info("Loading model configuration.") - is_wavlm = original.__class__.__name__ in ["WavLMModel", "WavLMForCTC"] - if is_wavlm: - config = _get_config_wavlm(original.config) - else: - config = _get_config(original.config) - _LG.debug(" - config: %s", config) - _LG.info("Building model.") - imported = _build(config, original) - return imported diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/structures/keypoints.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/structures/keypoints.py deleted file mode 100644 index d0ee8724ac42087e4ec770a3dfb8e040a62b4c15..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/detectron2/structures/keypoints.py +++ /dev/null @@ -1,239 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import numpy as np -from typing import Any, List, Tuple, Union -import torch -from torch.nn import functional as F - - -class Keypoints: - """ - Stores keypoint **annotation** data. GT Instances have a `gt_keypoints` property - containing the x,y location and visibility flag of each keypoint. This tensor has shape - (N, K, 3) where N is the number of instances and K is the number of keypoints per instance. - - The visibility flag follows the COCO format and must be one of three integers: - - * v=0: not labeled (in which case x=y=0) - * v=1: labeled but not visible - * v=2: labeled and visible - """ - - def __init__(self, keypoints: Union[torch.Tensor, np.ndarray, List[List[float]]]): - """ - Arguments: - keypoints: A Tensor, numpy array, or list of the x, y, and visibility of each keypoint. 
- The shape should be (N, K, 3) where N is the number of - instances, and K is the number of keypoints per instance. - """ - device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device("cpu") - keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device) - assert keypoints.dim() == 3 and keypoints.shape[2] == 3, keypoints.shape - self.tensor = keypoints - - def __len__(self) -> int: - return self.tensor.size(0) - - def to(self, *args: Any, **kwargs: Any) -> "Keypoints": - return type(self)(self.tensor.to(*args, **kwargs)) - - @property - def device(self) -> torch.device: - return self.tensor.device - - def to_heatmap(self, boxes: torch.Tensor, heatmap_size: int) -> torch.Tensor: - """ - Convert keypoint annotations to a heatmap of one-hot labels for training, - as described in :paper:`Mask R-CNN`. - - Arguments: - boxes: Nx4 tensor, the boxes to draw the keypoints to - - Returns: - heatmaps: - A tensor of shape (N, K), each element is integer spatial label - in the range [0, heatmap_size**2 - 1] for each keypoint in the input. - valid: - A tensor of shape (N, K) containing whether each keypoint is in the roi or not. - """ - return _keypoints_to_heatmap(self.tensor, boxes, heatmap_size) - - def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Keypoints": - """ - Create a new `Keypoints` by indexing on this `Keypoints`. - - The following usage are allowed: - - 1. `new_kpts = kpts[3]`: return a `Keypoints` which contains only one instance. - 2. `new_kpts = kpts[2:10]`: return a slice of key points. - 3. `new_kpts = kpts[vector]`, where vector is a torch.ByteTensor - with `length = len(kpts)`. Nonzero elements in the vector will be selected. - - Note that the returned Keypoints might share storage with this Keypoints, - subject to Pytorch's indexing semantics. - """ - if isinstance(item, int): - return Keypoints([self.tensor[item]]) - return Keypoints(self.tensor[item]) - - def __repr__(self) -> str: - s = self.__class__.__name__ + "(" - s += "num_instances={})".format(len(self.tensor)) - return s - - @staticmethod - def cat(keypoints_list: List["Keypoints"]) -> "Keypoints": - """ - Concatenates a list of Keypoints into a single Keypoints - - Arguments: - keypoints_list (list[Keypoints]) - - Returns: - Keypoints: the concatenated Keypoints - """ - assert isinstance(keypoints_list, (list, tuple)) - assert len(keypoints_list) > 0 - assert all(isinstance(keypoints, Keypoints) for keypoints in keypoints_list) - - cat_kpts = type(keypoints_list[0])( - torch.cat([kpts.tensor for kpts in keypoints_list], dim=0) - ) - return cat_kpts - - -# TODO make this nicer, this is a direct translation from C2 (but removing the inner loop) -def _keypoints_to_heatmap( - keypoints: torch.Tensor, rois: torch.Tensor, heatmap_size: int -) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Encode keypoint locations into a target heatmap for use in SoftmaxWithLoss across space. - - Maps keypoints from the half-open interval [x1, x2) on continuous image coordinates to the - closed interval [0, heatmap_size - 1] on discrete image coordinates. We use the - continuous-discrete conversion from Heckbert 1990 ("What is the coordinate of a pixel?"): - d = floor(c) and c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate. - - Arguments: - keypoints: tensor of keypoint locations in of shape (N, K, 3). - rois: Nx4 tensor of rois in xyxy format - heatmap_size: integer side length of square heatmap. 
- - Returns: - heatmaps: A tensor of shape (N, K) containing an integer spatial label - in the range [0, heatmap_size**2 - 1] for each keypoint in the input. - valid: A tensor of shape (N, K) containing whether each keypoint is in - the roi or not. - """ - - if rois.numel() == 0: - return rois.new().long(), rois.new().long() - offset_x = rois[:, 0] - offset_y = rois[:, 1] - scale_x = heatmap_size / (rois[:, 2] - rois[:, 0]) - scale_y = heatmap_size / (rois[:, 3] - rois[:, 1]) - - offset_x = offset_x[:, None] - offset_y = offset_y[:, None] - scale_x = scale_x[:, None] - scale_y = scale_y[:, None] - - x = keypoints[..., 0] - y = keypoints[..., 1] - - x_boundary_inds = x == rois[:, 2][:, None] - y_boundary_inds = y == rois[:, 3][:, None] - - x = (x - offset_x) * scale_x - x = x.floor().long() - y = (y - offset_y) * scale_y - y = y.floor().long() - - x[x_boundary_inds] = heatmap_size - 1 - y[y_boundary_inds] = heatmap_size - 1 - - valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size) - vis = keypoints[..., 2] > 0 - valid = (valid_loc & vis).long() - - lin_ind = y * heatmap_size + x - heatmaps = lin_ind * valid - - return heatmaps, valid - - -@torch.jit.script_if_tracing -def heatmaps_to_keypoints(maps: torch.Tensor, rois: torch.Tensor) -> torch.Tensor: - """ - Extract predicted keypoint locations from heatmaps. - - Args: - maps (Tensor): (#ROIs, #keypoints, POOL_H, POOL_W). The predicted heatmap of logits for - each ROI and each keypoint. - rois (Tensor): (#ROIs, 4). The box of each ROI. - - Returns: - Tensor of shape (#ROIs, #keypoints, 4) with the last dimension corresponding to - (x, y, logit, score) for each keypoint. - - When converting discrete pixel indices in an NxN image to a continuous keypoint coordinate, - we maintain consistency with :meth:`Keypoints.to_heatmap` by using the conversion from - Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a continuous coordinate. - """ - # The decorator use of torch.no_grad() was not supported by torchscript. 
- # https://github.com/pytorch/pytorch/issues/44768 - maps = maps.detach() - rois = rois.detach() - - offset_x = rois[:, 0] - offset_y = rois[:, 1] - - widths = (rois[:, 2] - rois[:, 0]).clamp(min=1) - heights = (rois[:, 3] - rois[:, 1]).clamp(min=1) - widths_ceil = widths.ceil() - heights_ceil = heights.ceil() - - num_rois, num_keypoints = maps.shape[:2] - xy_preds = maps.new_zeros(rois.shape[0], num_keypoints, 4) - - width_corrections = widths / widths_ceil - height_corrections = heights / heights_ceil - - keypoints_idx = torch.arange(num_keypoints, device=maps.device) - - for i in range(num_rois): - outsize = (int(heights_ceil[i]), int(widths_ceil[i])) - roi_map = F.interpolate( - maps[[i]], size=outsize, mode="bicubic", align_corners=False - ).squeeze( - 0 - ) # #keypoints x H x W - - # softmax over the spatial region - max_score, _ = roi_map.view(num_keypoints, -1).max(1) - max_score = max_score.view(num_keypoints, 1, 1) - tmp_full_resolution = (roi_map - max_score).exp_() - tmp_pool_resolution = (maps[i] - max_score).exp_() - # Produce scores over the region H x W, but normalize with POOL_H x POOL_W, - # so that the scores of objects of different absolute sizes will be more comparable - roi_map_scores = tmp_full_resolution / tmp_pool_resolution.sum((1, 2), keepdim=True) - - w = roi_map.shape[2] - pos = roi_map.view(num_keypoints, -1).argmax(1) - - x_int = pos % w - y_int = (pos - x_int) // w - - assert ( - roi_map_scores[keypoints_idx, y_int, x_int] - == roi_map_scores.view(num_keypoints, -1).max(1)[0] - ).all() - - x = (x_int.float() + 0.5) * width_corrections[i] - y = (y_int.float() + 0.5) * height_corrections[i] - - xy_preds[i, :, 0] = x + offset_x[i] - xy_preds[i, :, 1] = y + offset_y[i] - xy_preds[i, :, 2] = roi_map[keypoints_idx, y_int, x_int] - xy_preds[i, :, 3] = roi_map_scores[keypoints_idx, y_int, x_int] - - return xy_preds diff --git a/spaces/yueranseo/mygpt/modules/models/configuration_moss.py b/spaces/yueranseo/mygpt/modules/models/configuration_moss.py deleted file mode 100644 index 9bad4396ecea6578c1628732d0ef077d8964d45d..0000000000000000000000000000000000000000 --- a/spaces/yueranseo/mygpt/modules/models/configuration_moss.py +++ /dev/null @@ -1,118 +0,0 @@ -""" Moss model configuration""" - -from transformers.utils import logging -from transformers.configuration_utils import PretrainedConfig - - -logger = logging.get_logger(__name__) - - -class MossConfig(PretrainedConfig): - r""" - This is the configuration class to store the configuration of a [`MossModel`]. It is used to instantiate a - Moss model according to the specified arguments, defining the model architecture. Instantiating a configuration - with the defaults will yield a similar configuration to that of the Moss - [fnlp/moss-moon-003-base](https://huggingface.co/fnlp/moss-moon-003-base) architecture. Configuration objects - inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from - [`PretrainedConfig`] for more information. - - Args: - vocab_size (`int`, *optional*, defaults to 107008): - Vocabulary size of the Moss model. Defines the number of different tokens that can be represented by the - `inputs_ids` passed when calling [`MossModel`]. - n_positions (`int`, *optional*, defaults to 2048): - The maximum sequence length that this model might ever be used with. Typically set this to something large - just in case (e.g., 512 or 1024 or 2048). - n_embd (`int`, *optional*, defaults to 4096): - Dimensionality of the embeddings and hidden states. 
- n_layer (`int`, *optional*, defaults to 28): - Number of hidden layers in the Transformer encoder. - n_head (`int`, *optional*, defaults to 16): - Number of attention heads for each attention layer in the Transformer encoder. - rotary_dim (`int`, *optional*, defaults to 64): - Number of dimensions in the embedding that Rotary Position Embedding is applied to. - n_inner (`int`, *optional*, defaults to None): - Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd - activation_function (`str`, *optional*, defaults to `"gelu_new"`): - Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`. - resid_pdrop (`float`, *optional*, defaults to 0.1): - The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. - embd_pdrop (`int`, *optional*, defaults to 0.1): - The dropout ratio for the embeddings. - attn_pdrop (`float`, *optional*, defaults to 0.1): - The dropout ratio for the attention. - layer_norm_epsilon (`float`, *optional*, defaults to 1e-5): - The epsilon to use in the layer normalization layers. - initializer_range (`float`, *optional*, defaults to 0.02): - The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - use_cache (`bool`, *optional*, defaults to `True`): - Whether or not the model should return the last key/values attentions (not used by all models). - - Example: - - ```python - >>> from modeling_moss import MossModel - >>> from configuration_moss import MossConfig - - >>> # Initializing a moss-moon-003-base configuration - >>> configuration = MossConfig() - - >>> # Initializing a model (with random weights) from the configuration - >>> model = MossModel(configuration) - - >>> # Accessing the model configuration - >>> configuration = model.config - ```""" - - model_type = "moss" - attribute_map = { - "max_position_embeddings": "n_positions", - "hidden_size": "n_embd", - "num_attention_heads": "n_head", - "num_hidden_layers": "n_layer", - } - - def __init__( - self, - vocab_size=107008, - n_positions=2048, - n_ctx=2048, - n_embd=4096, - n_layer=28, - n_head=16, - rotary_dim=64, - n_inner=None, - activation_function="gelu_new", - resid_pdrop=0.0, - embd_pdrop=0.0, - attn_pdrop=0.0, - layer_norm_epsilon=1e-5, - initializer_range=0.02, - use_cache=True, - bos_token_id=106028, - eos_token_id=106068, - tie_word_embeddings=False, - **kwargs, - ): - self.vocab_size = vocab_size - self.n_ctx = n_ctx - self.n_positions = n_positions - self.n_embd = n_embd - self.n_layer = n_layer - self.n_head = n_head - self.n_inner = n_inner - self.rotary_dim = rotary_dim - self.activation_function = activation_function - self.resid_pdrop = resid_pdrop - self.embd_pdrop = embd_pdrop - self.attn_pdrop = attn_pdrop - self.layer_norm_epsilon = layer_norm_epsilon - self.initializer_range = initializer_range - self.use_cache = use_cache - - self.bos_token_id = bos_token_id - self.eos_token_id = eos_token_id - - super().__init__( - bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs - ) diff --git a/spaces/yzha/ctc_eval/app.py b/spaces/yzha/ctc_eval/app.py deleted file mode 100644 index 0c45f9e485e65c9f2f045db76182888537a22428..0000000000000000000000000000000000000000 --- a/spaces/yzha/ctc_eval/app.py +++ /dev/null @@ -1,6 +0,0 @@ -import evaluate -from evaluate.utils import launch_gradio_widget - - -module = evaluate.load("yzha/ctc_eval") -launch_gradio_widget(module) \ No newline at end of file diff --git 
a/spaces/zetavg/LLaMA-LoRA-Tuner-UI-Demo/llama_lora/ui/finetune/previewing.py b/spaces/zetavg/LLaMA-LoRA-Tuner-UI-Demo/llama_lora/ui/finetune/previewing.py deleted file mode 100644 index 59f01d61db5c885e8e0bb695c4591fdacc046ab8..0000000000000000000000000000000000000000 --- a/spaces/zetavg/LLaMA-LoRA-Tuner-UI-Demo/llama_lora/ui/finetune/previewing.py +++ /dev/null @@ -1,155 +0,0 @@ -import os -import traceback -import re -import gradio as gr -import math - -from ...config import Config -from ...utils.prompter import Prompter - -from .data_processing import get_data_from_input - - -def refresh_preview( - template, - load_dataset_from, - dataset_from_data_dir, - dataset_text, - dataset_text_format, - dataset_plain_text_input_variables_separator, - dataset_plain_text_input_and_output_separator, - dataset_plain_text_data_separator, - max_preview_count, -): - try: - prompter = Prompter(template) - variable_names = prompter.get_variable_names() - - data = get_data_from_input( - load_dataset_from=load_dataset_from, - dataset_text=dataset_text, - dataset_text_format=dataset_text_format, - dataset_plain_text_input_variables_separator=dataset_plain_text_input_variables_separator, - dataset_plain_text_input_and_output_separator=dataset_plain_text_input_and_output_separator, - dataset_plain_text_data_separator=dataset_plain_text_data_separator, - dataset_from_data_dir=dataset_from_data_dir, - prompter=prompter - ) - - train_data = prompter.get_train_data_from_dataset( - data, max_preview_count) - - train_data = train_data[:max_preview_count] - - data_count = len(data) - - headers = ['Prompt', 'Completion'] - preview_data = [ - [item.get("prompt", ""), item.get("completion", "")] - for item in train_data - ] - - if not prompter.template_module: - variable_names = prompter.get_variable_names() - headers += [f"Variable: {variable_name}" for variable_name in variable_names] - variables = [ - [item.get(f"_var_{name}", "") for name in variable_names] - for item in train_data - ] - preview_data = [d + v for d, v in zip(preview_data, variables)] - - preview_info_message = f"The dataset has about {data_count} item(s)." - if data_count > max_preview_count: - preview_info_message += f" Previewing the first {max_preview_count}." - - info_message = f"about {data_count} item(s)." 
- if load_dataset_from == "Data Dir": - info_message = "This dataset contains about " + info_message - update_message = gr.Markdown.update(info_message, visible=True) - - return ( - gr.Dataframe.update( - value={'data': preview_data, 'headers': headers}), - gr.Markdown.update(preview_info_message), - update_message, - update_message - ) - except Exception as e: - update_message = gr.Markdown.update( - f"Error: {e}.", - visible=True) - return ( - gr.Dataframe.update(value={'data': [], 'headers': []}), - gr.Markdown.update( - "Set the dataset in the \"Prepare\" tab, then preview it here."), - update_message, - update_message - ) - - -def refresh_dataset_items_count( - template, - load_dataset_from, - dataset_from_data_dir, - dataset_text, - dataset_text_format, - dataset_plain_text_input_variables_separator, - dataset_plain_text_input_and_output_separator, - dataset_plain_text_data_separator, - max_preview_count, -): - try: - prompter = Prompter(template) - - data = get_data_from_input( - load_dataset_from=load_dataset_from, - dataset_text=dataset_text, - dataset_text_format=dataset_text_format, - dataset_plain_text_input_variables_separator=dataset_plain_text_input_variables_separator, - dataset_plain_text_input_and_output_separator=dataset_plain_text_input_and_output_separator, - dataset_plain_text_data_separator=dataset_plain_text_data_separator, - dataset_from_data_dir=dataset_from_data_dir, - prompter=prompter - ) - - train_data = prompter.get_train_data_from_dataset( - data) - data_count = len(train_data) - - preview_info_message = f"The dataset contains {data_count} item(s)." - if data_count > max_preview_count: - preview_info_message += f" Previewing the first {max_preview_count}." - - info_message = f"{data_count} item(s)." - if load_dataset_from == "Data Dir": - info_message = "This dataset contains " + info_message - update_message = gr.Markdown.update(info_message, visible=True) - - return ( - gr.Markdown.update(preview_info_message), - update_message, - update_message, - gr.Slider.update(maximum=math.floor(data_count / 2)) - ) - except Exception as e: - update_message = gr.Markdown.update( - f"Error: {e}.", - visible=True) - - trace = traceback.format_exc() - traces = [s.strip() for s in re.split("\n * File ", trace)] - traces_to_show = [s for s in traces if os.path.join( - Config.data_dir, "templates") in s] - traces_to_show = [re.sub(" *\n *", ": ", s) for s in traces_to_show] - if len(traces_to_show) > 0: - update_message = gr.Markdown.update( - f"Error: {e} ({','.join(traces_to_show)}).", - visible=True) - - return ( - gr.Markdown.update( - "Set the dataset in the \"Prepare\" tab, then preview it here."), - update_message, - update_message, - gr.Slider.update(maximum=1) - ) diff --git a/spaces/zideliu/styledrop/Dockerfile b/spaces/zideliu/styledrop/Dockerfile deleted file mode 100644 index bf878abd0d5656f46312081cd540626a419eb4f5..0000000000000000000000000000000000000000 --- a/spaces/zideliu/styledrop/Dockerfile +++ /dev/null @@ -1,57 +0,0 @@ -FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu22.04 -ENV DEBIAN_FRONTEND=noninteractive -RUN apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y --no-install-recommends \ - git \ - git-lfs \ - wget \ - curl \ - # ffmpeg \ - ffmpeg \ - x264 \ - # python build dependencies \ - build-essential \ - libssl-dev \ - zlib1g-dev \ - libbz2-dev \ - libreadline-dev \ - libsqlite3-dev \ - libncursesw5-dev \ - xz-utils \ - tk-dev \ - libxml2-dev \ - libxmlsec1-dev \ - libffi-dev \ - liblzma-dev && \ - apt-get clean && \ - rm -rf 
/var/lib/apt/lists/* - -RUN useradd -m -u 1000 user -USER user -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:${PATH} -WORKDIR ${HOME}/app - -RUN curl https://pyenv.run | bash -ENV PATH=${HOME}/.pyenv/shims:${HOME}/.pyenv/bin:${PATH} -ENV PYTHON_VERSION=3.8.16 -RUN pyenv install ${PYTHON_VERSION} && \ - pyenv global ${PYTHON_VERSION} && \ - pyenv rehash && \ - pip install --no-cache-dir -U pip setuptools wheel - -RUN pip install --no-cache-dir -U torch==1.13.1 torchvision==0.14.1 -COPY --chown=1000 requirements.txt /tmp/requirements.txt -RUN pip install --no-cache-dir -U -r /tmp/requirements.txt - -COPY --chown=1000 . ${HOME}/app -# RUN cd Tune-A-Video && patch -p1 < ../patch -ENV PYTHONPATH=${HOME}/app \ - PYTHONUNBUFFERED=1 \ - GRADIO_ALLOW_FLAGGING=never \ - GRADIO_NUM_PORTS=1 \ - GRADIO_SERVER_NAME=0.0.0.0 \ - GRADIO_THEME=huggingface \ - SYSTEM=spaces -CMD ["python", "app.py"] \ No newline at end of file
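A rough local usage sketch for the Dockerfile above (not part of the original Space; the image tag is an assumption, and 7860 is Gradio's usual default port, which the GRADIO_SERVER_NAME=0.0.0.0 setting makes reachable from outside the container):
# build the image from the Space checkout, then run it with GPU access (requires the NVIDIA Container Toolkit for --gpus)
docker build -t styledrop-space .
docker run --rm --gpus all -p 7860:7860 styledrop-space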