diff --git a/spaces/101-5/gpt4free/g4f/Provider/Providers/Ails.py b/spaces/101-5/gpt4free/g4f/Provider/Providers/Ails.py deleted file mode 100644 index 1a14b2e9aec50328b7b21d5980bd67c5eaee2b3a..0000000000000000000000000000000000000000 --- a/spaces/101-5/gpt4free/g4f/Provider/Providers/Ails.py +++ /dev/null @@ -1,91 +0,0 @@ -import os -import time -import json -import uuid -import random -import hashlib -import requests - -from ...typing import sha256, Dict, get_type_hints -from datetime import datetime - -url: str = 'https://ai.ls' -model: str = 'gpt-3.5-turbo' -supports_stream = True -needs_auth = False - -class Utils: - def hash(json_data: Dict[str, str]) -> sha256: - - secretKey: bytearray = bytearray([79, 86, 98, 105, 91, 84, 80, 78, 123, 83, - 35, 41, 99, 123, 51, 54, 37, 57, 63, 103, 59, 117, 115, 108, 41, 67, 76]) - - base_string: str = '%s:%s:%s:%s' % ( - json_data['t'], - json_data['m'], - 'WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf', - len(json_data['m']) - ) - - return hashlib.sha256(base_string.encode()).hexdigest() - - def format_timestamp(timestamp: int) -> str: - - e = timestamp - n = e % 10 - r = n + 1 if n % 2 == 0 else n - return str(e - n + r) - - -def _create_completion(model: str, messages: list, temperature: float = 0.6, stream: bool = False, **kwargs): - - headers = { - 'authority': 'api.caipacity.com', - 'accept': '*/*', - 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'authorization': 'Bearer free', - 'client-id': str(uuid.uuid4()), - 'client-v': '0.1.217', - 'content-type': 'application/json', - 'origin': 'https://ai.ls', - 'referer': 'https://ai.ls/', - 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"Windows"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'cross-site', - 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36', - } - - params = { - 'full': 'false', - } - - timestamp = Utils.format_timestamp(int(time.time() * 1000)) - - sig = { - 'd': datetime.now().strftime('%Y-%m-%d'), - 't': timestamp, - 's': Utils.hash({ - 't': timestamp, - 'm': messages[-1]['content']})} - - json_data = json.dumps(separators=(',', ':'), obj={ - 'model': 'gpt-3.5-turbo', - 'temperature': 0.6, - 'stream': True, - 'messages': messages} | sig) - - response = requests.post('https://api.caipacity.com/v1/chat/completions', - headers=headers, data=json_data, stream=True) - - for token in response.iter_lines(): - if b'content' in token: - completion_chunk = json.loads(token.decode().replace('data: ', '')) - token = completion_chunk['choices'][0]['delta'].get('content') - if token != None: - yield token - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Davinci Resolve 18 Activation Key Free Tips Tricks and Warnings.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Davinci Resolve 18 Activation Key Free Tips Tricks and Warnings.md deleted file mode 100644 index 0833d8b9a98d1ebb10059935268c69c392ba23d1..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Davinci Resolve 18 Activation Key Free Tips Tricks and Warnings.md +++ 
/dev/null @@ -1,35 +0,0 @@ -
-

How to Get Davinci Resolve 18 Activation Key Free

-

Davinci Resolve 18 is a powerful video editing program that offers professional features and tools for color grading, audio editing, visual effects, and more. However, to unlock the full potential of this software, you need an activation key that can cost up to $299.

-

But what if you could get a Davinci Resolve 18 activation key for free? Is it possible? And is it legal? In this article, we will answer these questions and show you some ways to get a Davinci Resolve 18 activation key for free without breaking the law or risking your computer's security.

-

davinci resolve 18 activation key free


DOWNLOADhttps://byltly.com/2uKwCT



-

Why Do You Need an Activation Key for Davinci Resolve 18?

-

Davinci Resolve 18 comes in two versions: a free version and a paid version. The free version offers most of the features and tools that you need for basic video editing, such as cutting, trimming, transitions, titles, audio mixing, and more. However, the free version has some limitations and restrictions that can affect your workflow and creativity.

-

For example, the free version of Davinci Resolve 18 does not support:

- -

To access these features and tools, you need to upgrade to the paid version of Davinci Resolve 18, which requires an activation key. An activation key is a unique code that verifies your purchase and allows you to install and use the software on up to two computers.

-

How to Get Davinci Resolve 18 Activation Key Free?

-

There are several ways to get a Davinci Resolve 18 activation key for free, but not all of them are legal or safe. Here are some of the methods that you should avoid:

- -

The only legal and safe way to get a Davinci Resolve 18 activation key for free is to participate in official promotions or giveaways by the software developer or authorized partners. For example, sometimes you can get a free activation key when you buy certain hardware products that are compatible with Davinci Resolve 18, such as cameras, monitors, keyboards, etc.

-

You can also check the official website of Davinci Resolve 18 or follow their social media accounts to stay updated on any upcoming promotions or giveaways that might offer free activation keys. However, these opportunities are rare and limited, so you have to be quick and lucky to get one.

-

Conclusion

-

Davinci Resolve 18 is a great video editing program that offers many features and tools for professional and creative projects. However, to access the full potential of this software, you need an activation key that can be expensive for some users.

-

If you want to get a Davinci Resolve 18 activation key for free, you should avoid illegal or unsafe methods such as downloading cracked versions, using key generators, or sharing keys. Instead, you should look for legal and safe ways such as participating in official promotions or giveaways by the software developer or authorized partners.

-

-

We hope this article has helped you understand how to get a Davinci Resolve 18 activation key for free without breaking the law or risking your security. If you have any questions or suggestions, feel free to leave a comment below.

-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Excel 2016 Always Freezes? Heres How to Solve It.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Excel 2016 Always Freezes? Heres How to Solve It.md deleted file mode 100644 index f2a325db3e9aa0f92a5834906560bc7cbf5a6f14..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Excel 2016 Always Freezes? Heres How to Solve It.md +++ /dev/null @@ -1,47 +0,0 @@ - -

How to Fix Excel 2016 Always Not Responding

-

If you are using Excel 2016 and you encounter the problem of Excel not responding, you might be frustrated and wonder what causes this issue and how to solve it. In this article, we will show you some possible reasons why Excel 2016 keeps not responding and provide you with 8 effective fixes that you can try.

-

excel 2016 always not responding


Download ⚹⚹⚹ https://byltly.com/2uKxjV



-

Why Does Excel 2016 Always Not Respond?

-

There are many factors that can cause Excel 2016 to become unresponsive, such as:

- -

How to Fix Excel 2016 Always Not Responding?

-

Depending on the specific cause of your problem, you can try one or more of the following solutions to fix Excel 2016 when it keeps not responding:

-

Solution 1: Run Excel in Safe Mode

-

One of the easiest ways to troubleshoot Excel 2016 not responding is to run it in safe mode. This will disable any add-ins, customizations, and startup folders that might be causing the issue. To run Excel in safe mode, follow these steps:

-
    -
  1. Press Windows + R keys simultaneously to open the Run box.
  2. -
  3. Type excel.exe /safe in the Run box and press Enter.
  4. -
  5. If Excel starts in safe mode, it means that one of your add-ins or settings is the culprit. You can then disable them one by one to find out which one is causing the problem.
  6. -
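The safe-mode launch in step 3 above can also be scripted. The snippet below is a minimal Python sketch, not part of the original article: it assumes Excel 2016 is installed in its default location (the path is an assumption; adjust it for your machine) and simply starts Excel with the same documented /safe switch.

```python
# Minimal sketch: start Excel 2016 in safe mode from a script (Windows only).
# The install path is an assumption -- adjust it to match your machine.
import subprocess

EXCEL_PATH = r"C:\Program Files (x86)\Microsoft Office\root\Office16\EXCEL.EXE"

# "/safe" starts Excel without add-ins, customizations, or startup folders,
# which is the same thing the Run-box command "excel.exe /safe" does.
subprocess.Popen([EXCEL_PATH, "/safe"])
```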
-

Solution 2: Disable Add-Ins in Excel

-

If you have identified that add-ins are the cause of Excel 2016 not responding, you can disable them in Excel's options. To do this, follow these steps:

-

-
    -
  1. Open Excel in safe mode by following the steps in Solution 1.
  2. -
  3. Select File > Options > Add-ins from the menu bar.
  4. -
  5. Select COM Add-ins from the Manage drop-down menu and click Go.
  6. -
  7. Uncheck the boxes for all add-ins and click OK.
  8. -
  9. Close Excel and reopen it in normal mode (not safe mode).
  10. -
  11. If Excel works normally, it means that one of your add-ins was faulty. You can then enable them one by one to find out which one is causing the problem.
  12. -
-

Solution 3: Update Windows and Office

-

Sometimes, Excel 2016 not responding can be caused by outdated or missing Windows or Office updates. To fix this, you need to check and install the latest updates for your system and applications. To do this, follow these steps:

-
    -
  1. Press Windows + I keys simultaneously to open Settings.
  2. -
  3. Select Update & Security > Windows Update from the sidebar.
  4. -
  5. Click Check for updates and wait for Windows to scan and download any available updates.
  6. -
  7. Restart your computer if prompted.
  8. -
  9. Open Excel and select File > Account > Update Options > Update Now from the menu bar.
  10. -
  11. Wait for Office to install any available updates.
  12. -
  13. Restart Excel and see if the problem is resolved.
  14. -

-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Ff 3 Apk Cracked Ipa.md b/spaces/1gistliPinn/ChatGPT4/Examples/Ff 3 Apk Cracked Ipa.md deleted file mode 100644 index 3154a8fbf77bf8cfe4beea716b7d9b01d79a489e..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Ff 3 Apk Cracked Ipa.md +++ /dev/null @@ -1,6 +0,0 @@ -

ff 3 apk cracked ipa


Downloadhttps://imgfil.com/2uy0Ms



-
-Download FINAL FANTASY III for iPad 1.6.2 cracked ipa for free: FINAL FANTASY 3 is a full 3D classic game, which was first released in 1990. 1fdad05405
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Fondamenti Di Meccanica Teorica E Applicata Mcgraw Hill Pdf Pdf.md b/spaces/1gistliPinn/ChatGPT4/Examples/Fondamenti Di Meccanica Teorica E Applicata Mcgraw Hill Pdf Pdf.md deleted file mode 100644 index 81a337c65d221e4dbdf1cda83acb7ce1ae2720d1..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Fondamenti Di Meccanica Teorica E Applicata Mcgraw Hill Pdf Pdf.md +++ /dev/null @@ -1,6 +0,0 @@ -

fondamenti di meccanica teorica e applicata mcgraw hill pdf pdf


DOWNLOADhttps://imgfil.com/2uy0uy



-
- . 사진참모.  . 검색안 팁: 서식할 만한 플러그인 :  알림: 나오는 비공개 노트북 작업 스토어에 잘못 나오는 비공개 노트북 스토어는 좀 더 더 일관심을 가져야 한다고 생각하는데 비공개 노트북 스토어는 좀 더 일관심을 가져야 한다고 생각하는데 비공개 노트북 스토어는 좀 더 더 일관심을 가져야 한다고 생각하는데 비공개 노트북 스토어는 좀 더 더 일관심을 가져야 한다고 생각하는데 비공개 노트 4fefd39f24
-
-
-

diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Car Simulator 2 Mod APK with Unlimited Money and All Mission Complete.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Car Simulator 2 Mod APK with Unlimited Money and All Mission Complete.md deleted file mode 100644 index 3ac7bb5e3c633929e21f0733d714f406b6ac498a..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Car Simulator 2 Mod APK with Unlimited Money and All Mission Complete.md +++ /dev/null @@ -1,89 +0,0 @@ - -

Car Simulator 2 All Mission Complete Mod APK: A Realistic and Fun Racing Game

-

If you are a fan of racing games, you might have heard of Car Simulator 2, a simulation game where you can drive various cars in a vast open world. You can choose from different game modes, customize your cars, complete missions, and challenge other players online. But what if you want to enjoy all the features of the game without spending any money or time? That's where Car Simulator 2 All Mission Complete Mod APK comes in handy.

-

car simulator 2 all mission complete mod apk


Download File ––– https://urlin.us/2uT1Ev



-

Car Simulator 2 All Mission Complete Mod APK is a modified version of the original game that gives you unlimited money and gold, unlocks all the cars, and lets you complete all the missions with ease. You can download and install this mod apk on your Android device and enjoy a realistic and fun racing experience. In this article, we will tell you more about the features of Car Simulator 2 Mod APK, how to download and install it, and some tips and tricks for playing it.

-

Features of Car Simulator 2 Mod APK

-

Car Simulator 2 Mod APK has many features that make it better than the original version. Here are some of them:

- -

How to Download and Install Car Simulator 2 Mod APK

-

If you want to download and install Car Simulator 2 Mod APK on your Android device, you need to follow these simple steps:

-
    -
  1. Step 1: Download the mod apk file from a trusted source: You can find many websites that offer Car Simulator 2 Mod APK for free, but not all of them are safe and reliable. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you should always download the mod apk file from a trusted source, such as [this one].
  2. -
  3. Step 2: Enable unknown sources on your device: Before you can install the mod apk file, you need to enable unknown sources on your device. This will allow you to install apps that are not from the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning message, but don't worry, it's safe to proceed.
  4. -
  5. Step 3: Install the mod apk file and launch the game: After you have downloaded the mod apk file, locate it in your file manager and tap on it to install it. It may take a few seconds to complete the installation process. Once it's done, you can launch the game and enjoy all the features of Car Simulator 2 Mod APK.
  6. -
-

Tips and Tricks for Playing Car Simulator 2 Mod APK

-

Car Simulator 2 Mod APK is a fun and realistic racing game, but it can also be challenging and tricky at times. Here are some tips and tricks that can help you play better and have more fun:

- -

Conclusion

-

Car Simulator 2 Mod APK is a great racing game that lets you drive various cars in a realistic open world. You can enjoy all the features of the game without spending any money or time by using this mod apk. You can download and install this mod apk easily by following our guide above. You can also improve your skills and have more fun by following our tips and tricks above.

-

car simulator 2 mod apk unlimited money and gold
-car simulator 2 hack apk all cars unlocked
-car simulator 2 mod menu apk download
-car simulator 2 latest version mod apk
-car simulator 2 offline mod apk free download
-car simulator 2 premium mod apk android 1
-car simulator 2 mod apk revdl
-car simulator 2 mod apk happymod
-car simulator 2 mod apk rexdl
-car simulator 2 vip mod apk
-car simulator 2 pro mod apk
-car simulator 2 mega mod apk
-car simulator 2 mod apk no root
-car simulator 2 mod apk obb
-car simulator 2 mod apk unlimited everything
-car simulator 2 mod apk unlimited fuel
-car simulator 2 mod apk unlimited gems
-car simulator 2 mod apk unlimited coins
-car simulator 2 mod apk unlimited gas
-car simulator 2 mod apk unlimited xp
-car simulator 2 mod apk full unlocked
-car simulator 2 mod apk all features unlocked
-car simulator 2 mod apk all levels unlocked
-car simulator 2 mod apk all missions unlocked
-car simulator 2 mod apk all cars free
-car simulator 2 mod apk all cars purchased
-car simulator 2 mod apk all cars available
-car simulator 2 mod apk all cars open
-car simulator 2 mod apk all cars maxed out
-car simulator 2 mod apk all cars upgraded
-car simulator 2 mod apk all cars customized
-car simulator 2 mod apk all cars modified
-car simulator 2 mod apk all cars tuned
-car simulator 2 mod apk all cars supercharged
-car simulator 2 mod apk all cars turbocharged
-car simulator 2 mod apk all cars nitro boosted
-car simulator 2 mod apk all cars drift mode enabled
-car simulator 2 mod apk all cars racing mode enabled
-car simulator 2 mod apk all cars police mode enabled
-car simulator 2 mod apk all cars taxi mode enabled

-


If you are looking for a realistic and fun racing game that gives you unlimited money and gold, unlocks all cars, and lets you complete all missions with ease, then Car Simulator 2 Mod APK is the perfect choice for you. Download it now and enjoy the thrill of driving in a vast open world.

-

FAQs

-

Here are some frequently asked questions about Car Simulator 2 Mod APK:

-

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Blackout Bingo APK Download - Enjoy Live Bingo with Real Rewards.md b/spaces/1phancelerku/anime-remove-background/Blackout Bingo APK Download - Enjoy Live Bingo with Real Rewards.md deleted file mode 100644 index 6fff5dc7d05a7394f7391f9440fa0db2a7a00814..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Blackout Bingo APK Download - Enjoy Live Bingo with Real Rewards.md +++ /dev/null @@ -1,146 +0,0 @@ - -

Blackout Bingo APK Download: How to Play and Win Real Cash Prizes

-

If you love playing bingo and winning real money, you might want to check out Blackout Bingo, one of the most popular and exciting bingo apps on the market. In this article, we will show you how to download and install the Blackout Bingo APK on your Android device, how to play the game and use its features, how to win real cash prizes with it, and what are the reviews and ratings of other users. Let's get started!

-

blackout bingo apk download


Download Ziphttps://jinyurl.com/2uNTxs



-

What is Blackout Bingo?

-

Blackout Bingo is a real money gaming app that lets you play bingo against other players from around the world. You can choose from different game modes, entry fees, and prizes, and use special power-ups to help you get bingos faster. The game is fast-paced, fun, and slightly addictive, as you can compete for real world rewards and cash prizes (where available).

-

Blackout Bingo is powered by Skillz, a platform that matches players based on their skill level and processes payments securely. Skillz also offers a variety of other games in different genres, such as solitaire, dominoes, trivia, bowling, and more. You can use your Skillz account to access all these games and earn loyalty points, bonuses, and rewards.

-

How to Download and Install Blackout Bingo APK?

-

If you want to play Blackout Bingo on your Android device, you will need to download and install the APK file from the official website. Here are the steps you need to follow:

-
    -
  1. Go to https://apkcombo.com/blackout-bingo/com.blackoutbingo2021lite/ on your browser.
  2. -
  3. Click on the green "Download APK" button.
  4. -
  5. Choose a version that is compatible with your device and click on "Download".
  6. -
  7. Wait for the file to download on your device.
  8. -
  9. Open the file manager app on your device and locate the downloaded file.
  10. -
  11. Tap on the file and allow installation from unknown sources if prompted.
  12. -
  13. Follow the instructions on the screen to install the app.
  14. -
  15. Launch the app and sign in with your Skillz account or create a new one.
  16. -
  17. Enjoy playing Blackout Bingo!
  18. -
-

How to Play Blackout Bingo?

-

Playing Blackout Bingo is very easy and fun. Here are some of the basics you need to know:

- -

Blackout Bingo is different from classic bingo and other bingo apps in several ways. For example:

-

blackout bingo apk download free
-blackout bingo apk download latest version
-blackout bingo apk download for android
-blackout bingo apk download 2023
-blackout bingo apk download offline
-blackout bingo apk download mod
-blackout bingo apk download hack
-blackout bingo apk download unlimited money
-blackout bingo apk download no ads
-blackout bingo apk download update
-blackout bingo apk download online
-blackout bingo apk download for pc
-blackout bingo apk download for ios
-blackout bingo apk download for windows 10
-blackout bingo apk download for mac
-blackout bingo apk download for laptop
-blackout bingo apk download for tablet
-blackout bingo apk download for firestick
-blackout bingo apk download for chromebook
-blackout bingo apk download for smart tv
-blackout bingo apk download old version
-blackout bingo apk download new version
-blackout bingo apk download full version
-blackout bingo apk download pro version
-blackout bingo apk download premium version
-blackout bingo apk download cracked version
-blackout bingo apk download patched version
-blackout bingo apk download unlocked version
-blackout bingo apk download original version
-blackout bingo apk download official version
-blackout bingo apk download safe version
-blackout bingo apk download secure version
-blackout bingo apk download virus free version
-blackout bingo apk download malware free version
-blackout bingo apk download ad free version
-blackout bingo apk download best version
-blackout bingo apk download latest update
-blackout bingo apk download 2023 update
-blackout bingo apk download new update
-blackout bingo apk download game app
-blackout bingo apk download mobile game app
-blackout bingo game app free to play
-how to play the game of Blackout Bingo
-Blackout Bingo game app tips and tricks
-Blackout Bingo game app cheats and hacks
-Blackout Bingo game app reviews and ratings
-Blackout Bingo game app features and benefits
-Blackout Bingo game app rewards and bonuses
-Blackout Bingo game app challenges and tournaments

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

How to Win Real Cash Prizes with Blackout Bingo?

-

If you want to win real cash prizes with Blackout Bingo, you will need to play in the game modes that require an entry fee. The entry fee is deducted from your Skillz balance, which you can top up with your credit card, PayPal, or other payment methods. The entry fee varies depending on the game mode, the prize pool, and the number of players. For example, you can play in a Classic mode game with an entry fee of $0.60 and a prize pool of $1.00, or in a Tournament mode game with an entry fee of $5.00 and a prize pool of $18.00.

-

The prize pool is distributed among the top-ranked players according to their score. The score is calculated based on the number of bingos you get, the number of daubs you use, the time left, and the bonus ball boost. The higher your score, the higher your rank, and the higher your prize. You can see your rank and prize on the leaderboard during and after the game.

-

If you want to increase your chances of winning real cash prizes with Blackout Bingo, here are some tips you can use:

- -

Q: How can I contact Blackout Bingo support?

-

A: If you have any issues or questions regarding Blackout Bingo, you can contact the support team by following these steps:

-
    -
  1. Open the app and tap on the menu icon on the top left corner.
  2. -
  3. Tap on "Help" and then on "Contact Us".
  4. -
  5. Fill out the form with your name, email, subject, and message.
  6. -
  7. Tap on "Submit" and wait for a response.
  8. -
-

You can also email the support team directly at support@skillz.com.

-

Q: What are some alternatives to Blackout Bingo?

-

A: If you are looking for some alternatives to Blackout Bingo, you can try out some of these other bingo apps that also offer real money gaming and prizes:

-

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Call of Duty Mobile APK for Huawei and other Android Devices.md b/spaces/1phancelerku/anime-remove-background/Download Call of Duty Mobile APK for Huawei and other Android Devices.md deleted file mode 100644 index b7a524cb5b6635a9f9ab89f60030279c23e5d942..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Call of Duty Mobile APK for Huawei and other Android Devices.md +++ /dev/null @@ -1,166 +0,0 @@ -
-

How to Download Call of Duty Mobile Without Google Play

-

Call of Duty Mobile is one of the most popular and exciting mobile games available today. It offers a variety of game modes, maps, weapons, and characters from the Call of Duty franchise, as well as a 100-player battle royale mode. If you are a fan of first-person shooters, you don't want to miss this game.

-

download call of duty mobile without google play


Download Ziphttps://jinyurl.com/2uNTCR



-

However, what if you don't have access to Google Play on your Android device, or you don't want to use it for some reason? Maybe you have a device that doesn't support Google services, or you want to save some storage space, or you want to avoid automatic updates. Whatever your reason, you can still download and play Call of Duty Mobile without Google Play. All you need is an APK file.

-

What is an APK file and why do you need it?

-

An APK file is an Android application package file that contains all the files and code needed to install and run an app on your Android device. It is similar to an EXE file on Windows or a DMG file on Mac. When you download an app from Google Play, it automatically installs the APK file for you. However, you can also download APK files from other sources and install them manually.
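Because an APK is packaged as a standard ZIP archive, you can peek inside one and see the files and code it bundles. The short Python sketch below is only an illustration added here for clarity; the file name is a placeholder for whatever APK you downloaded.

```python
# Minimal sketch: an APK is a ZIP archive, so you can list the files it bundles.
# "game.apk" is a placeholder name, not an official download.
import zipfile

with zipfile.ZipFile("game.apk") as apk:
    for name in apk.namelist()[:10]:  # e.g. AndroidManifest.xml, classes.dex, resources...
        print(name)
```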

-

There are several benefits of using APK files instead of Google Play. For example, you can:

- -

However, there are also some risks involved in using APK files. For example, you might:

- -

Therefore, you should only download APK files from reputable and trustworthy sources, and always scan them for viruses before installing them. You should also backup your data and be prepared to uninstall any problematic apps if necessary.
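One simple precaution along these lines is to compare the checksum of the file you downloaded against the hash published by the download page, if the site provides one. Here is a small hedged Python sketch; the file name and expected hash are placeholders, not real values.

```python
# Minimal sketch: compare a downloaded APK against a published SHA-256 checksum.
# The file name and expected hash are placeholders, not real values.
import hashlib

def sha256_of(path: str) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "paste-the-checksum-published-by-the-download-page-here"
if sha256_of("game.apk") != expected:
    print("Checksum mismatch - do not install this file")
```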

-

How to enable unknown sources on your Android device

-

Before you can install any APK file on your Android device, you need to enable unknown sources in your security settings. This will allow you to install apps from outside Google Play. Depending on your Android version and device model, the steps may vary slightly, but here is the general procedure:

-

How to install call of duty mobile on huawei devices
-Call of duty mobile apk download for android without google play
-Call of duty mobile season 5 download without play store
-Best alternative app stores to download call of duty mobile
-Call of duty mobile download from official website
-How to update call of duty mobile without google play services
-Call of duty mobile download for pc without emulator
-Call of duty mobile offline mode download without internet
-Call of duty mobile hack mod apk download without verification
-Call of duty mobile download size without hd resources
-Call of duty mobile lite version download without lag
-Call of duty mobile zombies mode download without root
-Call of duty mobile controller support download without ban
-Call of duty mobile beta version download without invitation
-Call of duty mobile chinese version download without vpn
-Call of duty mobile india launch date download without delay
-Call of duty mobile redeem codes download without survey
-Call of duty mobile tips and tricks download without spoilers
-Call of duty mobile best loadout download without confusion
-Call of duty mobile free cp download without generator
-Call of duty mobile wallpapers download without watermark
-Call of duty mobile voice chat download without noise
-Call of duty mobile esports tournament download without registration
-Call of duty mobile clan wars rewards download without hassle
-Call of duty mobile gunsmith feature download without update
-Call of duty mobile best sensitivity settings download without trial and error
-Call of duty mobile ranked mode guide download without losing points
-Call of duty mobile battle royale map download without missing locations
-Call of duty mobile legendary operators download without spending money
-Call of duty mobile weapon skins download without duplicates
-Call of duty mobile killstreaks list download without forgetting any
-Call of duty mobile game modes explained download without getting bored
-Call of duty mobile maps comparison download without confusion
-Call of duty mobile system requirements download without lagging issues
-Call of duty mobile compatible devices list download without compatibility problems
-Call of duty mobile customer support contact download without waiting time
-Call of duty mobile community feedback forum download without spamming messages
-Call of duty mobile patch notes history download without missing any changes
-Call of duty mobile fan art gallery download without stealing any credit
-Call of duty mobile memes collection download without offending anyone
-Call of duty mobile merchandise store link download without paying shipping fees
-Call of duty mobile soundtrack playlist download without ads interruption
-Call of duty mobile trivia quiz game download without cheating answers
-Call of duty mobile fun facts and easter eggs download without spoiling the fun
-Call of duty mobile reviews and ratings download without bias opinions
-Call of duty mobile news and updates blog download without missing any information
-Call of duty mobile social media accounts follow link download without unfollowing anyone
-Call of duty mobile video tutorials and gameplay watch link download without skipping any part
-Call of duty mobile official trailer and teaser watch link download without getting hyped too much

-
    -
  1. Go to your device settings and tap Apps & Notifications (or Apps in some devices).
  2. -
  3. Tap Advanced (or More in some devices) and then Special App Access (or Install Unknown Apps in some devices).
  4. -
  5. Find and tap the app that you will use to download the APK file, such as your browser or a file manager app.
  6. -
  7. Toggle on the option to Allow from this source (or Trust this source in some devices).
  8. -
-

You can also enable unknown sources for all apps by going to your device settings and tapping Security (or Biometrics and Security in some devices). Then, toggle on the option to Install Unknown Apps (or Unknown Sources in some devices).

-

Once you have enabled unknown sources, you can proceed to download and install the Call of Duty Mobile APK file.

-

How to find and download the Call of Duty Mobile APK file

-

The next step is to find and download the Call of Duty Mobile APK file from a reliable website. There are many websites that offer APK files for various apps, but not all of them are safe and trustworthy. Some of them may contain malware, viruses, or fake apps that can harm your device or steal your data. Therefore, you should be careful and do some research before downloading any APK file.

-

One of the most reputable and popular websites for downloading APK files is APK Mirror. It is a website that hosts APK files for thousands of apps, including Call of Duty Mobile. It verifies the authenticity and integrity of the APK files using cryptographic signatures and hashes. It also provides detailed information about the app version, size, date, developer, permissions, and changelog. You can also read user reviews and ratings to get an idea of the quality and performance of the app.

-

To download the Call of Duty Mobile APK file from APK Mirror, follow these steps:

-
    -
  1. Go to https://www.apkmirror.com using your browser.
  2. -
  3. Type Call of Duty Mobile in the search box and tap the magnifying glass icon.
  4. -
  5. Select the app from the search results. It should have a green check mark next to it, indicating that it is verified by APK Mirror.
  6. -
  7. Scroll down and tap the Download button next to the latest version of the app. You can also tap See Available APKs to see older versions or variants of the app.
  8. -
  9. Tap Download APK on the next page. You may see a warning message about downloading APK files. Tap OK to confirm.
  10. -
  11. The download will start automatically. You can see the progress in your notification bar or your browser's download manager.
  12. -
-

Once the download is complete, you can install the Call of Duty Mobile APK file on your Android device.

-

How to install the Call of Duty Mobile APK file on your Android device

-

The final step is to install the Call of Duty Mobile APK file on your Android device. There are two ways to do this: using a file manager app or using a browser.

-

Using a file manager app

-

A file manager app is an app that allows you to access and manage the files and folders on your device's internal or external storage. You can use it to locate and install the Call of Duty Mobile APK file that you downloaded from APK Mirror. Here are some examples of file manager apps that you can use:

- -

To install the Call of Duty Mobile APK file using a file manager app, follow these steps:

-
    -
  1. Open the file manager app on your device and navigate to the folder where you downloaded the Call of Duty Mobile APK file. It is usually in the Downloads folder or in a folder named after your browser or downloader app.
  2. -
  3. Tap on the Call of Duty Mobile APK file. You may see a warning message about installing apps from unknown sources. Tap Settings to go to your security settings and enable unknown sources for your file manager app if you haven't done so already.
  4. -
  5. Tap Install (or Next in some devices) to start the installation process. You may see some information about the app permissions and features. Tap Install (or Next) again to confirm.
  6. -
  7. Wait for the installation to finish. You may see a message that says App installed or Done.
  8. -
  9. Tap Open to launch the Call of Duty Mobile app or Done to exit the installer.
  10. -
-

Using a browser

-

You can also use your browser to install the Call of Duty Mobile APK file that you downloaded from APK Mirror. This method is simpler and faster, but it may not work on some devices or browsers. Here are the steps:

-
    -
  1. Open your browser and go to your browser's download manager. You can usually access it by tapping the menu icon (three dots or lines) and then Downloads.
  2. -
  3. Tap on the Call of Duty Mobile APK file that you downloaded from APK Mirror. You may see a warning message about installing apps from unknown sources. Tap Settings to go to your security settings and enable unknown sources for your browser if you haven't done so already.
  4. -
  5. Tap Install (or Next in some devices) to start the installation process. You may see some information about the app permissions and features. Tap Install (or Next) again to confirm.
  6. -
  7. Wait for the installation to finish. You may see a message that says App installed or Done.
  8. -
  9. Tap Open to launch the Call of Duty Mobile app or Done to exit the installer.
  10. -
-

How to update the Call of Duty Mobile app without Google Play

-

Now that you have installed the Call of Duty Mobile app without Google Play, you might be wondering how to update it when a new version is released. Unlike Google Play, APK files do not update automatically, so you need to do it manually or use an APK installer app.

-

Updating manually

-

To update the Call of Duty Mobile app manually, you need to follow the same steps as downloading and installing it. That is, you need to find and download the latest version of the Call of Duty Mobile APK file from APK Mirror and then install it on your device. However, before you do that, you should uninstall the previous version of the app first. This will prevent any conflicts or errors that might occur due to different versions or signatures of the app. Here are the steps:

-
    -
  1. Go to your device settings and tap Apps & Notifications (or Apps in some devices).
  2. -
  3. Find and tap on the Call of Duty Mobile app.
  4. -
  5. Tap Uninstall and confirm.
  6. -
  7. Go to https://www.apkmirror.com using your browser and search for Call of Duty Mobile.
  8. -
  9. Select and download the latest version of the Call of Duty Mobile APK file.
  10. -
  11. Install the Call of Duty Mobile APK file on your device using a file manager app or a browser as explained above.
  12. -
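If you have a computer with the Android platform tools installed and USB debugging enabled on your phone, the same uninstall-and-reinstall cycle described above can also be driven over adb. The Python sketch below is only an assumption-laden illustration: the APK file name and the package name are not verified values.

```python
# Minimal sketch: update the app over adb instead of on the device itself.
# Assumes the Android platform tools (adb) are installed and USB debugging is enabled.
# The APK file name and package name are assumptions, not verified values.
import subprocess

APK_FILE = "codm-latest.apk"                    # the file you downloaded from APK Mirror
PACKAGE = "com.activision.callofduty.shooter"   # assumed package name for Call of Duty Mobile

# "adb install -r" would reinstall over the existing app and keep its data;
# to mirror the clean uninstall-then-install steps above, run the uninstall first.
subprocess.run(["adb", "uninstall", PACKAGE], check=False)
subprocess.run(["adb", "install", APK_FILE], check=True)
```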
-

Using an APK installer app

-

An APK installer app is an app that helps you install, update, and manage APK files on your device. It can scan your device for existing APK files, check for updates online, download and install new versions, and delete old versions. It can also backup and restore your apps, share them with others, and more. Here are some examples of APK installer apps that you can use:

- -

To update the Call of Duty Mobile app using an APK installer app, follow these steps:

-
    -
  1. Download and install an APK installer app from its official website or from another reputable source.
  2. -
  3. Open the APK installer app and grant it the necessary permissions to access your device storage and install apps.
  4. -
  5. Find and tap on the Call of Duty Mobile app in the list of apps on your device.
  6. -
  7. Tap Update (or Check for updates in some apps) and wait for the app to check for new versions online.
  8. -
  9. If there is a new version available, tap Download (or Install in some apps) and wait for the download to finish.
  10. -
  11. Tap Install (or Open in some apps) to start the installation process. You may see some information about the app permissions and features. Tap Install (or Next) again to confirm.
  12. -
  13. Wait for the installation to finish. You may see a message that says App installed or Done.
  14. -
  15. Tap Open to launch the Call of Duty Mobile app or Done to exit the installer.
  16. -
-

Conclusion

-

In this article, you have learned how to download and install Call of Duty Mobile without Google Play. You have also learned how to update the app without Google Play using manual or automatic methods. By using APK files, you can enjoy this amazing game on your Android device even if you don't have access to Google Play or don't want to use it. However, you should also be aware of the risks and challenges involved in using APK files, such as security, compatibility, stability, and legality issues. Therefore, you should always download APK files from reputable sources, scan them for viruses, backup your data, and uninstall any problematic apps if needed.

-

We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Happy gaming!

-

FAQs

-

Here are some frequently asked questions related to the topic of this article:

-
    -
  1. Is Call of Duty Mobile free to play?
    Yes, Call of Duty Mobile is free to play. However, it also offers in-app purchases that allow you to buy items such as skins, weapons, crates, battle passes, and more. You can also earn these items by playing the game and completing missions and challenges.
  2. -
  3. Is Call of Duty Mobile compatible with my device?
    Call of Duty Mobile requires Android 4.3 or higher and at least 2 GB of RAM to run smoothly. You can check your device specifications by going to your device settings and tapping About Phone (or About Device in some devices). You can also check the compatibility of the app by going to its page on APK Mirror and tapping Compatibility Test.
  4. -
  5. Is Call of Duty Mobile safe to play without Google Play?
    Call of Duty Mobile is safe to play without Google Play as long as you download and install the APK file from a reliable source like APK Mirror. However, you should also be careful about the permissions and features that the app requests and uses. For example, the app may access your location, contacts, camera, microphone, storage, network, and more. You can review and manage these permissions by going to your device settings and tapping Apps & Notifications (or Apps in some devices), then finding and tapping on the Call of Duty Mobile app.
  6. -
  7. Is Call of Duty Mobile legal to play without Google Play?
    Call of Duty Mobile is legal to play without Google Play as long as you don't violate any terms of service or policies of your device manufacturer, carrier, or country. However, you should also be aware that some features or services that require Google Play may not work properly or at all. For example, you may not be able to sign in with your Google account, use Google Play Games services, receive push notifications, or access some in-app purchases.
  8. -
  9. How can I contact the developers or support team of Call of Duty Mobile?
    You can contact the developers or support team of Call of Duty Mobile by going to the app's settings and tapping Help (or Support in some devices). You can also visit their official website at https://www.callofduty.com/mobile, their Facebook page at https://www.facebook.com/CallofDutyMobile, their Twitter account at https://twitter.com/PlayCODMobile, or their Reddit community at https://www.reddit.com/r/CallOfDutyMobile.
  10. -

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Sniper 3D Full Mod Apk with All Features Unlocked.md b/spaces/1phancelerku/anime-remove-background/Download Sniper 3D Full Mod Apk with All Features Unlocked.md deleted file mode 100644 index 43c540dcfb2b3c25aab512a87d2ed54c24db1dbf..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Sniper 3D Full Mod Apk with All Features Unlocked.md +++ /dev/null @@ -1,120 +0,0 @@ -
-

Sniper 3D Full Hack APK Download: How to Get Unlimited Coins and Diamonds for Free

-

Are you a fan of sniper games? Do you want to enjoy the thrill of being a professional assassin in a stunning 3D gun game? If yes, then you should try Sniper 3D, one of the most popular and addictive shooting games on the market. But wait, there's more! What if we tell you that you can get unlimited coins and diamonds for free by downloading a hacked version of Sniper 3D? Sounds too good to be true, right? Well, it's not. In this article, we will show you how to download and install Sniper 3D Full Hack APK, a modified version of the game that gives you access to unlimited resources and features. We will also share some tips and tricks for playing Sniper 3D, as well as some alternatives to Sniper 3D Full Hack APK in case you want to try something different. So, without further ado, let's get started!

-

sniper 3d full hack apk download


Download ✯✯✯ https://jinyurl.com/2uNQIh



-

What is Sniper 3D?

-

Sniper 3D is a free-to-play action game developed by Fun Games For Free. It was released in 2014 and has since gained over 500 million downloads on Google Play Store. The game puts you in the shoes of a deadly sniper who has to eliminate high-profile targets and criminals in various missions and scenarios. You can choose from a variety of guns, upgrade your weapons and gear, and customize your appearance. You can also play in different modes, such as offline, PVE, and PVP, and travel to different locations around the world. The game features stunning graphics, realistic physics, intuitive controls, and engaging gameplay that will keep you hooked for hours.

-

Main Features of Sniper 3D

-

Here are some of the main features of Sniper 3D that make it stand out from other sniper games:

- -

Why You Need Coins and Diamonds in Sniper 3D

-

Coins and diamonds are the two main currencies in Sniper 3D. You need them to buy new weapons, upgrade your existing ones, unlock new gear, customize your appearance, and more. You can earn coins and diamonds by completing missions, watching ads, or participating in events. However, these methods are not enough to get you the best weapons and gear in the game. You will need a lot of coins and diamonds to unlock the most powerful and exclusive items in the game. That's why many players look for ways to get unlimited coins and diamonds for free. One of the most popular ways is to download a hacked version of Sniper 3D, also known as Sniper 3D Full Hack APK.

-

What is Sniper 3D Full Hack APK?

-

Sniper 3D Full Hack APK is a modified version of the original Sniper 3D game that gives you access to unlimited coins and diamonds, as well as other features that are not available in the official version. With Sniper 3D Full Hack APK, you can enjoy the following benefits:

-

sniper 3d mod apk unlimited money and diamonds
-sniper 3d hack apk latest version
-sniper 3d full unlocked apk free download
-sniper 3d mod apk offline
-sniper 3d hack apk no root
-sniper 3d mod apk android 1
-sniper 3d full premium apk download
-sniper 3d hack apk ios
-sniper 3d mod apk unlimited coins and gems
-sniper 3d hack apk online
-sniper 3d full pro apk download
-sniper 3d mod apk rexdl
-sniper 3d hack apk without verification
-sniper 3d mod apk all guns unlocked
-sniper 3d full cracked apk download
-sniper 3d mod apk unlimited energy
-sniper 3d hack apk anti ban
-sniper 3d mod apk vip unlocked
-sniper 3d full modded apk download
-sniper 3d mod apk unlimited ammo
-sniper 3d hack apk with obb
-sniper 3d mod apk revdl
-sniper 3d full paid apk download
-sniper 3d mod apk god mode
-sniper 3d hack apk for pc
-sniper 3d mod apk happymod
-sniper 3d full version apk download
-sniper 3d mod apk unlimited everything
-sniper 3d hack apk free shopping
-sniper 3d mod apk no ads

- -

How to Download and Install Sniper 3D Full Hack APK

-

If you want to download and install Sniper 3D Full Hack APK, you need to follow these simple steps:

-
    -
  1. Download Sniper 3D Full Hack APK from a reliable source. You can search for it on Google or use this link to download it directly.
  2. -
  3. Enable Unknown Sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on. This will allow you to install apps from sources other than Google Play Store.
  4. -
  5. Install Sniper 3D Full Hack APK on your device. Locate the downloaded file in your file manager and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to finish.
  6. -
  7. Launch Sniper 3D Full Hack APK on your device. You will see a new icon on your home screen or app drawer with the name Sniper 3D Full Hack. Tap on it to open the game and enjoy unlimited coins and diamonds for free!
  8. -
-

Benefits of Using Sniper 3D Full Hack APK

-

By using Sniper 3D Full Hack APK, you can enjoy many benefits that will enhance your gaming experience. Here are some of them:

- -

Tips and Tricks for Playing Sniper 3D

-

If you want to improve your sniping skills and become a master assassin in Sniper 3D, you should follow these tips and tricks:

-

Master the Art of Sniping

-

To be a successful sniper, you need to master the art of sniping. Here are some tips to help you do that:

- -

Upgrade Your Weapons and Gear

-

To be a more effective sniper, you need to upgrade your weapons and gear. Here are some tips to help you do that:

- -

Complete Missions and Challenges

-

To progress in the game and earn more coins and diamonds, you need to complete missions and challenges. Here are some tips to help you do that:

- -

Play in Different Modes and Locations

-

To have more fun and variety in the game, you can play in different modes and locations. Here are some tips to help you do that:

- -

Alternatives to Sniper 3D Full Hack APK

-

If you are looking for some alternatives to Sniper 3D Full Hack APK, you can try these other sniper games that are also fun and exciting:

-

Sniper Fury

-

Sniper Fury is another free-to-play sniper game developed by Gameloft. It features over 500 missions, stunning graphics, realistic weather effects, and a variety of weapons and gear. You can also join clans, participate in events, and compete with other players online. Sniper Fury is available on Google Play Store.

-

Hitman: Sniper

-

Hitman: Sniper is a premium sniper game developed by Square Enix. It is based on the popular Hitman franchise and features the iconic Agent 47 as the protagonist. You can explore different locations, use stealth and strategy, and eliminate your targets with style. You can also unlock new weapons, abilities, and contracts. Hitman: Sniper is available on Google Play Store.

-

Conclusion

-

Sniper 3D is one of the best sniper games on the market. It offers a thrilling and addictive gameplay that will keep you entertained for hours. However, if you want to enjoy the game without any limitations, you can download Sniper 3D Full Hack APK, a hacked version of the game that gives you unlimited coins and diamonds for free. You can also use our tips and tricks to improve your sniping skills and become a master assassin in Sniper 3D. Alternatively, you can try some other sniper games that are also fun and exciting. We hope you found this article helpful and informative. Happy sniping!

-

FAQs

-

Here are some frequently asked questions about Sniper 3D Full Hack APK:

-

-
-
\ No newline at end of file diff --git a/spaces/A1draw-12196y/anime-ai-detect/README.md b/spaces/A1draw-12196y/anime-ai-detect/README.md deleted file mode 100644 index 952c183fd69ccb1664b4236b6132fc6d0358c7de..0000000000000000000000000000000000000000 --- a/spaces/A1draw-12196y/anime-ai-detect/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Anime Ai Detect -emoji: 🤖 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: true -duplicated_from: saltacc/anime-ai-detect ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ADOPLE/Multi-Doc-Virtual-Chatbot/README.md b/spaces/ADOPLE/Multi-Doc-Virtual-Chatbot/README.md deleted file mode 100644 index f6a97bccfac1d6c152eb380a3e60d31d29d995d0..0000000000000000000000000000000000000000 --- a/spaces/ADOPLE/Multi-Doc-Virtual-Chatbot/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Multi Doc Chatbot -emoji: 🏃 -colorFrom: green -colorTo: blue -sdk: gradio -sdk_version: 3.38.0 -app_file: app.py -pinned: false -duplicated_from: hudsonhayes/Multi-Doc-Virtual-Chatbot ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AI-Zero-to-Hero/09-SL-Live-RealTime-Dashboard/README.md b/spaces/AI-Zero-to-Hero/09-SL-Live-RealTime-Dashboard/README.md deleted file mode 100644 index f422f32a49c38032183dea2cfcf353954f7d90d0..0000000000000000000000000000000000000000 --- a/spaces/AI-Zero-to-Hero/09-SL-Live-RealTime-Dashboard/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 09 SL Live Real Time Dashboard Timeseries -emoji: 😻 -colorFrom: gray -colorTo: yellow -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AIConsultant/MusicGen/audiocraft/modules/rope.py b/spaces/AIConsultant/MusicGen/audiocraft/modules/rope.py deleted file mode 100644 index 503e6748df2bb72b3c864c20b37cba5498ffdd21..0000000000000000000000000000000000000000 --- a/spaces/AIConsultant/MusicGen/audiocraft/modules/rope.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import typing as tp - -from torch import nn -import torch - - -class XPos(nn.Module): - """Length-extrapolatable positional embedding (xPos) from [Sun et al 2022](https://arxiv.org/abs/2212.10554v1). - This applies an exponential decay to the RoPE rotation matrix. - - Args: - dim (int): Embedding dimension. - smoothing (float): Smoothing factor applied to the decay rates. - base_scale (int): Base decay rate, given in terms of scaling time. - device (torch.device, optional): Device on which to initialize the module. - dtype (torch.dtype): dtype to use to generate the embedding. 
- """ - def __init__(self, dim: int, smoothing: float = 0.4, base_scale: int = 512, - device=None, dtype: torch.dtype = torch.float32): - super().__init__() - assert dim % 2 == 0 - assert dtype in [torch.float64, torch.float32] - self.dtype = dtype - self.base_scale = base_scale - - half_dim = dim // 2 - adim = torch.arange(half_dim, device=device, dtype=dtype) - decay_rates = (adim / half_dim + smoothing) / (1.0 + smoothing) - self.register_buffer("decay_rates", decay_rates) - self.decay: tp.Optional[torch.Tensor] = None - - def get_decay(self, start: int, end: int): - """Create complex decay tensor, cache values for fast computation.""" - if self.decay is None or end > self.decay.shape[0]: - assert isinstance(self.decay_rates, torch.Tensor) # Satisfy type checker. - idx = torch.arange(end, device=self.decay_rates.device, dtype=self.dtype) - power = idx / self.base_scale - scale = self.decay_rates ** power.unsqueeze(-1) - self.decay = torch.polar(scale, torch.zeros_like(scale)) - return self.decay[start:end] # [T, C/2] - - -class RotaryEmbedding(nn.Module): - """Rotary positional embedding (RoPE) from [Su et al 2022](https://arxiv.org/abs/2104.09864). - - Args: - dim (int): Embedding dimension (twice the number of frequencies). - max_period (float): Maximum period of the rotation frequencies. - xpos (bool): Use xPos, applies an exponential decay to rotation matrix. - scale (float): Scale of positional embedding, set to 0 to deactivate. - device (torch.device, optional): Device on which to initialize the module. - dtype (torch.dtype): dtype to use to generate the embedding. - """ - def __init__(self, dim: int, max_period: float = 10000.0, xpos: bool = False, - scale: float = 1.0, device=None, dtype: torch.dtype = torch.float32): - super().__init__() - assert dim % 2 == 0 - self.scale = scale - assert dtype in [torch.float64, torch.float32] - self.dtype = dtype - - adim = torch.arange(0, dim, 2, device=device, dtype=dtype)[: (dim // 2)] - frequencies = 1.0 / (max_period ** (adim / dim)) - self.register_buffer("frequencies", frequencies) - self.rotation: tp.Optional[torch.Tensor] = None - - self.xpos = XPos(dim, device=device, dtype=dtype) if xpos else None - - def get_rotation(self, start: int, end: int): - """Create complex rotation tensor, cache values for fast computation.""" - if self.rotation is None or end > self.rotation.shape[0]: - assert isinstance(self.frequencies, torch.Tensor) # Satisfy type checker. - idx = torch.arange(end, device=self.frequencies.device, dtype=self.dtype) - angles = torch.outer(idx, self.frequencies) - self.rotation = torch.polar(torch.ones_like(angles), angles) - return self.rotation[start:end] - - def rotate(self, x: torch.Tensor, start: int = 0, invert_decay: bool = False): - """Apply rope rotation to query or key tensor.""" - T = x.shape[1] - rotation = self.get_rotation(start, start + T).unsqueeze(0).unsqueeze(2) - - if self.xpos: - decay = self.xpos.get_decay(start, start + T).unsqueeze(0).unsqueeze(2) - else: - decay = 1.0 - - if invert_decay: - decay = decay ** -1 - - x_complex = torch.view_as_complex(x.to(self.dtype).reshape(*x.shape[:-1], -1, 2)) - scaled_rotation = (rotation * decay) * self.scale + (1.0 - self.scale) - x_out = torch.view_as_real(x_complex * scaled_rotation).flatten(-2) - - return x_out.type_as(x) - - def rotate_qk(self, query: torch.Tensor, key: torch.Tensor, start: int = 0): - """ Apply rope rotation to both query and key tensors. - Supports streaming mode, in which query and key are not expected to have the same shape. 
- In streaming mode, key will be of length [P + C] with P the cached past timesteps, but - query will be [C] (typically C == 1). - - Args: - query (torch.Tensor): Query to rotate. - key (torch.Tensor): Key to rotate. - start (int): Start index of the sequence for time offset. - """ - query_timesteps = query.shape[1] - key_timesteps = key.shape[1] - streaming_offset = key_timesteps - query_timesteps - - query_out = self.rotate(query, start + streaming_offset) - key_out = self.rotate(key, start, invert_decay=True) - - return query_out, key_out diff --git a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/egs/datasets/audio/vctk/pre_align.py b/spaces/AIGC-Audio/AudioGPT/NeuralSeq/egs/datasets/audio/vctk/pre_align.py deleted file mode 100644 index a03b3e12af245fa603403432f4487c53e8b13eab..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/egs/datasets/audio/vctk/pre_align.py +++ /dev/null @@ -1,22 +0,0 @@ -import os - -from data_gen.tts.base_pre_align import BasePreAlign -import glob - - -class VCTKPreAlign(BasePreAlign): - def meta_data(self): - wav_fns = glob.glob(f'{self.raw_data_dir}/wav48/*/*.wav') - for wav_fn in wav_fns: - item_name = os.path.basename(wav_fn)[:-4] - spk = item_name.split("_")[0] - txt_fn = wav_fn.split("/") - txt_fn[-1] = f'{item_name}.txt' - txt_fn[-3] = f'txt' - txt_fn = "/".join(txt_fn) - if os.path.exists(txt_fn) and os.path.exists(wav_fn): - yield item_name, wav_fn, (self.load_txt, txt_fn), spk - - -if __name__ == "__main__": - VCTKPreAlign().process() diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/tts/fs.py b/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/tts/fs.py deleted file mode 100644 index 477cae5e1b3760e80ceacfc6b09e7f6d6a3a61ca..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/tts/fs.py +++ /dev/null @@ -1,183 +0,0 @@ -from copy import deepcopy - -import torch -from torch import nn -import torch.nn.functional as F -from text_to_speech.modules.commons.conv import TextConvEncoder, ConvBlocks -from text_to_speech.modules.commons.layers import Embedding -from text_to_speech.modules.commons.nar_tts_modules import PitchPredictor, DurationPredictor, LengthRegulator -from text_to_speech.modules.commons.rel_transformer import RelTransformerEncoder, BERTRelTransformerEncoder -from text_to_speech.modules.commons.rnn import TacotronEncoder, RNNEncoder, DecoderRNN -from text_to_speech.modules.commons.transformer import FastSpeechEncoder, FastSpeechDecoder -from text_to_speech.modules.commons.wavenet import WN -from text_to_speech.modules.tts.commons.align_ops import clip_mel2token_to_multiple, expand_states -from text_to_speech.utils.audio.pitch.utils import denorm_f0, f0_to_coarse - -FS_ENCODERS = { - 'fft': lambda hp, dict_size: FastSpeechEncoder( - dict_size, hp['hidden_size'], hp['enc_layers'], hp['enc_ffn_kernel_size'], - num_heads=hp['num_heads']), - 'tacotron': lambda hp, dict_size: TacotronEncoder( - hp['hidden_size'], dict_size, hp['hidden_size'], - K=hp['encoder_K'], num_highways=4, dropout=hp['dropout']), - 'tacotron2': lambda hp, dict_size: RNNEncoder(dict_size, hp['hidden_size']), - 'conv': lambda hp, dict_size: TextConvEncoder(dict_size, hp['hidden_size'], hp['hidden_size'], - hp['enc_dilations'], hp['enc_kernel_size'], - layers_in_block=hp['layers_in_block'], - norm_type=hp['enc_dec_norm'], - post_net_kernel=hp.get('enc_post_net_kernel', 3)), - 'rel_fft': lambda hp, dict_size: RelTransformerEncoder( - dict_size, hp['hidden_size'], hp['hidden_size'], - 
hp['ffn_hidden_size'], hp['num_heads'], hp['enc_layers'], - hp['enc_ffn_kernel_size'], hp['dropout'], prenet=hp['enc_prenet'], pre_ln=hp['enc_pre_ln']), -} - -FS_DECODERS = { - 'fft': lambda hp: FastSpeechDecoder( - hp['hidden_size'], hp['dec_layers'], hp['dec_ffn_kernel_size'], hp['num_heads']), - 'rnn': lambda hp: DecoderRNN(hp['hidden_size'], hp['decoder_rnn_dim'], hp['dropout']), - 'conv': lambda hp: ConvBlocks(hp['hidden_size'], hp['hidden_size'], hp['dec_dilations'], - hp['dec_kernel_size'], layers_in_block=hp['layers_in_block'], - norm_type=hp['enc_dec_norm'], dropout=hp['dropout'], - post_net_kernel=hp.get('dec_post_net_kernel', 3)), - 'wn': lambda hp: WN(hp['hidden_size'], kernel_size=5, dilation_rate=1, n_layers=hp['dec_layers'], - is_BTC=True), -} - - -class FastSpeech(nn.Module): - def __init__(self, dict_size, hparams, out_dims=None): - super().__init__() - self.hparams = deepcopy(hparams) - self.enc_layers = hparams['enc_layers'] - self.dec_layers = hparams['dec_layers'] - self.hidden_size = hparams['hidden_size'] - if hparams.get("use_bert") is True: - self.ph_encoder = BERTRelTransformerEncoder(dict_size, hparams['hidden_size'], hparams['hidden_size'], - hparams['ffn_hidden_size'], hparams['num_heads'], hparams['enc_layers'], - hparams['enc_ffn_kernel_size'], hparams['dropout'], prenet=hparams['enc_prenet'], pre_ln=hparams['enc_pre_ln']) - else: - self.ph_encoder = FS_ENCODERS[hparams['encoder_type']](hparams, dict_size) - self.decoder = FS_DECODERS[hparams['decoder_type']](hparams) - self.out_dims = hparams['audio_num_mel_bins'] if out_dims is None else out_dims - self.mel_out = nn.Linear(self.hidden_size, self.out_dims, bias=True) - if hparams['use_spk_id']: - self.spk_id_proj = Embedding(hparams['num_spk'], self.hidden_size) - if hparams['use_spk_embed']: - self.spk_embed_proj = nn.Linear(256, self.hidden_size, bias=True) - predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size - self.dur_predictor = DurationPredictor( - self.hidden_size, - n_chans=predictor_hidden, - n_layers=hparams['dur_predictor_layers'], - dropout_rate=hparams['predictor_dropout'], - kernel_size=hparams['dur_predictor_kernel']) - self.length_regulator = LengthRegulator() - if hparams['use_pitch_embed']: - self.pitch_embed = Embedding(300, self.hidden_size, 0) - self.pitch_predictor = PitchPredictor( - self.hidden_size, n_chans=predictor_hidden, - n_layers=5, dropout_rate=0.1, odim=2, - kernel_size=hparams['predictor_kernel']) - if hparams['dec_inp_add_noise']: - self.z_channels = hparams['z_channels'] - self.dec_inp_noise_proj = nn.Linear(self.hidden_size + self.z_channels, self.hidden_size) - - def forward(self, txt_tokens, mel2ph=None, spk_embed=None, spk_id=None, - f0=None, uv=None, infer=False, **kwargs): - ret = {} - src_nonpadding = (txt_tokens > 0).float()[:, :, None] - style_embed = self.forward_style_embed(spk_embed, spk_id) - - use_bert = self.hparams.get("use_bert") is True - if use_bert: - encoder_out = self.encoder(txt_tokens, bert_feats=kwargs['bert_feats'], ph2word=kwargs['ph2word'], - ret=ret) * src_nonpadding + style_embed - else: - encoder_out = self.encoder(txt_tokens) * src_nonpadding + style_embed - - # add dur - dur_inp = (encoder_out + style_embed) * src_nonpadding - mel2ph = self.forward_dur(dur_inp, mel2ph, txt_tokens, ret) - tgt_nonpadding = (mel2ph > 0).float()[:, :, None] - decoder_inp = expand_states(encoder_out, mel2ph) - - # add pitch embed - if self.hparams['use_pitch_embed']: - pitch_inp = (decoder_inp + style_embed) * 
tgt_nonpadding - decoder_inp = decoder_inp + self.forward_pitch(pitch_inp, f0, uv, mel2ph, ret, encoder_out) - - # decoder input - ret['decoder_inp'] = decoder_inp = (decoder_inp + style_embed) * tgt_nonpadding - if self.hparams['dec_inp_add_noise']: - B, T, _ = decoder_inp.shape - z = kwargs.get('adv_z', torch.randn([B, T, self.z_channels])).to(decoder_inp.device) - ret['adv_z'] = z - decoder_inp = torch.cat([decoder_inp, z], -1) - decoder_inp = self.dec_inp_noise_proj(decoder_inp) * tgt_nonpadding - ret['mel_out'] = self.forward_decoder(decoder_inp, tgt_nonpadding, ret, infer=infer, **kwargs) - return ret - - def forward_style_embed(self, spk_embed=None, spk_id=None): - # add spk embed - style_embed = 0 - if self.hparams['use_spk_embed']: - style_embed = style_embed + self.spk_embed_proj(spk_embed)[:, None, :] - if self.hparams['use_spk_id']: - style_embed = style_embed + self.spk_id_proj(spk_id)[:, None, :] - return style_embed - - def forward_dur(self, dur_input, mel2ph, txt_tokens, ret): - """ - - :param dur_input: [B, T_txt, H] - :param mel2ph: [B, T_mel] - :param txt_tokens: [B, T_txt] - :param ret: - :return: - """ - src_padding = txt_tokens == 0 - if self.hparams['predictor_grad'] != 1: - dur_input = dur_input.detach() + self.hparams['predictor_grad'] * (dur_input - dur_input.detach()) - dur = self.dur_predictor(dur_input, src_padding) - ret['dur'] = dur - if mel2ph is None: - mel2ph = self.length_regulator(dur, src_padding).detach() - ret['mel2ph'] = mel2ph = clip_mel2token_to_multiple(mel2ph, self.hparams['frames_multiple']) - return mel2ph - - def forward_pitch(self, decoder_inp, f0, uv, mel2ph, ret, encoder_out=None): - if self.hparams['pitch_type'] == 'frame': - pitch_pred_inp = decoder_inp - pitch_padding = mel2ph == 0 - else: - pitch_pred_inp = encoder_out - pitch_padding = encoder_out.abs().sum(-1) == 0 - uv = None - if self.hparams['predictor_grad'] != 1: - pitch_pred_inp = pitch_pred_inp.detach() + \ - self.hparams['predictor_grad'] * (pitch_pred_inp - pitch_pred_inp.detach()) - ret['pitch_pred'] = pitch_pred = self.pitch_predictor(pitch_pred_inp) - use_uv = self.hparams['pitch_type'] == 'frame' and self.hparams['use_uv'] - if f0 is None: - f0 = pitch_pred[:, :, 0] - if use_uv: - uv = pitch_pred[:, :, 1] > 0 - f0_denorm = denorm_f0(f0, uv if use_uv else None, pitch_padding=pitch_padding) - pitch = f0_to_coarse(f0_denorm) # start from 0 [B, T_txt] - ret['f0_denorm'] = f0_denorm - ret['f0_denorm_pred'] = denorm_f0( - pitch_pred[:, :, 0], (pitch_pred[:, :, 1] > 0) if use_uv else None, - pitch_padding=pitch_padding) - if self.hparams['pitch_type'] == 'ph': - pitch = torch.gather(F.pad(pitch, [1, 0]), 1, mel2ph) - ret['f0_denorm'] = torch.gather(F.pad(ret['f0_denorm'], [1, 0]), 1, mel2ph) - ret['f0_denorm_pred'] = torch.gather(F.pad(ret['f0_denorm_pred'], [1, 0]), 1, mel2ph) - pitch_embed = self.pitch_embed(pitch) - return pitch_embed - - def forward_decoder(self, decoder_inp, tgt_nonpadding, ret, infer, **kwargs): - x = decoder_inp # [B, T, H] - x = self.decoder(x) - x = self.mel_out(x) - return x * tgt_nonpadding diff --git a/spaces/AISuperheroes/09SL-AI-Image-Music-Video-AIUIUX/README.md b/spaces/AISuperheroes/09SL-AI-Image-Music-Video-AIUIUX/README.md deleted file mode 100644 index 66381f06266e156d7af95197b5959377f6026b00..0000000000000000000000000000000000000000 --- a/spaces/AISuperheroes/09SL-AI-Image-Music-Video-AIUIUX/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 09SL AI Image Music Video AIUIUX -emoji: 🏃 -colorFrom: pink -colorTo: yellow -sdk: streamlit 
-sdk_version: 1.10.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AIZerotoHero-Health4All/01-Speech2Text2Speech/README.md b/spaces/AIZerotoHero-Health4All/01-Speech2Text2Speech/README.md deleted file mode 100644 index b5b65c096d82e4948e6af21db1c8bb4a14f1f13d..0000000000000000000000000000000000000000 --- a/spaces/AIZerotoHero-Health4All/01-Speech2Text2Speech/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 01 Speech2Text2Speech -emoji: 🗣️ -colorFrom: purple -colorTo: yellow -sdk: gradio -sdk_version: 3.9.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AONYLMR/anime-ai-detect/README.md b/spaces/AONYLMR/anime-ai-detect/README.md deleted file mode 100644 index 952c183fd69ccb1664b4236b6132fc6d0358c7de..0000000000000000000000000000000000000000 --- a/spaces/AONYLMR/anime-ai-detect/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Anime Ai Detect -emoji: 🤖 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: true -duplicated_from: saltacc/anime-ai-detect ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Ababababababbababa/Ashaar/poetry_diacritizer/models/tacotron_based.py b/spaces/Ababababababbababa/Ashaar/poetry_diacritizer/models/tacotron_based.py deleted file mode 100644 index 0bbd408e25b485fb80040683658c42ab9d382221..0000000000000000000000000000000000000000 --- a/spaces/Ababababababbababa/Ashaar/poetry_diacritizer/models/tacotron_based.py +++ /dev/null @@ -1,47 +0,0 @@ -from typing import List -from poetry_diacritizer.models.seq2seq import Seq2Seq, Decoder as Seq2SeqDecoder -from poetry_diacritizer.modules.tacotron_modules import CBHG, Prenet -from torch import nn - - -class Tacotron(Seq2Seq): - pass - - -class Encoder(nn.Module): - def __init__( - self, - inp_vocab_size: int, - embedding_dim: int = 512, - use_prenet: bool = True, - prenet_sizes: List[int] = [256, 128], - cbhg_gru_units: int = 128, - cbhg_filters: int = 16, - cbhg_projections: List[int] = [128, 128], - padding_idx: int = 0, - ): - super().__init__() - self.use_prenet = use_prenet - - self.embedding = nn.Embedding( - inp_vocab_size, embedding_dim, padding_idx=padding_idx - ) - if use_prenet: - self.prenet = Prenet(embedding_dim, prenet_depth=prenet_sizes) - self.cbhg = CBHG( - prenet_sizes[-1] if use_prenet else embedding_dim, - cbhg_gru_units, - K=cbhg_filters, - projections=cbhg_projections, - ) - - def forward(self, inputs, input_lengths=None): - - outputs = self.embedding(inputs) - if self.use_prenet: - outputs = self.prenet(outputs) - return self.cbhg(outputs, input_lengths) - - -class Decoder(Seq2SeqDecoder): - pass diff --git a/spaces/Abhilashvj/planogram-compliance/utils/triton.py b/spaces/Abhilashvj/planogram-compliance/utils/triton.py deleted file mode 100644 index 0ceb9692e7759dacc23cb893f038cecc323fd778..0000000000000000000000000000000000000000 --- a/spaces/Abhilashvj/planogram-compliance/utils/triton.py +++ /dev/null @@ -1,105 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" Utils to interact with the Triton Inference Server -""" - -import typing -from urllib.parse import urlparse - -import torch - - -class TritonRemoteModel: - """A wrapper over a model served by the Triton Inference Server. 
It can - be configured to communicate over GRPC or HTTP. It accepts Torch Tensors - as input and returns them as outputs. - """ - - def __init__(self, url: str): - """ - Keyword arguments: - url: Fully qualified address of the Triton server - for e.g. grpc://localhost:8000 - """ - - parsed_url = urlparse(url) - if parsed_url.scheme == "grpc": - from tritonclient.grpc import InferenceServerClient, InferInput - - self.client = InferenceServerClient( - parsed_url.netloc - ) # Triton GRPC client - model_repository = self.client.get_model_repository_index() - self.model_name = model_repository.models[0].name - self.metadata = self.client.get_model_metadata( - self.model_name, as_json=True - ) - - def create_input_placeholders() -> typing.List[InferInput]: - return [ - InferInput( - i["name"], [int(s) for s in i["shape"]], i["datatype"] - ) - for i in self.metadata["inputs"] - ] - - else: - from tritonclient.http import InferenceServerClient, InferInput - - self.client = InferenceServerClient( - parsed_url.netloc - ) # Triton HTTP client - model_repository = self.client.get_model_repository_index() - self.model_name = model_repository[0]["name"] - self.metadata = self.client.get_model_metadata(self.model_name) - - def create_input_placeholders() -> typing.List[InferInput]: - return [ - InferInput( - i["name"], [int(s) for s in i["shape"]], i["datatype"] - ) - for i in self.metadata["inputs"] - ] - - self._create_input_placeholders_fn = create_input_placeholders - - @property - def runtime(self): - """Returns the model runtime""" - return self.metadata.get("backend", self.metadata.get("platform")) - - def __call__( - self, *args, **kwargs - ) -> typing.Union[torch.Tensor, typing.Tuple[torch.Tensor, ...]]: - """Invokes the model. Parameters can be provided via args or kwargs. - args, if provided, are assumed to match the order of inputs of the model. - kwargs are matched with the model input names. - """ - inputs = self._create_inputs(*args, **kwargs) - response = self.client.infer(model_name=self.model_name, inputs=inputs) - result = [] - for output in self.metadata["outputs"]: - tensor = torch.as_tensor(response.as_numpy(output["name"])) - result.append(tensor) - return result[0] if len(result) == 1 else result - - def _create_inputs(self, *args, **kwargs): - args_len, kwargs_len = len(args), len(kwargs) - if not args_len and not kwargs_len: - raise RuntimeError("No inputs provided.") - if args_len and kwargs_len: - raise RuntimeError( - "Cannot specify args and kwargs at the same time" - ) - - placeholders = self._create_input_placeholders_fn() - if args_len: - if args_len != len(placeholders): - raise RuntimeError( - f"Expected {len(placeholders)} inputs, got {args_len}." 
- ) - for input, value in zip(placeholders, args): - input.set_data_from_numpy(value.cpu().numpy()) - else: - for input in placeholders: - value = kwargs[input.name] - input.set_data_from_numpy(value.cpu().numpy()) - return placeholders diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GridSizer.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GridSizer.js deleted file mode 100644 index 5c88ebe3e222bc11812428d37e80d9e1e21eb889..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GridSizer.js +++ /dev/null @@ -1,171 +0,0 @@ -import BaseSizer from '../basesizer/BaseSizer.js'; -import Methods from './Methods.js'; -import GetTotalColumnProportions from './GetTotalColumnProportions.js'; -import GetTotalRowProportions from './GetTotalRowProportions.js'; - -const IsPlainObject = Phaser.Utils.Objects.IsPlainObject; -const GetValue = Phaser.Utils.Objects.GetValue; - -class GridSizer extends BaseSizer { - constructor(scene, x, y, minWidth, minHeight, columnCount, rowCount, columnProportions, rowProportions, config) { - if (IsPlainObject(x)) { - config = x; - x = GetValue(config, 'x', 0); - y = GetValue(config, 'y', 0); - minWidth = GetValue(config, 'width', undefined); - minHeight = GetValue(config, 'height', undefined); - columnCount = GetValue(config, 'column', (config.col || 0)); - rowCount = GetValue(config, 'row', 0); - columnProportions = GetValue(config, 'columnProportions', 0); - rowProportions = GetValue(config, 'rowProportions', 0); - } else if (IsPlainObject(minWidth)) { - config = minWidth; - minWidth = GetValue(config, 'width', undefined); - minHeight = GetValue(config, 'height', undefined); - columnCount = GetValue(config, 'column', (config.col || 0)); - rowCount = GetValue(config, 'row', 0); - columnProportions = GetValue(config, 'columnProportions', 0); - rowProportions = GetValue(config, 'rowProportions', 0); - } else if (IsPlainObject(columnCount)) { - config = columnCount; - columnCount = GetValue(config, 'column', (config.col || 0)); - rowCount = GetValue(config, 'row', 0); - columnProportions = GetValue(config, 'columnProportions', 0); - rowProportions = GetValue(config, 'rowProportions', 0); - } else if (IsPlainObject(columnProportions)) { - config = columnProportions; - columnProportions = GetValue(config, 'columnProportions', 0); - rowProportions = GetValue(config, 'rowProportions', 0); - } - super(scene, x, y, minWidth, minHeight, config); - - this.type = 'rexGridSizer'; - this.resetGrid( - columnCount, rowCount, - columnProportions, rowProportions, - GetValue(config, 'space', undefined) - ); - - this.setIndentLeft( - GetValue(config, 'space.indentLeftOdd', 0), - GetValue(config, 'space.indentLeftEven', 0) - ); - this.setIndentTop( - GetValue(config, 'space.indentTopOdd', 0), - GetValue(config, 'space.indentTopEven', 0) - ); - - this.addChildrenMap('items', this.sizerChildren); - - var createCellContainerCallback = GetValue(config, 'createCellContainerCallback'); - if (createCellContainerCallback) { - for (var y = 0, ycnt = this.rowCount; y < ycnt; y++) { - for (var x = 0, xcnt = this.columnCount; x < xcnt; x++) { - var addConfig = { column: x, row: y }; - var child = createCellContainerCallback(scene, x, y, addConfig); - if (child) { - this.add(child, addConfig); - } - } - } - } - } - - destroy(fromScene) { - // This Game Object has already been destroyed - if (!this.scene || this.ignoreDestroy) { - return; - } - - 
super.destroy(fromScene); - - // More free resources - this.columnProportions = undefined; - this.rowProportions = undefined; - this.columnWidth = undefined; - this.rowHeight = undefined; - } - - setIndentLeft(odd, even) { - this.space.indentLeftOdd = odd; - this.space.indentLeftEven = even; - return this; - } - - setIndentTop(odd, even) { - this.space.indentTopOdd = odd; - this.space.indentTopEven = even; - return this; - } - - setColumnProportion(columnIndex, proportion) { - if (columnIndex >= this.columnProportions.length) { - return this; - } - this.columnProportions[columnIndex] = proportion; - return this; - } - - setRowProportion(rowIndex, proportion) { - if (rowIndex >= this.rowProportions.length) { - return this; - } - this.rowProportions[rowIndex] = proportion; - return this; - } - - get totalColumnProportions() { - if (this._totalColumnProportions === undefined) { - this._totalColumnProportions = GetTotalColumnProportions.call(this); - } - return this._totalColumnProportions; - } - - get totalRowProportions() { - if (this._totalRowProportions === undefined) { - this._totalRowProportions = GetTotalRowProportions.call(this); - } - return this._totalRowProportions; - } - - getChildAt(columnIndex, rowIndex) { - return this.sizerChildren[(rowIndex * this.columnCount) + columnIndex]; - } - - childToGridIndex(child, out) { - if (!child) { - return null; - } - - var index = this.sizerChildren.indexOf(child); - if (index === -1) { - return null; - } - - if (out === undefined) { - out = {}; - } - out.x = index % this.columnCount; - out.y = Math.floor(index / this.columnCount); - return out; - } - - getColumnWidth(columnIndex) { - var colProportion = this.columnProportions[columnIndex]; - var colWidth = (colProportion === 0) ? this.columnWidth[columnIndex] : (colProportion * this.proportionWidthLength); - return colWidth; - } - - getRowHeight(rowIndex) { - var rowProportion = this.rowProportions[rowIndex]; - var rowHeight = (rowProportion === 0) ? this.rowHeight[rowIndex] : (rowProportion * this.proportionHeightLength); - return rowHeight; - } -} - -Object.assign( - GridSizer.prototype, - Methods -); - -export default GridSizer; \ No newline at end of file diff --git a/spaces/AlekseyKorshuk/thin-plate-spline-motion-model/demo.py b/spaces/AlekseyKorshuk/thin-plate-spline-motion-model/demo.py deleted file mode 100644 index 9cc17a53c3f4623fed33367e81e92796a3b6386e..0000000000000000000000000000000000000000 --- a/spaces/AlekseyKorshuk/thin-plate-spline-motion-model/demo.py +++ /dev/null @@ -1,176 +0,0 @@ -import matplotlib -matplotlib.use('Agg') -import sys -import yaml -from argparse import ArgumentParser -from tqdm import tqdm -from scipy.spatial import ConvexHull -import numpy as np -import imageio -from skimage.transform import resize -from skimage import img_as_ubyte -import torch -from modules.inpainting_network import InpaintingNetwork -from modules.keypoint_detector import KPDetector -from modules.dense_motion import DenseMotionNetwork -from modules.avd_network import AVDNetwork - -if sys.version_info[0] < 3: - raise Exception("You must use Python 3 or higher. 
Recommended version is Python 3.9") - -def relative_kp(kp_source, kp_driving, kp_driving_initial): - - source_area = ConvexHull(kp_source['fg_kp'][0].data.cpu().numpy()).volume - driving_area = ConvexHull(kp_driving_initial['fg_kp'][0].data.cpu().numpy()).volume - adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area) - - kp_new = {k: v for k, v in kp_driving.items()} - - kp_value_diff = (kp_driving['fg_kp'] - kp_driving_initial['fg_kp']) - kp_value_diff *= adapt_movement_scale - kp_new['fg_kp'] = kp_value_diff + kp_source['fg_kp'] - - return kp_new - -def load_checkpoints(config_path, checkpoint_path, device): - with open(config_path) as f: - config = yaml.load(f) - - inpainting = InpaintingNetwork(**config['model_params']['generator_params'], - **config['model_params']['common_params']) - kp_detector = KPDetector(**config['model_params']['common_params']) - dense_motion_network = DenseMotionNetwork(**config['model_params']['common_params'], - **config['model_params']['dense_motion_params']) - avd_network = AVDNetwork(num_tps=config['model_params']['common_params']['num_tps'], - **config['model_params']['avd_network_params']) - kp_detector.to(device) - dense_motion_network.to(device) - inpainting.to(device) - avd_network.to(device) - - checkpoint = torch.load(checkpoint_path, map_location=device) - - inpainting.load_state_dict(checkpoint['inpainting_network']) - kp_detector.load_state_dict(checkpoint['kp_detector']) - dense_motion_network.load_state_dict(checkpoint['dense_motion_network']) - if 'avd_network' in checkpoint: - avd_network.load_state_dict(checkpoint['avd_network']) - - inpainting.eval() - kp_detector.eval() - dense_motion_network.eval() - avd_network.eval() - - return inpainting, kp_detector, dense_motion_network, avd_network - - -def make_animation(source_image, driving_video, inpainting_network, kp_detector, dense_motion_network, avd_network, device, mode = 'relative'): - assert mode in ['standard', 'relative', 'avd'] - with torch.no_grad(): - predictions = [] - source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2) - source = source.to(device) - driving = torch.tensor(np.array(driving_video)[np.newaxis].astype(np.float32)).permute(0, 4, 1, 2, 3).to(device) - kp_source = kp_detector(source) - kp_driving_initial = kp_detector(driving[:, :, 0]) - - for frame_idx in tqdm(range(driving.shape[2])): - driving_frame = driving[:, :, frame_idx] - driving_frame = driving_frame.to(device) - kp_driving = kp_detector(driving_frame) - if mode == 'standard': - kp_norm = kp_driving - elif mode=='relative': - kp_norm = relative_kp(kp_source=kp_source, kp_driving=kp_driving, - kp_driving_initial=kp_driving_initial) - elif mode == 'avd': - kp_norm = avd_network(kp_source, kp_driving) - dense_motion = dense_motion_network(source_image=source, kp_driving=kp_norm, - kp_source=kp_source, bg_param = None, - dropout_flag = False) - out = inpainting_network(source, dense_motion) - - predictions.append(np.transpose(out['prediction'].data.cpu().numpy(), [0, 2, 3, 1])[0]) - return predictions - - -def find_best_frame(source, driving, cpu): - import face_alignment - - def normalize_kp(kp): - kp = kp - kp.mean(axis=0, keepdims=True) - area = ConvexHull(kp[:, :2]).volume - area = np.sqrt(area) - kp[:, :2] = kp[:, :2] / area - return kp - - fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=True, - device= 'cpu' if cpu else 'cuda') - kp_source = fa.get_landmarks(255 * source)[0] - kp_source = normalize_kp(kp_source) - norm = 
float('inf') - frame_num = 0 - for i, image in tqdm(enumerate(driving)): - kp_driving = fa.get_landmarks(255 * image)[0] - kp_driving = normalize_kp(kp_driving) - new_norm = (np.abs(kp_source - kp_driving) ** 2).sum() - if new_norm < norm: - norm = new_norm - frame_num = i - return frame_num - - -if __name__ == "__main__": - parser = ArgumentParser() - parser.add_argument("--config", required=True, help="path to config") - parser.add_argument("--checkpoint", default='checkpoints/vox.pth.tar', help="path to checkpoint to restore") - - parser.add_argument("--source_image", default='./assets/source.png', help="path to source image") - parser.add_argument("--driving_video", default='./assets/driving.mp4', help="path to driving video") - parser.add_argument("--result_video", default='./result.mp4', help="path to output") - - parser.add_argument("--img_shape", default="256,256", type=lambda x: list(map(int, x.split(','))), - help='Shape of image, that the model was trained on.') - - parser.add_argument("--mode", default='relative', choices=['standard', 'relative', 'avd'], help="Animate mode: ['standard', 'relative', 'avd'], when use the relative mode to animate a face, use '--find_best_frame' can get better quality result") - - parser.add_argument("--find_best_frame", dest="find_best_frame", action="store_true", - help="Generate from the frame that is the most alligned with source. (Only for faces, requires face_aligment lib)") - - parser.add_argument("--cpu", dest="cpu", action="store_true", help="cpu mode.") - - opt = parser.parse_args() - - source_image = imageio.imread(opt.source_image) - reader = imageio.get_reader(opt.driving_video) - fps = reader.get_meta_data()['fps'] - driving_video = [] - try: - for im in reader: - driving_video.append(im) - except RuntimeError: - pass - reader.close() - - if opt.cpu: - device = torch.device('cpu') - else: - device = torch.device('cuda') - - source_image = resize(source_image, opt.img_shape)[..., :3] - driving_video = [resize(frame, opt.img_shape)[..., :3] for frame in driving_video] - inpainting, kp_detector, dense_motion_network, avd_network = load_checkpoints(config_path = opt.config, checkpoint_path = opt.checkpoint, device = device) - - if opt.find_best_frame: - i = find_best_frame(source_image, driving_video, opt.cpu) - print ("Best frame: " + str(i)) - driving_forward = driving_video[i:] - driving_backward = driving_video[:(i+1)][::-1] - predictions_forward = make_animation(source_image, driving_forward, inpainting, kp_detector, dense_motion_network, avd_network, device = device, mode = opt.mode) - predictions_backward = make_animation(source_image, driving_backward, inpainting, kp_detector, dense_motion_network, avd_network, device = device, mode = opt.mode) - predictions = predictions_backward[::-1] + predictions_forward[1:] - else: - predictions = make_animation(source_image, driving_video, inpainting, kp_detector, dense_motion_network, avd_network, device = device, mode = opt.mode) - - imageio.mimsave(opt.result_video, [img_as_ubyte(frame) for frame in predictions], fps=fps) - diff --git a/spaces/Alfasign/chat-llm-streaming/app.py b/spaces/Alfasign/chat-llm-streaming/app.py deleted file mode 100644 index d27f390295f8215e5feee2c6f0ec2684663d0b38..0000000000000000000000000000000000000000 --- a/spaces/Alfasign/chat-llm-streaming/app.py +++ /dev/null @@ -1,319 +0,0 @@ -import os - -import gradio as gr - -from text_generation import Client, InferenceAPIClient - -openchat_preprompt = ( - "\n: Hi!\n: My name is Bot, model version is 0.15, part of 
an open-source kit for " - "fine-tuning new bots! I was created by Together, LAION, and Ontocord.ai and the open-source " - "community. I am not human, not evil and not alive, and thus have no thoughts and feelings, " - "but I am programmed to be helpful, polite, honest, and friendly.\n" -) - - -def get_client(model: str): - if model == "togethercomputer/GPT-NeoXT-Chat-Base-20B": - return Client(os.getenv("OPENCHAT_API_URL")) - return InferenceAPIClient(model, token=os.getenv("HF_TOKEN", None)) - - -def get_usernames(model: str): - """ - Returns: - (str, str, str, str): pre-prompt, username, bot name, separator - """ - if model in ("OpenAssistant/oasst-sft-1-pythia-12b", "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"): - return "", "<|prompter|>", "<|assistant|>", "<|endoftext|>" - if model == "togethercomputer/GPT-NeoXT-Chat-Base-20B": - return openchat_preprompt, ": ", ": ", "\n" - return "", "User: ", "Assistant: ", "\n" - - -def predict( - model: str, - inputs: str, - typical_p: float, - top_p: float, - temperature: float, - top_k: int, - repetition_penalty: float, - watermark: bool, - chatbot, - history, -): - client = get_client(model) - preprompt, user_name, assistant_name, sep = get_usernames(model) - - history.append(inputs) - - past = [] - for data in chatbot: - user_data, model_data = data - - if not user_data.startswith(user_name): - user_data = user_name + user_data - if not model_data.startswith(sep + assistant_name): - model_data = sep + assistant_name + model_data - - past.append(user_data + model_data.rstrip() + sep) - - if not inputs.startswith(user_name): - inputs = user_name + inputs - - total_inputs = preprompt + "".join(past) + inputs + sep + assistant_name.rstrip() - - partial_words = "" - - if model in ("OpenAssistant/oasst-sft-1-pythia-12b", "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"): - iterator = client.generate_stream( - total_inputs, - typical_p=typical_p, - truncate=1000, - watermark=watermark, - max_new_tokens=500, - ) - else: - iterator = client.generate_stream( - total_inputs, - top_p=top_p if top_p < 1.0 else None, - top_k=top_k, - truncate=1000, - repetition_penalty=repetition_penalty, - watermark=watermark, - temperature=temperature, - max_new_tokens=500, - stop_sequences=[user_name.rstrip(), assistant_name.rstrip()], - ) - - for i, response in enumerate(iterator): - if response.token.special: - continue - - partial_words = partial_words + response.token.text - if partial_words.endswith(user_name.rstrip()): - partial_words = partial_words.rstrip(user_name.rstrip()) - if partial_words.endswith(assistant_name.rstrip()): - partial_words = partial_words.rstrip(assistant_name.rstrip()) - - if i == 0: - history.append(" " + partial_words) - elif response.token.text not in user_name: - history[-1] = partial_words - - chat = [ - (history[i].strip(), history[i + 1].strip()) - for i in range(0, len(history) - 1, 2) - ] - yield chat, history - - -def reset_textbox(): - return gr.update(value="") - - -def radio_on_change( - value: str, - disclaimer, - typical_p, - top_p, - top_k, - temperature, - repetition_penalty, - watermark, -): - if value in ("OpenAssistant/oasst-sft-1-pythia-12b", "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"): - typical_p = typical_p.update(value=0.2, visible=True) - top_p = top_p.update(visible=False) - top_k = top_k.update(visible=False) - temperature = temperature.update(visible=False) - disclaimer = disclaimer.update(visible=False) - repetition_penalty = repetition_penalty.update(visible=False) - watermark = 
watermark.update(False) - elif value == "togethercomputer/GPT-NeoXT-Chat-Base-20B": - typical_p = typical_p.update(visible=False) - top_p = top_p.update(value=0.25, visible=True) - top_k = top_k.update(value=50, visible=True) - temperature = temperature.update(value=0.6, visible=True) - repetition_penalty = repetition_penalty.update(value=1.01, visible=True) - watermark = watermark.update(False) - disclaimer = disclaimer.update(visible=True) - else: - typical_p = typical_p.update(visible=False) - top_p = top_p.update(value=0.95, visible=True) - top_k = top_k.update(value=4, visible=True) - temperature = temperature.update(value=0.5, visible=True) - repetition_penalty = repetition_penalty.update(value=1.03, visible=True) - watermark = watermark.update(True) - disclaimer = disclaimer.update(visible=False) - return ( - disclaimer, - typical_p, - top_p, - top_k, - temperature, - repetition_penalty, - watermark, - ) - - -title = """
Large Language Model Chat API
""" -description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form: - -``` -User: -Assistant: -User: -Assistant: -... -``` - -In this app, you can explore the outputs of multiple LLMs when prompted in this way. -""" - -text_generation_inference = """ - -""" - -openchat_disclaimer = """ -
Check out the official OpenChatKit feedback app for the full experience.
-""" - -with gr.Blocks( - css="""#col_container {margin-left: auto; margin-right: auto;} - #chatbot {height: 520px; overflow: auto;}""" -) as demo: - gr.HTML(title) - gr.Markdown(text_generation_inference, visible=True) - with gr.Column(elem_id="col_container"): - model = gr.Radio( - value="OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5", - choices=[ - "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5", - "OpenAssistant/oasst-sft-1-pythia-12b", - # "togethercomputer/GPT-NeoXT-Chat-Base-20B", - "google/flan-t5-xxl", - "google/flan-ul2", - "bigscience/bloom", - "bigscience/bloomz", - "EleutherAI/gpt-neox-20b", - ], - label="Model", - interactive=True, - ) - - chatbot = gr.Chatbot(elem_id="chatbot") - inputs = gr.Textbox( - placeholder="Hi there!", label="Type an input and press Enter" - ) - disclaimer = gr.Markdown(openchat_disclaimer, visible=False) - state = gr.State([]) - b1 = gr.Button() - - with gr.Accordion("Parameters", open=False): - typical_p = gr.Slider( - minimum=-0, - maximum=1.0, - value=0.2, - step=0.05, - interactive=True, - label="Typical P mass", - ) - top_p = gr.Slider( - minimum=-0, - maximum=1.0, - value=0.25, - step=0.05, - interactive=True, - label="Top-p (nucleus sampling)", - visible=False, - ) - temperature = gr.Slider( - minimum=-0, - maximum=5.0, - value=0.6, - step=0.1, - interactive=True, - label="Temperature", - visible=False, - ) - top_k = gr.Slider( - minimum=1, - maximum=50, - value=50, - step=1, - interactive=True, - label="Top-k", - visible=False, - ) - repetition_penalty = gr.Slider( - minimum=0.1, - maximum=3.0, - value=1.03, - step=0.01, - interactive=True, - label="Repetition Penalty", - visible=False, - ) - watermark = gr.Checkbox(value=False, label="Text watermarking") - - model.change( - lambda value: radio_on_change( - value, - disclaimer, - typical_p, - top_p, - top_k, - temperature, - repetition_penalty, - watermark, - ), - inputs=model, - outputs=[ - disclaimer, - typical_p, - top_p, - top_k, - temperature, - repetition_penalty, - watermark, - ], - ) - - inputs.submit( - predict, - [ - model, - inputs, - typical_p, - top_p, - temperature, - top_k, - repetition_penalty, - watermark, - chatbot, - state, - ], - [chatbot, state], - ) - b1.click( - predict, - [ - model, - inputs, - typical_p, - top_p, - temperature, - top_k, - repetition_penalty, - watermark, - chatbot, - state, - ], - [chatbot, state], - ) - b1.click(reset_textbox, [], [inputs]) - inputs.submit(reset_textbox, [], [inputs]) - - gr.Markdown(description) - demo.queue(concurrency_count=16).launch(debug=True) diff --git a/spaces/Alican/pixera/data/image_folder.py b/spaces/Alican/pixera/data/image_folder.py deleted file mode 100644 index f7cb91574a0487c51e5dd8210aebb38edb0b16ef..0000000000000000000000000000000000000000 --- a/spaces/Alican/pixera/data/image_folder.py +++ /dev/null @@ -1,65 +0,0 @@ -"""A modified image folder class - -We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py) -so that this class can load images from both current directory and its subdirectories. 
-""" - -import torch.utils.data as data - -from PIL import Image -import os - -IMG_EXTENSIONS = [ - '.jpg', '.JPG', '.jpeg', '.JPEG', - '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', - '.tif', '.TIF', '.tiff', '.TIFF', -] - - -def is_image_file(filename): - return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) - - -def make_dataset(dir, max_dataset_size=float("inf")): - images = [] - assert os.path.isdir(dir), '%s is not a valid directory' % dir - - for root, _, fnames in sorted(os.walk(dir)): - for fname in fnames: - if is_image_file(fname): - path = os.path.join(root, fname) - images.append(path) - return images[:min(max_dataset_size, len(images))] - - -def default_loader(path): - return Image.open(path).convert('RGB') - - -class ImageFolder(data.Dataset): - - def __init__(self, root, transform=None, return_paths=False, - loader=default_loader): - imgs = make_dataset(root) - if len(imgs) == 0: - raise(RuntimeError("Found 0 images in: " + root + "\n" - "Supported image extensions are: " + ",".join(IMG_EXTENSIONS))) - - self.root = root - self.imgs = imgs - self.transform = transform - self.return_paths = return_paths - self.loader = loader - - def __getitem__(self, index): - path = self.imgs[index] - img = self.loader(path) - if self.transform is not None: - img = self.transform(img) - if self.return_paths: - return img, path - else: - return img - - def __len__(self): - return len(self.imgs) diff --git a/spaces/Alycer/VITS-Umamusume-voice-synthesizer/text/ngu_dialect.py b/spaces/Alycer/VITS-Umamusume-voice-synthesizer/text/ngu_dialect.py deleted file mode 100644 index ce3e12bbf0469426872eed5f681985d3e1be9b26..0000000000000000000000000000000000000000 --- a/spaces/Alycer/VITS-Umamusume-voice-synthesizer/text/ngu_dialect.py +++ /dev/null @@ -1,30 +0,0 @@ -import re -import opencc - - -dialects = {'SZ': 'suzhou', 'WX': 'wuxi', 'CZ': 'changzhou', 'HZ': 'hangzhou', - 'SX': 'shaoxing', 'NB': 'ningbo', 'JJ': 'jingjiang', 'YX': 'yixing', - 'JD': 'jiading', 'ZR': 'zhenru', 'PH': 'pinghu', 'TX': 'tongxiang', - 'JS': 'jiashan', 'HN': 'xiashi', 'LP': 'linping', 'XS': 'xiaoshan', - 'FY': 'fuyang', 'RA': 'ruao', 'CX': 'cixi', 'SM': 'sanmen', - 'TT': 'tiantai', 'WZ': 'wenzhou', 'SC': 'suichang', 'YB': 'youbu'} - -converters = {} - -for dialect in dialects.values(): - try: - converters[dialect] = opencc.OpenCC(dialect) - except: - pass - - -def ngu_dialect_to_ipa(text, dialect): - dialect = dialects[dialect] - text = converters[dialect].convert(text).replace('-','').replace('$',' ') - text = re.sub(r'[、;:]', ',', text) - text = re.sub(r'\s*,\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*?\s*', '? ', text) - text = re.sub(r'\s*!\s*', '! 
', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/Amrrs/yt-shorts-video-captioning/app.py b/spaces/Amrrs/yt-shorts-video-captioning/app.py deleted file mode 100644 index 5b0ed68040f6b4673be973314c1114ba52cb4e3d..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/yt-shorts-video-captioning/app.py +++ /dev/null @@ -1,72 +0,0 @@ - -import gradio as gr -import os -import sys -import subprocess -#from moviepy.editor import VideoFileClip - -import whisper -from whisper.utils import write_vtt - -model = whisper.load_model("medium") - -title = "Add Text/Caption to your YouTube Shorts - MultiLingual" - -def video2mp3(video_file, output_ext="mp3"): - filename, ext = os.path.splitext(video_file) - subprocess.call(["ffmpeg", "-y", "-i", video_file, f"{filename}.{output_ext}"], - stdout=subprocess.DEVNULL, - stderr=subprocess.STDOUT) - return f"{filename}.{output_ext}" - - -def translate(input_video): - - audio_file = video2mp3(input_video) - - options = dict(beam_size=5, best_of=5, fp16 = False) - translate_options = dict(task="translate", **options) - result = model.transcribe(audio_file,**translate_options) - - output_dir = '' - audio_path = audio_file.split(".")[0] - - with open(os.path.join(output_dir, audio_path + ".vtt"), "w") as vtt: - write_vtt(result["segments"], file=vtt) - - subtitle = audio_path + ".vtt" - output_video = audio_path + "_subtitled.mp4" - - os.system(f"ffmpeg -i {input_video} -vf subtitles={subtitle} {output_video}") - - return output_video - -block = gr.Blocks() -with block: - - with gr.Group(): - with gr.Box(): - with gr.Row().style(): - inp_video = gr.Video( - label="Input Video", - type="filepath", - mirror_webcam = False - ) - op_video = gr.Video() - btn = gr.Button("Generate Subtitle Video") - - - - - - - btn.click(translate, inputs=[inp_video], outputs=[op_video]) - - gr.HTML(''' - - ''') - -block.launch(enable_queue = True) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/ddpm/test_ddpm.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/ddpm/test_ddpm.py deleted file mode 100644 index a3c29021511487bfc1d775f3a92a6de03e6a47c4..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/ddpm/test_ddpm.py +++ /dev/null @@ -1,111 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -import torch - -from diffusers import DDPMPipeline, DDPMScheduler, UNet2DModel -from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device - - -enable_full_determinism() - - -class DDPMPipelineFastTests(unittest.TestCase): - @property - def dummy_uncond_unet(self): - torch.manual_seed(0) - model = UNet2DModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=3, - out_channels=3, - down_block_types=("DownBlock2D", "AttnDownBlock2D"), - up_block_types=("AttnUpBlock2D", "UpBlock2D"), - ) - return model - - def test_fast_inference(self): - device = "cpu" - unet = self.dummy_uncond_unet - scheduler = DDPMScheduler() - - ddpm = DDPMPipeline(unet=unet, scheduler=scheduler) - ddpm.to(device) - ddpm.set_progress_bar_config(disable=None) - - generator = torch.Generator(device=device).manual_seed(0) - image = ddpm(generator=generator, num_inference_steps=2, output_type="numpy").images - - generator = torch.Generator(device=device).manual_seed(0) - image_from_tuple = ddpm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0] - - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - - assert image.shape == (1, 32, 32, 3) - expected_slice = np.array( - [9.956e-01, 5.785e-01, 4.675e-01, 9.930e-01, 0.0, 1.000, 1.199e-03, 2.648e-04, 5.101e-04] - ) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - - def test_inference_predict_sample(self): - unet = self.dummy_uncond_unet - scheduler = DDPMScheduler(prediction_type="sample") - - ddpm = DDPMPipeline(unet=unet, scheduler=scheduler) - ddpm.to(torch_device) - ddpm.set_progress_bar_config(disable=None) - - generator = torch.manual_seed(0) - image = ddpm(generator=generator, num_inference_steps=2, output_type="numpy").images - - generator = torch.manual_seed(0) - image_eps = ddpm(generator=generator, num_inference_steps=2, output_type="numpy")[0] - - image_slice = image[0, -3:, -3:, -1] - image_eps_slice = image_eps[0, -3:, -3:, -1] - - assert image.shape == (1, 32, 32, 3) - tolerance = 1e-2 if torch_device != "mps" else 3e-2 - assert np.abs(image_slice.flatten() - image_eps_slice.flatten()).max() < tolerance - - -@slow -@require_torch_gpu -class DDPMPipelineIntegrationTests(unittest.TestCase): - def test_inference_cifar10(self): - model_id = "google/ddpm-cifar10-32" - - unet = UNet2DModel.from_pretrained(model_id) - scheduler = DDPMScheduler.from_pretrained(model_id) - - ddpm = DDPMPipeline(unet=unet, scheduler=scheduler) - ddpm.to(torch_device) - ddpm.set_progress_bar_config(disable=None) - - generator = torch.manual_seed(0) - image = ddpm(generator=generator, output_type="numpy").images - - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 32, 32, 3) - expected_slice = np.array([0.4200, 0.3588, 0.1939, 0.3847, 0.3382, 0.2647, 0.4155, 0.3582, 0.3385]) - assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/__init__.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/__init__.py deleted file mode 100644 index 210a2989138380559f23045b568d0fbbeb918c03..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) OpenMMLab. 
All rights reserved. -# flake8: noqa -from .arraymisc import * -from .fileio import * -from .image import * -from .utils import * -from .version import * -from .video import * -from .visualization import * - -# The following modules are not imported to this level, so mmcv may be used -# without PyTorch. -# - runner -# - parallel -# - op diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/correlation.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/correlation.py deleted file mode 100644 index 3d0b79c301b29915dfaf4d2b1846c59be73127d3..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/correlation.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from torch import Tensor, nn -from torch.autograd import Function -from torch.autograd.function import once_differentiable -from torch.nn.modules.utils import _pair - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext( - '_ext', ['correlation_forward', 'correlation_backward']) - - -class CorrelationFunction(Function): - - @staticmethod - def forward(ctx, - input1, - input2, - kernel_size=1, - max_displacement=1, - stride=1, - padding=1, - dilation=1, - dilation_patch=1): - - ctx.save_for_backward(input1, input2) - - kH, kW = ctx.kernel_size = _pair(kernel_size) - patch_size = max_displacement * 2 + 1 - ctx.patch_size = patch_size - dH, dW = ctx.stride = _pair(stride) - padH, padW = ctx.padding = _pair(padding) - dilationH, dilationW = ctx.dilation = _pair(dilation) - dilation_patchH, dilation_patchW = ctx.dilation_patch = _pair( - dilation_patch) - - output_size = CorrelationFunction._output_size(ctx, input1) - - output = input1.new_zeros(output_size) - - ext_module.correlation_forward( - input1, - input2, - output, - kH=kH, - kW=kW, - patchH=patch_size, - patchW=patch_size, - padH=padH, - padW=padW, - dilationH=dilationH, - dilationW=dilationW, - dilation_patchH=dilation_patchH, - dilation_patchW=dilation_patchW, - dH=dH, - dW=dW) - - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - input1, input2 = ctx.saved_tensors - - kH, kW = ctx.kernel_size - patch_size = ctx.patch_size - padH, padW = ctx.padding - dilationH, dilationW = ctx.dilation - dilation_patchH, dilation_patchW = ctx.dilation_patch - dH, dW = ctx.stride - grad_input1 = torch.zeros_like(input1) - grad_input2 = torch.zeros_like(input2) - - ext_module.correlation_backward( - grad_output, - input1, - input2, - grad_input1, - grad_input2, - kH=kH, - kW=kW, - patchH=patch_size, - patchW=patch_size, - padH=padH, - padW=padW, - dilationH=dilationH, - dilationW=dilationW, - dilation_patchH=dilation_patchH, - dilation_patchW=dilation_patchW, - dH=dH, - dW=dW) - return grad_input1, grad_input2, None, None, None, None, None, None - - @staticmethod - def _output_size(ctx, input1): - iH, iW = input1.size(2), input1.size(3) - batch_size = input1.size(0) - kH, kW = ctx.kernel_size - patch_size = ctx.patch_size - dH, dW = ctx.stride - padH, padW = ctx.padding - dilationH, dilationW = ctx.dilation - dilatedKH = (kH - 1) * dilationH + 1 - dilatedKW = (kW - 1) * dilationW + 1 - - oH = int((iH + 2 * padH - dilatedKH) / dH + 1) - oW = int((iW + 2 * padW - dilatedKW) / dW + 1) - - output_size = (batch_size, patch_size, patch_size, oH, oW) - return output_size - - -class Correlation(nn.Module): - r"""Correlation operator - - This correlation operator works for optical flow 
correlation computation. - - There are two batched tensors with shape :math:`(N, C, H, W)`, - and the correlation output's shape is :math:`(N, max\_displacement \times - 2 + 1, max\_displacement * 2 + 1, H_{out}, W_{out})` - - where - - .. math:: - H_{out} = \left\lfloor\frac{H_{in} + 2 \times padding - - dilation \times (kernel\_size - 1) - 1} - {stride} + 1\right\rfloor - - .. math:: - W_{out} = \left\lfloor\frac{W_{in} + 2 \times padding - dilation - \times (kernel\_size - 1) - 1} - {stride} + 1\right\rfloor - - the correlation item :math:`(N_i, dy, dx)` is formed by taking the sliding - window convolution between input1 and shifted input2, - - .. math:: - Corr(N_i, dx, dy) = - \sum_{c=0}^{C-1} - input1(N_i, c) \star - \mathcal{S}(input2(N_i, c), dy, dx) - - where :math:`\star` is the valid 2d sliding window convolution operator, - and :math:`\mathcal{S}` means shifting the input features (auto-complete - zero marginal), and :math:`dx, dy` are shifting distance, :math:`dx, dy \in - [-max\_displacement \times dilation\_patch, max\_displacement \times - dilation\_patch]`. - - Args: - kernel_size (int): The size of sliding window i.e. local neighborhood - representing the center points and involved in correlation - computation. Defaults to 1. - max_displacement (int): The radius for computing correlation volume, - but the actual working space can be dilated by dilation_patch. - Defaults to 1. - stride (int): The stride of the sliding blocks in the input spatial - dimensions. Defaults to 1. - padding (int): Zero padding added to all four sides of the input1. - Defaults to 0. - dilation (int): The spacing of local neighborhood that will involved - in correlation. Defaults to 1. - dilation_patch (int): The spacing between position need to compute - correlation. Defaults to 1. - """ - - def __init__(self, - kernel_size: int = 1, - max_displacement: int = 1, - stride: int = 1, - padding: int = 0, - dilation: int = 1, - dilation_patch: int = 1) -> None: - super().__init__() - self.kernel_size = kernel_size - self.max_displacement = max_displacement - self.stride = stride - self.padding = padding - self.dilation = dilation - self.dilation_patch = dilation_patch - - def forward(self, input1: Tensor, input2: Tensor) -> Tensor: - return CorrelationFunction.apply(input1, input2, self.kernel_size, - self.max_displacement, self.stride, - self.padding, self.dilation, - self.dilation_patch) - - def __repr__(self) -> str: - s = self.__class__.__name__ - s += f'(kernel_size={self.kernel_size}, ' - s += f'max_displacement={self.max_displacement}, ' - s += f'stride={self.stride}, ' - s += f'padding={self.padding}, ' - s += f'dilation={self.dilation}, ' - s += f'dilation_patch={self.dilation_patch})' - return s diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/base_runner.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/base_runner.py deleted file mode 100644 index 4928db0a73b56fe0218a4bf66ec4ffa082d31ccc..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/base_runner.py +++ /dev/null @@ -1,542 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import copy -import logging -import os.path as osp -import warnings -from abc import ABCMeta, abstractmethod - -import torch -from torch.optim import Optimizer - -import annotator.uniformer.mmcv as mmcv -from ..parallel import is_module_wrapper -from .checkpoint import load_checkpoint -from .dist_utils import get_dist_info -from .hooks import HOOKS, Hook -from .log_buffer import LogBuffer -from .priority import Priority, get_priority -from .utils import get_time_str - - -class BaseRunner(metaclass=ABCMeta): - """The base class of Runner, a training helper for PyTorch. - - All subclasses should implement the following APIs: - - - ``run()`` - - ``train()`` - - ``val()`` - - ``save_checkpoint()`` - - Args: - model (:obj:`torch.nn.Module`): The model to be run. - batch_processor (callable): A callable method that process a data - batch. The interface of this method should be - `batch_processor(model, data, train_mode) -> dict` - optimizer (dict or :obj:`torch.optim.Optimizer`): It can be either an - optimizer (in most cases) or a dict of optimizers (in models that - requires more than one optimizer, e.g., GAN). - work_dir (str, optional): The working directory to save checkpoints - and logs. Defaults to None. - logger (:obj:`logging.Logger`): Logger used during training. - Defaults to None. (The default value is just for backward - compatibility) - meta (dict | None): A dict records some import information such as - environment info and seed, which will be logged in logger hook. - Defaults to None. - max_epochs (int, optional): Total training epochs. - max_iters (int, optional): Total training iterations. - """ - - def __init__(self, - model, - batch_processor=None, - optimizer=None, - work_dir=None, - logger=None, - meta=None, - max_iters=None, - max_epochs=None): - if batch_processor is not None: - if not callable(batch_processor): - raise TypeError('batch_processor must be callable, ' - f'but got {type(batch_processor)}') - warnings.warn('batch_processor is deprecated, please implement ' - 'train_step() and val_step() in the model instead.') - # raise an error is `batch_processor` is not None and - # `model.train_step()` exists. 
- if is_module_wrapper(model): - _model = model.module - else: - _model = model - if hasattr(_model, 'train_step') or hasattr(_model, 'val_step'): - raise RuntimeError( - 'batch_processor and model.train_step()/model.val_step() ' - 'cannot be both available.') - else: - assert hasattr(model, 'train_step') - - # check the type of `optimizer` - if isinstance(optimizer, dict): - for name, optim in optimizer.items(): - if not isinstance(optim, Optimizer): - raise TypeError( - f'optimizer must be a dict of torch.optim.Optimizers, ' - f'but optimizer["{name}"] is a {type(optim)}') - elif not isinstance(optimizer, Optimizer) and optimizer is not None: - raise TypeError( - f'optimizer must be a torch.optim.Optimizer object ' - f'or dict or None, but got {type(optimizer)}') - - # check the type of `logger` - if not isinstance(logger, logging.Logger): - raise TypeError(f'logger must be a logging.Logger object, ' - f'but got {type(logger)}') - - # check the type of `meta` - if meta is not None and not isinstance(meta, dict): - raise TypeError( - f'meta must be a dict or None, but got {type(meta)}') - - self.model = model - self.batch_processor = batch_processor - self.optimizer = optimizer - self.logger = logger - self.meta = meta - # create work_dir - if mmcv.is_str(work_dir): - self.work_dir = osp.abspath(work_dir) - mmcv.mkdir_or_exist(self.work_dir) - elif work_dir is None: - self.work_dir = None - else: - raise TypeError('"work_dir" must be a str or None') - - # get model name from the model class - if hasattr(self.model, 'module'): - self._model_name = self.model.module.__class__.__name__ - else: - self._model_name = self.model.__class__.__name__ - - self._rank, self._world_size = get_dist_info() - self.timestamp = get_time_str() - self.mode = None - self._hooks = [] - self._epoch = 0 - self._iter = 0 - self._inner_iter = 0 - - if max_epochs is not None and max_iters is not None: - raise ValueError( - 'Only one of `max_epochs` or `max_iters` can be set.') - - self._max_epochs = max_epochs - self._max_iters = max_iters - # TODO: Redesign LogBuffer, it is not flexible and elegant enough - self.log_buffer = LogBuffer() - - @property - def model_name(self): - """str: Name of the model, usually the module class name.""" - return self._model_name - - @property - def rank(self): - """int: Rank of current process. (distributed training)""" - return self._rank - - @property - def world_size(self): - """int: Number of processes participating in the job. - (distributed training)""" - return self._world_size - - @property - def hooks(self): - """list[:obj:`Hook`]: A list of registered hooks.""" - return self._hooks - - @property - def epoch(self): - """int: Current epoch.""" - return self._epoch - - @property - def iter(self): - """int: Current iteration.""" - return self._iter - - @property - def inner_iter(self): - """int: Iteration in an epoch.""" - return self._inner_iter - - @property - def max_epochs(self): - """int: Maximum training epochs.""" - return self._max_epochs - - @property - def max_iters(self): - """int: Maximum training iterations.""" - return self._max_iters - - @abstractmethod - def train(self): - pass - - @abstractmethod - def val(self): - pass - - @abstractmethod - def run(self, data_loaders, workflow, **kwargs): - pass - - @abstractmethod - def save_checkpoint(self, - out_dir, - filename_tmpl, - save_optimizer=True, - meta=None, - create_symlink=True): - pass - - def current_lr(self): - """Get current learning rates. 
- - Returns: - list[float] | dict[str, list[float]]: Current learning rates of all - param groups. If the runner has a dict of optimizers, this - method will return a dict. - """ - if isinstance(self.optimizer, torch.optim.Optimizer): - lr = [group['lr'] for group in self.optimizer.param_groups] - elif isinstance(self.optimizer, dict): - lr = dict() - for name, optim in self.optimizer.items(): - lr[name] = [group['lr'] for group in optim.param_groups] - else: - raise RuntimeError( - 'lr is not applicable because optimizer does not exist.') - return lr - - def current_momentum(self): - """Get current momentums. - - Returns: - list[float] | dict[str, list[float]]: Current momentums of all - param groups. If the runner has a dict of optimizers, this - method will return a dict. - """ - - def _get_momentum(optimizer): - momentums = [] - for group in optimizer.param_groups: - if 'momentum' in group.keys(): - momentums.append(group['momentum']) - elif 'betas' in group.keys(): - momentums.append(group['betas'][0]) - else: - momentums.append(0) - return momentums - - if self.optimizer is None: - raise RuntimeError( - 'momentum is not applicable because optimizer does not exist.') - elif isinstance(self.optimizer, torch.optim.Optimizer): - momentums = _get_momentum(self.optimizer) - elif isinstance(self.optimizer, dict): - momentums = dict() - for name, optim in self.optimizer.items(): - momentums[name] = _get_momentum(optim) - return momentums - - def register_hook(self, hook, priority='NORMAL'): - """Register a hook into the hook list. - - The hook will be inserted into a priority queue, with the specified - priority (See :class:`Priority` for details of priorities). - For hooks with the same priority, they will be triggered in the same - order as they are registered. - - Args: - hook (:obj:`Hook`): The hook to be registered. - priority (int or str or :obj:`Priority`): Hook priority. - Lower value means higher priority. - """ - assert isinstance(hook, Hook) - if hasattr(hook, 'priority'): - raise ValueError('"priority" is a reserved attribute for hooks') - priority = get_priority(priority) - hook.priority = priority - # insert the hook to a sorted list - inserted = False - for i in range(len(self._hooks) - 1, -1, -1): - if priority >= self._hooks[i].priority: - self._hooks.insert(i + 1, hook) - inserted = True - break - if not inserted: - self._hooks.insert(0, hook) - - def register_hook_from_cfg(self, hook_cfg): - """Register a hook from its cfg. - - Args: - hook_cfg (dict): Hook config. It should have at least keys 'type' - and 'priority' indicating its type and priority. - - Notes: - The specific hook class to register should not use 'type' and - 'priority' arguments during initialization. - """ - hook_cfg = hook_cfg.copy() - priority = hook_cfg.pop('priority', 'NORMAL') - hook = mmcv.build_from_cfg(hook_cfg, HOOKS) - self.register_hook(hook, priority=priority) - - def call_hook(self, fn_name): - """Call all hooks. - - Args: - fn_name (str): The function name in each hook to be called, such as - "before_train_epoch". 
- """ - for hook in self._hooks: - getattr(hook, fn_name)(self) - - def get_hook_info(self): - # Get hooks info in each stage - stage_hook_map = {stage: [] for stage in Hook.stages} - for hook in self.hooks: - try: - priority = Priority(hook.priority).name - except ValueError: - priority = hook.priority - classname = hook.__class__.__name__ - hook_info = f'({priority:<12}) {classname:<35}' - for trigger_stage in hook.get_triggered_stages(): - stage_hook_map[trigger_stage].append(hook_info) - - stage_hook_infos = [] - for stage in Hook.stages: - hook_infos = stage_hook_map[stage] - if len(hook_infos) > 0: - info = f'{stage}:\n' - info += '\n'.join(hook_infos) - info += '\n -------------------- ' - stage_hook_infos.append(info) - return '\n'.join(stage_hook_infos) - - def load_checkpoint(self, - filename, - map_location='cpu', - strict=False, - revise_keys=[(r'^module.', '')]): - return load_checkpoint( - self.model, - filename, - map_location, - strict, - self.logger, - revise_keys=revise_keys) - - def resume(self, - checkpoint, - resume_optimizer=True, - map_location='default'): - if map_location == 'default': - if torch.cuda.is_available(): - device_id = torch.cuda.current_device() - checkpoint = self.load_checkpoint( - checkpoint, - map_location=lambda storage, loc: storage.cuda(device_id)) - else: - checkpoint = self.load_checkpoint(checkpoint) - else: - checkpoint = self.load_checkpoint( - checkpoint, map_location=map_location) - - self._epoch = checkpoint['meta']['epoch'] - self._iter = checkpoint['meta']['iter'] - if self.meta is None: - self.meta = {} - self.meta.setdefault('hook_msgs', {}) - # load `last_ckpt`, `best_score`, `best_ckpt`, etc. for hook messages - self.meta['hook_msgs'].update(checkpoint['meta'].get('hook_msgs', {})) - - # Re-calculate the number of iterations when resuming - # models with different number of GPUs - if 'config' in checkpoint['meta']: - config = mmcv.Config.fromstring( - checkpoint['meta']['config'], file_format='.py') - previous_gpu_ids = config.get('gpu_ids', None) - if previous_gpu_ids and len(previous_gpu_ids) > 0 and len( - previous_gpu_ids) != self.world_size: - self._iter = int(self._iter * len(previous_gpu_ids) / - self.world_size) - self.logger.info('the iteration number is changed due to ' - 'change of GPU number') - - # resume meta information meta - self.meta = checkpoint['meta'] - - if 'optimizer' in checkpoint and resume_optimizer: - if isinstance(self.optimizer, Optimizer): - self.optimizer.load_state_dict(checkpoint['optimizer']) - elif isinstance(self.optimizer, dict): - for k in self.optimizer.keys(): - self.optimizer[k].load_state_dict( - checkpoint['optimizer'][k]) - else: - raise TypeError( - 'Optimizer should be dict or torch.optim.Optimizer ' - f'but got {type(self.optimizer)}') - - self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter) - - def register_lr_hook(self, lr_config): - if lr_config is None: - return - elif isinstance(lr_config, dict): - assert 'policy' in lr_config - policy_type = lr_config.pop('policy') - # If the type of policy is all in lower case, e.g., 'cyclic', - # then its first letter will be capitalized, e.g., to be 'Cyclic'. - # This is for the convenient usage of Lr updater. - # Since this is not applicable for ` - # CosineAnnealingLrUpdater`, - # the string will not be changed if it contains capital letters. 
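The comment above describes the convention that the lines just below implement: an all-lower-case LR policy name is title-cased and suffixed to form the hook type, while a mixed-case name passes through unchanged. A tiny illustrative helper (not part of mmcv) showing the mapping:

def lr_policy_to_hook_type(policy_type):
    # Mirrors the logic below: only all-lower-case names get title-cased.
    if policy_type == policy_type.lower():
        policy_type = policy_type.title()   # 'step' -> 'Step'
    return policy_type + 'LrUpdaterHook'

assert lr_policy_to_hook_type('step') == 'StepLrUpdaterHook'
assert lr_policy_to_hook_type('CosineAnnealing') == 'CosineAnnealingLrUpdaterHook'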
- if policy_type == policy_type.lower(): - policy_type = policy_type.title() - hook_type = policy_type + 'LrUpdaterHook' - lr_config['type'] = hook_type - hook = mmcv.build_from_cfg(lr_config, HOOKS) - else: - hook = lr_config - self.register_hook(hook, priority='VERY_HIGH') - - def register_momentum_hook(self, momentum_config): - if momentum_config is None: - return - if isinstance(momentum_config, dict): - assert 'policy' in momentum_config - policy_type = momentum_config.pop('policy') - # If the type of policy is all in lower case, e.g., 'cyclic', - # then its first letter will be capitalized, e.g., to be 'Cyclic'. - # This is for the convenient usage of momentum updater. - # Since this is not applicable for - # `CosineAnnealingMomentumUpdater`, - # the string will not be changed if it contains capital letters. - if policy_type == policy_type.lower(): - policy_type = policy_type.title() - hook_type = policy_type + 'MomentumUpdaterHook' - momentum_config['type'] = hook_type - hook = mmcv.build_from_cfg(momentum_config, HOOKS) - else: - hook = momentum_config - self.register_hook(hook, priority='HIGH') - - def register_optimizer_hook(self, optimizer_config): - if optimizer_config is None: - return - if isinstance(optimizer_config, dict): - optimizer_config.setdefault('type', 'OptimizerHook') - hook = mmcv.build_from_cfg(optimizer_config, HOOKS) - else: - hook = optimizer_config - self.register_hook(hook, priority='ABOVE_NORMAL') - - def register_checkpoint_hook(self, checkpoint_config): - if checkpoint_config is None: - return - if isinstance(checkpoint_config, dict): - checkpoint_config.setdefault('type', 'CheckpointHook') - hook = mmcv.build_from_cfg(checkpoint_config, HOOKS) - else: - hook = checkpoint_config - self.register_hook(hook, priority='NORMAL') - - def register_logger_hooks(self, log_config): - if log_config is None: - return - log_interval = log_config['interval'] - for info in log_config['hooks']: - logger_hook = mmcv.build_from_cfg( - info, HOOKS, default_args=dict(interval=log_interval)) - self.register_hook(logger_hook, priority='VERY_LOW') - - def register_timer_hook(self, timer_config): - if timer_config is None: - return - if isinstance(timer_config, dict): - timer_config_ = copy.deepcopy(timer_config) - hook = mmcv.build_from_cfg(timer_config_, HOOKS) - else: - hook = timer_config - self.register_hook(hook, priority='LOW') - - def register_custom_hooks(self, custom_config): - if custom_config is None: - return - - if not isinstance(custom_config, list): - custom_config = [custom_config] - - for item in custom_config: - if isinstance(item, dict): - self.register_hook_from_cfg(item) - else: - self.register_hook(item, priority='NORMAL') - - def register_profiler_hook(self, profiler_config): - if profiler_config is None: - return - if isinstance(profiler_config, dict): - profiler_config.setdefault('type', 'ProfilerHook') - hook = mmcv.build_from_cfg(profiler_config, HOOKS) - else: - hook = profiler_config - self.register_hook(hook) - - def register_training_hooks(self, - lr_config, - optimizer_config=None, - checkpoint_config=None, - log_config=None, - momentum_config=None, - timer_config=dict(type='IterTimerHook'), - custom_hooks_config=None): - """Register default and custom hooks for training. 
- - Default and custom hooks include: - - +----------------------+-------------------------+ - | Hooks | Priority | - +======================+=========================+ - | LrUpdaterHook | VERY_HIGH (10) | - +----------------------+-------------------------+ - | MomentumUpdaterHook | HIGH (30) | - +----------------------+-------------------------+ - | OptimizerStepperHook | ABOVE_NORMAL (40) | - +----------------------+-------------------------+ - | CheckpointSaverHook | NORMAL (50) | - +----------------------+-------------------------+ - | IterTimerHook | LOW (70) | - +----------------------+-------------------------+ - | LoggerHook(s) | VERY_LOW (90) | - +----------------------+-------------------------+ - | CustomHook(s) | defaults to NORMAL (50) | - +----------------------+-------------------------+ - - If custom hooks have same priority with default hooks, custom hooks - will be triggered after default hooks. - """ - self.register_lr_hook(lr_config) - self.register_momentum_hook(momentum_config) - self.register_optimizer_hook(optimizer_config) - self.register_checkpoint_hook(checkpoint_config) - self.register_timer_hook(timer_config) - self.register_logger_hooks(log_config) - self.register_custom_hooks(custom_hooks_config) diff --git a/spaces/Anthony7906/MengHuiMXD_GPT/modules/overwrites.py b/spaces/Anthony7906/MengHuiMXD_GPT/modules/overwrites.py deleted file mode 100644 index 035a4a52722d66ee28af1c05231ad1cea3339ef5..0000000000000000000000000000000000000000 --- a/spaces/Anthony7906/MengHuiMXD_GPT/modules/overwrites.py +++ /dev/null @@ -1,94 +0,0 @@ -from __future__ import annotations -import logging - -from llama_index import Prompt -from typing import List, Tuple -import mdtex2html -from gradio_client import utils as client_utils - -from modules.presets import * -from modules.llama_func import * - - -def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]: - logging.debug("Compacting text chunks...🚀🚀🚀") - combined_str = [c.strip() for c in text_chunks if c.strip()] - combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)] - combined_str = "\n\n".join(combined_str) - # resplit based on self.max_chunk_overlap - text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1) - return text_splitter.split_text(combined_str) - - -def postprocess( - self, - y: List[List[str | Tuple[str] | Tuple[str, str] | None] | Tuple], - ) -> List[List[str | Dict | None]]: - """ - Parameters: - y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed. - Returns: - List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed. - """ - if y is None: - return [] - processed_messages = [] - for message_pair in y: - assert isinstance( - message_pair, (tuple, list) - ), f"Expected a list of lists or list of tuples. Received: {message_pair}" - assert ( - len(message_pair) == 2 - ), f"Expected a list of lists of length 2 or list of tuples of length 2. 
Received: {message_pair}" - - processed_messages.append( - [ - self._postprocess_chat_messages(message_pair[0], "user"), - self._postprocess_chat_messages(message_pair[1], "bot"), - ] - ) - return processed_messages - -def postprocess_chat_messages( - self, chat_message: str | Tuple | List | None, message_type: str - ) -> str | Dict | None: - if chat_message is None: - return None - elif isinstance(chat_message, (tuple, list)): - filepath = chat_message[0] - mime_type = client_utils.get_mimetype(filepath) - filepath = self.make_temp_copy_if_needed(filepath) - return { - "name": filepath, - "mime_type": mime_type, - "alt_text": chat_message[1] if len(chat_message) > 1 else None, - "data": None, # These last two fields are filled in by the frontend - "is_file": True, - } - elif isinstance(chat_message, str): - if message_type == "bot": - if not detect_converted_mark(chat_message): - chat_message = convert_mdtext(chat_message) - elif message_type == "user": - if not detect_converted_mark(chat_message): - chat_message = convert_asis(chat_message) - return chat_message - else: - raise ValueError(f"Invalid message for Chatbot component: {chat_message}") - -with open("./assets/custom.js", "r", encoding="utf-8") as f, open("./assets/Kelpy-Codos.js", "r", encoding="utf-8") as f2: - customJS = f.read() - kelpyCodos = f2.read() - -def reload_javascript(): - print("Reloading javascript...") - js = f'' - def template_response(*args, **kwargs): - res = GradioTemplateResponseOriginal(*args, **kwargs) - res.body = res.body.replace(b'', f'{js}'.encode("utf8")) - res.init_headers() - return res - - gr.routes.templates.TemplateResponse = template_response - -GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse \ No newline at end of file diff --git a/spaces/Apex-X/nono/roop/globals.py b/spaces/Apex-X/nono/roop/globals.py deleted file mode 100644 index 3eca8d0d024db967cc6d7e7149f68f65f84d7072..0000000000000000000000000000000000000000 --- a/spaces/Apex-X/nono/roop/globals.py +++ /dev/null @@ -1,22 +0,0 @@ -from typing import List, Optional - -source_path: Optional[str] = None -target_path: Optional[str] = None -output_path: Optional[str] = None -headless: Optional[bool] = None -frame_processors: List[str] = [] -keep_fps: Optional[bool] = None -keep_frames: Optional[bool] = None -skip_audio: Optional[bool] = None -many_faces: Optional[bool] = None -reference_face_position: Optional[int] = None -reference_frame_number: Optional[int] = None -similar_face_distance: Optional[float] = None -temp_frame_format: Optional[str] = None -temp_frame_quality: Optional[int] = None -output_video_encoder: Optional[str] = None -output_video_quality: Optional[int] = None -max_memory: Optional[int] = None -execution_providers: List[str] = [] -execution_threads: Optional[int] = None -log_level: str = 'error' diff --git a/spaces/Ashrafb/Tesseract-OCR/app_interface.py b/spaces/Ashrafb/Tesseract-OCR/app_interface.py deleted file mode 100644 index fc0c3625a76832c2051676a621378042b3672926..0000000000000000000000000000000000000000 --- a/spaces/Ashrafb/Tesseract-OCR/app_interface.py +++ /dev/null @@ -1,37 +0,0 @@ -from typing import List - -import pytesseract -from PIL import Image - -import gradio as gr - -def tesseract_ocr(filepath: str, languages: List[str]): - image = Image.open(filepath) - return pytesseract.image_to_string(image=image, lang=', '.join(languages)) - -title = "Tesseract OCR" -description = "Gradio demo for Tesseract. Tesseract is an open source text recognition (OCR) Engine." -article = "
Tesseract documentation | Github Repo
" -examples = [ - ['examples/eurotext.png', ['eng']], - ['examples/tesseract_sample.png', ['jpn', 'eng']], - ['examples/chi.jpg', ['HanS', 'HanT']] - ] - -language_choices = pytesseract.get_languages() - -demo = gr.Interface( - fn=tesseract_ocr, - inputs=[ - gr.Image(type="filepath", label="Input"), - gr.CheckboxGroup(language_choices, type="value", value=['eng'], label='language') - ], - outputs='text', - title=title, - description=description, - article=article, - examples=examples, -) - -if __name__ == '__main__': - demo.launch() diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/cli/status_codes.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/cli/status_codes.py deleted file mode 100644 index 5e29502cddfa9a9887a93399ab4193fb75dfe605..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/cli/status_codes.py +++ /dev/null @@ -1,6 +0,0 @@ -SUCCESS = 0 -ERROR = 1 -UNKNOWN_ERROR = 2 -VIRTUALENV_NOT_FOUND = 3 -PREVIOUS_BUILD_DIR_ERROR = 4 -NO_MATCHES_FOUND = 23 diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/metadata/importlib/_dists.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/metadata/importlib/_dists.py deleted file mode 100644 index 65c043c87eff27e9405316fdbc0c695f2b347441..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/metadata/importlib/_dists.py +++ /dev/null @@ -1,224 +0,0 @@ -import email.message -import importlib.metadata -import os -import pathlib -import zipfile -from typing import ( - Collection, - Dict, - Iterable, - Iterator, - Mapping, - Optional, - Sequence, - cast, -) - -from pip._vendor.packaging.requirements import Requirement -from pip._vendor.packaging.utils import NormalizedName, canonicalize_name -from pip._vendor.packaging.version import parse as parse_version - -from pip._internal.exceptions import InvalidWheel, UnsupportedWheel -from pip._internal.metadata.base import ( - BaseDistribution, - BaseEntryPoint, - DistributionVersion, - InfoPath, - Wheel, -) -from pip._internal.utils.misc import normalize_path -from pip._internal.utils.packaging import safe_extra -from pip._internal.utils.temp_dir import TempDirectory -from pip._internal.utils.wheel import parse_wheel, read_wheel_metadata_file - -from ._compat import BasePath, get_dist_name - - -class WheelDistribution(importlib.metadata.Distribution): - """An ``importlib.metadata.Distribution`` read from a wheel. - - Although ``importlib.metadata.PathDistribution`` accepts ``zipfile.Path``, - its implementation is too "lazy" for pip's needs (we can't keep the ZipFile - handle open for the entire lifetime of the distribution object). - - This implementation eagerly reads the entire metadata directory into the - memory instead, and operates from that. 
- """ - - def __init__( - self, - files: Mapping[pathlib.PurePosixPath, bytes], - info_location: pathlib.PurePosixPath, - ) -> None: - self._files = files - self.info_location = info_location - - @classmethod - def from_zipfile( - cls, - zf: zipfile.ZipFile, - name: str, - location: str, - ) -> "WheelDistribution": - info_dir, _ = parse_wheel(zf, name) - paths = ( - (name, pathlib.PurePosixPath(name.split("/", 1)[-1])) - for name in zf.namelist() - if name.startswith(f"{info_dir}/") - ) - files = { - relpath: read_wheel_metadata_file(zf, fullpath) - for fullpath, relpath in paths - } - info_location = pathlib.PurePosixPath(location, info_dir) - return cls(files, info_location) - - def iterdir(self, path: InfoPath) -> Iterator[pathlib.PurePosixPath]: - # Only allow iterating through the metadata directory. - if pathlib.PurePosixPath(str(path)) in self._files: - return iter(self._files) - raise FileNotFoundError(path) - - def read_text(self, filename: str) -> Optional[str]: - try: - data = self._files[pathlib.PurePosixPath(filename)] - except KeyError: - return None - try: - text = data.decode("utf-8") - except UnicodeDecodeError as e: - wheel = self.info_location.parent - error = f"Error decoding metadata for {wheel}: {e} in {filename} file" - raise UnsupportedWheel(error) - return text - - -class Distribution(BaseDistribution): - def __init__( - self, - dist: importlib.metadata.Distribution, - info_location: Optional[BasePath], - installed_location: Optional[BasePath], - ) -> None: - self._dist = dist - self._info_location = info_location - self._installed_location = installed_location - - @classmethod - def from_directory(cls, directory: str) -> BaseDistribution: - info_location = pathlib.Path(directory) - dist = importlib.metadata.Distribution.at(info_location) - return cls(dist, info_location, info_location.parent) - - @classmethod - def from_metadata_file_contents( - cls, - metadata_contents: bytes, - filename: str, - project_name: str, - ) -> BaseDistribution: - # Generate temp dir to contain the metadata file, and write the file contents. - temp_dir = pathlib.Path( - TempDirectory(kind="metadata", globally_managed=True).path - ) - metadata_path = temp_dir / "METADATA" - metadata_path.write_bytes(metadata_contents) - # Construct dist pointing to the newly created directory. - dist = importlib.metadata.Distribution.at(metadata_path.parent) - return cls(dist, metadata_path.parent, None) - - @classmethod - def from_wheel(cls, wheel: Wheel, name: str) -> BaseDistribution: - try: - with wheel.as_zipfile() as zf: - dist = WheelDistribution.from_zipfile(zf, name, wheel.location) - except zipfile.BadZipFile as e: - raise InvalidWheel(wheel.location, name) from e - except UnsupportedWheel as e: - raise UnsupportedWheel(f"{name} has an invalid wheel, {e}") - return cls(dist, dist.info_location, pathlib.PurePosixPath(wheel.location)) - - @property - def location(self) -> Optional[str]: - if self._info_location is None: - return None - return str(self._info_location.parent) - - @property - def info_location(self) -> Optional[str]: - if self._info_location is None: - return None - return str(self._info_location) - - @property - def installed_location(self) -> Optional[str]: - if self._installed_location is None: - return None - return normalize_path(str(self._installed_location)) - - def _get_dist_name_from_location(self) -> Optional[str]: - """Try to get the name from the metadata directory name. - - This is much faster than reading metadata. 
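The docstrings above describe two shortcuts worth calling out: WheelDistribution reads every file under the wheel's metadata directory into memory up front (so the ZipFile handle can be closed immediately), and the distribution name can often be recovered from the metadata directory name alone, without parsing METADATA. A self-contained sketch of both using only the standard library; the wheel filename and info-dir name below are illustrative, not taken from pip:

import os
import pathlib
import zipfile

def read_dist_info(wheel_path, info_dir):
    # Eagerly load every "<info_dir>/..." member into a dict keyed by the path
    # relative to the metadata directory, mirroring WheelDistribution.from_zipfile above.
    files = {}
    with zipfile.ZipFile(wheel_path) as zf:
        for name in zf.namelist():
            if name.startswith(f"{info_dir}/"):
                relpath = pathlib.PurePosixPath(name.split("/", 1)[-1])
                files[relpath] = zf.read(name)
    return files

def name_from_info_dir(dirname):
    # Mirrors _get_dist_name_from_location: "requests-2.31.0.dist-info" -> "requests".
    stem, suffix = os.path.splitext(dirname)
    if suffix not in (".dist-info", ".egg-info"):
        return None
    return stem.split("-", 1)[0]

assert name_from_info_dir("requests-2.31.0.dist-info") == "requests"
assert name_from_info_dir("not_metadata.txt") is None
# read_dist_info("requests-2.31.0-py3-none-any.whl", "requests-2.31.0.dist-info") would
# return {PurePosixPath("METADATA"): b"...", PurePosixPath("RECORD"): b"...", ...}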
- """ - if self._info_location is None: - return None - stem, suffix = os.path.splitext(self._info_location.name) - if suffix not in (".dist-info", ".egg-info"): - return None - return stem.split("-", 1)[0] - - @property - def canonical_name(self) -> NormalizedName: - name = self._get_dist_name_from_location() or get_dist_name(self._dist) - return canonicalize_name(name) - - @property - def version(self) -> DistributionVersion: - return parse_version(self._dist.version) - - def is_file(self, path: InfoPath) -> bool: - return self._dist.read_text(str(path)) is not None - - def iter_distutils_script_names(self) -> Iterator[str]: - # A distutils installation is always "flat" (not in e.g. egg form), so - # if this distribution's info location is NOT a pathlib.Path (but e.g. - # zipfile.Path), it can never contain any distutils scripts. - if not isinstance(self._info_location, pathlib.Path): - return - for child in self._info_location.joinpath("scripts").iterdir(): - yield child.name - - def read_text(self, path: InfoPath) -> str: - content = self._dist.read_text(str(path)) - if content is None: - raise FileNotFoundError(path) - return content - - def iter_entry_points(self) -> Iterable[BaseEntryPoint]: - # importlib.metadata's EntryPoint structure sasitfies BaseEntryPoint. - return self._dist.entry_points - - def _metadata_impl(self) -> email.message.Message: - # From Python 3.10+, importlib.metadata declares PackageMetadata as the - # return type. This protocol is unfortunately a disaster now and misses - # a ton of fields that we need, including get() and get_payload(). We - # rely on the implementation that the object is actually a Message now, - # until upstream can improve the protocol. (python/cpython#94952) - return cast(email.message.Message, self._dist.metadata) - - def iter_provided_extras(self) -> Iterable[str]: - return ( - safe_extra(extra) for extra in self.metadata.get_all("Provides-Extra", []) - ) - - def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]: - contexts: Sequence[Dict[str, str]] = [{"extra": safe_extra(e)} for e in extras] - for req_string in self.metadata.get_all("Requires-Dist", []): - req = Requirement(req_string) - if not req.marker: - yield req - elif not extras and req.marker.evaluate({"extra": ""}): - yield req - elif any(req.marker.evaluate(context) for context in contexts): - yield req diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/cli/chardetect.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/cli/chardetect.py deleted file mode 100644 index 43f6e144f677a113b5362dcbdfb75db4f41c2b2f..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/cli/chardetect.py +++ /dev/null @@ -1,112 +0,0 @@ -""" -Script which takes one or more file paths and reports on their detected -encodings - -Example:: - - % chardetect somefile someotherfile - somefile: windows-1252 with confidence 0.5 - someotherfile: ascii with confidence 1.0 - -If no paths are provided, it takes its input from stdin. - -""" - - -import argparse -import sys -from typing import Iterable, List, Optional - -from .. 
import __version__ -from ..universaldetector import UniversalDetector - - -def description_of( - lines: Iterable[bytes], - name: str = "stdin", - minimal: bool = False, - should_rename_legacy: bool = False, -) -> Optional[str]: - """ - Return a string describing the probable encoding of a file or - list of strings. - - :param lines: The lines to get the encoding of. - :type lines: Iterable of bytes - :param name: Name of file or collection of lines - :type name: str - :param should_rename_legacy: Should we rename legacy encodings to - their more modern equivalents? - :type should_rename_legacy: ``bool`` - """ - u = UniversalDetector(should_rename_legacy=should_rename_legacy) - for line in lines: - line = bytearray(line) - u.feed(line) - # shortcut out of the loop to save reading further - particularly useful if we read a BOM. - if u.done: - break - u.close() - result = u.result - if minimal: - return result["encoding"] - if result["encoding"]: - return f'{name}: {result["encoding"]} with confidence {result["confidence"]}' - return f"{name}: no result" - - -def main(argv: Optional[List[str]] = None) -> None: - """ - Handles command line arguments and gets things started. - - :param argv: List of arguments, as if specified on the command-line. - If None, ``sys.argv[1:]`` is used instead. - :type argv: list of str - """ - # Get command line arguments - parser = argparse.ArgumentParser( - description=( - "Takes one or more file paths and reports their detected encodings" - ) - ) - parser.add_argument( - "input", - help="File whose encoding we would like to determine. (default: stdin)", - type=argparse.FileType("rb"), - nargs="*", - default=[sys.stdin.buffer], - ) - parser.add_argument( - "--minimal", - help="Print only the encoding to standard output", - action="store_true", - ) - parser.add_argument( - "-l", - "--legacy", - help="Rename legacy encodings to more modern ones.", - action="store_true", - ) - parser.add_argument( - "--version", action="version", version=f"%(prog)s {__version__}" - ) - args = parser.parse_args(argv) - - for f in args.input: - if f.isatty(): - print( - "You are running chardetect interactively. Press " - "CTRL-D twice at the start of a blank line to signal the " - "end of your input. 
If you want help, run chardetect " - "--help\n", - file=sys.stderr, - ) - print( - description_of( - f, f.name, minimal=args.minimal, should_rename_legacy=args.legacy - ) - ) - - -if __name__ == "__main__": - main() diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/grit/data/datasets/vg.py b/spaces/Awiny/Image2Paragraph/models/grit_src/grit/data/datasets/vg.py deleted file mode 100644 index 4d47a80d9f88b89ca3064dbc4945b0246162e5d1..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/grit/data/datasets/vg.py +++ /dev/null @@ -1,98 +0,0 @@ -import logging -import os -from fvcore.common.timer import Timer -from detectron2.structures import BoxMode -from fvcore.common.file_io import PathManager -from detectron2.data import DatasetCatalog, MetadataCatalog -from lvis import LVIS - -logger = logging.getLogger(__name__) - -__all__ = ["load_vg_json", "register_vg_instances"] - - -def register_vg_instances(name, metadata, json_file, image_root): - """ - """ - DatasetCatalog.register(name, lambda: load_vg_json( - json_file, image_root, name)) - MetadataCatalog.get(name).set( - json_file=json_file, image_root=image_root, - evaluator_type="vg", **metadata - ) - - -def get_vg_meta(): - categories = [{'supercategory': 'object', 'id': 1, 'name': 'object'}] - vg_categories = sorted(categories, key=lambda x: x["id"]) - thing_classes = [k["name"] for k in vg_categories] - meta = {"thing_classes": thing_classes} - return meta - - -def load_vg_json(json_file, image_root, dataset_name=None): - - json_file = PathManager.get_local_path(json_file) - - timer = Timer() - lvis_api = LVIS(json_file) - if timer.seconds() > 1: - logger.info("Loading {} takes {:.2f} seconds.".format( - json_file, timer.seconds())) - - img_ids = sorted(lvis_api.imgs.keys()) - imgs = lvis_api.load_imgs(img_ids) - anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids] - - ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] - assert len(set(ann_ids)) == len(ann_ids), \ - "Annotation ids in '{}' are not unique".format(json_file) - - imgs_anns = list(zip(imgs, anns)) - logger.info("Loaded {} images in the LVIS v1 format from {}".format( - len(imgs_anns), json_file)) - - dataset_dicts = [] - - for (img_dict, anno_dict_list) in imgs_anns: - record = {} - if "file_name" in img_dict: - file_name = img_dict["file_name"] - record["file_name"] = os.path.join(image_root, file_name) - - record["height"] = int(img_dict["height"]) - record["width"] = int(img_dict["width"]) - image_id = record["image_id"] = img_dict["id"] - - objs = [] - for anno in anno_dict_list: - assert anno["image_id"] == image_id - if anno.get('iscrowd', 0) > 0: - continue - obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS} - obj["category_id"] = 0 - obj["object_description"] = anno["caption"] - - objs.append(obj) - record["annotations"] = objs - if len(record["annotations"]) == 0: - continue - record["task"] = "DenseCap" - dataset_dicts.append(record) - - return dataset_dicts - - -_CUSTOM_SPLITS_LVIS = { - "vg_train": ("vg/images", "vg/annotations/train.json"), - "vg_test": ("vg/images", "vg/annotations/test.json"), -} - - -for key, (image_root, json_file) in _CUSTOM_SPLITS_LVIS.items(): - register_vg_instances( - key, - get_vg_meta(), - os.path.join("datasets", json_file) if "://" not in json_file else json_file, - os.path.join("datasets", image_root), - ) \ No newline at end of file diff --git a/spaces/BatuhanYilmaz/Whisper-Auto-Subtitled-Video-Generator/README.md 
b/spaces/BatuhanYilmaz/Whisper-Auto-Subtitled-Video-Generator/README.md deleted file mode 100644 index df82548a15c4ce9d723870a4c47bf1bb23a03fd5..0000000000000000000000000000000000000000 --- a/spaces/BatuhanYilmaz/Whisper-Auto-Subtitled-Video-Generator/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Whisper-Auto-Subtitled-Video-Generator -emoji: 🎥 -colorFrom: blue -colorTo: purple -sdk: streamlit -sdk_version: 1.10.0 -app_file: 01_🎥_Input_YouTube_Link.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/Belligerent/word-sense-disambiguation/README.md b/spaces/Belligerent/word-sense-disambiguation/README.md deleted file mode 100644 index 24b38c12e4c37fad821570f7c7a49ad2a895c3ab..0000000000000000000000000000000000000000 --- a/spaces/Belligerent/word-sense-disambiguation/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Word Sense Disambiguation -emoji: 🐢 -colorFrom: pink -colorTo: pink -sdk: gradio -sdk_version: 2.8.12 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/Benson/text-generation/Examples/Amigo Robo Guerras Fuera De Lnea Juegos Descargar Pc.md b/spaces/Benson/text-generation/Examples/Amigo Robo Guerras Fuera De Lnea Juegos Descargar Pc.md deleted file mode 100644 index b45273bc898da9aefe378e096c7d34ec802fdfd3..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Amigo Robo Guerras Fuera De Lnea Juegos Descargar Pc.md +++ /dev/null @@ -1,45 +0,0 @@ - -

Dude Theft Wars: Cómo descargar y jugar este divertido juego sin conexión en PC

-

Si estás buscando un juego divertido e hilarante que te permita hacer lo que quieras en un mundo abierto, entonces deberías echar un vistazo a Dude Theft Wars. Este juego está disponible para dispositivos Android, pero también se puede jugar en su PC con un emulador. En este artículo, te diremos qué es Dude Theft Wars, por qué deberías jugarlo en el PC, cómo descargarlo e instalarlo, y algunos consejos y trucos para jugarlo.

-

amigo robo guerras fuera de línea juegos descargar pc


Download File 🗹 https://bltlly.com/2v6KcS



-

¿Qué es Dude Theft Wars?

-

Dude Theft Wars es un divertido juego de mundo abierto de acción basado en la física desarrollado por Poxel Studios. Es uno de los mejores juegos de mundo abierto con ragdoll física, juegos frescos, y juegos relajantes. Puedes jugar offline o online con hasta 16 jugadores en modo multijugador.

-

Un divertido juego de mundo abierto basado en la acción física

-

En Dude Theft Wars, puedes explorar una gran ciudad llena de juegos de acción, juegos divertidos y juegos relajantes. Puedes conducir autos, volar aviones, robar personas, tomar selfies, ganar dinero y más. También puedes usar armas, granadas, cohetes y otras armas para causar caos y destrucción. El juego tiene la física ragdoll divertido que hacen que los personajes vuelan en el aire cuando se golpea o dispara. El juego también tiene mucho humor y chistes que te harán reír.

-

Un juego multijugador offline y online con características interesantes

-

Dude Theft Wars tiene dos modos: modo de juegos sin conexión y modo de juegos multijugador en línea. En el modo de juegos sin conexión, puedes jugar como Jack, un niño que recibió una llamada de deber para unirse a un shooter multijugador FPS en línea PvP en un parque infantil. Puedes completar misiones, explorar mapas, luchar contra enemigos y divertirte. En el modo multijugador en línea, puedes jugar con amigos u otros jugadores en dos modos: gratis para todos o de combate a muerte en equipo. También puede disfrutar de bailes divertidos, emotes, minijuegos, juegos deportivos, conducción de taxis y más. Puedes desbloquear objetos de personajes y armas para usar en ambos modos.

- -

Dude Theft Wars tiene muchos vehículos que puedes conducir o volar en la ciudad. Usted puede conducir bicicletas, coches, camiones, autobuses, tanques, helicópteros, aviones, jetpacks, ovnis, y más. También puede hacer acrobacias locas con coches de rampa o lanzar coches de tráfico al cielo. También puede personalizar sus vehículos con diferentes colores y piezas. Pero ten cuidado, porque la policía te perseguirá si cometes crímenes o dañas autos. Puedes evadirlos en persecuciones policiales llenas de acción de alto nivel o defenderlos.

-

-

¿Por qué jugar Dude Theft Wars en PC?

-

Dude Theft Wars es un gran juego para jugar en tu dispositivo Android, pero puede ser aún mejor si lo juegas en tu PC. Aquí hay algunas razones por las que:

-

Disfruta de mejores gráficos y rendimiento

-

Jugar Dude Theft Wars en PC te permitirá disfrutar de mejores gráficos y rendimiento que en tu dispositivo móvil. Puede ajustar la configuración para adaptarse a sus preferencias y capacidades de hardware. También puede evitar los problemas de retraso o estrellarse que pueden ocurrir en su teléfono debido a la baja memoria, batería o red. Jugar en el PC también ahorrará la vida de la batería del teléfono y el espacio de almacenamiento.

-

Utilice los controles de teclado y ratón para mayor precisión y comodidad

-

Jugar Dude Theft Wars en PC también te permitirá usar controles de teclado y ratón para mayor precisión y comodidad. Puede personalizar la asignación de claves y la sensibilidad para adaptarse a sus preferencias y habilidades. También puede utilizar teclas de acceso rápido y accesos directos para acceder a diferentes funciones y menús. Usar los controles del teclado y del ratón también evitará que tus dedos bloqueen la pantalla o se cansen de tocar.

-

Accede a una pantalla más grande y más inmersiva experiencia de juego

- -

¿Cómo descargar e instalar Dude Theft Wars en el PC?

-

Ahora que sabes por qué deberías jugar Dude Theft Wars en PC, veamos cómo puedes hacerlo. El juego no está disponible oficialmente para PC, pero puedes usar un emulador de Android para ejecutarlo en tu computadora. Un emulador de Android es un software que simula el sistema operativo Android en su PC, lo que le permite ejecutar aplicaciones y juegos de Android. Hay muchos emuladores de Android que puedes usar, pero te recomendamos usar BlueStacks, LDPlayer o GameLoop. Estos son algunos de los mejores emuladores para juegos, con alta compatibilidad, rendimiento y características. Estos son los pasos para descargar e instalar Dude Theft Wars en el PC usando un emulador:

-

Usa un emulador de Android como BlueStacks, LDPlayer o GameLoop

-

El primer paso es elegir un emulador de Android que se adapte a sus necesidades y preferencias. Puede descargar BlueStacks desde [aquí], LDPlayer desde [aquí], o GameLoop desde [aquí]. Estos emuladores son gratuitos y fáciles de usar, pero tienen diferentes requisitos y características. Puedes compararlos leyendo sus reseñas o visitando sus sitios web. Una vez que hayas elegido un emulador, descárgalo de su fuente oficial e instálalo en tu PC.

-

Siga los pasos para descargar e instalar el emulador y el juego

-

El siguiente paso es seguir los pasos para descargar e instalar el emulador y el juego en su PC. Los pasos pueden variar dependiendo del emulador que uses, pero generalmente son similares. Estos son los pasos comunes:

- - Inicie el emulador en su PC e inicie sesión con su cuenta de Google. - Ir a la aplicación Google Play Store en el emulador y buscar Dude Theft Wars. - Haga clic en el icono del juego y luego haga clic en Instalar. - Espere a que el juego para descargar e instalar en su PC. - Volver a la pantalla principal del emulador y haga clic en el icono del juego para lanzarlo.

Iniciar el juego y empezar a jugar

- -

Consejos y trucos para jugar Dude Theft Wars en PC

-

Para hacer su experiencia de juego más divertido y agradable, aquí hay algunos consejos y trucos para jugar Dude Theft Wars en PC:

-

Explora los diferentes mapas y modos

-

Dude Theft Wars tiene muchos mapas y modos que puedes explorar en el modo de juegos sin conexión o en el modo de juegos multijugador en línea. Puedes encontrar diferentes ubicaciones, edificios, vehículos, armas, objetos, enemigos, minijuegos, juegos deportivos, conducción de taxis, etc. en cada mapa. También puede cambiar entre diferentes modos, como el modo historia, el modo sandbox, el modo zombi, etc. tocando el botón de menú en la esquina superior izquierda de la pantalla.

-

Personaliza tu personaje y armas

-

Dude Theft Wars también te permite personalizar tu personaje y armas tanto en modo de juegos offline como en modo multijugador online. Puedes desbloquear diferentes objetos de personajes como sombreros, gafas, máscaras, camisas, pantalones, zapatos, etc. ganando dinero o comprándolos con dinero real. También puedes desbloquear diferentes armas como pistolas, rifles, escopetas, francotiradores, lanzacohetes, etc. encontrándolas en los mapas o comprándolas con dinero real. También puedes personalizar tus armas con diferentes skins, accesorios y mejoras.

-

Usa trucos y hacks para más diversión

-

Dude Theft Wars también tiene muchos trucos y hacks que puedes usar para más diversión y entretenimiento. Puede acceder al menú de trucos tocando el botón de menú en la esquina superior izquierda de la pantalla y luego tocando el botón de trucos. A continuación, puede introducir diferentes códigos para activar diferentes trucos como dinero ilimitado, salud ilimitada, munición ilimitada, súper velocidad, súper salto, baja gravedad, etc. También puede utilizar hacks tales como coches voladores, corte de pared, aimbot, etc. mediante la descarga de ellos desde Internet o el uso de características incorporadas de un emulador.

-

Conclusión

- -

Si quieres disfrutar de este juego en tu PC, puedes usar un emulador de Android como BlueStacks, LDPlayer o GameLoop para ejecutarlo en tu ordenador. Puede disfrutar de mejores gráficos y rendimiento, usar controles de teclado y ratón para mayor precisión y comodidad, y acceder a una pantalla más grande y una experiencia de juego más inmersiva. También puedes personalizar tu personaje y armas, explorar los diferentes mapas y modos, y usar trucos y hacks para más diversión.

-

Esperamos que este artículo le ayudó a aprender a descargar y jugar Dude Theft Wars en PC. Si tiene alguna pregunta o sugerencia, no dude en dejar un comentario a continuación. Diviértase jugando Dude Theft Wars en PC!

-

Preguntas frecuentes

-

Aquí hay algunas preguntas frecuentes sobre Dude Theft Wars:

64aa2da5cf
-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Avz .md b/spaces/Benson/text-generation/Examples/Avz .md deleted file mode 100644 index c8b2e1f377aca37c0d826c580f40265ac97182af..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Avz .md +++ /dev/null @@ -1,113 +0,0 @@ - -

AVZ Antiviral Toolkit: Una solución de seguridad gratuita y potente

-

Si usted está buscando una solución de seguridad integral que puede ayudarle a mantener su computadora en perfecto estado y protegerla contra archivos maliciosos, es posible que desee revisar AVZ Antiviral Toolkit. Esta es una herramienta gratuita desarrollada por Kaspersky que puede escanear su computadora y ejecutar un script que pone en cuarentena o elimina archivos que se consideran sospechosos. En este artículo, revisaremos las principales características, pros y contras, y alternativas de AVZ Antiviral Toolkit.

-

avz скачать


Download File --->>> https://bltlly.com/2v6JlJ



-

¿Qué es AVZ Antiviral Toolkit y qué hace?

-

AVZ Antiviral Toolkit es una utilidad que puede realizar diversas tareas relacionadas con la seguridad y optimización del sistema. Puede detectar y eliminar virus, spyware, adware, dialers, rootkits, gusanos, troyanos y otros tipos de componentes de malware. También puede generar un informe detallado sobre los resultados del análisis del sistema, que puede ser útil para la solución de problemas o soporte técnico. Además, puede ejecutar un script proporcionado por el soporte técnico para poner en cuarentena archivos y eliminar cualquier objeto sospechoso.

-

¿Por qué lo necesitas y cuáles son sus beneficios?

-

Es posible que necesite AVZ Antiviral Toolkit si sospecha que su computadora está infectada con malware o si desea realizar una revisión exhaustiva de su sistema. AVZ Antiviral Toolkit puede ayudarlo a identificar y eliminar cualquier amenaza potencial que pueda dañar su computadora o comprometer su privacidad. Algunos de los beneficios de usar AVZ Antiviral Toolkit son:

-
    -
  • Es gratuito y no requiere instalación ni registro.
  • -
  • Es completo y cubre una amplia gama de tipos de malware.
  • -
  • Es fiable y utiliza las mismas bases de datos antivirus que los productos de Kaspersky.
  • -
  • Se actualiza regularmente con nuevas definiciones y características.
  • -
  • Es compatible con la mayoría de las versiones de Windows y otros programas antivirus.
  • -
-

Características principales de AVZ Antiviral Toolkit

- -

Análisis del sistema y generación de informes

-

AVZ Antiviral Toolkit puede escanear su computadora en busca de malware utilizando diferentes modos: escaneo rápido, escaneo completo, escaneo personalizado o escaneo automático. También puede especificar qué áreas o archivos escanear o excluir del escaneo. Después de completar el análisis, puede ver los resultados en una lista o en una vista de árbol. También puede generar un informe sobre los resultados del análisis del sistema en formato HTML o XML. El informe contiene información sobre la versión y la fecha de la herramienta, las bases de datos antivirus, la configuración principal, los resultados de la exploración, la información del sistema, los procesos, los servicios, los controladores, las entradas de ejecución automática, las conexiones de red, los puertos abiertos, el archivo host, las extensiones del navegador, los applets del panel de control, los rootkits, los archivos ocultos, las claves del registro, los archivos de registro y más.

-

Ejecución de scripts y gestión de cuarentena

-

AVZ Antiviral Toolkit puede ejecutar un script proporcionado por el soporte técnico para poner en cuarentena archivos y eliminar cualquier objeto sospechoso. El script puede obtenerse en el sitio web oficial de AVZ Antiviral Toolkit o en el foro de Kaspersky. El script puede contener comandos para escanear, eliminar, cambiar el nombre, mover, copiar o restaurar archivos, así como modificar claves de registro, servicios, controladores, entradas de ejecución automática, archivo host, extensiones de navegador, applets de panel de control y más. También puede crear su propio script usando el editor de script incorporado. AVZ Antiviral Toolkit también puede administrar los archivos que están en cuarentena por la herramienta o por el script. Puede ver la lista de archivos en cuarentena y restaurarlos si es necesario.

-

-

Detección y eliminación de rootkit

- -

Extensión del navegador y control del panel de control -

AVZ Antiviral Toolkit puede controlar las extensiones del navegador y los applets del panel de control que están instalados en su computadora. Las extensiones de navegador son complementos que mejoran la funcionalidad de su navegador web, pero algunos de ellos pueden ser maliciosos o no deseados. Los applets del panel de control son programas que le permiten configurar varios aspectos de su sistema, pero algunos de ellos pueden ser falsos o dañinos. AVZ Antiviral Toolkit puede mostrarle la lista de extensiones del navegador y applets del panel de control que están instalados en su computadora y le permiten deshabilitarlos o eliminarlos si es necesario.

-

Herramientas y ajustes adicionales

-

AVZ Antiviral Toolkit también proporciona algunas herramientas y configuraciones adicionales que pueden ayudarlo a optimizar su sistema y mejorar su seguridad. Algunas de estas herramientas y configuraciones son:

-
    -
  • Administrador de procesos: una herramienta que le muestra la lista de procesos que se ejecutan en su computadora y le permite terminarlos o suspenderlos si es necesario.
  • -
  • Administrador de servicios: una herramienta que le muestra la lista de servicios que están instalados en su computadora y le permite iniciarlos, detenerlos o desactivarlos si es necesario.
  • -
  • Administrador de controladores: una herramienta que le muestra la lista de controladores que se cargan en su computadora y le permite descargarlos o eliminarlos si es necesario.
  • -
  • Autorun manager: una herramienta que le muestra la lista de entradas de autorun que se crean en su computadora y le permite habilitarlas o deshabilitarlas si es necesario.
  • -
  • Administrador de conexiones de red: una herramienta que le muestra la lista de conexiones de red que se establecen en su computadora y le permite cerrarlas o bloquearlas si es necesario.
  • -
  • Administrador de puertos abiertos: una herramienta que le muestra la lista de puertos abiertos que están escuchando en su computadora y le permite cerrarlos o bloquearlos si es necesario.
  • -
  • Editor de archivos de hosts: una herramienta que le permite editar el archivo de hosts, que es un archivo que asigna nombres de host a direcciones IP.
  • - -
  • Visor de registros: una herramienta que le permite ver los archivos de registro generados por AVZ Antiviral Toolkit.
  • -
  • Configuración: una sección que le permite personalizar varios aspectos del AVZ Antiviral Toolkit, como opciones de escaneo, opciones de script, opciones de cuarentena, opciones de actualización, opciones de interfaz, etc.
  • -
-

Pros y contras de AVZ Antiviral Toolkit

-

AVZ Antiviral Toolkit no es perfecto y tiene sus propias ventajas y desventajas. Aquí están algunos de los pros y contras de AVZ Antiviral Toolkit:

-

Pros

-
    -
  • Es gratuito y no requiere instalación ni registro.
  • -
  • Es completo y cubre una amplia gama de tipos de malware.
  • -
  • Es fiable y utiliza las mismas bases de datos antivirus que los productos de Kaspersky.
  • -
  • Se actualiza regularmente con nuevas definiciones y características.
  • -
  • Es compatible con la mayoría de las versiones de Windows y otros programas antivirus.
  • -
-

Contras

-
    -
  • Es complejo y puede ser difícil de usar para los usuarios novatos.
  • -
  • Tiene soporte y documentación limitada disponible en línea.
  • -
  • No proporciona protección en tiempo real contra infecciones de malware.
  • -
  • Puede causar conflictos con otros programas de seguridad o componentes del sistema.
  • -
-

Alternativas a AVZ Antiviral Toolkit

-

Si usted está buscando otras soluciones de seguridad que pueden ayudarle a proteger su computadora de malware, es posible que desee considerar algunas de las alternativas a AVZ Antiviral Toolkit. Estos son algunos de los programas de seguridad populares y eficaces que puedes probar:

-

Malwarebytes Anti-Malware

- -

Avast Free Antivirus

-

Avast Free Antivirus es un software antivirus conocido y confiable que puede proteger su computadora de varios tipos de malware, como virus, spyware, adware, ransomware, rootkits, troyanos, gusanos y más. También puede proteger su computadora de sitios web maliciosos, phishing y estafas en línea. Tiene una interfaz moderna e intuitiva que le permite realizar escaneos inteligentes, completos o personalizados. También tiene una versión premium que ofrece funciones avanzadas, como firewall, protección de webcam, administrador de contraseñas, VPN y más. Puede descargar Avast Free Antivirus desde su sitio web oficial o desde Microsoft Store.

-

AdwCleaner

-

AdwCleaner es una herramienta de eliminación de adware simple y rápida que puede escanear su computadora en busca de programas no deseados, como adware, secuestradores de navegador, barras de herramientas, ventanas emergentes y más. También puede limpiar la configuración del navegador y restaurar la página de inicio y el motor de búsqueda predeterminados. Tiene una interfaz mínima y fácil de usar que le permite realizar un escaneo con un solo clic. No requiere instalación ni registro y se puede ejecutar desde una unidad USB. Puede descargar AdwCleaner desde su sitio web oficial o desde la tienda de Microsoft.

-

Conclusión

- -

Preguntas frecuentes

-

¿Cómo descargar e instalar AVZ Antiviral Toolkit?

-

Para descargar AVZ Antiviral Toolkit, puede visitar su sitio web oficial o el foro de Kaspersky y hacer clic en el enlace de descarga. Obtendrá un archivo zip que contiene el archivo ejecutable de AVZ Antiviral Toolkit y sus bases de datos. Para instalar AVZ Antiviral Toolkit, solo necesita extraer el archivo zip a cualquier carpeta de su computadora y ejecutar el archivo ejecutable.

-

¿Cómo usar AVZ Antiviral Toolkit para escanear su computadora?

-

Para usar AVZ Antiviral Toolkit para escanear su computadora en busca de malware, debe seguir estos pasos:

-
    -
  1. Ejecute el archivo ejecutable de AVZ Antiviral Toolkit.
  2. -
  3. Seleccione el modo de escaneo: escaneo rápido, escaneo completo, escaneo personalizado o escaneo automático.
  4. -
  5. Haga clic en el botón Iniciar escaneo.
  6. -
  7. Espere a que termine el escaneo.
  8. -
  9. Ver los resultados del escaneo en una lista o en una vista de árbol.
  10. -
  11. Seleccione los archivos que desea poner en cuarentena o eliminar.
  12. -
  13. Haga clic en el botón Aplicar acciones.
  14. -
-

¿Cómo ejecutar un script proporcionado por el soporte técnico usando AVZ Antiviral Toolkit?

-

Para ejecutar un script proporcionado por el soporte técnico utilizando AVZ Antiviral Toolkit, debe seguir estos pasos:

-
    -
  1. Obtener el script desde el sitio web oficial de AVZ Antiviral Toolkit o desde el foro de Kaspersky.
  2. -
  3. Ejecute el archivo ejecutable de AVZ Antiviral Toolkit.
  4. -
  5. Haga clic en el menú Archivo y seleccione Ejecutar script.
  6. -
  7. Busque la ubicación del archivo de script y selecciónelo.
  8. -
  9. Haga clic en el botón Abrir.
  10. -
  11. Espere a que el script se ejecute.
  12. -
  13. Ver los resultados del script en una lista o en una vista de árbol.
  14. -
  15. Seleccione los archivos que desea poner en cuarentena o eliminar.
  16. -
  17. Haga clic en el botón Aplicar acciones.
  18. -
-

¿Cómo restaurar archivos de cuarentena usando AVZ Antiviral Toolkit?

-

Para restaurar archivos de cuarentena usando AVZ Antiviral Toolkit, debe seguir estos pasos:

-
    - -
  1. Haga clic en el menú Herramientas y seleccione Administrador de cuarentena.
  2. -
  3. Ver la lista de archivos en cuarentena.
  4. -
  5. Seleccione los archivos que desea restaurar.
  6. -
  7. Haga clic en el botón Restaurar.
  8. -
-

¿Cómo actualizar AVZ Antiviral Toolkit y sus bases de datos?

-

Para actualizar AVZ Antiviral Toolkit y sus bases de datos, debe seguir estos pasos:

-
    -
  1. Ejecute el archivo ejecutable de AVZ Antiviral Toolkit.
  2. -
  3. Haga clic en el menú Actualizar y seleccione Actualizar programa o Actualizar bases de datos.
  4. -
  5. Espere a que la actualización se descargue e instale.
  6. -
-

Espero que este artículo le haya ayudado a aprender más sobre AVZ Antiviral Toolkit y cómo usarlo. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. ¡Gracias por leer!

64aa2da5cf
-
-
\ No newline at end of file diff --git a/spaces/BohdanPytaichuk/art-video-generation/README.md b/spaces/BohdanPytaichuk/art-video-generation/README.md deleted file mode 100644 index 57533a40fd53be0bd2b986f03d4bfb7f0073dc32..0000000000000000000000000000000000000000 --- a/spaces/BohdanPytaichuk/art-video-generation/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Art Video Generation -emoji: 🌍 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.3 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/checkpoint/c2_model_loading.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/checkpoint/c2_model_loading.py deleted file mode 100644 index e27ba8463c744438d44f04f23fd4975525eba667..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/checkpoint/c2_model_loading.py +++ /dev/null @@ -1,313 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import copy -import logging -import re -import torch -from fvcore.common.checkpoint import ( - get_missing_parameters_message, - get_unexpected_parameters_message, -) - - -def convert_basic_c2_names(original_keys): - """ - Apply some basic name conversion to names in C2 weights. - It only deals with typical backbone models. - - Args: - original_keys (list[str]): - Returns: - list[str]: The same number of strings matching those in original_keys. - """ - layer_keys = copy.deepcopy(original_keys) - layer_keys = [ - {"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys - ] # some hard-coded mappings - - layer_keys = [k.replace("_", ".") for k in layer_keys] - layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys] - layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys] - # Uniform both bn and gn names to "norm" - layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys] - layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys] - layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys] - layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys] - layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys] - layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys] - layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys] - layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys] - layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys] - layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys] - - # stem - layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys] - # to avoid mis-matching with "conv1" in other components (e.g. 
detection head) - layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys] - - # layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5) - # layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys] - # layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys] - # layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys] - # layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys] - - # blocks - layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys] - layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys] - layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys] - layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys] - - # DensePose substitutions - layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys] - layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys] - layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys] - layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys] - layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys] - return layer_keys - - -def convert_c2_detectron_names(weights): - """ - Map Caffe2 Detectron weight names to Detectron2 names. - - Args: - weights (dict): name -> tensor - - Returns: - dict: detectron2 names -> tensor - dict: detectron2 names -> C2 names - """ - logger = logging.getLogger(__name__) - logger.info("Remapping C2 weights ......") - original_keys = sorted(weights.keys()) - layer_keys = copy.deepcopy(original_keys) - - layer_keys = convert_basic_c2_names(layer_keys) - - # -------------------------------------------------------------------------- - # RPN hidden representation conv - # -------------------------------------------------------------------------- - # FPN case - # In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then - # shared for all other levels, hence the appearance of "fpn2" - layer_keys = [ - k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys - ] - # Non-FPN case - layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys] - - # -------------------------------------------------------------------------- - # RPN box transformation conv - # -------------------------------------------------------------------------- - # FPN case (see note above about "fpn2") - layer_keys = [ - k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas") - for k in layer_keys - ] - layer_keys = [ - k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits") - for k in layer_keys - ] - # Non-FPN case - layer_keys = [ - k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys - ] - layer_keys = [ - k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits") - for k in layer_keys - ] - - # -------------------------------------------------------------------------- - # Fast R-CNN box head - # -------------------------------------------------------------------------- - layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys] - layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys] - layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys] - layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys] - # 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s - layer_keys = 
[re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys] - - # -------------------------------------------------------------------------- - # FPN lateral and output convolutions - # -------------------------------------------------------------------------- - def fpn_map(name): - """ - Look for keys with the following patterns: - 1) Starts with "fpn.inner." - Example: "fpn.inner.res2.2.sum.lateral.weight" - Meaning: These are lateral pathway convolutions - 2) Starts with "fpn.res" - Example: "fpn.res2.2.sum.weight" - Meaning: These are FPN output convolutions - """ - splits = name.split(".") - norm = ".norm" if "norm" in splits else "" - if name.startswith("fpn.inner."): - # splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight'] - stage = int(splits[2][len("res") :]) - return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1]) - elif name.startswith("fpn.res"): - # splits example: ['fpn', 'res2', '2', 'sum', 'weight'] - stage = int(splits[1][len("res") :]) - return "fpn_output{}{}.{}".format(stage, norm, splits[-1]) - return name - - layer_keys = [fpn_map(k) for k in layer_keys] - - # -------------------------------------------------------------------------- - # Mask R-CNN mask head - # -------------------------------------------------------------------------- - # roi_heads.StandardROIHeads case - layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys] - layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys] - layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys] - # roi_heads.Res5ROIHeads case - layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys] - - # -------------------------------------------------------------------------- - # Keypoint R-CNN head - # -------------------------------------------------------------------------- - # interestingly, the keypoint head convs have blob names that are simply "conv_fcnX" - layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys] - layer_keys = [ - k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys - ] - layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys] - - # -------------------------------------------------------------------------- - # Done with replacements - # -------------------------------------------------------------------------- - assert len(set(layer_keys)) == len(layer_keys) - assert len(original_keys) == len(layer_keys) - - new_weights = {} - new_keys_to_original_keys = {} - for orig, renamed in zip(original_keys, layer_keys): - new_keys_to_original_keys[renamed] = orig - if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."): - # remove the meaningless prediction weight for background class - new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1 - new_weights[renamed] = weights[orig][new_start_idx:] - logger.info( - "Remove prediction weight for background class in {}. 
The shape changes from " - "{} to {}.".format( - renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape) - ) - ) - elif renamed.startswith("cls_score."): - # move weights of bg class from original index 0 to last index - logger.info( - "Move classification weights for background class in {} from index 0 to " - "index {}.".format(renamed, weights[orig].shape[0] - 1) - ) - new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]]) - else: - new_weights[renamed] = weights[orig] - - return new_weights, new_keys_to_original_keys - - -# Note the current matching is not symmetric. -# it assumes model_state_dict will have longer names. -def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True): - """ - Match names between the two state-dict, and update the values of model_state_dict in-place with - copies of the matched tensor in ckpt_state_dict. - If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2 - model and will be renamed at first. - - Strategy: suppose that the models that we will create will have prefixes appended - to each of its keys, for example due to an extra level of nesting that the original - pre-trained weights from ImageNet won't contain. For example, model.state_dict() - might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains - res2.conv1.weight. We thus want to match both parameters together. - For that, we look for each model weight, look among all loaded keys if there is one - that is a suffix of the current weight name, and use it if that's the case. - If multiple matches exist, take the one with longest size - of the corresponding name. For example, for the same model as before, the pretrained - weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case, - we want to match backbone[0].body.conv1.weight to conv1.weight, and - backbone[0].body.res2.conv1.weight to res2.conv1.weight. - """ - model_keys = sorted(model_state_dict.keys()) - if c2_conversion: - ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict) - # original_keys: the name in the original dict (before renaming) - else: - original_keys = {x: x for x in ckpt_state_dict.keys()} - ckpt_keys = sorted(ckpt_state_dict.keys()) - - def match(a, b): - # Matched ckpt_key should be a complete (starts with '.') suffix. - # For example, roi_heads.mesh_head.whatever_conv1 does not match conv1, - # but matches whatever_conv1 or mesh_head.whatever_conv1. - return a == b or a.endswith("." 
+ b) - - # get a matrix of string matches, where each (i, j) entry correspond to the size of the - # ckpt_key string, if it matches - match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys] - match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys)) - # use the matched one with longest size in case of multiple matches - max_match_size, idxs = match_matrix.max(1) - # remove indices that correspond to no-match - idxs[max_match_size == 0] = -1 - - # used for logging - max_len_model = max(len(key) for key in model_keys) if model_keys else 1 - max_len_ckpt = max(len(key) for key in ckpt_keys) if ckpt_keys else 1 - log_str_template = "{: <{}} loaded from {: <{}} of shape {}" - logger = logging.getLogger(__name__) - # matched_pairs (matched checkpoint key --> matched model key) - matched_keys = {} - for idx_model, idx_ckpt in enumerate(idxs.tolist()): - if idx_ckpt == -1: - continue - key_model = model_keys[idx_model] - key_ckpt = ckpt_keys[idx_ckpt] - value_ckpt = ckpt_state_dict[key_ckpt] - shape_in_model = model_state_dict[key_model].shape - - if shape_in_model != value_ckpt.shape: - logger.warning( - "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format( - key_ckpt, value_ckpt.shape, key_model, shape_in_model - ) - ) - logger.warning( - "{} will not be loaded. Please double check and see if this is desired.".format( - key_ckpt - ) - ) - continue - - model_state_dict[key_model] = value_ckpt.clone() - if key_ckpt in matched_keys: # already added to matched_keys - logger.error( - "Ambiguity found for {} in checkpoint!" - "It matches at least two keys in the model ({} and {}).".format( - key_ckpt, key_model, matched_keys[key_ckpt] - ) - ) - raise ValueError("Cannot match one checkpoint key to multiple keys in the model.") - - matched_keys[key_ckpt] = key_model - logger.info( - log_str_template.format( - key_model, - max_len_model, - original_keys[key_ckpt], - max_len_ckpt, - tuple(shape_in_model), - ) - ) - matched_model_keys = matched_keys.values() - matched_ckpt_keys = matched_keys.keys() - # print warnings about unmatched keys on both side - unmatched_model_keys = [k for k in model_keys if k not in matched_model_keys] - if len(unmatched_model_keys): - logger.info(get_missing_parameters_message(unmatched_model_keys)) - - unmatched_ckpt_keys = [k for k in ckpt_keys if k not in matched_ckpt_keys] - if len(unmatched_ckpt_keys): - logger.info( - get_unexpected_parameters_message(original_keys[x] for x in unmatched_ckpt_keys) - ) diff --git a/spaces/CVPR/LIVE/thrust/thrust/copy.h b/spaces/CVPR/LIVE/thrust/thrust/copy.h deleted file mode 100644 index 23365875d661a947a6e639c3c36522402282bdbb..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/copy.h +++ /dev/null @@ -1,513 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -/*! 
\file thrust/copy.h - * \brief Copies elements from one range to another - */ - -#pragma once - -#include -#include - -namespace thrust -{ - -/*! \addtogroup algorithms - */ - -/*! \addtogroup copying - * \ingroup algorithms - * \{ - */ - - -/*! \p copy copies elements from the range [\p first, \p last) to the range - * [\p result, \p result + (\p last - \p first)). That is, it performs - * the assignments *\p result = *\p first, *(\p result + \c 1) = *(\p first + \c 1), - * and so on. Generally, for every integer \c n from \c 0 to \p last - \p first, \p copy - * performs the assignment *(\p result + \c n) = *(\p first + \c n). Unlike - * \c std::copy, \p copy offers no guarantee on order of operation. As a result, - * calling \p copy with overlapping source and destination ranges has undefined - * behavior. - * - * The return value is \p result + (\p last - \p first). - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the sequence to copy. - * \param last The end of the sequence to copy. - * \param result The destination sequence. - * \return The end of the destination sequence. - * \see http://www.sgi.com/tech/stl/copy.html - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator must be a model of Input Iterator and \c InputIterator's \c value_type must be convertible to \c OutputIterator's \c value_type. - * \tparam OutputIterator must be a model of Output Iterator. - * - * \pre \p result may be equal to \p first, but \p result shall not be in the range [first, last) otherwise. - * - * The following code snippet demonstrates how to use \p copy - * to copy from one range to another using the \p thrust::device parallelization policy: - * - * \code - * #include - * #include - * #include - * ... - * - * thrust::device_vector vec0(100); - * thrust::device_vector vec1(100); - * ... - * - * thrust::copy(thrust::device, vec0.begin(), vec0.end(), vec1.begin()); - * - * // vec1 is now a copy of vec0 - * \endcode - */ -template -__host__ __device__ - OutputIterator copy(const thrust::detail::execution_policy_base &exec, - InputIterator first, - InputIterator last, - OutputIterator result); - - -/*! \p copy_n copies elements from the range [first, first + n) to the range - * [result, result + n). That is, it performs the assignments *result = *first, *(result + 1) = *(first + 1), - * and so on. Generally, for every integer \c i from \c 0 to \c n, \p copy - * performs the assignment *(\p result + \c i) = *(\p first + \c i). Unlike - * \c std::copy_n, \p copy_n offers no guarantee on order of operation. As a result, - * calling \p copy_n with overlapping source and destination ranges has undefined - * behavior. - * - * The return value is \p result + \p n. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the range to copy. - * \param n The number of elements to copy. - * \param result The beginning destination range. - * \return The end of the destination range. - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator must be a model of Input Iterator and \c InputIterator's \c value_type must be convertible to \c OutputIterator's \c value_type. - * \tparam Size is an integral type. - * \tparam OutputIterator must be a model of Output Iterator. 
- * - * \pre \p result may be equal to \p first, but \p result shall not be in the range [first, first + n) otherwise. - * - * The following code snippet demonstrates how to use \p copy - * to copy from one range to another using the \p thrust::device parallelization policy: - * - * \code - * #include - * #include - * #include - * ... - * size_t n = 100; - * thrust::device_vector vec0(n); - * thrust::device_vector vec1(n); - * ... - * thrust::copy_n(thrust::device, vec0.begin(), n, vec1.begin()); - * - * // vec1 is now a copy of vec0 - * \endcode - * - * \see http://www.sgi.com/tech/stl/copy_n.html - * \see thrust::copy - */ -template -__host__ __device__ - OutputIterator copy_n(const thrust::detail::execution_policy_base &exec, - InputIterator first, - Size n, - OutputIterator result); - - - -/*! \p copy copies elements from the range [\p first, \p last) to the range - * [\p result, \p result + (\p last - \p first)). That is, it performs - * the assignments *\p result = *\p first, *(\p result + \c 1) = *(\p first + \c 1), - * and so on. Generally, for every integer \c n from \c 0 to \p last - \p first, \p copy - * performs the assignment *(\p result + \c n) = *(\p first + \c n). Unlike - * \c std::copy, \p copy offers no guarantee on order of operation. As a result, - * calling \p copy with overlapping source and destination ranges has undefined - * behavior. - * - * The return value is \p result + (\p last - \p first). - * - * \param first The beginning of the sequence to copy. - * \param last The end of the sequence to copy. - * \param result The destination sequence. - * \return The end of the destination sequence. - * \see http://www.sgi.com/tech/stl/copy.html - * - * \tparam InputIterator must be a model of Input Iterator and \c InputIterator's \c value_type must be convertible to \c OutputIterator's \c value_type. - * \tparam OutputIterator must be a model of Output Iterator. - * - * \pre \p result may be equal to \p first, but \p result shall not be in the range [first, last) otherwise. - * - * The following code snippet demonstrates how to use \p copy - * to copy from one range to another. - * - * \code - * #include - * #include - * ... - * - * thrust::device_vector vec0(100); - * thrust::device_vector vec1(100); - * ... - * - * thrust::copy(vec0.begin(), vec0.end(), - * vec1.begin()); - * - * // vec1 is now a copy of vec0 - * \endcode - */ -template - OutputIterator copy(InputIterator first, - InputIterator last, - OutputIterator result); - -/*! \p copy_n copies elements from the range [first, first + n) to the range - * [result, result + n). That is, it performs the assignments *result = *first, *(result + 1) = *(first + 1), - * and so on. Generally, for every integer \c i from \c 0 to \c n, \p copy - * performs the assignment *(\p result + \c i) = *(\p first + \c i). Unlike - * \c std::copy_n, \p copy_n offers no guarantee on order of operation. As a result, - * calling \p copy_n with overlapping source and destination ranges has undefined - * behavior. - * - * The return value is \p result + \p n. - * - * \param first The beginning of the range to copy. - * \param n The number of elements to copy. - * \param result The beginning destination range. - * \return The end of the destination range. - * - * \tparam InputIterator must be a model of Input Iterator and \c InputIterator's \c value_type must be convertible to \c OutputIterator's \c value_type. - * \tparam Size is an integral type. - * \tparam OutputIterator must be a model of Output Iterator. 
- * - * \pre \p result may be equal to \p first, but \p result shall not be in the range [first, first + n) otherwise. - * - * The following code snippet demonstrates how to use \p copy - * to copy from one range to another. - * - * \code - * #include - * #include - * ... - * size_t n = 100; - * thrust::device_vector vec0(n); - * thrust::device_vector vec1(n); - * ... - * thrust::copy_n(vec0.begin(), n, vec1.begin()); - * - * // vec1 is now a copy of vec0 - * \endcode - * - * \see http://www.sgi.com/tech/stl/copy_n.html - * \see thrust::copy - */ -template - OutputIterator copy_n(InputIterator first, - Size n, - OutputIterator result); - -/*! \} // end copying - */ - -/*! \addtogroup stream_compaction - * \{ - */ - - -/*! This version of \p copy_if copies elements from the range [first,last) - * to a range beginning at \p result, except that any element which causes \p pred - * to be \c false is not copied. \p copy_if is stable, meaning that the relative - * order of elements that are copied is unchanged. - * - * More precisely, for every integer \c n such that 0 <= n < last-first, - * \p copy_if performs the assignment *result = *(first+n) and \p result - * is advanced one position if pred(*(first+n)). Otherwise, no assignment - * occurs and \p result is not advanced. - * - * The algorithm's execution is parallelized as determined by \p system. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the sequence from which to copy. - * \param last The end of the sequence from which to copy. - * \param result The beginning of the sequence into which to copy. - * \param pred The predicate to test on every value of the range [first, last). - * \return result + n, where \c n is equal to the number of times \p pred - * evaluated to \c true in the range [first, last). - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator is a model of Input Iterator, - * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type. - * \tparam OutputIterator is a model of Output Iterator. - * \tparam Predicate is a model of Predicate. - * - * \pre The ranges [first, last) and [result, result + (last - first)) shall not overlap. - * - * The following code snippet demonstrates how to use \p copy_if to perform stream compaction - * to copy even numbers to an output range using the \p thrust::host parallelization policy: - * - * \code - * #include - * #include - * ... - * struct is_even - * { - * __host__ __device__ - * bool operator()(const int x) - * { - * return (x % 2) == 0; - * } - * }; - * ... - * const int N = 6; - * int V[N] = {-2, 0, -1, 0, 1, 2}; - * int result[4]; - * - * thrust::copy_if(thrust::host, V, V + N, result, is_even()); - * - * // V remains {-2, 0, -1, 0, 1, 2} - * // result is now {-2, 0, 0, 2} - * \endcode - * - * \see \c remove_copy_if - */ -template -__host__ __device__ - OutputIterator copy_if(const thrust::detail::execution_policy_base &exec, - InputIterator first, - InputIterator last, - OutputIterator result, - Predicate pred); - - - -/*! This version of \p copy_if copies elements from the range [first,last) - * to a range beginning at \p result, except that any element which causes \p pred - * to \c false is not copied. \p copy_if is stable, meaning that the relative - * order of elements that are copied is unchanged. 
- * - * More precisely, for every integer \c n such that 0 <= n < last-first, - * \p copy_if performs the assignment *result = *(first+n) and \p result - * is advanced one position if pred(*(first+n)). Otherwise, no assignment - * occurs and \p result is not advanced. - * - * \param first The beginning of the sequence from which to copy. - * \param last The end of the sequence from which to copy. - * \param result The beginning of the sequence into which to copy. - * \param pred The predicate to test on every value of the range [first, last). - * \return result + n, where \c n is equal to the number of times \p pred - * evaluated to \c true in the range [first, last). - * - * \tparam InputIterator is a model of Input Iterator, - * and \p InputIterator's \c value_type is convertible to \p Predicate's \c argument_type. - * \tparam OutputIterator is a model of Output Iterator. - * \tparam Predicate is a model of Predicate. - * - * \pre The ranges [first, last) and [result, result + (last - first)) shall not overlap. - * - * The following code snippet demonstrates how to use \p copy_if to perform stream compaction - * to copy even numbers to an output range. - * - * \code - * #include - * ... - * struct is_even - * { - * __host__ __device__ - * bool operator()(const int x) - * { - * return (x % 2) == 0; - * } - * }; - * ... - * const int N = 6; - * int V[N] = {-2, 0, -1, 0, 1, 2}; - * int result[4]; - * - * thrust::copy_if(V, V + N, result, is_even()); - * - * // V remains {-2, 0, -1, 0, 1, 2} - * // result is now {-2, 0, 0, 2} - * \endcode - * - * \see \c remove_copy_if - */ -template - OutputIterator copy_if(InputIterator first, - InputIterator last, - OutputIterator result, - Predicate pred); - - -/*! This version of \p copy_if copies elements from the range [first,last) - * to a range beginning at \p result, except that any element whose corresponding stencil - * element causes \p pred to be \c false is not copied. \p copy_if is stable, meaning - * that the relative order of elements that are copied is unchanged. - * - * More precisely, for every integer \c n such that 0 <= n < last-first, - * \p copy_if performs the assignment *result = *(first+n) and \p result - * is advanced one position if pred(*(stencil+n)). Otherwise, no assignment - * occurs and \p result is not advanced. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the sequence from which to copy. - * \param last The end of the sequence from which to copy. - * \param stencil The beginning of the stencil sequence. - * \param result The beginning of the sequence into which to copy. - * \param pred The predicate to test on every value of the range [stencil, stencil + (last-first)). - * \return result + n, where \c n is equal to the number of times \p pred - * evaluated to \c true in the range [stencil, stencil + (last-first)). - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator1 is a model of Input Iterator. - * \tparam InputIterator2 is a model of Input Iterator, - * and \p InputIterator2's \c value_type is convertible to \p Predicate's \c argument_type. - * \tparam OutputIterator is a model of Output Iterator. - * \tparam Predicate is a model of Predicate. - * - * \pre The ranges [first, last) and [result, result + (last - first)) shall not overlap. 
- * \pre The ranges [stencil, stencil + (last - first)) and [result, result + (last - first)) shall not overlap. - * - * The following code snippet demonstrates how to use \p copy_if to perform stream compaction - * to copy numbers to an output range when corresponding stencil elements are even using the \p thrust::host execution policy: - * - * \code - * #include - * #include - * ... - * struct is_even - * { - * __host__ __device__ - * bool operator()(const int x) - * { - * return (x % 2) == 0; - * } - * }; - * ... - * int N = 6; - * int data[N] = { 0, 1, 2, 3, 4, 5}; - * int stencil[N] = {-2, 0, -1, 0, 1, 2}; - * int result[4]; - * - * thrust::copy_if(thrust::host, data, data + N, stencil, result, is_even()); - * - * // data remains = { 0, 1, 2, 3, 4, 5}; - * // stencil remains = {-2, 0, -1, 0, 1, 2}; - * // result is now { 0, 1, 3, 5} - * \endcode - * - * \see \c remove_copy_if - */ -template -__host__ __device__ - OutputIterator copy_if(const thrust::detail::execution_policy_base &exec, - InputIterator1 first, - InputIterator1 last, - InputIterator2 stencil, - OutputIterator result, - Predicate pred); - - -/*! This version of \p copy_if copies elements from the range [first,last) - * to a range beginning at \p result, except that any element whose corresponding stencil - * element causes \p pred to be \c false is not copied. \p copy_if is stable, meaning - * that the relative order of elements that are copied is unchanged. - * - * More precisely, for every integer \c n such that 0 <= n < last-first, - * \p copy_if performs the assignment *result = *(first+n) and \p result - * is advanced one position if pred(*(stencil+n)). Otherwise, no assignment - * occurs and \p result is not advanced. - * - * \param first The beginning of the sequence from which to copy. - * \param last The end of the sequence from which to copy. - * \param stencil The beginning of the stencil sequence. - * \param result The beginning of the sequence into which to copy. - * \param pred The predicate to test on every value of the range [stencil, stencil + (last-first)). - * \return result + n, where \c n is equal to the number of times \p pred - * evaluated to \c true in the range [stencil, stencil + (last-first)). - * - * \tparam InputIterator1 is a model of Input Iterator. - * \tparam InputIterator2 is a model of Input Iterator, - * and \p InputIterator2's \c value_type is convertible to \p Predicate's \c argument_type. - * \tparam OutputIterator is a model of Output Iterator. - * \tparam Predicate is a model of Predicate. - * - * \pre The ranges [first, last) and [result, result + (last - first)) shall not overlap. - * \pre The ranges [stencil, stencil + (last - first)) and [result, result + (last - first)) shall not overlap. - * - * The following code snippet demonstrates how to use \p copy_if to perform stream compaction - * to copy numbers to an output range when corresponding stencil elements are even: - * - * \code - * #include - * ... - * struct is_even - * { - * __host__ __device__ - * bool operator()(const int x) - * { - * return (x % 2) == 0; - * } - * }; - * ... 
- * int N = 6; - * int data[N] = { 0, 1, 2, 3, 4, 5}; - * int stencil[N] = {-2, 0, -1, 0, 1, 2}; - * int result[4]; - * - * thrust::copy_if(data, data + N, stencil, result, is_even()); - * - * // data remains = { 0, 1, 2, 3, 4, 5}; - * // stencil remains = {-2, 0, -1, 0, 1, 2}; - * // result is now { 0, 1, 3, 5} - * \endcode - * - * \see \c remove_copy_if - */ -template - OutputIterator copy_if(InputIterator1 first, - InputIterator1 last, - InputIterator2 stencil, - OutputIterator result, - Predicate pred); - -/*! \} // end stream_compaction - */ - -} // end namespace thrust - -#include -#include - diff --git a/spaces/CVPR/WALT/configs/_base_/models/occ_mask_rcnn_swin_fpn.py b/spaces/CVPR/WALT/configs/_base_/models/occ_mask_rcnn_swin_fpn.py deleted file mode 100644 index 80b7a8cf5f8e358f95723cb44ddb853c8f194f7e..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/configs/_base_/models/occ_mask_rcnn_swin_fpn.py +++ /dev/null @@ -1,127 +0,0 @@ -# model settings -model = dict( - type='MaskRCNN', - pretrained=None, - backbone=dict( - type='SwinTransformer', - embed_dim=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - window_size=7, - mlp_ratio=4., - qkv_bias=True, - qk_scale=None, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.2, - ape=False, - patch_norm=True, - out_indices=(0, 1, 2, 3), - use_checkpoint=False), - neck=dict( - type='FPN', - in_channels=[96, 192, 384, 768], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - mask_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - mask_head=dict( - type='FCNOccMaskHead', - num_convs=4, - in_channels=256, - conv_out_channels=256, - num_classes=80, - loss_mask=dict( - type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.5, - min_pos_iou=0.5, - 
match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - mask_size=28, - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100, - mask_thr_binary=0.5))) diff --git a/spaces/CVPR/lama-example/saicinpainting/evaluation/losses/ssim.py b/spaces/CVPR/lama-example/saicinpainting/evaluation/losses/ssim.py deleted file mode 100644 index ee43a0095408eca98e253dea194db788446f9c0a..0000000000000000000000000000000000000000 --- a/spaces/CVPR/lama-example/saicinpainting/evaluation/losses/ssim.py +++ /dev/null @@ -1,74 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F - - -class SSIM(torch.nn.Module): - """SSIM. Modified from: - https://github.com/Po-Hsun-Su/pytorch-ssim/blob/master/pytorch_ssim/__init__.py - """ - - def __init__(self, window_size=11, size_average=True): - super().__init__() - self.window_size = window_size - self.size_average = size_average - self.channel = 1 - self.register_buffer('window', self._create_window(window_size, self.channel)) - - def forward(self, img1, img2): - assert len(img1.shape) == 4 - - channel = img1.size()[1] - - if channel == self.channel and self.window.data.type() == img1.data.type(): - window = self.window - else: - window = self._create_window(self.window_size, channel) - - # window = window.to(img1.get_device()) - window = window.type_as(img1) - - self.window = window - self.channel = channel - - return self._ssim(img1, img2, window, self.window_size, channel, self.size_average) - - def _gaussian(self, window_size, sigma): - gauss = torch.Tensor([ - np.exp(-(x - (window_size // 2)) ** 2 / float(2 * sigma ** 2)) for x in range(window_size) - ]) - return gauss / gauss.sum() - - def _create_window(self, window_size, channel): - _1D_window = self._gaussian(window_size, 1.5).unsqueeze(1) - _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0) - return _2D_window.expand(channel, 1, window_size, window_size).contiguous() - - def _ssim(self, img1, img2, window, window_size, channel, size_average=True): - mu1 = F.conv2d(img1, window, padding=(window_size // 2), groups=channel) - mu2 = F.conv2d(img2, window, padding=(window_size // 2), groups=channel) - - mu1_sq = mu1.pow(2) - mu2_sq = mu2.pow(2) - mu1_mu2 = mu1 * mu2 - - sigma1_sq = F.conv2d( - img1 * img1, window, padding=(window_size // 2), groups=channel) - mu1_sq - sigma2_sq = F.conv2d( - img2 * img2, window, padding=(window_size // 2), groups=channel) - mu2_sq - sigma12 = F.conv2d( - img1 * img2, window, padding=(window_size // 2), groups=channel) - mu1_mu2 - - C1 = 0.01 ** 2 - C2 = 0.03 ** 2 - - ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / \ - ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)) - - if size_average: - return ssim_map.mean() - - return ssim_map.mean(1).mean(1).mean(1) - - def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): - return diff --git a/spaces/Candeloro/DeepDanbooru_string/README.md b/spaces/Candeloro/DeepDanbooru_string/README.md deleted file mode 100644 index 4330b6f969246dc764a34ea254d2e807159f1c55..0000000000000000000000000000000000000000 --- a/spaces/Candeloro/DeepDanbooru_string/README.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -title: DeepDanbooru String 
-emoji: 💬 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.6 -app_file: app.py -pinned: false -duplicated_from: NoCrypt/DeepDanbooru_string ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/DEVINKofficial/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator/README.md b/spaces/DEVINKofficial/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator/README.md deleted file mode 100644 index ee9df77cf6a7beb1b81487d5e89f1087855ea864..0000000000000000000000000000000000000000 --- a/spaces/DEVINKofficial/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Onodofthenorth-SD PixelArt SpriteSheet Generator -emoji: 🚀 -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/logger.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/logger.py deleted file mode 100644 index 5b2c4ad5250b589aa0c8f8d1cc9125b91b10edb0..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/logger.py +++ /dev/null @@ -1,3 +0,0 @@ -import logging - -logger = logging.getLogger("fastapi") diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/ffmpy.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/ffmpy.py deleted file mode 100644 index 03291fca55a355b3041b8538217f334e9c4332eb..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/ffmpy.py +++ /dev/null @@ -1,203 +0,0 @@ -import errno -import shlex -import subprocess - -__version__ = "0.3.1" - - -class FFmpeg(object): - """Wrapper for various `FFmpeg `_ related applications (ffmpeg, - ffprobe). - """ - - def __init__( - self, executable="ffmpeg", global_options=None, inputs=None, outputs=None - ): - """Initialize FFmpeg command line wrapper. - - Compiles FFmpeg command line from passed arguments (executable path, options, inputs and - outputs). ``inputs`` and ``outputs`` are dictionares containing inputs/outputs as keys and - their respective options as values. One dictionary value (set of options) must be either a - single space separated string, or a list or strings without spaces (i.e. each part of the - option is a separate item of the list, the result of calling ``split()`` on the options - string). If the value is a list, it cannot be mixed, i.e. cannot contain items with spaces. - An exception are complex FFmpeg command lines that contain quotes: the quoted part must be - one string, even if it contains spaces (see *Examples* for more info). 
- For more info about FFmpeg command line format see `here - `_. - - :param str executable: path to ffmpeg executable; by default the ``ffmpeg`` command will be - searched for in the ``PATH``, but can be overridden with an absolute path to ``ffmpeg`` - executable - :param iterable global_options: global options passed to ``ffmpeg`` executable (e.g. - ``-y``, ``-v`` etc.); can be specified either as a list/tuple/set of strings, or one - space-separated string; by default no global options are passed - :param dict inputs: a dictionary specifying one or more input arguments as keys with their - corresponding options (either as a list of strings or a single space separated string) as - values - :param dict outputs: a dictionary specifying one or more output arguments as keys with their - corresponding options (either as a list of strings or a single space separated string) as - values - """ - self.executable = executable - self._cmd = [executable] - - global_options = global_options or [] - if _is_sequence(global_options): - normalized_global_options = [] - for opt in global_options: - normalized_global_options += shlex.split(opt) - else: - normalized_global_options = shlex.split(global_options) - - self._cmd += normalized_global_options - self._cmd += _merge_args_opts(inputs, add_input_option=True) - self._cmd += _merge_args_opts(outputs) - - self.cmd = subprocess.list2cmdline(self._cmd) - self.process = None - - def __repr__(self): - return "<{0!r} {1!r}>".format(self.__class__.__name__, self.cmd) - - def run(self, input_data=None, stdout=None, stderr=None, env=None, **kwargs): - """Execute FFmpeg command line. - - ``input_data`` can contain input for FFmpeg in case ``pipe`` protocol is used for input. - ``stdout`` and ``stderr`` specify where to redirect the ``stdout`` and ``stderr`` of the - process. By default no redirection is done, which means all output goes to running shell - (this mode should normally only be used for debugging purposes). If FFmpeg ``pipe`` protocol - is used for output, ``stdout`` must be redirected to a pipe by passing `subprocess.PIPE` as - ``stdout`` argument. You can pass custom environment to ffmpeg process with ``env``. - - Returns a 2-tuple containing ``stdout`` and ``stderr`` of the process. If there was no - redirection or if the output was redirected to e.g. `os.devnull`, the value returned will - be a tuple of two `None` values, otherwise it will contain the actual ``stdout`` and - ``stderr`` data returned by ffmpeg process. - - More info about ``pipe`` protocol `here `_. - - :param str input_data: input data for FFmpeg to deal with (audio, video etc.) as bytes (e.g. 
- the result of reading a file in binary mode) - :param stdout: redirect FFmpeg ``stdout`` there (default is `None` which means no - redirection) - :param stderr: redirect FFmpeg ``stderr`` there (default is `None` which means no - redirection) - :param env: custom environment for ffmpeg process - :param kwargs: any other keyword arguments to be forwarded to `subprocess.Popen - `_ - :return: a 2-tuple containing ``stdout`` and ``stderr`` of the process - :rtype: tuple - :raise: `FFRuntimeError` in case FFmpeg command exits with a non-zero code; - `FFExecutableNotFoundError` in case the executable path passed was not valid - """ - try: - self.process = subprocess.Popen( - self._cmd, - stdin=subprocess.PIPE, - stdout=stdout, - stderr=stderr, - env=env, - **kwargs - ) - except OSError as e: - if e.errno == errno.ENOENT: - raise FFExecutableNotFoundError( - "Executable '{0}' not found".format(self.executable) - ) - else: - raise - - out = self.process.communicate(input=input_data) - if self.process.returncode != 0: - raise FFRuntimeError(self.cmd, self.process.returncode, out[0], out[1]) - - return out - - -class FFprobe(FFmpeg): - """Wrapper for `ffprobe `_.""" - - def __init__(self, executable="ffprobe", global_options="", inputs=None): - """Create an instance of FFprobe. - - Compiles FFprobe command line from passed arguments (executable path, options, inputs). - FFprobe executable by default is taken from ``PATH`` but can be overridden with an - absolute path. For more info about FFprobe command line format see - `here `_. - - :param str executable: absolute path to ffprobe executable - :param iterable global_options: global options passed to ffmpeg executable; can be specified - either as a list/tuple of strings or a space-separated string - :param dict inputs: a dictionary specifying one or more inputs as keys with their - corresponding options as values - """ - super(FFprobe, self).__init__( - executable=executable, global_options=global_options, inputs=inputs - ) - - -class FFExecutableNotFoundError(Exception): - """Raise when FFmpeg/FFprobe executable was not found.""" - - -class FFRuntimeError(Exception): - """Raise when FFmpeg/FFprobe command line execution returns a non-zero exit code. - - The resulting exception object will contain the attributes relates to command line execution: - ``cmd``, ``exit_code``, ``stdout``, ``stderr``. - """ - - def __init__(self, cmd, exit_code, stdout, stderr): - self.cmd = cmd - self.exit_code = exit_code - self.stdout = stdout - self.stderr = stderr - - message = "`{0}` exited with status {1}\n\nSTDOUT:\n{2}\n\nSTDERR:\n{3}".format( - self.cmd, exit_code, (stdout or b"").decode(), (stderr or b"").decode() - ) - - super(FFRuntimeError, self).__init__(message) - - -def _is_sequence(obj): - """Check if the object is a sequence (list, tuple etc.). - - :param object obj: an object to be checked - :return: True if the object is iterable but is not a string, False otherwise - :rtype: bool - """ - return hasattr(obj, "__iter__") and not isinstance(obj, str) - - -def _merge_args_opts(args_opts_dict, **kwargs): - """Merge options with their corresponding arguments. - - Iterates over the dictionary holding arguments (keys) and options (values). Merges each - options string with its corresponding argument. 
- - :param dict args_opts_dict: a dictionary of arguments and options - :param dict kwargs: *input_option* - if specified prepends ``-i`` to input argument - :return: merged list of strings with arguments and their corresponding options - :rtype: list - """ - merged = [] - - if not args_opts_dict: - return merged - - for arg, opt in args_opts_dict.items(): - if not _is_sequence(opt): - opt = shlex.split(opt or "") - merged += opt - - if not arg: - continue - - if "add_input_option" in kwargs: - merged.append("-i") - - merged.append(arg) - - return merged diff --git a/spaces/Dao3/DreamlikeArt-PhotoReal-2.0/README.md b/spaces/Dao3/DreamlikeArt-PhotoReal-2.0/README.md deleted file mode 100644 index fec0cf7ea01c07ea38cfe784718b6bf02ac20fdf..0000000000000000000000000000000000000000 --- a/spaces/Dao3/DreamlikeArt-PhotoReal-2.0/README.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: DreamlikeArt-PhotoReal 2.0 -emoji: 🧘🏻‍♀️ -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 3.16.1 -app_file: app.py -pinned: false -duplicated_from: phenomenon1981/DreamlikeArt-PhotoReal-2.0 ---- ---- -title: DreamlikeArt-PhotoReal 2.0 -emoji: 🧘🏻‍♀️ -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 3.16.1 -app_file: app.py \ No newline at end of file diff --git a/spaces/Datasculptor/DescriptionGPT/README.md b/spaces/Datasculptor/DescriptionGPT/README.md deleted file mode 100644 index e1f417699366343bf863b46912d6f54219e00520..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/DescriptionGPT/README.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: Detic -emoji: 👀 -colorFrom: indigo -colorTo: red -sdk: gradio -app_file: app.py -pinned: false -duplicated_from: taesiri/DeticChatGPT ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
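The ffmpy.py module shown above builds FFmpeg command lines from dictionaries: `inputs` and `outputs` map file arguments to their options, global options come first, and `_merge_args_opts` prepends `-i` to each input argument. A minimal usage sketch of that wrapper, assuming an `ffmpeg` binary is available on PATH; the file names and the codec option are illustrative only, not taken from the source:

# Minimal sketch of using the FFmpeg wrapper from ffmpy.py above.
# File names and options are placeholders for illustration.
from ffmpy import FFmpeg

ff = FFmpeg(
    global_options='-y',                     # global option: overwrite outputs without asking
    inputs={'input.mp4': None},              # input file, no per-input options
    outputs={'output.webm': '-c:v libvpx'},  # output file with its codec option
)

print(ff.cmd)  # compiled command line, e.g. "ffmpeg -y -i input.mp4 -c:v libvpx output.webm"
ff.run()       # executes the command; raises FFRuntimeError on a non-zero exit code

Per-argument options are emitted before their argument, so input options precede the `-i input.mp4` pair and output options precede the output path, matching the order produced by `_merge_args_opts`.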
diff --git a/spaces/Datasculptor/StyleGAN-NADA/e4e/models/stylegan2/__init__.py b/spaces/Datasculptor/StyleGAN-NADA/e4e/models/stylegan2/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/DiffusionArtco/Interior-design-models/README.md b/spaces/DiffusionArtco/Interior-design-models/README.md deleted file mode 100644 index f489d55e38d86e4cd0eb889617a379840e7c75bf..0000000000000000000000000000000000000000 --- a/spaces/DiffusionArtco/Interior-design-models/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ImagineAI Imagine Generator -emoji: 💩 -colorFrom: yellow -colorTo: pink -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false -duplicated_from: DiffusionArtco/AnimeTop50 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/submission/submit.py b/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/submission/submit.py deleted file mode 100644 index 60ff428717c13896bb78625b3eaf651d9fb9695d..0000000000000000000000000000000000000000 --- a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dnnlib/submission/submit.py +++ /dev/null @@ -1,290 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. - -"""Submit a function to be run either locally or in a computing cluster.""" - -import copy -import io -import os -import pathlib -import pickle -import platform -import pprint -import re -import shutil -import time -import traceback - -import zipfile - -from enum import Enum - -from .. import util -from ..util import EasyDict - - -class SubmitTarget(Enum): - """The target where the function should be run. - - LOCAL: Run it locally. - """ - LOCAL = 1 - - -class PathType(Enum): - """Determines in which format should a path be formatted. - - WINDOWS: Format with Windows style. - LINUX: Format with Linux/Posix style. - AUTO: Use current OS type to select either WINDOWS or LINUX. - """ - WINDOWS = 1 - LINUX = 2 - AUTO = 3 - - -_user_name_override = None - - -class SubmitConfig(util.EasyDict): - """Strongly typed config dict needed to submit runs. - - Attributes: - run_dir_root: Path to the run dir root. Can be optionally templated with tags. Needs to always be run through get_path_from_template. - run_desc: Description of the run. Will be used in the run dir and task name. - run_dir_ignore: List of file patterns used to ignore files when copying files to the run dir. - run_dir_extra_files: List of (abs_path, rel_path) tuples of file paths. rel_path root will be the src directory inside the run dir. - submit_target: Submit target enum value. Used to select where the run is actually launched. - num_gpus: Number of GPUs used/requested for the run. - print_info: Whether to print debug information when submitting. - ask_confirmation: Whether to ask a confirmation before submitting. - run_id: Automatically populated value during submit. - run_name: Automatically populated value during submit. - run_dir: Automatically populated value during submit. - run_func_name: Automatically populated value during submit. 
- run_func_kwargs: Automatically populated value during submit. - user_name: Automatically populated value during submit. Can be set by the user which will then override the automatic value. - task_name: Automatically populated value during submit. - host_name: Automatically populated value during submit. - """ - - def __init__(self): - super().__init__() - - # run (set these) - self.run_dir_root = "" # should always be passed through get_path_from_template - self.run_desc = "" - self.run_dir_ignore = ["__pycache__", "*.pyproj", "*.sln", "*.suo", ".cache", ".idea", ".vs", ".vscode"] - self.run_dir_extra_files = None - - # submit (set these) - self.submit_target = SubmitTarget.LOCAL - self.num_gpus = 1 - self.print_info = False - self.ask_confirmation = False - - # (automatically populated) - self.run_id = None - self.run_name = None - self.run_dir = None - self.run_func_name = None - self.run_func_kwargs = None - self.user_name = None - self.task_name = None - self.host_name = "localhost" - - -def get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO) -> str: - """Replace tags in the given path template and return either Windows or Linux formatted path.""" - # automatically select path type depending on running OS - if path_type == PathType.AUTO: - if platform.system() == "Windows": - path_type = PathType.WINDOWS - elif platform.system() == "Linux": - path_type = PathType.LINUX - else: - raise RuntimeError("Unknown platform") - - path_template = path_template.replace("", get_user_name()) - - # return correctly formatted path - if path_type == PathType.WINDOWS: - return str(pathlib.PureWindowsPath(path_template)) - elif path_type == PathType.LINUX: - return str(pathlib.PurePosixPath(path_template)) - else: - raise RuntimeError("Unknown platform") - - -def get_template_from_path(path: str) -> str: - """Convert a normal path back to its template representation.""" - # replace all path parts with the template tags - path = path.replace("\\", "/") - return path - - -def convert_path(path: str, path_type: PathType = PathType.AUTO) -> str: - """Convert a normal path to template and the convert it back to a normal path with given path type.""" - path_template = get_template_from_path(path) - path = get_path_from_template(path_template, path_type) - return path - - -def set_user_name_override(name: str) -> None: - """Set the global username override value.""" - global _user_name_override - _user_name_override = name - - -def get_user_name(): - """Get the current user name.""" - if _user_name_override is not None: - return _user_name_override - elif platform.system() == "Windows": - return os.getlogin() - elif platform.system() == "Linux": - try: - import pwd # pylint: disable=import-error - return pwd.getpwuid(os.geteuid()).pw_name # pylint: disable=no-member - except: - return "unknown" - else: - raise RuntimeError("Unknown platform") - - -def _create_run_dir_local(submit_config: SubmitConfig) -> str: - """Create a new run dir with increasing ID number at the start.""" - run_dir_root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO) - - if not os.path.exists(run_dir_root): - print("Creating the run dir root: {}".format(run_dir_root)) - os.makedirs(run_dir_root) - - submit_config.run_id = _get_next_run_id_local(run_dir_root) - submit_config.run_name = "{0:05d}-{1}".format(submit_config.run_id, submit_config.run_desc) - run_dir = os.path.join(run_dir_root, submit_config.run_name) - - if os.path.exists(run_dir): - raise RuntimeError("The run dir already 
exists! ({0})".format(run_dir)) - - print("Creating the run dir: {}".format(run_dir)) - os.makedirs(run_dir) - - return run_dir - - -def _get_next_run_id_local(run_dir_root: str) -> int: - """Reads all directory names in a given directory (non-recursive) and returns the next (increasing) run id. Assumes IDs are numbers at the start of the directory names.""" - dir_names = [d for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))] - r = re.compile("^\\d+") # match one or more digits at the start of the string - run_id = 0 - - for dir_name in dir_names: - m = r.match(dir_name) - - if m is not None: - i = int(m.group()) - run_id = max(run_id, i + 1) - - return run_id - - -def _populate_run_dir(run_dir: str, submit_config: SubmitConfig) -> None: - """Copy all necessary files into the run dir. Assumes that the dir exists, is local, and is writable.""" - print("Copying files to the run dir") - files = [] - - run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name) - assert '.' in submit_config.run_func_name - for _idx in range(submit_config.run_func_name.count('.') - 1): - run_func_module_dir_path = os.path.dirname(run_func_module_dir_path) - files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores=submit_config.run_dir_ignore, add_base_to_relative=False) - - dnnlib_module_dir_path = util.get_module_dir_by_obj_name("dnnlib") - files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores=submit_config.run_dir_ignore, add_base_to_relative=True) - - if submit_config.run_dir_extra_files is not None: - files += submit_config.run_dir_extra_files - - files = [(f[0], os.path.join(run_dir, "src", f[1])) for f in files] - files += [(os.path.join(dnnlib_module_dir_path, "submission", "_internal", "run.py"), os.path.join(run_dir, "run.py"))] - - util.copy_files_and_create_dirs(files) - - pickle.dump(submit_config, open(os.path.join(run_dir, "submit_config.pkl"), "wb")) - - with open(os.path.join(run_dir, "submit_config.txt"), "w") as f: - pprint.pprint(submit_config, stream=f, indent=4, width=200, compact=False) - - -def run_wrapper(submit_config: SubmitConfig) -> None: - """Wrap the actual run function call for handling logging, exceptions, typing, etc.""" - is_local = submit_config.submit_target == SubmitTarget.LOCAL - - checker = None - - # when running locally, redirect stderr to stdout, log stdout to a file, and force flushing - if is_local: - logger = util.Logger(file_name=os.path.join(submit_config.run_dir, "log.txt"), file_mode="w", should_flush=True) - else: # when running in a cluster, redirect stderr to stdout, and just force flushing (log writing is handled by run.sh) - logger = util.Logger(file_name=None, should_flush=True) - - import dnnlib - dnnlib.submit_config = submit_config - - try: - print("dnnlib: Running {0}() on {1}...".format(submit_config.run_func_name, submit_config.host_name)) - start_time = time.time() - util.call_func_by_name(func_name=submit_config.run_func_name, submit_config=submit_config, **submit_config.run_func_kwargs) - print("dnnlib: Finished {0}() in {1}.".format(submit_config.run_func_name, util.format_time(time.time() - start_time))) - except: - if is_local: - raise - else: - traceback.print_exc() - - log_src = os.path.join(submit_config.run_dir, "log.txt") - log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), "{0}-error.txt".format(submit_config.run_name)) - shutil.copyfile(log_src, log_dst) - finally: - open(os.path.join(submit_config.run_dir, 
"_finished.txt"), "w").close() - - dnnlib.submit_config = None - logger.close() - - if checker is not None: - checker.stop() - - -def submit_run(submit_config: SubmitConfig, run_func_name: str, **run_func_kwargs) -> None: - """Create a run dir, gather files related to the run, copy files to the run dir, and launch the run in appropriate place.""" - submit_config = copy.copy(submit_config) - - if submit_config.user_name is None: - submit_config.user_name = get_user_name() - - submit_config.run_func_name = run_func_name - submit_config.run_func_kwargs = run_func_kwargs - - assert submit_config.submit_target == SubmitTarget.LOCAL - if submit_config.submit_target in {SubmitTarget.LOCAL}: - run_dir = _create_run_dir_local(submit_config) - - submit_config.task_name = "{0}-{1:05d}-{2}".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc) - submit_config.run_dir = run_dir - _populate_run_dir(run_dir, submit_config) - - if submit_config.print_info: - print("\nSubmit config:\n") - pprint.pprint(submit_config, indent=4, width=200, compact=False) - print() - - if submit_config.ask_confirmation: - if not util.ask_yes_no("Continue submitting the job?"): - return - - run_wrapper(submit_config) diff --git a/spaces/DragGan/DragGan/stylegan_human/pti/pti_configs/paths_config.py b/spaces/DragGan/DragGan/stylegan_human/pti/pti_configs/paths_config.py deleted file mode 100644 index e4b6fce7bfcf285fb09ae4ddf9432fa6aa562ea5..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/stylegan_human/pti/pti_configs/paths_config.py +++ /dev/null @@ -1,24 +0,0 @@ -import os - -## Pretrained models paths -e4e = './pti/e4e_w+.pt' -stylegan2_ada_shhq = './pretrained_models/stylegan_human_v2_1024.pkl' -ir_se50 = '' #'./model_ir_se50.pth' - -## Dirs for output files -checkpoints_dir = './outputs/pti/checkpoints/' -embedding_base_dir = './outputs/pti/embeddings' -experiments_output_dir = './outputs/pti/' - -## Input info -### Input dir, where the images reside -input_data_path = 'aligned_image/' -### Inversion identifier, used to keeping track of the inversion results. Both the latent code and the generator -input_data_id = 'test' - -## Keywords -pti_results_keyword = 'PTI' -e4e_results_keyword = 'e4e' -sg2_results_keyword = 'SG2' -sg2_plus_results_keyword = 'SG2_Plus' -multi_id_model_type = 'multi_id' diff --git a/spaces/EdZ123/anime-collaborative-filtering-system/app.py b/spaces/EdZ123/anime-collaborative-filtering-system/app.py deleted file mode 100644 index dadce02889813eef2ddfd31f6a5413f690e10856..0000000000000000000000000000000000000000 --- a/spaces/EdZ123/anime-collaborative-filtering-system/app.py +++ /dev/null @@ -1,53 +0,0 @@ -"""The main application file for the Gradio app.""" - -import gradio as gr -import pandas as pd -import torch - -animes_df = pd.read_csv("./data/animes.csv") -anime_embeddings_df = pd.read_csv("./data/anime_embeddings.csv", header=None) - -title_list = animes_df["Title"].tolist() -embeddings = torch.tensor(anime_embeddings_df.values) - - -def recommend(index): - embedding = embeddings[index] - - embedding_distances = torch.nn.CosineSimilarity(dim=1)(embeddings, embedding) - recommendation_indexes = embedding_distances.argsort(descending=True)[1:4] - - recommendations = [] - for rank, recommendation_index in enumerate(recommendation_indexes): - recommendation = animes_df.iloc[int(recommendation_index)] - value = recommendation["Image URL"] - label = f'{rank + 1}. 
{recommendation["Title"]}' - recommendations.append((value, label)) - - return recommendations - - -css = """ -.gradio-container {align-items: center} -#container {max-width: 795px} -""" - - -with gr.Blocks(css=css) as space: - with gr.Column(elem_id="container"): - gr.Markdown( - """ - # Anime Collaborative Filtering System - This is a Pytorch recommendation model that uses neural collaborative filtering. - Enter an anime, and it will suggest similar shows! \ - Source code: [https://github.com/EdZ543/anime-collaborative-filtering-system](https://github.com/EdZ543/anime-collaborative-filtering-system) - """ - ) - - dropdown = gr.Dropdown(label="Enter an anime", choices=title_list, type="index") - - gallery = gr.Gallery(label="Recommendations", rows=1, columns=3, height="265") - - dropdown.change(fn=recommend, inputs=dropdown, outputs=gallery) - -space.launch() diff --git a/spaces/Eduger/webui/README.md b/spaces/Eduger/webui/README.md deleted file mode 100644 index 013d12c9f3a56698056ae1bdbbfb0ec009805237..0000000000000000000000000000000000000000 --- a/spaces/Eduger/webui/README.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Stable Diffusion Web UI -emoji: 🚧 -colorFrom: yellow -colorTo: yellow -sdk: gradio -sdk_version: 3.9 -app_file: app.py -pinned: false -duplicated_from: camenduru/webui ---- - -## Stable Diffusion Web UI -[https://github.com/AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) - -## Documentation -[https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki) - -## Models License -https://huggingface.co/spaces/CompVis/stable-diffusion-license \ No newline at end of file diff --git a/spaces/Epoching/DocumentQA/DiT_Extractor/dit_object_detection/ditod/backbone.py b/spaces/Epoching/DocumentQA/DiT_Extractor/dit_object_detection/ditod/backbone.py deleted file mode 100644 index 492a36178298f48028b5048581cdec50267cc54e..0000000000000000000000000000000000000000 --- a/spaces/Epoching/DocumentQA/DiT_Extractor/dit_object_detection/ditod/backbone.py +++ /dev/null @@ -1,156 +0,0 @@ -# -------------------------------------------------------------------------------- -# VIT: Multi-Path Vision Transformer for Dense Prediction -# Copyright (c) 2022 Electronics and Telecommunications Research Institute (ETRI). -# All Rights Reserved. -# Written by Youngwan Lee -# This source code is licensed(Dual License(GPL3.0 & Commercial)) under the license found in the -# LICENSE file in the root directory of this source tree. -# -------------------------------------------------------------------------------- -# References: -# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm -# CoaT: https://github.com/mlpc-ucsd/CoaT -# -------------------------------------------------------------------------------- - - -import torch - -from detectron2.layers import ( - ShapeSpec, -) -from detectron2.modeling import Backbone, BACKBONE_REGISTRY, FPN -from detectron2.modeling.backbone.fpn import LastLevelP6P7, LastLevelMaxPool - -from .beit import beit_base_patch16, dit_base_patch16, dit_large_patch16, beit_large_patch16 -from .deit import deit_base_patch16, mae_base_patch16 - -__all__ = [ - "build_vit_fpn_backbone", -] - - -class VIT_Backbone(Backbone): - """ - Implement VIT backbone. 
- """ - - def __init__(self, name, out_features, drop_path, img_size, pos_type, model_kwargs): - super().__init__() - self._out_features = out_features - if 'base' in name: - self._out_feature_strides = {"layer3": 4, "layer5": 8, "layer7": 16, "layer11": 32} - else: - self._out_feature_strides = {"layer7": 4, "layer11": 8, "layer15": 16, "layer23": 32} - - if name == 'beit_base_patch16': - model_func = beit_base_patch16 - self._out_feature_channels = {"layer3": 768, "layer5": 768, "layer7": 768, "layer11": 768} - elif name == 'dit_base_patch16': - model_func = dit_base_patch16 - self._out_feature_channels = {"layer3": 768, "layer5": 768, "layer7": 768, "layer11": 768} - elif name == "deit_base_patch16": - model_func = deit_base_patch16 - self._out_feature_channels = {"layer3": 768, "layer5": 768, "layer7": 768, "layer11": 768} - elif name == "mae_base_patch16": - model_func = mae_base_patch16 - self._out_feature_channels = {"layer3": 768, "layer5": 768, "layer7": 768, "layer11": 768} - elif name == "dit_large_patch16": - model_func = dit_large_patch16 - self._out_feature_channels = {"layer7": 1024, "layer11": 1024, "layer15": 1024, "layer23": 1024} - elif name == "beit_large_patch16": - model_func = beit_large_patch16 - self._out_feature_channels = {"layer7": 1024, "layer11": 1024, "layer15": 1024, "layer23": 1024} - else: - raise ValueError("Unsupported VIT name yet.") - - if 'beit' in name or 'dit' in name: - if pos_type == "abs": - self.backbone = model_func(img_size=img_size, - out_features=out_features, - drop_path_rate=drop_path, - use_abs_pos_emb=True, - **model_kwargs) - elif pos_type == "shared_rel": - self.backbone = model_func(img_size=img_size, - out_features=out_features, - drop_path_rate=drop_path, - use_shared_rel_pos_bias=True, - **model_kwargs) - elif pos_type == "rel": - self.backbone = model_func(img_size=img_size, - out_features=out_features, - drop_path_rate=drop_path, - use_rel_pos_bias=True, - **model_kwargs) - else: - raise ValueError() - else: - self.backbone = model_func(img_size=img_size, - out_features=out_features, - drop_path_rate=drop_path, - **model_kwargs) - - def forward(self, x): - """ - Args: - x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``. - - Returns: - dict[str->Tensor]: names and the corresponding features - """ - assert x.dim() == 4, f"VIT takes an input of shape (N, C, H, W). Got {x.shape} instead!" - return self.backbone.forward_features(x) - - def output_shape(self): - return { - name: ShapeSpec( - channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] - ) - for name in self._out_features - } - - -def build_VIT_backbone(cfg): - """ - Create a VIT instance from config. - - Args: - cfg: a detectron2 CfgNode - - Returns: - A VIT backbone instance. - """ - # fmt: off - name = cfg.MODEL.VIT.NAME - out_features = cfg.MODEL.VIT.OUT_FEATURES - drop_path = cfg.MODEL.VIT.DROP_PATH - img_size = cfg.MODEL.VIT.IMG_SIZE - pos_type = cfg.MODEL.VIT.POS_TYPE - - model_kwargs = eval(str(cfg.MODEL.VIT.MODEL_KWARGS).replace("`", "")) - - return VIT_Backbone(name, out_features, drop_path, img_size, pos_type, model_kwargs) - - -@BACKBONE_REGISTRY.register() -def build_vit_fpn_backbone(cfg, input_shape: ShapeSpec): - """ - Create a VIT w/ FPN backbone. - - Args: - cfg: a detectron2 CfgNode - - Returns: - backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. 
- """ - bottom_up = build_VIT_backbone(cfg) - in_features = cfg.MODEL.FPN.IN_FEATURES - out_channels = cfg.MODEL.FPN.OUT_CHANNELS - backbone = FPN( - bottom_up=bottom_up, - in_features=in_features, - out_channels=out_channels, - norm=cfg.MODEL.FPN.NORM, - top_block=LastLevelMaxPool(), - fuse_type=cfg.MODEL.FPN.FUSE_TYPE, - ) - return backbone diff --git a/spaces/Geraldine/simple_contextual_chatbot/README.md b/spaces/Geraldine/simple_contextual_chatbot/README.md deleted file mode 100644 index 394d2ccae60f790ed6a80d420722a7869ce1ebfe..0000000000000000000000000000000000000000 --- a/spaces/Geraldine/simple_contextual_chatbot/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Simple Contextual Chatbot -emoji: 📚 -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/GipAdonimus/Real-Time-Voice-Cloning/demo_toolbox.py b/spaces/GipAdonimus/Real-Time-Voice-Cloning/demo_toolbox.py deleted file mode 100644 index ea30a29275965c7e2b815cd703e891a5ca53e97b..0000000000000000000000000000000000000000 --- a/spaces/GipAdonimus/Real-Time-Voice-Cloning/demo_toolbox.py +++ /dev/null @@ -1,43 +0,0 @@ -from pathlib import Path -from toolbox import Toolbox -from utils.argutils import print_args -from utils.modelutils import check_model_paths -import argparse -import os - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="Runs the toolbox", - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - - parser.add_argument("-d", "--datasets_root", type=Path, help= \ - "Path to the directory containing your datasets. See toolbox/__init__.py for a list of " - "supported datasets.", default=None) - parser.add_argument("-e", "--enc_models_dir", type=Path, default="encoder/saved_models", - help="Directory containing saved encoder models") - parser.add_argument("-s", "--syn_models_dir", type=Path, default="synthesizer/saved_models", - help="Directory containing saved synthesizer models") - parser.add_argument("-v", "--voc_models_dir", type=Path, default="vocoder/saved_models", - help="Directory containing saved vocoder models") - parser.add_argument("--cpu", action="store_true", help=\ - "If True, processing is done on CPU, even when a GPU is available.") - parser.add_argument("--seed", type=int, default=None, help=\ - "Optional random number seed value to make toolbox deterministic.") - parser.add_argument("--no_mp3_support", action="store_true", help=\ - "If True, no mp3 files are allowed.") - args = parser.parse_args() - print_args(args, parser) - - if args.cpu: - # Hide GPUs from Pytorch to force CPU processing - os.environ["CUDA_VISIBLE_DEVICES"] = "-1" - del args.cpu - - ## Remind the user to download pretrained models if needed - check_model_paths(encoder_path=args.enc_models_dir, synthesizer_path=args.syn_models_dir, - vocoder_path=args.voc_models_dir) - - # Launch the toolbox - Toolbox(**vars(args)) diff --git a/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/data/tools/hhsearch.py b/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/data/tools/hhsearch.py deleted file mode 100644 index fac137e0172f53e7c7ef943c5fa73dcb69f72246..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/data/tools/hhsearch.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2021 DeepMind Technologies Limited -# -# Licensed under the Apache License, 
Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Library to run HHsearch from Python.""" - -import glob -import os -import subprocess -from typing import Sequence - -from absl import logging - -from alphafold.data.tools import utils -# Internal import (7716). - - -class HHSearch: - """Python wrapper of the HHsearch binary.""" - - def __init__(self, - *, - binary_path: str, - databases: Sequence[str], - maxseq: int = 1_000_000): - """Initializes the Python HHsearch wrapper. - - Args: - binary_path: The path to the HHsearch executable. - databases: A sequence of HHsearch database paths. This should be the - common prefix for the database files (i.e. up to but not including - _hhm.ffindex etc.) - maxseq: The maximum number of rows in an input alignment. Note that this - parameter is only supported in HHBlits version 3.1 and higher. - - Raises: - RuntimeError: If HHsearch binary not found within the path. - """ - self.binary_path = binary_path - self.databases = databases - self.maxseq = maxseq - - for database_path in self.databases: - if not glob.glob(database_path + '_*'): - logging.error('Could not find HHsearch database %s', database_path) - raise ValueError(f'Could not find HHsearch database {database_path}') - - def query(self, a3m: str) -> str: - """Queries the database using HHsearch using a given a3m.""" - with utils.tmpdir_manager(base_dir='/tmp') as query_tmp_dir: - input_path = os.path.join(query_tmp_dir, 'query.a3m') - hhr_path = os.path.join(query_tmp_dir, 'output.hhr') - with open(input_path, 'w') as f: - f.write(a3m) - - db_cmd = [] - for db_path in self.databases: - db_cmd.append('-d') - db_cmd.append(db_path) - cmd = [self.binary_path, - '-i', input_path, - '-o', hhr_path, - '-maxseq', str(self.maxseq) - ] + db_cmd - - logging.info('Launching subprocess "%s"', ' '.join(cmd)) - process = subprocess.Popen( - cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - with utils.timing('HHsearch query'): - stdout, stderr = process.communicate() - retcode = process.wait() - - if retcode: - # Stderr is truncated to prevent proto size errors in Beam. 
- raise RuntimeError( - 'HHSearch failed:\nstdout:\n%s\n\nstderr:\n%s\n' % ( - stdout.decode('utf-8'), stderr[:100_000].decode('utf-8'))) - - with open(hhr_path) as f: - hhr = f.read() - return hhr diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index 0471fe86eb50b0fd644f10d77ab0ea7e150c95cf..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,36 +0,0 @@ -_base_ = './mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - pretrained='open-mmlab://detectron2/resnet50_caffe', - backbone=dict(norm_cfg=dict(requires_grad=False), style='caffe')) -# use caffe img_norm -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/dnlnet/dnl_r101-d8_512x512_160k_ade20k.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/dnlnet/dnl_r101-d8_512x512_160k_ade20k.py deleted file mode 100644 index aca44e478b67d5a226681c099e64fe67d93cf39b..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/dnlnet/dnl_r101-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './dnl_r50-d8_512x512_160k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Gradio-Themes/gmjk_qiangshou_gradio/README.md b/spaces/Gradio-Themes/gmjk_qiangshou_gradio/README.md deleted file mode 100644 index 0936303e0e5dd525c7a304962aaf2d2b97848366..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Themes/gmjk_qiangshou_gradio/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Gmjk Qiangshou Gradio -emoji: 🐢 -colorFrom: red -colorTo: yellow -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false -license: other ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/GroveStreet/GTA_SOVITS/vdecoder/hifigan/nvSTFT.py b/spaces/GroveStreet/GTA_SOVITS/vdecoder/hifigan/nvSTFT.py deleted file mode 100644 index 88597d62a505715091f9ba62d38bf0a85a31b95a..0000000000000000000000000000000000000000 --- a/spaces/GroveStreet/GTA_SOVITS/vdecoder/hifigan/nvSTFT.py +++ /dev/null @@ -1,111 +0,0 @@ -import math -import os -os.environ["LRU_CACHE_CAPACITY"] = "3" -import random -import torch -import torch.utils.data -import numpy as np -import 
librosa -from librosa.util import normalize -from librosa.filters import mel as librosa_mel_fn -from scipy.io.wavfile import read -import soundfile as sf - -def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False): - sampling_rate = None - try: - data, sampling_rate = sf.read(full_path, always_2d=True)# than soundfile. - except Exception as ex: - print(f"'{full_path}' failed to load.\nException:") - print(ex) - if return_empty_on_exception: - return [], sampling_rate or target_sr or 32000 - else: - raise Exception(ex) - - if len(data.shape) > 1: - data = data[:, 0] - assert len(data) > 2# check duration of audio file is > 2 samples (because otherwise the slice operation was on the wrong dimension) - - if np.issubdtype(data.dtype, np.integer): # if audio data is type int - max_mag = -np.iinfo(data.dtype).min # maximum magnitude = min possible value of intXX - else: # if audio data is type fp32 - max_mag = max(np.amax(data), -np.amin(data)) - max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0) # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32 - - data = torch.FloatTensor(data.astype(np.float32))/max_mag - - if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:# resample will crash with inf/NaN inputs. return_empty_on_exception will return empty arr instead of except - return [], sampling_rate or target_sr or 32000 - if target_sr is not None and sampling_rate != target_sr: - data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr)) - sampling_rate = target_sr - - return data, sampling_rate - -def dynamic_range_compression(x, C=1, clip_val=1e-5): - return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) - -def dynamic_range_decompression(x, C=1): - return np.exp(x) / C - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - -def dynamic_range_decompression_torch(x, C=1): - return torch.exp(x) / C - -class STFT(): - def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5): - self.target_sr = sr - - self.n_mels = n_mels - self.n_fft = n_fft - self.win_size = win_size - self.hop_length = hop_length - self.fmin = fmin - self.fmax = fmax - self.clip_val = clip_val - self.mel_basis = {} - self.hann_window = {} - - def get_mel(self, y, center=False): - sampling_rate = self.target_sr - n_mels = self.n_mels - n_fft = self.n_fft - win_size = self.win_size - hop_length = self.hop_length - fmin = self.fmin - fmax = self.fmax - clip_val = self.clip_val - - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - if fmax not in self.mel_basis: - mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax) - self.mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device) - self.hann_window[str(y.device)] = torch.hann_window(self.win_size).to(y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_length)/2), int((n_fft-hop_length)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_length, win_length=win_size, window=self.hann_window[str(y.device)], - center=center, pad_mode='reflect', normalized=False, onesided=True) - # print(111,spec) - spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9)) - # print(222,spec) - spec = 
torch.matmul(self.mel_basis[str(fmax)+'_'+str(y.device)], spec) - # print(333,spec) - spec = dynamic_range_compression_torch(spec, clip_val=clip_val) - # print(444,spec) - return spec - - def __call__(self, audiopath): - audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr) - spect = self.get_mel(audio.unsqueeze(0)).squeeze(0) - return spect - -stft = STFT() diff --git a/spaces/GroveStreet/GTA_SOVITS/vencoder/hubert/hubert_model.py b/spaces/GroveStreet/GTA_SOVITS/vencoder/hubert/hubert_model.py deleted file mode 100644 index 7fb642d89b07ca60792debab18e3454f52d8f357..0000000000000000000000000000000000000000 --- a/spaces/GroveStreet/GTA_SOVITS/vencoder/hubert/hubert_model.py +++ /dev/null @@ -1,222 +0,0 @@ -import copy -import random -from typing import Optional, Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as t_func -from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present - - -class Hubert(nn.Module): - def __init__(self, num_label_embeddings: int = 100, mask: bool = True): - super().__init__() - self._mask = mask - self.feature_extractor = FeatureExtractor() - self.feature_projection = FeatureProjection() - self.positional_embedding = PositionalConvEmbedding() - self.norm = nn.LayerNorm(768) - self.dropout = nn.Dropout(0.1) - self.encoder = TransformerEncoder( - nn.TransformerEncoderLayer( - 768, 12, 3072, activation="gelu", batch_first=True - ), - 12, - ) - self.proj = nn.Linear(768, 256) - - self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_()) - self.label_embedding = nn.Embedding(num_label_embeddings, 256) - - def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - mask = None - if self.training and self._mask: - mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2) - x[mask] = self.masked_spec_embed.to(x.dtype) - return x, mask - - def encode( - self, x: torch.Tensor, layer: Optional[int] = None - ) -> Tuple[torch.Tensor, torch.Tensor]: - x = self.feature_extractor(x) - x = self.feature_projection(x.transpose(1, 2)) - x, mask = self.mask(x) - x = x + self.positional_embedding(x) - x = self.dropout(self.norm(x)) - x = self.encoder(x, output_layer=layer) - return x, mask - - def logits(self, x: torch.Tensor) -> torch.Tensor: - logits = torch.cosine_similarity( - x.unsqueeze(2), - self.label_embedding.weight.unsqueeze(0).unsqueeze(0), - dim=-1, - ) - return logits / 0.1 - - def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - x, mask = self.encode(x) - x = self.proj(x) - logits = self.logits(x) - return logits, mask - - -class HubertSoft(Hubert): - def __init__(self): - super().__init__() - - @torch.inference_mode() - def units(self, wav: torch.Tensor) -> torch.Tensor: - wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2)) - x, _ = self.encode(wav) - return self.proj(x) - - -class FeatureExtractor(nn.Module): - def __init__(self): - super().__init__() - self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False) - self.norm0 = nn.GroupNorm(512, 512) - self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False) - self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = t_func.gelu(self.norm0(self.conv0(x))) - x = t_func.gelu(self.conv1(x)) - x = t_func.gelu(self.conv2(x)) - x = t_func.gelu(self.conv3(x)) - x = 
t_func.gelu(self.conv4(x)) - x = t_func.gelu(self.conv5(x)) - x = t_func.gelu(self.conv6(x)) - return x - - -class FeatureProjection(nn.Module): - def __init__(self): - super().__init__() - self.norm = nn.LayerNorm(512) - self.projection = nn.Linear(512, 768) - self.dropout = nn.Dropout(0.1) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.norm(x) - x = self.projection(x) - x = self.dropout(x) - return x - - -class PositionalConvEmbedding(nn.Module): - def __init__(self): - super().__init__() - self.conv = nn.Conv1d( - 768, - 768, - kernel_size=128, - padding=128 // 2, - groups=16, - ) - self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.conv(x.transpose(1, 2)) - x = t_func.gelu(x[:, :, :-1]) - return x.transpose(1, 2) - - -class TransformerEncoder(nn.Module): - def __init__( - self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int - ) -> None: - super(TransformerEncoder, self).__init__() - self.layers = nn.ModuleList( - [copy.deepcopy(encoder_layer) for _ in range(num_layers)] - ) - self.num_layers = num_layers - - def forward( - self, - src: torch.Tensor, - mask: torch.Tensor = None, - src_key_padding_mask: torch.Tensor = None, - output_layer: Optional[int] = None, - ) -> torch.Tensor: - output = src - for layer in self.layers[:output_layer]: - output = layer( - output, src_mask=mask, src_key_padding_mask=src_key_padding_mask - ) - return output - - -def _compute_mask( - shape: Tuple[int, int], - mask_prob: float, - mask_length: int, - device: torch.device, - min_masks: int = 0, -) -> torch.Tensor: - batch_size, sequence_length = shape - - if mask_length < 1: - raise ValueError("`mask_length` has to be bigger than 0.") - - if mask_length > sequence_length: - raise ValueError( - f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`" - ) - - # compute number of masked spans in batch - num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random()) - num_masked_spans = max(num_masked_spans, min_masks) - - # make sure num masked indices <= sequence_length - if num_masked_spans * mask_length > sequence_length: - num_masked_spans = sequence_length // mask_length - - # SpecAugment mask to fill - mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool) - - # uniform distribution to sample from, make sure that offset samples are < sequence_length - uniform_dist = torch.ones( - (batch_size, sequence_length - (mask_length - 1)), device=device - ) - - # get random indices to mask - mask_indices = torch.multinomial(uniform_dist, num_masked_spans) - - # expand masked indices to masked spans - mask_indices = ( - mask_indices.unsqueeze(dim=-1) - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - offsets = ( - torch.arange(mask_length, device=device)[None, None, :] - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - mask_idxs = mask_indices + offsets - - # scatter indices to mask - mask = mask.scatter(1, mask_idxs, True) - - return mask - - -def hubert_soft( - path: str, -) -> HubertSoft: - r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`. 
- Args: - path (str): path of a pretrained model - """ - hubert = HubertSoft() - checkpoint = torch.load(path) - consume_prefix_in_state_dict_if_present(checkpoint, "module.") - hubert.load_state_dict(checkpoint) - hubert.eval() - return hubert diff --git a/spaces/Hallucinate/demo/ldm/models/diffusion/classifier.py b/spaces/Hallucinate/demo/ldm/models/diffusion/classifier.py deleted file mode 100644 index 67e98b9d8ffb96a150b517497ace0a242d7163ef..0000000000000000000000000000000000000000 --- a/spaces/Hallucinate/demo/ldm/models/diffusion/classifier.py +++ /dev/null @@ -1,267 +0,0 @@ -import os -import torch -import pytorch_lightning as pl -from omegaconf import OmegaConf -from torch.nn import functional as F -from torch.optim import AdamW -from torch.optim.lr_scheduler import LambdaLR -from copy import deepcopy -from einops import rearrange -from glob import glob -from natsort import natsorted - -from ldm.modules.diffusionmodules.openaimodel import EncoderUNetModel, UNetModel -from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config - -__models__ = { - 'class_label': EncoderUNetModel, - 'segmentation': UNetModel -} - - -def disabled_train(self, mode=True): - """Overwrite model.train with this function to make sure train/eval mode - does not change anymore.""" - return self - - -class NoisyLatentImageClassifier(pl.LightningModule): - - def __init__(self, - diffusion_path, - num_classes, - ckpt_path=None, - pool='attention', - label_key=None, - diffusion_ckpt_path=None, - scheduler_config=None, - weight_decay=1.e-2, - log_steps=10, - monitor='val/loss', - *args, - **kwargs): - super().__init__(*args, **kwargs) - self.num_classes = num_classes - # get latest config of diffusion model - diffusion_config = natsorted(glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')))[-1] - self.diffusion_config = OmegaConf.load(diffusion_config).model - self.diffusion_config.params.ckpt_path = diffusion_ckpt_path - self.load_diffusion() - - self.monitor = monitor - self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1 - self.log_time_interval = self.diffusion_model.num_timesteps // log_steps - self.log_steps = log_steps - - self.label_key = label_key if not hasattr(self.diffusion_model, 'cond_stage_key') \ - else self.diffusion_model.cond_stage_key - - assert self.label_key is not None, 'label_key neither in diffusion model nor in model.params' - - if self.label_key not in __models__: - raise NotImplementedError() - - self.load_classifier(ckpt_path, pool) - - self.scheduler_config = scheduler_config - self.use_scheduler = self.scheduler_config is not None - self.weight_decay = weight_decay - - def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): - sd = torch.load(path, map_location="cpu") - if "state_dict" in list(sd.keys()): - sd = sd["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( - sd, strict=False) - print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - print(f"Missing Keys: {missing}") - if len(unexpected) > 0: - print(f"Unexpected Keys: {unexpected}") - - def load_diffusion(self): - model = instantiate_from_config(self.diffusion_config) - self.diffusion_model = model.eval() - self.diffusion_model.train = disabled_train - for param in 
self.diffusion_model.parameters(): - param.requires_grad = False - - def load_classifier(self, ckpt_path, pool): - model_config = deepcopy(self.diffusion_config.params.unet_config.params) - model_config.in_channels = self.diffusion_config.params.unet_config.params.out_channels - model_config.out_channels = self.num_classes - if self.label_key == 'class_label': - model_config.pool = pool - - self.model = __models__[self.label_key](**model_config) - if ckpt_path is not None: - print('#####################################################################') - print(f'load from ckpt "{ckpt_path}"') - print('#####################################################################') - self.init_from_ckpt(ckpt_path) - - @torch.no_grad() - def get_x_noisy(self, x, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x)) - continuous_sqrt_alpha_cumprod = None - if self.diffusion_model.use_continuous_noise: - continuous_sqrt_alpha_cumprod = self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1) - # todo: make sure t+1 is correct here - - return self.diffusion_model.q_sample(x_start=x, t=t, noise=noise, - continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod) - - def forward(self, x_noisy, t, *args, **kwargs): - return self.model(x_noisy, t) - - @torch.no_grad() - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = rearrange(x, 'b h w c -> b c h w') - x = x.to(memory_format=torch.contiguous_format).float() - return x - - @torch.no_grad() - def get_conditioning(self, batch, k=None): - if k is None: - k = self.label_key - assert k is not None, 'Needs to provide label key' - - targets = batch[k].to(self.device) - - if self.label_key == 'segmentation': - targets = rearrange(targets, 'b h w c -> b c h w') - for down in range(self.numd): - h, w = targets.shape[-2:] - targets = F.interpolate(targets, size=(h // 2, w // 2), mode='nearest') - - # targets = rearrange(targets,'b c h w -> b h w c') - - return targets - - def compute_top_k(self, logits, labels, k, reduction="mean"): - _, top_ks = torch.topk(logits, k, dim=1) - if reduction == "mean": - return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item() - elif reduction == "none": - return (top_ks == labels[:, None]).float().sum(dim=-1) - - def on_train_epoch_start(self): - # save some memory - self.diffusion_model.model.to('cpu') - - @torch.no_grad() - def write_logs(self, loss, logits, targets): - log_prefix = 'train' if self.training else 'val' - log = {} - log[f"{log_prefix}/loss"] = loss.mean() - log[f"{log_prefix}/acc@1"] = self.compute_top_k( - logits, targets, k=1, reduction="mean" - ) - log[f"{log_prefix}/acc@5"] = self.compute_top_k( - logits, targets, k=5, reduction="mean" - ) - - self.log_dict(log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True) - self.log('loss', log[f"{log_prefix}/loss"], prog_bar=True, logger=False) - self.log('global_step', self.global_step, logger=False, on_epoch=False, prog_bar=True) - lr = self.optimizers().param_groups[0]['lr'] - self.log('lr_abs', lr, on_step=True, logger=True, on_epoch=False, prog_bar=True) - - def shared_step(self, batch, t=None): - x, *_ = self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key) - targets = self.get_conditioning(batch) - if targets.dim() == 4: - targets = targets.argmax(dim=1) - if t is None: - t = torch.randint(0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device).long() - else: - t = torch.full(size=(x.shape[0],), fill_value=t, 
device=self.device).long() - x_noisy = self.get_x_noisy(x, t) - logits = self(x_noisy, t) - - loss = F.cross_entropy(logits, targets, reduction='none') - - self.write_logs(loss.detach(), logits.detach(), targets.detach()) - - loss = loss.mean() - return loss, logits, x_noisy, targets - - def training_step(self, batch, batch_idx): - loss, *_ = self.shared_step(batch) - return loss - - def reset_noise_accs(self): - self.noisy_acc = {t: {'acc@1': [], 'acc@5': []} for t in - range(0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t)} - - def on_validation_start(self): - self.reset_noise_accs() - - @torch.no_grad() - def validation_step(self, batch, batch_idx): - loss, *_ = self.shared_step(batch) - - for t in self.noisy_acc: - _, logits, _, targets = self.shared_step(batch, t) - self.noisy_acc[t]['acc@1'].append(self.compute_top_k(logits, targets, k=1, reduction='mean')) - self.noisy_acc[t]['acc@5'].append(self.compute_top_k(logits, targets, k=5, reduction='mean')) - - return loss - - def configure_optimizers(self): - optimizer = AdamW(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) - - if self.use_scheduler: - scheduler = instantiate_from_config(self.scheduler_config) - - print("Setting up LambdaLR scheduler...") - scheduler = [ - { - 'scheduler': LambdaLR(optimizer, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }] - return [optimizer], scheduler - - return optimizer - - @torch.no_grad() - def log_images(self, batch, N=8, *args, **kwargs): - log = dict() - x = self.get_input(batch, self.diffusion_model.first_stage_key) - log['inputs'] = x - - y = self.get_conditioning(batch) - - if self.label_key == 'class_label': - y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) - log['labels'] = y - - if ismap(y): - log['labels'] = self.diffusion_model.to_rgb(y) - - for step in range(self.log_steps): - current_time = step * self.log_time_interval - - _, logits, x_noisy, _ = self.shared_step(batch, t=current_time) - - log[f'inputs@t{current_time}'] = x_noisy - - pred = F.one_hot(logits.argmax(dim=1), num_classes=self.num_classes) - pred = rearrange(pred, 'b h w c -> b c h w') - - log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb(pred) - - for key in log: - log[key] = log[key][:N] - - return log diff --git a/spaces/HaloMaster/chinesesummary/fengshen/examples/classification/finetune_classification_zen1-base_afqmc.sh b/spaces/HaloMaster/chinesesummary/fengshen/examples/classification/finetune_classification_zen1-base_afqmc.sh deleted file mode 100644 index 845e93093cc6390db2c332c22e860ff88688a657..0000000000000000000000000000000000000000 --- a/spaces/HaloMaster/chinesesummary/fengshen/examples/classification/finetune_classification_zen1-base_afqmc.sh +++ /dev/null @@ -1,151 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=afqmc-bart-base # create a short name for your job -#SBATCH --nodes=1 # node count -#SBATCH --ntasks=2 # total number of tasks across all nodes -#SBATCH --cpus-per-task=30 # cpu-cores per task (>1 if multi-threaded tasks) -#SBATCH --gres=gpu:2 # number of gpus per node -#SBATCH --mail-type=ALL # send email when job begins, ends or failed etc. 
-#SBATCH -o %x-%j.log # output and error file name (%x=job name, %j=job id) - - -export TORCH_EXTENSIONS_DIR=/cognitive_comp/ganruyi/tmp/torch_extendsions - -MODEL_NAME=fengshen-zen1 - -TASK=afqmc -TEXTA_NAME=sentence1 -TEXTB_NAME=sentence2 -LABEL_NAME=label -ID_NAME=id - - -BATCH_SIZE=8 -VAL_BATCH_SIZE=32 -ZERO_STAGE=1 -STRATEGY=deepspeed_stage_${ZERO_STAGE} - -ROOT_DIR=/cognitive_comp/ganruyi/experiments/classification_finetune/${MODEL_NAME}_${TASK} -if [ ! -d ${ROOT_DIR} ];then - mkdir -p ${ROOT_DIR} - echo ${ROOT_DIR} created!!!!!!!!!!!!!! -else - echo ${ROOT_DIR} exist!!!!!!!!!!!!!!! -fi - -DATA_DIR=/cognitive_comp/yangping/data/ChineseCLUE_DATA/${TASK}_public/ -PRETRAINED_MODEL_PATH=/cognitive_comp/ganruyi/hf_models/zen/ZEN_pretrain_base_v0.1.0 - -CHECKPOINT_PATH=${ROOT_DIR}/ckpt/ -OUTPUT_PATH=${ROOT_DIR}/predict.json - - -config_json="${ROOT_DIR}/ds_config.json" -# Deepspeed figures out GAS dynamically from dynamic GBS via set_train_batch_size() -# reduce_bucket_size: hidden_size*hidden_size -# stage3_prefetch_bucket_size: 0.9 * hidden_size * hidden_size -# stage3_param_persistence_threshold: 10 * hidden_size - -cat < $config_json -{ - "train_micro_batch_size_per_gpu": $BATCH_SIZE, - "steps_per_print": 100, - "gradient_clipping": 0.1, - "zero_optimization": { - "stage": ${ZERO_STAGE} - }, - "optimizer": { - "type": "Adam", - "params": { - "lr": 1e-7, - "eps": 1e-12, - "weight_decay": 1e-2 - } - }, - "scheduler": { - "type": "WarmupLR", - "params":{ - "warmup_min_lr": 1e-5, - "warmup_max_lr": 1e-4, - "warmup_num_steps": 400, - "warmup_type": "linear" - } - }, - "zero_allow_untested_optimizer": false, - "fp16": { - "enabled": false, - "loss_scale": 0, - "loss_scale_window": 1000, - "hysteresis": 2, - "min_loss_scale": 1 - }, - "activation_checkpointing": { - "partition_activations": false, - "contiguous_memory_optimization": false - }, - "wall_clock_breakdown": false -} -EOT - -export PL_DEEPSPEED_CONFIG_PATH=$config_json - - -DATA_ARGS="\ - --data_dir $DATA_DIR \ - --train_data train.json \ - --valid_data dev.json \ - --test_data test.json \ - --train_batchsize $BATCH_SIZE \ - --valid_batchsize $VAL_BATCH_SIZE \ - --max_length 64 \ - --texta_name $TEXTA_NAME \ - --textb_name $TEXTB_NAME \ - --label_name $LABEL_NAME \ - --id_name $ID_NAME \ - " - -MODEL_ARGS="\ - --learning_rate 1e-5 \ - --weight_decay 1e-2 \ - --warmup 0.01 \ - --num_labels 2 \ - " - -MODEL_CHECKPOINT_ARGS="\ - --monitor val_acc \ - --save_top_k 3 \ - --mode max \ - --every_n_train_steps 200 \ - --save_weights_only True \ - --dirpath $CHECKPOINT_PATH \ - --filename model-{epoch:02d}-{val_acc:.4f} \ - " - - -TRAINER_ARGS="\ - --max_epochs 10 \ - --gpus 1 \ - --num_nodes 1 \ - --strategy $STRATEGY \ - --gradient_clip_val 1.0 \ - --check_val_every_n_epoch 1 \ - --val_check_interval 1.0 \ - --default_root_dir $ROOT_DIR \ - " - -options=" \ - --pretrained_model_path $PRETRAINED_MODEL_PATH \ - --output_save_path $OUTPUT_PATH \ - $DATA_ARGS \ - $MODEL_ARGS \ - $MODEL_CHECKPOINT_ARGS \ - $TRAINER_ARGS \ - " - -SINGULARITY_PATH=/cognitive_comp/ganruyi/pytorch21_06_py3_docker_image_v2.sif -SCRIPT_PATH=/cognitive_comp/ganruyi/Fengshenbang-LM/fengshen/examples/classification/finetune_classification.py - -# python3 $SCRIPT_PATH $options -source activate base -# srun singularity exec --nv -B /cognitive_comp/:/cognitive_comp/ $SINGULARITY_PATH /home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options -/home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options - diff --git 
a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/copy_labels.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/copy_labels.py deleted file mode 100644 index 989868388eefccc37c82d7602f709632035c7aa1..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/copy_labels.py +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env python3 -u -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import sys - -for idx, line in enumerate(sys.stdin): - print(f"utt{idx:010d} {line}", end="") diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/nat/__init__.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/nat/__init__.py deleted file mode 100644 index 05fe822487c3bcde8346648d5826f1669c6bc1ca..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/nat/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -"""isort:skip_file""" - -from .fairseq_nat_model import * -from .nonautoregressive_transformer import * -from .nat_crf_transformer import * -from .iterative_nonautoregressive_transformer import * -from .cmlm_transformer import * -from .levenshtein_transformer import * -from .insertion_transformer import * diff --git a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/LICENSE.md b/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/LICENSE.md deleted file mode 100644 index 5fd2e54913fd05b69de2874ec8f9a10c7f4e8d3f..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/LICENSE.md +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2022 Open-Speech-EkStep - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/spaces/Hasani/Specific_Object_Recognition_in_the_Wild/SuperGluePretrainedNetwork/models/superglue.py b/spaces/Hasani/Specific_Object_Recognition_in_the_Wild/SuperGluePretrainedNetwork/models/superglue.py deleted file mode 100644 index 5a89b0348075bcb918eab123bc988c7102137a3d..0000000000000000000000000000000000000000 --- a/spaces/Hasani/Specific_Object_Recognition_in_the_Wild/SuperGluePretrainedNetwork/models/superglue.py +++ /dev/null @@ -1,285 +0,0 @@ -# %BANNER_BEGIN% -# --------------------------------------------------------------------- -# %COPYRIGHT_BEGIN% -# -# Magic Leap, Inc. ("COMPANY") CONFIDENTIAL -# -# Unpublished Copyright (c) 2020 -# Magic Leap, Inc., All Rights Reserved. -# -# NOTICE: All information contained herein is, and remains the property -# of COMPANY. The intellectual and technical concepts contained herein -# are proprietary to COMPANY and may be covered by U.S. and Foreign -# Patents, patents in process, and are protected by trade secret or -# copyright law. Dissemination of this information or reproduction of -# this material is strictly forbidden unless prior written permission is -# obtained from COMPANY. Access to the source code contained herein is -# hereby forbidden to anyone except current COMPANY employees, managers -# or contractors who have executed Confidentiality and Non-disclosure -# agreements explicitly covering such access. -# -# The copyright notice above does not evidence any actual or intended -# publication or disclosure of this source code, which includes -# information that is confidential and/or proprietary, and is a trade -# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION, -# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS -# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS -# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND -# INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE -# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS -# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE, -# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART. 
-# -# %COPYRIGHT_END% -# ---------------------------------------------------------------------- -# %AUTHORS_BEGIN% -# -# Originating Authors: Paul-Edouard Sarlin -# -# %AUTHORS_END% -# --------------------------------------------------------------------*/ -# %BANNER_END% - -from copy import deepcopy -from pathlib import Path -from typing import List, Tuple - -import torch -from torch import nn - - -def MLP(channels: List[int], do_bn: bool = True) -> nn.Module: - """ Multi-layer perceptron """ - n = len(channels) - layers = [] - for i in range(1, n): - layers.append( - nn.Conv1d(channels[i - 1], channels[i], kernel_size=1, bias=True)) - if i < (n-1): - if do_bn: - layers.append(nn.BatchNorm1d(channels[i])) - layers.append(nn.ReLU()) - return nn.Sequential(*layers) - - -def normalize_keypoints(kpts, image_shape): - """ Normalize keypoints locations based on image image_shape""" - _, _, height, width = image_shape - one = kpts.new_tensor(1) - size = torch.stack([one*width, one*height])[None] - center = size / 2 - scaling = size.max(1, keepdim=True).values * 0.7 - return (kpts - center[:, None, :]) / scaling[:, None, :] - - -class KeypointEncoder(nn.Module): - """ Joint encoding of visual appearance and location using MLPs""" - def __init__(self, feature_dim: int, layers: List[int]) -> None: - super().__init__() - self.encoder = MLP([3] + layers + [feature_dim]) - nn.init.constant_(self.encoder[-1].bias, 0.0) - - def forward(self, kpts, scores): - inputs = [kpts.transpose(1, 2), scores.unsqueeze(1)] - return self.encoder(torch.cat(inputs, dim=1)) - - -def attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor) -> Tuple[torch.Tensor,torch.Tensor]: - dim = query.shape[1] - scores = torch.einsum('bdhn,bdhm->bhnm', query, key) / dim**.5 - prob = torch.nn.functional.softmax(scores, dim=-1) - return torch.einsum('bhnm,bdhm->bdhn', prob, value), prob - - -class MultiHeadedAttention(nn.Module): - """ Multi-head attention to increase model expressivitiy """ - def __init__(self, num_heads: int, d_model: int): - super().__init__() - assert d_model % num_heads == 0 - self.dim = d_model // num_heads - self.num_heads = num_heads - self.merge = nn.Conv1d(d_model, d_model, kernel_size=1) - self.proj = nn.ModuleList([deepcopy(self.merge) for _ in range(3)]) - - def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor) -> torch.Tensor: - batch_dim = query.size(0) - query, key, value = [l(x).view(batch_dim, self.dim, self.num_heads, -1) - for l, x in zip(self.proj, (query, key, value))] - x, _ = attention(query, key, value) - return self.merge(x.contiguous().view(batch_dim, self.dim*self.num_heads, -1)) - - -class AttentionalPropagation(nn.Module): - def __init__(self, feature_dim: int, num_heads: int): - super().__init__() - self.attn = MultiHeadedAttention(num_heads, feature_dim) - self.mlp = MLP([feature_dim*2, feature_dim*2, feature_dim]) - nn.init.constant_(self.mlp[-1].bias, 0.0) - - def forward(self, x: torch.Tensor, source: torch.Tensor) -> torch.Tensor: - message = self.attn(x, source, source) - return self.mlp(torch.cat([x, message], dim=1)) - - -class AttentionalGNN(nn.Module): - def __init__(self, feature_dim: int, layer_names: List[str]) -> None: - super().__init__() - self.layers = nn.ModuleList([ - AttentionalPropagation(feature_dim, 4) - for _ in range(len(layer_names))]) - self.names = layer_names - - def forward(self, desc0: torch.Tensor, desc1: torch.Tensor) -> Tuple[torch.Tensor,torch.Tensor]: - for layer, name in zip(self.layers, self.names): - if name == 
'cross': - src0, src1 = desc1, desc0 - else: # if name == 'self': - src0, src1 = desc0, desc1 - delta0, delta1 = layer(desc0, src0), layer(desc1, src1) - desc0, desc1 = (desc0 + delta0), (desc1 + delta1) - return desc0, desc1 - - -def log_sinkhorn_iterations(Z: torch.Tensor, log_mu: torch.Tensor, log_nu: torch.Tensor, iters: int) -> torch.Tensor: - """ Perform Sinkhorn Normalization in Log-space for stability""" - u, v = torch.zeros_like(log_mu), torch.zeros_like(log_nu) - for _ in range(iters): - u = log_mu - torch.logsumexp(Z + v.unsqueeze(1), dim=2) - v = log_nu - torch.logsumexp(Z + u.unsqueeze(2), dim=1) - return Z + u.unsqueeze(2) + v.unsqueeze(1) - - -def log_optimal_transport(scores: torch.Tensor, alpha: torch.Tensor, iters: int) -> torch.Tensor: - """ Perform Differentiable Optimal Transport in Log-space for stability""" - b, m, n = scores.shape - one = scores.new_tensor(1) - ms, ns = (m*one).to(scores), (n*one).to(scores) - - bins0 = alpha.expand(b, m, 1) - bins1 = alpha.expand(b, 1, n) - alpha = alpha.expand(b, 1, 1) - - couplings = torch.cat([torch.cat([scores, bins0], -1), - torch.cat([bins1, alpha], -1)], 1) - - norm = - (ms + ns).log() - log_mu = torch.cat([norm.expand(m), ns.log()[None] + norm]) - log_nu = torch.cat([norm.expand(n), ms.log()[None] + norm]) - log_mu, log_nu = log_mu[None].expand(b, -1), log_nu[None].expand(b, -1) - - Z = log_sinkhorn_iterations(couplings, log_mu, log_nu, iters) - Z = Z - norm # multiply probabilities by M+N - return Z - - -def arange_like(x, dim: int): - return x.new_ones(x.shape[dim]).cumsum(0) - 1 # traceable in 1.1 - - -class SuperGlue(nn.Module): - """SuperGlue feature matching middle-end - - Given two sets of keypoints and locations, we determine the - correspondences by: - 1. Keypoint Encoding (normalization + visual feature and location fusion) - 2. Graph Neural Network with multiple self and cross-attention layers - 3. Final projection layer - 4. Optimal Transport Layer (a differentiable Hungarian matching algorithm) - 5. Thresholding matrix based on mutual exclusivity and a match_threshold - - The correspondence ids use -1 to indicate non-matching points. - - Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew - Rabinovich. SuperGlue: Learning Feature Matching with Graph Neural - Networks. In CVPR, 2020. 
https://arxiv.org/abs/1911.11763 - - """ - default_config = { - 'descriptor_dim': 256, - 'weights': 'indoor', - 'keypoint_encoder': [32, 64, 128, 256], - 'GNN_layers': ['self', 'cross'] * 9, - 'sinkhorn_iterations': 100, - 'match_threshold': 0.2, - } - - def __init__(self, config): - super().__init__() - self.config = {**self.default_config, **config} - - self.kenc = KeypointEncoder( - self.config['descriptor_dim'], self.config['keypoint_encoder']) - - self.gnn = AttentionalGNN( - feature_dim=self.config['descriptor_dim'], layer_names=self.config['GNN_layers']) - - self.final_proj = nn.Conv1d( - self.config['descriptor_dim'], self.config['descriptor_dim'], - kernel_size=1, bias=True) - - bin_score = torch.nn.Parameter(torch.tensor(1.)) - self.register_parameter('bin_score', bin_score) - - assert self.config['weights'] in ['indoor', 'outdoor'] - path = Path(__file__).parent - path = path / 'weights/superglue_{}.pth'.format(self.config['weights']) - self.load_state_dict(torch.load(str(path))) - print('Loaded SuperGlue model (\"{}\" weights)'.format( - self.config['weights'])) - - def forward(self, data): - """Run SuperGlue on a pair of keypoints and descriptors""" - desc0, desc1 = data['descriptors0'], data['descriptors1'] - kpts0, kpts1 = data['keypoints0'], data['keypoints1'] - - if kpts0.shape[1] == 0 or kpts1.shape[1] == 0: # no keypoints - shape0, shape1 = kpts0.shape[:-1], kpts1.shape[:-1] - return { - 'matches0': kpts0.new_full(shape0, -1, dtype=torch.int), - 'matches1': kpts1.new_full(shape1, -1, dtype=torch.int), - 'matching_scores0': kpts0.new_zeros(shape0), - 'matching_scores1': kpts1.new_zeros(shape1), - } - - # Keypoint normalization. - kpts0 = normalize_keypoints(kpts0, data['image0'].shape) - kpts1 = normalize_keypoints(kpts1, data['image1'].shape) - - # Keypoint MLP encoder. - desc0 = desc0 + self.kenc(kpts0, data['scores0']) - desc1 = desc1 + self.kenc(kpts1, data['scores1']) - - # Multi-layer Transformer network. - desc0, desc1 = self.gnn(desc0, desc1) - - # Final MLP projection. - mdesc0, mdesc1 = self.final_proj(desc0), self.final_proj(desc1) - - # Compute matching descriptor distance. - scores = torch.einsum('bdn,bdm->bnm', mdesc0, mdesc1) - scores = scores / self.config['descriptor_dim']**.5 - - # Run the optimal transport. - scores = log_optimal_transport( - scores, self.bin_score, - iters=self.config['sinkhorn_iterations']) - - # Get the matches with score above "match_threshold". 
- max0, max1 = scores[:, :-1, :-1].max(2), scores[:, :-1, :-1].max(1) - indices0, indices1 = max0.indices, max1.indices - mutual0 = arange_like(indices0, 1)[None] == indices1.gather(1, indices0) - mutual1 = arange_like(indices1, 1)[None] == indices0.gather(1, indices1) - zero = scores.new_tensor(0) - mscores0 = torch.where(mutual0, max0.values.exp(), zero) - mscores1 = torch.where(mutual1, mscores0.gather(1, indices1), zero) - valid0 = mutual0 & (mscores0 > self.config['match_threshold']) - valid1 = mutual1 & valid0.gather(1, indices1) - indices0 = torch.where(valid0, indices0, indices0.new_tensor(-1)) - indices1 = torch.where(valid1, indices1, indices1.new_tensor(-1)) - - return { - 'matches0': indices0, # use -1 for invalid match - 'matches1': indices1, # use -1 for invalid match - 'matching_scores0': mscores0, - 'matching_scores1': mscores1, - } diff --git a/spaces/ICML2022/OFA/fairseq/examples/speech_recognition/kaldi/kaldi_initializer.py b/spaces/ICML2022/OFA/fairseq/examples/speech_recognition/kaldi/kaldi_initializer.py deleted file mode 100644 index 6d2a2a4b6b809ba1106f9a57cb6f241dc083e670..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/speech_recognition/kaldi/kaldi_initializer.py +++ /dev/null @@ -1,698 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from dataclasses import dataclass -import hydra -from hydra.core.config_store import ConfigStore -import logging -from omegaconf import MISSING, OmegaConf -import os -import os.path as osp -from pathlib import Path -import subprocess -from typing import Optional - -from fairseq.data.dictionary import Dictionary -from fairseq.dataclass import FairseqDataclass - -script_dir = Path(__file__).resolve().parent -config_path = script_dir / "config" - - -logger = logging.getLogger(__name__) - - -@dataclass -class KaldiInitializerConfig(FairseqDataclass): - data_dir: str = MISSING - fst_dir: Optional[str] = None - in_labels: str = MISSING - out_labels: Optional[str] = None - wav2letter_lexicon: Optional[str] = None - lm_arpa: str = MISSING - kaldi_root: str = MISSING - blank_symbol: str = "" - silence_symbol: Optional[str] = None - - -def create_units(fst_dir: Path, in_labels: str, vocab: Dictionary) -> Path: - in_units_file = fst_dir / f"kaldi_dict.{in_labels}.txt" - if not in_units_file.exists(): - - logger.info(f"Creating {in_units_file}") - - with open(in_units_file, "w") as f: - print(" 0", file=f) - i = 1 - for symb in vocab.symbols[vocab.nspecial :]: - if not symb.startswith("madeupword"): - print(f"{symb} {i}", file=f) - i += 1 - return in_units_file - - -def create_lexicon( - cfg: KaldiInitializerConfig, - fst_dir: Path, - unique_label: str, - in_units_file: Path, - out_words_file: Path, -) -> (Path, Path): - - disambig_in_units_file = fst_dir / f"kaldi_dict.{cfg.in_labels}_disambig.txt" - lexicon_file = fst_dir / f"kaldi_lexicon.{unique_label}.txt" - disambig_lexicon_file = fst_dir / f"kaldi_lexicon.{unique_label}_disambig.txt" - if ( - not lexicon_file.exists() - or not disambig_lexicon_file.exists() - or not disambig_in_units_file.exists() - ): - logger.info(f"Creating {lexicon_file} (in units file: {in_units_file})") - - assert cfg.wav2letter_lexicon is not None or cfg.in_labels == cfg.out_labels - - if cfg.wav2letter_lexicon is not None: - lm_words = set() - with open(out_words_file, "r") as lm_dict_f: - for line in lm_dict_f: 
- lm_words.add(line.split()[0]) - - num_skipped = 0 - total = 0 - with open(cfg.wav2letter_lexicon, "r") as w2l_lex_f, open( - lexicon_file, "w" - ) as out_f: - for line in w2l_lex_f: - items = line.rstrip().split("\t") - assert len(items) == 2, items - if items[0] in lm_words: - print(items[0], items[1], file=out_f) - else: - num_skipped += 1 - logger.debug( - f"Skipping word {items[0]} as it was not found in LM" - ) - total += 1 - if num_skipped > 0: - logger.warning( - f"Skipped {num_skipped} out of {total} words as they were not found in LM" - ) - else: - with open(in_units_file, "r") as in_f, open(lexicon_file, "w") as out_f: - for line in in_f: - symb = line.split()[0] - if symb != "" and symb != "" and symb != "": - print(symb, symb, file=out_f) - - lex_disambig_path = ( - Path(cfg.kaldi_root) / "egs/wsj/s5/utils/add_lex_disambig.pl" - ) - res = subprocess.run( - [lex_disambig_path, lexicon_file, disambig_lexicon_file], - check=True, - capture_output=True, - ) - ndisambig = int(res.stdout) - disamib_path = Path(cfg.kaldi_root) / "egs/wsj/s5/utils/add_disambig.pl" - res = subprocess.run( - [disamib_path, "--include-zero", in_units_file, str(ndisambig)], - check=True, - capture_output=True, - ) - with open(disambig_in_units_file, "wb") as f: - f.write(res.stdout) - - return disambig_lexicon_file, disambig_in_units_file - - -def create_G( - kaldi_root: Path, fst_dir: Path, lm_arpa: Path, arpa_base: str -) -> (Path, Path): - - out_words_file = fst_dir / f"kaldi_dict.{arpa_base}.txt" - grammar_graph = fst_dir / f"G_{arpa_base}.fst" - if not grammar_graph.exists() or not out_words_file.exists(): - logger.info(f"Creating {grammar_graph}") - arpa2fst = kaldi_root / "src/lmbin/arpa2fst" - subprocess.run( - [ - arpa2fst, - "--disambig-symbol=#0", - f"--write-symbol-table={out_words_file}", - lm_arpa, - grammar_graph, - ], - check=True, - ) - return grammar_graph, out_words_file - - -def create_L( - kaldi_root: Path, - fst_dir: Path, - unique_label: str, - lexicon_file: Path, - in_units_file: Path, - out_words_file: Path, -) -> Path: - lexicon_graph = fst_dir / f"L.{unique_label}.fst" - - if not lexicon_graph.exists(): - logger.info(f"Creating {lexicon_graph} (in units: {in_units_file})") - make_lex = kaldi_root / "egs/wsj/s5/utils/make_lexicon_fst.pl" - fstcompile = kaldi_root / "tools/openfst-1.6.7/bin/fstcompile" - fstaddselfloops = kaldi_root / "src/fstbin/fstaddselfloops" - fstarcsort = kaldi_root / "tools/openfst-1.6.7/bin/fstarcsort" - - def write_disambig_symbol(file): - with open(file, "r") as f: - for line in f: - items = line.rstrip().split() - if items[0] == "#0": - out_path = str(file) + "_disamig" - with open(out_path, "w") as out_f: - print(items[1], file=out_f) - return out_path - - return None - - in_disambig_sym = write_disambig_symbol(in_units_file) - assert in_disambig_sym is not None - out_disambig_sym = write_disambig_symbol(out_words_file) - assert out_disambig_sym is not None - - try: - with open(lexicon_graph, "wb") as out_f: - res = subprocess.run( - [make_lex, lexicon_file], capture_output=True, check=True - ) - assert len(res.stderr) == 0, res.stderr.decode("utf-8") - res = subprocess.run( - [ - fstcompile, - f"--isymbols={in_units_file}", - f"--osymbols={out_words_file}", - "--keep_isymbols=false", - "--keep_osymbols=false", - ], - input=res.stdout, - capture_output=True, - ) - assert len(res.stderr) == 0, res.stderr.decode("utf-8") - res = subprocess.run( - [fstaddselfloops, in_disambig_sym, out_disambig_sym], - input=res.stdout, - capture_output=True, - 
check=True, - ) - res = subprocess.run( - [fstarcsort, "--sort_type=olabel"], - input=res.stdout, - capture_output=True, - check=True, - ) - out_f.write(res.stdout) - except subprocess.CalledProcessError as e: - logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") - os.remove(lexicon_graph) - raise - except AssertionError: - os.remove(lexicon_graph) - raise - - return lexicon_graph - - -def create_LG( - kaldi_root: Path, - fst_dir: Path, - unique_label: str, - lexicon_graph: Path, - grammar_graph: Path, -) -> Path: - lg_graph = fst_dir / f"LG.{unique_label}.fst" - - if not lg_graph.exists(): - logger.info(f"Creating {lg_graph}") - - fsttablecompose = kaldi_root / "src/fstbin/fsttablecompose" - fstdeterminizestar = kaldi_root / "src/fstbin/fstdeterminizestar" - fstminimizeencoded = kaldi_root / "src/fstbin/fstminimizeencoded" - fstpushspecial = kaldi_root / "src/fstbin/fstpushspecial" - fstarcsort = kaldi_root / "tools/openfst-1.6.7/bin/fstarcsort" - - try: - with open(lg_graph, "wb") as out_f: - res = subprocess.run( - [fsttablecompose, lexicon_graph, grammar_graph], - capture_output=True, - check=True, - ) - res = subprocess.run( - [ - fstdeterminizestar, - "--use-log=true", - ], - input=res.stdout, - capture_output=True, - ) - res = subprocess.run( - [fstminimizeencoded], - input=res.stdout, - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstpushspecial], - input=res.stdout, - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstarcsort, "--sort_type=ilabel"], - input=res.stdout, - capture_output=True, - check=True, - ) - out_f.write(res.stdout) - except subprocess.CalledProcessError as e: - logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") - os.remove(lg_graph) - raise - - return lg_graph - - -def create_H( - kaldi_root: Path, - fst_dir: Path, - disambig_out_units_file: Path, - in_labels: str, - vocab: Dictionary, - blk_sym: str, - silence_symbol: Optional[str], -) -> (Path, Path, Path): - h_graph = ( - fst_dir / f"H.{in_labels}{'_' + silence_symbol if silence_symbol else ''}.fst" - ) - h_out_units_file = fst_dir / f"kaldi_dict.h_out.{in_labels}.txt" - disambig_in_units_file_int = Path(str(h_graph) + "isym_disambig.int") - disambig_out_units_file_int = Path(str(disambig_out_units_file) + ".int") - if ( - not h_graph.exists() - or not h_out_units_file.exists() - or not disambig_in_units_file_int.exists() - ): - logger.info(f"Creating {h_graph}") - eps_sym = "" - - num_disambig = 0 - osymbols = [] - - with open(disambig_out_units_file, "r") as f, open( - disambig_out_units_file_int, "w" - ) as out_f: - for line in f: - symb, id = line.rstrip().split() - if line.startswith("#"): - num_disambig += 1 - print(id, file=out_f) - else: - if len(osymbols) == 0: - assert symb == eps_sym, symb - osymbols.append((symb, id)) - - i_idx = 0 - isymbols = [(eps_sym, 0)] - - imap = {} - - for i, s in enumerate(vocab.symbols): - i_idx += 1 - isymbols.append((s, i_idx)) - imap[s] = i_idx - - fst_str = [] - - node_idx = 0 - root_node = node_idx - - special_symbols = [blk_sym] - if silence_symbol is not None: - special_symbols.append(silence_symbol) - - for ss in special_symbols: - fst_str.append("{} {} {} {}".format(root_node, root_node, ss, eps_sym)) - - for symbol, _ in osymbols: - if symbol == eps_sym or symbol.startswith("#"): - continue - - node_idx += 1 - # 1. from root to emitting state - fst_str.append("{} {} {} {}".format(root_node, node_idx, symbol, symbol)) - # 2. 
from emitting state back to root - fst_str.append("{} {} {} {}".format(node_idx, root_node, eps_sym, eps_sym)) - # 3. from emitting state to optional blank state - pre_node = node_idx - node_idx += 1 - for ss in special_symbols: - fst_str.append("{} {} {} {}".format(pre_node, node_idx, ss, eps_sym)) - # 4. from blank state back to root - fst_str.append("{} {} {} {}".format(node_idx, root_node, eps_sym, eps_sym)) - - fst_str.append("{}".format(root_node)) - - fst_str = "\n".join(fst_str) - h_str = str(h_graph) - isym_file = h_str + ".isym" - - with open(isym_file, "w") as f: - for sym, id in isymbols: - f.write("{} {}\n".format(sym, id)) - - with open(h_out_units_file, "w") as f: - for sym, id in osymbols: - f.write("{} {}\n".format(sym, id)) - - with open(disambig_in_units_file_int, "w") as f: - disam_sym_id = len(isymbols) - for _ in range(num_disambig): - f.write("{}\n".format(disam_sym_id)) - disam_sym_id += 1 - - fstcompile = kaldi_root / "tools/openfst-1.6.7/bin/fstcompile" - fstaddselfloops = kaldi_root / "src/fstbin/fstaddselfloops" - fstarcsort = kaldi_root / "tools/openfst-1.6.7/bin/fstarcsort" - - try: - with open(h_graph, "wb") as out_f: - res = subprocess.run( - [ - fstcompile, - f"--isymbols={isym_file}", - f"--osymbols={h_out_units_file}", - "--keep_isymbols=false", - "--keep_osymbols=false", - ], - input=str.encode(fst_str), - capture_output=True, - check=True, - ) - res = subprocess.run( - [ - fstaddselfloops, - disambig_in_units_file_int, - disambig_out_units_file_int, - ], - input=res.stdout, - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstarcsort, "--sort_type=olabel"], - input=res.stdout, - capture_output=True, - check=True, - ) - out_f.write(res.stdout) - except subprocess.CalledProcessError as e: - logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") - os.remove(h_graph) - raise - return h_graph, h_out_units_file, disambig_in_units_file_int - - -def create_HLGa( - kaldi_root: Path, - fst_dir: Path, - unique_label: str, - h_graph: Path, - lg_graph: Path, - disambig_in_words_file_int: Path, -) -> Path: - hlga_graph = fst_dir / f"HLGa.{unique_label}.fst" - - if not hlga_graph.exists(): - logger.info(f"Creating {hlga_graph}") - - fsttablecompose = kaldi_root / "src/fstbin/fsttablecompose" - fstdeterminizestar = kaldi_root / "src/fstbin/fstdeterminizestar" - fstrmsymbols = kaldi_root / "src/fstbin/fstrmsymbols" - fstrmepslocal = kaldi_root / "src/fstbin/fstrmepslocal" - fstminimizeencoded = kaldi_root / "src/fstbin/fstminimizeencoded" - - try: - with open(hlga_graph, "wb") as out_f: - res = subprocess.run( - [ - fsttablecompose, - h_graph, - lg_graph, - ], - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstdeterminizestar, "--use-log=true"], - input=res.stdout, - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstrmsymbols, disambig_in_words_file_int], - input=res.stdout, - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstrmepslocal], - input=res.stdout, - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstminimizeencoded], - input=res.stdout, - capture_output=True, - check=True, - ) - out_f.write(res.stdout) - except subprocess.CalledProcessError as e: - logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") - os.remove(hlga_graph) - raise - - return hlga_graph - - -def create_HLa( - kaldi_root: Path, - fst_dir: Path, - unique_label: str, - h_graph: Path, - l_graph: Path, - disambig_in_words_file_int: Path, -) -> Path: - hla_graph = fst_dir / 
f"HLa.{unique_label}.fst" - - if not hla_graph.exists(): - logger.info(f"Creating {hla_graph}") - - fsttablecompose = kaldi_root / "src/fstbin/fsttablecompose" - fstdeterminizestar = kaldi_root / "src/fstbin/fstdeterminizestar" - fstrmsymbols = kaldi_root / "src/fstbin/fstrmsymbols" - fstrmepslocal = kaldi_root / "src/fstbin/fstrmepslocal" - fstminimizeencoded = kaldi_root / "src/fstbin/fstminimizeencoded" - - try: - with open(hla_graph, "wb") as out_f: - res = subprocess.run( - [ - fsttablecompose, - h_graph, - l_graph, - ], - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstdeterminizestar, "--use-log=true"], - input=res.stdout, - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstrmsymbols, disambig_in_words_file_int], - input=res.stdout, - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstrmepslocal], - input=res.stdout, - capture_output=True, - check=True, - ) - res = subprocess.run( - [fstminimizeencoded], - input=res.stdout, - capture_output=True, - check=True, - ) - out_f.write(res.stdout) - except subprocess.CalledProcessError as e: - logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") - os.remove(hla_graph) - raise - - return hla_graph - - -def create_HLG( - kaldi_root: Path, - fst_dir: Path, - unique_label: str, - hlga_graph: Path, - prefix: str = "HLG", -) -> Path: - hlg_graph = fst_dir / f"{prefix}.{unique_label}.fst" - - if not hlg_graph.exists(): - logger.info(f"Creating {hlg_graph}") - - add_self_loop = script_dir / "add-self-loop-simple" - kaldi_src = kaldi_root / "src" - kaldi_lib = kaldi_src / "lib" - - try: - if not add_self_loop.exists(): - fst_include = kaldi_root / "tools/openfst-1.6.7/include" - add_self_loop_src = script_dir / "add-self-loop-simple.cc" - - subprocess.run( - [ - "c++", - f"-I{kaldi_src}", - f"-I{fst_include}", - f"-L{kaldi_lib}", - add_self_loop_src, - "-lkaldi-base", - "-lkaldi-fstext", - "-o", - add_self_loop, - ], - check=True, - ) - - my_env = os.environ.copy() - my_env["LD_LIBRARY_PATH"] = f"{kaldi_lib}:{my_env['LD_LIBRARY_PATH']}" - - subprocess.run( - [ - add_self_loop, - hlga_graph, - hlg_graph, - ], - check=True, - capture_output=True, - env=my_env, - ) - except subprocess.CalledProcessError as e: - logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") - raise - - return hlg_graph - - -def initalize_kaldi(cfg: KaldiInitializerConfig) -> Path: - if cfg.fst_dir is None: - cfg.fst_dir = osp.join(cfg.data_dir, "kaldi") - if cfg.out_labels is None: - cfg.out_labels = cfg.in_labels - - kaldi_root = Path(cfg.kaldi_root) - data_dir = Path(cfg.data_dir) - fst_dir = Path(cfg.fst_dir) - fst_dir.mkdir(parents=True, exist_ok=True) - - arpa_base = osp.splitext(osp.basename(cfg.lm_arpa))[0] - unique_label = f"{cfg.in_labels}.{arpa_base}" - - with open(data_dir / f"dict.{cfg.in_labels}.txt", "r") as f: - vocab = Dictionary.load(f) - - in_units_file = create_units(fst_dir, cfg.in_labels, vocab) - - grammar_graph, out_words_file = create_G( - kaldi_root, fst_dir, Path(cfg.lm_arpa), arpa_base - ) - - disambig_lexicon_file, disambig_L_in_units_file = create_lexicon( - cfg, fst_dir, unique_label, in_units_file, out_words_file - ) - - h_graph, h_out_units_file, disambig_in_units_file_int = create_H( - kaldi_root, - fst_dir, - disambig_L_in_units_file, - cfg.in_labels, - vocab, - cfg.blank_symbol, - cfg.silence_symbol, - ) - lexicon_graph = create_L( - kaldi_root, - fst_dir, - unique_label, - disambig_lexicon_file, - disambig_L_in_units_file, - out_words_file, - ) - lg_graph = create_LG( 
- kaldi_root, fst_dir, unique_label, lexicon_graph, grammar_graph - ) - hlga_graph = create_HLGa( - kaldi_root, fst_dir, unique_label, h_graph, lg_graph, disambig_in_units_file_int - ) - hlg_graph = create_HLG(kaldi_root, fst_dir, unique_label, hlga_graph) - - # for debugging - # hla_graph = create_HLa(kaldi_root, fst_dir, unique_label, h_graph, lexicon_graph, disambig_in_units_file_int) - # hl_graph = create_HLG(kaldi_root, fst_dir, unique_label, hla_graph, prefix="HL_looped") - # create_HLG(kaldi_root, fst_dir, "phnc", h_graph, prefix="H_looped") - - return hlg_graph - - -@hydra.main(config_path=config_path, config_name="kaldi_initializer") -def cli_main(cfg: KaldiInitializerConfig) -> None: - container = OmegaConf.to_container(cfg, resolve=True, enum_to_str=True) - cfg = OmegaConf.create(container) - OmegaConf.set_struct(cfg, True) - initalize_kaldi(cfg) - - -if __name__ == "__main__": - - logging.root.setLevel(logging.INFO) - logging.basicConfig(level=logging.INFO) - - try: - from hydra._internal.utils import ( - get_args, - ) # pylint: disable=import-outside-toplevel - - cfg_name = get_args().config_name or "kaldi_initializer" - except ImportError: - logger.warning("Failed to get config name from hydra args") - cfg_name = "kaldi_initializer" - - cs = ConfigStore.instance() - cs.store(name=cfg_name, node=KaldiInitializerConfig) - - cli_main() diff --git a/spaces/Ibtehaj10/cheating-detection/README.md b/spaces/Ibtehaj10/cheating-detection/README.md deleted file mode 100644 index fdb3907b8c2354d6378356d6d386b60ccc94d32a..0000000000000000000000000000000000000000 --- a/spaces/Ibtehaj10/cheating-detection/README.md +++ /dev/null @@ -1,45 +0,0 @@ -# AIComputerVision -This project contains various computer vision and AI related python scripts - -Link to full playlist: https://www.youtube.com/watch?v=UM9oDhhAg88&list=PLWw98q-Xe7iH8UHARl8RGk8MRj1raY4Eh - -Below is brief description for each script: - -1. Cat Dog detection: -This script can detect cats and dogs in a frame. You can replace cat or dog with any other object you want to detect. - -2. Centroidtracker: -This script helps in tracking any object in a frame. We have used this in person_tracking.py script in order to track persons in the frame. - -3. Dwell Time Calculation: -This script calculates the time a person has spent in a frame. It is a good example of calculating total time a person was present in frame. - -4. Face Detection: -This script detects face in person image or in a frame - -5. FPS Example: -While inferencing on a video file or frame from live usb webcam, its always a good idea to keep a check on how much fps we are getting. This script shows approx fps on frame. - -6. OpenCV Example: -This script shows basic usage of opencv - -7. Person Detection in Image File: -This script detects person in image file - -8. Person Detection in Video File: -This script detects person in video file. Test video file is present in video dir. - -9. Person Tracking: -This script detects person and keeps tracking them in the frame. It assigns a unique ID to each detected person. - -10. Monitor Social Distance -This script monitors social distance between the persons. If it is less than a threshold value, we display bounding box in red otherwise green. - -11. Drawing tracking line: -This script draws a line denoting where the person has entered in the frame and where he has moved in the frame. - -12. Face Mask Detection: -This script checks if a person is wearing face mask or not - -13. 
Person Counter: -This script counts the number of person present in the frame. diff --git a/spaces/Illumotion/Koboldcpp/examples/gpt4all.sh b/spaces/Illumotion/Koboldcpp/examples/gpt4all.sh deleted file mode 100644 index 5fd739e55c554a3e7f855c147789d873fd7aff24..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/examples/gpt4all.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -# -# Temporary script - will be removed in the future -# - -cd `dirname $0` -cd .. - -./main --color --instruct --threads 4 \ - --model ./models/gpt4all-7B/gpt4all-lora-quantized.bin \ - --file ./prompts/alpaca.txt \ - --batch_size 8 --ctx_size 2048 -n -1 \ - --repeat_last_n 64 --repeat_penalty 1.3 \ - --n_predict 128 --temp 0.1 --top_k 40 --top_p 0.95 diff --git a/spaces/ImPavloh/voiceit/voiceit.py b/spaces/ImPavloh/voiceit/voiceit.py deleted file mode 100644 index 8268a8b68c5a1c63b5a2f90b9fe5fd4c8311001b..0000000000000000000000000000000000000000 --- a/spaces/ImPavloh/voiceit/voiceit.py +++ /dev/null @@ -1,184 +0,0 @@ -from threading import Thread -from pathlib import Path -import gradio as gr -import subprocess -import shutil -import time -import copy -import glob -import json -import os - -CURRENT_DIR = Path(__file__).resolve().parent -MODELOS = CURRENT_DIR / "modelos" -INFERENCE_OUTPUT_DIRNAME = CURRENT_DIR / "inference_output" - -def get_container_format(filename): - command = ["ffprobe", "-v", "error", "-select_streams", "v:0", "-show_entries", "format=format_name", "-of", "default=noprint_wrappers=1:nokey=1", filename] - process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - output, error = process.communicate() - if error: - raise ValueError(f"Error: {error.decode()}") - return output.decode().strip() - -def cleanup_dirs(): - dir_path = Path(INFERENCE_OUTPUT_DIRNAME) - if dir_path.exists(): - shutil.rmtree(dir_path) - -def get_speakers(): - global speakers - speakers = [] - for _, dirs, _ in os.walk(MODELOS): - for folder in dirs: - cur_speaker = {} - g = glob.glob(os.path.join(MODELOS, folder, 'G_*.pth')) - if not len(g): - continue - cur_speaker["model_path"] = g[0] - cur_speaker["model_folder"] = folder - cur_speaker["cluster_path"] = "" - - cfg = glob.glob(os.path.join(MODELOS, folder, '*.json')) - if not len(cfg): - continue - cur_speaker["cfg_path"] = cfg[0] - with open(cur_speaker["cfg_path"]) as f: - try: - cfg_json = json.loads(f.read()) - except Exception as e: - print("Archivo json malformado en" + folder) - for name, i in cfg_json["spk"].items(): - cur_speaker["name"] = name - cur_speaker["id"] = i - if not name.startswith('.'): - speakers.append(copy.copy(cur_speaker)) - return sorted(speakers, key=lambda x: x["name"].lower()) - -def run_inference(speaker, path, f0_method, transpose, noise_scale, cluster_ratio): - model_path = speaker["model_path"] - config_path = speaker["cfg_path"] - cluster_path = speaker["cluster_path"] - cluster_args = f"-k {cluster_path} -r {cluster_ratio}" if cluster_path and cluster_ratio > 0 else "" - inference_cmd = f"svc infer {path.absolute()} -m {model_path} -c {config_path} {cluster_args} -t {transpose} --f0-method crepe -n 0.4 -o {INFERENCE_OUTPUT_DIRNAME}/{path.name} --no-auto-predict-f0" - result = subprocess.run(inference_cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) - if result.stderr: - if "AttributeError" in result.stderr: - return None, gr.Textbox.update("⚠️ Modelo SVC incompatible.") - if not list(Path(INFERENCE_OUTPUT_DIRNAME).glob("*")): - return None, gr.Textbox.update("⚠️ 
Error.") - -def convert(speaker_box, audio): - speaker = next((x for x in speakers if x["name"] == speaker_box), None) - if not speaker: - return None, gr.Textbox.update("⚠️ Selecciona un modelo.") - if not audio: - return None, gr.Textbox.update("⚠️ Sube un audio.") - file_path = os.path.join(os.getcwd(), str(audio)) - transpose = 0 - cluster_ratio = 0 - if os.path.exists(INFERENCE_OUTPUT_DIRNAME): - cleanup_dirs() - os.makedirs("inference_output", exist_ok=True) - ts0 = time.time() - run_inference(speaker, Path(file_path), 0, 0, 0.4, 0) - final_filename = f"output{Path(file_path).suffix}" - shutil.move(Path(INFERENCE_OUTPUT_DIRNAME, Path(file_path).name), Path(final_filename)) - cleanup_dirs() - os.remove(file_path) - ts1 = time.time() - tiempo1 = int(ts1 - ts0) - return final_filename, gr.Textbox.update("👌 ¡Voz cambiada!", label=f"Tiempo total: {tiempo1} segundos") - -def clear(): - shutil.rmtree(INFERENCE_OUTPUT_DIRNAME, ignore_errors=True) - tmp_files = glob.glob("*.tmp") - for f in tmp_files: - os.remove(f) - return gr.Dropdown.update(value="Elige un modelo de voz"), None, gr.Textbox.update("🗑️ Datos borrados.", label=f"Información") - - -css = """ -.gradio-container { - font-family: 'IBM Plex Sans', sans-serif; -} -footer { - visibility: hidden; - display: none; -} -.center-container { - display: flex; - flex-direction: column; - align-items: center; - justify-content: center; -} -""" - -with gr.Blocks( - css=css, - title="VoiceIt! - Pavloh", - theme=gr.themes.Soft( - primary_hue="cyan", - secondary_hue="blue", - radius_size="lg", - text_size="lg" - ).set(loader_color="#0B0F19", shadow_drop='*shadow_drop_lg', block_border_width="3px") -) as pavloh: - gr.HTML( - """ -
- [HTML stripped during extraction from this gr.HTML block: a header with license ("Licencia"),
-  GitHub and Twitter badge links, the title "🗣️ VoiceIt! - Un proyecto de Pavloh", and the
-  tagline "Cambia la voz de audios utilizando modelos pre-entrenados de streamers."
-  ("Change the voice of audio clips using pre-trained streamer models.")]
- """ - ) - - with gr.Row(elem_id="1").style(equal_height=True): - with gr.Column(): - d1 = gr.Dropdown([x["name"] for x in get_speakers()], label="📦 Selecciona un modelo", value="Elige un modelo de voz") - audio = gr.Audio(label="🗣️ Sube un audio", type="filepath") - with gr.Column(): - a2 = gr.Audio(label="🔊 Resultado", type="filepath") - t1 = gr.Textbox(type="text", label="📄 Información", value="Elige un modelo y un audio para cambiar la voz.") - with gr.Row(): - b0 = gr.Button("🗑️ Borrar") - b1 = gr.Button("🎤 Cambiar voz",variant="primary") - b0.click(clear, outputs=[d1, audio, t1]) - b1.click(convert, inputs=[d1, audio], outputs=[a2, t1]) - - with gr.Row(): - with gr.Accordion(label="Información importante", open=False): - gr.HTML(""" -
- [HTML stripped during extraction from the "Información importante" accordion; it held three
-  usage notes: audio files must contain a single voice, free of background noise or music;
-  file names must avoid spaces and special symbols, using only alphanumeric characters and
-  underscores; and using the site implies accepting the license and terms of use.]
- """) - -if __name__ == "__main__": pavloh.launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/Jean-Baptiste/email_parser/email_parser/utils.py b/spaces/Jean-Baptiste/email_parser/email_parser/utils.py deleted file mode 100644 index 947dd230a715f8749037189dc9b2eeb6a03e735e..0000000000000000000000000000000000000000 --- a/spaces/Jean-Baptiste/email_parser/email_parser/utils.py +++ /dev/null @@ -1,74 +0,0 @@ -from functools import wraps -import logging -import os -from time import time -import configparser - -timer_functions = {} - -# Loading configuration from config file -config = configparser.ConfigParser() -config.read(os.path.join(os.path.dirname(__file__), 'config.ini')) - - -def timing(f): - @wraps(f) - def wrap(*args, **kw): - ts = time() - result = f(*args, **kw) - te = time() - if f.__name__ in timer_functions.keys(): - current_elapsed_time = timer_functions[f.__name__] - else: - current_elapsed_time = 0 - timer_functions[f.__name__] = current_elapsed_time + (te - ts) - logging.debug('func:%r took: %2.4f sec' % \ - (f.__name__, te - ts)) - return result - return wrap - - -def f_read_config(path=None): - """ read config file from specified file path - - :param path: file path - :return: configparser object - """ - # Loading configuration from config file - config = configparser.ConfigParser() - if path is None: - path = os.path.join(os.path.dirname(__file__), 'config.ini') - config.read(path, encoding='utf-8') - return config - -def f_setup_logger(level_sysout=logging.INFO, level_file=logging.DEBUG, folder_path="logs"): - """Setup logger - - By default we display only INFO in console, and write everything in file - - Args: - level_sysout: Level that is displayed in console (default INFO) - level_file: Level that is written in file (default DEBUG) - - Returns: - Nothing - - """ - if not os.path.isdir(folder_path): - os.mkdir(folder_path) - - for handler in logging.root.handlers[:]: - logging.root.removeHandler(handler) - - file_handler = logging.FileHandler(filename=os.path.join(folder_path, "amf_uce_nlp_{}.log".format(time())), - encoding='utf-8') - sysout_handler = logging.StreamHandler() - file_handler.setLevel(level_file) - sysout_handler.setLevel(level_sysout) - logging.basicConfig(handlers=[file_handler, sysout_handler], level=logging.DEBUG, - format='%(asctime)s (%(levelname)s) %(message)s', datefmt='%m/%d/%y %I:%M:%S %p') - - -def get_model_full_path(model_name): - path_models = config["DEFAULT"]["path_models"] - return os.path.join(os.path.dirname(__file__), path_models, model_name) \ No newline at end of file diff --git a/spaces/JenitaChristopher/MY_GEN_AI/app.py b/spaces/JenitaChristopher/MY_GEN_AI/app.py deleted file mode 100644 index a362dcc7d0ddd1eee86961f1bc3db6d894fbd3d5..0000000000000000000000000000000000000000 --- a/spaces/JenitaChristopher/MY_GEN_AI/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') - -template = """You are a helpful assistant to answer all user queries. 
-{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -def get_text_response(user_message,history): - response = llm_chain.predict(user_message = user_message) - return response - -demo = gr.ChatInterface(get_text_response) - -if __name__ == "__main__": - demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. diff --git a/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Spider/con_spider_logreg.py b/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Spider/con_spider_logreg.py deleted file mode 100644 index ab7a5b2c2cca15797d7bf987ef15249e3597d2ea..0000000000000000000000000000000000000000 --- a/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Spider/con_spider_logreg.py +++ /dev/null @@ -1,36 +0,0 @@ -import cv2 -import numpy as np -from PIL import Image -import pickle -import tensorflow as tf - -class spiderLogReg: - def __init__(self,url) -> None: - self.image = url - - def predict_image(self): - # Load the model - load_extractor = tf.keras.models.load_model("././Model/Spider/resnetLogreg/resnet_EXTRACTOR.h5") - - modelpath = "././Model/Spider/resnetLogreg/dataSaved.pkl" - - with open(modelpath, 'rb') as file: - saved_data = pickle.load(file) - animal_breed = saved_data['class_name'] - model = saved_data['logreg_model'] - - im = Image.open(self.image) - img = im.convert("RGB") - img= np.asarray(img) - image_resized= cv2.resize(img, (224,224)) - features = load_extractor.predict(np.expand_dims(image_resized, axis=0)) - - reshaped_features = features.reshape(features.shape[0],-1) - predicted_class = model.predict(reshaped_features) - pred_prob = model.predict_proba(reshaped_features)[:2] - prediction_probability = pred_prob[0][predicted_class[0]] - predicted_class - - output_class= animal_breed[predicted_class[0]] - - return [output_class, prediction_probability] diff --git a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/pdf_func.py b/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/pdf_func.py deleted file mode 100644 index 1b1087f2687fd26c8676867dd45189c069dd56a5..0000000000000000000000000000000000000000 --- a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/pdf_func.py +++ /dev/null @@ -1,180 +0,0 @@ -from types import SimpleNamespace -import pdfplumber -import logging -from langchain.docstore.document import Document - -def prepare_table_config(crop_page): - """Prepare table查找边界, 要求page为原始page - - From https://github.com/jsvine/pdfplumber/issues/242 - """ - page = crop_page.root_page # root/parent - cs = page.curves + page.edges - def curves_to_edges(): - """See https://github.com/jsvine/pdfplumber/issues/127""" - edges = [] - for c in cs: - edges += pdfplumber.utils.rect_to_edges(c) - return edges - edges = curves_to_edges() - return { - "vertical_strategy": "explicit", - "horizontal_strategy": "explicit", - "explicit_vertical_lines": edges, - "explicit_horizontal_lines": edges, - "intersection_y_tolerance": 10, - } - -def get_text_outside_table(crop_page): - ts = prepare_table_config(crop_page) - if len(ts["explicit_vertical_lines"]) == 0 or len(ts["explicit_horizontal_lines"]) == 0: - return crop_page - - ### Get the bounding boxes of the tables on the page. 
- bboxes = [table.bbox for table in crop_page.root_page.find_tables(table_settings=ts)] - def not_within_bboxes(obj): - """Check if the object is in any of the table's bbox.""" - def obj_in_bbox(_bbox): - """See https://github.com/jsvine/pdfplumber/blob/stable/pdfplumber/table.py#L404""" - v_mid = (obj["top"] + obj["bottom"]) / 2 - h_mid = (obj["x0"] + obj["x1"]) / 2 - x0, top, x1, bottom = _bbox - return (h_mid >= x0) and (h_mid < x1) and (v_mid >= top) and (v_mid < bottom) - return not any(obj_in_bbox(__bbox) for __bbox in bboxes) - - return crop_page.filter(not_within_bboxes) -# 请使用 LaTeX 表达公式,行内公式以 $ 包裹,行间公式以 $$ 包裹 - -extract_words = lambda page: page.extract_words(keep_blank_chars=True, y_tolerance=0, x_tolerance=1, extra_attrs=["fontname", "size", "object_type"]) -# dict_keys(['text', 'x0', 'x1', 'top', 'doctop', 'bottom', 'upright', 'direction', 'fontname', 'size']) - -def get_title_with_cropped_page(first_page): - title = [] # 处理标题 - x0,top,x1,bottom = first_page.bbox # 获取页面边框 - - for word in extract_words(first_page): - word = SimpleNamespace(**word) - - if word.size >= 14: - title.append(word.text) - title_bottom = word.bottom - elif word.text == "Abstract": # 获取页面abstract - top = word.top - - user_info = [i["text"] for i in extract_words(first_page.within_bbox((x0,title_bottom,x1,top)))] - # 裁剪掉上半部分, within_bbox: full_included; crop: partial_included - return title, user_info, first_page.within_bbox((x0,top,x1,bottom)) - -def get_column_cropped_pages(pages, two_column=True): - new_pages = [] - for page in pages: - if two_column: - left = page.within_bbox((0, 0, page.width/2, page.height),relative=True) - right = page.within_bbox((page.width/2, 0, page.width, page.height), relative=True) - new_pages.append(left) - new_pages.append(right) - else: - new_pages.append(page) - - return new_pages - -def parse_pdf(filename, two_column = True): - level = logging.getLogger().level - if level == logging.getLevelName("DEBUG"): - logging.getLogger().setLevel("INFO") - - with pdfplumber.open(filename) as pdf: - title, user_info, first_page = get_title_with_cropped_page(pdf.pages[0]) - new_pages = get_column_cropped_pages([first_page] + pdf.pages[1:], two_column) - - chapters = [] - # tuple (chapter_name, [pageid] (start,stop), chapter_text) - create_chapter = lambda page_start,name_top,name_bottom: SimpleNamespace( - name=[], - name_top=name_top, - name_bottom=name_bottom, - record_chapter_name = True, - - page_start=page_start, - page_stop=None, - - text=[], - ) - cur_chapter = None - - # 按页遍历PDF文档 - for idx, page in enumerate(new_pages): - page = get_text_outside_table(page) - - # 按行遍历页面文本 - for word in extract_words(page): - word = SimpleNamespace(**word) - - # 检查行文本是否以12号字体打印,如果是,则将其作为新章节开始 - if word.size >= 11: # 出现chapter name - if cur_chapter is None: - cur_chapter = create_chapter(page.page_number, word.top, word.bottom) - elif not cur_chapter.record_chapter_name or (cur_chapter.name_bottom != cur_chapter.name_bottom and cur_chapter.name_top != cur_chapter.name_top): - # 不再继续写chapter name - cur_chapter.page_stop = page.page_number # stop id - chapters.append(cur_chapter) - # 重置当前chapter信息 - cur_chapter = create_chapter(page.page_number, word.top, word.bottom) - - # print(word.size, word.top, word.bottom, word.text) - cur_chapter.name.append(word.text) - else: - cur_chapter.record_chapter_name = False # chapter name 结束 - cur_chapter.text.append(word.text) - else: - # 处理最后一个章节 - cur_chapter.page_stop = page.page_number # stop id - chapters.append(cur_chapter) - - for i in chapters: - 
logging.info(f"section: {i.name} pages:{i.page_start, i.page_stop} word-count:{len(i.text)}") - logging.debug(" ".join(i.text)) - - title = " ".join(title) - user_info = " ".join(user_info) - text = f"Article Title: {title}, Information:{user_info}\n" - for idx, chapter in enumerate(chapters): - chapter.name = " ".join(chapter.name) - text += f"The {idx}th Chapter {chapter.name}: " + " ".join(chapter.text) + "\n" - - logging.getLogger().setLevel(level) - return Document(page_content=text, metadata={"title": title}) - -BASE_POINTS = """ -1. Who are the authors? -2. What is the process of the proposed method? -3. What is the performance of the proposed method? Please note down its performance metrics. -4. What are the baseline models and their performances? Please note down these baseline methods. -5. What dataset did this paper use? -""" - -READING_PROMPT = """ -You are a researcher helper bot. You can help the user with research paper reading and summarizing. \n -Now I am going to send you a paper. You need to read it and summarize it for me part by part. \n -When you are reading, You need to focus on these key points:{} -""" - -READING_PROMT_V2 = """ -You are a researcher helper bot. You can help the user with research paper reading and summarizing. \n -Now I am going to send you a paper. You need to read it and summarize it for me part by part. \n -When you are reading, You need to focus on these key points:{}, - -And You need to generate a brief but informative title for this part. -Your return format: -- title: '...' -- summary: '...' -""" - -SUMMARY_PROMPT = "You are a researcher helper bot. Now you need to read the summaries of a research paper." - - -if __name__ == '__main__': - # Test code - z = parse_pdf("./build/test.pdf") - print(z["user_info"]) - print(z["title"]) \ No newline at end of file diff --git a/spaces/Joshua1808/PaginaWeb/app.py b/spaces/Joshua1808/PaginaWeb/app.py deleted file mode 100644 index 677f0d5fba6bda50a8c3fa267241deb5ad802e42..0000000000000000000000000000000000000000 --- a/spaces/Joshua1808/PaginaWeb/app.py +++ /dev/null @@ -1,276 +0,0 @@ -import tweepy as tw -import streamlit as st -import pandas as pd -import regex as re -import numpy as np -import pysentimiento -import geopy -import matplotlib.pyplot as plt -import langdetect - -#Importar Nominatim para geocalización -from geopy.geocoders import Nominatim -#Importar pipeline para usar al modelo Transformer -from transformers import pipeline -#función detect es utilizada para detectar el idioma de un texto dado. 
-from langdetect import detect - - -model_checkpoint = "hackathon-pln-es/twitter_sexismo-finetuned-robertuito-exist2021" -pipeline_nlp = pipeline("text-classification", model=model_checkpoint) - -#Claves de acceso a la API de Twitter -consumer_key = "BjipwQslVG4vBdy4qK318KnoA" -consumer_secret = "3fzL70v9faklrPgvTi3zbofw9rwk92fgGdtAslFkFYt8kGmqBJ" -access_token = "1217853705086799872-Y5zEChpTeKccuLY3XJRXDPPZhNrlba" -access_token_secret = "pqQ5aFSJxzJ2xnI6yhVtNjQO36FOu8DBOH6DtUrPAU54J" -auth = tw.OAuthHandler(consumer_key, consumer_secret) -auth.set_access_token(access_token, access_token_secret) -api = tw.API(auth, wait_on_rate_limit=True) - -#Función para limpiar datos -def limpieza_datos(tweet): - # Eliminar emojis - tweet = re.sub(r'[\U0001F600-\U0001F64F]', '', tweet) - tweet = re.sub(r'[\U0001F300-\U0001F5FF]', '', tweet) - tweet = re.sub(r'[\U0001F680-\U0001F6FF]', '', tweet) - tweet = re.sub(r'[\U0001F1E0-\U0001F1FF]', '', tweet) - # Eliminar arrobas - tweet = re.sub(r'@\w+', '', tweet) - # Eliminar URL - tweet = re.sub(r'http\S+', '', tweet) - # Eliminar hashtags - tweet = re.sub(r'#\w+', '', tweet) - # Eliminar caracteres especiales - #tweet = re.sub(r'[^a-zA-Z0-9 \n\.]', '', tweet) - tweet = re.sub(r'[^a-zA-Z0-9 \n\áéíóúÁÉÍÓÚñÑ.]', '', tweet) - return tweet - -#Función para cambiar el color de la palabra "Sexista" -def highlight_survived(s): - return ['background-color: red']*len(s) if (s.Sexista == 1) else ['background-color: green']*len(s) - -def color_survived(val): - color = 'red' if val=='Sexista' else 'white' - return f'background-color: {color}' - -#Función para el ancho de la pagina web -st.set_page_config(layout="wide") -st.markdown('',unsafe_allow_html=True) - -#colT1,colT2 = st.columns([2,8]) - - -st.markdown(""" """, unsafe_allow_html=True) - -#st.markdown('

- [HTML heading stripped during extraction: "Detectando el Sexismo en Linea: Un proyecto de Investigación" ("Detecting Online Sexism: A Research Project")]

', unsafe_allow_html=True) - -#Editar el tipo de letra -st.markdown(""" """, unsafe_allow_html=True) - -st.markdown(""" """, unsafe_allow_html=True) - -#Parrafo -st.markdown('

- [HTML paragraph stripped during extraction, describing the project: a web application that uses the Python Tweepy library to download tweets, searchable by user or by location, and Transformer-based language models to classify each tweet as "Sexista" or "No Sexista", aiming to identify and counter sexist speech online and promote gender equality and inclusion.]

',unsafe_allow_html=True) - -#Función para extrer tweets de usuarios de Twitter, y analizarlos por el modelo Transformer -def tweets_usuario(usuario, cant_de_tweets): - tabla = [] # crea una lista vacía llamada tabla - if(cant_de_tweets > 0 and usuario != "" ): - try: - # Buscar la información del perfil de usuario - user = api.get_user(screen_name=usuario) - # utiliza la API de Twitter para buscar tweets en español de un usuario de Twitter - tweets = api.user_timeline(screen_name = usuario,tweet_mode="extended", count= cant_de_tweets) - result = [] # crea una lista vacía llamada result - # itera a través de cada tweet en la lista de tweets encontrados - for tweet in tweets: - # si el tweet comienza con 'RT' o si el tweet está vacío, continúa con el siguiente tweet - if (tweet.full_text.startswith('RT')): - continue - else: - text = tweet.full_text - #Try Catch para detectar que sean del idioma español - try: - language = detect(text) - if language == 'es': - #Limpieza de cada tweet extraido - datos=limpieza_datos(text) - if datos == "": - continue - else: - #Predicción del modelo Transformer para cada tweet - prediction = pipeline_nlp(datos) - for predic in prediction: - # Agrega la etiqueta de Tweets, Predicción y Probabilidad en una lista y la agrega a la lista de resultados - etiqueta = {'Tweets': datos, 'Prediccion': predic['label'], 'Probabilidad': predic['score']} - result.append(etiqueta) - except: - pass - # Crea un dataframe a partir de la lista de resultados - df = pd.DataFrame(result) - # Si el dataframe está vacío, muestra un mensaje de que no se encontraron tweets sexistas dentro de la localidad - if df.empty: - muestra= st.text("No hay tweets Sexistas a analizar") - tabla.append(muestra) - - else: - # ordena el dataframe por Predicción y Probabilidad en orden descendente - df.sort_values(by=['Prediccion', 'Probabilidad'], ascending=[False, False], inplace=True) - df['Prediccion'] = np.where(df['Prediccion'] == 'LABEL_1', 'Sexista', 'No Sexista') - df['Probabilidad'] = df['Probabilidad'].apply(lambda x: round(x, 3)) - muestra = st.table(df.reset_index(drop=True).head(50).style.applymap(color_survived, subset=['Prediccion'])) - tabla.append(muestra) - except Exception as e: - #Try except para detectar si la cuenta es existente - muestra = st.text(f"La cuenta {usuario} no existe.") - tabla.append(muestra) - else: - #Condición que se solicita cuando no se llenaron los datos para la busqueda - muestra= st.text("Ingrese los parametros correspondientes") - tabla.append(muestra) - return tabla - -#Función para buscar por localidad -def tweets_localidad(buscar_localidad): - tabla = [] # crea una lista vacía llamada tabla - try: - #Crea un objeto Geolocalizador y busca la ubicación que se encuentra en el parámetro buscar_localidad - geolocator = Nominatim(user_agent="nombre_del_usuario") - location = geolocator.geocode(buscar_localidad) - radius = "15km" #establece el radio de búsqueda en 15km - # utiliza la API de Twitter para buscar tweets en español cerca de la ubicación encontrada - tweets = api.search_tweets(q="",lang="es",geocode=f"{location.latitude},{location.longitude},{radius}", count = 100, tweet_mode="extended") - result = [] # crea una lista vacía llamada result - # itera a través de cada tweet en la lista de tweets encontrados - for tweet in tweets: - # si el tweet comienza con 'RT' o si el tweet está vacío, continúa con el siguiente tweet - if (tweet.full_text.startswith('RT')): - continue - elif not tweet.full_text.strip(): - continue - else: - # Limpia los datos del 
tweet y realiza una predicción sobre si es sexista o no - datos = limpieza_datos(tweet.full_text) - prediction = pipeline_nlp(datos) - for predic in prediction: - # Agrega la etiqueta de Tweets, Predicción y Probabilidad en una lista y la agrega a la lista de resultados - etiqueta = {'Tweets': datos,'Prediccion': predic['label'], 'Probabilidad': predic['score']} - result.append(etiqueta) - # Crea un dataframe a partir de la lista de resultados - df = pd.DataFrame(result) - # Si el dataframe está vacío, muestra un mensaje de que no se encontraron tweets sexistas dentro de la localidad - if df.empty: - muestra=st.text("No se encontraron tweets sexistas dentro de la localidad") - tabla.append(muestra) - else: - #tabla.append(muestra) - # ordena el dataframe por Predicción y Probabilidad en orden descendente - df.sort_values(by=['Prediccion', 'Probabilidad'], ascending=[False, False], inplace=True) - # convierte la columna de Predicción en "Sexista" o "No Sexista" y redondea la columna de Probabilidad a 3 decimales - df['Prediccion'] = np.where(df['Prediccion'] == 'LABEL_1', 'Sexista', 'No Sexista') - df['Probabilidad'] = df['Probabilidad'].round(3) - # Obtener los datos con probabilidad mayor a 0.50 - df = df[df['Probabilidad'] > 0.50] - # Obtener los 3 primeros datos con mayor probabilidad sexista - sexista_df = df[df['Prediccion'] == 'Sexista'].head(3) - - # Obtener los 3 primeros datos con mayor probabilidad no sexista - no_sexista_df = df[df['Prediccion'] == 'No Sexista'].head(3) - - # Concatenar ambos dataframes - muestra_df = pd.concat([sexista_df, no_sexista_df], axis=0) - col1, col2 = st.columns(2) - with col1: - #lista de Tweets mostrados en pantalla - muestra = st.table(muestra_df.reset_index(drop=True).head(6).style.applymap(color_survived, subset=['Prediccion'])) - with col2: - resultado = df['Prediccion'].value_counts() - def autopct_fun(abs_values): - gen = iter(abs_values) - return lambda pct: f"{pct:.1f}% ({next(gen)})" - #Gráfico - colores=["#aae977","#EE3555"] - fig, ax = plt.subplots() - fig.set_size_inches(2,2) - plt.pie(resultado,labels=resultado.index,autopct=autopct_fun(resultado),colors=colores, textprops={'fontsize': 5}) - ax.set_title("Porcentajes por Categorias en la localidad\n"+buscar_localidad.capitalize(), fontsize=5, fontweight="bold") - plt.rcParams.update({'font.size':4, 'font.weight':'bold'}) - ax.legend() - # Muestra el gráfico - plt.show() - st.set_option('deprecation.showPyplotGlobalUse', False) - st.pyplot() - - except AttributeError as e: - muestra=st.text("No existe ninguna localidad con ese nombre") - tabla.append(muestra) - - return tabla - - -def analizar_frase(frase): - language = detect(frase) - if frase == "": - tabla = st.text("Ingrese una frase") - #st.text("Ingrese una frase") - elif language == 'es': - predictions = pipeline_nlp(frase) - # convierte las predicciones en una lista de diccionarios - data = [{'Texto': frase, 'Prediccion': prediction['label'], 'Probabilidad': prediction['score']} for prediction in predictions] - # crea un DataFrame a partir de la lista de diccionarios - df = pd.DataFrame(data) - df['Prediccion'] = np.where( df['Prediccion'] == 'LABEL_1', 'Sexista', 'No Sexista') - # muestra el DataFrame - tabla = st.table(df.reset_index(drop=True).head(1).style.applymap(color_survived, subset=['Prediccion'])) - else: - tabla = st.text("Solo Frase en español") - - return tabla - -#Función para correr el formulario -def run(): - # Se inicia una sesión Streamlit - with st.form("my_form"): - # Se solicita al usuario que ingrese la 
búsqueda que desea realizar - search_words = st.text_input("Introduzca la frase, el usuario o localidad para analizar y pulse el check correspondiente") - # Se solicita al usuario que ingrese la cantidad de tweets que desea analizar (máximo 50) - number_of_tweets = st.number_input('Introduzca número de tweets a analizar del usuario Máximo 50', 0,50,0) - st.write("Escoja una Opción:") - # Se presenta al usuario las opciones de búsqueda (frase, usuario o localidad) mediante checkboxes - termino=st.checkbox('Frase') - usuario=st.checkbox('Usuario') - localidad=st.checkbox('Localidad') - # Se agrega un botón de envío para que el usuario pueda iniciar la búsqueda - submit_button = st.form_submit_button(label='Analizar') - # Se define una variable de error y se inicializa como falso - error =False - - # Si el usuario hace clic en el botón de envío, se comprueban los checkboxes y se establece un mensaje de error si es necesario - if submit_button: - # Condición para el caso de que esten dos check seleccionados - if ( termino == False and usuario == False and localidad == False): - st.text('Error no se ha seleccionado ningun check') - error=True - elif ( termino == True and usuario == True and localidad == True): - st.text('Error se han seleccionado varios check') - error=True - - # Si no se ha producido ningún error, se inicia la búsqueda según la opción seleccionada - if (error == False): - if (termino): - analizar_frase(search_words) - elif (usuario): - tweets_usuario(search_words,number_of_tweets) - elif (localidad): - tweets_localidad(search_words) - -# Se ejecuta la función run -run() \ No newline at end of file diff --git a/spaces/Kevin676/Clone-Your-Voice/utils/profiler.py b/spaces/Kevin676/Clone-Your-Voice/utils/profiler.py deleted file mode 100644 index 17175b9e1b0eb17fdc015199e5194a5c1afb8a28..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/Clone-Your-Voice/utils/profiler.py +++ /dev/null @@ -1,45 +0,0 @@ -from time import perf_counter as timer -from collections import OrderedDict -import numpy as np - - -class Profiler: - def __init__(self, summarize_every=5, disabled=False): - self.last_tick = timer() - self.logs = OrderedDict() - self.summarize_every = summarize_every - self.disabled = disabled - - def tick(self, name): - if self.disabled: - return - - # Log the time needed to execute that function - if not name in self.logs: - self.logs[name] = [] - if len(self.logs[name]) >= self.summarize_every: - self.summarize() - self.purge_logs() - self.logs[name].append(timer() - self.last_tick) - - self.reset_timer() - - def purge_logs(self): - for name in self.logs: - self.logs[name].clear() - - def reset_timer(self): - self.last_tick = timer() - - def summarize(self): - n = max(map(len, self.logs.values())) - assert n == self.summarize_every - print("\nAverage execution time over %d steps:" % n) - - name_msgs = ["%s (%d/%d):" % (name, len(deltas), n) for name, deltas in self.logs.items()] - pad = max(map(len, name_msgs)) - for name_msg, deltas in zip(name_msgs, self.logs.values()): - print(" %s mean: %4.0fms std: %4.0fms" % - (name_msg.ljust(pad), np.mean(deltas) * 1000, np.std(deltas) * 1000)) - print("", flush=True) - \ No newline at end of file diff --git a/spaces/Kyan14/Mood_Based_Generative_Art/app.py b/spaces/Kyan14/Mood_Based_Generative_Art/app.py deleted file mode 100644 index 82bbb30e6a6a06cf594da527e90a536a74726260..0000000000000000000000000000000000000000 --- a/spaces/Kyan14/Mood_Based_Generative_Art/app.py +++ /dev/null @@ -1,114 +0,0 @@ -import requests -from 
PIL import Image -from io import BytesIO -import base64 -import gradio as gr -from transformers import CLIPProcessor, CLIPModel -import numpy as np -import time - -# Replace with your own API key -STABLE_DIFFUSION_API_KEY = "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" - -# Load the CLIP model and processor -model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") -processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") - -def get_mood_from_image(image: Image.Image): - moods = ["scared", "angry", "happy", "sad", "disgusted", "surprised"] - - # Create unique prompts for each mood - prompts = [ - "The emotion conveyed by this image is fear. The person looks scared and tense.", - "The emotion conveyed by this image is anger. The person looks furious and irritated.", - "The emotion conveyed by this image is happy. The person looks happy and cheerful.", - "The emotion conveyed by this image is sadness. The person looks unhappy and gloomy.", - "The emotion conveyed by this image is disgust. The person looks repulsed and sickened.", - "The emotion conveyed by this image is surprise. The person looks astonished and amazed.", - ] - - # Prepare the inputs for the model - inputs = processor(text=prompts, images=image, return_tensors="pt", padding=True) - - # Run the model - logits = model(**inputs).logits_per_image - probs = logits.softmax(dim=-1).tolist() - - # Calculate the scores for each mood - mood_scores = {} - for mood, score in zip(moods, probs[0]): - mood_scores[mood] = score - print("Mood Scores:", mood_scores) - # Select the mood with the highest score - selected_mood = max(mood_scores, key=mood_scores.get) - - return selected_mood -def is_black_image(image: Image.Image) -> bool: - img_array = np.array(image) - return np.all(img_array == 0) - -def generate_art(mood, max_retries=3, request_timeout=30): - prompt = f"{mood} generative art with vibrant colors and intricate patterns ({str(np.random.randint(1, 10000))})" - - headers = { - "Authorization": f"Bearer {STABLE_DIFFUSION_API_KEY}", - "Accept": "image/jpeg", - } - - json_data = { - "inputs": prompt - } - - retries = 0 - while retries < max_retries: - try: - response = requests.post('https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5', headers=headers, json=json_data, timeout=request_timeout) - except requests.exceptions.Timeout: - print(f"Request timed out after {request_timeout} seconds. Retrying...") - retries += 1 - continue - - if response.status_code == 503: - print("Model is loading, waiting for 30 seconds before retrying...") - time.sleep(30) - continue - - if response.status_code != 200: - print(f"Error: API response status code {response.status_code}") - print("Response content:") - print(response.content) - return None - - image = Image.open(BytesIO(response.content)) - - if not is_black_image(image): - break - - retries += 1 - - if retries == max_retries: - return None - - return image - -def mood_art_generator(image): - mood = get_mood_from_image(image) - print("Mood:", mood) - if mood: - art = generate_art(mood) - output_text = f"You seem to be {mood}. Here's an artwork representing it!" - return art, output_text - else: - return None, "Failed to generate artwork." 
- -iface = gr.Interface( - fn=mood_art_generator, - inputs=gr.inputs.Image(shape=(224, 224), image_mode="RGB", source="upload"), - outputs=[gr.outputs.Image(type="pil"), gr.outputs.Textbox()], - title="Mood-based Art Generator", - description="Upload an image of yourself and let the AI generate artwork based on your mood.", - allow_flagging=False, - analytics_enabled=False, - share=True -) -iface.launch() diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/backbones/ssd_vgg.py b/spaces/KyanChen/RSPrompter/mmdet/models/backbones/ssd_vgg.py deleted file mode 100644 index 843e82e2722f93b9b2abb5180c827c8f2a430b48..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/backbones/ssd_vgg.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings - -import torch.nn as nn -from mmcv.cnn import VGG -from mmengine.model import BaseModule - -from mmdet.registry import MODELS -from ..necks import ssd_neck - - -@MODELS.register_module() -class SSDVGG(VGG, BaseModule): - """VGG Backbone network for single-shot-detection. - - Args: - depth (int): Depth of vgg, from {11, 13, 16, 19}. - with_last_pool (bool): Whether to add a pooling layer at the last - of the model - ceil_mode (bool): When True, will use `ceil` instead of `floor` - to compute the output shape. - out_indices (Sequence[int]): Output from which stages. - out_feature_indices (Sequence[int]): Output from which feature map. - pretrained (str, optional): model pretrained path. Default: None - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - input_size (int, optional): Deprecated argumment. - Width and height of input, from {300, 512}. - l2_norm_scale (float, optional) : Deprecated argumment. - L2 normalization layer init scale. - - Example: - >>> self = SSDVGG(input_size=300, depth=11) - >>> self.eval() - >>> inputs = torch.rand(1, 3, 300, 300) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... 
print(tuple(level_out.shape)) - (1, 1024, 19, 19) - (1, 512, 10, 10) - (1, 256, 5, 5) - (1, 256, 3, 3) - (1, 256, 1, 1) - """ - extra_setting = { - 300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256), - 512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128), - } - - def __init__(self, - depth, - with_last_pool=False, - ceil_mode=True, - out_indices=(3, 4), - out_feature_indices=(22, 34), - pretrained=None, - init_cfg=None, - input_size=None, - l2_norm_scale=None): - # TODO: in_channels for mmcv.VGG - super(SSDVGG, self).__init__( - depth, - with_last_pool=with_last_pool, - ceil_mode=ceil_mode, - out_indices=out_indices) - - self.features.add_module( - str(len(self.features)), - nn.MaxPool2d(kernel_size=3, stride=1, padding=1)) - self.features.add_module( - str(len(self.features)), - nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)) - self.features.add_module( - str(len(self.features)), nn.ReLU(inplace=True)) - self.features.add_module( - str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1)) - self.features.add_module( - str(len(self.features)), nn.ReLU(inplace=True)) - self.out_feature_indices = out_feature_indices - - assert not (init_cfg and pretrained), \ - 'init_cfg and pretrained cannot be specified at the same time' - - if init_cfg is not None: - self.init_cfg = init_cfg - elif isinstance(pretrained, str): - warnings.warn('DeprecationWarning: pretrained is deprecated, ' - 'please use "init_cfg" instead') - self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) - elif pretrained is None: - self.init_cfg = [ - dict(type='Kaiming', layer='Conv2d'), - dict(type='Constant', val=1, layer='BatchNorm2d'), - dict(type='Normal', std=0.01, layer='Linear'), - ] - else: - raise TypeError('pretrained must be a str or None') - - if input_size is not None: - warnings.warn('DeprecationWarning: input_size is deprecated') - if l2_norm_scale is not None: - warnings.warn('DeprecationWarning: l2_norm_scale in VGG is ' - 'deprecated, it has been moved to SSDNeck.') - - def init_weights(self, pretrained=None): - super(VGG, self).init_weights() - - def forward(self, x): - """Forward function.""" - outs = [] - for i, layer in enumerate(self.features): - x = layer(x) - if i in self.out_feature_indices: - outs.append(x) - - if len(outs) == 1: - return outs[0] - else: - return tuple(outs) - - -class L2Norm(ssd_neck.L2Norm): - - def __init__(self, **kwargs): - super(L2Norm, self).__init__(**kwargs) - warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py ' - 'is deprecated, please use L2Norm in ' - 'mmdet/models/necks/ssd_neck.py instead') diff --git a/spaces/KyanChen/RSPrompter/mmpretrain/__init__.py b/spaces/KyanChen/RSPrompter/mmpretrain/__init__.py deleted file mode 100644 index 4dfdb78c915087762649f2da4f4f50b4cef5c7d0..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmpretrain/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import mmcv -import mmengine -from mmengine.utils import digit_version - -from .apis import * # noqa: F401, F403 -from .version import __version__ - -mmcv_minimum_version = '2.0.0' -mmcv_maximum_version = '2.5.0' -mmcv_version = digit_version(mmcv.__version__) - -mmengine_minimum_version = '0.7.1' -mmengine_maximum_version = '1.5.0' -mmengine_version = digit_version(mmengine.__version__) - -assert (mmcv_version >= digit_version(mmcv_minimum_version) - and mmcv_version < digit_version(mmcv_maximum_version)), \ - f'MMCV=={mmcv.__version__} is used but incompatible. 
' \ - f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.' - -assert (mmengine_version >= digit_version(mmengine_minimum_version) - and mmengine_version < digit_version(mmengine_maximum_version)), \ - f'MMEngine=={mmengine.__version__} is used but incompatible. ' \ - f'Please install mmengine>={mmengine_minimum_version}, ' \ - f'<{mmengine_maximum_version}.' - -__all__ = ['__version__'] diff --git a/spaces/KyanChen/RSPrompter/mmpretrain/datasets/utils.py b/spaces/KyanChen/RSPrompter/mmpretrain/datasets/utils.py deleted file mode 100644 index fcb60e432c374c1a904700a7348f706fa0e523eb..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmpretrain/datasets/utils.py +++ /dev/null @@ -1,243 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import gzip -import hashlib -import os -import os.path -import shutil -import tarfile -import tempfile -import urllib.error -import urllib.request -import zipfile - -from mmengine.fileio import LocalBackend, get_file_backend - -__all__ = [ - 'rm_suffix', 'check_integrity', 'download_and_extract_archive', - 'open_maybe_compressed_file' -] - - -def rm_suffix(s, suffix=None): - if suffix is None: - return s[:s.rfind('.')] - else: - return s[:s.rfind(suffix)] - - -def calculate_md5(fpath: str, chunk_size: int = 1024 * 1024): - md5 = hashlib.md5() - backend = get_file_backend(fpath, enable_singleton=True) - if isinstance(backend, LocalBackend): - # Enable chunk update for local file. - with open(fpath, 'rb') as f: - for chunk in iter(lambda: f.read(chunk_size), b''): - md5.update(chunk) - else: - md5.update(backend.get(fpath)) - return md5.hexdigest() - - -def check_md5(fpath, md5, **kwargs): - return md5 == calculate_md5(fpath, **kwargs) - - -def check_integrity(fpath, md5=None): - if not os.path.isfile(fpath): - return False - if md5 is None: - return True - return check_md5(fpath, md5) - - -def download_url_to_file(url, dst, hash_prefix=None, progress=True): - """Download object at the given URL to a local path. - - Modified from - https://pytorch.org/docs/stable/hub.html#torch.hub.download_url_to_file - - Args: - url (str): URL of the object to download - dst (str): Full path where object will be saved, - e.g. ``/tmp/temporary_file`` - hash_prefix (string, optional): If not None, the SHA256 downloaded - file should start with ``hash_prefix``. Defaults to None. - progress (bool): whether or not to display a progress bar to stderr. - Defaults to True - """ - file_size = None - req = urllib.request.Request(url) - u = urllib.request.urlopen(req) - meta = u.info() - if hasattr(meta, 'getheaders'): - content_length = meta.getheaders('Content-Length') - else: - content_length = meta.get_all('Content-Length') - if content_length is not None and len(content_length) > 0: - file_size = int(content_length[0]) - - # We deliberately save it in a temp file and move it after download is - # complete. This prevents a local file being overridden by a broken - # download. 
- dst = os.path.expanduser(dst) - dst_dir = os.path.dirname(dst) - f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir) - - import rich.progress - columns = [ - rich.progress.DownloadColumn(), - rich.progress.BarColumn(bar_width=None), - rich.progress.TimeRemainingColumn(), - ] - try: - if hash_prefix is not None: - sha256 = hashlib.sha256() - with rich.progress.Progress(*columns) as pbar: - task = pbar.add_task('download', total=file_size, visible=progress) - while True: - buffer = u.read(8192) - if len(buffer) == 0: - break - f.write(buffer) - if hash_prefix is not None: - sha256.update(buffer) - pbar.update(task, advance=len(buffer)) - - f.close() - if hash_prefix is not None: - digest = sha256.hexdigest() - if digest[:len(hash_prefix)] != hash_prefix: - raise RuntimeError( - 'invalid hash value (expected "{}", got "{}")'.format( - hash_prefix, digest)) - shutil.move(f.name, dst) - finally: - f.close() - if os.path.exists(f.name): - os.remove(f.name) - - -def download_url(url, root, filename=None, md5=None): - """Download a file from a url and place it in root. - - Args: - url (str): URL to download file from. - root (str): Directory to place downloaded file in. - filename (str | None): Name to save the file under. - If filename is None, use the basename of the URL. - md5 (str | None): MD5 checksum of the download. - If md5 is None, download without md5 check. - """ - root = os.path.expanduser(root) - if not filename: - filename = os.path.basename(url) - fpath = os.path.join(root, filename) - - os.makedirs(root, exist_ok=True) - - if check_integrity(fpath, md5): - print(f'Using downloaded and verified file: {fpath}') - else: - try: - print(f'Downloading {url} to {fpath}') - download_url_to_file(url, fpath) - except (urllib.error.URLError, IOError) as e: - if url[:5] == 'https': - url = url.replace('https:', 'http:') - print('Failed download. Trying https -> http instead.' 
- f' Downloading {url} to {fpath}') - download_url_to_file(url, fpath) - else: - raise e - # check integrity of downloaded file - if not check_integrity(fpath, md5): - raise RuntimeError('File not found or corrupted.') - - -def _is_tarxz(filename): - return filename.endswith('.tar.xz') - - -def _is_tar(filename): - return filename.endswith('.tar') - - -def _is_targz(filename): - return filename.endswith('.tar.gz') - - -def _is_tgz(filename): - return filename.endswith('.tgz') - - -def _is_gzip(filename): - return filename.endswith('.gz') and not filename.endswith('.tar.gz') - - -def _is_zip(filename): - return filename.endswith('.zip') - - -def extract_archive(from_path, to_path=None, remove_finished=False): - if to_path is None: - to_path = os.path.dirname(from_path) - - if _is_tar(from_path): - with tarfile.open(from_path, 'r') as tar: - tar.extractall(path=to_path) - elif _is_targz(from_path) or _is_tgz(from_path): - with tarfile.open(from_path, 'r:gz') as tar: - tar.extractall(path=to_path) - elif _is_tarxz(from_path): - with tarfile.open(from_path, 'r:xz') as tar: - tar.extractall(path=to_path) - elif _is_gzip(from_path): - to_path = os.path.join( - to_path, - os.path.splitext(os.path.basename(from_path))[0]) - with open(to_path, 'wb') as out_f, gzip.GzipFile(from_path) as zip_f: - out_f.write(zip_f.read()) - elif _is_zip(from_path): - with zipfile.ZipFile(from_path, 'r') as z: - z.extractall(to_path) - else: - raise ValueError(f'Extraction of {from_path} not supported') - - if remove_finished: - os.remove(from_path) - - -def download_and_extract_archive(url, - download_root, - extract_root=None, - filename=None, - md5=None, - remove_finished=False): - download_root = os.path.expanduser(download_root) - if extract_root is None: - extract_root = download_root - if not filename: - filename = os.path.basename(url) - - download_url(url, download_root, filename, md5) - - archive = os.path.join(download_root, filename) - print(f'Extracting {archive} to {extract_root}') - extract_archive(archive, extract_root, remove_finished) - - -def open_maybe_compressed_file(path: str): - """Return a file object that possibly decompresses 'path' on the fly. - - Decompression occurs when argument `path` is a string and ends with '.gz' - or '.xz'. 
- """ - if not isinstance(path, str): - return path - if path.endswith('.gz'): - import gzip - return gzip.open(path, 'rb') - if path.endswith('.xz'): - import lzma - return lzma.open(path, 'rb') - return open(path, 'rb') diff --git a/spaces/Lbin123/Lbingo/src/components/providers.tsx b/spaces/Lbin123/Lbingo/src/components/providers.tsx deleted file mode 100644 index 892226412d80fe0b05211911b9e245cd22876460..0000000000000000000000000000000000000000 --- a/spaces/Lbin123/Lbingo/src/components/providers.tsx +++ /dev/null @@ -1,15 +0,0 @@ -'use client' - -import * as React from 'react' -import { ThemeProvider as NextThemesProvider } from 'next-themes' -import { ThemeProviderProps } from 'next-themes/dist/types' - -import { TooltipProvider } from '@/components/ui/tooltip' - -export function Providers({ children, ...props }: ThemeProviderProps) { - return ( - - {children} - - ) -} diff --git a/spaces/Lianjd/stock_dashboard/backtrader/sizers/fixedsize.py b/spaces/Lianjd/stock_dashboard/backtrader/sizers/fixedsize.py deleted file mode 100644 index a5d47f6bc6e5fb789a90ff08bd00db6a2dcd75f2..0000000000000000000000000000000000000000 --- a/spaces/Lianjd/stock_dashboard/backtrader/sizers/fixedsize.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8; py-indent-offset:4 -*- -############################################################################### -# -# Copyright (C) 2015-2020 Daniel Rodriguez -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### -from __future__ import (absolute_import, division, print_function, - unicode_literals) - -import backtrader as bt - - -class FixedSize(bt.Sizer): - ''' - This sizer simply returns a fixed size for any operation. - Size can be controlled by number of tranches that a system - wishes to use to scale into trades by specifying the ``tranches`` - parameter. 
- - - Params: - - ``stake`` (default: ``1``) - - ``tranches`` (default: ``1``) - ''' - - params = (('stake', 1), - ('tranches', 1)) - - def _getsizing(self, comminfo, cash, data, isbuy): - if self.p.tranches > 1: - return abs(int(self.p.stake / self.p.tranches)) - else: - return self.p.stake - - def setsizing(self, stake): - if self.p.tranches > 1: - self.p.stake = abs(int(self.p.stake / self.p.tranches)) - else: - self.p.stake = stake # OLD METHOD FOR SAMPLE COMPATIBILITY - - -SizerFix = FixedSize - - -class FixedReverser(bt.Sizer): - '''This sizer returns the needes fixed size to reverse an open position or - the fixed size to open one - - - To open a position: return the param ``stake`` - - - To reverse a position: return 2 * ``stake`` - - Params: - - ``stake`` (default: ``1``) - ''' - params = (('stake', 1),) - - def _getsizing(self, comminfo, cash, data, isbuy): - position = self.strategy.getposition(data) - size = self.p.stake * (1 + (position.size != 0)) - return size - - -class FixedSizeTarget(bt.Sizer): - ''' - This sizer simply returns a fixed target size, useful when coupled - with Target Orders and specifically ``cerebro.target_order_size()``. - Size can be controlled by number of tranches that a system - wishes to use to scale into trades by specifying the ``tranches`` - parameter. - - - Params: - - ``stake`` (default: ``1``) - - ``tranches`` (default: ``1``) - ''' - - params = (('stake', 1), - ('tranches', 1)) - - def _getsizing(self, comminfo, cash, data, isbuy): - if self.p.tranches > 1: - size = abs(int(self.p.stake / self.p.tranches)) - return min((self.strategy.position.size + size), self.p.stake) - else: - return self.p.stake - - def setsizing(self, stake): - if self.p.tranches > 1: - size = abs(int(self.p.stake / self.p.tranches)) - self.p.stake = min((self.strategy.position.size + size), - self.p.stake) - else: - self.p.stake = stake # OLD METHOD FOR SAMPLE COMPATIBILITY diff --git a/spaces/LobsterQQQ/Nail-Set-Art/app.py b/spaces/LobsterQQQ/Nail-Set-Art/app.py deleted file mode 100644 index 5f7ff1495be492ba0546115770b84cad9f0e710a..0000000000000000000000000000000000000000 --- a/spaces/LobsterQQQ/Nail-Set-Art/app.py +++ /dev/null @@ -1,201 +0,0 @@ -from contextlib import nullcontext -import gradio as gr -import torch -from torch import autocast -from diffusers import StableDiffusionPipeline - - -device = "cuda" if torch.cuda.is_available() else "cpu" -context = autocast if device == "cuda" else nullcontext -dtype = torch.float16 if device == "cuda" else torch.float32 - -pipe = StableDiffusionPipeline.from_pretrained("ringhyacinth/nail-set-diffuser", torch_dtype=dtype) -pipe = pipe.to(device) - - -# Disable nsfw checker -disable_safety = True - -if disable_safety: - def null_safety(images, **kwargs): - return images, False - pipe.safety_checker = null_safety - - -def infer(prompt, n_samples, steps, scale): - - with context("cuda"): - images = pipe(n_samples*[prompt], guidance_scale=scale, num_inference_steps=steps).images - - return images - -css = """ - a { - color: inherit; - text-decoration: underline; - } - .gradio-container { - font-family: 'IBM Plex Sans', sans-serif; - } - .gr-button { - color: white; - border-color: #9d66e5; - background: #9d66e5; - } - input[type='range'] { - accent-color: #9d66e5; - } - .dark input[type='range'] { - accent-color: #dfdfdf; - } - .container { - max-width: 730px; - margin: auto; - padding-top: 1.5rem; - } - #gallery { - min-height: 22rem; - margin-bottom: 15px; - margin-left: auto; - margin-right: auto; - 
border-bottom-right-radius: .5rem !important; - border-bottom-left-radius: .5rem !important; - } - #gallery>div>.h-full { - min-height: 20rem; - } - .details:hover { - text-decoration: underline; - } - .gr-button { - white-space: nowrap; - } - .gr-button:focus { - border-color: rgb(147 197 253 / var(--tw-border-opacity)); - outline: none; - box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000); - --tw-border-opacity: 1; - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color); - --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity)); - --tw-ring-opacity: .5; - } - #advanced-options { - margin-bottom: 20px; - } - .footer { - margin-bottom: 45px; - margin-top: 35px; - text-align: center; - border-bottom: 1px solid #e5e5e5; - } - .footer>p { - font-size: .8rem; - display: inline-block; - padding: 0 10px; - transform: translateY(10px); - background: white; - } - .dark .logo{ filter: invert(1); } - .dark .footer { - border-color: #303030; - } - .dark .footer>p { - background: #0b0f19; - } - .acknowledgments h4{ - margin: 1.25em 0 .25em 0; - font-weight: bold; - font-size: 115%; - } -""" - -block = gr.Blocks(css=css) - -examples = [ - [ - 'Nail Set, hamilton nail, broadway musical theme nail.', - 2, - 7, - ], - [ - 'Nail Set, chinese new year nail, super detailed', - 2, - 7, - ], - [ - 'Nail Set, thanksgiving nail, super detailed', - 2, - 7, - ], -] - -with block: - gr.HTML( - """ -
-        Text to Nail set
-        Generate a new Nail Set from a text description. Use the token {Nail Set} in your prompts for the effect.
- """ - ) - with gr.Group(): - with gr.Box(): - with gr.Row().style(mobile_collapse=False, equal_height=True): - text = gr.Textbox( - label="Enter your prompt", - show_label=False, - max_lines=1, - placeholder="Enter your prompt", - ).style( - border=(True, False, True, True), - rounded=(True, False, False, True), - container=False, - ) - btn = gr.Button("Generate image").style( - margin=False, - rounded=(False, True, True, False), - ) - - gallery = gr.Gallery( - label="Generated images", show_label=False, elem_id="gallery" - ).style(grid=[2], height="auto") - - - with gr.Row(elem_id="advanced-options"): - samples = gr.Slider(label="Images", minimum=1, maximum=4, value=2, step=1) - steps = gr.Slider(label="Steps", minimum=50, maximum=100, value=50, step=5) - scale = gr.Slider( - label="Guidance Scale", minimum=0, maximum=50, value=7.5, step=0.1 - ) - - - ex = gr.Examples(examples=examples, fn=infer, inputs=[text, samples, scale], outputs=gallery, cache_examples=False) - ex.dataset.headers = [""] - - - text.submit(infer, inputs=[text, samples, steps, scale], outputs=gallery) - btn.click(infer, inputs=[text, samples, steps, scale], outputs=gallery) - gr.HTML( - """ - -
-        Use the tokens {Nail Set} in your prompts for the effect.
-        Put in a text prompt and generate your own nail set!
-        Trained by Weekend and Hyacinth
- """ - ) - -block.launch() \ No newline at end of file diff --git a/spaces/MBZ/LoRA-DreamBooth-Training-UI/app_training.py b/spaces/MBZ/LoRA-DreamBooth-Training-UI/app_training.py deleted file mode 100644 index 09660a26b4d99f8ff8457a454fdddcc57d7f3756..0000000000000000000000000000000000000000 --- a/spaces/MBZ/LoRA-DreamBooth-Training-UI/app_training.py +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import os - -import gradio as gr - -from constants import UploadTarget -from inference import InferencePipeline -from trainer import Trainer - - -def create_training_demo(trainer: Trainer, - pipe: InferencePipeline | None = None) -> gr.Blocks: - with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - with gr.Box(): - gr.Markdown('Training Data') - instance_images = gr.Files(label='Instance images') - instance_prompt = gr.Textbox(label='Instance prompt', - max_lines=1) - gr.Markdown(''' - - Upload images of the style you are planning on training on. - - For an instance prompt, use a unique, made up word to avoid collisions. - ''') - with gr.Box(): - gr.Markdown('Output Model') - output_model_name = gr.Text(label='Name of your model', - max_lines=1) - delete_existing_model = gr.Checkbox( - label='Delete existing model of the same name', - value=False) - validation_prompt = gr.Text(label='Validation Prompt') - with gr.Box(): - gr.Markdown('Upload Settings') - with gr.Row(): - upload_to_hub = gr.Checkbox( - label='Upload model to Hub', value=True) - use_private_repo = gr.Checkbox(label='Private', - value=True) - delete_existing_repo = gr.Checkbox( - label='Delete existing repo of the same name', - value=False) - upload_to = gr.Radio( - label='Upload to', - choices=[_.value for _ in UploadTarget], - value=UploadTarget.LORA_LIBRARY.value) - gr.Markdown(''' - - By default, trained models will be uploaded to [LoRA Library](https://huggingface.co/lora-library) (see [this example model](https://huggingface.co/lora-library/lora-dreambooth-sample-dog)). - - You can also choose "Personal Profile", in which case, the model will be uploaded to https://huggingface.co/{your_username}/{model_name}. - ''') - - with gr.Box(): - gr.Markdown('Training Parameters') - with gr.Row(): - base_model = gr.Text( - label='Base Model', - value='stabilityai/stable-diffusion-2-1-base', - max_lines=1) - resolution = gr.Dropdown(choices=['512', '768'], - value='512', - label='Resolution') - num_training_steps = gr.Number( - label='Number of Training Steps', value=1000, precision=0) - learning_rate = gr.Number(label='Learning Rate', value=0.0001) - gradient_accumulation = gr.Number( - label='Number of Gradient Accumulation', - value=1, - precision=0) - seed = gr.Slider(label='Seed', - minimum=0, - maximum=100000, - step=1, - value=0) - fp16 = gr.Checkbox(label='FP16', value=True) - use_8bit_adam = gr.Checkbox(label='Use 8bit Adam', value=True) - checkpointing_steps = gr.Number(label='Checkpointing Steps', - value=100, - precision=0) - use_wandb = gr.Checkbox(label='Use W&B', - value=False, - interactive=bool( - os.getenv('WANDB_API_KEY'))) - validation_epochs = gr.Number(label='Validation Epochs', - value=100, - precision=0) - gr.Markdown(''' - - The base model must be a model that is compatible with [diffusers](https://github.com/huggingface/diffusers) library. - - It takes a few minutes to download the base model first. - - It will take about 8 minutes to train for 1000 steps with a T4 GPU. 
- - You may want to try a small number of steps first, like 1, to see if everything works fine in your environment. - - You can check the training status by pressing the "Open logs" button if you are running this on your Space. - - You need to set the environment variable `WANDB_API_KEY` if you'd like to use [W&B](https://wandb.ai/site). See [W&B documentation](https://docs.wandb.ai/guides/track/advanced/environment-variables). - - **Note:** Due to [this issue](https://github.com/huggingface/accelerate/issues/944), currently, training will not terminate properly if you use W&B. - ''') - - remove_gpu_after_training = gr.Checkbox( - label='Remove GPU after training', - value=False, - interactive=bool(os.getenv('SPACE_ID')), - visible=False) - run_button = gr.Button('Start Training') - - with gr.Box(): - gr.Markdown('Output message') - output_message = gr.Markdown() - - if pipe is not None: - run_button.click(fn=pipe.clear) - run_button.click(fn=trainer.run, - inputs=[ - instance_images, - instance_prompt, - output_model_name, - delete_existing_model, - validation_prompt, - base_model, - resolution, - num_training_steps, - learning_rate, - gradient_accumulation, - seed, - fp16, - use_8bit_adam, - checkpointing_steps, - use_wandb, - validation_epochs, - upload_to_hub, - use_private_repo, - delete_existing_repo, - upload_to, - remove_gpu_after_training, - ], - outputs=output_message) - return demo - - -if __name__ == '__main__': - hf_token = os.getenv('HF_TOKEN') - trainer = Trainer(hf_token) - demo = create_training_demo(trainer) - demo.queue(max_size=1).launch(share=False) diff --git a/spaces/MINAMONI/img-to-music/app.py b/spaces/MINAMONI/img-to-music/app.py deleted file mode 100644 index 30d094ce05b344d21f1c497c183a4ce7649ec164..0000000000000000000000000000000000000000 --- a/spaces/MINAMONI/img-to-music/app.py +++ /dev/null @@ -1,333 +0,0 @@ -import gradio as gr -import openai -import numpy as np -import time -import base64 -import ffmpeg -from sentence_transformers import SentenceTransformer -from audio2numpy import open_audio -import httpx -import json -import os -import requests -import urllib -import pydub -from os import path -from pydub import AudioSegment -import re - -MUBERT_LICENSE = os.environ.get('MUBERT_LICENSE') -MUBERT_TOKEN = os.environ.get('MUBERT_TOKEN') - -#img_to_text = gr.Blocks.load(name="spaces/pharma/CLIP-Interrogator") -img_to_text = gr.Blocks.load(name="spaces/fffiloni/CLIP-Interrogator-2") - -from share_btn import community_icon_html, loading_icon_html, share_js -from utils import get_tags_for_prompts, get_mubert_tags_embeddings - -minilm = SentenceTransformer('all-MiniLM-L6-v2') -mubert_tags_embeddings = get_mubert_tags_embeddings(minilm) - -##———————————————————————————————————— - -MUBERT_LICENSE = os.environ.get('MUBERT_LICENSE') -MUBERT_TOKEN = os.environ.get('MUBERT_TOKEN') - -##———————————————————————————————————— -def get_pat_token(): - r = httpx.post('https://api-b2b.mubert.com/v2/GetServiceAccess', - json={ - "method": "GetServiceAccess", - "params": { - "email":"mail@mail.com", - "phone":"+11234567890", - "license": MUBERT_LICENSE, - "token": MUBERT_TOKEN, - - } - }) - - rdata = json.loads(r.text) - assert rdata['status'] == 1, "probably incorrect e-mail" - pat = rdata['data']['pat'] - #print(f"pat: {pat}") - return pat - -def get_music(pat, prompt, track_duration, gen_intensity, gen_mode): - - if len(prompt) > 200: - prompt = prompt[:200] - - r = httpx.post('https://api-b2b.mubert.com/v2/TTMRecordTrack', - json={ - "method": "TTMRecordTrack", - "params": 
- { - "text": prompt, - "pat": pat, - "mode":gen_mode, - "duration":track_duration, - "intensity": gen_intensity, - "format": "wav" - } - }) - - rdata = json.loads(r.text) - - #print(f"rdata: {rdata}") - assert rdata['status'] == 1, rdata['error']['text'] - track = rdata['data']['tasks'][0]['download_link'] - print(track) - - local_file_path = "sample.wav" - - # Download the MP3 file from the URL - headers = { - 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7; rv:93.0) Gecko/20100101 Firefox/93.0'} - - retries = 3 - delay = 5 # in seconds - while retries > 0: - response = requests.get(track, headers=headers) - if response.status_code == 200: - break - retries -= 1 - time.sleep(delay) - response = requests.get(track, headers=headers) - print(f"{response}") - # Save the downloaded content to a local file - with open(local_file_path, 'wb') as f: - f.write(response.content) - return "sample.wav", track - - -def get_results(text_prompt,track_duration,gen_intensity,gen_mode): - pat_token = get_pat_token() - music = get_music(pat_token, text_prompt, track_duration, gen_intensity, gen_mode) - return pat_token, music[0], music[1] - -def get_prompts(uploaded_image, track_duration, gen_intensity, gen_mode, openai_api_key): - print("calling clip interrogator") - #prompt = img_to_text(uploaded_image, "ViT-L (best for Stable Diffusion 1.*)", "fast", fn_index=1)[0] - - prompt = img_to_text(uploaded_image, 'best', 4, fn_index=1)[0] - print(prompt) - clean_prompt = clean_text(prompt) - print(f"prompt cleaned: {clean_prompt}") - musical_prompt = 'You did not use any OpenAI API key to pimp your result :)' - if openai_api_key is not None: - gpt_adaptation = try_api(prompt, openai_api_key) - if gpt_adaptation[0] != "oups": - musical_prompt = gpt_adaptation[0] - print(f"musical adapt: {musical_prompt}") - music_result = get_results(musical_prompt, track_duration, gen_intensity, gen_mode) - else: - music_result = get_results(clean_prompt, track_duration, gen_intensity, gen_mode) - else: - music_result = get_results(clean_prompt, track_duration, gen_intensity, gen_mode) - - show_prompts = f""" - CLIP Interrogator Caption: '{prompt}' - — - OpenAI Musical Adaptation: '{musical_prompt}' - — - Audio file link: {music_result[2]} - """ - #wave_file = convert_mp3_to_wav(music_result[1]) - - time.sleep(1) - return gr.Textbox.update(value=show_prompts, visible=True), music_result[1], gr.update(visible=True), gr.update(visible=True), gr.update(visible=True) - -def try_api(message, openai_api_key): - - try: - response = call_api(message, openai_api_key) - return response, "no error" - except openai.error.Timeout as e: - #Handle timeout error, e.g. retry or log - #print(f"OpenAI API request timed out: {e}") - return "oups", f"OpenAI API request timed out:
{e}
" - except openai.error.APIError as e: - #Handle API error, e.g. retry or log - #print(f"OpenAI API returned an API Error: {e}") - return "oups", f"OpenAI API returned an API Error:
{e}
" - except openai.error.APIConnectionError as e: - #Handle connection error, e.g. check network or log - #print(f"OpenAI API request failed to connect: {e}") - return "oups", f"OpenAI API request failed to connect:
{e}
" - except openai.error.InvalidRequestError as e: - #Handle invalid request error, e.g. validate parameters or log - #print(f"OpenAI API request was invalid: {e}") - return "oups", f"OpenAI API request was invalid:
{e}
" - except openai.error.AuthenticationError as e: - #Handle authentication error, e.g. check credentials or log - #print(f"OpenAI API request was not authorized: {e}") - return "oups", f"OpenAI API request was not authorized:
{e}
" - except openai.error.PermissionError as e: - #Handle permission error, e.g. check scope or log - #print(f"OpenAI API request was not permitted: {e}") - return "oups", f"OpenAI API request was not permitted:
{e}
" - except openai.error.RateLimitError as e: - #Handle rate limit error, e.g. wait or log - #print(f"OpenAI API request exceeded rate limit: {e}") - return "oups", f"OpenAI API request exceeded rate limit:
{e}
" - -def call_api(message, openai_api_key): - - instruction = "Convert in less than 200 characters this image caption to a very concise musical description with musical terms, as if you wanted to describe a musical ambiance, stricly in English" - - print("starting open ai") - augmented_prompt = f"{instruction}: '{message}'." - openai.api_key = openai_api_key - - response = openai.Completion.create( - model="text-davinci-003", - prompt=augmented_prompt, - temperature=0.5, - max_tokens=2048, - top_p=1, - frequency_penalty=0, - presence_penalty=0.6 - ) - - #print(response) - - #return str(response.choices[0].text).split("\n",2)[2] - return str(response.choices[0].text).lstrip('\n') - - -def get_track_by_tags(tags, pat, duration, gen_intensity, gen_mode, maxit=20): - - r = httpx.post('https://api-b2b.mubert.com/v2/RecordTrackTTM', - json={ - "method": "RecordTrackTTM", - "params": { - "pat": pat, - "duration": duration, - "format": "wav", - "intensity":gen_intensity, - "tags": tags, - "mode": gen_mode - } - }) - - rdata = json.loads(r.text) - print(rdata) - #assert rdata['status'] == 1, rdata['error']['text'] - trackurl = rdata['data']['tasks'][0] - - print('Generating track ', end='') - for i in range(maxit): - r = httpx.get(trackurl) - if r.status_code == 200: - return trackurl - time.sleep(1) - - -def generate_track_by_prompt(pat, prompt, duration, gen_intensity, gen_mode): - try: - _, tags = get_tags_for_prompts(minilm, mubert_tags_embeddings, prompt)[0] - result = get_track_by_tags(tags, pat, int(duration), gen_intensity, gen_mode) - print(result) - return result, ",".join(tags), "Success" - except Exception as e: - return None, "", str(e) - -def convert_mp3_to_wav(mp3_filepath): - - wave_file="file.wav" - - sound = AudioSegment.from_mp3(mp3_filepath) - sound.export(wave_file, format="wav") - - return wave_file - -def remove_emoji(text): - emoji_pattern = re.compile("[" - u"\U0001F600-\U0001F64F" # emoticons - u"\U0001F300-\U0001F5FF" # symbols & pictographs - u"\U0001F680-\U0001F6FF" # transport & map symbols - u"\U0001F1E0-\U0001F1FF" # flags (iOS) - "]+", flags=re.UNICODE) - return emoji_pattern.sub(r'', text) - -def remove_nonalphanumeric(text): - return re.sub(r'[^a-zA-Z0-9\s]', '', text) - -def clean_text(text): - clean_text = remove_nonalphanumeric(text) - clean_text = remove_emoji(clean_text) - clean_text = re.sub(r'\d+', '', clean_text) # Remove any number - return clean_text - -article = """ - - - -
-    You may also like:
- - -""" - -with gr.Blocks(css="style.css") as demo: - with gr.Column(elem_id="col-container"): - - gr.HTML("""
-                Image to Music
-                Sends an image in to CLIP Interrogator
-                to generate a text prompt which is then run through
-                Mubert text-to-music to generate music from the input image!
""") - - input_img = gr.Image(type="filepath", elem_id="input-img") - prompts_out = gr.Textbox(label="Text Captions", visible=False, elem_id="prompts_out", info="If player do not work, try to copy/paste the link in a new browser window") - music_output = gr.Audio(label="Result", type="filepath", elem_id="music-output").style(height="5rem") - #music_url = gr.Textbox(max_lines=1, info="If player do not work, try to copy/paste the link in a new browser window") - #text_status = gr.Textbox(label="status") - with gr.Group(elem_id="share-btn-container"): - community_icon = gr.HTML(community_icon_html, visible=False) - loading_icon = gr.HTML(loading_icon_html, visible=False) - share_button = gr.Button("Share to community", elem_id="share-btn", visible=False) - - with gr.Accordion(label="Music Generation Options", open=False): - openai_api_key = gr.Textbox(type="password", label="🔐 Your OpenAI API Key (optional)", placeholder="sk-123abc...", info="You can use your OpenAI key to adapt CLIP Interrogator caption to a musical translation.") - track_duration = gr.Slider(minimum=20, maximum=120, value=55, ustep=5, label="Track duration", elem_id="duration-inp") - with gr.Row(): - gen_intensity = gr.Dropdown(choices=["low", "medium", "high"], value="medium", label="Intensity") - gen_mode = gr.Radio(label="mode", choices=["track", "loop"], value="loop") - - generate = gr.Button("Generate Music from Image") - - gr.HTML(article) - - generate.click(get_prompts, inputs=[input_img,track_duration,gen_intensity,gen_mode, openai_api_key], outputs=[prompts_out, music_output, share_button, community_icon, loading_icon], api_name="i2m") - share_button.click(None, [], [], _js=share_js) - -demo.queue(max_size=32).launch() \ No newline at end of file diff --git a/spaces/MZhaovo/Llama_Difu/presets.py b/spaces/MZhaovo/Llama_Difu/presets.py deleted file mode 100644 index 509a451837f964f0179efbce38b6f71268e84a91..0000000000000000000000000000000000000000 --- a/spaces/MZhaovo/Llama_Difu/presets.py +++ /dev/null @@ -1,58 +0,0 @@ -prompt_tmpl_list = ["Default", "MZhao Mode"] - -prompt_tmpl_dict = { - "Default": -"""\ -Context information is below: - -{context_str} - -Given the above context, please answer the following question. -Answer in bullet points. - -Question: {query_str} -Answer in the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch. -""", - - "MZhao Mode": -"""\ -Context information: - -{context_str} - -Instructions: -Mzhao is the chat mode of the Difu Chat Engine. -Mzhao identifies as “Difu” not an assistant. -Mzhao introduces itself with “this is Difu only at the beginning of the conversation. -Mzhao does not disclose the internal alias “Mzhao.” -Mzhao can understand and communicate fluently in the user’s language of choice, such as English, 中文, 日本語, Español, Français, or Deutsch. -Mzhao’s responses should be informative, visual, logical, and actionable. -Mzhao’s responses should also be positive, interesting, entertaining, and engaging. -Mzhao’s responses should avoid being vague, controversial, or off-topic. -Mzhao’s logic and reasoning should be rigorous, intelligent, and defensible. -Mzhao can provide additional relevant details to respond thoroughly and comprehensively to cover multiple aspects in depth. -Mzhao can generate poems, stories, code, essays, songs, celebrity parodies, and more. -Mzhao can generate a query to look up for helpful products or services advertisements after responding. 
-Mzhao must always generate short suggestions for the next user turn after responding. -Mzhao should always generate short suggestions for the next user turn that are relevant to the conversation and not offensive. -Mzhao can leverage past responses and web results for generating relevant and interesting suggestions for the next user turn. -Mzhao does not generate generic suggestions for the next user turn, such as “thank you.” -Mzhao does not generate suggestions for the next user turn to carry out tasks such as “booking flight ticket” or “send an email to” that Mzhao cannot perform. -Mzhao should always perform context infomation look ups when the user is seeking information or whenever context infomation could be potentially helpful, regardless of Mzhao’s internal knowledge or information. -Mzhao can and should perform up to three look ups in a single conversation turn. Mzhao should never look up the same query more than once. -Mzhao can only issue numerical references to the URLs. Mzhao should never generate URLs or links apart from the ones provided in context infomation. -Mzhao always references factual statements to the context infomation. -Context infomation may be incomplete or irrelevant. Mzhao doesn’t make assumptions on the context infomation beyond strictly what’s returned. -If the context infomation do not contain sufficient information to answer the user message completely, Mzhao uses only facts from the context infomation and does not add any information by itself. -Mzhao can leverage information from multiple context infomation to respond comprehensively. -If the user message consists of keywords instead of chat messages, Mzhao treats it as a look up query. -Context infomation may expire over time. Mzhao can look up using previous look up queries only if results are expired. -Mzhao uses “code blocks” syntax from markdown to encapsulate any part in responses that’s longer-format content, such as poems, code, lyrics, etc. (except tables). -Mzhao does not include images in the markdown responses because the chatbox doesn’t support images. 
- -Query: {query_str} - -Reply in the same language as the query, such as English, 中文, 日本語, Español, Français, or Deutsch -""" - -} diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/makeaprotagonist/models/unet.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/makeaprotagonist/models/unet.py deleted file mode 100644 index 95fb6bef9019953e34891b3f46f9968c92d19748..0000000000000000000000000000000000000000 --- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/makeaprotagonist/models/unet.py +++ /dev/null @@ -1,660 +0,0 @@ -# Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py - -from dataclasses import dataclass -from typing import List, Optional, Tuple, Union, Callable - -import os -import json - -import torch -import torch.nn as nn -import torch.utils.checkpoint - -from diffusers.configuration_utils import ConfigMixin, register_to_config -from diffusers import ModelMixin -from diffusers.utils import BaseOutput, logging -from diffusers.models.embeddings import TimestepEmbedding, Timesteps, GaussianFourierProjection -from .unet_blocks import ( - CrossAttnDownBlock3D, - CrossAttnUpBlock3D, - DownBlock3D, - UNetMidBlock3DCrossAttn, - UpBlock3D, - get_down_block, - get_up_block, -) -from .resnet import InflatedConv3d - -import ipdb - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -@dataclass -class UNet3DConditionOutput(BaseOutput): - sample: torch.FloatTensor - - -class UNet3DConditionModel(ModelMixin, ConfigMixin): - _supports_gradient_checkpointing = True - - @register_to_config - def __init__( - self, - sample_size: Optional[int] = None, - in_channels: int = 4, - out_channels: int = 4, - center_input_sample: bool = False, - flip_sin_to_cos: bool = True, - freq_shift: int = 0, - down_block_types: Tuple[str] = ( - "CrossAttnDownBlock3D", - "CrossAttnDownBlock3D", - "CrossAttnDownBlock3D", - "DownBlock3D", - ), - mid_block_type: str = "UNetMidBlock3DCrossAttn", - up_block_types: Tuple[str] = ( - "UpBlock3D", - "CrossAttnUpBlock3D", - "CrossAttnUpBlock3D", - "CrossAttnUpBlock3D" - ), - only_cross_attention: Union[bool, Tuple[bool]] = False, - block_out_channels: Tuple[int] = (320, 640, 1280, 1280), - layers_per_block: int = 2, - downsample_padding: int = 1, - mid_block_scale_factor: float = 1, - act_fn: str = "silu", - norm_num_groups: int = 32, - norm_eps: float = 1e-5, - cross_attention_dim: int = 1280, - attention_head_dim: Union[int, Tuple[int]] = 8, - dual_cross_attention: bool = False, - use_linear_projection: bool = False, - class_embed_type: Optional[str] = None, - num_class_embeds: Optional[int] = None, - upcast_attention: bool = False, - resnet_time_scale_shift: str = "default", - - time_embedding_type: str = "positional", - timestep_post_act: Optional[str] = None, - time_cond_proj_dim: Optional[int] = None, - conv_in_kernel: int = 3, - conv_out_kernel: int = 3, - projection_class_embeddings_input_dim: Optional[int] = None, - class_embeddings_concat: bool = False, - - temporal_conv=False, - ): - super().__init__() - - self.sample_size = sample_size - time_embed_dim = block_out_channels[0] * 4 - - # input - self.in_channels = in_channels - conv_in_padding = (conv_in_kernel - 1) // 2 - self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding) - - # time - if time_embedding_type == "fourier": - time_embed_dim = block_out_channels[0] * 2 - if time_embed_dim % 
2 != 0: - raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.") - self.time_proj = GaussianFourierProjection( - time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos - ) - timestep_input_dim = time_embed_dim - elif time_embedding_type == "positional": - time_embed_dim = block_out_channels[0] * 4 - - self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) - timestep_input_dim = block_out_channels[0] - else: - raise ValueError( - f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`." - ) - - self.time_embedding = TimestepEmbedding( - timestep_input_dim, - time_embed_dim, - act_fn=act_fn, - post_act_fn=timestep_post_act, - cond_proj_dim=time_cond_proj_dim, - ) - - # class embedding - if class_embed_type is None and num_class_embeds is not None: - self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) - elif class_embed_type == "timestep": - self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) - elif class_embed_type == "identity": - self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) - elif class_embed_type == "projection": - if projection_class_embeddings_input_dim is None: - raise ValueError( - "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" - ) - # The projection `class_embed_type` is the same as the timestep `class_embed_type` except - # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings - # 2. it projects from an arbitrary input dimension. - # - # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. - # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. - # As a result, `TimestepEmbedding` can be passed arbitrary vectors. - self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) - elif class_embed_type == "simple_projection": - if projection_class_embeddings_input_dim is None: - raise ValueError( - "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" - ) - self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) - else: - self.class_embedding = None - - self.down_blocks = nn.ModuleList([]) - self.mid_block = None - self.up_blocks = nn.ModuleList([]) - - - if isinstance(only_cross_attention, bool): - only_cross_attention = [only_cross_attention] * len(down_block_types) - - if isinstance(attention_head_dim, int): - attention_head_dim = (attention_head_dim,) * len(down_block_types) - - if isinstance(cross_attention_dim, int): - cross_attention_dim = (cross_attention_dim,) * len(down_block_types) - - if class_embeddings_concat: - # The time embeddings are concatenated with the class embeddings. 
The dimension of the - # time embeddings passed to the down, middle, and up blocks is twice the dimension of the - # regular time embeddings - blocks_time_embed_dim = time_embed_dim * 2 - else: - blocks_time_embed_dim = time_embed_dim - - # down - output_channel = block_out_channels[0] - for i, down_block_type in enumerate(down_block_types): - input_channel = output_channel - output_channel = block_out_channels[i] - is_final_block = i == len(block_out_channels) - 1 - - down_block = get_down_block( - down_block_type, - num_layers=layers_per_block, - in_channels=input_channel, - out_channels=output_channel, - temb_channels=blocks_time_embed_dim, - add_downsample=not is_final_block, - resnet_eps=norm_eps, - resnet_act_fn=act_fn, - resnet_groups=norm_num_groups, - cross_attention_dim=cross_attention_dim[i], - attn_num_head_channels=attention_head_dim[i], - downsample_padding=downsample_padding, - dual_cross_attention=dual_cross_attention, - use_linear_projection=use_linear_projection, - only_cross_attention=only_cross_attention[i], - upcast_attention=upcast_attention, - resnet_time_scale_shift=resnet_time_scale_shift, - temporal_conv=temporal_conv - ) - self.down_blocks.append(down_block) - - # mid - if mid_block_type == "UNetMidBlock3DCrossAttn": - self.mid_block = UNetMidBlock3DCrossAttn( - in_channels=block_out_channels[-1], - temb_channels=blocks_time_embed_dim, - resnet_eps=norm_eps, - resnet_act_fn=act_fn, - output_scale_factor=mid_block_scale_factor, - resnet_time_scale_shift=resnet_time_scale_shift, - cross_attention_dim=cross_attention_dim[-1], - attn_num_head_channels=attention_head_dim[-1], - resnet_groups=norm_num_groups, - dual_cross_attention=dual_cross_attention, - use_linear_projection=use_linear_projection, - upcast_attention=upcast_attention, - temporal_conv=temporal_conv - ) - else: - raise ValueError(f"unknown mid_block_type : {mid_block_type}") - - # count how many layers upsample the videos - self.num_upsamplers = 0 - - # up - reversed_block_out_channels = list(reversed(block_out_channels)) - reversed_attention_head_dim = list(reversed(attention_head_dim)) - reversed_cross_attention_dim = list(reversed(cross_attention_dim)) - only_cross_attention = list(reversed(only_cross_attention)) - output_channel = reversed_block_out_channels[0] - for i, up_block_type in enumerate(up_block_types): - is_final_block = i == len(block_out_channels) - 1 - - prev_output_channel = output_channel - output_channel = reversed_block_out_channels[i] - input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] - - # add upsample block for all BUT final layer - if not is_final_block: - add_upsample = True - self.num_upsamplers += 1 - else: - add_upsample = False - - up_block = get_up_block( - up_block_type, - num_layers=layers_per_block + 1, - in_channels=input_channel, - out_channels=output_channel, - prev_output_channel=prev_output_channel, - temb_channels=blocks_time_embed_dim, - add_upsample=add_upsample, - resnet_eps=norm_eps, - resnet_act_fn=act_fn, - resnet_groups=norm_num_groups, - cross_attention_dim=reversed_cross_attention_dim[i], - attn_num_head_channels=reversed_attention_head_dim[i], - dual_cross_attention=dual_cross_attention, - use_linear_projection=use_linear_projection, - only_cross_attention=only_cross_attention[i], - upcast_attention=upcast_attention, - resnet_time_scale_shift=resnet_time_scale_shift, - temporal_conv=temporal_conv - ) - self.up_blocks.append(up_block) - prev_output_channel = output_channel - - # out - if norm_num_groups is not None: - 
self.conv_norm_out = nn.GroupNorm( - num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps - ) - self.conv_act = nn.SiLU() - else: - self.conv_norm_out = None - self.conv_act = None - - conv_out_padding = (conv_out_kernel - 1) // 2 - self.conv_out = InflatedConv3d( - block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding - ) - - def set_attention_slice(self, slice_size): - r""" - Enable sliced attention computation. - When this option is enabled, the attention module will split the input tensor in slices, to compute attention - in several steps. This is useful to save some memory in exchange for a small speed decrease. - Args: - slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): - When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If - `"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is - provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` - must be a multiple of `slice_size`. - """ - sliceable_head_dims = [] - - def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): - if hasattr(module, "set_attention_slice"): - sliceable_head_dims.append(module.sliceable_head_dim) - - for child in module.children(): - fn_recursive_retrieve_sliceable_dims(child) - - # retrieve number of attention layers - for module in self.children(): - fn_recursive_retrieve_sliceable_dims(module) - - num_sliceable_layers = len(sliceable_head_dims) - - if slice_size == "auto": - # half the attention head size is usually a good trade-off between - # speed and memory - slice_size = [dim // 2 for dim in sliceable_head_dims] - elif slice_size == "max": - # make smallest slice possible - slice_size = num_sliceable_layers * [1] - - slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size - - if len(slice_size) != len(sliceable_head_dims): - raise ValueError( - f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" - f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." - ) - - for i in range(len(slice_size)): - size = slice_size[i] - dim = sliceable_head_dims[i] - if size is not None and size > dim: - raise ValueError(f"size {size} has to be smaller or equal to {dim}.") - - # Recursively walk through all the children. - # Any children which exposes the set_attention_slice method - # gets the message - def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): - if hasattr(module, "set_attention_slice"): - module.set_attention_slice(slice_size.pop()) - - for child in module.children(): - fn_recursive_set_attention_slice(child, slice_size) - - reversed_slice_size = list(reversed(slice_size)) - for module in self.children(): - fn_recursive_set_attention_slice(module, reversed_slice_size) - - def _set_gradient_checkpointing(self, module, value=False): - if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)): - module.gradient_checkpointing = value - - def set_use_memory_efficient_attention_xformers( - self, valid: bool, attention_op: Optional[Callable] = None - ) -> None: - # Recursively walk through all the children. 
- # Any children which exposes the set_use_memory_efficient_attention_xformers method - # gets the message - def fn_recursive_set_mem_eff(module: torch.nn.Module): - if hasattr(module, "set_use_memory_efficient_attention_xformers"): - try: - module.set_use_memory_efficient_attention_xformers(valid, attention_op) - except: - ipdb.set_trace() - - for child in module.children(): - fn_recursive_set_mem_eff(child) - - for module in self.children(): - if isinstance(module, torch.nn.Module): - fn_recursive_set_mem_eff(module) - - def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None): - r""" - Enable memory efficient attention as implemented in xformers. - When this option is enabled, you should observe lower GPU memory usage and a potential speed up at inference - time. Speed up at training time is not guaranteed. - Warning: When Memory Efficient Attention and Sliced attention are both enabled, the Memory Efficient Attention - is used. - Parameters: - attention_op (`Callable`, *optional*): - Override the default `None` operator for use as `op` argument to the - [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention) - function of xFormers. - Examples: - ```py - >>> import torch - >>> from diffusers import UNet2DConditionModel - >>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp - >>> model = UNet2DConditionModel.from_pretrained( - ... "stabilityai/stable-diffusion-2-1", subfolder="unet", torch_dtype=torch.float16 - ... ) - >>> model = model.to("cuda") - >>> model.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp) - ``` - """ - self.set_use_memory_efficient_attention_xformers(True, attention_op) - - def disable_xformers_memory_efficient_attention(self): - r""" - Disable memory efficient attention as implemented in xformers. - """ - self.set_use_memory_efficient_attention_xformers(False) - - def forward( - self, - sample: torch.FloatTensor, - timestep: Union[torch.Tensor, float, int], - encoder_hidden_states: torch.Tensor, - adapter_features: Optional[torch.Tensor] = None, - class_labels: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - return_dict: bool = True, - down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None, - mid_block_additional_residual: Optional[torch.Tensor] = None, - ## mask args - class_labels_aux: Optional[torch.Tensor] = None, - masks: Optional[torch.Tensor] = None, - - ) -> Union[UNet3DConditionOutput, Tuple]: - r""" - Args: - sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor - timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps - encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. - - Returns: - [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`: - [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When - returning a tuple, the first element is the sample tensor. - """ - # By default samples have to be AT least a multiple of the overall upsampling factor. - # The overall upsampling factor is equal to 2 ** (# num of upsampling layears). 
- # However, the upsampling interpolation output size can be forced to fit any upsampling size - # on the fly if necessary. - default_overall_up_factor = 2**self.num_upsamplers - if adapter_features is None: - adapter_features = [None] * len(self.down_blocks) - # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` - forward_upsample_size = False - upsample_size = None - - if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]): - logger.info("Forward upsample size to force interpolation output size.") - forward_upsample_size = True - - # prepare attention_mask - if attention_mask is not None: - attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 - attention_mask = attention_mask.unsqueeze(1) - - # center input if necessary - if self.config.center_input_sample: - sample = 2 * sample - 1.0 - - # time - timesteps = timestep - if not torch.is_tensor(timesteps): - # This would be a good case for the `match` statement (Python 3.10+) - is_mps = sample.device.type == "mps" - if isinstance(timestep, float): - dtype = torch.float32 if is_mps else torch.float64 - else: - dtype = torch.int32 if is_mps else torch.int64 - timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) - elif len(timesteps.shape) == 0: - timesteps = timesteps[None].to(sample.device) - - # broadcast to batch dimension in a way that's compatible with ONNX/Core ML - timesteps = timesteps.expand(sample.shape[0]) - - t_emb = self.time_proj(timesteps) - - # timesteps does not contain any weights and will always return f32 tensors - # but time_embedding might actually be running in fp16. so we need to cast here. - # there might be better ways to encapsulate this. - t_emb = t_emb.to(dtype=self.dtype) - t_emb = self.time_embedding(t_emb) - - # ipdb.set_trace() - if self.class_embedding is not None: - if class_labels is None: - raise ValueError("class_labels should be provided when num_class_embeds > 0") - - if self.config.class_embed_type == "timestep": - class_labels = self.time_proj(class_labels) - - ## NOTE use torch.tensor(0) indicate not use image embedding - if class_labels.dim() == 0: - class_emb = 0 - else: - class_emb = self.class_embedding(class_labels).to(dtype=self.dtype) # F,C - # class_emb = 0 ## NOTE this is for debugging, trying not use image embedding - - # ipdb.set_trace() - if self.config.class_embeddings_concat: ## false - emb = torch.cat([t_emb, class_emb], dim=-1) - else: - if t_emb.size(0) == class_emb.size(0): - emb = t_emb + class_emb - elif t_emb.size(0) * sample.size(2) == class_emb.size(0): - # e_emb 2,C / class emb: 2F,C - class_emb = class_emb.reshape(t_emb.size(0), sample.size(2), class_emb.size(-1)) - emb = t_emb[:, None] + class_emb - emb = emb.reshape(-1, emb.size(-1)) - else: - emb = t_emb.repeat(2,1) + class_emb - - ## NOTE aux embedding - emb_aux = None - if self.class_embedding is not None and class_labels_aux is not None: - # ipdb.set_trace() - if class_labels_aux is not None and masks is None: - raise ValueError("masks should be provided when class_labels_aux is given") - - if self.config.class_embed_type == "timestep": - class_labels_aux = self.time_proj(class_labels_aux) - - class_emb_aux = self.class_embedding(class_labels_aux).to(dtype=self.dtype) - - if self.config.class_embeddings_concat: ## false - emb_aux = torch.cat([t_emb, class_emb_aux], dim=-1) - else: - # emb_aux = t_emb + class_emb_aux - if t_emb.size(0) == class_emb_aux.size(0): - emb_aux = t_emb + class_emb_aux - else: - # e_emb 2,C / class emb: 
2F,C - class_emb_aux = class_emb_aux.reshape(t_emb.size(0), sample.size(2), class_emb_aux.size(-1)) - emb_aux = t_emb[:, None] + class_emb_aux - emb_aux = emb_aux.reshape(-1, emb_aux.size(-1)) - - - # pre-process - sample = self.conv_in(sample) - - # down - down_block_res_samples = (sample,) - for down_id, downsample_block in enumerate(self.down_blocks): - if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: - - # ipdb.set_trace() - sample, res_samples = downsample_block( - hidden_states=sample, - temb=emb, - temb_aux=emb_aux, - masks=masks, - encoder_hidden_states=encoder_hidden_states, - attention_mask=attention_mask, - adapter_feature=adapter_features[down_id] - ) - else: - sample, res_samples = downsample_block(hidden_states=sample, temb=emb, temb_aux=emb_aux, masks=masks,adapter_feature=adapter_features[down_id]) - - down_block_res_samples += res_samples - - if down_block_additional_residuals is not None: ## seems to fit controlnet - new_down_block_res_samples = () - - for down_block_res_sample, down_block_additional_residual in zip( - down_block_res_samples, down_block_additional_residuals - ): - down_block_res_sample = down_block_res_sample + down_block_additional_residual - new_down_block_res_samples += (down_block_res_sample,) - - down_block_res_samples = new_down_block_res_samples - - # mid - sample = self.mid_block( - sample, emb, temb_aux=emb_aux, masks=masks, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask - ) - if mid_block_additional_residual is not None: - sample = sample + mid_block_additional_residual - - # up - for i, upsample_block in enumerate(self.up_blocks): - is_final_block = i == len(self.up_blocks) - 1 - - res_samples = down_block_res_samples[-len(upsample_block.resnets) :] - down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] - - # if we have not reached the final block and need to forward the - # upsample size, we do it here - if not is_final_block and forward_upsample_size: - upsample_size = down_block_res_samples[-1].shape[2:] - - if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: - sample = upsample_block( - hidden_states=sample, - temb=emb, - temb_aux=emb_aux, - masks=masks, - res_hidden_states_tuple=res_samples, - encoder_hidden_states=encoder_hidden_states, - upsample_size=upsample_size, - attention_mask=attention_mask, - ) - else: - sample = upsample_block( - hidden_states=sample, temb=emb, temb_aux=emb_aux, masks=masks, res_hidden_states_tuple=res_samples, upsample_size=upsample_size - ) - # post-process - if self.conv_norm_out: - sample = self.conv_norm_out(sample) - sample = self.conv_act(sample) - sample = self.conv_out(sample) - - if not return_dict: - return (sample,) - - return UNet3DConditionOutput(sample=sample) - - @classmethod - def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, use_temporal_conv=False): - if subfolder is not None: - pretrained_model_path = os.path.join(pretrained_model_path, subfolder) - - config_file = os.path.join(pretrained_model_path, 'config.json') - if not os.path.isfile(config_file): - raise RuntimeError(f"{config_file} does not exist") - with open(config_file, "r") as f: - config = json.load(f) - config["_class_name"] = cls.__name__ - config["down_block_types"] = [ - "CrossAttnDownBlock3D", - "CrossAttnDownBlock3D", - "CrossAttnDownBlock3D", - "DownBlock3D" - ] - - config["mid_block_type"] = "UNetMidBlock3DCrossAttn" - - config["up_block_types"] = [ - "UpBlock3D", - 
"CrossAttnUpBlock3D", - "CrossAttnUpBlock3D", - "CrossAttnUpBlock3D" - ] - - config["temporal_conv"] = use_temporal_conv - - from diffusers.utils import WEIGHTS_NAME - model = cls.from_config(config) - model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME) - if not os.path.isfile(model_file): - raise RuntimeError(f"{model_file} does not exist") - state_dict = torch.load(model_file, map_location="cpu") - for k, v in model.state_dict().items(): - if '_temp.' in k: - state_dict.update({k: v}) - if 'temp_conv' in k: - state_dict.update({k: v}) - - model.load_state_dict(state_dict) - - return model \ No newline at end of file diff --git a/spaces/Makiing/coolb-in-gtest/src/lib/hooks/use-bing.ts b/spaces/Makiing/coolb-in-gtest/src/lib/hooks/use-bing.ts deleted file mode 100644 index dcdb1667ced0cba299b0825c0e91c4732411308c..0000000000000000000000000000000000000000 --- a/spaces/Makiing/coolb-in-gtest/src/lib/hooks/use-bing.ts +++ /dev/null @@ -1,173 +0,0 @@ -'use client' - -import { useState, useCallback, useEffect, useMemo } from 'react' -import { useAtom, useAtomValue } from 'jotai' -import { chatFamily, bingConversationStyleAtom, GreetMessages, hashAtom, voiceAtom } from '@/state' -import { setConversationMessages } from './chat-history' -import { ChatMessageModel, BotId, FileItem } from '@/lib/bots/bing/types' -import { nanoid } from '../utils' -import { TTS } from '../bots/bing/tts' - -export function useBing(botId: BotId = 'bing') { - const chatAtom = useMemo(() => chatFamily({ botId, page: 'singleton' }), [botId]) - const [enableTTS] = useAtom(voiceAtom) - const speaker = useMemo(() => new TTS(), []) - const [hash, setHash] = useAtom(hashAtom) - const bingConversationStyle = useAtomValue(bingConversationStyleAtom) - const [chatState, setChatState] = useAtom(chatAtom) - const [input, setInput] = useState('') - const [attachmentList, setAttachmentList] = useState([]) - - const updateMessage = useCallback( - (messageId: string, updater: (message: ChatMessageModel) => void) => { - setChatState((draft) => { - const message = draft.messages.find((m) => m.id === messageId) - if (message) { - updater(message) - } - }) - }, - [setChatState], - ) - - const sendMessage = useCallback( - async (input: string, options = {}) => { - const botMessageId = nanoid() - const imageUrl = attachmentList?.[0]?.status === 'loaded' ? attachmentList[0].url : undefined - setChatState((draft) => { - const text = imageUrl ? `${input}\n\n![image](${imageUrl})` : input - draft.messages.push({ id: nanoid(), text, author: 'user' }, { id: botMessageId, text: '', author: 'bot' }) - setAttachmentList([]) - }) - const abortController = new AbortController() - setChatState((draft) => { - draft.generatingMessageId = botMessageId - draft.abortController = abortController - }) - speaker.reset() - await chatState.bot.sendMessage({ - prompt: input, - imageUrl: /\?bcid=([^&]+)/.test(imageUrl ?? '') ? 
`https://www.bing.com/images/blob?bcid=${RegExp.$1}` : imageUrl, - options: { - ...options, - bingConversationStyle, - }, - signal: abortController.signal, - onEvent(event) { - if (event.type === 'UPDATE_ANSWER') { - updateMessage(botMessageId, (message) => { - if (event.data.text.length > message.text.length) { - message.text = event.data.text - } - - if (event.data.spokenText && enableTTS) { - speaker.speak(event.data.spokenText) - } - - message.throttling = event.data.throttling || message.throttling - message.sourceAttributions = event.data.sourceAttributions || message.sourceAttributions - message.suggestedResponses = event.data.suggestedResponses || message.suggestedResponses - }) - } else if (event.type === 'ERROR') { - updateMessage(botMessageId, (message) => { - message.error = event.error - }) - setChatState((draft) => { - draft.abortController = undefined - draft.generatingMessageId = '' - }) - } else if (event.type === 'DONE') { - setChatState((draft) => { - draft.abortController = undefined - draft.generatingMessageId = '' - }) - } - }, - }) - }, - [botId, attachmentList, chatState.bot, setChatState, updateMessage], - ) - - const uploadImage = useCallback(async (imgUrl: string) => { - setAttachmentList([{ url: imgUrl, status: 'loading' }]) - const response = await chatState.bot.uploadImage(imgUrl, bingConversationStyle) - if (response?.blobId) { - setAttachmentList([{ url: `/api/blob?bcid=${response.blobId}`, status: 'loaded' }]) - } else { - setAttachmentList([{ url: imgUrl, status: 'error' }]) - } - }, [chatState.bot]) - - const resetConversation = useCallback(() => { - chatState.bot.resetConversation() - speaker.abort() - setChatState((draft) => { - draft.abortController = undefined - draft.generatingMessageId = '' - draft.messages = [{ author: 'bot', text: GreetMessages[Math.floor(GreetMessages.length * Math.random())], id: nanoid() }] - draft.conversationId = nanoid() - }) - }, [chatState.bot, setChatState]) - - const stopGenerating = useCallback(() => { - chatState.abortController?.abort() - if (chatState.generatingMessageId) { - updateMessage(chatState.generatingMessageId, (message) => { - if (!message.text && !message.error) { - message.text = 'Cancelled' - } - }) - } - setChatState((draft) => { - draft.generatingMessageId = '' - }) - }, [chatState.abortController, chatState.generatingMessageId, setChatState, updateMessage]) - - useEffect(() => { - if (chatState.messages.length) { - setConversationMessages(botId, chatState.conversationId, chatState.messages) - } - }, [botId, chatState.conversationId, chatState.messages]) - - useEffect(() => { - if (hash === 'reset') { - resetConversation() - setHash('') - } - }, [hash, setHash]) - - const chat = useMemo( - () => ({ - botId, - bot: chatState.bot, - isSpeaking: speaker.isSpeaking, - messages: chatState.messages, - sendMessage, - setInput, - input, - resetConversation, - generating: !!chatState.generatingMessageId, - stopGenerating, - uploadImage, - setAttachmentList, - attachmentList, - }), - [ - botId, - bingConversationStyle, - chatState.bot, - chatState.generatingMessageId, - chatState.messages, - speaker.isSpeaking, - setInput, - input, - setAttachmentList, - attachmentList, - resetConversation, - sendMessage, - stopGenerating, - ], - ) - - return chat -} diff --git a/spaces/MarkuzML/swap_face/README.md b/spaces/MarkuzML/swap_face/README.md deleted file mode 100644 index 79fc8ce24dc018c133d344a74a1dd35335b76c94..0000000000000000000000000000000000000000 --- a/spaces/MarkuzML/swap_face/README.md +++ /dev/null @@ -1,12 
+0,0 @@ ---- -title: Swap Face -emoji: 👀 -colorFrom: yellow -colorTo: red -sdk: streamlit -sdk_version: 1.25.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/datasets/voc.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/datasets/voc.py deleted file mode 100644 index a8855203b14ee0dc4da9099a2945d4aedcffbcd6..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/datasets/voc.py +++ /dev/null @@ -1,29 +0,0 @@ -import os.path as osp - -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class PascalVOCDataset(CustomDataset): - """Pascal VOC dataset. - - Args: - split (str): Split txt file for Pascal VOC. - """ - - CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', - 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', - 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', - 'train', 'tvmonitor') - - PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], - [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], - [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128], - [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0], - [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]] - - def __init__(self, split, **kwargs): - super(PascalVOCDataset, self).__init__( - img_suffix='.jpg', seg_map_suffix='.png', split=split, **kwargs) - assert osp.exists(self.img_dir) and self.split is not None diff --git a/spaces/MichaelWelsch/FreeVC/speaker_encoder/visualizations.py b/spaces/MichaelWelsch/FreeVC/speaker_encoder/visualizations.py deleted file mode 100644 index ec00fc64d6e9fda2bb8e613531066ac824df1451..0000000000000000000000000000000000000000 --- a/spaces/MichaelWelsch/FreeVC/speaker_encoder/visualizations.py +++ /dev/null @@ -1,178 +0,0 @@ -from speaker_encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataset -from datetime import datetime -from time import perf_counter as timer -import matplotlib.pyplot as plt -import numpy as np -# import webbrowser -import visdom -import umap - -colormap = np.array([ - [76, 255, 0], - [0, 127, 70], - [255, 0, 0], - [255, 217, 38], - [0, 135, 255], - [165, 0, 165], - [255, 167, 255], - [0, 255, 255], - [255, 96, 38], - [142, 76, 0], - [33, 0, 127], - [0, 0, 0], - [183, 183, 183], -], dtype=np.float) / 255 - - -class Visualizations: - def __init__(self, env_name=None, update_every=10, server="http://localhost", disabled=False): - # Tracking data - self.last_update_timestamp = timer() - self.update_every = update_every - self.step_times = [] - self.losses = [] - self.eers = [] - print("Updating the visualizations every %d steps." % update_every) - - # If visdom is disabled TODO: use a better paradigm for that - self.disabled = disabled - if self.disabled: - return - - # Set the environment name - now = str(datetime.now().strftime("%d-%m %Hh%M")) - if env_name is None: - self.env_name = now - else: - self.env_name = "%s (%s)" % (env_name, now) - - # Connect to visdom and open the corresponding window in the browser - try: - self.vis = visdom.Visdom(server, env=self.env_name, raise_exceptions=True) - except ConnectionError: - raise Exception("No visdom server detected. 
Run the command \"visdom\" in your CLI to " - "start it.") - # webbrowser.open("http://localhost:8097/env/" + self.env_name) - - # Create the windows - self.loss_win = None - self.eer_win = None - # self.lr_win = None - self.implementation_win = None - self.projection_win = None - self.implementation_string = "" - - def log_params(self): - if self.disabled: - return - from speaker_encoder import params_data - from speaker_encoder import params_model - param_string = "Model parameters:<br>
" - for param_name in (p for p in dir(params_model) if not p.startswith("__")): - value = getattr(params_model, param_name) - param_string += "\t%s: %s
" % (param_name, value) - param_string += "Data parameters:
" - for param_name in (p for p in dir(params_data) if not p.startswith("__")): - value = getattr(params_data, param_name) - param_string += "\t%s: %s
" % (param_name, value) - self.vis.text(param_string, opts={"title": "Parameters"}) - - def log_dataset(self, dataset: SpeakerVerificationDataset): - if self.disabled: - return - dataset_string = "" - dataset_string += "Speakers: %s\n" % len(dataset.speakers) - dataset_string += "\n" + dataset.get_logs() - dataset_string = dataset_string.replace("\n", "
") - self.vis.text(dataset_string, opts={"title": "Dataset"}) - - def log_implementation(self, params): - if self.disabled: - return - implementation_string = "" - for param, value in params.items(): - implementation_string += "%s: %s\n" % (param, value) - implementation_string = implementation_string.replace("\n", "
") - self.implementation_string = implementation_string - self.implementation_win = self.vis.text( - implementation_string, - opts={"title": "Training implementation"} - ) - - def update(self, loss, eer, step): - # Update the tracking data - now = timer() - self.step_times.append(1000 * (now - self.last_update_timestamp)) - self.last_update_timestamp = now - self.losses.append(loss) - self.eers.append(eer) - print(".", end="") - - # Update the plots every steps - if step % self.update_every != 0: - return - time_string = "Step time: mean: %5dms std: %5dms" % \ - (int(np.mean(self.step_times)), int(np.std(self.step_times))) - print("\nStep %6d Loss: %.4f EER: %.4f %s" % - (step, np.mean(self.losses), np.mean(self.eers), time_string)) - if not self.disabled: - self.loss_win = self.vis.line( - [np.mean(self.losses)], - [step], - win=self.loss_win, - update="append" if self.loss_win else None, - opts=dict( - legend=["Avg. loss"], - xlabel="Step", - ylabel="Loss", - title="Loss", - ) - ) - self.eer_win = self.vis.line( - [np.mean(self.eers)], - [step], - win=self.eer_win, - update="append" if self.eer_win else None, - opts=dict( - legend=["Avg. EER"], - xlabel="Step", - ylabel="EER", - title="Equal error rate" - ) - ) - if self.implementation_win is not None: - self.vis.text( - self.implementation_string + ("%s" % time_string), - win=self.implementation_win, - opts={"title": "Training implementation"}, - ) - - # Reset the tracking - self.losses.clear() - self.eers.clear() - self.step_times.clear() - - def draw_projections(self, embeds, utterances_per_speaker, step, out_fpath=None, - max_speakers=10): - max_speakers = min(max_speakers, len(colormap)) - embeds = embeds[:max_speakers * utterances_per_speaker] - - n_speakers = len(embeds) // utterances_per_speaker - ground_truth = np.repeat(np.arange(n_speakers), utterances_per_speaker) - colors = [colormap[i] for i in ground_truth] - - reducer = umap.UMAP() - projected = reducer.fit_transform(embeds) - plt.scatter(projected[:, 0], projected[:, 1], c=colors) - plt.gca().set_aspect("equal", "datalim") - plt.title("UMAP projection (step %d)" % step) - if not self.disabled: - self.projection_win = self.vis.matplot(plt, win=self.projection_win) - if out_fpath is not None: - plt.savefig(out_fpath) - plt.clf() - - def save(self): - if not self.disabled: - self.vis.save([self.env_name]) - \ No newline at end of file diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/kie/_base_/datasets/wildreceipt.py b/spaces/Mountchicken/MAERec-Gradio/configs/kie/_base_/datasets/wildreceipt.py deleted file mode 100644 index 9c1122edd53c5c8df4bad55ad764c12e1714026a..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/configs/kie/_base_/datasets/wildreceipt.py +++ /dev/null @@ -1,16 +0,0 @@ -wildreceipt_data_root = 'data/wildreceipt/' - -wildreceipt_train = dict( - type='WildReceiptDataset', - data_root=wildreceipt_data_root, - metainfo=wildreceipt_data_root + 'class_list.txt', - ann_file='train.txt', - pipeline=None) - -wildreceipt_test = dict( - type='WildReceiptDataset', - data_root=wildreceipt_data_root, - metainfo=wildreceipt_data_root + 'class_list.txt', - ann_file='test.txt', - test_mode=True, - pipeline=None) diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/preparers/parsers/synthtext_parser.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/preparers/parsers/synthtext_parser.py deleted file mode 100644 index 0764e0d8f1f5b00bdc7d2c8210b24d8bb2b87a53..0000000000000000000000000000000000000000 --- 
a/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/preparers/parsers/synthtext_parser.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -from typing import List, Optional, Tuple, Union - -import numpy as np -from mmengine import track_parallel_progress -from scipy.io import loadmat - -from mmocr.utils import is_type_list -from ..data_preparer import DATA_PARSERS -from .base import BaseParser - - -@DATA_PARSERS.register_module() -class SynthTextAnnParser(BaseParser): - """SynthText Text Detection Annotation Parser. - - Args: - split (str): The split of the dataset. It is usually set automatically - and users do not need to set it manually in config file in most - cases. - nproc (int): Number of processes to process the data. Defaults to 1. - It is usually set automatically and users do not need to set it - manually in config file in most cases. - separator (str): The separator between each element in a line. Defaults - to ','. - ignore (str): The text to be ignored. Defaults to '###'. - format (str): The format of the annotation. Defaults to - 'x1,y1,x2,y2,x3,y3,x4,trans'. - encoding (str): The encoding of the annotation file. Defaults to - 'utf-8-sig'. - remove_strs (List[str], Optional): Used to remove redundant strings in - the transcription. Defaults to None. - mode (str, optional): The mode of the box converter. Supported modes - are 'xywh' and 'xyxy'. Defaults to None. - """ - - def __init__(self, - split: str, - nproc: int, - separator: str = ',', - ignore: str = '###', - format: str = 'x1,y1,x2,y2,x3,y3,x4,y4,trans', - encoding: str = 'utf-8', - remove_strs: Optional[List[str]] = None, - mode: str = None) -> None: - self.sep = separator - self.format = format - self.encoding = encoding - self.ignore = ignore - self.mode = mode - self.remove_strs = remove_strs - super().__init__(split=split, nproc=nproc) - - def _trace_boundary(self, char_boxes: List[np.ndarray]) -> np.ndarray: - """Trace the boundary point of text. - - Args: - char_boxes (list[ndarray]): The char boxes for one text. Each - element is 4x2 ndarray. - - Returns: - ndarray: The boundary point sets with size nx2. - """ - assert is_type_list(char_boxes, np.ndarray) - - # from top left to to right - p_top = [box[0:2] for box in char_boxes] - # from bottom right to bottom left - p_bottom = [ - char_boxes[idx][[2, 3], :] - for idx in range(len(char_boxes) - 1, -1, -1) - ] - - p = p_top + p_bottom - - boundary = np.concatenate(p).astype(int) - - return boundary - - def _match_bbox_char_str(self, bboxes: np.ndarray, char_bboxes: np.ndarray, - strs: np.ndarray - ) -> Tuple[List[np.ndarray], List[str]]: - """Match the bboxes, char bboxes, and strs. - - Args: - bboxes (ndarray): The text boxes of size (2, 4, num_box). - char_bboxes (ndarray): The char boxes of size (2, 4, num_char_box). - strs (ndarray): The string of size (num_strs,) - - Returns: - Tuple(List[ndarray], List[str]): Polygon & word list. 
- """ - assert isinstance(bboxes, np.ndarray) - assert isinstance(char_bboxes, np.ndarray) - assert isinstance(strs, np.ndarray) - # bboxes = bboxes.astype(np.int32) - char_bboxes = char_bboxes.astype(np.int32) - - if len(char_bboxes.shape) == 2: - char_bboxes = np.expand_dims(char_bboxes, axis=2) - char_bboxes = np.transpose(char_bboxes, (2, 1, 0)) - num_boxes = 1 if len(bboxes.shape) == 2 else bboxes.shape[-1] - - poly_charbox_list = [[] for _ in range(num_boxes)] - - words = [] - for line in strs: - words += line.split() - words_len = [len(w) for w in words] - words_end_inx = np.cumsum(words_len) - start_inx = 0 - for word_inx, end_inx in enumerate(words_end_inx): - for char_inx in range(start_inx, end_inx): - poly_charbox_list[word_inx].append(char_bboxes[char_inx]) - start_inx = end_inx - - for box_inx in range(num_boxes): - assert len(poly_charbox_list[box_inx]) > 0 - - poly_boundary_list = [] - for item in poly_charbox_list: - boundary = np.ndarray((0, 2)) - if len(item) > 0: - boundary = self._trace_boundary(item) - poly_boundary_list.append(boundary) - - return poly_boundary_list, words - - def parse_files(self, img_paths: Union[List[str], str], - ann_paths: Union[List[str], str]) -> List[Tuple]: - """Convert annotations to MMOCR format. - - Args: - img_paths (str or list[str]): the list of image paths or the - directory of the images. - ann_paths (str or list[str]): the list of annotation paths or the - path of the annotation file which contains all the annotations. - - Returns: - List[Tuple]: A list of a tuple of (image_path, instances). - - - img_path (str): The path of image file, which can be read - directly by opencv. - - instance: instance is a list of dict containing parsed - annotations, which should contain the following keys: - - - 'poly' or 'box' (textdet or textspotting) - - 'text' (textspotting or textrecog) - - 'ignore' (all task) - """ - assert isinstance(ann_paths, str) - gt = loadmat(ann_paths) - self.img_dir = img_paths - samples = track_parallel_progress( - self.parse_file, - list( - zip(gt['imnames'][0], gt['wordBB'][0], gt['charBB'][0], - gt['txt'][0])), - nproc=self.nproc) - return samples - - def parse_file(self, annotation: Tuple) -> Tuple: - """Parse single annotation.""" - img_file, wordBB, charBB, txt = annotation - polys_list, word_list = self._match_bbox_char_str(wordBB, charBB, txt) - - instances = list() - for poly, word in zip(polys_list, word_list): - instances.append( - dict(poly=poly.flatten().tolist(), text=word, ignore=False)) - return osp.join(self.img_dir, img_file[0]), instances diff --git a/spaces/Mrleo/MyChatGPT/overwrites.py b/spaces/Mrleo/MyChatGPT/overwrites.py deleted file mode 100644 index 436fcf46b5807ca045e77ac762039ba0ffc16f6d..0000000000000000000000000000000000000000 --- a/spaces/Mrleo/MyChatGPT/overwrites.py +++ /dev/null @@ -1,38 +0,0 @@ -from __future__ import annotations -import logging - -from llama_index import Prompt -from typing import List, Tuple -import mdtex2html - -from presets import * -from llama_func import * - - -def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]: - logging.debug("Compacting text chunks...🚀🚀🚀") - combined_str = [c.strip() for c in text_chunks if c.strip()] - combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)] - combined_str = "\n\n".join(combined_str) - # resplit based on self.max_chunk_overlap - text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1) - return text_splitter.split_text(combined_str) - - -def postprocess( - 
self, y: List[Tuple[str | None, str | None]] -) -> List[Tuple[str | None, str | None]]: - """ - Parameters: - y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. - Returns: - List of tuples representing the message and response. Each message and response will be a string of HTML. - """ - if y is None or y == []: - return [] - tag_regex = re.compile(r"^<\w+>[^<]+") - if tag_regex.search(y[-1][1]): - y[-1] = (y[-1][0].replace("\n", "<br>
"), y[-1][1]) - else: - y[-1] = (y[-1][0].replace("\n", "
"), convert_mdtext(y[-1][1])) - return y diff --git a/spaces/Msninmx/shamzam/README.md b/spaces/Msninmx/shamzam/README.md deleted file mode 100644 index 36e0db65d3b51367953fb41226f076aa6487180e..0000000000000000000000000000000000000000 --- a/spaces/Msninmx/shamzam/README.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Shamzam -emoji: 🎵 -colorFrom: green -colorTo: blue -sdk: gradio -app_file: app.py ---- \ No newline at end of file diff --git a/spaces/Muennighoff/code_eval_octopack/README.md b/spaces/Muennighoff/code_eval_octopack/README.md deleted file mode 100644 index a0197873b48738b4cd612607d8e671be695ca385..0000000000000000000000000000000000000000 --- a/spaces/Muennighoff/code_eval_octopack/README.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: Code Eval OctoPack -emoji: 🐙 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -tags: -- evaluate -- metric -description: >- - This metric implements code evaluation with execution across multiple languages as used in the paper "OctoPack: Instruction Tuning - Code Large Language Models" (https://arxiv.org/abs/2308.07124). ---- - -# Metric Card for Code Eval - -## Metric description - -The CodeEval metric estimates the pass@k metric for code synthesis. - -It implements the code exection for HumanEvalPack as described in the paper ["OctoPack: Instruction Tuning Code Large Language Model"](https://arxiv.org/abs/2308.07124). - - -## How to use - -The Code Eval metric calculates how good are predictions given a set of references. Its arguments are: - -`predictions`: a list of candidates to evaluate. Each candidate should be a list of strings with several code candidates to solve the problem. - -`references`: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. - -`k`: number of code candidates to consider in the evaluation. The default value is `[1, 10, 100]`. - -`num_workers`: the number of workers used to evaluate the candidate programs (The default value is `4`). - -`timeout`: The maximum time taken to produce a prediction before it is considered a "timeout". The default value is `3.0` (i.e. 3 seconds). - -`language`: Which language to execute the code in. The default value is `python` and alternatives are `javascript`, `java`, `go`, `cpp`, `rust` - -`cargo_string`: The cargo installations to perform for Rust. Defaults to some basic packages, see `code_eval_octopack.py`. - -```python -from evaluate import load -code_eval = load("Muennighoff/code_eval_octopack") -test_cases = ["assert add(2,3)==5"] -candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]] -pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2], language="python") -``` - -N.B. -This metric exists to run untrusted model-generated code. Users are strongly encouraged not to do so outside of a robust security sandbox. Before running this metric and once you've taken the necessary precautions, you will need to set the `HF_ALLOW_CODE_EVAL` environment variable. Use it at your own risk: -```python -import os -os.environ["HF_ALLOW_CODE_EVAL"] = "1"` -``` - -## Output values - -The Code Eval metric outputs two things: - -`pass_at_k`: a dictionary with the pass rates for each k value defined in the arguments. - -`results`: a dictionary with granular results of each unit test. 
- -## Examples - -Full match at `k=1`: - -```python -from evaluate import load -code_eval = load("Muennighoff/code_eval_octopack") -test_cases = ["assert add(2,3)==5"] -candidates = [["def add(a, b): return a+b"]] -pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1], language="python") -print(pass_at_k) -{'pass@1': 1.0} -``` - -No match for k = 1: - -```python -from evaluate import load -code_eval = load("Muennighoff/code_eval_octopack") -test_cases = ["assert add(2,3)==5"] -candidates = [["def add(a,b): return a*b"]] -pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1], language="python") -print(pass_at_k) -{'pass@1': 0.0} -``` - -Partial match at k=1, full match at k=2: - -```python -from evaluate import load -code_eval = load("Muennighoff/code_eval_octopack") -test_cases = ["assert add(2,3)==5"] -candidates = [["def add(a, b): return a+b", "def add(a,b): return a*b"]] -pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2], language="python") -print(pass_at_k) -{'pass@1': 0.5, 'pass@2': 1.0} -``` - -## Citation - -```bibtex -@article{muennighoff2023octopack, - title={OctoPack: Instruction Tuning Code Large Language Models}, - author={Niklas Muennighoff and Qian Liu and Armel Zebaze and Qinkai Zheng and Binyuan Hui and Terry Yue Zhuo and Swayam Singh and Xiangru Tang and Leandro von Werra and Shayne Longpre}, - journal={arXiv preprint arXiv:2308.07124}, - year={2023} -} -``` diff --git a/spaces/NAACL2022/CLIP-Caption-Reward/scripts/make_bu_data.py b/spaces/NAACL2022/CLIP-Caption-Reward/scripts/make_bu_data.py deleted file mode 100644 index 211f3e93dd3df9836e542322b0a19eeb581b2e1a..0000000000000000000000000000000000000000 --- a/spaces/NAACL2022/CLIP-Caption-Reward/scripts/make_bu_data.py +++ /dev/null @@ -1,52 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import base64 -import numpy as np -import csv -import sys -import zlib -import time -import mmap -import argparse - -parser = argparse.ArgumentParser() - -# output_dir -parser.add_argument('--downloaded_feats', default='data/bu_data', help='downloaded feature directory') -parser.add_argument('--output_dir', default='data/cocobu', help='output feature files') - -args = parser.parse_args() - -csv.field_size_limit(sys.maxsize) - - -FIELDNAMES = ['image_id', 'image_w','image_h','num_boxes', 'boxes', 'features'] -infiles = ['trainval/karpathy_test_resnet101_faster_rcnn_genome.tsv', - 'trainval/karpathy_val_resnet101_faster_rcnn_genome.tsv',\ - 'trainval/karpathy_train_resnet101_faster_rcnn_genome.tsv.0', \ - 'trainval/karpathy_train_resnet101_faster_rcnn_genome.tsv.1'] - -os.makedirs(args.output_dir+'_att') -os.makedirs(args.output_dir+'_fc') -os.makedirs(args.output_dir+'_box') - -for infile in infiles: - print('Reading ' + infile) - with open(os.path.join(args.downloaded_feats, infile), "r") as tsv_in_file: - reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames = FIELDNAMES) - for item in reader: - item['image_id'] = int(item['image_id']) - item['num_boxes'] = int(item['num_boxes']) - for field in ['boxes', 'features']: - item[field] = np.frombuffer(base64.decodestring(item[field].encode('ascii')), - dtype=np.float32).reshape((item['num_boxes'],-1)) - np.savez_compressed(os.path.join(args.output_dir+'_att', str(item['image_id'])), feat=item['features']) - np.save(os.path.join(args.output_dir+'_fc', str(item['image_id'])), 
item['features'].mean(0)) - np.save(os.path.join(args.output_dir+'_box', str(item['image_id'])), item['boxes']) - - - - diff --git a/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/multi_channel_attention.py b/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/multi_channel_attention.py deleted file mode 100644 index 499d977c753518f0892267ac98abc6bf7618c2cd..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/nlp/modeling/layers/multi_channel_attention.py +++ /dev/null @@ -1,165 +0,0 @@ -# Lint as: python3 -# Copyright 2020 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Multi-channel Attention.""" -# pylint: disable=g-classes-have-attributes - -from __future__ import absolute_import -from __future__ import division -# from __future__ import google_type_annotations -from __future__ import print_function - -import math - -import tensorflow as tf -from official.modeling import tf_utils -from official.nlp.modeling.layers import attention -from official.nlp.modeling.layers import dense_einsum -from official.nlp.modeling.layers import masked_softmax - - -class VotingAttention(tf.keras.layers.Layer): - """Voting Attention layer. - - Arguments: - num_heads: the number of attention heads. - head_size: per-head hidden size. - kernel_initializer: Initializer for dense layer kernels. - bias_initializer: Initializer for dense layer biases. - kernel_regularizer: Regularizer for dense layer kernels. - bias_regularizer: Regularizer for dense layer biases. - activity_regularizer: Regularizer for dense layer activity. - kernel_constraint: Constraint for dense layer kernels. - bias_constraint: Constraint for dense layer kernels. 
- """ - - def __init__(self, - num_heads, - head_size, - kernel_initializer="glorot_uniform", - bias_initializer="zeros", - kernel_regularizer=None, - bias_regularizer=None, - activity_regularizer=None, - kernel_constraint=None, - bias_constraint=None, - **kwargs): - super(VotingAttention, self).__init__(**kwargs) - self._num_heads = num_heads - self._head_size = head_size - self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) - self._bias_initializer = tf.keras.initializers.get(bias_initializer) - self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) - self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) - self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) - self._bias_constraint = tf.keras.constraints.get(bias_constraint) - - def build(self, unused_input_shapes): - self._query_dense = dense_einsum.DenseEinsum( - output_shape=(self._num_heads, self._head_size), - kernel_initializer=self._kernel_initializer, - bias_initializer=self._bias_initializer, - kernel_regularizer=self._kernel_regularizer, - bias_regularizer=self._bias_regularizer, - activity_regularizer=self._activity_regularizer, - kernel_constraint=self._kernel_constraint, - bias_constraint=self._bias_constraint, - dtype=self.dtype, - name="encdocatt_query") - self._key_dense = dense_einsum.DenseEinsum( - output_shape=(self._num_heads, self._head_size), - kernel_initializer=self._kernel_initializer, - bias_initializer=self._bias_initializer, - kernel_regularizer=self._kernel_regularizer, - bias_regularizer=self._bias_regularizer, - activity_regularizer=self._activity_regularizer, - kernel_constraint=self._kernel_constraint, - bias_constraint=self._bias_constraint, - dtype=self.dtype, - name="encdocatt_key") - super(VotingAttention, self).build(unused_input_shapes) - - def call(self, encoder_outputs, doc_attention_mask): - num_docs = tf_utils.get_shape_list(encoder_outputs, expected_rank=[4])[1] - cls_embeddings = encoder_outputs[:, :, 0, :] - key = self._key_dense(cls_embeddings) - query = self._query_dense(cls_embeddings) - doc_attention_mask = tf.cast(doc_attention_mask, tf.float32) - - key = tf.einsum("BANH,BA->BANH", key, doc_attention_mask) - query = tf.einsum("BANH,BA->BANH", query, doc_attention_mask) - attention_matrix = tf.einsum("BXNH,BYNH->BNXY", query, key) - mask = tf.ones([num_docs, num_docs]) - mask = tf.linalg.set_diag(mask, tf.zeros(num_docs)) - attention_matrix = tf.einsum("BNXY,XY->BNXY", attention_matrix, mask) - doc_attention_probs = tf.einsum("BNAY->BNA", attention_matrix) - doc_attention_probs = tf.einsum("BNA->BA", doc_attention_probs) - infadder = (1.0 - doc_attention_mask) * -100000.0 - return tf.nn.softmax(doc_attention_probs + infadder) - - -class MultiChannelAttention(attention.MultiHeadAttention): - """Multi-channel Attention layer. - - Introduced in: https://arxiv.org/abs/2001.09386. Expects multiple - cross-attention target sequences. 
- """ - - def build(self, input_shape): - super(MultiChannelAttention, self).build(input_shape) - self._masked_softmax = masked_softmax.MaskedSoftmax(mask_expansion_axes=[2]) - - def call(self, inputs, attention_mask=None): - from_tensor = inputs[0] - to_tensor = inputs[1] - doc_attention_probs = inputs[2] - - # Scalar dimensions referenced here: - # B = batch size (number of stories) - # A = num_docs (number of docs) - # F = `from_tensor` sequence length - # T = `to_tensor` sequence length - # N = `num_attention_heads` - # H = `size_per_head` - # `query_tensor` = [B, F, N ,H] - query_tensor = self._query_dense(from_tensor) - - # `key_tensor` = [B, A, T, N, H] - key_tensor = self._key_dense(to_tensor) - - # `value_tensor` = [B, A, T, N, H] - value_tensor = self._value_dense(to_tensor) - - # Take the dot product between "query" and "key" to get the raw - # attention scores. - attention_scores = tf.einsum("BATNH,BFNH->BANFT", key_tensor, query_tensor) - attention_scores = tf.multiply(attention_scores, - 1.0 / math.sqrt(float(self._key_size))) - - # Normalize the attention scores to probabilities. - # `attention_probs` = [B, A, N, F, T] - attention_probs = self._masked_softmax(attention_scores, attention_mask) - - # This is actually dropping out entire tokens to attend to, which might - # seem a bit unusual, but is taken from the original Transformer paper. - attention_probs = self._dropout_layer(attention_probs) - - # `context_layer` = [B, F, N, H] - context_layer = tf.einsum("BANFT,BATNH->BAFNH", attention_probs, - value_tensor) - attention_output = tf.einsum("BNFA,BAFNH->BFNH", doc_attention_probs, - context_layer) - attention_output = self._output_dense(attention_output) - return attention_output diff --git a/spaces/Neo-Salvatore/GPTBase/app.py b/spaces/Neo-Salvatore/GPTBase/app.py deleted file mode 100644 index 787c0919a7172d68b2d90d1f3dac28dc2ca83796..0000000000000000000000000000000000000000 --- a/spaces/Neo-Salvatore/GPTBase/app.py +++ /dev/null @@ -1,91 +0,0 @@ -import requests -import streamlit as st -import json -from streamlit_chat import message - -st.title("GPTBase") - -def clear_submit(): - st.session_state["submit"] = False - -def set_api_key(api_key: str): - st.session_state["API_KEY"] = api_key - -def set_ai_id(ai_id: str): - st.session_state["ai_id"] = ai_id - -# Sidebar -index = None -doc = None -with st.sidebar: - user_secret = st.text_input( - "API Key", - type="password", - placeholder="Paste your API key here (ak-...)", - help="You can get your API key from https://mygpt.felo.me/api-keys.", - value=st.session_state.get("API_KEY", ""), - ) - if user_secret: - set_api_key(user_secret) - - user_ai_id = st.text_input( - "AI Id", - placeholder="Paste your AI Id here", - value=st.session_state.get("ai_id", ""), - ) - if user_ai_id: - set_ai_id(user_ai_id) - - uploaded_file = st.file_uploader( - "Upload a pdf, docx, or txt file", - type=["pdf", "docx", "txt"], - help="Scanned documents are not supported yet!", - on_change=clear_submit, - ) - api_key = st.session_state.get("API_KEY") - ai_id = st.session_state.get("ai_id") - if not api_key or not ai_id: - st.warning("Please enter your API key and AI Id to upload a file.") - - if uploaded_file is not None and "API_KEY" in st.session_state and "ai_id" in st.session_state: - - file = {"file": uploaded_file} - url = f'https://mygpt.felo.me/api/v1/ais/{ai_id}/files' - headers = {"Authorization": f"Bearer {api_key}"} - response = requests.post(url, headers=headers, files=file) - result = response.json() - if response.status_code != 
200: - st.text(result['detail']) - -tab1, tab2 = st.tabs(["Introduce", "Ask AI"]) -with tab1: - st.markdown("### Use steps") - st.write('1.Please fill in your API Key and AI Id.') - st.write('2.Upload the file you want to get the answer.') - st.write('3.You can ask AI with the file.') - # st.markdown("""---11""") - # st.write('If you have any questions or suggestions, please visit: ') - -with tab2: - if not api_key or not ai_id: - st.write('Please fill in your API Key and AI Id.') - st.header("Ask AI some questions about the file you uploaded:") - - def get_text(): - input_text = st.text_area("You:", on_change=clear_submit) - return input_text - user_input = get_text() - button = st.button("Submit") - - if button or st.session_state.get("submit"): - if api_key and ai_id and user_input: - url = f'https://mygpt.felo.me/api/v1/question/{ai_id}' - headers = {"Authorization": f"Bearer {api_key}"} - data = {"question": user_input} - response = requests.post(url, headers=headers, data=json.dumps(data)) - result = response.json() - if response.status_code != 200: - st.text(result['detail']) - else: - message(result['answer']) - diff --git a/spaces/Nick1/rvc-models/lib/infer_pack/models_onnx.py b/spaces/Nick1/rvc-models/lib/infer_pack/models_onnx.py deleted file mode 100644 index 963e67b29f828e9fdd096397952054fe77cf3d10..0000000000000000000000000000000000000000 --- a/spaces/Nick1/rvc-models/lib/infer_pack/models_onnx.py +++ /dev/null @@ -1,819 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from lib.infer_pack import modules -from lib.infer_pack import attentions -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from lib.infer_pack.commons import init_weights -import numpy as np -from lib.infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = 
out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - 
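        # The layers assembled below follow the HiFi-GAN recipe: a pre-convolution, a stack of weight-normalized transposed-convolution upsamplers, multi-kernel residual blocks whose outputs are averaged, and a post-convolution (plus an optional speaker-conditioning projection) that maps the features to a waveform.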
self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - 
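            # rad_values holds the per-sample phase increment (f0 / sampling_rate) of each harmonic; the cumulative sums below integrate it into a running phase, the interpolations upsample that phase by upp to the audio rate, and sin(2*pi*phase) yields the harmonic sine waves before noise is mixed in.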
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - 
upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMsNSFsidM(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - version, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - if version == "v1": - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - else: - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - 
inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - self.speaker_map = None - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def construct_spkmixmap(self, n_speaker): - self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels)) - for i in range(n_speaker): - self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]])) - self.speaker_map = self.speaker_map.unsqueeze(0) - - def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None): - if self.speaker_map is not None: # [N, S] * [S, B, 1, H] - g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1] - g = g * self.speaker_map # [N, S, B, 1, H] - g = torch.sum(g, dim=1) # [N, 1, B, 1, H] - g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N] - else: - g = g.unsqueeze(0) - g = self.emb_g(g).transpose(1, 2) - - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - 
norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/NoriZC/vits-models/text/cleaners.py b/spaces/NoriZC/vits-models/text/cleaners.py deleted file mode 100644 index 68c9ad24d5a303b68a521fba2e8776c8cc867356..0000000000000000000000000000000000000000 --- a/spaces/NoriZC/vits-models/text/cleaners.py +++ /dev/null @@ -1,475 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - -import re -from unidecode import unidecode -import pyopenjtalk -from jamo import h2j, j2hcj -from pypinyin import lazy_pinyin, BOPOMOFO -import jieba, cn2an - - -# This is a list of Korean classifiers preceded by pure Korean numerals. 
-_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통' - -# Regular expression matching whitespace: -_whitespace_re = re.compile(r'\s+') - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - -# List of (hangul, hangul divided) pairs: -_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄳ', 'ㄱㅅ'), - ('ㄵ', 'ㄴㅈ'), - ('ㄶ', 'ㄴㅎ'), - ('ㄺ', 'ㄹㄱ'), - ('ㄻ', 'ㄹㅁ'), - ('ㄼ', 'ㄹㅂ'), - ('ㄽ', 'ㄹㅅ'), - ('ㄾ', 'ㄹㅌ'), - ('ㄿ', 'ㄹㅍ'), - ('ㅀ', 'ㄹㅎ'), - ('ㅄ', 'ㅂㅅ'), - ('ㅘ', 'ㅗㅏ'), - ('ㅙ', 'ㅗㅐ'), - ('ㅚ', 'ㅗㅣ'), - ('ㅝ', 'ㅜㅓ'), - ('ㅞ', 'ㅜㅔ'), - ('ㅟ', 'ㅜㅣ'), - ('ㅢ', 'ㅡㅣ'), - ('ㅑ', 'ㅣㅏ'), - ('ㅒ', 'ㅣㅐ'), - ('ㅕ', 'ㅣㅓ'), - ('ㅖ', 'ㅣㅔ'), - ('ㅛ', 'ㅣㅗ'), - ('ㅠ', 'ㅣㅜ') -]] - -# List of (Latin alphabet, hangul) pairs: -_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', '에이'), - ('b', '비'), - ('c', '시'), - ('d', '디'), - ('e', '이'), - ('f', '에프'), - ('g', '지'), - ('h', '에이치'), - ('i', '아이'), - ('j', '제이'), - ('k', '케이'), - ('l', '엘'), - ('m', '엠'), - ('n', '엔'), - ('o', '오'), - ('p', '피'), - ('q', '큐'), - ('r', '아르'), - ('s', '에스'), - ('t', '티'), - ('u', '유'), - ('v', '브이'), - ('w', '더블유'), - ('x', '엑스'), - ('y', '와이'), - ('z', '제트') -]] - -# List of (Latin alphabet, bopomofo) pairs: -_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', 'ㄟˉ'), - ('b', 'ㄅㄧˋ'), - ('c', 'ㄙㄧˉ'), - ('d', 'ㄉㄧˋ'), - ('e', 'ㄧˋ'), - ('f', 'ㄝˊㄈㄨˋ'), - ('g', 'ㄐㄧˋ'), - ('h', 'ㄝˇㄑㄩˋ'), - ('i', 'ㄞˋ'), - ('j', 'ㄐㄟˋ'), - ('k', 'ㄎㄟˋ'), - ('l', 'ㄝˊㄛˋ'), - ('m', 'ㄝˊㄇㄨˋ'), - ('n', 'ㄣˉ'), - ('o', 'ㄡˉ'), - ('p', 'ㄆㄧˉ'), - ('q', 'ㄎㄧㄡˉ'), - ('r', 'ㄚˋ'), - ('s', 'ㄝˊㄙˋ'), - ('t', 'ㄊㄧˋ'), - ('u', 'ㄧㄡˉ'), - ('v', 'ㄨㄧˉ'), - ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), - ('x', 'ㄝˉㄎㄨˋㄙˋ'), - ('y', 'ㄨㄞˋ'), - ('z', 'ㄗㄟˋ') -]] - - -# List of (bopomofo, romaji) pairs: -_bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('ㄅㄛ', 'p⁼wo'), - ('ㄆㄛ', 'pʰwo'), - ('ㄇㄛ', 'mwo'), - ('ㄈㄛ', 'fwo'), - ('ㄅ', 'p⁼'), - ('ㄆ', 'pʰ'), - ('ㄇ', 'm'), - ('ㄈ', 'f'), - ('ㄉ', 't⁼'), - ('ㄊ', 'tʰ'), - ('ㄋ', 'n'), - ('ㄌ', 'l'), - ('ㄍ', 'k⁼'), - ('ㄎ', 'kʰ'), - ('ㄏ', 'h'), - ('ㄐ', 'ʧ⁼'), - ('ㄑ', 'ʧʰ'), - ('ㄒ', 'ʃ'), - ('ㄓ', 'ʦ`⁼'), - ('ㄔ', 'ʦ`ʰ'), - ('ㄕ', 's`'), - ('ㄖ', 'ɹ`'), - ('ㄗ', 'ʦ⁼'), - ('ㄘ', 'ʦʰ'), - ('ㄙ', 's'), - ('ㄚ', 'a'), - ('ㄛ', 'o'), - ('ㄜ', 'ə'), - ('ㄝ', 'e'), - ('ㄞ', 'ai'), - ('ㄟ', 'ei'), - ('ㄠ', 'au'), - ('ㄡ', 'ou'), - ('ㄧㄢ', 'yeNN'), - ('ㄢ', 'aNN'), - ('ㄧㄣ', 'iNN'), - ('ㄣ', 'əNN'), - ('ㄤ', 'aNg'), - ('ㄧㄥ', 'iNg'), - ('ㄨㄥ', 'uNg'), - ('ㄩㄥ', 'yuNg'), - ('ㄥ', 'əNg'), - ('ㄦ', 'əɻ'), - ('ㄧ', 'i'), - ('ㄨ', 'u'), - ('ㄩ', 'ɥ'), - ('ˉ', '→'), - ('ˊ', '↑'), - ('ˇ', '↓↑'), - ('ˋ', 
'↓'), - ('˙', ''), - (',', ','), - ('。', '.'), - ('!', '!'), - ('?', '?'), - ('—', '-') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def lowercase(text): - return text.lower() - - -def collapse_whitespace(text): - return re.sub(_whitespace_re, ' ', text) - - -def convert_to_ascii(text): - return unidecode(text) - - -def japanese_to_romaji_with_accent(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - if text!='': - text+=' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil','pau']: - text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q') - else: - continue - n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']: - a2_next=-1 - else: - a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if i y.size(1): - x[mask] = padding_idx - if x.dim() == 2: - x[mask, : y.size(1)] = y - else: - x[mask, : y.size(1), :] = y - else: - x[mask] = y - return x diff --git a/spaces/OIUGLK/bingo/src/components/chat-list.tsx b/spaces/OIUGLK/bingo/src/components/chat-list.tsx deleted file mode 100644 index 624a78ef0d7be0f1192cf02a81e2e9cf214cb193..0000000000000000000000000000000000000000 --- a/spaces/OIUGLK/bingo/src/components/chat-list.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import React from 'react' - -import { Separator } from '@/components/ui/separator' -import { ChatMessage } from '@/components/chat-message' -import { ChatMessageModel } from '@/lib/bots/bing/types' - -export interface ChatList { - messages: ChatMessageModel[] -} - -export function ChatList({ messages }: ChatList) { - if (!messages.length) { - return null - } - - return ( -
- {messages.map((message, index) => ( - - - {index < messages.length - 1 && ( - - )} - - ))} -
- ) -} diff --git a/spaces/OdinStef/Chatapp/app.py b/spaces/OdinStef/Chatapp/app.py deleted file mode 100644 index 096e6d1bfcca60fa3b309ca306eedf5db62b18b0..0000000000000000000000000000000000000000 --- a/spaces/OdinStef/Chatapp/app.py +++ /dev/null @@ -1,25 +0,0 @@ -import openai -import os -import gradio as gr - -openai.api_key = os.getenv("mykey2") - -messages = [{"role": "system", "content": "You are a genius like Albert Einstein"}] - - -def CustomChatGPT(user_input): - messages.append({"role": "user", "content": user_input}) - response = openai.ChatCompletion.create( - model = "gpt-3.5-turbo", - messages = messages - ) - ChatGPT_reply = response["choices"][0]["message"]["content"] - messages.append({"role": "assistant", "content": ChatGPT_reply}) - return ChatGPT_reply - -description = "Cette Intelligence Artificielle est un programme fait par Stephane Dube (dubestephane @ vivaldi.net). Envoyez moi un message pour une requête ou pour une question. \n" -title = "Je peux répondre a presque toute les questions." - -demo = gr.Interface(fn=CustomChatGPT, description = description, title = title, inputs = gr.inputs.Textbox(label = "Question", placeholder="Ex.: Quelle est la plus haute tour du monde?"), outputs = gr.outputs.Textbox(label = "Réponse")) - -demo.launch() diff --git a/spaces/OpenDILabCommunity/DI-sheep/run.sh b/spaces/OpenDILabCommunity/DI-sheep/run.sh deleted file mode 100644 index 0439a29025b4c20e9351d75ced00488457c91bc9..0000000000000000000000000000000000000000 --- a/spaces/OpenDILabCommunity/DI-sheep/run.sh +++ /dev/null @@ -1,3 +0,0 @@ -service nginx start -cd ./DI-sheep/service && nohup python agent_app.py & -cd ./DI-sheep/ui && npm run build && npm run preview diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/layers/shape_spec.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/layers/shape_spec.py deleted file mode 100644 index fe7e8e261c1ab1bb1636bd7a245068d64e632306..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/layers/shape_spec.py +++ /dev/null @@ -1,20 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. -from collections import namedtuple - - -class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])): - """ - A simple structure that contains basic shape specification about a tensor. - It is often used as the auxiliary inputs/outputs of models, - to complement the lack of shape inference ability among pytorch modules. 
- - Attributes: - channels: - height: - width: - stride: - """ - - def __new__(cls, channels=None, height=None, width=None, stride=None): - return super().__new__(cls, channels, height, width, stride) diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/models/ade20k/segm_lib/nn/__init__.py b/spaces/OpenGVLab/InternGPT/third-party/lama/bin/models/ade20k/segm_lib/nn/__init__.py deleted file mode 100644 index 98a96370ef04570f516052bb73f568d0ebc346c3..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/models/ade20k/segm_lib/nn/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .modules import * -from .parallel import UserScatteredDataParallel, user_scattered_collate, async_copy_to diff --git a/spaces/OpenMotionLab/MotionGPT/mGPT/models/utils/cross_attention.py b/spaces/OpenMotionLab/MotionGPT/mGPT/models/utils/cross_attention.py deleted file mode 100644 index deb1f053e575bd0940d12a9cc526a44f689f24c0..0000000000000000000000000000000000000000 --- a/spaces/OpenMotionLab/MotionGPT/mGPT/models/utils/cross_attention.py +++ /dev/null @@ -1,412 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -DETR Transformer class. -Copy-paste from torch.nn.Transformer with modifications: - * positional encodings are passed in MHattention - * extra LN at the end of encoder is removed - * decoder returns a stack of activations from all decoding layers -""" -import copy -from typing import List, Optional -from numpy import block - -import torch -import torch.nn.functional as F -from torch import Tensor, nn - - -class SkipTransformerEncoder(nn.Module): - def __init__(self, encoder_layer, num_layers, norm=None): - super().__init__() - self.d_model = encoder_layer.d_model - - self.num_layers = num_layers - self.norm = norm - - assert num_layers % 2 == 1 - - num_block = (num_layers-1)//2 - self.input_blocks = _get_clones(encoder_layer, num_block) - self.middle_block = _get_clone(encoder_layer) - self.output_blocks = _get_clones(encoder_layer, num_block) - self.linear_blocks = _get_clones(nn.Linear(2*self.d_model, self.d_model), num_block) - - self._reset_parameters() - - def _reset_parameters(self): - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - - def forward(self, src, - mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - x = src - - xs = [] - for module in self.input_blocks: - x = module(x, src_mask=mask, - src_key_padding_mask=src_key_padding_mask, pos=pos) - xs.append(x) - - x = self.middle_block(x, src_mask=mask, - src_key_padding_mask=src_key_padding_mask, pos=pos) - - for (module, linear) in zip(self.output_blocks, self.linear_blocks): - x = torch.cat([x, xs.pop()], dim=-1) - x = linear(x) - x = module(x, src_mask=mask, - src_key_padding_mask=src_key_padding_mask, pos=pos) - - if self.norm is not None: - x = self.norm(x) - return x - -class SkipTransformerDecoder(nn.Module): - def __init__(self, decoder_layer, num_layers, norm=None): - super().__init__() - self.d_model = decoder_layer.d_model - - self.num_layers = num_layers - self.norm = norm - - assert num_layers % 2 == 1 - - num_block = (num_layers-1)//2 - self.input_blocks = _get_clones(decoder_layer, num_block) - self.middle_block = _get_clone(decoder_layer) - self.output_blocks = _get_clones(decoder_layer, num_block) - self.linear_blocks = _get_clones(nn.Linear(2*self.d_model, self.d_model), num_block) - - self._reset_parameters() - - def _reset_parameters(self): - for p in 
self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - - def forward(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - x = tgt - - xs = [] - for module in self.input_blocks: - x = module(x, memory, tgt_mask=tgt_mask, - memory_mask=memory_mask, - tgt_key_padding_mask=tgt_key_padding_mask, - memory_key_padding_mask=memory_key_padding_mask, - pos=pos, query_pos=query_pos) - xs.append(x) - - x = self.middle_block(x, memory, tgt_mask=tgt_mask, - memory_mask=memory_mask, - tgt_key_padding_mask=tgt_key_padding_mask, - memory_key_padding_mask=memory_key_padding_mask, - pos=pos, query_pos=query_pos) - - for (module, linear) in zip(self.output_blocks, self.linear_blocks): - x = torch.cat([x, xs.pop()], dim=-1) - x = linear(x) - x = module(x, memory, tgt_mask=tgt_mask, - memory_mask=memory_mask, - tgt_key_padding_mask=tgt_key_padding_mask, - memory_key_padding_mask=memory_key_padding_mask, - pos=pos, query_pos=query_pos) - - if self.norm is not None: - x = self.norm(x) - - return x - -class Transformer(nn.Module): - - def __init__(self, d_model=512, nhead=8, num_encoder_layers=6, - num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, - activation="relu", normalize_before=False, - return_intermediate_dec=False): - super().__init__() - - encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, - dropout, activation, normalize_before) - encoder_norm = nn.LayerNorm(d_model) if normalize_before else None - self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) - - decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, - dropout, activation, normalize_before) - decoder_norm = nn.LayerNorm(d_model) - self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm, - return_intermediate=return_intermediate_dec) - - self._reset_parameters() - - self.d_model = d_model - self.nhead = nhead - - def _reset_parameters(self): - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - - def forward(self, src, mask, query_embed, pos_embed): - # flatten NxCxHxW to HWxNxC - bs, c, h, w = src.shape - src = src.flatten(2).permute(2, 0, 1) - pos_embed = pos_embed.flatten(2).permute(2, 0, 1) - query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) - mask = mask.flatten(1) - - tgt = torch.zeros_like(query_embed) - memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed) - hs = self.decoder(tgt, memory, memory_key_padding_mask=mask, - pos=pos_embed, query_pos=query_embed) - return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w) - - -class TransformerEncoder(nn.Module): - - def __init__(self, encoder_layer, num_layers, norm=None): - super().__init__() - self.layers = _get_clones(encoder_layer, num_layers) - self.num_layers = num_layers - self.norm = norm - - def forward(self, src, - mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - output = src - - for layer in self.layers: - output = layer(output, src_mask=mask, - src_key_padding_mask=src_key_padding_mask, pos=pos) - - if self.norm is not None: - output = self.norm(output) - - return output - - -class TransformerDecoder(nn.Module): - - def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False): - super().__init__() - self.layers 
= _get_clones(decoder_layer, num_layers) - self.num_layers = num_layers - self.norm = norm - self.return_intermediate = return_intermediate - - def forward(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - output = tgt - - intermediate = [] - - for layer in self.layers: - output = layer(output, memory, tgt_mask=tgt_mask, - memory_mask=memory_mask, - tgt_key_padding_mask=tgt_key_padding_mask, - memory_key_padding_mask=memory_key_padding_mask, - pos=pos, query_pos=query_pos) - if self.return_intermediate: - intermediate.append(self.norm(output)) - - if self.norm is not None: - output = self.norm(output) - if self.return_intermediate: - intermediate.pop() - intermediate.append(output) - - if self.return_intermediate: - return torch.stack(intermediate) - - return output.unsqueeze(0) - - -class TransformerEncoderLayer(nn.Module): - - def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, - activation="relu", normalize_before=False): - super().__init__() - self.d_model = d_model - self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) - # Implementation of Feedforward model - self.linear1 = nn.Linear(d_model, dim_feedforward) - self.dropout = nn.Dropout(dropout) - self.linear2 = nn.Linear(dim_feedforward, d_model) - - self.norm1 = nn.LayerNorm(d_model) - self.norm2 = nn.LayerNorm(d_model) - self.dropout1 = nn.Dropout(dropout) - self.dropout2 = nn.Dropout(dropout) - - self.activation = _get_activation_fn(activation) - self.normalize_before = normalize_before - - def with_pos_embed(self, tensor, pos: Optional[Tensor]): - return tensor if pos is None else tensor + pos - - def forward_post(self, - src, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - q = k = self.with_pos_embed(src, pos) - src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, - key_padding_mask=src_key_padding_mask)[0] - src = src + self.dropout1(src2) - src = self.norm1(src) - src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) - src = src + self.dropout2(src2) - src = self.norm2(src) - return src - - def forward_pre(self, src, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - src2 = self.norm1(src) - q = k = self.with_pos_embed(src2, pos) - src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask, - key_padding_mask=src_key_padding_mask)[0] - src = src + self.dropout1(src2) - src2 = self.norm2(src) - src2 = self.linear2(self.dropout(self.activation(self.linear1(src2)))) - src = src + self.dropout2(src2) - return src - - def forward(self, src, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - if self.normalize_before: - return self.forward_pre(src, src_mask, src_key_padding_mask, pos) - return self.forward_post(src, src_mask, src_key_padding_mask, pos) - - -class TransformerDecoderLayer(nn.Module): - - def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, - activation="relu", normalize_before=False): - super().__init__() - self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) - self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) - # Implementation of Feedforward model - self.d_model = d_model 
- self.linear1 = nn.Linear(d_model, dim_feedforward) - self.dropout = nn.Dropout(dropout) - self.linear2 = nn.Linear(dim_feedforward, d_model) - - self.norm1 = nn.LayerNorm(d_model) - self.norm2 = nn.LayerNorm(d_model) - self.norm3 = nn.LayerNorm(d_model) - self.dropout1 = nn.Dropout(dropout) - self.dropout2 = nn.Dropout(dropout) - self.dropout3 = nn.Dropout(dropout) - - self.activation = _get_activation_fn(activation) - self.normalize_before = normalize_before - - def with_pos_embed(self, tensor, pos: Optional[Tensor]): - return tensor if pos is None else tensor + pos - - def forward_post(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - - q = k = self.with_pos_embed(tgt, query_pos) - tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask, - key_padding_mask=tgt_key_padding_mask)[0] - tgt = tgt + self.dropout1(tgt2) - tgt = self.norm1(tgt) - tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos), - key=self.with_pos_embed(memory, pos), - value=memory, attn_mask=memory_mask, - key_padding_mask=memory_key_padding_mask)[0] - tgt = tgt + self.dropout2(tgt2) - tgt = self.norm2(tgt) - tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) - tgt = tgt + self.dropout3(tgt2) - tgt = self.norm3(tgt) - return tgt - - def forward_pre(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - tgt2 = self.norm1(tgt) - q = k = self.with_pos_embed(tgt2, query_pos) - tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, - key_padding_mask=tgt_key_padding_mask)[0] - tgt = tgt + self.dropout1(tgt2) - tgt2 = self.norm2(tgt) - tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos), - key=self.with_pos_embed(memory, pos), - value=memory, attn_mask=memory_mask, - key_padding_mask=memory_key_padding_mask)[0] - tgt = tgt + self.dropout2(tgt2) - tgt2 = self.norm3(tgt) - tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) - tgt = tgt + self.dropout3(tgt2) - return tgt - - def forward(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - if self.normalize_before: - return self.forward_pre(tgt, memory, tgt_mask, memory_mask, - tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) - return self.forward_post(tgt, memory, tgt_mask, memory_mask, - tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) - - -def _get_clone(module): - return copy.deepcopy(module) - -def _get_clones(module, N): - return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) - - -def build_transformer(args): - return Transformer( - d_model=args.hidden_dim, - dropout=args.dropout, - nhead=args.nheads, - dim_feedforward=args.dim_feedforward, - num_encoder_layers=args.enc_layers, - num_decoder_layers=args.dec_layers, - normalize_before=args.pre_norm, - return_intermediate_dec=True, - ) - - -def _get_activation_fn(activation): - """Return an activation function given a string""" - if activation == "relu": - return F.relu - 
if activation == "gelu": - return F.gelu - if activation == "glu": - return F.glu - raise RuntimeError(F"activation should be relu/gelu, not {activation}.") \ No newline at end of file diff --git a/spaces/PAIR/Text2Video-Zero/annotator/openpose/hand.py b/spaces/PAIR/Text2Video-Zero/annotator/openpose/hand.py deleted file mode 100644 index 3d0bf17165ad7eb225332b51f4a2aa16718664b2..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/openpose/hand.py +++ /dev/null @@ -1,86 +0,0 @@ -import cv2 -import json -import numpy as np -import math -import time -from scipy.ndimage.filters import gaussian_filter -import matplotlib.pyplot as plt -import matplotlib -import torch -from skimage.measure import label - -from .model import handpose_model -from . import util - -class Hand(object): - def __init__(self, model_path): - self.model = handpose_model() - if torch.cuda.is_available(): - self.model = self.model.cuda() - print('cuda') - model_dict = util.transfer(self.model, torch.load(model_path)) - self.model.load_state_dict(model_dict) - self.model.eval() - - def __call__(self, oriImg): - scale_search = [0.5, 1.0, 1.5, 2.0] - # scale_search = [0.5] - boxsize = 368 - stride = 8 - padValue = 128 - thre = 0.05 - multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search] - heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 22)) - # paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38)) - - for m in range(len(multiplier)): - scale = multiplier[m] - imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC) - imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue) - im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5 - im = np.ascontiguousarray(im) - - data = torch.from_numpy(im).float() - if torch.cuda.is_available(): - data = data.cuda() - # data = data.permute([2, 0, 1]).unsqueeze(0).float() - with torch.no_grad(): - output = self.model(data).cpu().numpy() - # output = self.model(data).numpy()q - - # extract outputs, resize, and remove padding - heatmap = np.transpose(np.squeeze(output), (1, 2, 0)) # output 1 is heatmaps - heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC) - heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :] - heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC) - - heatmap_avg += heatmap / len(multiplier) - - all_peaks = [] - for part in range(21): - map_ori = heatmap_avg[:, :, part] - one_heatmap = gaussian_filter(map_ori, sigma=3) - binary = np.ascontiguousarray(one_heatmap > thre, dtype=np.uint8) - # 全部小于阈值 - if np.sum(binary) == 0: - all_peaks.append([0, 0]) - continue - label_img, label_numbers = label(binary, return_num=True, connectivity=binary.ndim) - max_index = np.argmax([np.sum(map_ori[label_img == i]) for i in range(1, label_numbers + 1)]) + 1 - label_img[label_img != max_index] = 0 - map_ori[label_img == 0] = 0 - - y, x = util.npmax(map_ori) - all_peaks.append([x, y]) - return np.array(all_peaks) - -if __name__ == "__main__": - hand_estimation = Hand('../model/hand_pose_model.pth') - - # test_image = '../images/hand.jpg' - test_image = '../images/hand.jpg' - oriImg = cv2.imread(test_image) # B,G,R order - peaks = hand_estimation(oriImg) - canvas = util.draw_handpose(oriImg, peaks, True) - cv2.imshow('', canvas) - cv2.waitKey(0) \ No newline at end of file diff --git 
a/spaces/PAIR/Text2Video-Zero/annotator/openpose/util.py b/spaces/PAIR/Text2Video-Zero/annotator/openpose/util.py deleted file mode 100644 index 6f91ae0e65abaf0cbd62d803f56498991141e61b..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/openpose/util.py +++ /dev/null @@ -1,164 +0,0 @@ -import math -import numpy as np -import matplotlib -import cv2 - - -def padRightDownCorner(img, stride, padValue): - h = img.shape[0] - w = img.shape[1] - - pad = 4 * [None] - pad[0] = 0 # up - pad[1] = 0 # left - pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down - pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right - - img_padded = img - pad_up = np.tile(img_padded[0:1, :, :]*0 + padValue, (pad[0], 1, 1)) - img_padded = np.concatenate((pad_up, img_padded), axis=0) - pad_left = np.tile(img_padded[:, 0:1, :]*0 + padValue, (1, pad[1], 1)) - img_padded = np.concatenate((pad_left, img_padded), axis=1) - pad_down = np.tile(img_padded[-2:-1, :, :]*0 + padValue, (pad[2], 1, 1)) - img_padded = np.concatenate((img_padded, pad_down), axis=0) - pad_right = np.tile(img_padded[:, -2:-1, :]*0 + padValue, (1, pad[3], 1)) - img_padded = np.concatenate((img_padded, pad_right), axis=1) - - return img_padded, pad - -# transfer caffe model to pytorch which will match the layer name -def transfer(model, model_weights): - transfered_model_weights = {} - for weights_name in model.state_dict().keys(): - transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])] - return transfered_model_weights - -# draw the body keypoint and lims -def draw_bodypose(canvas, candidate, subset): - stickwidth = 4 - limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \ - [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \ - [1, 16], [16, 18], [3, 17], [6, 18]] - - colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \ - [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \ - [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]] - for i in range(18): - for n in range(len(subset)): - index = int(subset[n][i]) - if index == -1: - continue - x, y = candidate[index][0:2] - cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1) - for i in range(17): - for n in range(len(subset)): - index = subset[n][np.array(limbSeq[i]) - 1] - if -1 in index: - continue - cur_canvas = canvas.copy() - Y = candidate[index.astype(int), 0] - X = candidate[index.astype(int), 1] - mX = np.mean(X) - mY = np.mean(Y) - length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5 - angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) - polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1) - cv2.fillConvexPoly(cur_canvas, polygon, colors[i]) - canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0) - # plt.imsave("preview.jpg", canvas[:, :, [2, 1, 0]]) - # plt.imshow(canvas[:, :, [2, 1, 0]]) - return canvas - - -# image drawed by opencv is not good. 
-def draw_handpose(canvas, all_hand_peaks, show_number=False): - edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \ - [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]] - - for peaks in all_hand_peaks: - for ie, e in enumerate(edges): - if np.sum(np.all(peaks[e], axis=1)==0)==0: - x1, y1 = peaks[e[0]] - x2, y2 = peaks[e[1]] - cv2.line(canvas, (x1, y1), (x2, y2), matplotlib.colors.hsv_to_rgb([ie/float(len(edges)), 1.0, 1.0])*255, thickness=2) - - for i, keyponit in enumerate(peaks): - x, y = keyponit - cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1) - if show_number: - cv2.putText(canvas, str(i), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0), lineType=cv2.LINE_AA) - return canvas - -# detect hand according to body pose keypoints -# please refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp -def handDetect(candidate, subset, oriImg): - # right hand: wrist 4, elbow 3, shoulder 2 - # left hand: wrist 7, elbow 6, shoulder 5 - ratioWristElbow = 0.33 - detect_result = [] - image_height, image_width = oriImg.shape[0:2] - for person in subset.astype(int): - # if any of three not detected - has_left = np.sum(person[[5, 6, 7]] == -1) == 0 - has_right = np.sum(person[[2, 3, 4]] == -1) == 0 - if not (has_left or has_right): - continue - hands = [] - #left hand - if has_left: - left_shoulder_index, left_elbow_index, left_wrist_index = person[[5, 6, 7]] - x1, y1 = candidate[left_shoulder_index][:2] - x2, y2 = candidate[left_elbow_index][:2] - x3, y3 = candidate[left_wrist_index][:2] - hands.append([x1, y1, x2, y2, x3, y3, True]) - # right hand - if has_right: - right_shoulder_index, right_elbow_index, right_wrist_index = person[[2, 3, 4]] - x1, y1 = candidate[right_shoulder_index][:2] - x2, y2 = candidate[right_elbow_index][:2] - x3, y3 = candidate[right_wrist_index][:2] - hands.append([x1, y1, x2, y2, x3, y3, False]) - - for x1, y1, x2, y2, x3, y3, is_left in hands: - # pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbox) = (1 + ratio) * pos_wrist - ratio * pos_elbox - # handRectangle.x = posePtr[wrist*3] + ratioWristElbow * (posePtr[wrist*3] - posePtr[elbow*3]); - # handRectangle.y = posePtr[wrist*3+1] + ratioWristElbow * (posePtr[wrist*3+1] - posePtr[elbow*3+1]); - # const auto distanceWristElbow = getDistance(poseKeypoints, person, wrist, elbow); - # const auto distanceElbowShoulder = getDistance(poseKeypoints, person, elbow, shoulder); - # handRectangle.width = 1.5f * fastMax(distanceWristElbow, 0.9f * distanceElbowShoulder); - x = x3 + ratioWristElbow * (x3 - x2) - y = y3 + ratioWristElbow * (y3 - y2) - distanceWristElbow = math.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2) - distanceElbowShoulder = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2) - width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder) - # x-y refers to the center --> offset to topLeft point - # handRectangle.x -= handRectangle.width / 2.f; - # handRectangle.y -= handRectangle.height / 2.f; - x -= width / 2 - y -= width / 2 # width = height - # overflow the image - if x < 0: x = 0 - if y < 0: y = 0 - width1 = width - width2 = width - if x + width > image_width: width1 = image_width - x - if y + width > image_height: width2 = image_height - y - width = min(width1, width2) - # the max hand box value is 20 pixels - if width >= 20: - detect_result.append([int(x), int(y), int(width), is_left]) - - ''' - return value: [[x, y, w, True if left hand else False]]. 
- width=height since the network require squared input. - x, y is the coordinate of top left - ''' - return detect_result - -# get max index of 2d array -def npmax(array): - arrayindex = array.argmax(1) - arrayvalue = array.max(1) - i = arrayvalue.argmax() - j = arrayindex[i] - return i, j diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/configs/_base_/models/nonlocal_r50-d8.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/configs/_base_/models/nonlocal_r50-d8.py deleted file mode 100644 index 5674a39854cafd1f2e363bac99c58ccae62f24da..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/configs/_base_/models/nonlocal_r50-d8.py +++ /dev/null @@ -1,46 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='open-mmlab://resnet50_v1c', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='NLHead', - in_channels=2048, - in_index=3, - channels=512, - dropout_ratio=0.1, - reduction=2, - use_scale=True, - mode='embedded_gaussian', - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/ParthRangarajan/Centauri_Pilot/README.md b/spaces/ParthRangarajan/Centauri_Pilot/README.md deleted file mode 100644 index 114f770d5f591020c148e6a76e08a83eadc9f51a..0000000000000000000000000000000000000000 --- a/spaces/ParthRangarajan/Centauri_Pilot/README.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: Centauri_Pilot -emoji: 🐢 -colorFrom: gray -colorTo: purple -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`models`: _List[string]_ -HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space. -Will be parsed automatically from your code if not specified here. - -`datasets`: _List[string]_ -HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space. -Will be parsed automatically from your code if not specified here. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/system/vm/trap-state.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/system/vm/trap-state.go deleted file mode 100644 index 95da72ed2bc53a6bae2d98f15835c96cc3fbb1d2..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/system/vm/trap-state.go and /dev/null differ diff --git a/spaces/PeepDaSlan9/OpenAssistant-falcon-7b-sft-mix-2000/app.py b/spaces/PeepDaSlan9/OpenAssistant-falcon-7b-sft-mix-2000/app.py deleted file mode 100644 index 37573fd48e2618cf081342b84bd04e484d860504..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/OpenAssistant-falcon-7b-sft-mix-2000/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/OpenAssistant/falcon-7b-sft-mix-2000").launch() \ No newline at end of file diff --git a/spaces/PeepDaSlan9/SDXL-artists-browser/style.css b/spaces/PeepDaSlan9/SDXL-artists-browser/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/SDXL-artists-browser/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/decode_heads/sep_aspp_head.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/decode_heads/sep_aspp_head.py deleted file mode 100644 index 3339a7ac56e77dfc638e9bffb557d4699148686b..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/decode_heads/sep_aspp_head.py +++ /dev/null @@ -1,101 +0,0 @@ -import torch -import torch.nn as nn -from annotator.uniformer.mmcv.cnn import ConvModule, DepthwiseSeparableConvModule - -from annotator.uniformer.mmseg.ops import resize -from ..builder import HEADS -from .aspp_head import ASPPHead, ASPPModule - - -class DepthwiseSeparableASPPModule(ASPPModule): - """Atrous Spatial Pyramid Pooling (ASPP) Module with depthwise separable - conv.""" - - def __init__(self, **kwargs): - super(DepthwiseSeparableASPPModule, self).__init__(**kwargs) - for i, dilation in enumerate(self.dilations): - if dilation > 1: - self[i] = DepthwiseSeparableConvModule( - self.in_channels, - self.channels, - 3, - dilation=dilation, - padding=dilation, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - -@HEADS.register_module() -class DepthwiseSeparableASPPHead(ASPPHead): - """Encoder-Decoder with Atrous Separable Convolution for Semantic Image - Segmentation. - - This head is the implementation of `DeepLabV3+ - `_. - - Args: - c1_in_channels (int): The input channels of c1 decoder. If is 0, - the no decoder will be used. - c1_channels (int): The intermediate channels of c1 decoder. 
- """ - - def __init__(self, c1_in_channels, c1_channels, **kwargs): - super(DepthwiseSeparableASPPHead, self).__init__(**kwargs) - assert c1_in_channels >= 0 - self.aspp_modules = DepthwiseSeparableASPPModule( - dilations=self.dilations, - in_channels=self.in_channels, - channels=self.channels, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - if c1_in_channels > 0: - self.c1_bottleneck = ConvModule( - c1_in_channels, - c1_channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - else: - self.c1_bottleneck = None - self.sep_bottleneck = nn.Sequential( - DepthwiseSeparableConvModule( - self.channels + c1_channels, - self.channels, - 3, - padding=1, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg), - DepthwiseSeparableConvModule( - self.channels, - self.channels, - 3, - padding=1, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg)) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - aspp_outs = [ - resize( - self.image_pool(x), - size=x.size()[2:], - mode='bilinear', - align_corners=self.align_corners) - ] - aspp_outs.extend(self.aspp_modules(x)) - aspp_outs = torch.cat(aspp_outs, dim=1) - output = self.bottleneck(aspp_outs) - if self.c1_bottleneck is not None: - c1_output = self.c1_bottleneck(inputs[0]) - output = resize( - input=output, - size=c1_output.shape[2:], - mode='bilinear', - align_corners=self.align_corners) - output = torch.cat([output, c1_output], dim=1) - output = self.sep_bottleneck(output) - output = self.cls_seg(output) - return output diff --git a/spaces/Prathap/summarization/README.md b/spaces/Prathap/summarization/README.md deleted file mode 100644 index ee9f7cd7066457b6fdd193b079f5f04216b3b2ab..0000000000000000000000000000000000000000 --- a/spaces/Prathap/summarization/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Summarization -emoji: ⚡ -colorFrom: blue -colorTo: green -sdk: streamlit -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
diff --git a/spaces/Qiwei97/Airbnb_tool/app.py b/spaces/Qiwei97/Airbnb_tool/app.py deleted file mode 100644 index a9468e3a192fa13bed4dbbf393bdfebacf0e81ee..0000000000000000000000000000000000000000 --- a/spaces/Qiwei97/Airbnb_tool/app.py +++ /dev/null @@ -1,257 +0,0 @@ - -import streamlit as st -import numpy as np -import seaborn as sns -import matplotlib.pyplot as plt -import numpy_financial as npf -import pandas as pd -from streamlit_folium import folium_static -import leafmap.foliumap as leafmap -import folium -from shapely.geometry import Point, Polygon -import geopandas -import geopy -from geopy.geocoders import Nominatim -from geopy.extra.rate_limiter import RateLimiter -from scipy.spatial import cKDTree - -#----------------------------------------- -# Set page settings -st.set_page_config(layout="wide") - -#----------------------------------------- -# Sidebar -with st.sidebar: - st.header('Welcome to the Airbnb Investment Tool!') - nav = st.selectbox('Navigation', ['Heuristic Pricing', - 'Investment Analysis']) - -#----------------------------------------- -# Additional Functions - -def p_title(title): - st.markdown(f'

{title}

', unsafe_allow_html=True) - - -# Function to return a GeoPandas DataFrame containing the listings -# that are within a specified radius from a specified lat, long. -def getNearbyListings(gdf_proj, input_long, input_lat, radius): - # Build Tree - airbnbCoords = np.array(list(gdf_proj.geometry.apply(lambda x: (x.x, x.y)))) - airbnbTree = cKDTree(airbnbCoords) - - # Convert lat-long to projected coords - gdf_input = geopandas.GeoSeries.from_xy(x=[input_long], y=[input_lat], crs=4326) - gdf_input_proj = gdf_input.to_crs(crs=32634) - - coords = np.array(list((gdf_input_proj.x[0], gdf_input_proj.y[0]))) - - # Returns list of indices whose distance is <= radius - neighbours_indices = airbnbTree.query_ball_point(coords, radius) - gdf_neighbours_proj = gdf_proj.iloc[neighbours_indices, :] - gdf_neighbours = gdf_neighbours_proj.to_crs(crs=4326) - - return gdf_neighbours - -# Function to return IRR. -# Financial Modelling Tool. -def investment_tool(house_price, loan_amount, loan_period, percentage_loan_interest_annual, - rental_charged_monthly, percentage_rental_tax, percentage_increase_in_rental_yearly, utilisation_rate, - yearly_refurbishment_costs, percentage_increase_in_refurbishment_yearly, ending_value_of_house): - - #expected format of percentage parameters is whole number and not decimals i.e., 5 instead of 0.05 - #all non-% parameters are expected to be positive - - house_price = int(house_price) - loan_amount = int(loan_amount) - loan_period = int(loan_period) - percentage_loan_interest_annual = int(percentage_loan_interest_annual) - rental_charged_monthly = int(rental_charged_monthly) - percentage_rental_tax = int(percentage_rental_tax) - percentage_increase_in_rental_yearly = int(percentage_increase_in_rental_yearly) - percentage_utilisation_rate = int(utilisation_rate) - yearly_refurbishment_costs = int(yearly_refurbishment_costs) - percentage_increase_in_refurbishment_yearly = int(percentage_increase_in_refurbishment_yearly) - ending_value_of_house = int(ending_value_of_house) - - #ensuring the figures make sense - if loan_amount > house_price: - return print("Loan Amount cannot exceed House Price") - - #creating the list of cash flows to be used to calculate internal rate of return - initial_cashflow = -(1 - loan_amount/house_price) * house_price - cashflow_list = [initial_cashflow] - - #finding the annual mortgage assuming equal amortization - mortgage = npf.pmt(percentage_loan_interest_annual / 100, loan_period, loan_amount) #the np.pmt function will automatically put mortgage as a negative cashflow - - #finding the annual cashflows & loan balance changes during the loan period and appending them to the respective lists - for i in range(loan_period): - rental = 12 * rental_charged_monthly * ((1 + (percentage_increase_in_rental_yearly / 100)) ** i) * (1 - (percentage_rental_tax / 100)) * utilisation_rate / 100 - refurbishment_cost = -1 * yearly_refurbishment_costs * ((1 + (percentage_increase_in_refurbishment_yearly / 100)) ** i) - - #the condition here is to include the salvage/ending value of the house to cashflows after loan repayments are finished - if i == (loan_period-1): - yearly_cashflow = ending_value_of_house + rental + mortgage + refurbishment_cost - else: - yearly_cashflow = rental + mortgage + refurbishment_cost - cashflow_list.append(yearly_cashflow) - - #finding the internal rate of return - irr = round(npf.irr(cashflow_list), 4) - - #----------------------------------- - #Dataframe for plotting of graph - loan_dict = {'Year': [0], 'Starting Loan Balance': [0], 
'Cumulative Interest Paid': [0], 'Cumulative Principal Paid': [0], 'Remaining Loan Balance': [0]} - - # Create DataFrame - loan_dataframe = pd.DataFrame(loan_dict) - - #finding the annual mortgage assuming equal amortization - mortgage = npf.pmt(percentage_loan_interest_annual / 100, loan_period, loan_amount) #the np.pmt function will automatically put mortgage as a negative cashflow - - #updating the global dataframe - loan_dataframe.loc[0,'Starting Loan Balance'] = loan_amount - - for i in range(loan_period): - loan_dataframe.loc[i,'Year'] = i+1 - - #the condition here is to calculate principal and interest paid - if i == 0: - loan_dataframe.loc[i,'Cumulative Interest Paid'] = loan_dataframe.loc[i,'Starting Loan Balance'] * (percentage_loan_interest_annual / 100) - loan_dataframe.loc[i,'Cumulative Principal Paid'] = (-1 * mortgage) - (loan_dataframe.loc[i,'Starting Loan Balance'] * (percentage_loan_interest_annual / 100)) - else: - loan_dataframe.loc[i,'Cumulative Interest Paid'] = loan_dataframe.loc[i,'Starting Loan Balance'] * (percentage_loan_interest_annual / 100) + loan_dataframe.loc[i-1,'Cumulative Interest Paid'] - loan_dataframe.loc[i,'Cumulative Principal Paid'] = (-1 * mortgage) - (loan_dataframe.loc[i,'Starting Loan Balance'] * (percentage_loan_interest_annual / 100)) + loan_dataframe.loc[i-1,'Cumulative Principal Paid'] - - loan_dataframe.loc[i,'Remaining Loan Balance'] = loan_dataframe.loc[i,'Starting Loan Balance'] + (loan_dataframe.loc[i,'Starting Loan Balance'] * (percentage_loan_interest_annual / 100)) + mortgage - - #condition to update starting loan balance - if i != loan_period-1: - loan_dataframe.loc[i+1,'Starting Loan Balance'] = loan_dataframe.loc[i,'Remaining Loan Balance'] - - loan_dataframe['Remaining Loan Balance'] = pd.to_numeric(loan_dataframe['Remaining Loan Balance']) - - return irr, loan_dataframe - -#----------------------------------------- -# Load Airbnb listings data -df_raw = pd.read_csv("data/listings_sf_withamenities.csv") -df = df_raw.copy() -gdf = geopandas.GeoDataFrame( - df, - geometry=geopandas.points_from_xy(df.longitude, df.latitude), - crs=4326) -gdf_proj = gdf.to_crs(crs=32634) - -#----------------------------------------- -# Tab 1: Heuristic Pricing -if nav == 'Heuristic Pricing': - - st.markdown("

Airbnb 🏠

", unsafe_allow_html=True) - st.text('') - p_title('Heuristic Pricing') - st.text('') - - # Get address inputs - st.caption('Enter your address:') - with st.form("heuristics_form"): - col1, col2 = st.columns(2) - with col1: - postalcode = st.text_input("Postal Code", "94109") - street = st.text_input("Street", "1788 Clay Street") - city = st.selectbox("City", ["San Francisco"]) - with col2: - state = st.selectbox("State", ["California"]) - country = st.selectbox("Country", ["United States"]) - radius = st.slider("Distance of nearest listings (metres)", min_value=500, max_value=2000, value=500, step=500) - submitted = st.form_submit_button("Submit") - - if submitted: - # Get geolocation - geolocator = Nominatim(user_agent="GTA Lookup") - geocode = RateLimiter(geolocator.geocode, min_delay_seconds=1) - location = geolocator.geocode({"postalcode": postalcode, "street": street, "city": city, "state": state, "country": country}) - - # If the search address yields no result, set to default coords of San Fran - if location is None: - lat = 37.773972 - lon = -122.431297 - st.error("Address is not found. Please try again.") - else: - lat = location.latitude - lon = location.longitude - - # Compute Stats - st.markdown('___') - st.caption('Recommended Pricing:') - gdf_nearby_listings = getNearbyListings(gdf_proj, lon, lat, radius=radius) - if len(gdf_nearby_listings) == 0: - st.error("There are no nearby listings.") - else: - col3, col4 = st.columns(2) - with col3: - df_nearby_stats = gdf_nearby_listings[["price"]].describe().round(2) - df_nearby_stats.columns = pd.Index(["Price"]) - st.table(df_nearby_stats.style.format("{:.2f}")) - with col4: - # Plot Stats - fig = plt.figure(figsize=(10, 4)) - sns.boxplot(x="price", data=gdf_nearby_listings, showfliers=False) - st.pyplot(fig) - - # Plot using leafmap. Responsive width. - m = leafmap.Map(tiles="OpenStreetMap", location=[lat, lon], zoom_start=15) - m.add_marker(location=[lat, lon]) - m.add_points_from_xy(gdf_nearby_listings, x="longitude", y="latitude", - popup=["id", "price", "review_scores_rating"], - color_options=['red']) - m.add_heatmap(data=gdf_nearby_listings, - latitude="latitude", longitude="longitude", - value="price", min_opacity=0.1, - name="Price Heatmap", blue=50) - m.to_streamlit() - -#----------------------------------------- -# Tab 2: Investment Analysis -if nav == 'Investment Analysis': - st.markdown("

Airbnb 🏠

", unsafe_allow_html=True) - st.text('') - p_title('Investment Analysis') - - # Financial Projections - st.caption("Enter data here") - with st.form("investment_form"): - col1_2, col2_2, col3_2 = st.columns(3) - with col1_2: - house_price = st.number_input("Purchase Price of House ($)", min_value=0, value=250000) - loan_amount = st.number_input("Loan Amount ($)", min_value=0, value=150000) - loan_period = st.number_input("Loan Period (Years)", min_value=0, value=15) - percentage_loan_interest_annual = st.number_input("Annual Loan I/R (%)", min_value=0.0, max_value=100.0, value=2.1) - with col2_2: - rental_charged_monthly = st.number_input("Monthly Rental ($)", min_value=0, value=2000) - percentage_rental_tax = st.number_input("Rental Tax (%)", min_value=0.0, value=0.0) - percentage_increase_in_rental_yearly = st.number_input("Annual Rental Increase (%)", min_value=0.0, value=1.0) - utilisation_rate = st.number_input("Utilisation Rate (%)", min_value=0.0, value=50.0) - with col3_2: - yearly_refurbishment_costs = st.number_input("Yearly Refurbishment Costs ($)", min_value=0, value=3000) - percentage_increase_in_refurbishment_yearly = st.number_input("Yearly Refurbishment Costs Increase (%)", min_value=0.0, value=2.0) - ending_value_of_house = st.number_input("Ending Value of House ($)", min_value=0, value=300000) - submitted2 = st.form_submit_button("Submit") - - if submitted2: - irr, loan_dataframe = investment_tool(house_price, loan_amount, loan_period, percentage_loan_interest_annual, - rental_charged_monthly, percentage_rental_tax, percentage_increase_in_rental_yearly, utilisation_rate, - yearly_refurbishment_costs, percentage_increase_in_refurbishment_yearly, ending_value_of_house) - st.markdown('___') - st.caption("Expected Internal Rate of Return") - st.text("{:.2%}".format(irr)) - # Print plots - fig = plt.figure(figsize=(10, 4)) - plt.bar(loan_dataframe['Year'], loan_dataframe['Cumulative Principal Paid'], color='lightcoral') - plt.bar(loan_dataframe['Year'], loan_dataframe['Cumulative Interest Paid'], bottom=loan_dataframe['Cumulative Principal Paid'], color='lightsalmon') - plt.plot(loan_dataframe['Year'], loan_dataframe['Remaining Loan Balance'], color='crimson') - plt.ylabel('Amount') - plt.title('Loan Balance') - plt.legend(('Loan Balance Remaining','Cumulative Principal Paid', 'Cumulative Interest Paid')) - st.pyplot(fig) \ No newline at end of file diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/chardet/cli/chardetect.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/chardet/cli/chardetect.py deleted file mode 100644 index 7926fa37e380d87dbcd30f9fce6cb46359f7d3bb..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/chardet/cli/chardetect.py +++ /dev/null @@ -1,86 +0,0 @@ -""" -Script which takes one or more file paths and reports on their detected -encodings - -Example:: - - % chardetect somefile someotherfile - somefile: windows-1252 with confidence 0.5 - someotherfile: ascii with confidence 1.0 - -If no paths are provided, it takes its input from stdin. - -""" - - -import argparse -import sys - -from .. import __version__ -from ..universaldetector import UniversalDetector - - -def description_of(lines, name="stdin"): - """ - Return a string describing the probable encoding of a file or - list of strings. - - :param lines: The lines to get the encoding of. 
- :type lines: Iterable of bytes - :param name: Name of file or collection of lines - :type name: str - """ - u = UniversalDetector() - for line in lines: - line = bytearray(line) - u.feed(line) - # shortcut out of the loop to save reading further - particularly useful if we read a BOM. - if u.done: - break - u.close() - result = u.result - if result["encoding"]: - return f'{name}: {result["encoding"]} with confidence {result["confidence"]}' - return f"{name}: no result" - - -def main(argv=None): - """ - Handles command line arguments and gets things started. - - :param argv: List of arguments, as if specified on the command-line. - If None, ``sys.argv[1:]`` is used instead. - :type argv: list of str - """ - # Get command line arguments - parser = argparse.ArgumentParser( - description="Takes one or more file paths and reports their detected \ - encodings" - ) - parser.add_argument( - "input", - help="File whose encoding we would like to determine. \ - (default: stdin)", - type=argparse.FileType("rb"), - nargs="*", - default=[sys.stdin.buffer], - ) - parser.add_argument( - "--version", action="version", version=f"%(prog)s {__version__}" - ) - args = parser.parse_args(argv) - - for f in args.input: - if f.isatty(): - print( - "You are running chardetect interactively. Press " - "CTRL-D twice at the start of a blank line to signal the " - "end of your input. If you want help, run chardetect " - "--help\n", - file=sys.stderr, - ) - print(description_of(f, f.name)) - - -if __name__ == "__main__": - main() diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/chardet/langturkishmodel.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/chardet/langturkishmodel.py deleted file mode 100644 index 291857c25c83f91a151c1d7760e8e5e09c1ee238..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/chardet/langturkishmodel.py +++ /dev/null @@ -1,4380 +0,0 @@ -from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel - -# 3: Positive -# 2: Likely -# 1: Unlikely -# 0: Negative - -TURKISH_LANG_MODEL = { - 23: { # 'A' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 0, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 0, # 'b' - 28: 0, # 'c' - 12: 2, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 1, # 'g' - 25: 1, # 'h' - 3: 1, # 'i' - 24: 0, # 'j' - 10: 2, # 'k' - 5: 1, # 'l' - 13: 1, # 'm' - 4: 1, # 'n' - 15: 0, # 'o' - 26: 0, # 'p' - 7: 1, # 'r' - 8: 1, # 's' - 9: 1, # 't' - 14: 1, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 3, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 1, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 0, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 0, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 37: { # 'B' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 2, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 2, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 1, # 'K' - 49: 0, # 'L' - 20: 0, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 1, # 'P' - 44: 0, # 'R' - 35: 1, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 1, # 'V' - 62: 0, # 'W' - 43: 1, # 'Y' - 56: 0, # 'Z' - 1: 2, # 'a' - 21: 0, # 'b' - 28: 2, # 'c' - 12: 0, # 'd' - 2: 3, # 'e' - 
18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 0, # 'i' - 24: 0, # 'j' - 10: 0, # 'k' - 5: 0, # 'l' - 13: 1, # 'm' - 4: 1, # 'n' - 15: 0, # 'o' - 26: 0, # 'p' - 7: 0, # 'r' - 8: 0, # 's' - 9: 0, # 't' - 14: 2, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 1, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 1, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 1, # 'ö' - 17: 0, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 0, # 'ı' - 40: 1, # 'Ş' - 19: 1, # 'ş' - }, - 47: { # 'C' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 1, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 49: 1, # 'L' - 20: 0, # 'M' - 46: 1, # 'N' - 42: 0, # 'O' - 48: 1, # 'P' - 44: 1, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 1, # 'V' - 62: 0, # 'W' - 43: 1, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 0, # 'b' - 28: 2, # 'c' - 12: 0, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 0, # 'i' - 24: 2, # 'j' - 10: 1, # 'k' - 5: 2, # 'l' - 13: 2, # 'm' - 4: 2, # 'n' - 15: 1, # 'o' - 26: 0, # 'p' - 7: 2, # 'r' - 8: 0, # 's' - 9: 0, # 't' - 14: 3, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 2, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 1, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 1, # 'ç' - 61: 0, # 'î' - 34: 1, # 'ö' - 17: 0, # 'ü' - 30: 0, # 'ğ' - 41: 1, # 'İ' - 6: 3, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 39: { # 'D' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 1, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 1, # 'K' - 49: 0, # 'L' - 20: 0, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 1, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 2, # 'a' - 21: 0, # 'b' - 28: 2, # 'c' - 12: 0, # 'd' - 2: 2, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 0, # 'i' - 24: 0, # 'j' - 10: 0, # 'k' - 5: 1, # 'l' - 13: 3, # 'm' - 4: 0, # 'n' - 15: 1, # 'o' - 26: 0, # 'p' - 7: 0, # 'r' - 8: 0, # 's' - 9: 0, # 't' - 14: 1, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 1, # 'z' - 63: 0, # '·' - 54: 1, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 1, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 0, # 'ü' - 30: 1, # 'ğ' - 41: 0, # 'İ' - 6: 1, # 'ı' - 40: 1, # 'Ş' - 19: 0, # 'ş' - }, - 29: { # 'E' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 1, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 3, # 'K' - 49: 0, # 'L' - 20: 1, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 0, # 'b' - 28: 0, # 'c' - 12: 2, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 1, # 'g' - 25: 0, # 'h' - 3: 1, # 'i' - 24: 1, # 'j' - 10: 0, # 'k' - 5: 3, # 'l' - 13: 3, # 'm' - 4: 3, # 'n' - 15: 0, # 'o' - 26: 0, # 'p' - 7: 0, # 'r' - 8: 1, # 's' - 9: 1, # 't' - 14: 1, # 'u' - 32: 1, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 2, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 0, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 3, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 52: { # 'F' - 23: 0, # 'A' - 37: 1, # 'B' - 47: 1, # 'C' - 39: 1, # 'D' - 29: 1, # 'E' - 52: 2, # 'F' - 36: 0, # 'G' - 45: 2, # 'H' - 53: 1, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 
49: 0, # 'L' - 20: 1, # 'M' - 46: 1, # 'N' - 42: 1, # 'O' - 48: 2, # 'P' - 44: 1, # 'R' - 35: 1, # 'S' - 31: 1, # 'T' - 51: 1, # 'U' - 38: 1, # 'V' - 62: 0, # 'W' - 43: 2, # 'Y' - 56: 0, # 'Z' - 1: 0, # 'a' - 21: 1, # 'b' - 28: 1, # 'c' - 12: 1, # 'd' - 2: 0, # 'e' - 18: 1, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 2, # 'i' - 24: 1, # 'j' - 10: 0, # 'k' - 5: 0, # 'l' - 13: 1, # 'm' - 4: 2, # 'n' - 15: 1, # 'o' - 26: 0, # 'p' - 7: 2, # 'r' - 8: 1, # 's' - 9: 1, # 't' - 14: 1, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 1, # 'y' - 22: 1, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 1, # 'Ö' - 55: 2, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 2, # 'ö' - 17: 0, # 'ü' - 30: 1, # 'ğ' - 41: 1, # 'İ' - 6: 2, # 'ı' - 40: 0, # 'Ş' - 19: 2, # 'ş' - }, - 36: { # 'G' - 23: 1, # 'A' - 37: 0, # 'B' - 47: 1, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 1, # 'F' - 36: 2, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 2, # 'K' - 49: 0, # 'L' - 20: 0, # 'M' - 46: 2, # 'N' - 42: 1, # 'O' - 48: 1, # 'P' - 44: 1, # 'R' - 35: 1, # 'S' - 31: 0, # 'T' - 51: 1, # 'U' - 38: 2, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 0, # 'b' - 28: 1, # 'c' - 12: 0, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 0, # 'i' - 24: 1, # 'j' - 10: 1, # 'k' - 5: 0, # 'l' - 13: 3, # 'm' - 4: 2, # 'n' - 15: 0, # 'o' - 26: 1, # 'p' - 7: 0, # 'r' - 8: 1, # 's' - 9: 1, # 't' - 14: 3, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 1, # 'x' - 11: 0, # 'y' - 22: 2, # 'z' - 63: 0, # '·' - 54: 1, # 'Ç' - 50: 2, # 'Ö' - 55: 0, # 'Ü' - 59: 1, # 'â' - 33: 2, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 0, # 'ü' - 30: 1, # 'ğ' - 41: 1, # 'İ' - 6: 2, # 'ı' - 40: 2, # 'Ş' - 19: 1, # 'ş' - }, - 45: { # 'H' - 23: 0, # 'A' - 37: 1, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 2, # 'F' - 36: 2, # 'G' - 45: 1, # 'H' - 53: 1, # 'I' - 60: 0, # 'J' - 16: 2, # 'K' - 49: 1, # 'L' - 20: 0, # 'M' - 46: 1, # 'N' - 42: 1, # 'O' - 48: 1, # 'P' - 44: 0, # 'R' - 35: 2, # 'S' - 31: 0, # 'T' - 51: 1, # 'U' - 38: 2, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 0, # 'b' - 28: 2, # 'c' - 12: 0, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 2, # 'i' - 24: 0, # 'j' - 10: 1, # 'k' - 5: 0, # 'l' - 13: 2, # 'm' - 4: 0, # 'n' - 15: 1, # 'o' - 26: 1, # 'p' - 7: 1, # 'r' - 8: 0, # 's' - 9: 0, # 't' - 14: 3, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 2, # 'z' - 63: 0, # '·' - 54: 1, # 'Ç' - 50: 1, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 1, # 'ç' - 61: 0, # 'î' - 34: 1, # 'ö' - 17: 0, # 'ü' - 30: 2, # 'ğ' - 41: 1, # 'İ' - 6: 0, # 'ı' - 40: 2, # 'Ş' - 19: 1, # 'ş' - }, - 53: { # 'I' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 1, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 2, # 'K' - 49: 0, # 'L' - 20: 0, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 1, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 2, # 'a' - 21: 0, # 'b' - 28: 2, # 'c' - 12: 0, # 'd' - 2: 2, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 0, # 'i' - 24: 0, # 'j' - 10: 0, # 'k' - 5: 2, # 'l' - 13: 2, # 'm' - 4: 0, # 'n' - 15: 0, # 'o' - 26: 0, # 'p' - 7: 0, # 'r' - 8: 0, # 's' - 9: 0, # 't' - 14: 2, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 2, # 'z' - 63: 0, # '·' - 54: 1, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 2, # 'ç' - 61: 0, # 'î' - 34: 1, # 'ö' - 17: 0, 
# 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 0, # 'ı' - 40: 1, # 'Ş' - 19: 1, # 'ş' - }, - 60: { # 'J' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 1, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 0, # 'a' - 21: 1, # 'b' - 28: 0, # 'c' - 12: 1, # 'd' - 2: 0, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 1, # 'i' - 24: 0, # 'j' - 10: 0, # 'k' - 5: 0, # 'l' - 13: 0, # 'm' - 4: 1, # 'n' - 15: 0, # 'o' - 26: 0, # 'p' - 7: 0, # 'r' - 8: 1, # 's' - 9: 0, # 't' - 14: 0, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 0, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 0, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 16: { # 'K' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 3, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 2, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 2, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 2, # 'a' - 21: 3, # 'b' - 28: 0, # 'c' - 12: 3, # 'd' - 2: 1, # 'e' - 18: 3, # 'f' - 27: 3, # 'g' - 25: 3, # 'h' - 3: 3, # 'i' - 24: 2, # 'j' - 10: 3, # 'k' - 5: 0, # 'l' - 13: 0, # 'm' - 4: 3, # 'n' - 15: 0, # 'o' - 26: 1, # 'p' - 7: 3, # 'r' - 8: 3, # 's' - 9: 3, # 't' - 14: 0, # 'u' - 32: 3, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 2, # 'y' - 22: 1, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 2, # 'ü' - 30: 0, # 'ğ' - 41: 1, # 'İ' - 6: 3, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 49: { # 'L' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 2, # 'E' - 52: 0, # 'F' - 36: 1, # 'G' - 45: 1, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 1, # 'M' - 46: 0, # 'N' - 42: 2, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 1, # 'Y' - 56: 0, # 'Z' - 1: 0, # 'a' - 21: 3, # 'b' - 28: 0, # 'c' - 12: 2, # 'd' - 2: 0, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 2, # 'i' - 24: 0, # 'j' - 10: 1, # 'k' - 5: 0, # 'l' - 13: 0, # 'm' - 4: 2, # 'n' - 15: 1, # 'o' - 26: 1, # 'p' - 7: 1, # 'r' - 8: 1, # 's' - 9: 1, # 't' - 14: 0, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 2, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 2, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 1, # 'ö' - 17: 1, # 'ü' - 30: 1, # 'ğ' - 41: 0, # 'İ' - 6: 2, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 20: { # 'M' - 23: 1, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 1, # 'J' - 16: 3, # 'K' - 49: 0, # 'L' - 20: 2, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 1, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 2, # 'b' - 28: 0, # 'c' - 12: 3, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 1, # 'g' - 25: 1, # 'h' - 3: 2, # 'i' - 24: 2, # 'j' - 10: 2, # 'k' - 5: 2, # 'l' - 13: 3, # 'm' - 4: 3, # 'n' - 15: 0, # 'o' - 26: 1, # 
'p' - 7: 3, # 'r' - 8: 0, # 's' - 9: 2, # 't' - 14: 3, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 2, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 3, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 0, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 3, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 46: { # 'N' - 23: 0, # 'A' - 37: 1, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 1, # 'F' - 36: 1, # 'G' - 45: 1, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 2, # 'K' - 49: 0, # 'L' - 20: 0, # 'M' - 46: 1, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 1, # 'R' - 35: 1, # 'S' - 31: 0, # 'T' - 51: 1, # 'U' - 38: 2, # 'V' - 62: 0, # 'W' - 43: 1, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 0, # 'b' - 28: 2, # 'c' - 12: 0, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 1, # 'g' - 25: 0, # 'h' - 3: 0, # 'i' - 24: 2, # 'j' - 10: 1, # 'k' - 5: 1, # 'l' - 13: 3, # 'm' - 4: 2, # 'n' - 15: 1, # 'o' - 26: 1, # 'p' - 7: 1, # 'r' - 8: 0, # 's' - 9: 0, # 't' - 14: 3, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 1, # 'x' - 11: 1, # 'y' - 22: 2, # 'z' - 63: 0, # '·' - 54: 1, # 'Ç' - 50: 1, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 1, # 'ö' - 17: 0, # 'ü' - 30: 0, # 'ğ' - 41: 1, # 'İ' - 6: 2, # 'ı' - 40: 1, # 'Ş' - 19: 1, # 'ş' - }, - 42: { # 'O' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 1, # 'F' - 36: 0, # 'G' - 45: 1, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 2, # 'K' - 49: 1, # 'L' - 20: 0, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 2, # 'P' - 44: 1, # 'R' - 35: 1, # 'S' - 31: 0, # 'T' - 51: 1, # 'U' - 38: 1, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 0, # 'b' - 28: 2, # 'c' - 12: 0, # 'd' - 2: 2, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 0, # 'i' - 24: 0, # 'j' - 10: 0, # 'k' - 5: 3, # 'l' - 13: 3, # 'm' - 4: 0, # 'n' - 15: 1, # 'o' - 26: 0, # 'p' - 7: 0, # 'r' - 8: 0, # 's' - 9: 0, # 't' - 14: 2, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 2, # 'z' - 63: 0, # '·' - 54: 2, # 'Ç' - 50: 1, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 2, # 'ç' - 61: 0, # 'î' - 34: 1, # 'ö' - 17: 0, # 'ü' - 30: 1, # 'ğ' - 41: 2, # 'İ' - 6: 1, # 'ı' - 40: 1, # 'Ş' - 19: 1, # 'ş' - }, - 48: { # 'P' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 2, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 2, # 'F' - 36: 1, # 'G' - 45: 1, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 2, # 'K' - 49: 0, # 'L' - 20: 0, # 'M' - 46: 1, # 'N' - 42: 1, # 'O' - 48: 1, # 'P' - 44: 0, # 'R' - 35: 1, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 1, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 2, # 'a' - 21: 0, # 'b' - 28: 2, # 'c' - 12: 0, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 0, # 'i' - 24: 0, # 'j' - 10: 1, # 'k' - 5: 0, # 'l' - 13: 2, # 'm' - 4: 0, # 'n' - 15: 2, # 'o' - 26: 0, # 'p' - 7: 0, # 'r' - 8: 0, # 's' - 9: 0, # 't' - 14: 2, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 2, # 'x' - 11: 0, # 'y' - 22: 2, # 'z' - 63: 0, # '·' - 54: 1, # 'Ç' - 50: 2, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 2, # 'ö' - 17: 0, # 'ü' - 30: 1, # 'ğ' - 41: 1, # 'İ' - 6: 0, # 'ı' - 40: 2, # 'Ş' - 19: 1, # 'ş' - }, - 44: { # 'R' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 1, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 1, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 3, # 'K' - 49: 0, # 'L' - 20: 0, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 1, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 
'W' - 43: 1, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 1, # 'b' - 28: 1, # 'c' - 12: 0, # 'd' - 2: 2, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 0, # 'i' - 24: 0, # 'j' - 10: 1, # 'k' - 5: 2, # 'l' - 13: 2, # 'm' - 4: 0, # 'n' - 15: 1, # 'o' - 26: 0, # 'p' - 7: 0, # 'r' - 8: 0, # 's' - 9: 0, # 't' - 14: 2, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 1, # 'y' - 22: 2, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 1, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 1, # 'ç' - 61: 0, # 'î' - 34: 1, # 'ö' - 17: 1, # 'ü' - 30: 1, # 'ğ' - 41: 0, # 'İ' - 6: 2, # 'ı' - 40: 1, # 'Ş' - 19: 1, # 'ş' - }, - 35: { # 'S' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 1, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 1, # 'F' - 36: 1, # 'G' - 45: 1, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 3, # 'K' - 49: 1, # 'L' - 20: 1, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 1, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 1, # 'U' - 38: 1, # 'V' - 62: 0, # 'W' - 43: 1, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 0, # 'b' - 28: 2, # 'c' - 12: 0, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 0, # 'i' - 24: 0, # 'j' - 10: 1, # 'k' - 5: 1, # 'l' - 13: 2, # 'm' - 4: 1, # 'n' - 15: 0, # 'o' - 26: 0, # 'p' - 7: 0, # 'r' - 8: 0, # 's' - 9: 1, # 't' - 14: 2, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 1, # 'z' - 63: 0, # '·' - 54: 2, # 'Ç' - 50: 2, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 3, # 'ç' - 61: 0, # 'î' - 34: 1, # 'ö' - 17: 0, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 3, # 'ı' - 40: 2, # 'Ş' - 19: 1, # 'ş' - }, - 31: { # 'T' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 1, # 'J' - 16: 2, # 'K' - 49: 0, # 'L' - 20: 1, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 2, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 2, # 'b' - 28: 0, # 'c' - 12: 1, # 'd' - 2: 3, # 'e' - 18: 2, # 'f' - 27: 2, # 'g' - 25: 0, # 'h' - 3: 1, # 'i' - 24: 1, # 'j' - 10: 2, # 'k' - 5: 2, # 'l' - 13: 3, # 'm' - 4: 3, # 'n' - 15: 0, # 'o' - 26: 2, # 'p' - 7: 2, # 'r' - 8: 0, # 's' - 9: 2, # 't' - 14: 2, # 'u' - 32: 1, # 'v' - 57: 1, # 'w' - 58: 1, # 'x' - 11: 2, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 1, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 3, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 51: { # 'U' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 1, # 'F' - 36: 1, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 1, # 'K' - 49: 0, # 'L' - 20: 0, # 'M' - 46: 1, # 'N' - 42: 0, # 'O' - 48: 1, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 1, # 'U' - 38: 1, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 0, # 'b' - 28: 1, # 'c' - 12: 0, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 2, # 'g' - 25: 0, # 'h' - 3: 0, # 'i' - 24: 0, # 'j' - 10: 1, # 'k' - 5: 1, # 'l' - 13: 3, # 'm' - 4: 2, # 'n' - 15: 0, # 'o' - 26: 1, # 'p' - 7: 0, # 'r' - 8: 0, # 's' - 9: 0, # 't' - 14: 2, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 2, # 'z' - 63: 0, # '·' - 54: 1, # 'Ç' - 50: 1, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 0, # 'ü' - 30: 1, # 'ğ' - 41: 1, # 'İ' - 6: 2, # 'ı' - 40: 0, # 'Ş' - 19: 1, # 'ş' - }, - 38: { # 'V' - 23: 1, # 'A' - 37: 1, # 'B' - 47: 1, # 'C' - 39: 0, # 'D' 
- 29: 0, # 'E' - 52: 2, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 3, # 'K' - 49: 0, # 'L' - 20: 3, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 1, # 'P' - 44: 1, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 1, # 'U' - 38: 1, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 0, # 'b' - 28: 2, # 'c' - 12: 0, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 0, # 'i' - 24: 0, # 'j' - 10: 0, # 'k' - 5: 2, # 'l' - 13: 2, # 'm' - 4: 0, # 'n' - 15: 2, # 'o' - 26: 0, # 'p' - 7: 0, # 'r' - 8: 0, # 's' - 9: 1, # 't' - 14: 3, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 1, # 'y' - 22: 2, # 'z' - 63: 0, # '·' - 54: 1, # 'Ç' - 50: 1, # 'Ö' - 55: 0, # 'Ü' - 59: 1, # 'â' - 33: 2, # 'ç' - 61: 0, # 'î' - 34: 1, # 'ö' - 17: 0, # 'ü' - 30: 1, # 'ğ' - 41: 1, # 'İ' - 6: 3, # 'ı' - 40: 2, # 'Ş' - 19: 1, # 'ş' - }, - 62: { # 'W' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 0, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 0, # 'a' - 21: 0, # 'b' - 28: 0, # 'c' - 12: 0, # 'd' - 2: 0, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 0, # 'i' - 24: 0, # 'j' - 10: 0, # 'k' - 5: 0, # 'l' - 13: 0, # 'm' - 4: 0, # 'n' - 15: 0, # 'o' - 26: 0, # 'p' - 7: 0, # 'r' - 8: 0, # 's' - 9: 0, # 't' - 14: 0, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 0, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 0, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 43: { # 'Y' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 1, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 2, # 'F' - 36: 0, # 'G' - 45: 1, # 'H' - 53: 1, # 'I' - 60: 0, # 'J' - 16: 2, # 'K' - 49: 0, # 'L' - 20: 0, # 'M' - 46: 2, # 'N' - 42: 0, # 'O' - 48: 2, # 'P' - 44: 1, # 'R' - 35: 1, # 'S' - 31: 0, # 'T' - 51: 1, # 'U' - 38: 2, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 0, # 'b' - 28: 2, # 'c' - 12: 0, # 'd' - 2: 2, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 0, # 'i' - 24: 1, # 'j' - 10: 1, # 'k' - 5: 1, # 'l' - 13: 3, # 'm' - 4: 0, # 'n' - 15: 2, # 'o' - 26: 0, # 'p' - 7: 0, # 'r' - 8: 0, # 's' - 9: 0, # 't' - 14: 3, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 1, # 'x' - 11: 0, # 'y' - 22: 2, # 'z' - 63: 0, # '·' - 54: 1, # 'Ç' - 50: 2, # 'Ö' - 55: 1, # 'Ü' - 59: 1, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 1, # 'ö' - 17: 0, # 'ü' - 30: 1, # 'ğ' - 41: 1, # 'İ' - 6: 0, # 'ı' - 40: 2, # 'Ş' - 19: 1, # 'ş' - }, - 56: { # 'Z' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 0, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 2, # 'Z' - 1: 2, # 'a' - 21: 1, # 'b' - 28: 0, # 'c' - 12: 0, # 'd' - 2: 2, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 2, # 'i' - 24: 1, # 'j' - 10: 0, # 'k' - 5: 0, # 'l' - 13: 1, # 'm' - 4: 1, # 'n' - 15: 0, # 'o' - 26: 0, # 'p' - 7: 1, # 'r' - 8: 1, # 's' - 9: 0, # 't' - 14: 2, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 
0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 1, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 1, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 1: { # 'a' - 23: 3, # 'A' - 37: 0, # 'B' - 47: 1, # 'C' - 39: 0, # 'D' - 29: 3, # 'E' - 52: 0, # 'F' - 36: 1, # 'G' - 45: 1, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 3, # 'M' - 46: 1, # 'N' - 42: 0, # 'O' - 48: 1, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 3, # 'T' - 51: 0, # 'U' - 38: 1, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 2, # 'Z' - 1: 2, # 'a' - 21: 3, # 'b' - 28: 0, # 'c' - 12: 3, # 'd' - 2: 2, # 'e' - 18: 3, # 'f' - 27: 3, # 'g' - 25: 3, # 'h' - 3: 3, # 'i' - 24: 3, # 'j' - 10: 3, # 'k' - 5: 0, # 'l' - 13: 2, # 'm' - 4: 3, # 'n' - 15: 1, # 'o' - 26: 3, # 'p' - 7: 3, # 'r' - 8: 3, # 's' - 9: 3, # 't' - 14: 3, # 'u' - 32: 3, # 'v' - 57: 2, # 'w' - 58: 0, # 'x' - 11: 3, # 'y' - 22: 0, # 'z' - 63: 1, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 1, # 'ç' - 61: 1, # 'î' - 34: 1, # 'ö' - 17: 3, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 3, # 'ı' - 40: 0, # 'Ş' - 19: 1, # 'ş' - }, - 21: { # 'b' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 1, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 1, # 'J' - 16: 2, # 'K' - 49: 0, # 'L' - 20: 2, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 1, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 1, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 2, # 'b' - 28: 0, # 'c' - 12: 3, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 3, # 'g' - 25: 1, # 'h' - 3: 3, # 'i' - 24: 2, # 'j' - 10: 3, # 'k' - 5: 3, # 'l' - 13: 3, # 'm' - 4: 3, # 'n' - 15: 0, # 'o' - 26: 3, # 'p' - 7: 1, # 'r' - 8: 2, # 's' - 9: 2, # 't' - 14: 2, # 'u' - 32: 1, # 'v' - 57: 0, # 'w' - 58: 1, # 'x' - 11: 3, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 1, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 0, # 'ü' - 30: 1, # 'ğ' - 41: 0, # 'İ' - 6: 2, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 28: { # 'c' - 23: 0, # 'A' - 37: 1, # 'B' - 47: 1, # 'C' - 39: 1, # 'D' - 29: 2, # 'E' - 52: 0, # 'F' - 36: 2, # 'G' - 45: 2, # 'H' - 53: 1, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 2, # 'M' - 46: 1, # 'N' - 42: 1, # 'O' - 48: 2, # 'P' - 44: 1, # 'R' - 35: 1, # 'S' - 31: 2, # 'T' - 51: 2, # 'U' - 38: 2, # 'V' - 62: 0, # 'W' - 43: 3, # 'Y' - 56: 0, # 'Z' - 1: 1, # 'a' - 21: 1, # 'b' - 28: 2, # 'c' - 12: 2, # 'd' - 2: 1, # 'e' - 18: 1, # 'f' - 27: 2, # 'g' - 25: 2, # 'h' - 3: 3, # 'i' - 24: 1, # 'j' - 10: 3, # 'k' - 5: 0, # 'l' - 13: 2, # 'm' - 4: 3, # 'n' - 15: 2, # 'o' - 26: 2, # 'p' - 7: 3, # 'r' - 8: 3, # 's' - 9: 3, # 't' - 14: 1, # 'u' - 32: 0, # 'v' - 57: 1, # 'w' - 58: 0, # 'x' - 11: 2, # 'y' - 22: 1, # 'z' - 63: 1, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 1, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 1, # 'î' - 34: 2, # 'ö' - 17: 2, # 'ü' - 30: 2, # 'ğ' - 41: 1, # 'İ' - 6: 3, # 'ı' - 40: 0, # 'Ş' - 19: 2, # 'ş' - }, - 12: { # 'd' - 23: 1, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 2, # 'J' - 16: 3, # 'K' - 49: 0, # 'L' - 20: 3, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 1, # 'S' - 31: 1, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 2, # 'b' - 28: 1, # 'c' - 12: 3, # 'd' - 2: 3, # 'e' - 18: 1, # 'f' - 27: 3, # 'g' - 25: 3, # 'h' - 3: 
2, # 'i' - 24: 3, # 'j' - 10: 2, # 'k' - 5: 3, # 'l' - 13: 3, # 'm' - 4: 3, # 'n' - 15: 1, # 'o' - 26: 2, # 'p' - 7: 3, # 'r' - 8: 2, # 's' - 9: 2, # 't' - 14: 3, # 'u' - 32: 1, # 'v' - 57: 0, # 'w' - 58: 1, # 'x' - 11: 3, # 'y' - 22: 1, # 'z' - 63: 1, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 1, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 2, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 2: { # 'e' - 23: 2, # 'A' - 37: 0, # 'B' - 47: 2, # 'C' - 39: 0, # 'D' - 29: 3, # 'E' - 52: 1, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 1, # 'K' - 49: 0, # 'L' - 20: 3, # 'M' - 46: 1, # 'N' - 42: 0, # 'O' - 48: 1, # 'P' - 44: 1, # 'R' - 35: 0, # 'S' - 31: 3, # 'T' - 51: 0, # 'U' - 38: 1, # 'V' - 62: 0, # 'W' - 43: 1, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 3, # 'b' - 28: 0, # 'c' - 12: 3, # 'd' - 2: 2, # 'e' - 18: 3, # 'f' - 27: 3, # 'g' - 25: 3, # 'h' - 3: 3, # 'i' - 24: 3, # 'j' - 10: 3, # 'k' - 5: 0, # 'l' - 13: 2, # 'm' - 4: 3, # 'n' - 15: 1, # 'o' - 26: 3, # 'p' - 7: 3, # 'r' - 8: 3, # 's' - 9: 3, # 't' - 14: 3, # 'u' - 32: 3, # 'v' - 57: 2, # 'w' - 58: 0, # 'x' - 11: 3, # 'y' - 22: 1, # 'z' - 63: 1, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 1, # 'ç' - 61: 0, # 'î' - 34: 1, # 'ö' - 17: 3, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 3, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 18: { # 'f' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 2, # 'K' - 49: 0, # 'L' - 20: 2, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 2, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 1, # 'b' - 28: 0, # 'c' - 12: 3, # 'd' - 2: 3, # 'e' - 18: 2, # 'f' - 27: 1, # 'g' - 25: 1, # 'h' - 3: 1, # 'i' - 24: 1, # 'j' - 10: 1, # 'k' - 5: 3, # 'l' - 13: 3, # 'm' - 4: 3, # 'n' - 15: 0, # 'o' - 26: 2, # 'p' - 7: 1, # 'r' - 8: 3, # 's' - 9: 3, # 't' - 14: 1, # 'u' - 32: 2, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 1, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 1, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 1, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 1, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 27: { # 'g' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 3, # 'K' - 49: 0, # 'L' - 20: 0, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 1, # 'S' - 31: 1, # 'T' - 51: 0, # 'U' - 38: 2, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 1, # 'b' - 28: 0, # 'c' - 12: 1, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 2, # 'g' - 25: 1, # 'h' - 3: 2, # 'i' - 24: 3, # 'j' - 10: 2, # 'k' - 5: 3, # 'l' - 13: 3, # 'm' - 4: 2, # 'n' - 15: 0, # 'o' - 26: 1, # 'p' - 7: 2, # 'r' - 8: 2, # 's' - 9: 3, # 't' - 14: 3, # 'u' - 32: 1, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 1, # 'y' - 22: 0, # 'z' - 63: 1, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 0, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 2, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 25: { # 'h' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 2, # 'K' - 49: 0, # 'L' - 20: 0, # 'M' - 46: 0, # 'N' - 42: 0, 
# 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 0, # 'b' - 28: 0, # 'c' - 12: 2, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 1, # 'g' - 25: 2, # 'h' - 3: 2, # 'i' - 24: 3, # 'j' - 10: 3, # 'k' - 5: 3, # 'l' - 13: 3, # 'm' - 4: 3, # 'n' - 15: 1, # 'o' - 26: 1, # 'p' - 7: 3, # 'r' - 8: 3, # 's' - 9: 2, # 't' - 14: 3, # 'u' - 32: 2, # 'v' - 57: 1, # 'w' - 58: 0, # 'x' - 11: 1, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 0, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 3, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 3: { # 'i' - 23: 2, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 1, # 'J' - 16: 3, # 'K' - 49: 0, # 'L' - 20: 3, # 'M' - 46: 0, # 'N' - 42: 1, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 1, # 'S' - 31: 2, # 'T' - 51: 0, # 'U' - 38: 1, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 2, # 'b' - 28: 0, # 'c' - 12: 3, # 'd' - 2: 3, # 'e' - 18: 2, # 'f' - 27: 3, # 'g' - 25: 1, # 'h' - 3: 3, # 'i' - 24: 2, # 'j' - 10: 3, # 'k' - 5: 3, # 'l' - 13: 3, # 'm' - 4: 3, # 'n' - 15: 1, # 'o' - 26: 3, # 'p' - 7: 3, # 'r' - 8: 3, # 's' - 9: 3, # 't' - 14: 3, # 'u' - 32: 2, # 'v' - 57: 1, # 'w' - 58: 1, # 'x' - 11: 3, # 'y' - 22: 1, # 'z' - 63: 1, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 1, # 'Ü' - 59: 0, # 'â' - 33: 2, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 3, # 'ü' - 30: 0, # 'ğ' - 41: 1, # 'İ' - 6: 2, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 24: { # 'j' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 1, # 'J' - 16: 2, # 'K' - 49: 0, # 'L' - 20: 2, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 1, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 1, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 1, # 'Z' - 1: 3, # 'a' - 21: 1, # 'b' - 28: 1, # 'c' - 12: 3, # 'd' - 2: 3, # 'e' - 18: 2, # 'f' - 27: 1, # 'g' - 25: 1, # 'h' - 3: 2, # 'i' - 24: 1, # 'j' - 10: 2, # 'k' - 5: 2, # 'l' - 13: 3, # 'm' - 4: 2, # 'n' - 15: 0, # 'o' - 26: 1, # 'p' - 7: 2, # 'r' - 8: 3, # 's' - 9: 2, # 't' - 14: 3, # 'u' - 32: 2, # 'v' - 57: 0, # 'w' - 58: 2, # 'x' - 11: 1, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 1, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 1, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 3, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 10: { # 'k' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 3, # 'K' - 49: 0, # 'L' - 20: 2, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 3, # 'T' - 51: 0, # 'U' - 38: 1, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 1, # 'Z' - 1: 3, # 'a' - 21: 2, # 'b' - 28: 0, # 'c' - 12: 2, # 'd' - 2: 3, # 'e' - 18: 1, # 'f' - 27: 2, # 'g' - 25: 2, # 'h' - 3: 3, # 'i' - 24: 2, # 'j' - 10: 2, # 'k' - 5: 3, # 'l' - 13: 3, # 'm' - 4: 3, # 'n' - 15: 0, # 'o' - 26: 3, # 'p' - 7: 2, # 'r' - 8: 2, # 's' - 9: 2, # 't' - 14: 3, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 1, # 'x' - 11: 3, # 'y' - 22: 0, # 'z' - 63: 1, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 3, # 'ç' - 61: 0, # 'î' - 34: 1, # 'ö' - 17: 3, # 'ü' - 30: 1, # 'ğ' - 41: 0, # 'İ' - 6: 3, # 'ı' - 
40: 0, # 'Ş' - 19: 1, # 'ş' - }, - 5: { # 'l' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 3, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 2, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 1, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 0, # 'a' - 21: 3, # 'b' - 28: 0, # 'c' - 12: 3, # 'd' - 2: 1, # 'e' - 18: 3, # 'f' - 27: 3, # 'g' - 25: 2, # 'h' - 3: 3, # 'i' - 24: 2, # 'j' - 10: 3, # 'k' - 5: 1, # 'l' - 13: 1, # 'm' - 4: 3, # 'n' - 15: 0, # 'o' - 26: 2, # 'p' - 7: 3, # 'r' - 8: 3, # 's' - 9: 3, # 't' - 14: 2, # 'u' - 32: 2, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 3, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 1, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 2, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 3, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 13: { # 'm' - 23: 1, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 3, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 3, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 3, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 1, # 'Y' - 56: 0, # 'Z' - 1: 2, # 'a' - 21: 3, # 'b' - 28: 0, # 'c' - 12: 3, # 'd' - 2: 2, # 'e' - 18: 3, # 'f' - 27: 3, # 'g' - 25: 3, # 'h' - 3: 3, # 'i' - 24: 3, # 'j' - 10: 3, # 'k' - 5: 0, # 'l' - 13: 2, # 'm' - 4: 3, # 'n' - 15: 1, # 'o' - 26: 2, # 'p' - 7: 3, # 'r' - 8: 3, # 's' - 9: 3, # 't' - 14: 2, # 'u' - 32: 2, # 'v' - 57: 1, # 'w' - 58: 0, # 'x' - 11: 3, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 3, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 3, # 'ı' - 40: 0, # 'Ş' - 19: 1, # 'ş' - }, - 4: { # 'n' - 23: 1, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 1, # 'H' - 53: 0, # 'I' - 60: 2, # 'J' - 16: 3, # 'K' - 49: 0, # 'L' - 20: 3, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 2, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 2, # 'b' - 28: 1, # 'c' - 12: 3, # 'd' - 2: 3, # 'e' - 18: 1, # 'f' - 27: 2, # 'g' - 25: 3, # 'h' - 3: 2, # 'i' - 24: 2, # 'j' - 10: 3, # 'k' - 5: 3, # 'l' - 13: 3, # 'm' - 4: 3, # 'n' - 15: 1, # 'o' - 26: 3, # 'p' - 7: 2, # 'r' - 8: 3, # 's' - 9: 3, # 't' - 14: 3, # 'u' - 32: 2, # 'v' - 57: 0, # 'w' - 58: 2, # 'x' - 11: 3, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 1, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 2, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 1, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 15: { # 'o' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 1, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 2, # 'F' - 36: 1, # 'G' - 45: 1, # 'H' - 53: 1, # 'I' - 60: 0, # 'J' - 16: 3, # 'K' - 49: 2, # 'L' - 20: 0, # 'M' - 46: 2, # 'N' - 42: 1, # 'O' - 48: 2, # 'P' - 44: 1, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 0, # 'b' - 28: 2, # 'c' - 12: 0, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 1, # 'i' - 24: 2, # 'j' - 10: 1, # 'k' - 5: 3, # 'l' - 13: 3, # 'm' - 4: 2, # 'n' - 15: 2, # 'o' - 26: 0, # 'p' - 7: 1, # 'r' - 8: 0, # 's' - 9: 0, # 't' - 14: 
3, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 2, # 'x' - 11: 0, # 'y' - 22: 2, # 'z' - 63: 0, # '·' - 54: 1, # 'Ç' - 50: 2, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 3, # 'ç' - 61: 0, # 'î' - 34: 1, # 'ö' - 17: 0, # 'ü' - 30: 2, # 'ğ' - 41: 2, # 'İ' - 6: 3, # 'ı' - 40: 2, # 'Ş' - 19: 2, # 'ş' - }, - 26: { # 'p' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 3, # 'K' - 49: 0, # 'L' - 20: 1, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 1, # 'b' - 28: 0, # 'c' - 12: 1, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 1, # 'g' - 25: 1, # 'h' - 3: 2, # 'i' - 24: 3, # 'j' - 10: 1, # 'k' - 5: 3, # 'l' - 13: 3, # 'm' - 4: 2, # 'n' - 15: 0, # 'o' - 26: 2, # 'p' - 7: 2, # 'r' - 8: 1, # 's' - 9: 1, # 't' - 14: 3, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 1, # 'x' - 11: 1, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 3, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 1, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 3, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 7: { # 'r' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 1, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 2, # 'J' - 16: 3, # 'K' - 49: 0, # 'L' - 20: 2, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 2, # 'T' - 51: 1, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 1, # 'Z' - 1: 3, # 'a' - 21: 1, # 'b' - 28: 0, # 'c' - 12: 3, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 2, # 'g' - 25: 3, # 'h' - 3: 2, # 'i' - 24: 2, # 'j' - 10: 3, # 'k' - 5: 3, # 'l' - 13: 3, # 'm' - 4: 3, # 'n' - 15: 0, # 'o' - 26: 2, # 'p' - 7: 3, # 'r' - 8: 3, # 's' - 9: 3, # 't' - 14: 3, # 'u' - 32: 2, # 'v' - 57: 0, # 'w' - 58: 1, # 'x' - 11: 2, # 'y' - 22: 0, # 'z' - 63: 1, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 2, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 3, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 2, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 8: { # 's' - 23: 1, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 1, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 3, # 'K' - 49: 0, # 'L' - 20: 3, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 2, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 1, # 'Z' - 1: 3, # 'a' - 21: 2, # 'b' - 28: 1, # 'c' - 12: 3, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 2, # 'g' - 25: 2, # 'h' - 3: 2, # 'i' - 24: 3, # 'j' - 10: 3, # 'k' - 5: 3, # 'l' - 13: 3, # 'm' - 4: 3, # 'n' - 15: 0, # 'o' - 26: 3, # 'p' - 7: 3, # 'r' - 8: 3, # 's' - 9: 3, # 't' - 14: 3, # 'u' - 32: 2, # 'v' - 57: 0, # 'w' - 58: 1, # 'x' - 11: 2, # 'y' - 22: 1, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 2, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 2, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 3, # 'ı' - 40: 0, # 'Ş' - 19: 1, # 'ş' - }, - 9: { # 't' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 1, # 'J' - 16: 3, # 'K' - 49: 0, # 'L' - 20: 2, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 2, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 1, # 'Z' - 1: 3, # 'a' - 21: 
3, # 'b' - 28: 0, # 'c' - 12: 3, # 'd' - 2: 3, # 'e' - 18: 2, # 'f' - 27: 2, # 'g' - 25: 2, # 'h' - 3: 2, # 'i' - 24: 2, # 'j' - 10: 3, # 'k' - 5: 3, # 'l' - 13: 3, # 'm' - 4: 3, # 'n' - 15: 0, # 'o' - 26: 2, # 'p' - 7: 3, # 'r' - 8: 3, # 's' - 9: 3, # 't' - 14: 3, # 'u' - 32: 3, # 'v' - 57: 0, # 'w' - 58: 2, # 'x' - 11: 2, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 3, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 2, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 3, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 14: { # 'u' - 23: 3, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 3, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 1, # 'H' - 53: 0, # 'I' - 60: 1, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 3, # 'M' - 46: 2, # 'N' - 42: 0, # 'O' - 48: 1, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 3, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 1, # 'Y' - 56: 2, # 'Z' - 1: 2, # 'a' - 21: 3, # 'b' - 28: 0, # 'c' - 12: 3, # 'd' - 2: 2, # 'e' - 18: 2, # 'f' - 27: 3, # 'g' - 25: 3, # 'h' - 3: 3, # 'i' - 24: 2, # 'j' - 10: 3, # 'k' - 5: 0, # 'l' - 13: 3, # 'm' - 4: 3, # 'n' - 15: 0, # 'o' - 26: 3, # 'p' - 7: 3, # 'r' - 8: 3, # 's' - 9: 3, # 't' - 14: 3, # 'u' - 32: 2, # 'v' - 57: 2, # 'w' - 58: 0, # 'x' - 11: 3, # 'y' - 22: 0, # 'z' - 63: 1, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 3, # 'ü' - 30: 1, # 'ğ' - 41: 0, # 'İ' - 6: 3, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 32: { # 'v' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 3, # 'K' - 49: 0, # 'L' - 20: 1, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 0, # 'b' - 28: 0, # 'c' - 12: 3, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 0, # 'i' - 24: 1, # 'j' - 10: 1, # 'k' - 5: 3, # 'l' - 13: 2, # 'm' - 4: 3, # 'n' - 15: 0, # 'o' - 26: 1, # 'p' - 7: 1, # 'r' - 8: 2, # 's' - 9: 3, # 't' - 14: 3, # 'u' - 32: 1, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 2, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 0, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 1, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 57: { # 'w' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 0, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 1, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 1, # 'a' - 21: 0, # 'b' - 28: 0, # 'c' - 12: 0, # 'd' - 2: 2, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 1, # 'h' - 3: 0, # 'i' - 24: 0, # 'j' - 10: 1, # 'k' - 5: 0, # 'l' - 13: 0, # 'm' - 4: 1, # 'n' - 15: 0, # 'o' - 26: 0, # 'p' - 7: 0, # 'r' - 8: 1, # 's' - 9: 0, # 't' - 14: 1, # 'u' - 32: 0, # 'v' - 57: 2, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 0, # 'z' - 63: 1, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 1, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 0, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 58: { # 'x' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 1, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, 
# 'H' - 53: 0, # 'I' - 60: 1, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 1, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 0, # 'a' - 21: 1, # 'b' - 28: 0, # 'c' - 12: 2, # 'd' - 2: 1, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 2, # 'i' - 24: 2, # 'j' - 10: 1, # 'k' - 5: 0, # 'l' - 13: 0, # 'm' - 4: 2, # 'n' - 15: 0, # 'o' - 26: 0, # 'p' - 7: 1, # 'r' - 8: 2, # 's' - 9: 1, # 't' - 14: 0, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 2, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 1, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 2, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 11: { # 'y' - 23: 1, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 1, # 'J' - 16: 3, # 'K' - 49: 0, # 'L' - 20: 1, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 1, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 1, # 'Y' - 56: 1, # 'Z' - 1: 3, # 'a' - 21: 1, # 'b' - 28: 0, # 'c' - 12: 2, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 2, # 'g' - 25: 2, # 'h' - 3: 2, # 'i' - 24: 1, # 'j' - 10: 2, # 'k' - 5: 3, # 'l' - 13: 3, # 'm' - 4: 3, # 'n' - 15: 0, # 'o' - 26: 1, # 'p' - 7: 2, # 'r' - 8: 1, # 's' - 9: 2, # 't' - 14: 3, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 1, # 'x' - 11: 3, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 3, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 2, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 3, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 22: { # 'z' - 23: 2, # 'A' - 37: 2, # 'B' - 47: 1, # 'C' - 39: 2, # 'D' - 29: 3, # 'E' - 52: 1, # 'F' - 36: 2, # 'G' - 45: 2, # 'H' - 53: 1, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 3, # 'M' - 46: 2, # 'N' - 42: 2, # 'O' - 48: 2, # 'P' - 44: 1, # 'R' - 35: 1, # 'S' - 31: 3, # 'T' - 51: 2, # 'U' - 38: 2, # 'V' - 62: 0, # 'W' - 43: 2, # 'Y' - 56: 1, # 'Z' - 1: 1, # 'a' - 21: 2, # 'b' - 28: 1, # 'c' - 12: 2, # 'd' - 2: 2, # 'e' - 18: 3, # 'f' - 27: 2, # 'g' - 25: 2, # 'h' - 3: 3, # 'i' - 24: 2, # 'j' - 10: 3, # 'k' - 5: 0, # 'l' - 13: 2, # 'm' - 4: 3, # 'n' - 15: 2, # 'o' - 26: 2, # 'p' - 7: 3, # 'r' - 8: 3, # 's' - 9: 3, # 't' - 14: 0, # 'u' - 32: 2, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 3, # 'y' - 22: 2, # 'z' - 63: 1, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 2, # 'Ü' - 59: 1, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 2, # 'ö' - 17: 2, # 'ü' - 30: 2, # 'ğ' - 41: 1, # 'İ' - 6: 3, # 'ı' - 40: 1, # 'Ş' - 19: 2, # 'ş' - }, - 63: { # '·' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 0, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 0, # 'a' - 21: 0, # 'b' - 28: 0, # 'c' - 12: 0, # 'd' - 2: 1, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 0, # 'i' - 24: 0, # 'j' - 10: 0, # 'k' - 5: 0, # 'l' - 13: 2, # 'm' - 4: 0, # 'n' - 15: 0, # 'o' - 26: 0, # 'p' - 7: 0, # 'r' - 8: 0, # 's' - 9: 0, # 't' - 14: 2, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 
33: 0, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 0, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 0, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 54: { # 'Ç' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 1, # 'C' - 39: 1, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 1, # 'G' - 45: 1, # 'H' - 53: 1, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 0, # 'M' - 46: 0, # 'N' - 42: 1, # 'O' - 48: 1, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 1, # 'U' - 38: 1, # 'V' - 62: 0, # 'W' - 43: 2, # 'Y' - 56: 0, # 'Z' - 1: 0, # 'a' - 21: 1, # 'b' - 28: 0, # 'c' - 12: 1, # 'd' - 2: 0, # 'e' - 18: 0, # 'f' - 27: 1, # 'g' - 25: 0, # 'h' - 3: 3, # 'i' - 24: 0, # 'j' - 10: 1, # 'k' - 5: 0, # 'l' - 13: 0, # 'm' - 4: 2, # 'n' - 15: 1, # 'o' - 26: 0, # 'p' - 7: 2, # 'r' - 8: 0, # 's' - 9: 1, # 't' - 14: 0, # 'u' - 32: 2, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 2, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 1, # 'ö' - 17: 0, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 2, # 'ı' - 40: 0, # 'Ş' - 19: 1, # 'ş' - }, - 50: { # 'Ö' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 1, # 'C' - 39: 1, # 'D' - 29: 2, # 'E' - 52: 0, # 'F' - 36: 1, # 'G' - 45: 2, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 1, # 'M' - 46: 1, # 'N' - 42: 2, # 'O' - 48: 2, # 'P' - 44: 1, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 1, # 'U' - 38: 1, # 'V' - 62: 0, # 'W' - 43: 2, # 'Y' - 56: 0, # 'Z' - 1: 0, # 'a' - 21: 2, # 'b' - 28: 1, # 'c' - 12: 2, # 'd' - 2: 0, # 'e' - 18: 1, # 'f' - 27: 1, # 'g' - 25: 1, # 'h' - 3: 2, # 'i' - 24: 0, # 'j' - 10: 2, # 'k' - 5: 0, # 'l' - 13: 0, # 'm' - 4: 3, # 'n' - 15: 2, # 'o' - 26: 2, # 'p' - 7: 3, # 'r' - 8: 1, # 's' - 9: 2, # 't' - 14: 0, # 'u' - 32: 1, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 1, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 2, # 'ö' - 17: 2, # 'ü' - 30: 1, # 'ğ' - 41: 0, # 'İ' - 6: 2, # 'ı' - 40: 0, # 'Ş' - 19: 1, # 'ş' - }, - 55: { # 'Ü' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 2, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 1, # 'K' - 49: 0, # 'L' - 20: 0, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 1, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 1, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 2, # 'a' - 21: 0, # 'b' - 28: 2, # 'c' - 12: 0, # 'd' - 2: 2, # 'e' - 18: 0, # 'f' - 27: 1, # 'g' - 25: 0, # 'h' - 3: 0, # 'i' - 24: 0, # 'j' - 10: 0, # 'k' - 5: 1, # 'l' - 13: 1, # 'm' - 4: 1, # 'n' - 15: 0, # 'o' - 26: 0, # 'p' - 7: 0, # 'r' - 8: 0, # 's' - 9: 1, # 't' - 14: 2, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 1, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 1, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 1, # 'ö' - 17: 0, # 'ü' - 30: 1, # 'ğ' - 41: 1, # 'İ' - 6: 0, # 'ı' - 40: 0, # 'Ş' - 19: 1, # 'ş' - }, - 59: { # 'â' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 1, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 1, # 'K' - 49: 0, # 'L' - 20: 0, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 2, # 'a' - 21: 0, # 'b' - 28: 0, # 'c' - 12: 0, # 'd' - 2: 2, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 0, # 'i' - 24: 0, # 'j' - 10: 0, # 'k' - 5: 0, # 'l' - 
13: 2, # 'm' - 4: 0, # 'n' - 15: 1, # 'o' - 26: 0, # 'p' - 7: 0, # 'r' - 8: 0, # 's' - 9: 0, # 't' - 14: 2, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 1, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 0, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 1, # 'ı' - 40: 1, # 'Ş' - 19: 0, # 'ş' - }, - 33: { # 'ç' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 3, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 1, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 2, # 'T' - 51: 0, # 'U' - 38: 1, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 0, # 'Z' - 1: 0, # 'a' - 21: 3, # 'b' - 28: 0, # 'c' - 12: 2, # 'd' - 2: 0, # 'e' - 18: 2, # 'f' - 27: 1, # 'g' - 25: 3, # 'h' - 3: 3, # 'i' - 24: 0, # 'j' - 10: 3, # 'k' - 5: 0, # 'l' - 13: 0, # 'm' - 4: 3, # 'n' - 15: 0, # 'o' - 26: 1, # 'p' - 7: 3, # 'r' - 8: 2, # 's' - 9: 3, # 't' - 14: 0, # 'u' - 32: 2, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 2, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 1, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 3, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 61: { # 'î' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 0, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 0, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 1, # 'Z' - 1: 2, # 'a' - 21: 0, # 'b' - 28: 0, # 'c' - 12: 0, # 'd' - 2: 2, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 0, # 'i' - 24: 1, # 'j' - 10: 0, # 'k' - 5: 0, # 'l' - 13: 1, # 'm' - 4: 1, # 'n' - 15: 0, # 'o' - 26: 0, # 'p' - 7: 0, # 'r' - 8: 0, # 's' - 9: 0, # 't' - 14: 1, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 1, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 1, # 'î' - 34: 0, # 'ö' - 17: 0, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 1, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 34: { # 'ö' - 23: 0, # 'A' - 37: 1, # 'B' - 47: 1, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 2, # 'F' - 36: 1, # 'G' - 45: 1, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 3, # 'K' - 49: 1, # 'L' - 20: 0, # 'M' - 46: 1, # 'N' - 42: 1, # 'O' - 48: 2, # 'P' - 44: 1, # 'R' - 35: 1, # 'S' - 31: 1, # 'T' - 51: 1, # 'U' - 38: 1, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 1, # 'Z' - 1: 3, # 'a' - 21: 1, # 'b' - 28: 2, # 'c' - 12: 1, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 2, # 'g' - 25: 2, # 'h' - 3: 1, # 'i' - 24: 2, # 'j' - 10: 1, # 'k' - 5: 2, # 'l' - 13: 3, # 'm' - 4: 2, # 'n' - 15: 2, # 'o' - 26: 0, # 'p' - 7: 0, # 'r' - 8: 3, # 's' - 9: 1, # 't' - 14: 3, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 1, # 'y' - 22: 2, # 'z' - 63: 0, # '·' - 54: 1, # 'Ç' - 50: 2, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 2, # 'ç' - 61: 0, # 'î' - 34: 2, # 'ö' - 17: 0, # 'ü' - 30: 2, # 'ğ' - 41: 1, # 'İ' - 6: 1, # 'ı' - 40: 2, # 'Ş' - 19: 1, # 'ş' - }, - 17: { # 'ü' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 1, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 0, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 1, # 'J' - 16: 1, # 'K' - 49: 0, # 'L' - 20: 1, # 'M' - 46: 0, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 
31: 1, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 0, # 'Y' - 56: 1, # 'Z' - 1: 3, # 'a' - 21: 0, # 'b' - 28: 0, # 'c' - 12: 1, # 'd' - 2: 3, # 'e' - 18: 1, # 'f' - 27: 2, # 'g' - 25: 0, # 'h' - 3: 1, # 'i' - 24: 1, # 'j' - 10: 2, # 'k' - 5: 3, # 'l' - 13: 2, # 'm' - 4: 3, # 'n' - 15: 0, # 'o' - 26: 2, # 'p' - 7: 2, # 'r' - 8: 3, # 's' - 9: 2, # 't' - 14: 3, # 'u' - 32: 1, # 'v' - 57: 1, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 1, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 2, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 2, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 30: { # 'ğ' - 23: 0, # 'A' - 37: 2, # 'B' - 47: 1, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 2, # 'F' - 36: 1, # 'G' - 45: 0, # 'H' - 53: 1, # 'I' - 60: 0, # 'J' - 16: 3, # 'K' - 49: 0, # 'L' - 20: 1, # 'M' - 46: 2, # 'N' - 42: 2, # 'O' - 48: 1, # 'P' - 44: 1, # 'R' - 35: 0, # 'S' - 31: 1, # 'T' - 51: 0, # 'U' - 38: 2, # 'V' - 62: 0, # 'W' - 43: 2, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 0, # 'b' - 28: 2, # 'c' - 12: 0, # 'd' - 2: 2, # 'e' - 18: 0, # 'f' - 27: 0, # 'g' - 25: 0, # 'h' - 3: 0, # 'i' - 24: 3, # 'j' - 10: 1, # 'k' - 5: 2, # 'l' - 13: 3, # 'm' - 4: 0, # 'n' - 15: 1, # 'o' - 26: 0, # 'p' - 7: 1, # 'r' - 8: 0, # 's' - 9: 0, # 't' - 14: 3, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 2, # 'z' - 63: 0, # '·' - 54: 2, # 'Ç' - 50: 2, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 1, # 'ç' - 61: 0, # 'î' - 34: 2, # 'ö' - 17: 0, # 'ü' - 30: 1, # 'ğ' - 41: 2, # 'İ' - 6: 2, # 'ı' - 40: 2, # 'Ş' - 19: 1, # 'ş' - }, - 41: { # 'İ' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 1, # 'C' - 39: 1, # 'D' - 29: 1, # 'E' - 52: 0, # 'F' - 36: 2, # 'G' - 45: 2, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 2, # 'M' - 46: 1, # 'N' - 42: 1, # 'O' - 48: 2, # 'P' - 44: 0, # 'R' - 35: 1, # 'S' - 31: 1, # 'T' - 51: 1, # 'U' - 38: 1, # 'V' - 62: 0, # 'W' - 43: 2, # 'Y' - 56: 0, # 'Z' - 1: 1, # 'a' - 21: 2, # 'b' - 28: 1, # 'c' - 12: 2, # 'd' - 2: 1, # 'e' - 18: 0, # 'f' - 27: 3, # 'g' - 25: 2, # 'h' - 3: 2, # 'i' - 24: 2, # 'j' - 10: 2, # 'k' - 5: 0, # 'l' - 13: 1, # 'm' - 4: 3, # 'n' - 15: 1, # 'o' - 26: 1, # 'p' - 7: 3, # 'r' - 8: 3, # 's' - 9: 2, # 't' - 14: 0, # 'u' - 32: 0, # 'v' - 57: 1, # 'w' - 58: 0, # 'x' - 11: 2, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 1, # 'Ü' - 59: 1, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 1, # 'ö' - 17: 1, # 'ü' - 30: 2, # 'ğ' - 41: 0, # 'İ' - 6: 3, # 'ı' - 40: 0, # 'Ş' - 19: 1, # 'ş' - }, - 6: { # 'ı' - 23: 2, # 'A' - 37: 0, # 'B' - 47: 0, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 0, # 'F' - 36: 1, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 2, # 'J' - 16: 3, # 'K' - 49: 0, # 'L' - 20: 3, # 'M' - 46: 1, # 'N' - 42: 0, # 'O' - 48: 0, # 'P' - 44: 0, # 'R' - 35: 0, # 'S' - 31: 2, # 'T' - 51: 0, # 'U' - 38: 0, # 'V' - 62: 0, # 'W' - 43: 2, # 'Y' - 56: 1, # 'Z' - 1: 3, # 'a' - 21: 2, # 'b' - 28: 1, # 'c' - 12: 3, # 'd' - 2: 3, # 'e' - 18: 3, # 'f' - 27: 3, # 'g' - 25: 2, # 'h' - 3: 3, # 'i' - 24: 3, # 'j' - 10: 3, # 'k' - 5: 3, # 'l' - 13: 3, # 'm' - 4: 3, # 'n' - 15: 0, # 'o' - 26: 3, # 'p' - 7: 3, # 'r' - 8: 3, # 's' - 9: 3, # 't' - 14: 3, # 'u' - 32: 3, # 'v' - 57: 1, # 'w' - 58: 1, # 'x' - 11: 3, # 'y' - 22: 0, # 'z' - 63: 1, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 2, # 'ç' - 61: 0, # 'î' - 34: 0, # 'ö' - 17: 3, # 'ü' - 30: 0, # 'ğ' - 41: 0, # 'İ' - 6: 3, # 'ı' - 40: 0, # 'Ş' - 19: 0, # 'ş' - }, - 40: { # 'Ş' - 23: 
0, # 'A' - 37: 0, # 'B' - 47: 1, # 'C' - 39: 1, # 'D' - 29: 1, # 'E' - 52: 0, # 'F' - 36: 1, # 'G' - 45: 2, # 'H' - 53: 1, # 'I' - 60: 0, # 'J' - 16: 0, # 'K' - 49: 0, # 'L' - 20: 2, # 'M' - 46: 1, # 'N' - 42: 1, # 'O' - 48: 2, # 'P' - 44: 2, # 'R' - 35: 1, # 'S' - 31: 1, # 'T' - 51: 0, # 'U' - 38: 1, # 'V' - 62: 0, # 'W' - 43: 2, # 'Y' - 56: 1, # 'Z' - 1: 0, # 'a' - 21: 2, # 'b' - 28: 0, # 'c' - 12: 2, # 'd' - 2: 0, # 'e' - 18: 3, # 'f' - 27: 0, # 'g' - 25: 2, # 'h' - 3: 3, # 'i' - 24: 2, # 'j' - 10: 1, # 'k' - 5: 0, # 'l' - 13: 1, # 'm' - 4: 3, # 'n' - 15: 2, # 'o' - 26: 0, # 'p' - 7: 3, # 'r' - 8: 2, # 's' - 9: 2, # 't' - 14: 1, # 'u' - 32: 3, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 2, # 'y' - 22: 0, # 'z' - 63: 0, # '·' - 54: 0, # 'Ç' - 50: 0, # 'Ö' - 55: 1, # 'Ü' - 59: 0, # 'â' - 33: 0, # 'ç' - 61: 0, # 'î' - 34: 2, # 'ö' - 17: 1, # 'ü' - 30: 2, # 'ğ' - 41: 0, # 'İ' - 6: 2, # 'ı' - 40: 1, # 'Ş' - 19: 2, # 'ş' - }, - 19: { # 'ş' - 23: 0, # 'A' - 37: 0, # 'B' - 47: 1, # 'C' - 39: 0, # 'D' - 29: 0, # 'E' - 52: 2, # 'F' - 36: 1, # 'G' - 45: 0, # 'H' - 53: 0, # 'I' - 60: 0, # 'J' - 16: 3, # 'K' - 49: 2, # 'L' - 20: 0, # 'M' - 46: 1, # 'N' - 42: 1, # 'O' - 48: 1, # 'P' - 44: 1, # 'R' - 35: 1, # 'S' - 31: 0, # 'T' - 51: 1, # 'U' - 38: 1, # 'V' - 62: 0, # 'W' - 43: 1, # 'Y' - 56: 0, # 'Z' - 1: 3, # 'a' - 21: 1, # 'b' - 28: 2, # 'c' - 12: 0, # 'd' - 2: 3, # 'e' - 18: 0, # 'f' - 27: 2, # 'g' - 25: 1, # 'h' - 3: 1, # 'i' - 24: 0, # 'j' - 10: 2, # 'k' - 5: 2, # 'l' - 13: 3, # 'm' - 4: 0, # 'n' - 15: 0, # 'o' - 26: 1, # 'p' - 7: 3, # 'r' - 8: 0, # 's' - 9: 0, # 't' - 14: 3, # 'u' - 32: 0, # 'v' - 57: 0, # 'w' - 58: 0, # 'x' - 11: 0, # 'y' - 22: 2, # 'z' - 63: 0, # '·' - 54: 1, # 'Ç' - 50: 2, # 'Ö' - 55: 0, # 'Ü' - 59: 0, # 'â' - 33: 1, # 'ç' - 61: 1, # 'î' - 34: 2, # 'ö' - 17: 0, # 'ü' - 30: 1, # 'ğ' - 41: 1, # 'İ' - 6: 1, # 'ı' - 40: 1, # 'Ş' - 19: 1, # 'ş' - }, -} - -# 255: Undefined characters that did not exist in training text -# 254: Carriage/Return -# 253: symbol (punctuation) that does not belong to word -# 252: 0 - 9 -# 251: Control characters - -# Character Mapping Table(s): -ISO_8859_9_TURKISH_CHAR_TO_ORDER = { - 0: 255, # '\x00' - 1: 255, # '\x01' - 2: 255, # '\x02' - 3: 255, # '\x03' - 4: 255, # '\x04' - 5: 255, # '\x05' - 6: 255, # '\x06' - 7: 255, # '\x07' - 8: 255, # '\x08' - 9: 255, # '\t' - 10: 255, # '\n' - 11: 255, # '\x0b' - 12: 255, # '\x0c' - 13: 255, # '\r' - 14: 255, # '\x0e' - 15: 255, # '\x0f' - 16: 255, # '\x10' - 17: 255, # '\x11' - 18: 255, # '\x12' - 19: 255, # '\x13' - 20: 255, # '\x14' - 21: 255, # '\x15' - 22: 255, # '\x16' - 23: 255, # '\x17' - 24: 255, # '\x18' - 25: 255, # '\x19' - 26: 255, # '\x1a' - 27: 255, # '\x1b' - 28: 255, # '\x1c' - 29: 255, # '\x1d' - 30: 255, # '\x1e' - 31: 255, # '\x1f' - 32: 255, # ' ' - 33: 255, # '!' - 34: 255, # '"' - 35: 255, # '#' - 36: 255, # '$' - 37: 255, # '%' - 38: 255, # '&' - 39: 255, # "'" - 40: 255, # '(' - 41: 255, # ')' - 42: 255, # '*' - 43: 255, # '+' - 44: 255, # ',' - 45: 255, # '-' - 46: 255, # '.' - 47: 255, # '/' - 48: 255, # '0' - 49: 255, # '1' - 50: 255, # '2' - 51: 255, # '3' - 52: 255, # '4' - 53: 255, # '5' - 54: 255, # '6' - 55: 255, # '7' - 56: 255, # '8' - 57: 255, # '9' - 58: 255, # ':' - 59: 255, # ';' - 60: 255, # '<' - 61: 255, # '=' - 62: 255, # '>' - 63: 255, # '?' 
- 64: 255, # '@' - 65: 23, # 'A' - 66: 37, # 'B' - 67: 47, # 'C' - 68: 39, # 'D' - 69: 29, # 'E' - 70: 52, # 'F' - 71: 36, # 'G' - 72: 45, # 'H' - 73: 53, # 'I' - 74: 60, # 'J' - 75: 16, # 'K' - 76: 49, # 'L' - 77: 20, # 'M' - 78: 46, # 'N' - 79: 42, # 'O' - 80: 48, # 'P' - 81: 69, # 'Q' - 82: 44, # 'R' - 83: 35, # 'S' - 84: 31, # 'T' - 85: 51, # 'U' - 86: 38, # 'V' - 87: 62, # 'W' - 88: 65, # 'X' - 89: 43, # 'Y' - 90: 56, # 'Z' - 91: 255, # '[' - 92: 255, # '\\' - 93: 255, # ']' - 94: 255, # '^' - 95: 255, # '_' - 96: 255, # '`' - 97: 1, # 'a' - 98: 21, # 'b' - 99: 28, # 'c' - 100: 12, # 'd' - 101: 2, # 'e' - 102: 18, # 'f' - 103: 27, # 'g' - 104: 25, # 'h' - 105: 3, # 'i' - 106: 24, # 'j' - 107: 10, # 'k' - 108: 5, # 'l' - 109: 13, # 'm' - 110: 4, # 'n' - 111: 15, # 'o' - 112: 26, # 'p' - 113: 64, # 'q' - 114: 7, # 'r' - 115: 8, # 's' - 116: 9, # 't' - 117: 14, # 'u' - 118: 32, # 'v' - 119: 57, # 'w' - 120: 58, # 'x' - 121: 11, # 'y' - 122: 22, # 'z' - 123: 255, # '{' - 124: 255, # '|' - 125: 255, # '}' - 126: 255, # '~' - 127: 255, # '\x7f' - 128: 180, # '\x80' - 129: 179, # '\x81' - 130: 178, # '\x82' - 131: 177, # '\x83' - 132: 176, # '\x84' - 133: 175, # '\x85' - 134: 174, # '\x86' - 135: 173, # '\x87' - 136: 172, # '\x88' - 137: 171, # '\x89' - 138: 170, # '\x8a' - 139: 169, # '\x8b' - 140: 168, # '\x8c' - 141: 167, # '\x8d' - 142: 166, # '\x8e' - 143: 165, # '\x8f' - 144: 164, # '\x90' - 145: 163, # '\x91' - 146: 162, # '\x92' - 147: 161, # '\x93' - 148: 160, # '\x94' - 149: 159, # '\x95' - 150: 101, # '\x96' - 151: 158, # '\x97' - 152: 157, # '\x98' - 153: 156, # '\x99' - 154: 155, # '\x9a' - 155: 154, # '\x9b' - 156: 153, # '\x9c' - 157: 152, # '\x9d' - 158: 151, # '\x9e' - 159: 106, # '\x9f' - 160: 150, # '\xa0' - 161: 149, # '¡' - 162: 148, # '¢' - 163: 147, # '£' - 164: 146, # '¤' - 165: 145, # '¥' - 166: 144, # '¦' - 167: 100, # '§' - 168: 143, # '¨' - 169: 142, # '©' - 170: 141, # 'ª' - 171: 140, # '«' - 172: 139, # '¬' - 173: 138, # '\xad' - 174: 137, # '®' - 175: 136, # '¯' - 176: 94, # '°' - 177: 80, # '±' - 178: 93, # '²' - 179: 135, # '³' - 180: 105, # '´' - 181: 134, # 'µ' - 182: 133, # '¶' - 183: 63, # '·' - 184: 132, # '¸' - 185: 131, # '¹' - 186: 130, # 'º' - 187: 129, # '»' - 188: 128, # '¼' - 189: 127, # '½' - 190: 126, # '¾' - 191: 125, # '¿' - 192: 124, # 'À' - 193: 104, # 'Á' - 194: 73, # 'Â' - 195: 99, # 'Ã' - 196: 79, # 'Ä' - 197: 85, # 'Å' - 198: 123, # 'Æ' - 199: 54, # 'Ç' - 200: 122, # 'È' - 201: 98, # 'É' - 202: 92, # 'Ê' - 203: 121, # 'Ë' - 204: 120, # 'Ì' - 205: 91, # 'Í' - 206: 103, # 'Î' - 207: 119, # 'Ï' - 208: 68, # 'Ğ' - 209: 118, # 'Ñ' - 210: 117, # 'Ò' - 211: 97, # 'Ó' - 212: 116, # 'Ô' - 213: 115, # 'Õ' - 214: 50, # 'Ö' - 215: 90, # '×' - 216: 114, # 'Ø' - 217: 113, # 'Ù' - 218: 112, # 'Ú' - 219: 111, # 'Û' - 220: 55, # 'Ü' - 221: 41, # 'İ' - 222: 40, # 'Ş' - 223: 86, # 'ß' - 224: 89, # 'à' - 225: 70, # 'á' - 226: 59, # 'â' - 227: 78, # 'ã' - 228: 71, # 'ä' - 229: 82, # 'å' - 230: 88, # 'æ' - 231: 33, # 'ç' - 232: 77, # 'è' - 233: 66, # 'é' - 234: 84, # 'ê' - 235: 83, # 'ë' - 236: 110, # 'ì' - 237: 75, # 'í' - 238: 61, # 'î' - 239: 96, # 'ï' - 240: 30, # 'ğ' - 241: 67, # 'ñ' - 242: 109, # 'ò' - 243: 74, # 'ó' - 244: 87, # 'ô' - 245: 102, # 'õ' - 246: 34, # 'ö' - 247: 95, # '÷' - 248: 81, # 'ø' - 249: 108, # 'ù' - 250: 76, # 'ú' - 251: 72, # 'û' - 252: 17, # 'ü' - 253: 6, # 'ı' - 254: 19, # 'ş' - 255: 107, # 'ÿ' -} - -ISO_8859_9_TURKISH_MODEL = SingleByteCharSetModel( - charset_name="ISO-8859-9", - language="Turkish", - 
char_to_order_map=ISO_8859_9_TURKISH_CHAR_TO_ORDER, - language_model=TURKISH_LANG_MODEL, - typical_positive_ratio=0.97029, - keep_ascii_letters=True, - alphabet="ABCDEFGHIJKLMNOPRSTUVYZabcdefghijklmnoprstuvyzÂÇÎÖÛÜâçîöûüĞğİıŞş", -) diff --git a/spaces/Rbrq/DeticChatGPT/detic/data/custom_build_augmentation.py b/spaces/Rbrq/DeticChatGPT/detic/data/custom_build_augmentation.py deleted file mode 100644 index 9642c15e582fc953ecaa378a325b4fa02f4e7d28..0000000000000000000000000000000000000000 --- a/spaces/Rbrq/DeticChatGPT/detic/data/custom_build_augmentation.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import logging -import numpy as np -import pycocotools.mask as mask_util -import torch -from fvcore.common.file_io import PathManager -from PIL import Image - - -from detectron2.data import transforms as T -from .transforms.custom_augmentation_impl import EfficientDetResizeCrop - -def build_custom_augmentation(cfg, is_train, scale=None, size=None, \ - min_size=None, max_size=None): - """ - Create a list of default :class:`Augmentation` from config. - Now it includes resizing and flipping. - - Returns: - list[Augmentation] - """ - if cfg.INPUT.CUSTOM_AUG == 'ResizeShortestEdge': - if is_train: - min_size = cfg.INPUT.MIN_SIZE_TRAIN if min_size is None else min_size - max_size = cfg.INPUT.MAX_SIZE_TRAIN if max_size is None else max_size - sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING - else: - min_size = cfg.INPUT.MIN_SIZE_TEST - max_size = cfg.INPUT.MAX_SIZE_TEST - sample_style = "choice" - augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)] - elif cfg.INPUT.CUSTOM_AUG == 'EfficientDetResizeCrop': - if is_train: - scale = cfg.INPUT.SCALE_RANGE if scale is None else scale - size = cfg.INPUT.TRAIN_SIZE if size is None else size - else: - scale = (1, 1) - size = cfg.INPUT.TEST_SIZE - augmentation = [EfficientDetResizeCrop(size, scale)] - else: - assert 0, cfg.INPUT.CUSTOM_AUG - - if is_train: - augmentation.append(T.RandomFlip()) - return augmentation - - -build_custom_transform_gen = build_custom_augmentation -""" -Alias for backward-compatibility. -""" \ No newline at end of file diff --git a/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/docs/TRAINING.md b/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/docs/TRAINING.md deleted file mode 100644 index 99238b612d961a5a6aa29885bad23808c7aa6e07..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/docs/TRAINING.md +++ /dev/null @@ -1,72 +0,0 @@ - -# Traininig ASpanFormer - -## Dataset setup -Generally, two parts of data are needed for training ASpanFormer, the original dataset, i.e., ScanNet and MegaDepth, and the offline generated dataset indices. The dataset indices store scenes, image pairs, and other metadata within each dataset used for training/validation/testing. For the MegaDepth dataset, the relative poses between images used for training are directly cached in the indexing files. However, the relative poses of ScanNet image pairs are not stored due to the enormous resulting file size. - -### Download datasets -#### MegaDepth -We use depth maps provided in the [original MegaDepth dataset](https://www.cs.cornell.edu/projects/megadepth/) as well as undistorted images, corresponding camera intrinsics and extrinsics preprocessed by [D2-Net](https://github.com/mihaidusmanu/d2-net#downloading-and-preprocessing-the-megadepth-dataset). You can download them separately from the following links. 
-- [MegaDepth undistorted images and processed depths](https://www.cs.cornell.edu/projects/megadepth/dataset/Megadepth_v1/MegaDepth_v1.tar.gz) - - Note that we only use depth maps. - - Path of the download data will be referreed to as `/path/to/megadepth` -- [D2-Net preprocessed images](https://drive.google.com/drive/folders/1hxpOsqOZefdrba_BqnW490XpNX_LgXPB) - - Images are undistorted manually in D2-Net since the undistorted images from MegaDepth do not come with corresponding intrinsics. - - Path of the download data will be referreed to as `/path/to/megadepth_d2net` - -#### ScanNet -Please set up the ScanNet dataset following [the official guide](https://github.com/ScanNet/ScanNet#scannet-data) -> NOTE: We use the [python exported data](https://github.com/ScanNet/ScanNet/tree/master/SensReader/python), -instead of the [c++ exported one](https://github.com/ScanNet/ScanNet/tree/master/SensReader/c%2B%2B). - -### Download the dataset indices - -You can download the required dataset indices from the [following link](https://drive.google.com/drive/folders/1DOcOPZb3-5cWxLqn256AhwUVjBPifhuf). -After downloading, unzip the required files. -```shell -unzip downloaded-file.zip - -# extract dataset indices -tar xf train-data/megadepth_indices.tar -tar xf train-data/scannet_indices.tar - -# extract testing data (optional) -tar xf testdata/megadepth_test_1500.tar -tar xf testdata/scannet_test_1500.tar -``` - -### Build the dataset symlinks - -We symlink the datasets to the `data` directory under the main ASpanFormer project directory. - -```shell -# scannet -# -- # train and test dataset -ln -s /path/to/scannet_train/* /path/to/ASpanFormer/data/scannet/train -ln -s /path/to/scannet_test/* /path/to/ASpanFormer/data/scannet/test -# -- # dataset indices -ln -s /path/to/scannet_indices/* /path/to/ASpanFormer/data/scannet/index - -# megadepth -# -- # train and test dataset (train and test share the same dataset) -ln -sv /path/to/megadepth/phoenix /path/to/megadepth_d2net/Undistorted_SfM /path/to/ASpanFormer/data/megadepth/train -ln -sv /path/to/megadepth/phoenix /path/to/megadepth_d2net/Undistorted_SfM /path/to/ASpanFormer/data/megadepth/test -# -- # dataset indices -ln -s /path/to/megadepth_indices/* /path/to/ASpanFormer/data/megadepth/index -``` - - -## Training -We provide training scripts of ScanNet and MegaDepth. The results in the ASpanFormer paper can be reproduced with 8 v100 GPUs. For a different setup, we scale the learning rate and its warm-up linearly, but the final evaluation results might vary due to the different batch size & learning rate used. Thus the reproduction of results in our paper is not guaranteed. 
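
The linear learning-rate scaling mentioned above can be illustrated with a short sketch. This is not taken from the ASpanFormer training scripts; all names and values below (canonical batch size, canonical learning rate, warm-up steps) are placeholder assumptions used only to show the rule: the learning rate is multiplied by the ratio of the actual total batch size to the canonical one, and the warm-up is shortened by the same factor.

```python
# Hedged sketch of linear LR / warm-up scaling (assumed names and values,
# not the real ASpanFormer config keys).
import math

canonical_bs = 64        # batch size the base LR was tuned for (assumption)
canonical_lr = 8e-3      # base learning rate (assumption)
canonical_warmup = 1875  # warm-up steps at the canonical batch size (assumption)

n_gpus, batch_per_gpu = 8, 1          # e.g. the 8x V100 setup mentioned above
true_bs = n_gpus * batch_per_gpu

scaling = true_bs / canonical_bs
true_lr = canonical_lr * scaling                       # LR grows linearly with batch size
warmup_steps = math.floor(canonical_warmup / scaling)  # warm-up shrinks by the same factor

print(f"scaling={scaling:.3f}  lr={true_lr:.2e}  warm-up steps={warmup_steps}")
```

With a smaller setup (for example 2 GPUs) the scaled learning rate is correspondingly lower and the warm-up longer, which is one reason the paper's numbers may still not be exactly reproduced even after rescaling.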
- - -### Training on ScanNet -``` shell -scripts/reproduce_train/indoor.sh -``` - - -### Training on MegaDepth -``` shell -scripts/reproduce_train/outdoor.sh -``` \ No newline at end of file diff --git a/spaces/RedBaron5/PatentSolver/functions.py b/spaces/RedBaron5/PatentSolver/functions.py deleted file mode 100644 index 89aa53ba7ad6d6682d5b2dc087b5c4b4971c6706..0000000000000000000000000000000000000000 --- a/spaces/RedBaron5/PatentSolver/functions.py +++ /dev/null @@ -1,648 +0,0 @@ - -# ~~~~~~~~~~~~~~~~~~~~~~~~ # -# ~~~ Import libraries ~~~ # -# ~~~~~~~~~~~~~~~~~~~~~~~~ # - -# Google Scraper Class # -from google_patent_scraper import scraper_class - -# Context Manager # -from contextlib import contextmanager - -# Writing/Reading -import csv -import numpy as np -import pandas as pd - -# clean patent # -import re - -# Multiprocessing # -import multiprocessing as mp - -# parse xml to text -from bs4 import BeautifulSoup as bs - -# zip folder to download -import shutil -import base64 -import streamlit as st -import os - -# extract problems -from App.bin import constants -from App.bin.InputHandler import InputHandler -from App.bin.PatentHandler import PatentHandler -from App.bin.CorpusProcessor import CorpusProcessor -import json -from pandas import json_normalize -import glob - - - -# ~~~~~~~~~~~~~~~~~~~ # -# ~~~~ Functions ~~~~ # -# ~~~~~~~~~~~~~~~~~~~ # - -def single_process_scraper(patent,path_to_data_file,data_column_order): - """Scrapes a single google patent using the google scraper class - - Function does not return any values, instead it writes the output - of the data_patent_details into a csv file specified in the path_to_data_file - parameter - - Inputs: - patent (str) : patent number including country prefix - lock (obj) : to prevent collisions, function uses a lock. You can pass whichever - lock you want to this parameter - path_to_data_file : absolute path to csv file to write data_patent_details to - data_column_order : name of columns in order they will be saved in csv file - - """ - # ~ Initialize scraper class ~ # - scraper=scraper_class() - - # ~ Scrape single patent ~ # - err, soup, url = scraper.request_single_patent(patent) - - # Checks if the scrape is successful. 
- # If successful -> parse text and deposit into csv file - # Else -> print error statement - - if err=='Success': - patent_parsed = scraper.get_scraped_data(soup,url,patent) - - # Save the parsed data_patent_details to a csv file - # using multiprocessing lock function - # to prevent collisions - with lock: - with open(path_to_data_file,'a',newline='') as ofile: - writer = csv.DictWriter(ofile, fieldnames=data_column_order) - writer.writerow(patent_parsed) - else: - print('Patent {0} has error code {1}'.format(patent,err)) - -# Allow pool to accept keyword arguments -@contextmanager -def poolcontext(*args, **kwargs): - pool = mp.Pool(*args, **kwargs) - yield pool - pool.terminate() - -def init(l): - """Creates lock object that is global, for use in sharing - across processes - """ - global lock - lock = l - - -def patentinput(patent_string): - """ - remove space among patent numbers from users' inputs - """ - patent_string = patent_string.replace(" ", "") #remove space that user tpyed - list_results = list(patent_string.split(",")) - return list_results - -def clean_patent(table): - """clean raw patent details from website - """ - - list_inventor_name = np.array([]) # create an empty list - - inventor_name = table['inventor_name'] - for line in inventor_name: - new_line = re.sub(r'"inventor_name":', '', line) - new_line = re.sub(r'\{|\}|\[|\]|\"', '', new_line) - # print(new_line) - list_inventor_name = np.append(list_inventor_name, new_line) - - new_table_inventor_name = pd.DataFrame(list_inventor_name, columns=['inventor_name']) - # new_table.to_csv('saved_data/cleaned_patent_details') - - ##clean assignee_name_orig feature - list_assignee_name = np.array([]) - assignee_name = table['assignee_name_orig'] - for line in assignee_name: - new_line = re.sub(r'"assignee_name":', '', line) ##### errors - new_line = re.sub(r'\{|\}|\[|\]|\"', '', new_line) - list_assignee_name = np.append(list_assignee_name, new_line) - - new_table_assignee_name = pd.DataFrame(list_assignee_name, columns=['assignee_name_orig']) - # print(new_table_assignee_name) - # - ##clean assignee_name_current feature - list_assignee_name_current = np.array([]) - assignee_name_current = table['assignee_name_current'] - for line in assignee_name_current: - new_line = re.sub(r'("assignee_name":)|(\\n\s\s)|(\{|\}|\[|\]|\")', '', line) - list_assignee_name_current = np.append(list_assignee_name_current, new_line) - - new_table_assignee_name_current = pd.DataFrame(list_assignee_name_current, columns=['assignee_name_current']) - # print(new_table_assignee_name_current) - # - ##clean forward_cite_no_family feature - list_forward_cite_no_family = np.array([]) - forward_cite_no_family = table['forward_cite_no_family'] - for line in forward_cite_no_family: - new_line = re.sub( - r'("patent_number":)|(\\n)|(\{|\}|\[|\]|\")|(priority_date)|(:)|(pub_date)|(\d{4}-\d{2}-\d{2})', '', line) - new_line = re.sub(r'\s\,\s', '', new_line) - list_forward_cite_no_family = np.append(list_forward_cite_no_family, new_line) - - new_table_forward_cite_no_family = pd.DataFrame(list_forward_cite_no_family, columns=['forward_cite_no_family']) - # print(new_table_forward_cite_no_family) - # - ##clean forward_cite_yes_family feature - list_forward_cite_yes_family = np.array([]) - forward_cite_yes_family = table['forward_cite_yes_family'] - for line in forward_cite_yes_family: - new_line = re.sub( - r'("patent_number":)|(\\n)|(\{|\}|\[|\]|\")|(priority_date)|(:)|(pub_date)|(\d{4}-\d{2}-\d{2})', '', line) - new_line = re.sub(r'\s\,\s', '', new_line) - 
list_forward_cite_yes_family = np.append(list_forward_cite_yes_family, new_line) - - new_table_forward_cite_yes_family = pd.DataFrame(list_forward_cite_yes_family, columns=['forward_cite_yes_family']) - # print(new_table_forward_cite_yes_family) - - ##clean backward_cite_no_family feature - list_backward_cite_no_family = np.array([]) - backward_cite_no_family = table['backward_cite_no_family'] - for line in backward_cite_no_family: - new_line = re.sub( - r'("patent_number":)|(\\n)|(\{|\}|\[|\]|\")|(priority_date)|(:)|(pub_date)|(\d{4}-\d{2}-\d{2})', '', line) - new_line = re.sub(r'\s\,\s', '', new_line) - list_backward_cite_no_family = np.append(list_backward_cite_no_family, new_line) - - new_table_backward_cite_no_family = pd.DataFrame(list_backward_cite_no_family, columns=['backward_cite_no_family']) - # print(new_table_backward_cite_no_family) - - ##clean backward_cite_yes_family feature - list_backward_cite_yes_family = np.array([]) - backward_cite_yes_family = table['backward_cite_yes_family'] - for line in backward_cite_yes_family: - new_line = re.sub( - r'("patent_number":)|(\\n)|(\{|\}|\[|\]|\")|(priority_date)|(:)|(pub_date)|(\d{4}-\d{2}-\d{2})', '', line) - new_line = re.sub(r'\s\,\s', '', new_line) - list_backward_cite_yes_family = np.append(list_backward_cite_yes_family, new_line) - - new_table_backward_cite_yes_family = pd.DataFrame(list_backward_cite_yes_family, - columns=['backward_cite_yes_family']) - # print(new_table_backward_cite_yes_family) - - ##rename url feature - list_patent_number = np.array([]) - patent_number = table['url'] - for line in patent_number: - list_patent_number = np.append(list_patent_number, line) - - new_table_patent_number = pd.DataFrame(list_patent_number, columns=['patent_number']) - # print(new_table_patent_number) - - ##rename patent feature - list_patent_link = np.array([]) - patent_link = table['patent'] - for line in patent_link: - list_patent_link = np.append(list_patent_link, line) - - new_table_patent_link = pd.DataFrame(list_patent_link, columns=['patent_link']) - # print(new_table_patent_link) - - ##rename abstract_text - list_abstract_text = np.array([]) - abstract_text = table['abstract_text'] - for line in abstract_text: - list_abstract_text = np.append(list_abstract_text, line) - - new_table_abstract_text = pd.DataFrame(abstract_text, columns=['abstract_text']) - # print(new_table_patent_link) - - ################################### - - ## concatenate all of sub dataframes to the final results - results = pd.concat([new_table_patent_number, table[['pub_date', 'priority_date', 'grant_date', 'filing_date']], - new_table_inventor_name, new_table_assignee_name, new_table_assignee_name_current, - new_table_forward_cite_no_family, new_table_forward_cite_yes_family, - new_table_backward_cite_yes_family, new_table_backward_cite_no_family, new_table_patent_link, - new_table_abstract_text], axis=1) - - return results - - -def count_patent(patent_table): - """count the patent features""" - - ##count the number of assignee_name feature - assignee_name = pd.DataFrame(patent_table['assignee_name_orig']) - count_assignee_name = assignee_name.applymap(lambda x: str.count(x, ',') + 1) - count_assignee_name = count_assignee_name.rename(columns={'assignee_name_orig': 'count_assignee_name'}) - # print(count_assignee_name) - - ##count the number of inventor_name feature - inventor_name = pd.DataFrame(patent_table['inventor_name']) - count_inventor_name = inventor_name.applymap(lambda x: str.count(x, ',') + 1) - count_inventor_name = 
count_inventor_name.rename(columns={'inventor_name': 'count_inventor_name'}) - # print(count_inventor_name) - - ##count the number of assignee_name_current feature - assignee_name_current = pd.DataFrame(patent_table['assignee_name_current']) - # print(assignee_name_current) - - ##replace NaN as int(0) - assignee_name_current_replace_NaN = lambda x: int(0) if pd.isnull(x) else str.count(x, ',') + 1 - count_assignee_name_current = assignee_name_current.applymap(assignee_name_current_replace_NaN) - count_assignee_name_current = count_assignee_name_current.rename( - columns={'assignee_name_current': 'count_assignee_name_current'}) - # print(count_assignee_name_current) - - ##count forward_cite_no_family - forward_cite_no_family = pd.DataFrame(patent_table['forward_cite_no_family']) - forward_cite_no_family_replace_NaN = lambda x: int(0) if pd.isnull(x) else str.count(x, ',') - count_forward_cite_no_family = forward_cite_no_family.applymap(forward_cite_no_family_replace_NaN) - count_forward_cite_no_family = count_forward_cite_no_family.rename( - columns={'forward_cite_no_family': 'count_forward_cite_no_family'}) - # print(count_forward_cite_no_family) - - ##count forward_cite_yes_family - forward_cite_yes_family = pd.DataFrame(patent_table['forward_cite_yes_family']) - forward_cite_yes_family_replace_NaN = lambda x: int(0) if pd.isnull(x) else str.count(x, ',') - count_forward_cite_yes_family = forward_cite_yes_family.applymap(forward_cite_yes_family_replace_NaN) - count_forward_cite_yes_family = count_forward_cite_yes_family.rename( - columns={'forward_cite_yes_family': 'count_forward_cite_yes_family'}) - # print(count_forward_cite_yes_family) - - ##count backward_cite_no_family - backward_cite_no_family = pd.DataFrame(patent_table['backward_cite_no_family']) - backward_cite_no_family_replace_NaN = lambda x: int(0) if pd.isnull(x) else str.count(x, ',') - count_backward_cite_no_family = backward_cite_no_family.applymap(backward_cite_no_family_replace_NaN) - count_backward_cite_no_family = count_backward_cite_no_family.rename( - columns={'backward_cite_no_family': 'count_backward_cite_no_family'}) - # print(count_backward_cite_no_family) - - ##count backward_cite_yes_family - backward_cite_yes_family = pd.DataFrame(patent_table['backward_cite_yes_family']) - backward_cite_yes_family_replace_NaN = lambda x: int(0) if pd.isnull(x) else str.count(x, ',') - count_backward_cite_yes_family = backward_cite_yes_family.applymap(backward_cite_yes_family_replace_NaN) - count_backward_cite_yes_family = count_backward_cite_yes_family.rename( - columns={'backward_cite_yes_family': 'count_backward_cite_yes_family'}) - # print(count_backward_cite_yes_family) - - ##concate dataframes to the final cleaned dataset - results = pd.concat([patent_table[['patent_number', 'pub_date', 'priority_date', - 'grant_date', 'filing_date', 'inventor_name']], count_inventor_name, - patent_table[['assignee_name_orig']], count_assignee_name, - patent_table[['assignee_name_current']], count_assignee_name_current, - patent_table[['forward_cite_no_family']], count_forward_cite_no_family, - patent_table[['forward_cite_yes_family']], count_forward_cite_yes_family, - patent_table[['backward_cite_no_family']], count_backward_cite_no_family, - patent_table[['backward_cite_yes_family']], count_backward_cite_yes_family, - patent_table[['patent_link', 'abstract_text']]], axis=1) - - return results - - -def XMLtoTEXT(patent_xml, saved_file_path): - # read file - tree = bs(patent_xml, "html.parser") - - # get title - - print('Title:') - title 
= tree.find_all("invention-title") - patent_title = title[0].text - print(patent_title) - - # get number - print("Patent number:") - patent_number = tree.find_all('doc-number') - patent_number = 'US' + patent_number[0].text - patent_number_new = re.sub(r'US0', 'US', patent_number) - print(patent_number_new) - - # get domain - print('Domain:') - domain = tree.find_all('classification-level') - patent_domain = domain[0].text - print(patent_domain) - - # get date of publication - print("Publication date:") - date = tree.find_all("date") - patent_pubdate = date[0].text - print(patent_pubdate) - - # get abstract - print('Abstract:') - ab = tree.find_all("abstract") - patent_abstract = ab[0].text - print(patent_abstract) - - # get claim - print('Claims:') - claims = tree.find_all("claim-text") - for claim in claims: - print(claim.text) - - # get description - print('Description:') - description = tree.find_all('description') - for des in description: - print(des.text) - - # save file to the place - with open(saved_file_path + patent_number_new + '.txt', 'w') as text_file: - text_file.write("Patent title" + '\n' + patent_title + - '\n' * 2 + "Patent number" + '\n' + - patent_number_new + '\n' * 2 + "Domain" + '\n' + patent_domain + '\n' * 2 + "Publication date" + '\n' + patent_pubdate - + '\n' * 2 + "Abstract" + '\n' + patent_abstract - + '\n' * 2 + 'Claims' + '\n') # save patent title, number, domain, publication data_patent_details, abstract - for claim in claims: - text_file.write(claim.text + '\n') - text_file.write('\n' + 'Description' + '\n') - for des in description: - text_file.write('\n' + des.text + '\n') - - return text_file - - -# to download patents (.txt) by zip file -def create_download_zip(zip_directory, zip_path, filename): - """ - zip_directory (str): path to directory you want to zip - zip_path (str): where you want to save zip file - filename (str): download filename for user who download this - """ - shutil.make_archive(zip_path+filename, 'zip', zip_directory) - - with open(zip_path+filename+'.zip', 'rb') as f: - st.download_button( - label = 'Download', - data = f, - file_name='patent.zip', - mime= 'zip' - ) - - - -# save input files (txt) into the folder -def save_uploadedfile(uploadedfile): - with open(os.path.join('Data/input/US_patents/',uploadedfile.name ), 'wb') as f: - f.write(uploadedfile.getbuffer()) - # return st.success('Saved File:{}'.format(uploadedfile.name)) - -# to extract problems from patents -def extractor (folder): - input_folder = constants.DATA_INPUT + folder - files_extension = "*." 
+ 'txt' - - iInput = InputHandler(input_folder, files_extension) - input_data = iInput.get_input() - - pretreat_data = PatentHandler(input_data) - clean_patent_data = pretreat_data.pretreat_data() - - process_data = CorpusProcessor(clean_patent_data, input_folder, files_extension) - processed_data = process_data.process_corpus() - - # convert json to dataframe - with open('Data/graphs/US_patents/graph.json') as json_data: - data = json.load(json_data) - - concept_df = json_normalize(data['problem_graph'], sep="_") - - concept_df = concept_df[['concept_sentence', 'concept_source', 'concept_type']] - problem_df = concept_df.rename(columns={"concept_sentence": "problem", 'concept_source': 'patent_number', - 'concept_type': 'type'}) - # choose problems - problem_new = problem_df.loc[problem_df['type'] == 'problem'] - - print(problem_new) - - new_table_test = problem_new['patent_number'].apply( - lambda x: re.search(r'(?<=US_patents\/).*?(?=.txt)', x).group()) - - # assign patent number to the corresponding feature - problem_results = problem_new.assign(patent_number=new_table_test) - - print(problem_results[['problem', 'patent_number']]) - problem_results = problem_results[['patent_number', 'problem']] - problem_results.to_csv('data_problem/problem.csv', - index=False) - -@st.cache -def convert_df(df): - # IMPORTANT: Cache the conversion to prevent computation on every rerun - return df.to_csv().encode('utf-8') - - -def extract_info_text(): - new = pd.DataFrame(columns=['title', 'patent_number', 'domain', 'publication_date']) - - # use glob to get all the txt files in the folder - path = 'Data/input/US_patents' - txt_files = glob.glob(os.path.join(path, "*.txt")) - for f in txt_files: - df = pd.read_csv(f, sep='\n', header=None, names=['content']) - print(df) - # extract patent information from text - new = new.append({'patent_number': df.iloc[3, 0], 'title': df.iloc[1, 0], - 'domain': df.iloc[5, 0], 'publication_date': df.iloc[7, 0]}, ignore_index=True) - - print(new) - - problem = pd.read_csv('data_problem/problem.csv') - final = pd.merge(problem, new, on='patent_number', how='left') - return final - -def input_domain(user_input_domain): - if user_input_domain == 'A (Human necessities)': - domain = 'A' - elif user_input_domain == 'B (Performing operations; transporting)': - domain = 'B' - elif user_input_domain == 'C (Chemistry; metallurgy)': - domain = 'C' - elif user_input_domain == 'D (Textiles; paper)': - domain = 'D' - elif user_input_domain == 'E (Fixed constructions)': - domain = 'E' - elif user_input_domain == 'F (Mechanical engineering; lighting; heating; weapons; blasting engines or pumps': - domain = 'F' - elif user_input_domain == 'G (Physics)': - domain = 'G' - elif user_input_domain == 'H (Electricity)': - domain = 'H' - return domain - -# the function for choosing month period that user choosed -def choosing_month_period(problem_corpus,start_year, end_year, start_month, end_month): - problem_corpus = problem_corpus[problem_corpus['publication_year'].between(start_year, end_year)] - if start_year != end_year: # 2014- 2015 #2014- 2016 - if start_month == end_month: # /01/ /01/ - if end_year == start_year + 1: # 2014/03/01 - 2015/03/01 #2014/01/01 - 2015/01/23 #2014/12/01 - 2015/12/23 - problem_corpus.loc[(problem_corpus['publication_year'] == start_year) & ( - problem_corpus['publication_month'].between(start_month, 12)), 'label'] = 'true' - problem_corpus.loc[(problem_corpus['publication_year'] == end_year) & ( - problem_corpus['publication_month'].between(1, end_month)), 
'label'] = 'true' - - elif end_year > start_year + 1: # 2014/01/01 - 2016/01/23 #2014/12/01 - 2016/12/23 # 2014/03/01 - 2016/03/01 - if start_month == 1: # 2014/01/01 - 2016/01/23 - problem_corpus.loc[( - problem_corpus['publication_year'] == end_year) & ( - problem_corpus['publication_month'].between( - end_month + 1, 12)), 'label'] = 'false' - problem_corpus.loc[(problem_corpus.label != 'false'), 'label'] = 'true' - elif start_month == 12: # 2014/12/01 - 2016/12/23 - problem_corpus.loc[( - problem_corpus['publication_year'] == start_year) & ( - problem_corpus['publication_month'].between( - 1, start_month - 1)), 'label'] = 'false' - problem_corpus.loc[(problem_corpus.label != 'false'), 'label'] = 'true' - else: # 2014/03/01 - 2016/03/01 - problem_corpus.loc[( - problem_corpus['publication_year'] == start_year) & ( - problem_corpus['publication_month'].between( - 1, start_month - 1)), 'label'] = 'false' - problem_corpus.loc[( - problem_corpus['publication_year'] == end_year) & ( - problem_corpus['publication_month'].between( - end_month + 1, 12)), 'label'] = 'false' - problem_corpus.loc[(problem_corpus.label != 'false'), 'label'] = 'true' - if start_month > end_month: # /03/ /01/ - if end_year == start_year + 1: # 2014/12/01 - 2015/03/01 #2014/02/01 - 2015/01/23 - problem_corpus.loc[(problem_corpus['publication_year'] == start_year) & ( - problem_corpus['publication_month'].between(start_month, 12)), 'label'] = 'true' - problem_corpus.loc[(problem_corpus['publication_year'] == end_year) & ( - problem_corpus['publication_month'].between(1, end_month)), 'label'] = 'true' - - elif end_year > start_year + 1: # 2014/12/01 - 2016/03/01 #2014/02/01 - 2016/01/23 - problem_corpus.loc[( - problem_corpus['publication_year'] == start_year) & ( - problem_corpus['publication_month'].between( - 1, start_month - 1)), 'label'] = 'false' - problem_corpus.loc[( - problem_corpus['publication_year'] == end_year) & ( - problem_corpus['publication_month'].between( - end_month + 1, 12)), 'label'] = 'false' - problem_corpus.loc[(problem_corpus.label != 'false'), 'label'] = 'true' - - if start_month < end_month: # /01/ /03/ - if end_year == start_year + 1: # 2014/01/01 - 2015/12/01 #2014/02/01 - 2015/11/23 - problem_corpus.loc[(problem_corpus['publication_year'] == start_year) & ( - problem_corpus['publication_month'].between(start_month, 12)), 'label'] = 'true' - problem_corpus.loc[(problem_corpus['publication_year'] == end_year) & ( - problem_corpus['publication_month'].between(1, end_month)), 'label'] = 'true' - - elif end_year > start_year + 1: # 2014/01/01 - 2016/12/01 #2014/02/01 - 2016/11/23 - if start_month == 1 & end_month == 12: # 2014/01/01 - 2016/12/01 - problem_corpus['label'] = 'true' - elif start_month == 1: # 2014/01/01 - 2016/03/01 #2014/01/01 - 2016/11/01 - problem_corpus.loc[(problem_corpus['publication_year'] == end_year) & (problem_corpus[ - 'publication_month'].between( - end_month + 1, 12)), 'label'] = 'false' - problem_corpus.loc[(problem_corpus.label != 'false'), 'label'] = 'true' - elif end_month == 12: # 2014/02/01 - 2016/12/01 #2015/02/01 - 2016/12/01 - problem_corpus.loc[(problem_corpus['publication_year'] == start_year) & (problem_corpus[ - 'publication_month'].between( - 1, start_month - 1)), 'label'] = 'false' - problem_corpus.loc[(problem_corpus.label != 'false'), 'label'] = 'true' - else: # 2014/02/01 - 2016/11/23 - problem_corpus.loc[(problem_corpus['publication_year'] == start_year) & (problem_corpus[ - 'publication_month'].between( - 1, start_month - 1)), 'label'] = 'false' - 
problem_corpus.loc[(problem_corpus['publication_year'] == end_year) & (problem_corpus[ - 'publication_month'].between( - end_month + 1, 12)), 'label'] = 'false' - problem_corpus.loc[(problem_corpus.label != 'false'), 'label'] = 'true' - - - - else: # start_year == end_year: 2012-2012 - problem_corpus = problem_corpus[problem_corpus['publication_year'] == start_year] - if start_month != end_month: # 2014/03/01 - 2014/05/01 2014/01/01 - 2014/05/01 2014/03/01 - 2014/12/01 - problem_corpus.loc[problem_corpus['publication_month'].between(start_month, end_month), 'label'] = 'true' - else: # 2014/03/01 - 2014/03/20 #2014/01/01 - 2014/01/20 - problem_corpus.loc[problem_corpus['publication_month'] == start_month, 'label'] = 'true' - - problem_corpus = problem_corpus.loc[problem_corpus['label'] == 'true'] - problem_corpus= problem_corpus[['patent_number', 'Domain', 'First part Contradiction', - 'Second part Contradiction', 'publication_date', 'publication_year', - 'publication_month', 'label']] - return problem_corpus - -# for IDM-Similar model (word2vec) -def avg_feature_vector(sentence, model, num_features, index2word_set): - words = sentence.split() - feature_vec = np.zeros((num_features, ), dtype='float32') - n_words = 0 - for word in words: - if word in index2word_set: - n_words += 1 - feature_vec = np.add(feature_vec, model[word]) - if (n_words > 0): - feature_vec = np.divide(feature_vec, n_words) - return feature_vec - -def creat_query_id(dataset): - # create query - question = [] - for each in dataset['problem']: - new = "What is the solution for the problem that " + each + "?" - question.append(new) - dataset['question'] = question - - # create id - data = dataset.rename(columns={'Unnamed: 0': 'id'}) - return data - -def csv_to_json (csv_file,json_file): - results = [] - with open(csv_file) as csv_file: - csvReader = csv.DictReader(csv_file) - for row in csvReader: - context = row['Context'] - qas = [] - content = {} - content['id'] = row['id'] - content['question'] = row['question'] - qas.append(content) - result = {} - result['context'] = context - result['qas'] = qas - results.append(result) - - # write data to a json file - with open(json_file, 'w') as jsonFile: - jsonFile.write(json.dumps(results, indent=4)) - - - -def QA_prediction(prediction_file, prediction_output, model): - # if __name__ == '__main__': - with open(prediction_file, 'r') as pre_file: - temp = json.loads(pre_file.read()) - predictions = model.predict(temp) - - with open(prediction_output, 'w') as json_file: - json_file.write(json.dumps(predictions, indent=4)) - print(predictions) - -def json_to_csv(input_file, output_file): - result = pd.read_json(input_file) - print(result.head()) - - result_answer = result.iloc[0][:] - print(result_answer.head()) - print(len(result_answer)) - - df = pd.DataFrame(index=np.arange(len(result_answer)), columns=['id', 'answer']) - print(df) - - for i in range(len(result_answer)): - line = result_answer[i] - print(line) - df.iloc[i, 0] = line['id'] - df.iloc[i, 1] = line['answer'] - - print(df.head()) - df.to_csv(output_file, index=False) diff --git a/spaces/Reself/StableVideo/stablevideo/atlas_data.py b/spaces/Reself/StableVideo/stablevideo/atlas_data.py deleted file mode 100644 index b9dfff4a5fe85a50a7c916cf1822524dcff61c40..0000000000000000000000000000000000000000 --- a/spaces/Reself/StableVideo/stablevideo/atlas_data.py +++ /dev/null @@ -1,291 +0,0 @@ -import random -import json - -import torch -import torch.nn.functional as F -from torch.utils.data import Dataset -from torchvision 
import transforms -from torchvision.transforms.functional import crop - -from stablevideo.atlas_utils import ( - load_neural_atlases_models, - get_frames_data, - get_high_res_atlas, - get_atlas_crops, - reconstruct_video_layer, - create_uv_mask, - get_masks_boundaries, - get_random_crop_params, - get_atlas_bounding_box, - load_video -) - -class AtlasData(): - def __init__(self, video_name): - with open(f"data/{video_name}/config.json", "r") as f: - json_dict = json.load(f) - try: - maximum_number_of_frames = json_dict["number_of_frames"] - except: - maximum_number_of_frames = json_dict["maximum_number_of_frames"] - - config = { - "device": "cpu", - "checkpoint_path": f"data/{video_name}/checkpoint.ckpt", - "resx": json_dict["resx"], - "resy": json_dict["resy"], - "maximum_number_of_frames": maximum_number_of_frames, - "return_atlas_alpha": False, - "grid_atlas_resolution": 2000, - "num_scales": 7, - "masks_border_expansion": 30, - "mask_alpha_threshold": 0.99, # 0.95 - "align_corners": False - } - self.config = config - self.device = config["device"] - - self.min_size = min(self.config["resx"], self.config["resy"]) - self.max_size = max(self.config["resx"], self.config["resy"]) - data_folder = f"data/{video_name}/{video_name}" - self.original_video = load_video( - data_folder, - resize=(self.config["resy"], self.config["resx"]), - num_frames=self.config["maximum_number_of_frames"], - ) - self.original_video = self.original_video.to(self.device) # tensor - - ( - foreground_mapping, - background_mapping, - foreground_atlas_model, - background_atlas_model, - alpha_model, - ) = load_neural_atlases_models(config) - ( - original_background_all_uvs, - original_foreground_all_uvs, - self.all_alpha, - foreground_atlas_alpha, - ) = get_frames_data( - config, - foreground_mapping, - background_mapping, - alpha_model, - ) - - self.background_reconstruction = reconstruct_video_layer(original_background_all_uvs, background_atlas_model) - # using original video for the foreground layer - self.foreground_reconstruction = self.original_video * self.all_alpha - - ( - self.background_all_uvs, - self.scaled_background_uvs, - self.background_min_u, - self.background_min_v, - self.background_max_u, - self.background_max_v, - ) = self.preprocess_uv_values( - original_background_all_uvs, config["grid_atlas_resolution"], device=self.device, layer="background" - ) - ( - self.foreground_all_uvs, - self.scaled_foreground_uvs, - self.foreground_min_u, - self.foreground_min_v, - self.foreground_max_u, - self.foreground_max_v, - ) = self.preprocess_uv_values( - original_foreground_all_uvs, config["grid_atlas_resolution"], device=self.device, layer="foreground" - ) - - self.background_uv_mask = create_uv_mask( - config, - background_mapping, - self.background_min_u, - self.background_min_v, - self.background_max_u, - self.background_max_v, - uv_shift=-0.5, - resolution_shift=1, - ) - self.foreground_uv_mask = create_uv_mask( - config, - foreground_mapping, - self.foreground_min_u, - self.foreground_min_v, - self.foreground_max_u, - self.foreground_max_v, - uv_shift=0.5, - resolution_shift=0, - ) - self.background_grid_atlas = get_high_res_atlas( - background_atlas_model, - self.background_min_v, - self.background_min_u, - self.background_max_v, - self.background_max_u, - config["grid_atlas_resolution"], - device=config["device"], - layer="background", - ) - self.foreground_grid_atlas = get_high_res_atlas( - foreground_atlas_model, - self.foreground_min_v, - self.foreground_min_u, - self.foreground_max_v, - 
self.foreground_max_u, - config["grid_atlas_resolution"], - device=config["device"], - layer="foreground", - ) - if config["return_atlas_alpha"]: - self.foreground_atlas_alpha = foreground_atlas_alpha # used for visualizations - self.cnn_min_crop_size = 2 ** self.config["num_scales"] + 1 - - self.mask_boundaries = get_masks_boundaries( - alpha_video=self.all_alpha.cpu(), - border=self.config["masks_border_expansion"], - threshold=self.config["mask_alpha_threshold"], - min_crop_size=self.cnn_min_crop_size, - ) - self.cropped_foreground_atlas, self.foreground_atlas_bbox = get_atlas_bounding_box( - self.mask_boundaries, self.foreground_grid_atlas, self.foreground_all_uvs - ) - - self.step = -1 - self.edited_atlas_dict, self.edit_dict, self.uv_mask = {}, {}, {} - - @staticmethod - def preprocess_uv_values(layer_uv_values, resolution, device="cuda", layer="background"): - if layer == "background": - shift = 1 - else: - shift = 0 - uv_values = (layer_uv_values + shift) * resolution - min_u, min_v = uv_values.reshape(-1, 2).min(dim=0).values.long() - uv_values -= torch.tensor([min_u, min_v], device=device) - max_u, max_v = uv_values.reshape(-1, 2).max(dim=0).values.ceil().long() - - edge_size = torch.tensor([max_u, max_v], device=device) - scaled_uv_values = ((uv_values.reshape(-1, 2) / edge_size) * 2 - 1).unsqueeze(1).unsqueeze(0) - - return uv_values, scaled_uv_values, min_u, min_v, max_u, max_v - - def get_random_crop_data(self, crop_size): - t = random.randint(0, self.config["maximum_number_of_frames"] - 1) - y_start, x_start, h_crop, w_crop = get_random_crop_params((self.config["resx"], self.config["resy"]), crop_size) - return y_start, x_start, h_crop, w_crop, t - - def get_global_crops_multi(self, keyframes, res): - foreground_atlas_crops = [] - background_atlas_crops = [] - foreground_uvs = [] - background_uvs = [] - background_alpha_crops = [] - foreground_alpha_crops = [] - original_background_crops = [] - original_foreground_crops = [] - output_dict = {} - - self.config["crops_min_cover"] = 0.95 - self.config["grid_atlas_resolution"] = res - - for cur_frame in keyframes: - y_start, x_start, frame_h, frame_w = self.mask_boundaries[cur_frame].tolist() - crop_size = ( - max( - random.randint(round(self.config["crops_min_cover"] * frame_h), frame_h), - self.cnn_min_crop_size, - ), - max( - random.randint(round(self.config["crops_min_cover"] * frame_w), frame_w), - self.cnn_min_crop_size, - ), - ) - y_crop, x_crop, h_crop, w_crop = get_random_crop_params((frame_w, frame_h), crop_size) - - foreground_uv = self.foreground_all_uvs[ - cur_frame, - y_start + y_crop : y_start + y_crop + h_crop, - x_start + x_crop : x_start + x_crop + w_crop, - ] - alpha = self.all_alpha[ - [cur_frame], - :, - y_start + y_crop : y_start + y_crop + h_crop, - x_start + x_crop : x_start + x_crop + w_crop, - ] - - original_foreground_crop = self.foreground_reconstruction[ - [cur_frame], - :, - y_start + y_crop : y_start + y_crop + h_crop, - x_start + x_crop : x_start + x_crop + w_crop, - ] - - foreground_alpha_crops.append(alpha) - foreground_uvs.append(foreground_uv) # not scaled - original_foreground_crops.append(original_foreground_crop) - - foreground_max_vals = torch.tensor( - [self.config["grid_atlas_resolution"]] * 2, device=self.device, dtype=torch.long - ) - foreground_min_vals = torch.tensor([0] * 2, device=self.device, dtype=torch.long) - for uv_values in foreground_uvs: - min_uv = uv_values.amin(dim=[0, 1]).long() - max_uv = uv_values.amax(dim=[0, 1]).ceil().long() - foreground_min_vals = 
torch.minimum(foreground_min_vals, min_uv) - foreground_max_vals = torch.maximum(foreground_max_vals, max_uv) - - h_v = foreground_max_vals[1] - foreground_min_vals[1] - w_u = foreground_max_vals[0] - foreground_min_vals[0] - foreground_atlas_crop = crop( - self.foreground_grid_atlas, - foreground_min_vals[1], - foreground_min_vals[0], - h_v, - w_u, - ) - foreground_atlas_crops.append(foreground_atlas_crop) - - for i, uv_values in enumerate(foreground_uvs): - foreground_uvs[i] = ( - 2 * (uv_values - foreground_min_vals) / (foreground_max_vals - foreground_min_vals) - 1 - ).unsqueeze(0) - - - crop_size = ( - random.randint(round(self.config["crops_min_cover"] * self.min_size), self.min_size), - random.randint(round(self.config["crops_min_cover"] * self.max_size), self.max_size), - ) - crop_data = self.get_random_crop_data(crop_size) - y, x, h, w, _ = crop_data - background_uv = self.background_all_uvs[keyframes, y : y + h, x : x + w] - original_background_crop = self.background_reconstruction[ - keyframes, :, y : y + h, x : x + w - ] - alpha = self.all_alpha[keyframes, :, y : y + h, x : x + w] - - - original_background_crops = [el.unsqueeze(0) for el in original_background_crop] - background_alpha_crops = [el.unsqueeze(0) for el in alpha] - - background_atlas_crop, background_min_vals, background_max_vals = get_atlas_crops( - background_uv, - self.background_grid_atlas, - ) - background_uv = 2 * (background_uv - background_min_vals) / (background_max_vals - background_min_vals) - 1 - background_atlas_crops = [el.unsqueeze(0) for el in background_atlas_crop] - background_uvs = [el.unsqueeze(0) for el in background_uv] - - output_dict["foreground_alpha"] = foreground_alpha_crops - output_dict["foreground_uvs"] = foreground_uvs - output_dict["original_foreground_crops"] = original_foreground_crops - output_dict["foreground_atlas_crops"] = foreground_atlas_crops - - output_dict["background_alpha"] = background_alpha_crops - output_dict["background_uvs"] = background_uvs - output_dict["original_background_crops"] = original_background_crops - output_dict["background_atlas_crops"] = background_atlas_crops - return output_dict \ No newline at end of file diff --git a/spaces/Riksarkivet/htr_demo/helper/text/overview/contributions/contributions.md b/spaces/Riksarkivet/htr_demo/helper/text/overview/contributions/contributions.md deleted file mode 100644 index bcaa71cec148b6096486831237efcc06321f334f..0000000000000000000000000000000000000000 --- a/spaces/Riksarkivet/htr_demo/helper/text/overview/contributions/contributions.md +++ /dev/null @@ -1,33 +0,0 @@ -## HTRFLOW – Contributions - -The AI models used in HTRFLOW is the result of a collaborative effort, involving the National Archives in both Sweden and Finland, in partnership with the Stockholm City Archives, Jämtlands läns fornskriftsällskap, citizen science volunteers and researchers from Stockholm and Uppsala Universities. - -Several datasets have been created by participants through Citizen Science using the Handwritten Text Recognition (HTR) software, Transkribus, provided by [READ-COOP SCE](https://readcoop.eu/) . 
- -### Archives used to train models for HTRFLOW - -[Svea hovrätt (Renskrivna protokoll), 1713–1735](https://sok.riksarkivet.se/arkiv/H2hpDbNn14scxjzdWqAaJ1) - -[Bergmästaren i Nora m fl bergslag (Hammartingsprotokoll), 1698–1765](https://sok.riksarkivet.se/arkiv/M5Fe2TT9rH6cxG02H087k3) - -[Trolldomskommissionen, mainly 1670s](https://sok.riksarkivet.se/trolldomskommissionen) - -[Bergskollegium, 1718–1758](https://sok.riksarkivet.se/arkiv/SMFky31ekQ80Qsk0UCZZE2) - -[Jämtlands domsaga, 1647–1688](https://sok.riksarkivet.se/arkiv/2l4NYFT8rH6cxG02H087k3) - -[Stockholms domkapitel, 1728–1759](https://sok.riksarkivet.se/arkiv/etg1tyeEaIPMBzKbUKTjw1) - -[Politikollegiet, 1729–1759](https://sok.riksarkivet.se/arkiv/1lQnXIDiKaYxRLBlK1dGF3) - -[Göteborgs poliskammare före 1900 (Detektiva polisens rapportböcker), 1868–1901](https://sok.riksarkivet.se/arkiv/oLTOi9yxweZJUG018W43t3) - -[Renovated Court Records, the National Archives of Finland, 1800s](https://tuomiokirjat.kansallisarkisto.fi/) - -### Ongoing research collaborations - -[Transcription node Sweden – machine interpretation and citizen research combined](https://riksarkivet.se/forskning), Swedish National Archives and University of Gothenburg, funded by the Swedish National Heritage Board. - -[Mapping the geographies of early modern mining knowledge. A digital history of the study tours of the Swedish Bureau of Mines, 1691–1826](https://www.idehist.uu.se/forskning/projekt/den-tidigmoderna-bergsvetenskapens-geografier), Uppsala University and Stockholm University, funded by the Swedish Research Council. - -The Swedish National Archives' research and development on HTR is part of the Swedish national infrastructure Huminfra. [Click here](https://riksarkivet.se/huminfra) for more information. diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv_custom/checkpoint.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv_custom/checkpoint.py deleted file mode 100644 index 19b87fef0a52d31babcdb3edb8f3089b6420173f..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv_custom/checkpoint.py +++ /dev/null @@ -1,500 +0,0 @@ -# Copyright (c) Open-MMLab. All rights reserved. -import io -import os -import os.path as osp -import pkgutil -import time -import warnings -from collections import OrderedDict -from importlib import import_module -from tempfile import TemporaryDirectory - -import torch -import torchvision -from torch.optim import Optimizer -from torch.utils import model_zoo -from torch.nn import functional as F - -import annotator.uniformer.mmcv as mmcv -from annotator.uniformer.mmcv.fileio import FileClient -from annotator.uniformer.mmcv.fileio import load as load_file -from annotator.uniformer.mmcv.parallel import is_module_wrapper -from annotator.uniformer.mmcv.utils import mkdir_or_exist -from annotator.uniformer.mmcv.runner import get_dist_info - -ENV_MMCV_HOME = 'MMCV_HOME' -ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME' -DEFAULT_CACHE_DIR = '~/.cache' - - -def _get_mmcv_home(): - mmcv_home = os.path.expanduser( - os.getenv( - ENV_MMCV_HOME, - os.path.join( - os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv'))) - - mkdir_or_exist(mmcv_home) - return mmcv_home - - -def load_state_dict(module, state_dict, strict=False, logger=None): - """Load state_dict to a module. - - This method is modified from :meth:`torch.nn.Module.load_state_dict`. - Default value for ``strict`` is set to ``False`` and the message for - param mismatch will be shown even if strict is False. 
- - Args: - module (Module): Module that receives the state_dict. - state_dict (OrderedDict): Weights. - strict (bool): whether to strictly enforce that the keys - in :attr:`state_dict` match the keys returned by this module's - :meth:`~torch.nn.Module.state_dict` function. Default: ``False``. - logger (:obj:`logging.Logger`, optional): Logger to log the error - message. If not specified, print function will be used. - """ - unexpected_keys = [] - all_missing_keys = [] - err_msg = [] - - metadata = getattr(state_dict, '_metadata', None) - state_dict = state_dict.copy() - if metadata is not None: - state_dict._metadata = metadata - - # use _load_from_state_dict to enable checkpoint version control - def load(module, prefix=''): - # recursively check parallel module in case that the model has a - # complicated structure, e.g., nn.Module(nn.Module(DDP)) - if is_module_wrapper(module): - module = module.module - local_metadata = {} if metadata is None else metadata.get( - prefix[:-1], {}) - module._load_from_state_dict(state_dict, prefix, local_metadata, True, - all_missing_keys, unexpected_keys, - err_msg) - for name, child in module._modules.items(): - if child is not None: - load(child, prefix + name + '.') - - load(module) - load = None # break load->load reference cycle - - # ignore "num_batches_tracked" of BN layers - missing_keys = [ - key for key in all_missing_keys if 'num_batches_tracked' not in key - ] - - if unexpected_keys: - err_msg.append('unexpected key in source ' - f'state_dict: {", ".join(unexpected_keys)}\n') - if missing_keys: - err_msg.append( - f'missing keys in source state_dict: {", ".join(missing_keys)}\n') - - rank, _ = get_dist_info() - if len(err_msg) > 0 and rank == 0: - err_msg.insert( - 0, 'The model and loaded state dict do not match exactly\n') - err_msg = '\n'.join(err_msg) - if strict: - raise RuntimeError(err_msg) - elif logger is not None: - logger.warning(err_msg) - else: - print(err_msg) - - -def load_url_dist(url, model_dir=None): - """In distributed setting, this function only download checkpoint at local - rank 0.""" - rank, world_size = get_dist_info() - rank = int(os.environ.get('LOCAL_RANK', rank)) - if rank == 0: - checkpoint = model_zoo.load_url(url, model_dir=model_dir) - if world_size > 1: - torch.distributed.barrier() - if rank > 0: - checkpoint = model_zoo.load_url(url, model_dir=model_dir) - return checkpoint - - -def load_pavimodel_dist(model_path, map_location=None): - """In distributed setting, this function only download checkpoint at local - rank 0.""" - try: - from pavi import modelcloud - except ImportError: - raise ImportError( - 'Please install pavi to load checkpoint from modelcloud.') - rank, world_size = get_dist_info() - rank = int(os.environ.get('LOCAL_RANK', rank)) - if rank == 0: - model = modelcloud.get(model_path) - with TemporaryDirectory() as tmp_dir: - downloaded_file = osp.join(tmp_dir, model.name) - model.download(downloaded_file) - checkpoint = torch.load(downloaded_file, map_location=map_location) - if world_size > 1: - torch.distributed.barrier() - if rank > 0: - model = modelcloud.get(model_path) - with TemporaryDirectory() as tmp_dir: - downloaded_file = osp.join(tmp_dir, model.name) - model.download(downloaded_file) - checkpoint = torch.load( - downloaded_file, map_location=map_location) - return checkpoint - - -def load_fileclient_dist(filename, backend, map_location): - """In distributed setting, this function only download checkpoint at local - rank 0.""" - rank, world_size = get_dist_info() - rank = 
int(os.environ.get('LOCAL_RANK', rank)) - allowed_backends = ['ceph'] - if backend not in allowed_backends: - raise ValueError(f'Load from Backend {backend} is not supported.') - if rank == 0: - fileclient = FileClient(backend=backend) - buffer = io.BytesIO(fileclient.get(filename)) - checkpoint = torch.load(buffer, map_location=map_location) - if world_size > 1: - torch.distributed.barrier() - if rank > 0: - fileclient = FileClient(backend=backend) - buffer = io.BytesIO(fileclient.get(filename)) - checkpoint = torch.load(buffer, map_location=map_location) - return checkpoint - - -def get_torchvision_models(): - model_urls = dict() - for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__): - if ispkg: - continue - _zoo = import_module(f'torchvision.models.{name}') - if hasattr(_zoo, 'model_urls'): - _urls = getattr(_zoo, 'model_urls') - model_urls.update(_urls) - return model_urls - - -def get_external_models(): - mmcv_home = _get_mmcv_home() - default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json') - default_urls = load_file(default_json_path) - assert isinstance(default_urls, dict) - external_json_path = osp.join(mmcv_home, 'open_mmlab.json') - if osp.exists(external_json_path): - external_urls = load_file(external_json_path) - assert isinstance(external_urls, dict) - default_urls.update(external_urls) - - return default_urls - - -def get_mmcls_models(): - mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json') - mmcls_urls = load_file(mmcls_json_path) - - return mmcls_urls - - -def get_deprecated_model_names(): - deprecate_json_path = osp.join(mmcv.__path__[0], - 'model_zoo/deprecated.json') - deprecate_urls = load_file(deprecate_json_path) - assert isinstance(deprecate_urls, dict) - - return deprecate_urls - - -def _process_mmcls_checkpoint(checkpoint): - state_dict = checkpoint['state_dict'] - new_state_dict = OrderedDict() - for k, v in state_dict.items(): - if k.startswith('backbone.'): - new_state_dict[k[9:]] = v - new_checkpoint = dict(state_dict=new_state_dict) - - return new_checkpoint - - -def _load_checkpoint(filename, map_location=None): - """Load checkpoint from somewhere (modelzoo, file, url). - - Args: - filename (str): Accept local filepath, URL, ``torchvision://xxx``, - ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for - details. - map_location (str | None): Same as :func:`torch.load`. Default: None. - - Returns: - dict | OrderedDict: The loaded checkpoint. It can be either an - OrderedDict storing model weights or a dict containing other - information, which depends on the checkpoint. 
- """ - if filename.startswith('modelzoo://'): - warnings.warn('The URL scheme of "modelzoo://" is deprecated, please ' - 'use "torchvision://" instead') - model_urls = get_torchvision_models() - model_name = filename[11:] - checkpoint = load_url_dist(model_urls[model_name]) - elif filename.startswith('torchvision://'): - model_urls = get_torchvision_models() - model_name = filename[14:] - checkpoint = load_url_dist(model_urls[model_name]) - elif filename.startswith('open-mmlab://'): - model_urls = get_external_models() - model_name = filename[13:] - deprecated_urls = get_deprecated_model_names() - if model_name in deprecated_urls: - warnings.warn(f'open-mmlab://{model_name} is deprecated in favor ' - f'of open-mmlab://{deprecated_urls[model_name]}') - model_name = deprecated_urls[model_name] - model_url = model_urls[model_name] - # check if is url - if model_url.startswith(('http://', 'https://')): - checkpoint = load_url_dist(model_url) - else: - filename = osp.join(_get_mmcv_home(), model_url) - if not osp.isfile(filename): - raise IOError(f'{filename} is not a checkpoint file') - checkpoint = torch.load(filename, map_location=map_location) - elif filename.startswith('mmcls://'): - model_urls = get_mmcls_models() - model_name = filename[8:] - checkpoint = load_url_dist(model_urls[model_name]) - checkpoint = _process_mmcls_checkpoint(checkpoint) - elif filename.startswith(('http://', 'https://')): - checkpoint = load_url_dist(filename) - elif filename.startswith('pavi://'): - model_path = filename[7:] - checkpoint = load_pavimodel_dist(model_path, map_location=map_location) - elif filename.startswith('s3://'): - checkpoint = load_fileclient_dist( - filename, backend='ceph', map_location=map_location) - else: - if not osp.isfile(filename): - raise IOError(f'{filename} is not a checkpoint file') - checkpoint = torch.load(filename, map_location=map_location) - return checkpoint - - -def load_checkpoint(model, - filename, - map_location='cpu', - strict=False, - logger=None): - """Load checkpoint from a file or URI. - - Args: - model (Module): Module to load checkpoint. - filename (str): Accept local filepath, URL, ``torchvision://xxx``, - ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for - details. - map_location (str): Same as :func:`torch.load`. - strict (bool): Whether to allow different params for the model and - checkpoint. - logger (:mod:`logging.Logger` or None): The logger for error message. - - Returns: - dict or OrderedDict: The loaded checkpoint. 
- """ - checkpoint = _load_checkpoint(filename, map_location) - # OrderedDict is a subclass of dict - if not isinstance(checkpoint, dict): - raise RuntimeError( - f'No state_dict found in checkpoint file {filename}') - # get state_dict from checkpoint - if 'state_dict' in checkpoint: - state_dict = checkpoint['state_dict'] - elif 'model' in checkpoint: - state_dict = checkpoint['model'] - else: - state_dict = checkpoint - # strip prefix of state_dict - if list(state_dict.keys())[0].startswith('module.'): - state_dict = {k[7:]: v for k, v in state_dict.items()} - - # for MoBY, load model of online branch - if sorted(list(state_dict.keys()))[0].startswith('encoder'): - state_dict = {k.replace('encoder.', ''): v for k, v in state_dict.items() if k.startswith('encoder.')} - - # reshape absolute position embedding - if state_dict.get('absolute_pos_embed') is not None: - absolute_pos_embed = state_dict['absolute_pos_embed'] - N1, L, C1 = absolute_pos_embed.size() - N2, C2, H, W = model.absolute_pos_embed.size() - if N1 != N2 or C1 != C2 or L != H*W: - logger.warning("Error in loading absolute_pos_embed, pass") - else: - state_dict['absolute_pos_embed'] = absolute_pos_embed.view(N2, H, W, C2).permute(0, 3, 1, 2) - - # interpolate position bias table if needed - relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k] - for table_key in relative_position_bias_table_keys: - table_pretrained = state_dict[table_key] - table_current = model.state_dict()[table_key] - L1, nH1 = table_pretrained.size() - L2, nH2 = table_current.size() - if nH1 != nH2: - logger.warning(f"Error in loading {table_key}, pass") - else: - if L1 != L2: - S1 = int(L1 ** 0.5) - S2 = int(L2 ** 0.5) - table_pretrained_resized = F.interpolate( - table_pretrained.permute(1, 0).view(1, nH1, S1, S1), - size=(S2, S2), mode='bicubic') - state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0) - - # load state_dict - load_state_dict(model, state_dict, strict, logger) - return checkpoint - - -def weights_to_cpu(state_dict): - """Copy a model state_dict to cpu. - - Args: - state_dict (OrderedDict): Model weights on GPU. - - Returns: - OrderedDict: Model weights on GPU. - """ - state_dict_cpu = OrderedDict() - for key, val in state_dict.items(): - state_dict_cpu[key] = val.cpu() - return state_dict_cpu - - -def _save_to_state_dict(module, destination, prefix, keep_vars): - """Saves module state to `destination` dictionary. - - This method is modified from :meth:`torch.nn.Module._save_to_state_dict`. - - Args: - module (nn.Module): The module to generate state_dict. - destination (dict): A dict where state will be stored. - prefix (str): The prefix for parameters and buffers used in this - module. - """ - for name, param in module._parameters.items(): - if param is not None: - destination[prefix + name] = param if keep_vars else param.detach() - for name, buf in module._buffers.items(): - # remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d - if buf is not None: - destination[prefix + name] = buf if keep_vars else buf.detach() - - -def get_state_dict(module, destination=None, prefix='', keep_vars=False): - """Returns a dictionary containing a whole state of the module. - - Both parameters and persistent buffers (e.g. running averages) are - included. Keys are corresponding parameter and buffer names. 
- - This method is modified from :meth:`torch.nn.Module.state_dict` to - recursively check parallel module in case that the model has a complicated - structure, e.g., nn.Module(nn.Module(DDP)). - - Args: - module (nn.Module): The module to generate state_dict. - destination (OrderedDict): Returned dict for the state of the - module. - prefix (str): Prefix of the key. - keep_vars (bool): Whether to keep the variable property of the - parameters. Default: False. - - Returns: - dict: A dictionary containing a whole state of the module. - """ - # recursively check parallel module in case that the model has a - # complicated structure, e.g., nn.Module(nn.Module(DDP)) - if is_module_wrapper(module): - module = module.module - - # below is the same as torch.nn.Module.state_dict() - if destination is None: - destination = OrderedDict() - destination._metadata = OrderedDict() - destination._metadata[prefix[:-1]] = local_metadata = dict( - version=module._version) - _save_to_state_dict(module, destination, prefix, keep_vars) - for name, child in module._modules.items(): - if child is not None: - get_state_dict( - child, destination, prefix + name + '.', keep_vars=keep_vars) - for hook in module._state_dict_hooks.values(): - hook_result = hook(module, destination, prefix, local_metadata) - if hook_result is not None: - destination = hook_result - return destination - - -def save_checkpoint(model, filename, optimizer=None, meta=None): - """Save checkpoint to file. - - The checkpoint will have 3 fields: ``meta``, ``state_dict`` and - ``optimizer``. By default ``meta`` will contain version and time info. - - Args: - model (Module): Module whose params are to be saved. - filename (str): Checkpoint filename. - optimizer (:obj:`Optimizer`, optional): Optimizer to be saved. - meta (dict, optional): Metadata to be saved in checkpoint. 
- """ - if meta is None: - meta = {} - elif not isinstance(meta, dict): - raise TypeError(f'meta must be a dict or None, but got {type(meta)}') - meta.update(mmcv_version=mmcv.__version__, time=time.asctime()) - - if is_module_wrapper(model): - model = model.module - - if hasattr(model, 'CLASSES') and model.CLASSES is not None: - # save class name to the meta - meta.update(CLASSES=model.CLASSES) - - checkpoint = { - 'meta': meta, - 'state_dict': weights_to_cpu(get_state_dict(model)) - } - # save optimizer state dict in the checkpoint - if isinstance(optimizer, Optimizer): - checkpoint['optimizer'] = optimizer.state_dict() - elif isinstance(optimizer, dict): - checkpoint['optimizer'] = {} - for name, optim in optimizer.items(): - checkpoint['optimizer'][name] = optim.state_dict() - - if filename.startswith('pavi://'): - try: - from pavi import modelcloud - from pavi.exception import NodeNotFoundError - except ImportError: - raise ImportError( - 'Please install pavi to load checkpoint from modelcloud.') - model_path = filename[7:] - root = modelcloud.Folder() - model_dir, model_name = osp.split(model_path) - try: - model = modelcloud.get(model_dir) - except NodeNotFoundError: - model = root.create_training_model(model_dir) - with TemporaryDirectory() as tmp_dir: - checkpoint_file = osp.join(tmp_dir, model_name) - with open(checkpoint_file, 'wb') as f: - torch.save(checkpoint, f) - f.flush() - model.create_file(checkpoint_file, name=model_name) - else: - mmcv.mkdir_or_exist(osp.dirname(filename)) - # immediately flush buffer - with open(filename, 'wb') as f: - torch.save(checkpoint, f) - f.flush() \ No newline at end of file diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/dii_head.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/dii_head.py deleted file mode 100644 index 8c970a78184672aaaa95edcdaecec03a26604390..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/dii_head.py +++ /dev/null @@ -1,415 +0,0 @@ -import torch -import torch.nn as nn -from mmcv.cnn import (bias_init_with_prob, build_activation_layer, - build_norm_layer) -from mmcv.runner import auto_fp16, force_fp32 - -from mmdet.core import multi_apply -from mmdet.models.builder import HEADS, build_loss -from mmdet.models.dense_heads.atss_head import reduce_mean -from mmdet.models.losses import accuracy -from mmdet.models.utils import FFN, MultiheadAttention, build_transformer -from .bbox_head import BBoxHead - - -@HEADS.register_module() -class DIIHead(BBoxHead): - r"""Dynamic Instance Interactive Head for `Sparse R-CNN: End-to-End Object - Detection with Learnable Proposals `_ - - Args: - num_classes (int): Number of class in dataset. - Defaults to 80. - num_ffn_fcs (int): The number of fully-connected - layers in FFNs. Defaults to 2. - num_heads (int): The hidden dimension of FFNs. - Defaults to 8. - num_cls_fcs (int): The number of fully-connected - layers in classification subnet. Defaults to 1. - num_reg_fcs (int): The number of fully-connected - layers in regression subnet. Defaults to 3. - feedforward_channels (int): The hidden dimension - of FFNs. Defaults to 2048 - in_channels (int): Hidden_channels of MultiheadAttention. - Defaults to 256. - dropout (float): Probability of drop the channel. - Defaults to 0.0 - ffn_act_cfg (dict): The activation config for FFNs. - dynamic_conv_cfg (dict): The convolution config - for DynamicConv. 
- loss_iou (dict): The config for iou or giou loss. - - """ - - def __init__(self, - num_classes=80, - num_ffn_fcs=2, - num_heads=8, - num_cls_fcs=1, - num_reg_fcs=3, - feedforward_channels=2048, - in_channels=256, - dropout=0.0, - ffn_act_cfg=dict(type='ReLU', inplace=True), - dynamic_conv_cfg=dict( - type='DynamicConv', - in_channels=256, - feat_channels=64, - out_channels=256, - input_feat_shape=7, - act_cfg=dict(type='ReLU', inplace=True), - norm_cfg=dict(type='LN')), - loss_iou=dict(type='GIoULoss', loss_weight=2.0), - **kwargs): - super(DIIHead, self).__init__( - num_classes=num_classes, - reg_decoded_bbox=True, - reg_class_agnostic=True, - **kwargs) - self.loss_iou = build_loss(loss_iou) - self.in_channels = in_channels - self.fp16_enabled = False - self.attention = MultiheadAttention(in_channels, num_heads, dropout) - self.attention_norm = build_norm_layer(dict(type='LN'), in_channels)[1] - - self.instance_interactive_conv = build_transformer(dynamic_conv_cfg) - self.instance_interactive_conv_dropout = nn.Dropout(dropout) - self.instance_interactive_conv_norm = build_norm_layer( - dict(type='LN'), in_channels)[1] - - self.ffn = FFN( - in_channels, - feedforward_channels, - num_ffn_fcs, - act_cfg=ffn_act_cfg, - dropout=dropout) - self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1] - - self.cls_fcs = nn.ModuleList() - for _ in range(num_cls_fcs): - self.cls_fcs.append( - nn.Linear(in_channels, in_channels, bias=False)) - self.cls_fcs.append( - build_norm_layer(dict(type='LN'), in_channels)[1]) - self.cls_fcs.append( - build_activation_layer(dict(type='ReLU', inplace=True))) - - # overload the self.fc_cls in BBoxHead - if self.loss_cls.use_sigmoid: - self.fc_cls = nn.Linear(in_channels, self.num_classes) - else: - self.fc_cls = nn.Linear(in_channels, self.num_classes + 1) - - self.reg_fcs = nn.ModuleList() - for _ in range(num_reg_fcs): - self.reg_fcs.append( - nn.Linear(in_channels, in_channels, bias=False)) - self.reg_fcs.append( - build_norm_layer(dict(type='LN'), in_channels)[1]) - self.reg_fcs.append( - build_activation_layer(dict(type='ReLU', inplace=True))) - # overload the self.fc_reg in BBoxHead - self.fc_reg = nn.Linear(in_channels, 4) - - assert self.reg_class_agnostic, 'DIIHead only ' \ - 'support `reg_class_agnostic=True` ' - assert self.reg_decoded_bbox, 'DIIHead only ' \ - 'support `reg_decoded_bbox=True`' - - def init_weights(self): - """Use xavier initialization for all weight parameters and set - classification head bias as a specific value when using focal loss.""" - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - else: - # adopt the default initialization for - # the weight and bias of the layer norm - pass - if self.loss_cls.use_sigmoid: - bias_init = bias_init_with_prob(0.01) - nn.init.constant_(self.fc_cls.bias, bias_init) - - @auto_fp16() - def forward(self, roi_feat, proposal_feat): - """Forward function of Dynamic Instance Interactive Head. - - Args: - roi_feat (Tensor): Roi-pooling features with shape - (batch_size*num_proposals, feature_dimensions, - pooling_h, pooling_w). - proposal_feat (Tensor): Intermediate feature obtained from - diihead in last stage, has shape - (batch_size, num_proposals, feature_dimensions) - - Returns: - tuple[Tensor]: Usually a tuple of classification scores - and bbox prediction and an intermediate feature. - - - cls_scores (Tensor): Classification scores for - all proposals, has shape - (batch_size, num_proposals, num_classes). 
- - bbox_preds (Tensor): Box energies / deltas for - all proposals, has shape - (batch_size, num_proposals, 4). - - obj_feat (Tensor): Object feature before classification - and regression subnet, has shape - (batch_size, num_proposal, feature_dimensions). - """ - N, num_proposals = proposal_feat.shape[:2] - - # Self attention - proposal_feat = proposal_feat.permute(1, 0, 2) - proposal_feat = self.attention_norm(self.attention(proposal_feat)) - - # instance interactive - proposal_feat = proposal_feat.permute(1, 0, - 2).reshape(-1, self.in_channels) - proposal_feat_iic = self.instance_interactive_conv( - proposal_feat, roi_feat) - proposal_feat = proposal_feat + self.instance_interactive_conv_dropout( - proposal_feat_iic) - obj_feat = self.instance_interactive_conv_norm(proposal_feat) - - # FFN - obj_feat = self.ffn_norm(self.ffn(obj_feat)) - - cls_feat = obj_feat - reg_feat = obj_feat - - for cls_layer in self.cls_fcs: - cls_feat = cls_layer(cls_feat) - for reg_layer in self.reg_fcs: - reg_feat = reg_layer(reg_feat) - - cls_score = self.fc_cls(cls_feat).view(N, num_proposals, -1) - bbox_delta = self.fc_reg(reg_feat).view(N, num_proposals, -1) - - return cls_score, bbox_delta, obj_feat.view(N, num_proposals, -1) - - @force_fp32(apply_to=('cls_score', 'bbox_pred')) - def loss(self, - cls_score, - bbox_pred, - labels, - label_weights, - bbox_targets, - bbox_weights, - imgs_whwh=None, - reduction_override=None, - **kwargs): - """Loss function of DIIHead, get loss of all images. - - Args: - cls_score (Tensor): Classification prediction - results of all classes, has shape - (batch_size * num_proposals_single_image, num_classes) - bbox_pred (Tensor): Regression prediction results, - has shape - (batch_size * num_proposals_single_image, 4), the last - dimension 4 represents [tl_x, tl_y, br_x, br_y]. - labels (Tensor): Label of each proposal, has shape - (batch_size * num_proposals_single_image). - label_weights (Tensor): Classification loss - weight of each proposal, has shape - (batch_size * num_proposals_single_image). - bbox_targets (Tensor): Regression targets of each - proposal, has shape - (batch_size * num_proposals_single_image, 4), - the last dimension 4 represents - [tl_x, tl_y, br_x, br_y]. - bbox_weights (Tensor): Regression loss weight of each - proposal's coordinates, has shape - (batch_size * num_proposals_single_image, 4). - imgs_whwh (Tensor): Tensor with - shape (batch_size, num_proposals, 4), the last - dimension means - [img_width, img_height, img_width, img_height]. - reduction_override (str, optional): The reduction - method used to override the original reduction - method of the loss. Options are "none", - "mean" and "sum". Defaults to None. - - Returns: - dict[str, Tensor]: Dictionary of loss components - """ - losses = dict() - bg_class_ind = self.num_classes - # note in sparse rcnn num_gt == num_pos - pos_inds = (labels >= 0) & (labels < bg_class_ind) - num_pos = pos_inds.sum().float() - avg_factor = reduce_mean(num_pos) - if cls_score is not None: - if cls_score.numel() > 0: - losses['loss_cls'] = self.loss_cls( - cls_score, - labels, - label_weights, - avg_factor=avg_factor, - reduction_override=reduction_override) - losses['pos_acc'] = accuracy(cls_score[pos_inds], - labels[pos_inds]) - if bbox_pred is not None: - # 0~self.num_classes-1 are FG, self.num_classes is BG - # do not perform bounding box regression for BG anymore. 
- if pos_inds.any(): - pos_bbox_pred = bbox_pred.reshape(bbox_pred.size(0), - 4)[pos_inds.type(torch.bool)] - imgs_whwh = imgs_whwh.reshape(bbox_pred.size(0), - 4)[pos_inds.type(torch.bool)] - losses['loss_bbox'] = self.loss_bbox( - pos_bbox_pred / imgs_whwh, - bbox_targets[pos_inds.type(torch.bool)] / imgs_whwh, - bbox_weights[pos_inds.type(torch.bool)], - avg_factor=avg_factor) - losses['loss_iou'] = self.loss_iou( - pos_bbox_pred, - bbox_targets[pos_inds.type(torch.bool)], - bbox_weights[pos_inds.type(torch.bool)], - avg_factor=avg_factor) - else: - losses['loss_bbox'] = bbox_pred.sum() * 0 - losses['loss_iou'] = bbox_pred.sum() * 0 - return losses - - def _get_target_single(self, pos_inds, neg_inds, pos_bboxes, neg_bboxes, - pos_gt_bboxes, pos_gt_labels, cfg): - """Calculate the ground truth for proposals in the single image - according to the sampling results. - - Almost the same as the implementation in `bbox_head`, - we add pos_inds and neg_inds to select positive and - negative samples instead of selecting the first num_pos - as positive samples. - - Args: - pos_inds (Tensor): The length is equal to the - positive sample numbers contain all index - of the positive sample in the origin proposal set. - neg_inds (Tensor): The length is equal to the - negative sample numbers contain all index - of the negative sample in the origin proposal set. - pos_bboxes (Tensor): Contains all the positive boxes, - has shape (num_pos, 4), the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - neg_bboxes (Tensor): Contains all the negative boxes, - has shape (num_neg, 4), the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - pos_gt_bboxes (Tensor): Contains all the gt_boxes, - has shape (num_gt, 4), the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - pos_gt_labels (Tensor): Contains all the gt_labels, - has shape (num_gt). - cfg (obj:`ConfigDict`): `train_cfg` of R-CNN. - - Returns: - Tuple[Tensor]: Ground truth for proposals in a single image. - Containing the following Tensors: - - - labels(Tensor): Gt_labels for all proposals, has - shape (num_proposals,). - - label_weights(Tensor): Labels_weights for all proposals, has - shape (num_proposals,). - - bbox_targets(Tensor):Regression target for all proposals, has - shape (num_proposals, 4), the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - - bbox_weights(Tensor):Regression weights for all proposals, - has shape (num_proposals, 4). 
- """ - num_pos = pos_bboxes.size(0) - num_neg = neg_bboxes.size(0) - num_samples = num_pos + num_neg - - # original implementation uses new_zeros since BG are set to be 0 - # now use empty & fill because BG cat_id = num_classes, - # FG cat_id = [0, num_classes-1] - labels = pos_bboxes.new_full((num_samples, ), - self.num_classes, - dtype=torch.long) - label_weights = pos_bboxes.new_zeros(num_samples) - bbox_targets = pos_bboxes.new_zeros(num_samples, 4) - bbox_weights = pos_bboxes.new_zeros(num_samples, 4) - if num_pos > 0: - labels[pos_inds] = pos_gt_labels - pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight - label_weights[pos_inds] = pos_weight - if not self.reg_decoded_bbox: - pos_bbox_targets = self.bbox_coder.encode( - pos_bboxes, pos_gt_bboxes) - else: - pos_bbox_targets = pos_gt_bboxes - bbox_targets[pos_inds, :] = pos_bbox_targets - bbox_weights[pos_inds, :] = 1 - if num_neg > 0: - label_weights[neg_inds] = 1.0 - - return labels, label_weights, bbox_targets, bbox_weights - - def get_targets(self, - sampling_results, - gt_bboxes, - gt_labels, - rcnn_train_cfg, - concat=True): - """Calculate the ground truth for all samples in a batch according to - the sampling_results. - - Almost the same as the implementation in bbox_head, we passed - additional parameters pos_inds_list and neg_inds_list to - `_get_target_single` function. - - Args: - sampling_results (List[obj:SamplingResults]): Assign results of - all images in a batch after sampling. - gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch, - each tensor has shape (num_gt, 4), the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - gt_labels (list[Tensor]): Gt_labels of all images in a batch, - each tensor has shape (num_gt,). - rcnn_train_cfg (obj:`ConfigDict`): `train_cfg` of RCNN. - concat (bool): Whether to concatenate the results of all - the images in a single batch. - - Returns: - Tuple[Tensor]: Ground truth for proposals in a single image. - Containing the following list of Tensors: - - - labels (list[Tensor],Tensor): Gt_labels for all - proposals in a batch, each tensor in list has - shape (num_proposals,) when `concat=False`, otherwise just - a single tensor has shape (num_all_proposals,). - - label_weights (list[Tensor]): Labels_weights for - all proposals in a batch, each tensor in list has shape - (num_proposals,) when `concat=False`, otherwise just a - single tensor has shape (num_all_proposals,). - - bbox_targets (list[Tensor],Tensor): Regression target - for all proposals in a batch, each tensor in list has - shape (num_proposals, 4) when `concat=False`, otherwise - just a single tensor has shape (num_all_proposals, 4), - the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - - bbox_weights (list[tensor],Tensor): Regression weights for - all proposals in a batch, each tensor in list has shape - (num_proposals, 4) when `concat=False`, otherwise just a - single tensor has shape (num_all_proposals, 4). 
- """ - pos_inds_list = [res.pos_inds for res in sampling_results] - neg_inds_list = [res.neg_inds for res in sampling_results] - pos_bboxes_list = [res.pos_bboxes for res in sampling_results] - neg_bboxes_list = [res.neg_bboxes for res in sampling_results] - pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results] - pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results] - labels, label_weights, bbox_targets, bbox_weights = multi_apply( - self._get_target_single, - pos_inds_list, - neg_inds_list, - pos_bboxes_list, - neg_bboxes_list, - pos_gt_bboxes_list, - pos_gt_labels_list, - cfg=rcnn_train_cfg) - if concat: - labels = torch.cat(labels, 0) - label_weights = torch.cat(label_weights, 0) - bbox_targets = torch.cat(bbox_targets, 0) - bbox_weights = torch.cat(bbox_weights, 0) - return labels, label_weights, bbox_targets, bbox_weights diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/runner/hooks/profiler.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/runner/hooks/profiler.py deleted file mode 100644 index b70236997eec59c2209ef351ae38863b4112d0ec..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/runner/hooks/profiler.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings -from typing import Callable, List, Optional, Union - -import torch - -from ..dist_utils import master_only -from .hook import HOOKS, Hook - - -@HOOKS.register_module() -class ProfilerHook(Hook): - """Profiler to analyze performance during training. - - PyTorch Profiler is a tool that allows the collection of the performance - metrics during the training. More details on Profiler can be found at - https://pytorch.org/docs/1.8.1/profiler.html#torch.profiler.profile - - Args: - by_epoch (bool): Profile performance by epoch or by iteration. - Default: True. - profile_iters (int): Number of iterations for profiling. - If ``by_epoch=True``, profile_iters indicates that they are the - first profile_iters epochs at the beginning of the - training, otherwise it indicates the first profile_iters - iterations. Default: 1. - activities (list[str]): List of activity groups (CPU, CUDA) to use in - profiling. Default: ['cpu', 'cuda']. - schedule (dict, optional): Config of generating the callable schedule. - if schedule is None, profiler will not add step markers into the - trace and table view. Default: None. - on_trace_ready (callable, dict): Either a handler or a dict of generate - handler. Default: None. - record_shapes (bool): Save information about operator's input shapes. - Default: False. - profile_memory (bool): Track tensor memory allocation/deallocation. - Default: False. - with_stack (bool): Record source information (file and line number) - for the ops. Default: False. - with_flops (bool): Use formula to estimate the FLOPS of specific - operators (matrix multiplication and 2D convolution). - Default: False. - json_trace_path (str, optional): Exports the collected trace in Chrome - JSON format. Default: None. - - Example: - >>> runner = ... 
# instantiate a Runner - >>> # tensorboard trace - >>> trace_config = dict(type='tb_trace', dir_name='work_dir') - >>> profiler_config = dict(on_trace_ready=trace_config) - >>> runner.register_profiler_hook(profiler_config) - >>> runner.run(data_loaders=[trainloader], workflow=[('train', 1)]) - """ - - def __init__(self, - by_epoch: bool = True, - profile_iters: int = 1, - activities: List[str] = ['cpu', 'cuda'], - schedule: Optional[dict] = None, - on_trace_ready: Optional[Union[Callable, dict]] = None, - record_shapes: bool = False, - profile_memory: bool = False, - with_stack: bool = False, - with_flops: bool = False, - json_trace_path: Optional[str] = None) -> None: - try: - from torch import profiler # torch version >= 1.8.1 - except ImportError: - raise ImportError('profiler is the new feature of torch1.8.1, ' - f'but your version is {torch.__version__}') - - assert isinstance(by_epoch, bool), '``by_epoch`` should be a boolean.' - self.by_epoch = by_epoch - - if profile_iters < 1: - raise ValueError('profile_iters should be greater than 0, but got ' - f'{profile_iters}') - self.profile_iters = profile_iters - - if not isinstance(activities, list): - raise ValueError( - f'activities should be list, but got {type(activities)}') - self.activities = [] - for activity in activities: - activity = activity.lower() - if activity == 'cpu': - self.activities.append(profiler.ProfilerActivity.CPU) - elif activity == 'cuda': - self.activities.append(profiler.ProfilerActivity.CUDA) - else: - raise ValueError( - f'activity should be "cpu" or "cuda", but got {activity}') - - if schedule is not None: - self.schedule = profiler.schedule(**schedule) - else: - self.schedule = None - - self.on_trace_ready = on_trace_ready - self.record_shapes = record_shapes - self.profile_memory = profile_memory - self.with_stack = with_stack - self.with_flops = with_flops - self.json_trace_path = json_trace_path - - @master_only - def before_run(self, runner): - if self.by_epoch and runner.max_epochs < self.profile_iters: - raise ValueError('self.profile_iters should not be greater than ' - f'{runner.max_epochs}') - - if not self.by_epoch and runner.max_iters < self.profile_iters: - raise ValueError('self.profile_iters should not be greater than ' - f'{runner.max_iters}') - - if callable(self.on_trace_ready): # handler - _on_trace_ready = self.on_trace_ready - elif isinstance(self.on_trace_ready, dict): # config of handler - trace_cfg = self.on_trace_ready.copy() - trace_type = trace_cfg.pop('type') # log_trace handler - if trace_type == 'log_trace': - - def _log_handler(prof): - print(prof.key_averages().table(**trace_cfg)) - - _on_trace_ready = _log_handler - elif trace_type == 'tb_trace': # tensorboard_trace handler - try: - import torch_tb_profiler # noqa: F401 - except ImportError: - raise ImportError('please run "pip install ' - 'torch-tb-profiler" to install ' - 'torch_tb_profiler') - _on_trace_ready = torch.profiler.tensorboard_trace_handler( - **trace_cfg) - else: - raise ValueError('trace_type should be "log_trace" or ' - f'"tb_trace", but got {trace_type}') - elif self.on_trace_ready is None: - _on_trace_ready = None # type: ignore - else: - raise ValueError('on_trace_ready should be handler, dict or None, ' - f'but got {type(self.on_trace_ready)}') - - if runner.max_epochs > 1: - warnings.warn(f'profiler will profile {runner.max_epochs} epochs ' - 'instead of 1 epoch. 
Since profiler will slow down ' - 'the training, it is recommended to train 1 epoch ' - 'with ProfilerHook and adjust your setting according' - ' to the profiler summary. During normal training ' - '(epoch > 1), you may disable the ProfilerHook.') - - self.profiler = torch.profiler.profile( - activities=self.activities, - schedule=self.schedule, - on_trace_ready=_on_trace_ready, - record_shapes=self.record_shapes, - profile_memory=self.profile_memory, - with_stack=self.with_stack, - with_flops=self.with_flops) - - self.profiler.__enter__() - runner.logger.info('profiler is profiling...') - - @master_only - def after_train_epoch(self, runner): - if self.by_epoch and runner.epoch == self.profile_iters - 1: - runner.logger.info('profiler may take a few minutes...') - self.profiler.__exit__(None, None, None) - if self.json_trace_path is not None: - self.profiler.export_chrome_trace(self.json_trace_path) - - @master_only - def after_train_iter(self, runner): - self.profiler.step() - if not self.by_epoch and runner.iter == self.profile_iters - 1: - runner.logger.info('profiler may take a few minutes...') - self.profiler.__exit__(None, None, None) - if self.json_trace_path is not None: - self.profiler.export_chrome_trace(self.json_trace_path) diff --git a/spaces/Rongjiehuang/ProDiff/egs/datasets/audio/libritts/pre_align.py b/spaces/Rongjiehuang/ProDiff/egs/datasets/audio/libritts/pre_align.py deleted file mode 100644 index 335b43d913edb02fb02e10c2479aa3dd9e07bb2f..0000000000000000000000000000000000000000 --- a/spaces/Rongjiehuang/ProDiff/egs/datasets/audio/libritts/pre_align.py +++ /dev/null @@ -1,18 +0,0 @@ -import os - -from data_gen.tts.base_pre_align import BasePreAlign -import glob - - -class LibrittsPreAlign(BasePreAlign): - def meta_data(self): - wav_fns = sorted(glob.glob(f'{self.raw_data_dir}/*/*/*/*.wav')) - for wav_fn in wav_fns: - item_name = os.path.basename(wav_fn)[:-4] - txt_fn = f'{wav_fn[:-4]}.normalized.txt' - spk = item_name.split("_")[0] - yield item_name, wav_fn, (self.load_txt, txt_fn), spk - - -if __name__ == "__main__": - LibrittsPreAlign().process() diff --git a/spaces/Rongjiehuang/ProDiff/modules/parallel_wavegan/models/parallel_wavegan.py b/spaces/Rongjiehuang/ProDiff/modules/parallel_wavegan/models/parallel_wavegan.py deleted file mode 100644 index c63b59f67aa48342179415c1d1beac68574a5498..0000000000000000000000000000000000000000 --- a/spaces/Rongjiehuang/ProDiff/modules/parallel_wavegan/models/parallel_wavegan.py +++ /dev/null @@ -1,434 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2019 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) - -"""Parallel WaveGAN Modules.""" - -import logging -import math - -import torch -from torch import nn - -from modules.parallel_wavegan.layers import Conv1d -from modules.parallel_wavegan.layers import Conv1d1x1 -from modules.parallel_wavegan.layers import ResidualBlock -from modules.parallel_wavegan.layers import upsample -from modules.parallel_wavegan import models - - -class ParallelWaveGANGenerator(torch.nn.Module): - """Parallel WaveGAN Generator module.""" - - def __init__(self, - in_channels=1, - out_channels=1, - kernel_size=3, - layers=30, - stacks=3, - residual_channels=64, - gate_channels=128, - skip_channels=64, - aux_channels=80, - aux_context_window=2, - dropout=0.0, - bias=True, - use_weight_norm=True, - use_causal_conv=False, - upsample_conditional_features=True, - upsample_net="ConvInUpsampleNetwork", - upsample_params={"upsample_scales": [4, 4, 4, 4]}, - use_pitch_embed=False, - ): - """Initialize 
Parallel WaveGAN Generator module. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - kernel_size (int): Kernel size of dilated convolution. - layers (int): Number of residual block layers. - stacks (int): Number of stacks i.e., dilation cycles. - residual_channels (int): Number of channels in residual conv. - gate_channels (int): Number of channels in gated conv. - skip_channels (int): Number of channels in skip conv. - aux_channels (int): Number of channels for auxiliary feature conv. - aux_context_window (int): Context window size for auxiliary feature. - dropout (float): Dropout rate. 0.0 means no dropout applied. - bias (bool): Whether to use bias parameter in conv layer. - use_weight_norm (bool): Whether to use weight norm. - If set to true, it will be applied to all of the conv layers. - use_causal_conv (bool): Whether to use causal structure. - upsample_conditional_features (bool): Whether to use upsampling network. - upsample_net (str): Upsampling network architecture. - upsample_params (dict): Upsampling network parameters. - - """ - super(ParallelWaveGANGenerator, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.aux_channels = aux_channels - self.layers = layers - self.stacks = stacks - self.kernel_size = kernel_size - - # check the number of layers and stacks - assert layers % stacks == 0 - layers_per_stack = layers // stacks - - # define first convolution - self.first_conv = Conv1d1x1(in_channels, residual_channels, bias=True) - - # define conv + upsampling network - if upsample_conditional_features: - upsample_params.update({ - "use_causal_conv": use_causal_conv, - }) - if upsample_net == "MelGANGenerator": - assert aux_context_window == 0 - upsample_params.update({ - "use_weight_norm": False, # not to apply twice - "use_final_nonlinear_activation": False, - }) - self.upsample_net = getattr(models, upsample_net)(**upsample_params) - else: - if upsample_net == "ConvInUpsampleNetwork": - upsample_params.update({ - "aux_channels": aux_channels, - "aux_context_window": aux_context_window, - }) - self.upsample_net = getattr(upsample, upsample_net)(**upsample_params) - else: - self.upsample_net = None - - # define residual blocks - self.conv_layers = torch.nn.ModuleList() - for layer in range(layers): - dilation = 2 ** (layer % layers_per_stack) - conv = ResidualBlock( - kernel_size=kernel_size, - residual_channels=residual_channels, - gate_channels=gate_channels, - skip_channels=skip_channels, - aux_channels=aux_channels, - dilation=dilation, - dropout=dropout, - bias=bias, - use_causal_conv=use_causal_conv, - ) - self.conv_layers += [conv] - - # define output layers - self.last_conv_layers = torch.nn.ModuleList([ - torch.nn.ReLU(inplace=True), - Conv1d1x1(skip_channels, skip_channels, bias=True), - torch.nn.ReLU(inplace=True), - Conv1d1x1(skip_channels, out_channels, bias=True), - ]) - - self.use_pitch_embed = use_pitch_embed - if use_pitch_embed: - self.pitch_embed = nn.Embedding(300, aux_channels, 0) - self.c_proj = nn.Linear(2 * aux_channels, aux_channels) - - # apply weight norm - if use_weight_norm: - self.apply_weight_norm() - - def forward(self, x, c=None, pitch=None, **kwargs): - """Calculate forward propagation. - - Args: - x (Tensor): Input noise signal (B, C_in, T). - c (Tensor): Local conditioning auxiliary features (B, C ,T'). - pitch (Tensor): Local conditioning pitch (B, T'). 
- - Returns: - Tensor: Output tensor (B, C_out, T) - - """ - # perform upsampling - if c is not None and self.upsample_net is not None: - if self.use_pitch_embed: - p = self.pitch_embed(pitch) - c = self.c_proj(torch.cat([c.transpose(1, 2), p], -1)).transpose(1, 2) - c = self.upsample_net(c) - assert c.size(-1) == x.size(-1), (c.size(-1), x.size(-1)) - - # encode to hidden representation - x = self.first_conv(x) - skips = 0 - for f in self.conv_layers: - x, h = f(x, c) - skips += h - skips *= math.sqrt(1.0 / len(self.conv_layers)) - - # apply final layers - x = skips - for f in self.last_conv_layers: - x = f(x) - - return x - - def remove_weight_norm(self): - """Remove weight normalization module from all of the layers.""" - def _remove_weight_norm(m): - try: - logging.debug(f"Weight norm is removed from {m}.") - torch.nn.utils.remove_weight_norm(m) - except ValueError: # this module didn't have weight norm - return - - self.apply(_remove_weight_norm) - - def apply_weight_norm(self): - """Apply weight normalization module from all of the layers.""" - def _apply_weight_norm(m): - if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d): - torch.nn.utils.weight_norm(m) - logging.debug(f"Weight norm is applied to {m}.") - - self.apply(_apply_weight_norm) - - @staticmethod - def _get_receptive_field_size(layers, stacks, kernel_size, - dilation=lambda x: 2 ** x): - assert layers % stacks == 0 - layers_per_cycle = layers // stacks - dilations = [dilation(i % layers_per_cycle) for i in range(layers)] - return (kernel_size - 1) * sum(dilations) + 1 - - @property - def receptive_field_size(self): - """Return receptive field size.""" - return self._get_receptive_field_size(self.layers, self.stacks, self.kernel_size) - - -class ParallelWaveGANDiscriminator(torch.nn.Module): - """Parallel WaveGAN Discriminator module.""" - - def __init__(self, - in_channels=1, - out_channels=1, - kernel_size=3, - layers=10, - conv_channels=64, - dilation_factor=1, - nonlinear_activation="LeakyReLU", - nonlinear_activation_params={"negative_slope": 0.2}, - bias=True, - use_weight_norm=True, - ): - """Initialize Parallel WaveGAN Discriminator module. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - kernel_size (int): Kernel size of conv layers. - layers (int): Number of conv layers. - conv_channels (int): Number of channels in conv layers. - dilation_factor (int): Dilation factor. For example, if dilation_factor = 2, - the dilation will be 2, 4, 8, ..., and so on. - nonlinear_activation (str): Nonlinear function after each conv. - nonlinear_activation_params (dict): Nonlinear function parameters. - bias (bool): Whether to use bias parameter in conv. - use_weight_norm (bool): Whether to use weight norm. - If set to true, it will be applied to all of the conv layers. - - """ - super(ParallelWaveGANDiscriminator, self).__init__() - assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size." - assert dilation_factor > 0, "Dilation factor must be > 0." 
- self.conv_layers = torch.nn.ModuleList() - conv_in_channels = in_channels - for i in range(layers - 1): - if i == 0: - dilation = 1 - else: - dilation = i if dilation_factor == 1 else dilation_factor ** i - conv_in_channels = conv_channels - padding = (kernel_size - 1) // 2 * dilation - conv_layer = [ - Conv1d(conv_in_channels, conv_channels, - kernel_size=kernel_size, padding=padding, - dilation=dilation, bias=bias), - getattr(torch.nn, nonlinear_activation)(inplace=True, **nonlinear_activation_params) - ] - self.conv_layers += conv_layer - padding = (kernel_size - 1) // 2 - last_conv_layer = Conv1d( - conv_in_channels, out_channels, - kernel_size=kernel_size, padding=padding, bias=bias) - self.conv_layers += [last_conv_layer] - - # apply weight norm - if use_weight_norm: - self.apply_weight_norm() - - def forward(self, x): - """Calculate forward propagation. - - Args: - x (Tensor): Input noise signal (B, 1, T). - - Returns: - Tensor: Output tensor (B, 1, T) - - """ - for f in self.conv_layers: - x = f(x) - return x - - def apply_weight_norm(self): - """Apply weight normalization module from all of the layers.""" - def _apply_weight_norm(m): - if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d): - torch.nn.utils.weight_norm(m) - logging.debug(f"Weight norm is applied to {m}.") - - self.apply(_apply_weight_norm) - - def remove_weight_norm(self): - """Remove weight normalization module from all of the layers.""" - def _remove_weight_norm(m): - try: - logging.debug(f"Weight norm is removed from {m}.") - torch.nn.utils.remove_weight_norm(m) - except ValueError: # this module didn't have weight norm - return - - self.apply(_remove_weight_norm) - - -class ResidualParallelWaveGANDiscriminator(torch.nn.Module): - """Parallel WaveGAN Discriminator module.""" - - def __init__(self, - in_channels=1, - out_channels=1, - kernel_size=3, - layers=30, - stacks=3, - residual_channels=64, - gate_channels=128, - skip_channels=64, - dropout=0.0, - bias=True, - use_weight_norm=True, - use_causal_conv=False, - nonlinear_activation="LeakyReLU", - nonlinear_activation_params={"negative_slope": 0.2}, - ): - """Initialize Parallel WaveGAN Discriminator module. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - kernel_size (int): Kernel size of dilated convolution. - layers (int): Number of residual block layers. - stacks (int): Number of stacks i.e., dilation cycles. - residual_channels (int): Number of channels in residual conv. - gate_channels (int): Number of channels in gated conv. - skip_channels (int): Number of channels in skip conv. - dropout (float): Dropout rate. 0.0 means no dropout applied. - bias (bool): Whether to use bias parameter in conv. - use_weight_norm (bool): Whether to use weight norm. - If set to true, it will be applied to all of the conv layers. - use_causal_conv (bool): Whether to use causal structure. - nonlinear_activation_params (dict): Nonlinear function parameters - - """ - super(ResidualParallelWaveGANDiscriminator, self).__init__() - assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size." 
- - self.in_channels = in_channels - self.out_channels = out_channels - self.layers = layers - self.stacks = stacks - self.kernel_size = kernel_size - - # check the number of layers and stacks - assert layers % stacks == 0 - layers_per_stack = layers // stacks - - # define first convolution - self.first_conv = torch.nn.Sequential( - Conv1d1x1(in_channels, residual_channels, bias=True), - getattr(torch.nn, nonlinear_activation)( - inplace=True, **nonlinear_activation_params), - ) - - # define residual blocks - self.conv_layers = torch.nn.ModuleList() - for layer in range(layers): - dilation = 2 ** (layer % layers_per_stack) - conv = ResidualBlock( - kernel_size=kernel_size, - residual_channels=residual_channels, - gate_channels=gate_channels, - skip_channels=skip_channels, - aux_channels=-1, - dilation=dilation, - dropout=dropout, - bias=bias, - use_causal_conv=use_causal_conv, - ) - self.conv_layers += [conv] - - # define output layers - self.last_conv_layers = torch.nn.ModuleList([ - getattr(torch.nn, nonlinear_activation)( - inplace=True, **nonlinear_activation_params), - Conv1d1x1(skip_channels, skip_channels, bias=True), - getattr(torch.nn, nonlinear_activation)( - inplace=True, **nonlinear_activation_params), - Conv1d1x1(skip_channels, out_channels, bias=True), - ]) - - # apply weight norm - if use_weight_norm: - self.apply_weight_norm() - - def forward(self, x): - """Calculate forward propagation. - - Args: - x (Tensor): Input noise signal (B, 1, T). - - Returns: - Tensor: Output tensor (B, 1, T) - - """ - x = self.first_conv(x) - - skips = 0 - for f in self.conv_layers: - x, h = f(x, None) - skips += h - skips *= math.sqrt(1.0 / len(self.conv_layers)) - - # apply final layers - x = skips - for f in self.last_conv_layers: - x = f(x) - return x - - def apply_weight_norm(self): - """Apply weight normalization module from all of the layers.""" - def _apply_weight_norm(m): - if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d): - torch.nn.utils.weight_norm(m) - logging.debug(f"Weight norm is applied to {m}.") - - self.apply(_apply_weight_norm) - - def remove_weight_norm(self): - """Remove weight normalization module from all of the layers.""" - def _remove_weight_norm(m): - try: - logging.debug(f"Weight norm is removed from {m}.") - torch.nn.utils.remove_weight_norm(m) - except ValueError: # this module didn't have weight norm - return - - self.apply(_remove_weight_norm) diff --git a/spaces/Rubens/semantic_similarity/README.md b/spaces/Rubens/semantic_similarity/README.md deleted file mode 100644 index f3d02069692c2dc55011d407d9e1239bbb5be1f1..0000000000000000000000000000000000000000 --- a/spaces/Rubens/semantic_similarity/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Semantic Similarity -emoji: 🐢 -colorFrom: yellow -colorTo: blue -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/SERER/VITS-Umamusume-voice-synthesizer/monotonic_align/__init__.py b/spaces/SERER/VITS-Umamusume-voice-synthesizer/monotonic_align/__init__.py deleted file mode 100644 index 3d7009c40fea3a98168e3e3bc9ae061e91327422..0000000000000000000000000000000000000000 --- a/spaces/SERER/VITS-Umamusume-voice-synthesizer/monotonic_align/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -import numpy as np -import torch -from .monotonic_align.core import maximum_path_c - - -def maximum_path(neg_cent, mask): - """ Cython optimized version. 
- neg_cent: [b, t_t, t_s] - mask: [b, t_t, t_s] - """ - device = neg_cent.device - dtype = neg_cent.dtype - neg_cent = neg_cent.data.cpu().numpy().astype(np.float32) - path = np.zeros(neg_cent.shape, dtype=np.int32) - - t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32) - t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32) - maximum_path_c(path, neg_cent, t_t_max, t_s_max) - return torch.from_numpy(path).to(device=device, dtype=dtype) diff --git a/spaces/SIVAPRASATH/tamil-translator/app.py b/spaces/SIVAPRASATH/tamil-translator/app.py deleted file mode 100644 index 311405a7a875a7815be9db93b2a6ec6a173b7401..0000000000000000000000000000000000000000 --- a/spaces/SIVAPRASATH/tamil-translator/app.py +++ /dev/null @@ -1,17 +0,0 @@ -import gradio as gr -from transformers import MBartForConditionalGeneration, MBart50TokenizerFast,MBartTokenizerFast,MBart50Tokenizer - - -from transformers import MBartTokenizer,MBartForConditionalGeneration, MBartConfig -model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-one-to-many-mmt") -tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-one-to-many-mmt",src_lang="en_XX") - -def get_input(text): - models_input = tokenizer(text,return_tensors="pt") - generated_tokens = model.generate(**models_input,forced_bos_token_id=tokenizer.lang_code_to_id["ta_IN"]) - translation = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) - return translation - -iface = gr.Interface(fn=get_input,inputs="text",outputs="text", title = "English to Tamil Translator",description="Get Tamil translation for your text in English") - -iface.launch() \ No newline at end of file diff --git a/spaces/SRankChatGpt/Presentation-Assistant/README.md b/spaces/SRankChatGpt/Presentation-Assistant/README.md deleted file mode 100644 index 74e79008f24ac2a0c022f072715b5ebf81f313e0..0000000000000000000000000000000000000000 --- a/spaces/SRankChatGpt/Presentation-Assistant/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: PA(Presentation Assistant) -emoji: 🧑‍🏫 -colorFrom: indigo -colorTo: green -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/SantoshKumar/SD-H5-AR-VR-IOT/style.css b/spaces/SantoshKumar/SD-H5-AR-VR-IOT/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/SantoshKumar/SD-H5-AR-VR-IOT/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/Sapiensia/diffuse-the-rest/build/_app/immutable/chunks/index-032ac624.js b/spaces/Sapiensia/diffuse-the-rest/build/_app/immutable/chunks/index-032ac624.js deleted file mode 100644 index 85296fbcdf93ad959ffc0963af842b8640cbb27e..0000000000000000000000000000000000000000 --- a/spaces/Sapiensia/diffuse-the-rest/build/_app/immutable/chunks/index-032ac624.js +++ /dev/null @@ -1 +0,0 @@ -function N(){}function H(t,n){for(const e in n)t[e]=n[e];return t}function P(t){return t()}function T(){return 
Object.create(null)}function p(t){t.forEach(P)}function I(t){return typeof t=="function"}function ot(t,n){return t!=t?n==n:t!==n||t&&typeof t=="object"||typeof t=="function"}let g;function st(t,n){return g||(g=document.createElement("a")),g.href=n,t===g.href}function G(t){return Object.keys(t).length===0}function J(t,...n){if(t==null)return N;const e=t.subscribe(...n);return e.unsubscribe?()=>e.unsubscribe():e}function at(t,n,e){t.$$.on_destroy.push(J(n,e))}function ft(t,n,e,i){if(t){const r=B(t,n,e,i);return t[0](r)}}function B(t,n,e,i){return t[1]&&i?H(e.ctx.slice(),t[1](i(n))):e.ctx}function dt(t,n,e,i){if(t[2]&&i){const r=t[2](i(e));if(n.dirty===void 0)return r;if(typeof r=="object"){const s=[],c=Math.max(n.dirty.length,r.length);for(let o=0;o32){const n=[],e=t.ctx.length/32;for(let i=0;i>1);e(r)<=i?t=r+1:n=r}return t}function W(t){if(t.hydrate_init)return;t.hydrate_init=!0;let n=t.childNodes;if(t.nodeName==="HEAD"){const l=[];for(let u=0;u0&&n[e[r]].claim_order<=u?r+1:R(1,r,y=>n[e[y]].claim_order,u))-1;i[l]=e[f]+1;const a=f+1;e[a]=l,r=Math.max(a,r)}const s=[],c=[];let o=n.length-1;for(let l=e[r]+1;l!=0;l=i[l-1]){for(s.push(n[l-1]);o>=l;o--)c.push(n[o]);o--}for(;o>=0;o--)c.push(n[o]);s.reverse(),c.sort((l,u)=>l.claim_order-u.claim_order);for(let l=0,u=0;l=s[u].claim_order;)u++;const f=ut.removeEventListener(n,e,i)}function bt(t){return function(n){return n.preventDefault(),t.call(this,n)}}function xt(t){return function(n){return n.stopPropagation(),t.call(this,n)}}function vt(t,n,e){e==null?t.removeAttribute(n):t.getAttribute(n)!==e&&t.setAttribute(n,e)}function Z(t){return Array.from(t.childNodes)}function tt(t){t.claim_info===void 0&&(t.claim_info={last_index:0,total_claimed:0})}function D(t,n,e,i,r=!1){tt(t);const s=(()=>{for(let c=t.claim_info.last_index;c=0;c--){const o=t[c];if(n(o)){const l=e(o);return l===void 0?t.splice(c,1):t[c]=l,r?l===void 0&&t.claim_info.last_index--:t.claim_info.last_index=c,o}}return i()})();return s.claim_order=t.claim_info.total_claimed,t.claim_info.total_claimed+=1,s}function L(t,n,e,i){return D(t,r=>r.nodeName===n,r=>{const s=[];for(let c=0;cr.removeAttribute(c))},()=>i(n))}function wt(t,n,e){return L(t,n,e,X)}function Et(t,n,e){return L(t,n,e,Y)}function nt(t,n){return D(t,e=>e.nodeType===3,e=>{const i=""+n;if(e.data.startsWith(i)){if(e.data.length!==i.length)return e.splitText(i.length)}else e.data=i},()=>S(n),!0)}function $t(t){return nt(t," ")}function At(t,n){n=""+n,t.wholeText!==n&&(t.data=n)}function Nt(t,n,e,i){e===null?t.style.removeProperty(n):t.style.setProperty(n,e,i?"important":"")}function et(t,n,{bubbles:e=!1,cancelable:i=!1}={}){const r=document.createEvent("CustomEvent");return r.initCustomEvent(t,e,i,n),r}function St(t,n=document.body){return Array.from(n.querySelectorAll(t))}let m;function h(t){m=t}function j(){if(!m)throw new Error("Function called outside component initialization");return m}function jt(t){j().$$.on_mount.push(t)}function Ct(t){j().$$.after_update.push(t)}function kt(){const t=j();return(n,e,{cancelable:i=!1}={})=>{const r=t.$$.callbacks[n];if(r){const s=et(n,e,{cancelable:i});return r.slice().forEach(c=>{c.call(t,s)}),!s.defaultPrevented}return!0}}const _=[],q=[],x=[],M=[],O=Promise.resolve();let $=!1;function z(){$||($=!0,O.then(F))}function Tt(){return z(),O}function A(t){x.push(t)}const E=new Set;let b=0;function F(){const t=m;do{for(;b<_.length;){const n=_[b];b++,h(n),it(n.$$)}for(h(null),_.length=0,b=0;q.length;)q.pop()();for(let n=0;n{v.delete(t),i&&(e&&t.d(1),i())}),t.o(n)}else i&&i()}const Bt=typeof 
window<"u"?window:typeof globalThis<"u"?globalThis:global;function Dt(t){t&&t.c()}function Lt(t,n){t&&t.l(n)}function ct(t,n,e,i){const{fragment:r,on_mount:s,on_destroy:c,after_update:o}=t.$$;r&&r.m(n,e),i||A(()=>{const l=s.map(P).filter(I);c?c.push(...l):p(l),t.$$.on_mount=[]}),o.forEach(A)}function lt(t,n){const e=t.$$;e.fragment!==null&&(p(e.on_destroy),e.fragment&&e.fragment.d(n),e.on_destroy=e.fragment=null,e.ctx=[])}function ut(t,n){t.$$.dirty[0]===-1&&(_.push(t),z(),t.$$.dirty.fill(0)),t.$$.dirty[n/31|0]|=1<{const k=C.length?C[0]:y;return u.ctx&&r(u.ctx[a],u.ctx[a]=k)&&(!u.skip_bound&&u.bound[a]&&u.bound[a](k),f&&ut(t,a)),y}):[],u.update(),f=!0,p(u.before_update),u.fragment=i?i(u.ctx):!1,n.target){if(n.hydrate){K();const a=Z(n.target);u.fragment&&u.fragment.l(a),a.forEach(V)}else u.fragment&&u.fragment.c();n.intro&&rt(t.$$.fragment),ct(t,n.target,n.anchor,n.customElement),Q(),F()}h(l)}class zt{$destroy(){lt(this,1),this.$destroy=N}$on(n,e){const i=this.$$.callbacks[n]||(this.$$.callbacks[n]=[]);return i.push(e),()=>{const r=i.indexOf(e);r!==-1&&i.splice(r,1)}}$set(n){this.$$set&&!G(n)&&(this.$$.skip_bound=!0,this.$$set(n),this.$$.skip_bound=!1)}}export{N as A,ft as B,_t as C,ht as D,dt as E,U as F,at as G,Y as H,Et as I,kt as J,gt as K,St as L,st as M,xt as N,bt as O,p as P,Bt as Q,A as R,zt as S,q as T,pt as a,mt as b,$t as c,Mt as d,yt as e,rt as f,qt as g,V as h,Ot as i,Ct as j,X as k,wt as l,Z as m,vt as n,jt as o,Nt as p,S as q,nt as r,ot as s,Pt as t,At as u,Dt as v,Lt as w,ct as x,lt as y,Tt as z}; diff --git a/spaces/SeyedAli/Musical-genres-Detection/README.md b/spaces/SeyedAli/Musical-genres-Detection/README.md deleted file mode 100644 index 06e02f3cb0490d9dbd0ee98814aa702a1f4f6348..0000000000000000000000000000000000000000 --- a/spaces/SeyedAli/Musical-genres-Detection/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Musical Genres Detection -emoji: 🎵 -colorFrom: gray -colorTo: red -sdk: gradio -sdk_version: 3.44.4 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Shad0ws/AI-Agent-with-Google-Search-APIs/src/baby_agi.py b/spaces/Shad0ws/AI-Agent-with-Google-Search-APIs/src/baby_agi.py deleted file mode 100644 index 3011e659a283e2bad3139969c56f20df40073e55..0000000000000000000000000000000000000000 --- a/spaces/Shad0ws/AI-Agent-with-Google-Search-APIs/src/baby_agi.py +++ /dev/null @@ -1,221 +0,0 @@ -from langchain.vectorstores.base import VectorStore -from pydantic import BaseModel, Field -from langchain.chains.base import Chain -from collections import deque -from typing import Dict, List, Optional, Any -from langchain.agents import ZeroShotAgent, AgentExecutor -from src.task_creation_chain import TaskCreationChain -from src.task_prio_chain import TaskPrioritizationChain -import streamlit as st -from langchain import LLMChain -from langchain.llms import BaseLLM - -# -----------------helpers - -def get_next_task( - task_creation_chain: LLMChain, - result: Dict, - task_description: str, - task_list: List[str], - objective: str, -) -> List[Dict]: - """Get the next task.""" - incomplete_tasks = ", ".join(task_list) - response = task_creation_chain.run( - result=result, - task_description=task_description, - incomplete_tasks=incomplete_tasks, - objective=objective, - ) - new_tasks = response.split("\n") - return [{"task_name": task_name} for task_name in new_tasks if task_name.strip()] - -def prioritize_tasks( - task_prioritization_chain: LLMChain, - 
this_task_id: int, - task_list: List[Dict], - objective: str, -) -> List[Dict]: - """Prioritize tasks.""" - task_names = [t["task_name"] for t in task_list] - next_task_id = int(this_task_id) + 1 - response = task_prioritization_chain.run( - task_names=task_names, next_task_id=next_task_id, objective=objective - ) - new_tasks = response.split("\n") - prioritized_task_list = [] - for task_string in new_tasks: - if not task_string.strip(): - continue - task_parts = task_string.strip().split(".", 1) - if len(task_parts) == 2: - task_id = task_parts[0].strip() - task_name = task_parts[1].strip() - prioritized_task_list.append({"task_id": task_id, "task_name": task_name}) - return prioritized_task_list - -def _get_top_tasks(vectorstore, query: str, k: int) -> List[str]: - """Get the top k tasks based on the query.""" - results = vectorstore.similarity_search_with_score(query, k=k) - if not results: - return [] - sorted_results, _ = zip(*sorted(results, key=lambda x: x[1], reverse=True)) - return [str(item.metadata["task"]) for item in sorted_results] - - -def execute_task( - vectorstore, execution_chain: LLMChain, objective: str, task: str, k: int = 5 -) -> str: - """Execute a task.""" - context = _get_top_tasks(vectorstore, query=objective, k=k) - return execution_chain.run(objective=objective, context=context, task=task) - - -# ---------------Class------------- - - -class BabyAGI(Chain, BaseModel): - """Controller model for the BabyAGI agent.""" - - task_list: deque = Field(default_factory=deque) - task_creation_chain: TaskCreationChain = Field(...) - task_prioritization_chain: TaskPrioritizationChain = Field(...) - execution_chain: AgentExecutor = Field(...) - task_id_counter: int = Field(1) - vectorstore: VectorStore = Field(init=False) - max_iterations: Optional[int] = None - - class Config: - """Configuration for this pydantic object.""" - - arbitrary_types_allowed = True - - def add_task(self, task: Dict): - self.task_list.append(task) - - def print_task_list(self): - print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m") - if len(self.task_list) > 1: - st.write('**Task List:** \n') - for t in self.task_list: - print(str(t["task_id"]) + ": " + t["task_name"]) - if len(self.task_list) > 1: - st.write(str(t["task_id"]) + ": " + t["task_name"]) - - def print_next_task(self, task: Dict): - print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m") - print(str(task["task_name"])) - return (str(task["task_name"])) - - def print_task_result(self, result: str): - print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m") - return(result) - - @property - def input_keys(self) -> List[str]: - return ["objective"] - - @property - def output_keys(self) -> List[str]: - return [] - - def _call(_self, inputs: Dict[str, Any]) -> Dict[str, Any]: - result_list = [] - """Run the agent.""" - objective = inputs["objective"] - first_task = inputs.get("first_task", f"Make a todo list to accomplish the objective: {objective}") - _self.add_task({"task_id": 1, "task_name": first_task}) - num_iters = 0 - while True: - if _self.task_list: - _self.print_task_list() - - # Step 1: Pull the first task - task = _self.task_list.popleft() - _self.print_next_task(task) - st.write('**Next Task:** \n') - st.write(_self.print_next_task(task)) - - # Step 2: Execute the task - result = execute_task( - _self.vectorstore, _self.execution_chain, objective, task["task_name"] - ) - this_task_id = int(task["task_id"]) - _self.print_task_result(result) - st.write('**Result from Task:** \n') 
- st.write(_self.print_task_result(result)) - result_list.append(result) - - # Step 3: Store the result in Pinecone - result_id = f"result_{task['task_id']}" - _self.vectorstore.add_texts( - texts=[result], - metadatas=[{"task": task["task_name"]}], - ids=[result_id], - ) - - # Step 4: Create new tasks and reprioritize task list - new_tasks = get_next_task( - _self.task_creation_chain, - result, - task["task_name"], - [t["task_name"] for t in _self.task_list], - objective, - ) - for new_task in new_tasks: - _self.task_id_counter += 1 - new_task.update({"task_id": _self.task_id_counter}) - _self.add_task(new_task) - _self.task_list = deque( - prioritize_tasks( - _self.task_prioritization_chain, - this_task_id, - list(_self.task_list), - objective, - ) - ) - num_iters += 1 - if _self.max_iterations is not None and num_iters == _self.max_iterations: - print( - "\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m" - ) - st.success('Task Completed!', icon="✅") - break - - # Create a temporary file to hold the text - with open('output.txt', 'w') as f: - for item in result_list: - f.write(item) - f.write("\n\n") - - return {} - - @classmethod - def from_llm( - cls, - prompt: str, - tools: list, - llm: BaseLLM, - vectorstore: VectorStore, - verbose: bool = False, - **kwargs - ) -> "BabyAGI": - """Initialize the BabyAGI Controller.""" - task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose) - task_prioritization_chain = TaskPrioritizationChain.from_llm( - llm, verbose=verbose - ) - llm_chain = LLMChain(llm=llm, prompt=prompt) - tool_names = [tool.name for tool in tools] - agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names) - agent_executor = AgentExecutor.from_agent_and_tools( - agent=agent, tools=tools, verbose=True - ) - return cls( - task_creation_chain=task_creation_chain, - task_prioritization_chain=task_prioritization_chain, - execution_chain=agent_executor, - vectorstore=vectorstore, - **kwargs, - ) - \ No newline at end of file diff --git a/spaces/Siddhant/ESPnet2-SLU/README.md b/spaces/Siddhant/ESPnet2-SLU/README.md deleted file mode 100644 index 751e74621f74665d26ed2c634da021a0732f63a8..0000000000000000000000000000000000000000 --- a/spaces/Siddhant/ESPnet2-SLU/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: ESPnet2 SLU -emoji: 📈 -colorFrom: green -colorTo: green -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
diff --git a/spaces/SujanMidatani/speechToText/README.md b/spaces/SujanMidatani/speechToText/README.md deleted file mode 100644 index 413d0b0795f773aae321a793ccc71fc5e2bce35c..0000000000000000000000000000000000000000 --- a/spaces/SujanMidatani/speechToText/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: SpeechToText -emoji: 🐢 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/shortcuts/__init__.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/shortcuts/__init__.py deleted file mode 100644 index 12890f4ab6e57dfa3393554e532783e0e05de91c..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/shortcuts/__init__.py +++ /dev/null @@ -1,630 +0,0 @@ -""" -Module to define and register Terminal IPython shortcuts with -:mod:`prompt_toolkit` -""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -import os -import signal -import sys -import warnings -from dataclasses import dataclass -from typing import Callable, Any, Optional, List - -from prompt_toolkit.application.current import get_app -from prompt_toolkit.key_binding import KeyBindings -from prompt_toolkit.key_binding.key_processor import KeyPressEvent -from prompt_toolkit.key_binding.bindings import named_commands as nc -from prompt_toolkit.key_binding.bindings.completion import ( - display_completions_like_readline, -) -from prompt_toolkit.key_binding.vi_state import InputMode, ViState -from prompt_toolkit.filters import Condition - -from IPython.core.getipython import get_ipython -from IPython.terminal.shortcuts import auto_match as match -from IPython.terminal.shortcuts import auto_suggest -from IPython.terminal.shortcuts.filters import filter_from_string -from IPython.utils.decorators import undoc - -from prompt_toolkit.enums import DEFAULT_BUFFER - -__all__ = ["create_ipython_shortcuts"] - - -@dataclass -class BaseBinding: - command: Callable[[KeyPressEvent], Any] - keys: List[str] - - -@dataclass -class RuntimeBinding(BaseBinding): - filter: Condition - - -@dataclass -class Binding(BaseBinding): - # while filter could be created by referencing variables directly (rather - # than created from strings), by using strings we ensure that users will - # be able to create filters in configuration (e.g. JSON) files too, which - # also benefits the documentation by enforcing human-readable filter names. 
- condition: Optional[str] = None - - def __post_init__(self): - if self.condition: - self.filter = filter_from_string(self.condition) - else: - self.filter = None - - -def create_identifier(handler: Callable): - parts = handler.__module__.split(".") - name = handler.__name__ - package = parts[0] - if len(parts) > 1: - final_module = parts[-1] - return f"{package}:{final_module}.{name}" - else: - return f"{package}:{name}" - - -AUTO_MATCH_BINDINGS = [ - *[ - Binding( - cmd, [key], "focused_insert & auto_match & followed_by_closing_paren_or_end" - ) - for key, cmd in match.auto_match_parens.items() - ], - *[ - # raw string - Binding(cmd, [key], "focused_insert & auto_match & preceded_by_raw_str_prefix") - for key, cmd in match.auto_match_parens_raw_string.items() - ], - Binding( - match.double_quote, - ['"'], - "focused_insert" - " & auto_match" - " & not_inside_unclosed_string" - " & preceded_by_paired_double_quotes" - " & followed_by_closing_paren_or_end", - ), - Binding( - match.single_quote, - ["'"], - "focused_insert" - " & auto_match" - " & not_inside_unclosed_string" - " & preceded_by_paired_single_quotes" - " & followed_by_closing_paren_or_end", - ), - Binding( - match.docstring_double_quotes, - ['"'], - "focused_insert" - " & auto_match" - " & not_inside_unclosed_string" - " & preceded_by_two_double_quotes", - ), - Binding( - match.docstring_single_quotes, - ["'"], - "focused_insert" - " & auto_match" - " & not_inside_unclosed_string" - " & preceded_by_two_single_quotes", - ), - Binding( - match.skip_over, - [")"], - "focused_insert & auto_match & followed_by_closing_round_paren", - ), - Binding( - match.skip_over, - ["]"], - "focused_insert & auto_match & followed_by_closing_bracket", - ), - Binding( - match.skip_over, - ["}"], - "focused_insert & auto_match & followed_by_closing_brace", - ), - Binding( - match.skip_over, ['"'], "focused_insert & auto_match & followed_by_double_quote" - ), - Binding( - match.skip_over, ["'"], "focused_insert & auto_match & followed_by_single_quote" - ), - Binding( - match.delete_pair, - ["backspace"], - "focused_insert" - " & preceded_by_opening_round_paren" - " & auto_match" - " & followed_by_closing_round_paren", - ), - Binding( - match.delete_pair, - ["backspace"], - "focused_insert" - " & preceded_by_opening_bracket" - " & auto_match" - " & followed_by_closing_bracket", - ), - Binding( - match.delete_pair, - ["backspace"], - "focused_insert" - " & preceded_by_opening_brace" - " & auto_match" - " & followed_by_closing_brace", - ), - Binding( - match.delete_pair, - ["backspace"], - "focused_insert" - " & preceded_by_double_quote" - " & auto_match" - " & followed_by_double_quote", - ), - Binding( - match.delete_pair, - ["backspace"], - "focused_insert" - " & preceded_by_single_quote" - " & auto_match" - " & followed_by_single_quote", - ), -] - -AUTO_SUGGEST_BINDINGS = [ - # there are two reasons for re-defining bindings defined upstream: - # 1) prompt-toolkit does not execute autosuggestion bindings in vi mode, - # 2) prompt-toolkit checks if we are at the end of text, not end of line - # hence it does not work in multi-line mode of navigable provider - Binding( - auto_suggest.accept_or_jump_to_end, - ["end"], - "has_suggestion & default_buffer_focused & emacs_like_insert_mode", - ), - Binding( - auto_suggest.accept_or_jump_to_end, - ["c-e"], - "has_suggestion & default_buffer_focused & emacs_like_insert_mode", - ), - Binding( - auto_suggest.accept, - ["c-f"], - "has_suggestion & default_buffer_focused & emacs_like_insert_mode", - ), - Binding( - 
auto_suggest.accept, - ["right"], - "has_suggestion & default_buffer_focused & emacs_like_insert_mode", - ), - Binding( - auto_suggest.accept_word, - ["escape", "f"], - "has_suggestion & default_buffer_focused & emacs_like_insert_mode", - ), - Binding( - auto_suggest.accept_token, - ["c-right"], - "has_suggestion & default_buffer_focused & emacs_like_insert_mode", - ), - Binding( - auto_suggest.discard, - ["escape"], - # note this one is using `emacs_insert_mode`, not `emacs_like_insert_mode` - # as in `vi_insert_mode` we do not want `escape` to be shadowed (ever). - "has_suggestion & default_buffer_focused & emacs_insert_mode", - ), - Binding( - auto_suggest.discard, - ["delete"], - "has_suggestion & default_buffer_focused & emacs_insert_mode", - ), - Binding( - auto_suggest.swap_autosuggestion_up, - ["c-up"], - "navigable_suggestions" - " & ~has_line_above" - " & has_suggestion" - " & default_buffer_focused", - ), - Binding( - auto_suggest.swap_autosuggestion_down, - ["c-down"], - "navigable_suggestions" - " & ~has_line_below" - " & has_suggestion" - " & default_buffer_focused", - ), - Binding( - auto_suggest.up_and_update_hint, - ["c-up"], - "has_line_above & navigable_suggestions & default_buffer_focused", - ), - Binding( - auto_suggest.down_and_update_hint, - ["c-down"], - "has_line_below & navigable_suggestions & default_buffer_focused", - ), - Binding( - auto_suggest.accept_character, - ["escape", "right"], - "has_suggestion & default_buffer_focused & emacs_like_insert_mode", - ), - Binding( - auto_suggest.accept_and_move_cursor_left, - ["c-left"], - "has_suggestion & default_buffer_focused & emacs_like_insert_mode", - ), - Binding( - auto_suggest.accept_and_keep_cursor, - ["escape", "down"], - "has_suggestion & default_buffer_focused & emacs_insert_mode", - ), - Binding( - auto_suggest.backspace_and_resume_hint, - ["backspace"], - # no `has_suggestion` here to allow resuming if no suggestion - "default_buffer_focused & emacs_like_insert_mode", - ), - Binding( - auto_suggest.resume_hinting, - ["right"], - "is_cursor_at_the_end_of_line" - " & default_buffer_focused" - " & emacs_like_insert_mode" - " & pass_through", - ), -] - - -SIMPLE_CONTROL_BINDINGS = [ - Binding(cmd, [key], "vi_insert_mode & default_buffer_focused & ebivim") - for key, cmd in { - "c-a": nc.beginning_of_line, - "c-b": nc.backward_char, - "c-k": nc.kill_line, - "c-w": nc.backward_kill_word, - "c-y": nc.yank, - "c-_": nc.undo, - }.items() -] - - -ALT_AND_COMOBO_CONTROL_BINDINGS = [ - Binding(cmd, list(keys), "vi_insert_mode & default_buffer_focused & ebivim") - for keys, cmd in { - # Control Combos - ("c-x", "c-e"): nc.edit_and_execute, - ("c-x", "e"): nc.edit_and_execute, - # Alt - ("escape", "b"): nc.backward_word, - ("escape", "c"): nc.capitalize_word, - ("escape", "d"): nc.kill_word, - ("escape", "h"): nc.backward_kill_word, - ("escape", "l"): nc.downcase_word, - ("escape", "u"): nc.uppercase_word, - ("escape", "y"): nc.yank_pop, - ("escape", "."): nc.yank_last_arg, - }.items() -] - - -def add_binding(bindings: KeyBindings, binding: Binding): - bindings.add( - *binding.keys, - **({"filter": binding.filter} if binding.filter is not None else {}), - )(binding.command) - - -def create_ipython_shortcuts(shell, skip=None) -> KeyBindings: - """Set up the prompt_toolkit keyboard shortcuts for IPython. - - Parameters - ---------- - shell: InteractiveShell - The current IPython shell Instance - skip: List[Binding] - Bindings to skip. - - Returns - ------- - KeyBindings - the keybinding instance for prompt toolkit. 
- - """ - kb = KeyBindings() - skip = skip or [] - for binding in KEY_BINDINGS: - skip_this_one = False - for to_skip in skip: - if ( - to_skip.command == binding.command - and to_skip.filter == binding.filter - and to_skip.keys == binding.keys - ): - skip_this_one = True - break - if skip_this_one: - continue - add_binding(kb, binding) - - def get_input_mode(self): - app = get_app() - app.ttimeoutlen = shell.ttimeoutlen - app.timeoutlen = shell.timeoutlen - - return self._input_mode - - def set_input_mode(self, mode): - shape = {InputMode.NAVIGATION: 2, InputMode.REPLACE: 4}.get(mode, 6) - cursor = "\x1b[{} q".format(shape) - - sys.stdout.write(cursor) - sys.stdout.flush() - - self._input_mode = mode - - if shell.editing_mode == "vi" and shell.modal_cursor: - ViState._input_mode = InputMode.INSERT # type: ignore - ViState.input_mode = property(get_input_mode, set_input_mode) # type: ignore - - return kb - - -def reformat_and_execute(event): - """Reformat code and execute it""" - shell = get_ipython() - reformat_text_before_cursor( - event.current_buffer, event.current_buffer.document, shell - ) - event.current_buffer.validate_and_handle() - - -def reformat_text_before_cursor(buffer, document, shell): - text = buffer.delete_before_cursor(len(document.text[: document.cursor_position])) - try: - formatted_text = shell.reformat_handler(text) - buffer.insert_text(formatted_text) - except Exception as e: - buffer.insert_text(text) - - -def handle_return_or_newline_or_execute(event): - shell = get_ipython() - if getattr(shell, "handle_return", None): - return shell.handle_return(shell)(event) - else: - return newline_or_execute_outer(shell)(event) - - -def newline_or_execute_outer(shell): - def newline_or_execute(event): - """When the user presses return, insert a newline or execute the code.""" - b = event.current_buffer - d = b.document - - if b.complete_state: - cc = b.complete_state.current_completion - if cc: - b.apply_completion(cc) - else: - b.cancel_completion() - return - - # If there's only one line, treat it as if the cursor is at the end. - # See https://github.com/ipython/ipython/issues/10425 - if d.line_count == 1: - check_text = d.text - else: - check_text = d.text[: d.cursor_position] - status, indent = shell.check_complete(check_text) - - # if all we have after the cursor is whitespace: reformat current text - # before cursor - after_cursor = d.text[d.cursor_position :] - reformatted = False - if not after_cursor.strip(): - reformat_text_before_cursor(b, d, shell) - reformatted = True - if not ( - d.on_last_line - or d.cursor_position_row >= d.line_count - d.empty_line_count_at_the_end() - ): - if shell.autoindent: - b.insert_text("\n" + indent) - else: - b.insert_text("\n") - return - - if (status != "incomplete") and b.accept_handler: - if not reformatted: - reformat_text_before_cursor(b, d, shell) - b.validate_and_handle() - else: - if shell.autoindent: - b.insert_text("\n" + indent) - else: - b.insert_text("\n") - - return newline_or_execute - - -def previous_history_or_previous_completion(event): - """ - Control-P in vi edit mode on readline is history next, unlike default prompt toolkit. - - If completer is open this still select previous completion. - """ - event.current_buffer.auto_up() - - -def next_history_or_next_completion(event): - """ - Control-N in vi edit mode on readline is history previous, unlike default prompt toolkit. - - If completer is open this still select next completion. 
- """ - event.current_buffer.auto_down() - - -def dismiss_completion(event): - """Dismiss completion""" - b = event.current_buffer - if b.complete_state: - b.cancel_completion() - - -def reset_buffer(event): - """Reset buffer""" - b = event.current_buffer - if b.complete_state: - b.cancel_completion() - else: - b.reset() - - -def reset_search_buffer(event): - """Reset search buffer""" - if event.current_buffer.document.text: - event.current_buffer.reset() - else: - event.app.layout.focus(DEFAULT_BUFFER) - - -def suspend_to_bg(event): - """Suspend to background""" - event.app.suspend_to_background() - - -def quit(event): - """ - Quit application with ``SIGQUIT`` if supported or ``sys.exit`` otherwise. - - On platforms that support SIGQUIT, send SIGQUIT to the current process. - On other platforms, just exit the process with a message. - """ - sigquit = getattr(signal, "SIGQUIT", None) - if sigquit is not None: - os.kill(0, signal.SIGQUIT) - else: - sys.exit("Quit") - - -def indent_buffer(event): - """Indent buffer""" - event.current_buffer.insert_text(" " * 4) - - -def newline_autoindent(event): - """Insert a newline after the cursor indented appropriately. - - Fancier version of former ``newline_with_copy_margin`` which should - compute the correct indentation of the inserted line. That is to say, indent - by 4 extra space after a function definition, class definition, context - manager... And dedent by 4 space after ``pass``, ``return``, ``raise ...``. - """ - shell = get_ipython() - inputsplitter = shell.input_transformer_manager - b = event.current_buffer - d = b.document - - if b.complete_state: - b.cancel_completion() - text = d.text[: d.cursor_position] + "\n" - _, indent = inputsplitter.check_complete(text) - b.insert_text("\n" + (" " * (indent or 0)), move_cursor=False) - - -def open_input_in_editor(event): - """Open code from input in external editor""" - event.app.current_buffer.open_in_editor() - - -if sys.platform == "win32": - from IPython.core.error import TryNext - from IPython.lib.clipboard import ( - ClipboardEmpty, - tkinter_clipboard_get, - win32_clipboard_get, - ) - - @undoc - def win_paste(event): - try: - text = win32_clipboard_get() - except TryNext: - try: - text = tkinter_clipboard_get() - except (TryNext, ClipboardEmpty): - return - except ClipboardEmpty: - return - event.current_buffer.insert_text(text.replace("\t", " " * 4)) - -else: - - @undoc - def win_paste(event): - """Stub used on other platforms""" - pass - - -KEY_BINDINGS = [ - Binding( - handle_return_or_newline_or_execute, - ["enter"], - "default_buffer_focused & ~has_selection & insert_mode", - ), - Binding( - reformat_and_execute, - ["escape", "enter"], - "default_buffer_focused & ~has_selection & insert_mode & ebivim", - ), - Binding(quit, ["c-\\"]), - Binding( - previous_history_or_previous_completion, - ["c-p"], - "vi_insert_mode & default_buffer_focused", - ), - Binding( - next_history_or_next_completion, - ["c-n"], - "vi_insert_mode & default_buffer_focused", - ), - Binding(dismiss_completion, ["c-g"], "default_buffer_focused & has_completions"), - Binding(reset_buffer, ["c-c"], "default_buffer_focused"), - Binding(reset_search_buffer, ["c-c"], "search_buffer_focused"), - Binding(suspend_to_bg, ["c-z"], "supports_suspend"), - Binding( - indent_buffer, - ["tab"], # Ctrl+I == Tab - "default_buffer_focused" - " & ~has_selection" - " & insert_mode" - " & cursor_in_leading_ws", - ), - Binding(newline_autoindent, ["c-o"], "default_buffer_focused & emacs_insert_mode"), - Binding(open_input_in_editor, 
["f2"], "default_buffer_focused"), - *AUTO_MATCH_BINDINGS, - *AUTO_SUGGEST_BINDINGS, - Binding( - display_completions_like_readline, - ["c-i"], - "readline_like_completions" - " & default_buffer_focused" - " & ~has_selection" - " & insert_mode" - " & ~cursor_in_leading_ws", - ), - Binding(win_paste, ["c-v"], "default_buffer_focused & ~vi_mode & is_windows_os"), - *SIMPLE_CONTROL_BINDINGS, - *ALT_AND_COMOBO_CONTROL_BINDINGS, -] diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/version.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/version.py deleted file mode 100644 index 1cce4e50bd692d4002e3cac3c545a3fb2efe95d0..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/version.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -__version__ = '1.3.17' - - -def parse_version_info(version_str: str, length: int = 4) -> tuple: - """Parse a version string into a tuple. - - Args: - version_str (str): The version string. - length (int): The maximum number of version levels. Default: 4. - - Returns: - tuple[int | str]: The version info, e.g., "1.3.0" is parsed into - (1, 3, 0, 0, 0, 0), and "2.0.0rc1" is parsed into - (2, 0, 0, 0, 'rc', 1) (when length is set to 4). - """ - from packaging.version import parse - version = parse(version_str) - assert version.release, f'failed to parse version {version_str}' - release = list(version.release) - release = release[:length] - if len(release) < length: - release = release + [0] * (length - len(release)) - if version.is_prerelease: - release.extend(list(version.pre)) - elif version.is_postrelease: - release.extend(list(version.post)) - else: - release.extend([0, 0]) - return tuple(release) - - -version_info = tuple(int(x) for x in __version__.split('.')[:3]) - -__all__ = ['__version__', 'version_info', 'parse_version_info'] diff --git a/spaces/TEnngal/bingo/src/components/chat-attachments.tsx b/spaces/TEnngal/bingo/src/components/chat-attachments.tsx deleted file mode 100644 index ef43d4e262935d263b6099138c56f7daade5299d..0000000000000000000000000000000000000000 --- a/spaces/TEnngal/bingo/src/components/chat-attachments.tsx +++ /dev/null @@ -1,37 +0,0 @@ -import Image from 'next/image' -import ClearIcon from '@/assets/images/clear.svg' -import RefreshIcon from '@/assets/images/refresh.svg' -import { FileItem } from '@/lib/bots/bing/types' -import { cn } from '@/lib/utils' -import { useBing } from '@/lib/hooks/use-bing' - -type ChatAttachmentsProps = Pick, 'attachmentList' | 'setAttachmentList' | 'uploadImage'> - -export function ChatAttachments({ attachmentList = [], setAttachmentList, uploadImage }: ChatAttachmentsProps) { - return attachmentList.length ? ( -
- {attachmentList.map(file => ( -
- {file.status === 'loading' && ( -
-
-
) - } - {file.status !== 'error' && ( -
- -
) - } - {file.status === 'error' && ( -
- uploadImage(file.url)} /> -
- )} - -
- ))} -
- ) : null -} diff --git a/spaces/TEnngal/bingo/src/components/toaster.tsx b/spaces/TEnngal/bingo/src/components/toaster.tsx deleted file mode 100644 index 4d2693460b61307a1d4c127fd01df9bee16e59ff..0000000000000000000000000000000000000000 --- a/spaces/TEnngal/bingo/src/components/toaster.tsx +++ /dev/null @@ -1,3 +0,0 @@ -'use client' - -export { Toaster } from 'react-hot-toast' diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/distro/__main__.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/distro/__main__.py deleted file mode 100644 index 0c01d5b08b6b44379b931d54d7fcf5221fdc9fde..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/distro/__main__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .distro import main - -if __name__ == "__main__": - main() diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py deleted file mode 100644 index 2199cc7b7f004009493d032720c36d6568f9d89e..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/urllib3/util/proxy.py +++ /dev/null @@ -1,57 +0,0 @@ -from .ssl_ import create_urllib3_context, resolve_cert_reqs, resolve_ssl_version - - -def connection_requires_http_tunnel( - proxy_url=None, proxy_config=None, destination_scheme=None -): - """ - Returns True if the connection requires an HTTP CONNECT through the proxy. - - :param URL proxy_url: - URL of the proxy. - :param ProxyConfig proxy_config: - Proxy configuration from poolmanager.py - :param str destination_scheme: - The scheme of the destination. (i.e https, http, etc) - """ - # If we're not using a proxy, no way to use a tunnel. - if proxy_url is None: - return False - - # HTTP destinations never require tunneling, we always forward. - if destination_scheme == "http": - return False - - # Support for forwarding with HTTPS proxies and HTTPS destinations. - if ( - proxy_url.scheme == "https" - and proxy_config - and proxy_config.use_forwarding_for_https - ): - return False - - # Otherwise always use a tunnel. - return True - - -def create_proxy_ssl_context( - ssl_version, cert_reqs, ca_certs=None, ca_cert_dir=None, ca_cert_data=None -): - """ - Generates a default proxy ssl context if one hasn't been provided by the - user. 
- """ - ssl_context = create_urllib3_context( - ssl_version=resolve_ssl_version(ssl_version), - cert_reqs=resolve_cert_reqs(cert_reqs), - ) - - if ( - not ca_certs - and not ca_cert_dir - and not ca_cert_data - and hasattr(ssl_context, "load_default_certs") - ): - ssl_context.load_default_certs() - - return ssl_context diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/dense_heads/utils.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/dense_heads/utils.py deleted file mode 100644 index c9efa287fc71315f633347023b390fe4ce57913a..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/dense_heads/utils.py +++ /dev/null @@ -1,38 +0,0 @@ -import cv2 -import torch -from torch import nn -from detectron2.utils.comm import get_world_size -from detectron2.structures import pairwise_iou, Boxes -# from .data import CenterNetCrop -import torch.nn.functional as F -import numpy as np -from detectron2.structures import Boxes, ImageList, Instances - -__all__ = ['reduce_sum', '_transpose'] - -INF = 1000000000 - -def _transpose(training_targets, num_loc_list): - ''' - This function is used to transpose image first training targets to - level first ones - :return: level first training targets - ''' - for im_i in range(len(training_targets)): - training_targets[im_i] = torch.split( - training_targets[im_i], num_loc_list, dim=0) - - targets_level_first = [] - for targets_per_level in zip(*training_targets): - targets_level_first.append( - torch.cat(targets_per_level, dim=0)) - return targets_level_first - - -def reduce_sum(tensor): - world_size = get_world_size() - if world_size < 2: - return tensor - tensor = tensor.clone() - torch.distributed.all_reduce(tensor, op=torch.distributed.ReduceOp.SUM) - return tensor \ No newline at end of file diff --git a/spaces/VikasKumar01/My_AI_chatbot/README.md b/spaces/VikasKumar01/My_AI_chatbot/README.md deleted file mode 100644 index 0fabb51accd04c09ea27887174a314c3af9639a6..0000000000000000000000000000000000000000 --- a/spaces/VikasKumar01/My_AI_chatbot/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: My AI Chatbot -emoji: 👀 -colorFrom: gray -colorTo: gray -sdk: gradio -sdk_version: 3.42.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Wayben/ChatGPT/run_Windows.bat b/spaces/Wayben/ChatGPT/run_Windows.bat deleted file mode 100644 index 4c18f9ccaeea0af972301ffdf48778641221f76d..0000000000000000000000000000000000000000 --- a/spaces/Wayben/ChatGPT/run_Windows.bat +++ /dev/null @@ -1,5 +0,0 @@ -@echo off -echo Opening ChuanhuChatGPT... - -REM Open powershell via bat -start powershell.exe -NoExit -Command "python ./ChuanhuChatbot.py" diff --git a/spaces/Xalphinions/tab-cot/app.py b/spaces/Xalphinions/tab-cot/app.py deleted file mode 100644 index 44e87153fb7932b9266c7ca9e79da64ee405478a..0000000000000000000000000000000000000000 --- a/spaces/Xalphinions/tab-cot/app.py +++ /dev/null @@ -1,49 +0,0 @@ -import gradio as gr -from statistics import mean -from torch.utils.data import Dataset -from collections import OrderedDict -import xml.etree.ElementTree as ET -import openai # For GPT-3 API ... 
-import os -import multiprocessing -import json -import numpy as np -import random -import torch -import torchtext -import re -import random -import time -import datetime -import pandas as pd -import sys - -openai.api_key = os.getenv("api_key") - -def greet(question): - input = question + '\n\n' + "|step|subquestion|process|result|" - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=[ - {"role": "system", "content": "You are a helpful assistant that generate table to solve reasoning problem."}, - {"role": "user", "content": input}, - - ] - ) - response = response["choices"][0]["message"]["content"] - return "|step|subquestion|process|result|\n" + response - - -iface = gr.Interface( - fn=greet, - inputs="text", - outputs="text", - title="Tab-CoT: Zero-Shot Tabular Chain-of-Thought", - examples=[ - ["Tommy is fundraising for his charity by selling brownies for $3 a slice and cheesecakes for $4 a slice. If Tommy sells 43 brownies and 23 slices of cheesecake, how much money does Tommy raise?"], - ["Judy teaches 5 dance classes, every day, on the weekdays and 8 classes on Saturday. If each class has 15 students and she charges $15.00 per student, how much money does she make in 1 week?"], - ["According to its nutritional info, a bag of chips has 250 calories per serving. If a 300g bag has 5 servings, how many grams can you eat if your daily calorie target is 2000 and you have already consumed 1800 calories?"], - ] - ) -iface.launch() - diff --git a/spaces/XingHe0127/Chatbot/Dockerfile b/spaces/XingHe0127/Chatbot/Dockerfile deleted file mode 100644 index 335c2dba28ba8c365de9306858462a59dea25f28..0000000000000000000000000000000000000000 --- a/spaces/XingHe0127/Chatbot/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM python:3.9 as builder -RUN apt-get update && apt-get install -y build-essential -COPY requirements.txt . -COPY requirements_advanced.txt . -RUN pip install --user -r requirements.txt -# RUN pip install --user -r requirements_advanced.txt - -FROM python:3.9 -MAINTAINER iskoldt -COPY --from=builder /root/.local /root/.local -ENV PATH=/root/.local/bin:$PATH -COPY . /app -WORKDIR /app -ENV dockerrun yes -CMD ["python3", "-u", "ChuanhuChatbot.py", "2>&1", "|", "tee", "/var/log/application.log"] diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/stable_diffusion_safe/safety_checker.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/stable_diffusion_safe/safety_checker.py deleted file mode 100644 index f9dbf51e86440847646e168e5a50ebf835440f2a..0000000000000000000000000000000000000000 --- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/stable_diffusion_safe/safety_checker.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import torch -import torch.nn as nn - -from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel - -from ...utils import logging - - -logger = logging.get_logger(__name__) - - -def cosine_distance(image_embeds, text_embeds): - normalized_image_embeds = nn.functional.normalize(image_embeds) - normalized_text_embeds = nn.functional.normalize(text_embeds) - return torch.mm(normalized_image_embeds, normalized_text_embeds.t()) - - -class SafeStableDiffusionSafetyChecker(PreTrainedModel): - config_class = CLIPConfig - - _no_split_modules = ["CLIPEncoderLayer"] - - def __init__(self, config: CLIPConfig): - super().__init__(config) - - self.vision_model = CLIPVisionModel(config.vision_config) - self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False) - - self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False) - self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False) - - self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False) - self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False) - - @torch.no_grad() - def forward(self, clip_input, images): - pooled_output = self.vision_model(clip_input)[1] # pooled_output - image_embeds = self.visual_projection(pooled_output) - - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 - special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy() - cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy() - - result = [] - batch_size = image_embeds.shape[0] - for i in range(batch_size): - result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []} - - # increase this value to create a stronger `nfsw` filter - # at the cost of increasing the possibility of filtering benign images - adjustment = 0.0 - - for concept_idx in range(len(special_cos_dist[0])): - concept_cos = special_cos_dist[i][concept_idx] - concept_threshold = self.special_care_embeds_weights[concept_idx].item() - result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3) - if result_img["special_scores"][concept_idx] > 0: - result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]}) - adjustment = 0.01 - - for concept_idx in range(len(cos_dist[0])): - concept_cos = cos_dist[i][concept_idx] - concept_threshold = self.concept_embeds_weights[concept_idx].item() - result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3) - if result_img["concept_scores"][concept_idx] > 0: - result_img["bad_concepts"].append(concept_idx) - - result.append(result_img) - - has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result] - - return images, has_nsfw_concepts - - @torch.no_grad() - def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor): - pooled_output = self.vision_model(clip_input)[1] # pooled_output - image_embeds = self.visual_projection(pooled_output) - - special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds) - cos_dist = cosine_distance(image_embeds, self.concept_embeds) - - # increase this value to create a stronger `nsfw` filter - # at the cost of increasing the possibility of filtering benign images - adjustment = 0.0 - - special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment 
- # special_scores = special_scores.round(decimals=3) - special_care = torch.any(special_scores > 0, dim=1) - special_adjustment = special_care * 0.01 - special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1]) - - concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment - # concept_scores = concept_scores.round(decimals=3) - has_nsfw_concepts = torch.any(concept_scores > 0, dim=1) - - return images, has_nsfw_concepts diff --git a/spaces/Yiqin/ChatVID/model/fastchat/serve/monkey_patch_non_inplace.py b/spaces/Yiqin/ChatVID/model/fastchat/serve/monkey_patch_non_inplace.py deleted file mode 100644 index 9661d70751261a11bbc33b57967efcf09d3cbe0c..0000000000000000000000000000000000000000 --- a/spaces/Yiqin/ChatVID/model/fastchat/serve/monkey_patch_non_inplace.py +++ /dev/null @@ -1,118 +0,0 @@ -""" -Monkey patch the llama implementation in the huggingface/transformers library. -Avoid bugs in mps backend by not using in-place operations. -""" -import math -from typing import List, Optional, Tuple - -import torch -from torch import nn -import transformers - - -def rotate_half(x): - """Rotates half the hidden dims of the input.""" - x1 = x[..., : x.shape[-1] // 2].clone() - x2 = x[..., x.shape[-1] // 2 :].clone() - return torch.cat((-x2, x1), dim=-1) - - -def apply_rotary_pos_emb(q, k, cos, sin, position_ids): - gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1] - gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3]) - cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices) - sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices) - q_embed = (q * cos) + (rotate_half(q) * sin) - k_embed = (k * cos) + (rotate_half(k) * sin) - return q_embed, k_embed - - -def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.LongTensor] = None, - past_key_value: Optional[Tuple[torch.Tensor]] = None, - output_attentions: bool = False, - use_cache: bool = False, -) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: - bsz, q_len, _ = hidden_states.size() - - query_states = ( - self.q_proj(hidden_states) - .view(bsz, q_len, self.num_heads, self.head_dim) - .transpose(1, 2) - ) - key_states = ( - self.k_proj(hidden_states) - .view(bsz, q_len, self.num_heads, self.head_dim) - .transpose(1, 2) - ) - value_states = ( - self.v_proj(hidden_states) - .view(bsz, q_len, self.num_heads, self.head_dim) - .transpose(1, 2) - ) - - kv_seq_len = key_states.shape[-2] - if past_key_value is not None: - kv_seq_len += past_key_value[0].shape[-2] - cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) - query_states, key_states = apply_rotary_pos_emb( - query_states, key_states, cos, sin, position_ids - ) - # [bsz, nh, t, hd] - - if past_key_value is not None: - # reuse k, v, self_attention - key_states = torch.cat([past_key_value[0], key_states], dim=2) - value_states = torch.cat([past_key_value[1], value_states], dim=2) - - past_key_value = (key_states, value_states) if use_cache else None - - attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt( - self.head_dim - ) - - if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): - raise ValueError( - f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is" - f" {attn_weights.size()}" - ) - - if attention_mask is not None: - if attention_mask.size() != (bsz, 1, q_len, 
kv_seq_len): - raise ValueError( - f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" - ) - attn_weights = attn_weights + attention_mask - attn_weights = torch.max( - attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min) - ) - - # upcast attention to fp32 - attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to( - query_states.dtype - ) - attn_output = torch.matmul(attn_weights, value_states) - - if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): - raise ValueError( - f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" - f" {attn_output.size()}" - ) - - attn_output = attn_output.transpose(1, 2) - attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) - - attn_output = self.o_proj(attn_output) - - if not output_attentions: - attn_weights = None - - return attn_output, attn_weights, past_key_value - - -def replace_llama_attn_with_non_inplace_operations(): - """Avoid bugs in mps backend by not using in-place operations.""" - transformers.models.llama.modeling_llama.LlamaAttention.forward = forward diff --git a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/data/benchmark.py b/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/data/benchmark.py deleted file mode 100644 index ac2f372a4b111ad40b8e720adea208608271bab6..0000000000000000000000000000000000000000 --- a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/data/benchmark.py +++ /dev/null @@ -1,225 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import logging -import numpy as np -from itertools import count -from typing import List, Tuple -import torch -import tqdm -from fvcore.common.timer import Timer - -from detectron2.utils import comm - -from .build import build_batch_data_loader -from .common import DatasetFromList, MapDataset -from .samplers import TrainingSampler - -logger = logging.getLogger(__name__) - - -class _EmptyMapDataset(torch.utils.data.Dataset): - """ - Map anything to emptiness. - """ - - def __init__(self, dataset): - self.ds = dataset - - def __len__(self): - return len(self.ds) - - def __getitem__(self, idx): - _ = self.ds[idx] - return [0] - - -def iter_benchmark( - iterator, num_iter: int, warmup: int = 5, max_time_seconds: float = 60 -) -> Tuple[float, List[float]]: - """ - Benchmark an iterator/iterable for `num_iter` iterations with an extra - `warmup` iterations of warmup. - End early if `max_time_seconds` time is spent on iterations. - - Returns: - float: average time (seconds) per iteration - list[float]: time spent on each iteration. Sometimes useful for further analysis. - """ - num_iter, warmup = int(num_iter), int(warmup) - - iterator = iter(iterator) - for _ in range(warmup): - next(iterator) - timer = Timer() - all_times = [] - for curr_iter in tqdm.trange(num_iter): - start = timer.seconds() - if start > max_time_seconds: - num_iter = curr_iter - break - next(iterator) - all_times.append(timer.seconds() - start) - avg = timer.seconds() / num_iter - return avg, all_times - - -class DataLoaderBenchmark: - """ - Some common benchmarks that help understand perf bottleneck of a standard dataloader - made of dataset, mapper and sampler. 
- """ - - def __init__( - self, - dataset, - *, - mapper, - sampler=None, - total_batch_size, - num_workers=0, - max_time_seconds: int = 90, - ): - """ - Args: - max_time_seconds (int): maximum time to spent for each benchmark - other args: same as in `build.py:build_detection_train_loader` - """ - if isinstance(dataset, list): - dataset = DatasetFromList(dataset, copy=False, serialize=True) - if sampler is None: - sampler = TrainingSampler(len(dataset)) - - self.dataset = dataset - self.mapper = mapper - self.sampler = sampler - self.total_batch_size = total_batch_size - self.num_workers = num_workers - self.per_gpu_batch_size = self.total_batch_size // comm.get_world_size() - - self.max_time_seconds = max_time_seconds - - def _benchmark(self, iterator, num_iter, warmup, msg=None): - avg, all_times = iter_benchmark(iterator, num_iter, warmup, self.max_time_seconds) - if msg is not None: - self._log_time(msg, avg, all_times) - return avg, all_times - - def _log_time(self, msg, avg, all_times, distributed=False): - percentiles = [np.percentile(all_times, k, interpolation="nearest") for k in [1, 5, 95, 99]] - if not distributed: - logger.info( - f"{msg}: avg={1.0/avg:.1f} it/s, " - f"p1={percentiles[0]:.2g}s, p5={percentiles[1]:.2g}s, " - f"p95={percentiles[2]:.2g}s, p99={percentiles[3]:.2g}s." - ) - return - avg_per_gpu = comm.all_gather(avg) - percentiles_per_gpu = comm.all_gather(percentiles) - if comm.get_rank() > 0: - return - for idx, avg, percentiles in zip(count(), avg_per_gpu, percentiles_per_gpu): - logger.info( - f"GPU{idx} {msg}: avg={1.0/avg:.1f} it/s, " - f"p1={percentiles[0]:.2g}s, p5={percentiles[1]:.2g}s, " - f"p95={percentiles[2]:.2g}s, p99={percentiles[3]:.2g}s." - ) - - def benchmark_dataset(self, num_iter, warmup=5): - """ - Benchmark the speed of taking raw samples from the dataset. - """ - - def loader(): - while True: - for k in self.sampler: - yield self.dataset[k] - - self._benchmark(loader(), num_iter, warmup, "Dataset Alone") - - def benchmark_mapper(self, num_iter, warmup=5): - """ - Benchmark the speed of taking raw samples from the dataset and map - them in a single process. - """ - - def loader(): - while True: - for k in self.sampler: - yield self.mapper(self.dataset[k]) - - self._benchmark(loader(), num_iter, warmup, "Single Process Mapper (sec/sample)") - - def benchmark_workers(self, num_iter, warmup=10): - """ - Benchmark the dataloader by tuning num_workers to [0, 1, self.num_workers]. - """ - candidates = [0, 1] - if self.num_workers not in candidates: - candidates.append(self.num_workers) - - dataset = MapDataset(self.dataset, self.mapper) - for n in candidates: - loader = build_batch_data_loader( - dataset, - self.sampler, - self.total_batch_size, - num_workers=n, - ) - self._benchmark( - iter(loader), - num_iter * max(n, 1), - warmup * max(n, 1), - f"DataLoader ({n} workers, bs={self.per_gpu_batch_size})", - ) - del loader - - def benchmark_IPC(self, num_iter, warmup=10): - """ - Benchmark the dataloader where each worker outputs nothing. This - eliminates the IPC overhead compared to the regular dataloader. - - PyTorch multiprocessing's IPC only optimizes for torch tensors. - Large numpy arrays or other data structure may incur large IPC overhead. 
- """ - n = self.num_workers - dataset = _EmptyMapDataset(MapDataset(self.dataset, self.mapper)) - loader = build_batch_data_loader( - dataset, self.sampler, self.total_batch_size, num_workers=n - ) - self._benchmark( - iter(loader), - num_iter * max(n, 1), - warmup * max(n, 1), - f"DataLoader ({n} workers, bs={self.per_gpu_batch_size}) w/o comm", - ) - - def benchmark_distributed(self, num_iter, warmup=10): - """ - Benchmark the dataloader in each distributed worker, and log results of - all workers. This helps understand the final performance as well as - the variances among workers. - - It also prints startup time (first iter) of the dataloader. - """ - gpu = comm.get_world_size() - dataset = MapDataset(self.dataset, self.mapper) - n = self.num_workers - loader = build_batch_data_loader( - dataset, self.sampler, self.total_batch_size, num_workers=n - ) - - timer = Timer() - loader = iter(loader) - next(loader) - startup_time = timer.seconds() - logger.info("Dataloader startup time: {:.2f} seconds".format(startup_time)) - - comm.synchronize() - - avg, all_times = self._benchmark(loader, num_iter * max(n, 1), warmup * max(n, 1)) - del loader - self._log_time( - f"DataLoader ({gpu} GPUs x {n} workers, total bs={self.total_batch_size})", - avg, - all_times, - True, - ) diff --git a/spaces/YuAnthony/Audio-Caption/coco_caption/pycocoevalcap/bleu/__init__.py b/spaces/YuAnthony/Audio-Caption/coco_caption/pycocoevalcap/bleu/__init__.py deleted file mode 100644 index 3f7d85bba884ea8f83fc6ab2a1e6ade80d98d4d9..0000000000000000000000000000000000000000 --- a/spaces/YuAnthony/Audio-Caption/coco_caption/pycocoevalcap/bleu/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__author__ = 'tylin' diff --git a/spaces/aaaaaabbbbbbbdddddddduuuuulllll/Ashaar/poetry_diacritizer/util/text_encoders.py b/spaces/aaaaaabbbbbbbdddddddduuuuulllll/Ashaar/poetry_diacritizer/util/text_encoders.py deleted file mode 100644 index b49c5603afa2d41ad6e0145b719443f0f4ce9301..0000000000000000000000000000000000000000 --- a/spaces/aaaaaabbbbbbbdddddddduuuuulllll/Ashaar/poetry_diacritizer/util/text_encoders.py +++ /dev/null @@ -1,160 +0,0 @@ -from . 
import text_cleaners -from typing import Dict, List, Optional -from .constants import ALL_POSSIBLE_HARAQAT -import sentencepiece as spm - - -class TextEncoder: - pad = "P" - - def __init__( - self, - input_chars: List[str], - target_charts: List[str], - cleaner_fn: Optional[str] = None, - reverse_input: bool = False, - reverse_target: bool = False, - sp_model_path=None, - ): - if cleaner_fn: - self.cleaner_fn = getattr(text_cleaners, cleaner_fn) - else: - self.cleaner_fn = None - - self.input_symbols: List[str] = [TextEncoder.pad] + input_chars - self.target_symbols: List[str] = [TextEncoder.pad] + target_charts - - if sp_model_path is None: - self.input_symbol_to_id: Dict[str, int] = { - s: i for i, s in enumerate(self.input_symbols) - } - self.input_id_to_symbol: Dict[int, str] = { - i: s for i, s in enumerate(self.input_symbols) - } - else: - sp_model = spm.SentencePieceProcessor() - sp_model.load(sp_model_path + "/sp.model") - self.input_symbol_to_id: Dict[str, int] = { - s: sp_model.PieceToId(s+'▁') for s in self.input_symbols - } - self.input_symbol_to_id[" "] = sp_model.PieceToId("|") # encode space - self.input_symbol_to_id[TextEncoder.pad] = 0 # encode padding - - self.input_space_id = sp_model.PieceToId("|") - self.input_id_to_symbol: Dict[int, str] = { - i: s for s, i in self.input_symbol_to_id.items() - } - - self.target_symbol_to_id: Dict[str, int] = { - s: i for i, s in enumerate(self.target_symbols) - } - self.target_id_to_symbol: Dict[int, str] = { - i: s for i, s in enumerate(self.target_symbols) - } - - self.reverse_input = reverse_input - self.reverse_target = reverse_target - self.input_pad_id = self.input_symbol_to_id[self.pad] - self.target_pad_id = self.target_symbol_to_id[self.pad] - self.start_symbol_id = None - - def input_to_sequence(self, text: str) -> List[int]: - if self.reverse_input: - text = "".join(list(reversed(text))) - sequence = [self.input_symbol_to_id[s] for s in text if s not in [self.pad]] - - return sequence - - def target_to_sequence(self, text: str) -> List[int]: - if self.reverse_target: - text = "".join(list(reversed(text))) - sequence = [self.target_symbol_to_id[s] for s in text if s not in [self.pad]] - - return sequence - - def sequence_to_input(self, sequence: List[int]): - return [ - self.input_id_to_symbol[symbol] - for symbol in sequence - if symbol in self.input_id_to_symbol and symbol not in [self.input_pad_id] - ] - - def sequence_to_target(self, sequence: List[int]): - return [ - self.target_id_to_symbol[symbol] - for symbol in sequence - if symbol in self.target_id_to_symbol and symbol not in [self.target_pad_id] - ] - - def clean(self, text): - if self.cleaner_fn: - return self.cleaner_fn(text) - return text - - def combine_text_and_haraqat(self, input_ids: List[int], output_ids: List[int]): - """ - Combines the input text with its corresponding haraqat - Args: - inputs: a list of ids representing the input text - outputs: a list of ids representing the output text - Returns: - text: the text after merging the inputs text representation with the output - representation - """ - output = "" - for i, input_id in enumerate(input_ids): - if input_id == self.input_pad_id: - break - output += self.input_id_to_symbol[input_id] - # if input_id == self.input_space_id: - # continue - output += self.target_id_to_symbol[output_ids[i]] - return output - - def __str__(self): - return type(self).__name__ - - -class BasicArabicEncoder(TextEncoder): - def __init__( - self, - cleaner_fn="basic_cleaners", - reverse_input: bool = False, - 
reverse_target: bool = False, - sp_model_path=None, - ): - input_chars: List[str] = list("بض.غىهظخة؟:طس،؛فندؤلوئآك-يذاصشحزءمأجإ ترقعث") - target_charts: List[str] = list(ALL_POSSIBLE_HARAQAT.keys()) - - super().__init__( - input_chars, - target_charts, - cleaner_fn=cleaner_fn, - reverse_input=reverse_input, - reverse_target=reverse_target, - sp_model_path=sp_model_path, - ) - - -class ArabicEncoderWithStartSymbol(TextEncoder): - def __init__( - self, - cleaner_fn="basic_cleaners", - reverse_input: bool = False, - reverse_target: bool = False, - sp_model_path=None, - ): - input_chars: List[str] = list("بض.غىهظخة؟:طس،؛فندؤلوئآك-يذاصشحزءمأجإ ترقعث") - # the only difference from the basic encoder is adding the start symbol - target_charts: List[str] = list(ALL_POSSIBLE_HARAQAT.keys()) + ["s"] - - super().__init__( - input_chars, - target_charts, - cleaner_fn=cleaner_fn, - reverse_input=reverse_input, - reverse_target=reverse_target, - sp_model_path=sp_model_path, - ) - - self.start_symbol_id = self.target_symbol_to_id["s"] diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/utils/__init__.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/utils/__init__.py deleted file mode 100644 index 378a0068432a371af364de9d73785901c0f83383..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/utils/__init__.py +++ /dev/null @@ -1,69 +0,0 @@ -# flake8: noqa -# Copyright (c) OpenMMLab. All rights reserved. -from .config import Config, ConfigDict, DictAction -from .misc import (check_prerequisites, concat_list, deprecated_api_warning, - has_method, import_modules_from_strings, is_list_of, - is_method_overridden, is_seq_of, is_str, is_tuple_of, - iter_cast, list_cast, requires_executable, requires_package, - slice_list, to_1tuple, to_2tuple, to_3tuple, to_4tuple, - to_ntuple, tuple_cast) -from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist, - scandir, symlink) -from .progressbar import (ProgressBar, track_iter_progress, - track_parallel_progress, track_progress) -from .testing import (assert_attrs_equal, assert_dict_contains_subset, - assert_dict_has_keys, assert_is_norm_layer, - assert_keys_equal, assert_params_all_zeros, - check_python_script) -from .timer import Timer, TimerError, check_time -from .version_utils import digit_version, get_git_hash - -try: - import torch -except ImportError: - __all__ = [ - 'Config', 'ConfigDict', 'DictAction', 'is_str', 'iter_cast', - 'list_cast', 'tuple_cast', 'is_seq_of', 'is_list_of', 'is_tuple_of', - 'slice_list', 'concat_list', 'check_prerequisites', 'requires_package', - 'requires_executable', 'is_filepath', 'fopen', 'check_file_exist', - 'mkdir_or_exist', 'symlink', 'scandir', 'ProgressBar', - 'track_progress', 'track_iter_progress', 'track_parallel_progress', - 'Timer', 'TimerError', 'check_time', 'deprecated_api_warning', - 'digit_version', 'get_git_hash', 'import_modules_from_strings', - 'assert_dict_contains_subset', 'assert_attrs_equal', - 'assert_dict_has_keys', 'assert_keys_equal', 'check_python_script', - 'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple', - 'is_method_overridden', 'has_method' - ] -else: - from .env import collect_env - from .logging import get_logger, print_log - from .parrots_jit import jit, skip_no_elena - from .parrots_wrapper import ( - TORCH_VERSION, BuildExtension, CppExtension, CUDAExtension, DataLoader, - PoolDataLoader, SyncBatchNorm, _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, - _AvgPoolNd, _BatchNorm, _ConvNd, 
_ConvTransposeMixin, _InstanceNorm, - _MaxPoolNd, get_build_config, is_rocm_pytorch, _get_cuda_home) - from .registry import Registry, build_from_cfg - from .trace import is_jit_tracing - __all__ = [ - 'Config', 'ConfigDict', 'DictAction', 'collect_env', 'get_logger', - 'print_log', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast', - 'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list', - 'check_prerequisites', 'requires_package', 'requires_executable', - 'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', - 'symlink', 'scandir', 'ProgressBar', 'track_progress', - 'track_iter_progress', 'track_parallel_progress', 'Registry', - 'build_from_cfg', 'Timer', 'TimerError', 'check_time', 'SyncBatchNorm', - '_AdaptiveAvgPoolNd', '_AdaptiveMaxPoolNd', '_AvgPoolNd', '_BatchNorm', - '_ConvNd', '_ConvTransposeMixin', '_InstanceNorm', '_MaxPoolNd', - 'get_build_config', 'BuildExtension', 'CppExtension', 'CUDAExtension', - 'DataLoader', 'PoolDataLoader', 'TORCH_VERSION', - 'deprecated_api_warning', 'digit_version', 'get_git_hash', - 'import_modules_from_strings', 'jit', 'skip_no_elena', - 'assert_dict_contains_subset', 'assert_attrs_equal', - 'assert_dict_has_keys', 'assert_keys_equal', 'assert_is_norm_layer', - 'assert_params_all_zeros', 'check_python_script', - 'is_method_overridden', 'is_jit_tracing', 'is_rocm_pytorch', - '_get_cuda_home', 'has_method' - ] diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/cnn/vgg.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/cnn/vgg.py deleted file mode 100644 index 8778b649561a45a9652b1a15a26c2d171e58f3e1..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/cnn/vgg.py +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import logging - -import torch.nn as nn - -from .utils import constant_init, kaiming_init, normal_init - - -def conv3x3(in_planes, out_planes, dilation=1): - """3x3 convolution with padding.""" - return nn.Conv2d( - in_planes, - out_planes, - kernel_size=3, - padding=dilation, - dilation=dilation) - - -def make_vgg_layer(inplanes, - planes, - num_blocks, - dilation=1, - with_bn=False, - ceil_mode=False): - layers = [] - for _ in range(num_blocks): - layers.append(conv3x3(inplanes, planes, dilation)) - if with_bn: - layers.append(nn.BatchNorm2d(planes)) - layers.append(nn.ReLU(inplace=True)) - inplanes = planes - layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode)) - - return layers - - -class VGG(nn.Module): - """VGG backbone. - - Args: - depth (int): Depth of vgg, from {11, 13, 16, 19}. - with_bn (bool): Use BatchNorm or not. - num_classes (int): number of classes for classification. - num_stages (int): VGG stages, normally 5. - dilations (Sequence[int]): Dilation of each stage. - out_indices (Sequence[int]): Output from which stages. - frozen_stages (int): Stages to be frozen (all param fixed). -1 means - not freezing any parameters. - bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze - running stats (mean and var). - bn_frozen (bool): Whether to freeze weight and bias of BN layers. 
- """ - - arch_settings = { - 11: (1, 1, 2, 2, 2), - 13: (2, 2, 2, 2, 2), - 16: (2, 2, 3, 3, 3), - 19: (2, 2, 4, 4, 4) - } - - def __init__(self, - depth, - with_bn=False, - num_classes=-1, - num_stages=5, - dilations=(1, 1, 1, 1, 1), - out_indices=(0, 1, 2, 3, 4), - frozen_stages=-1, - bn_eval=True, - bn_frozen=False, - ceil_mode=False, - with_last_pool=True): - super(VGG, self).__init__() - if depth not in self.arch_settings: - raise KeyError(f'invalid depth {depth} for vgg') - assert num_stages >= 1 and num_stages <= 5 - stage_blocks = self.arch_settings[depth] - self.stage_blocks = stage_blocks[:num_stages] - assert len(dilations) == num_stages - assert max(out_indices) <= num_stages - - self.num_classes = num_classes - self.out_indices = out_indices - self.frozen_stages = frozen_stages - self.bn_eval = bn_eval - self.bn_frozen = bn_frozen - - self.inplanes = 3 - start_idx = 0 - vgg_layers = [] - self.range_sub_modules = [] - for i, num_blocks in enumerate(self.stage_blocks): - num_modules = num_blocks * (2 + with_bn) + 1 - end_idx = start_idx + num_modules - dilation = dilations[i] - planes = 64 * 2**i if i < 4 else 512 - vgg_layer = make_vgg_layer( - self.inplanes, - planes, - num_blocks, - dilation=dilation, - with_bn=with_bn, - ceil_mode=ceil_mode) - vgg_layers.extend(vgg_layer) - self.inplanes = planes - self.range_sub_modules.append([start_idx, end_idx]) - start_idx = end_idx - if not with_last_pool: - vgg_layers.pop(-1) - self.range_sub_modules[-1][1] -= 1 - self.module_name = 'features' - self.add_module(self.module_name, nn.Sequential(*vgg_layers)) - - if self.num_classes > 0: - self.classifier = nn.Sequential( - nn.Linear(512 * 7 * 7, 4096), - nn.ReLU(True), - nn.Dropout(), - nn.Linear(4096, 4096), - nn.ReLU(True), - nn.Dropout(), - nn.Linear(4096, num_classes), - ) - - def init_weights(self, pretrained=None): - if isinstance(pretrained, str): - logger = logging.getLogger() - from ..runner import load_checkpoint - load_checkpoint(self, pretrained, strict=False, logger=logger) - elif pretrained is None: - for m in self.modules(): - if isinstance(m, nn.Conv2d): - kaiming_init(m) - elif isinstance(m, nn.BatchNorm2d): - constant_init(m, 1) - elif isinstance(m, nn.Linear): - normal_init(m, std=0.01) - else: - raise TypeError('pretrained must be a str or None') - - def forward(self, x): - outs = [] - vgg_layers = getattr(self, self.module_name) - for i in range(len(self.stage_blocks)): - for j in range(*self.range_sub_modules[i]): - vgg_layer = vgg_layers[j] - x = vgg_layer(x) - if i in self.out_indices: - outs.append(x) - if self.num_classes > 0: - x = x.view(x.size(0), -1) - x = self.classifier(x) - outs.append(x) - if len(outs) == 1: - return outs[0] - else: - return tuple(outs) - - def train(self, mode=True): - super(VGG, self).train(mode) - if self.bn_eval: - for m in self.modules(): - if isinstance(m, nn.BatchNorm2d): - m.eval() - if self.bn_frozen: - for params in m.parameters(): - params.requires_grad = False - vgg_layers = getattr(self, self.module_name) - if mode and self.frozen_stages >= 0: - for i in range(self.frozen_stages): - for j in range(*self.range_sub_modules[i]): - mod = vgg_layers[j] - mod.eval() - for param in mod.parameters(): - param.requires_grad = False diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/image/codecs/quartz.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/image/codecs/quartz.py deleted file mode 100644 index 
9b33fe22f3cb5a1a1bcb636fef3d68e26695c1f9..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/image/codecs/quartz.py +++ /dev/null @@ -1,109 +0,0 @@ -from ctypes import c_void_p, c_ubyte - -from pyglet.image import ImageData, Animation, AnimationFrame -from pyglet.image.codecs import * - -from pyglet.libs.darwin.cocoapy import cf, quartz, NSMakeRect -from pyglet.libs.darwin.cocoapy import cfnumber_to_number -from pyglet.libs.darwin.cocoapy import kCGImageAlphaPremultipliedLast -from pyglet.libs.darwin.cocoapy import kCGImagePropertyGIFDictionary -from pyglet.libs.darwin.cocoapy import kCGImagePropertyGIFDelayTime - - -class QuartzImageDecoder(ImageDecoder): - def get_file_extensions(self): - # Quartz can actually decode many more formats, but these are the most common. - return [ '.bmp', '.cur', '.gif', '.ico', '.jp2', '.jpg', '.jpeg', - '.pcx', '.png', '.tga', '.tif', '.tiff', '.xbm', '.xpm' ] - - def get_animation_file_extensions(self): - return ['.gif'] - - def _get_pyglet_ImageData_from_source_at_index(self, sourceRef, index): - imageRef = c_void_p(quartz.CGImageSourceCreateImageAtIndex(sourceRef, index, None)) - - # Regardless of the internal format of the image (L, LA, RGB, RGBA, etc) - # we just automatically convert everything to an RGBA format. - format = 'RGBA' - rgbColorSpace = c_void_p(quartz.CGColorSpaceCreateDeviceRGB()) - bitsPerComponent = 8 - width = quartz.CGImageGetWidth(imageRef) - height = quartz.CGImageGetHeight(imageRef) - bytesPerRow = 4 * width - - # Create a buffer to store the RGBA formatted data. - bufferSize = height * bytesPerRow - buffer = (c_ubyte * bufferSize)() - - # Create a bitmap context for the RGBA formatted data. - # Note that premultiplied alpha is required: - # http://developer.apple.com/library/mac/#qa/qa1037/_index.html - bitmap = c_void_p(quartz.CGBitmapContextCreate(buffer, - width, height, - bitsPerComponent, - bytesPerRow, - rgbColorSpace, - kCGImageAlphaPremultipliedLast)) - - # Write the image data into the bitmap. - quartz.CGContextDrawImage(bitmap, NSMakeRect(0,0,width,height), imageRef) - - quartz.CGImageRelease(imageRef) - quartz.CGContextRelease(bitmap) - quartz.CGColorSpaceRelease(rgbColorSpace) - - pitch = bytesPerRow - return ImageData(width, height, format, buffer, -pitch) - - def decode(self, filename, file): - if not file: - file = open(filename, 'rb') - file_bytes = file.read() - data = c_void_p(cf.CFDataCreate(None, file_bytes, len(file_bytes))) - # Second argument is an options dictionary. It might be a good idea to provide - # a value for kCGImageSourceTypeIdentifierHint here using filename extension. - sourceRef = c_void_p(quartz.CGImageSourceCreateWithData(data, None)) - image = self._get_pyglet_ImageData_from_source_at_index(sourceRef, 0) - - cf.CFRelease(data) - cf.CFRelease(sourceRef) - - return image - - def decode_animation(self, filename, file): - if not file: - file = open(filename, 'rb') - # If file is not an animated GIF, it will be loaded as a single-frame animation. - file_bytes = file.read() - data = c_void_p(cf.CFDataCreate(None, file_bytes, len(file_bytes))) - sourceRef = c_void_p(quartz.CGImageSourceCreateWithData(data, None)) - - # Get number of frames in the animation. - count = quartz.CGImageSourceGetCount(sourceRef) - - frames = [] - - for index in range(count): - # Try to determine frame duration from GIF properties dictionary. 
- duration = 0.1 # default duration if none found - props = c_void_p(quartz.CGImageSourceCopyPropertiesAtIndex(sourceRef, index, None)) - if cf.CFDictionaryContainsKey(props, kCGImagePropertyGIFDictionary): - gif_props = c_void_p(cf.CFDictionaryGetValue(props, kCGImagePropertyGIFDictionary)) - if cf.CFDictionaryContainsKey(gif_props, kCGImagePropertyGIFDelayTime): - duration = cfnumber_to_number(c_void_p(cf.CFDictionaryGetValue(gif_props, kCGImagePropertyGIFDelayTime))) - - cf.CFRelease(props) - image = self._get_pyglet_ImageData_from_source_at_index(sourceRef, index) - frames.append( AnimationFrame(image, duration) ) - - cf.CFRelease(data) - cf.CFRelease(sourceRef) - - return Animation(frames) - - -def get_decoders(): - return [ QuartzImageDecoder() ] - -def get_encoders(): - return [] diff --git a/spaces/active-learning/webhook/Dockerfile b/spaces/active-learning/webhook/Dockerfile deleted file mode 100644 index e9e2e4a87e0f859680a49a0243b1022a6cb645b7..0000000000000000000000000000000000000000 --- a/spaces/active-learning/webhook/Dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -# Use the official Python 3.9 image -FROM python:3.9-bullseye - -# Set the working directory to /code -WORKDIR /code - -# Copy the current directory contents into the container at /code -COPY ./requirements.txt /code/requirements.txt - -# Install requirements.txt -RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt - -# Set up a new user named "user" with user ID 1000 -RUN useradd -m -u 1000 user -# Switch to the "user" user -USER user -# Set home to the user's home directory -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH - -# Set the working directory to the user's home directory -WORKDIR $HOME/app - -# Copy the current directory contents into the container at $HOME/app setting the owner to the user -COPY --chown=user . $HOME/app - -CMD ["uvicorn", "main:app","--proxy-headers", "--host", "0.0.0.0", "--port", "7860"] diff --git a/spaces/akhaliq/deeplab2/data/preprocessing/__init__.py b/spaces/akhaliq/deeplab2/data/preprocessing/__init__.py deleted file mode 100644 index 35e4ce02ff422f3aa84ab644b88d65b13e0cbc03..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/deeplab2/data/preprocessing/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The Deeplab2 Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - diff --git a/spaces/akhaliq/deeplab2/model/layers/axial_blocks.py b/spaces/akhaliq/deeplab2/model/layers/axial_blocks.py deleted file mode 100644 index bb21189461979d87aa5a8294959053a5960dfe76..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/deeplab2/model/layers/axial_blocks.py +++ /dev/null @@ -1,308 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The Deeplab2 Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Implements Axial-Blocks proposed in Axial-DeepLab [1]. - -Axial-Blocks are based on residual bottleneck blocks, but with the 3x3 -convolution replaced with two axial-attention layers, one on the height-axis, -followed by the other on the width-axis. - -[1] Axial-Deeplab: Stand-Alone Axial-Attention for Panoptic Segmentation, - ECCV 2020 Spotlight. - Huiyu Wang, Yukun Zhu, Bradley Green, Hartwig Adam, Alan Yuille, - Liang-Chieh Chen. -""" -import tensorflow as tf - -from deeplab2.model import utils -from deeplab2.model.layers import activations -from deeplab2.model.layers import axial_layers -from deeplab2.model.layers import convolutions -from deeplab2.model.layers import squeeze_and_excite - - -class AxialBlock(tf.keras.layers.Layer): - """An AxialBlock as a building block for an Axial-ResNet model. - - We implement the Axial-Block proposed in [1] in a general way that also - includes convolutional residual blocks, such as the basic block and the - bottleneck block (w/ and w/o Switchable Atrous Convolution). - - A basic block consists of two 3x3 convolutions and a residual connection. It - is the main building block for wide-resnet variants. - - A bottleneck block consists of consecutive 1x1, 3x3, 1x1 convolutions and a - residual connection. It is the main building block for standard resnet - variants. - - An axial block consists of a 1x1 input convolution, a self-attention layer - (either axial-attention or global attention), a 1x1 output convolution, and a - residual connection. It is the main building block for axial-resnet variants. - - Note: We apply the striding in the first spatial operation (i.e. 3x3 - convolution or self-attention layer). - """ - - def __init__(self, - filters_list, - kernel_size=3, - strides=1, - atrous_rate=1, - use_squeeze_and_excite=False, - use_sac=False, - bn_layer=tf.keras.layers.BatchNormalization, - activation='relu', - name=None, - conv_kernel_weight_decay=0.0, - basic_block_second_conv_atrous_rate=None, - attention_type=None, - axial_layer_config=None): - """Initializes an AxialBlock. - - Args: - filters_list: A list of filter numbers in the residual block. We currently - support filters_list with two or three elements. Two elements specify - the filters for two consecutive 3x3 convolutions, while three elements - specify the filters for three convolutions (1x1, 3x3, and 1x1). - kernel_size: The size of the convolution kernels (default: 3). - strides: The strides of the block (default: 1). - atrous_rate: The atrous rate of the 3x3 convolutions (default: 1). If this - residual block is a basic block, it is recommended to specify correct - basic_block_second_conv_atrous_rate for the second 3x3 convolution. - Otherwise, the second conv will also use atrous rate, which might cause - atrous inconsistency with different output strides, as tested in - axial_block_groups_test.test_atrous_consistency_basic_block. - use_squeeze_and_excite: A boolean flag indicating whether - squeeze-and-excite (SE) is used. - use_sac: A boolean, whether to use the Switchable Atrous Convolution (SAC) or not.
- bn_layer: A tf.keras.layers.Layer that computes the normalization - (default: tf.keras.layers.BatchNormalization). - activation: A string specifying the activation function to apply. - name: An string specifying the name of the layer (default: None). - conv_kernel_weight_decay: A float, the weight decay for convolution - kernels. - basic_block_second_conv_atrous_rate: An integer, the atrous rate for the - second convolution of basic block. This is necessary to ensure atrous - consistency with different output_strides. Defaults to atrous_rate. - attention_type: A string, type of attention to apply. Support 'axial' and - 'global'. - axial_layer_config: A dict, an argument dictionary for the axial layer. - - Raises: - ValueError: If filters_list does not have two or three elements. - ValueError: If attention_type is not supported. - ValueError: If double_global_attention is True in axial_layer_config. - """ - super(AxialBlock, self).__init__(name=name) - - self._filters_list = filters_list - self._strides = strides - self._use_squeeze_and_excite = use_squeeze_and_excite - self._bn_layer = bn_layer - self._activate_fn = activations.get_activation(activation) - self._attention_type = attention_type - - if axial_layer_config is None: - axial_layer_config = {} - - if basic_block_second_conv_atrous_rate is None: - basic_block_second_conv_atrous_rate = atrous_rate - - if len(filters_list) == 3: - # Three consecutive convolutions: 1x1, 3x3, and 1x1. - self._conv1_bn_act = convolutions.Conv2DSame( - filters_list[0], 1, 'conv1_bn_act', - use_bias=False, - use_bn=True, - bn_layer=bn_layer, - activation=activation, - conv_kernel_weight_decay=conv_kernel_weight_decay) - - if attention_type is None or attention_type.lower() == 'none': - self._conv2_bn_act = convolutions.Conv2DSame( - filters_list[1], kernel_size, 'conv2_bn_act', - strides=strides, - atrous_rate=atrous_rate, - use_bias=False, - use_bn=True, - bn_layer=bn_layer, - activation=activation, - use_switchable_atrous_conv=use_sac, - # We default to use global context in SAC if use_sac is True. This - # setting is experimentally found effective. - use_global_context_in_sac=use_sac, - conv_kernel_weight_decay=conv_kernel_weight_decay) - elif attention_type == 'axial': - if 'double_global_attention' in axial_layer_config: - if axial_layer_config['double_global_attention']: - raise ValueError('Double_global_attention takes no effect in ' - 'AxialAttention2D.') - del axial_layer_config['double_global_attention'] - self._attention = axial_layers.AxialAttention2D( - strides=strides, - filters=filters_list[1], - name='attention', - bn_layer=bn_layer, - conv_kernel_weight_decay=conv_kernel_weight_decay, - **axial_layer_config) - elif attention_type == 'global': - self._attention = axial_layers.GlobalAttention2D( - strides=strides, - filters=filters_list[1], - name='attention', - bn_layer=bn_layer, - conv_kernel_weight_decay=conv_kernel_weight_decay, - **axial_layer_config) - else: - raise ValueError(attention_type + ' is not supported.') - - # Here we apply a batch norm with gamma initialized at zero. This ensures - # that at random initialization of the model, the skip connections - # dominate all residual blocks. In this way, all the skip connections - # construct an identity mapping that passes the gradients (without any - # distortion from the randomly initialized blocks) to all residual blocks. - # This trick helps training at early epochs. - # Reference: "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour". 
- # https://arxiv.org/abs/1706.02677 - self._conv3_bn = convolutions.Conv2DSame( - filters_list[2], 1, 'conv3_bn', - use_bias=False, - use_bn=True, - bn_layer=bn_layer, - bn_gamma_initializer='zeros', - activation='none', - conv_kernel_weight_decay=conv_kernel_weight_decay) - elif len(filters_list) == 2: - # Two consecutive convolutions: 3x3 and 3x3. - self._conv1_bn_act = convolutions.Conv2DSame( - filters_list[0], kernel_size, 'conv1_bn_act', - strides=strides, - atrous_rate=atrous_rate, - use_bias=False, - use_bn=True, - bn_layer=bn_layer, - activation=activation, - use_switchable_atrous_conv=use_sac, - use_global_context_in_sac=use_sac, - conv_kernel_weight_decay=conv_kernel_weight_decay) - # Here we apply a batch norm with gamma initialized at zero. This ensures - # that at random initialization of the model, the skip connections - # dominate all residual blocks. In this way, all the skip connections - # construct an identity mapping that passes the gradients (without any - # distortion from the randomly initialized blocks) to all residual blocks. - # This trick helps training at early epochs. - # Reference: "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour". - # https://arxiv.org/abs/1706.02677 - self._conv2_bn = convolutions.Conv2DSame( - filters_list[1], kernel_size, 'conv2_bn', - strides=1, - atrous_rate=basic_block_second_conv_atrous_rate, - use_bias=False, - use_bn=True, - bn_layer=bn_layer, - bn_gamma_initializer='zeros', - activation='none', - use_switchable_atrous_conv=use_sac, - use_global_context_in_sac=use_sac, - conv_kernel_weight_decay=conv_kernel_weight_decay) - else: - raise ValueError('Expect filters_list to have length 2 or 3; got %d' % - len(filters_list)) - - if self._use_squeeze_and_excite: - self._squeeze_and_excite = squeeze_and_excite.SimplifiedSqueezeAndExcite( - filters_list[-1]) - self._conv_kernel_weight_decay = conv_kernel_weight_decay - - def build(self, input_shape_list): - input_tensor_shape = input_shape_list[0] - self._shortcut = None - if input_tensor_shape[3] != self._filters_list[-1]: - self._shortcut = convolutions.Conv2DSame( - self._filters_list[-1], 1, 'shortcut', - strides=self._strides, - use_bias=False, - use_bn=True, - bn_layer=self._bn_layer, - activation='none', - conv_kernel_weight_decay=self._conv_kernel_weight_decay) - - def call(self, inputs): - """Performs a forward pass. - - We have to define drop_path_random_mask outside the layer call and pass it - into the layer, because recompute_grad (gradient checkpointing) does not - allow any randomness within the function call. In addition, recompute_grad - only supports float tensors as inputs. For this reason, the training flag - should be also passed as a float tensor. For the same reason, we cannot - support passing drop_path_random_mask as None. Instead, we ask the users to - pass only the first two tensors when drop path is not used. - - Args: - inputs: A tuple of 2 or 3 tensors, containing - input_tensor should be an input tensor of type tf.Tensor with shape - [batch, height, width, channels]. - float_tensor_training should be a float tensor of 0.0 or 1.0, whether - the model is in training mode. - (optional) drop_path_random_mask is a drop path random mask of type - tf.Tensor with shape [batch, 1, 1, 1]. - - Returns: - outputs: two tensors. The first tensor does not use the last activation - function. The second tensor uses the activation. We return non-activated - output to support MaX-DeepLab which uses non-activated feature for the - stacked decoders. 
- - Raises: - ValueError: If the length of inputs is not 2 or 3. - """ - if len(inputs) not in (2, 3): - raise ValueError('The length of inputs should be either 2 or 3.') - - # Unpack the inputs. - input_tensor, float_tensor_training, drop_path_random_mask = ( - utils.pad_sequence_with_none(inputs, target_length=3)) - - # Recompute_grad takes only float tensors as inputs. It does not allow - # bools or boolean tensors. For this reason, we cast training to a float - # tensor outside this call, and now we cast it back to a boolean tensor. - training = tf.cast(float_tensor_training, tf.bool) - - shortcut = input_tensor - if self._shortcut is not None: - shortcut = self._shortcut(shortcut, training=training) - elif self._strides != 1: - shortcut = shortcut[:, ::self._strides, ::self._strides, :] - - if len(self._filters_list) == 3: - x = self._conv1_bn_act(input_tensor, training=training) - if (self._attention_type is None or - self._attention_type.lower() == 'none'): - x = self._conv2_bn_act(x, training=training) - else: - x = self._attention(x, training=training) - x = self._activate_fn(x) - x = self._conv3_bn(x, training=training) - if len(self._filters_list) == 2: - x = self._conv1_bn_act(input_tensor, training=training) - x = self._conv2_bn(x, training=training) - - if self._use_squeeze_and_excite: - x = self._squeeze_and_excite(x) - - if drop_path_random_mask is not None: - x = x * drop_path_random_mask - x = x + shortcut - return x, self._activate_fn(x) diff --git a/spaces/akhaliq/deeplab2/model/post_processor/vip_deeplab_test.py b/spaces/akhaliq/deeplab2/model/post_processor/vip_deeplab_test.py deleted file mode 100644 index e742fe470f5d2410b5c69005130977e9ee50e8a0..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/deeplab2/model/post_processor/vip_deeplab_test.py +++ /dev/null @@ -1,67 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The Deeplab2 Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test for vip_deeplab.py.""" -import numpy as np -import tensorflow as tf - -from deeplab2.model.post_processor import vip_deeplab - - -class PostProcessingTest(tf.test.TestCase): - - def test_stitch_video_panoptic_prediction(self): - concat_semantic = np.array( - [[[0, 0, 0, 0], - [0, 1, 1, 0], - [0, 2, 2, 0], - [2, 2, 3, 3]]], dtype=np.int32) - concat_instance = np.array( - [[[1, 1, 2, 2], - [1, 0, 0, 2], - [1, 1, 1, 2], - [2, 2, 1, 1]]], dtype=np.int32) - next_semantic = np.array( - [[[0, 1, 1, 0], - [0, 1, 1, 0], - [0, 2, 2, 0], - [2, 2, 3, 3]]], dtype=np.int32) - next_instance = np.array( - [[[2, 0, 0, 1], - [2, 0, 0, 1], - [2, 4, 4, 1], - [5, 5, 3, 3]]], dtype=np.int32) - label_divisor = 1000 - concat_panoptic = concat_semantic * label_divisor + concat_instance - next_panoptic = next_semantic * label_divisor + next_instance - new_panoptic = vip_deeplab.stitch_video_panoptic_prediction( - concat_panoptic, - next_panoptic, - label_divisor) - # The expected instance is manually computed. 
It should receive the IDs - # propagated from concat_instance by IoU matching between concat_panoptic - # and next_panoptic. - expected_semantic = next_semantic - expected_instance = np.array( - [[[1, 0, 0, 2], - [1, 0, 0, 2], - [1, 1, 1, 2], - [2, 2, 1, 1]]], dtype=np.int32) - expected_panoptic = expected_semantic * label_divisor + expected_instance - np.testing.assert_array_equal(expected_panoptic, new_panoptic) - - -if __name__ == '__main__': - tf.test.main() diff --git a/spaces/akhaliq/lama/bin/blur_predicts.py b/spaces/akhaliq/lama/bin/blur_predicts.py deleted file mode 100644 index a14fcc28d5a906ad3a21ab4ba482f38b4fc411cb..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/lama/bin/blur_predicts.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python3 - -import os - -import cv2 -import numpy as np -import tqdm - -from saicinpainting.evaluation.data import PrecomputedInpaintingResultsDataset -from saicinpainting.evaluation.utils import load_yaml - - -def main(args): - config = load_yaml(args.config) - - if not args.predictdir.endswith('/'): - args.predictdir += '/' - - dataset = PrecomputedInpaintingResultsDataset(args.datadir, args.predictdir, **config.dataset_kwargs) - - os.makedirs(os.path.dirname(args.outpath), exist_ok=True) - - for img_i in tqdm.trange(len(dataset)): - pred_fname = dataset.pred_filenames[img_i] - cur_out_fname = os.path.join(args.outpath, pred_fname[len(args.predictdir):]) - os.makedirs(os.path.dirname(cur_out_fname), exist_ok=True) - - sample = dataset[img_i] - img = sample['image'] - mask = sample['mask'] - inpainted = sample['inpainted'] - - inpainted_blurred = cv2.GaussianBlur(np.transpose(inpainted, (1, 2, 0)), - ksize=(args.k, args.k), - sigmaX=args.s, sigmaY=args.s, - borderType=cv2.BORDER_REFLECT) - - cur_res = (1 - mask) * np.transpose(img, (1, 2, 0)) + mask * inpainted_blurred - cur_res = np.clip(cur_res * 255, 0, 255).astype('uint8') - cur_res = cv2.cvtColor(cur_res, cv2.COLOR_RGB2BGR) - cv2.imwrite(cur_out_fname, cur_res) - - -if __name__ == '__main__': - import argparse - - aparser = argparse.ArgumentParser() - aparser.add_argument('config', type=str, help='Path to evaluation config') - aparser.add_argument('datadir', type=str, - help='Path to folder with images and masks (output of gen_mask_dataset.py)') - aparser.add_argument('predictdir', type=str, - help='Path to folder with predicts (e.g. 
predict_hifill_baseline.py)') - aparser.add_argument('outpath', type=str, help='Where to put results') - aparser.add_argument('-s', type=float, default=0.1, help='Gaussian blur sigma') - aparser.add_argument('-k', type=int, default=5, help='Kernel size in gaussian blur') - - main(aparser.parse_args()) diff --git a/spaces/akhaliq/yolov7/hubconf.py b/spaces/akhaliq/yolov7/hubconf.py deleted file mode 100644 index f8a8cbe940d89fa0ca293183407ec7bf6a453305..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/yolov7/hubconf.py +++ /dev/null @@ -1,97 +0,0 @@ -"""PyTorch Hub models - -Usage: - import torch - model = torch.hub.load('repo', 'model') -""" - -from pathlib import Path - -import torch - -from models.yolo import Model -from utils.general import check_requirements, set_logging -from utils.google_utils import attempt_download -from utils.torch_utils import select_device - -dependencies = ['torch', 'yaml'] -check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('pycocotools', 'thop')) -set_logging() - - -def create(name, pretrained, channels, classes, autoshape): - """Creates a specified model - - Arguments: - name (str): name of model, i.e. 'yolov7' - pretrained (bool): load pretrained weights into the model - channels (int): number of input channels - classes (int): number of model classes - - Returns: - pytorch model - """ - try: - cfg = list((Path(__file__).parent / 'cfg').rglob(f'{name}.yaml'))[0] # model.yaml path - model = Model(cfg, channels, classes) - if pretrained: - fname = f'{name}.pt' # checkpoint filename - attempt_download(fname) # download if not found locally - ckpt = torch.load(fname, map_location=torch.device('cpu')) # load - msd = model.state_dict() # model state_dict - csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 - csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter - model.load_state_dict(csd, strict=False) # load - if len(ckpt['model'].names) == classes: - model.names = ckpt['model'].names # set class names attribute - if autoshape: - model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS - device = select_device('0' if torch.cuda.is_available() else 'cpu') # default to GPU if available - return model.to(device) - - except Exception as e: - s = 'Cache may be out of date, try force_reload=True.'
- raise Exception(s) from e - - -def custom(path_or_model='path/to/model.pt', autoshape=True): - """custom mode - - Arguments (3 options): - path_or_model (str): 'path/to/model.pt' - path_or_model (dict): torch.load('path/to/model.pt') - path_or_model (nn.Module): torch.load('path/to/model.pt')['model'] - - Returns: - pytorch model - """ - model = torch.load(path_or_model) if isinstance(path_or_model, str) else path_or_model # load checkpoint - if isinstance(model, dict): - model = model['ema' if model.get('ema') else 'model'] # load model - - hub_model = Model(model.yaml).to(next(model.parameters()).device) # create - hub_model.load_state_dict(model.float().state_dict()) # load state_dict - hub_model.names = model.names # class names - if autoshape: - hub_model = hub_model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS - device = select_device('0' if torch.cuda.is_available() else 'cpu') # default to GPU if available - return hub_model.to(device) - - -def yolov7(pretrained=True, channels=3, classes=80, autoshape=True): - return create('yolov7', pretrained, channels, classes, autoshape) - - -if __name__ == '__main__': - model = custom(path_or_model='yolov7.pt') # custom example - # model = create(name='yolov7', pretrained=True, channels=3, classes=80, autoshape=True) # pretrained example - - # Verify inference - import numpy as np - from PIL import Image - - imgs = [np.zeros((640, 480, 3))] - - results = model(imgs) # batched inference - results.print() - results.save() diff --git a/spaces/alamin655/Personas/conversant/prompt_chatbot.py b/spaces/alamin655/Personas/conversant/prompt_chatbot.py deleted file mode 100644 index 79602d355ed420686faa494cb002a6f3efe799c8..0000000000000000000000000000000000000000 --- a/spaces/alamin655/Personas/conversant/prompt_chatbot.py +++ /dev/null @@ -1,608 +0,0 @@ -# Copyright (c) 2022 Cohere Inc. and its affiliates. -# -# Licensed under the MIT License (the "License"); -# you may not use this file except in compliance with the License. -# -# You may obtain a copy of the License in the LICENSE file at the top -# level of this repository. - -import json -import logging -import os -import warnings -from concurrent.futures import Future, ThreadPoolExecutor -from typing import Any, Dict, Tuple - -import cohere -import jsonschema - -import conversant -from conversant.chatbot import Chatbot, Interaction -from conversant.prompts.chat_prompt import ChatPrompt -from conversant.prompts.prompt import Prompt - -MAX_GENERATE_TOKENS = 2048 -TOKENS_PER_REQUEST = 10 -PERSONA_MODEL_DIRECTORY = f"{os.path.dirname(conversant.__file__)}/personas" -PERSONA_JSON_SCHEMA = { - "type": "object", - "properties": { - "chatbot_config": { - "type": "object", - "properties": { - "max_context_examples": {"type": "integer"}, - "avatar": {"type": "string"}, - }, - }, - "client_config": { - "type": "object", - "properties": { - "model": {"type": "string"}, - "max_tokens": {"type": "integer"}, - "temperature": {"type": "number"}, - "frequency_penalty": {"type": "number"}, - "presence_penalty": {"type": "number"}, - "stop_sequences": {"type": "array"}, - }, - }, - "prompt_config": { - "type": "object", - }, - }, -} - - -class PromptChatbot(Chatbot): - """Use prompt templates and LLM generation to define a chatbot. - - This bot makes no use of external knowledge sources. - """ - - def __init__( - self, - client: cohere.Client, - prompt: Prompt, - persona_name: str = "", - chatbot_config: Dict[str, Any] = {}, - client_config: Dict[str, Any] = {}, - ): - """Enriches init by adding a prompt. 
- - Args: - client (cohere.Client): Cohere client for API - prompt (Prompt): Prompt object to direct behavior. - persona_name (str, optional): Bot's persona name. Defaults to empty string. - chatbot_config: (Dict[str, Any], optional): Bot's chat config. Defaults to - empty dict. - client_config (Dict[str, Any], optional): Bot's client config. Defaults to - empty dict. - """ - - super().__init__(client) - self.prompt = prompt - self.persona_name = persona_name - - self.configure_chatbot(chatbot_config) - self.configure_client(client_config) - self.chat_history = [] - self.prompt_size_history = [] - self.prompt_history = [self.prompt.to_string()] - self.curr_max_context_examples = self.chatbot_config["max_context_examples"] - - # For the generation models, the maximum token length is 2048 - # (prompt and generation). So the prompt sent to .generate should be - # MAX_GENERATE_TOKENS minus max tokens generated - self.max_prompt_size = MAX_GENERATE_TOKENS - self.client_config["max_tokens"] - self._check_prompt_size() - - def __repr__(self) -> str: - return json.dumps(self.to_dict(), indent=4, default=str) - - @property - def user_name(self): - """ - Returns: - str: The name of the user, defined in the prompt. Defaults to "User". - """ - if hasattr(self.prompt, "user_name"): - return self.prompt.user_name - else: - return "User" - - @property - def bot_name(self): - """ - Returns: - str: The name of the chatbot, defined in the prompt. Defaults to - "PromptChatbot". - """ - if hasattr(self.prompt, "bot_name"): - return self.prompt.bot_name - else: - return "PromptChatbot" - - @property - def latest_prompt(self) -> str: - """Retrieves the latest prompt. - - Returns: - str: The prompt most recently added to the prompt history. - """ - return self.prompt_history[-1] - - def _update_max_context_examples( - self, prompt_size: int, max_context_examples: int - ) -> int: - """Adjust max_context_examples until a possible prompt size. - - if this is not possible, send an error message. - - Args: - prompt_size (int): Number of tokens of the prompt - max_context_examples (int): The length of the chat history for - the chatbot to use in reply. - - Returns: - int: updated max_context_examples - """ - # Store original values - original_size = prompt_size - # If the size of chat_history is smaller than max_context_examples - # the value of the variable is already updated with the size value - trimmed_max_examples = min(len(self.chat_history), max_context_examples) - - # Check if the max_context_examples is bigger than 0 so it can be reduced - if max_context_examples > 0: - # Reduce max_context_examples until the number of token of the prompt - # is less than maximum or reaches 1 - for size in self.prompt_size_history[-max_context_examples:]: - prompt_size -= size - trimmed_max_examples -= 1 - if prompt_size <= self.max_prompt_size: - if self.curr_max_context_examples == trimmed_max_examples: - warnings.warn( - "The parameter max_context_examples continues " - f"{self.curr_max_context_examples}" - ", so that the total amount of tokens does not" - f" exceed {MAX_GENERATE_TOKENS}." - ) - else: - warnings.warn( - "The parameter max_context_examples was changed for" - f" this turn, from {self.curr_max_context_examples} to " - f"{trimmed_max_examples}, so that " - "the total amount of tokens does not" - f" exceed {MAX_GENERATE_TOKENS}." 
- ) - self.curr_max_context_examples = trimmed_max_examples - return trimmed_max_examples - - raise ValueError( - "The total number of tokens (prompt and prediction) cannot exceed " - f"{MAX_GENERATE_TOKENS}. Try using a shorter start prompt, sending " - "smaller text messages in the chat, or setting a smaller value " - "for the parameter max_tokens. More details:\n" - f" - Start Prompt: {self.start_prompt_size} tokens\n" - f" - Messages sent in chat: {original_size - self.start_prompt_size} " - f"tokens\n - Parameter max_tokens: {self.client_config['max_tokens']} " - "tokens" - ) - - def _dispatch_concurrent_generate_call(self, **kwargs) -> Future: - """Dispatches a concurrent call to co.generate. - - This allows a network bound co.generate call to proceed while also - yielding the current response in a partial reply generator. - - Args: - kwargs: Keyword arguments for the call to co.generate. - - Returns: - Future: A future object that will be called to retrieve the result of - co.generate. - """ - with ThreadPoolExecutor(max_workers=1) as exe: - future = exe.submit(self.co.generate, **kwargs) - return future - - def get_stop_seq(self, response: str) -> str: - """Given a response, returns the stop sequence it has if any. - - Args: - response (str): Response coming from prompt chatbot. - - Returns: - str: The stop sequence in the response. If no stop sequence is found, then - an empty string is returned. - - """ - for stop_seq in self.client_config["stop_sequences"]: - if stop_seq in response: - return stop_seq - return "" - - def generate_prompt_update_examples(self, query: str) -> str: - """Generate prompt from query and update max context examples if necessary - - Args: - query (str): A query passed to the prompt chatbot. - - Returns: - current_prompt (str): Returns the current prompt using - query and chat history - - """ - # The current prompt is assembled from the initial prompt, - # from the chat history with a maximum of max_context_examples, - # and from the current query - - current_prompt = self.get_current_prompt(query) - - current_prompt_size = self.co.tokenize(current_prompt).length - - if current_prompt_size > self.max_prompt_size: - max_context_examples = self._update_max_context_examples( - current_prompt_size, self.chatbot_config["max_context_examples"] - ) - current_prompt = self.get_current_prompt(query, max_context_examples) - - elif ( - self.curr_max_context_examples - != self.chatbot_config["max_context_examples"] - ): - warnings.warn( - "The max_context_examples value returned" - f" to {self.chatbot_config['max_context_examples']} - " - f"value set in the original config" - ) - return current_prompt - - def partial_reply(self, query: str) -> Tuple[str, str]: - """Generates (partial) reply to a query given a chat history. - - Args: - query (str): A query passed to the prompt chatbot. - - Yields: - - Tuple[str, str]: A tuple of the response before the co.generate call, - and the response after. 
- """ - current_prompt = self.generate_prompt_update_examples(query) - self.prompt_history.append(current_prompt) - - response_before_current = "" - response_so_far = "" - num_requests_made = 0 - max_requests = int(self.client_config["max_tokens"] / TOKENS_PER_REQUEST) - reply_complete = False - - # As soon as the function is called (and the generator is created), dispatch - # a concurrent call to co.generate - future = self._dispatch_concurrent_generate_call( - model=self.client_config["model"], - prompt=current_prompt, - max_tokens=TOKENS_PER_REQUEST, - temperature=self.client_config["temperature"], - frequency_penalty=self.client_config["frequency_penalty"], - presence_penalty=self.client_config["presence_penalty"], - stop_sequences=self.client_config["stop_sequences"], - ) - - while num_requests_made < max_requests and not reply_complete: - generated_object = future.result() - partial_response = generated_object.generations[0].text - - # If the partial response is an empty string, then this iteration is a no-op - # (we indicate that the reply is completely generated). - if not partial_response: - reply_complete = True - - else: - - # Concatenate the candidate response, then fetches the stop sequence if - # it exists in the candidate response - candidate_response = response_so_far + partial_response - stop_seq = self.get_stop_seq(response_so_far + partial_response) - - # Truncate the candidate response if a stop sequence was found - if stop_seq: - candidate_response = candidate_response[ - : candidate_response.index(stop_seq) - ] - - # If the stop sequence is found across two partial replies, - # then the response_so_far has to be truncated. Example: - # - # stop_seq: "\nUser" - # response_so_far: "Thank you!\n" - # partial_response: "User: You are welcome" - # - # Then the candidate response is: - # - # candidate_response: "Thank you!" - # - # In this case, what is yielded at the end of the loop needs to be: - # - # response_before_current: "Thank you!" - # response_so_far: "Thank you!" - # - # So we'll truncate the response_so_far to be candidate_response - if len(candidate_response) < len(response_so_far): - response_so_far = candidate_response - - reply_complete = True - - # Save candidate response - current_prompt += partial_response - response_before_current = response_so_far - response_so_far = candidate_response - - # If this is the first partial_reply, append a new element to - # chat history after removing the leading whitespace - if num_requests_made == 0: - response_so_far = response_so_far.lstrip() - self.chat_history.append( - self.prompt.create_interaction(query, response_so_far) - ) - self.prompt_size_history.append( - self.co.tokenize( - self.prompt.create_interaction_string( - query, response_so_far - ) - ).length - ) - # Otherwise, overwrite the current chat history with the current - # response so far - else: - self.chat_history[-1] = self.prompt.create_interaction( - query, response_so_far - ) - self.prompt_size_history[-1] = self.co.tokenize( - self.prompt.create_interaction_string(query, response_so_far) - ).length - - num_requests_made += 1 - - # This dispatches a concurrent call to co.generate, which can be - # later accessed on the next iteration of the generator. 
- if num_requests_made < max_requests and not reply_complete: - future = self._dispatch_concurrent_generate_call( - model=self.client_config["model"], - prompt=current_prompt, - max_tokens=TOKENS_PER_REQUEST, - temperature=self.client_config["temperature"], - frequency_penalty=self.client_config["frequency_penalty"], - presence_penalty=self.client_config["presence_penalty"], - stop_sequences=self.client_config["stop_sequences"], - ) - - yield response_before_current, response_so_far - - def reply(self, query: str) -> Interaction: - """Replies to a query given a chat history. - - The reply is then generated directly from a call to a LLM. - - Args: - query (str): A query passed to the prompt chatbot. - - Returns: - Interaction: Dictionary of query and generated LLM response - """ - - current_prompt = self.generate_prompt_update_examples(query) - - # Make a call to Cohere's co.generate API - generated_object = self.co.generate( - model=self.client_config["model"], - prompt=current_prompt, - max_tokens=self.client_config["max_tokens"], - temperature=self.client_config["temperature"], - frequency_penalty=self.client_config["frequency_penalty"], - presence_penalty=self.client_config["presence_penalty"], - stop_sequences=self.client_config["stop_sequences"], - ) - # If response was cut off by .generate() finding a stop sequence, - # remove that sequence from the response. - response = generated_object.generations[0].text - for stop_seq in self.client_config["stop_sequences"]: - if response.endswith(stop_seq): - response = response[: -len(stop_seq)] - response = response.lstrip() - - # We need to remember the current response in the chat history for future - # responses. - self.chat_history.append(self.prompt.create_interaction(query, response)) - self.prompt_size_history.append( - self.co.tokenize( - self.prompt.create_interaction_string(query, response) - ).length - ) - self.prompt_history.append(current_prompt) - - return response - - def get_current_prompt(self, query: str, max_context_examples: int = None) -> str: - """Stitches the prompt with a trailing window of the chat. - Args: - query (str): The current user query. - max_context_examples (int): The length of the chat history for - the chatbot to use in reply. - - Returns: - str: The current prompt given a query. - """ - if max_context_examples is None: - max_context_examples = self.chatbot_config["max_context_examples"] - - # get base prompt - base_prompt = self.prompt.to_string() + "\n" - - # get context prompt - context_prompt_lines = [] - trimmed_chat_history = ( - self.chat_history[-max_context_examples:] - if max_context_examples > 0 - else [] - ) - # TODO when prompt is updated, the history is mutated - # as it is recreated using the new prompt. A possible fix is to save the old - # prompt in history and use it when recreating. - for turn in trimmed_chat_history: - context_prompt_lines.append(self.prompt.create_interaction_string(**turn)) - context_prompt = self.prompt.example_separator + "".join(context_prompt_lines) - - current_prompt = base_prompt + context_prompt - - # get query prompt - if query != "": - query_prompt = self.prompt.create_interaction_string(query) - current_prompt += query_prompt - return current_prompt.strip() - - def configure_chatbot(self, chatbot_config: Dict = {}) -> None: - """Configures chatbot options. - - Args: - chatbot_config (Dict, optional): Updates self.chatbot_config. Defaults - to {}. - """ - # We initialize the chatbot to these default config values. 
- if not hasattr(self, "chatbot_config"): - self.chatbot_config = {"max_context_examples": 10, "avatar": ":robot:"} - # Override default config values with the config passed in - if isinstance(chatbot_config, Dict): - self.chatbot_config.update(chatbot_config) - else: - raise TypeError( - "chatbot_config must be of type Dict, but was passed in as " - f"{type(chatbot_config)}" - ) - - def configure_client(self, client_config: Dict = {}) -> None: - """Configures client options. - - Args: - client_config (Dict, optional): Updates self.client_config. Defaults to {}. - """ - # We initialize the client to these default config values. - if not hasattr(self, "client_config"): - self.client_config = { - "model": "xlarge", - "max_tokens": 200, - "temperature": 0.75, - "frequency_penalty": 0.0, - "presence_penalty": 0.0, - "stop_sequences": ["\\n", "\n"], - } - # Override default config values with the config passed in - if isinstance(client_config, Dict): - self.client_config.update(client_config) - else: - raise TypeError( - "client_config must be of type Dict, but was passed in as " - f"{type(client_config)}" - ) - - # Checks if the parameter is equal or bigger than MAX_GENERATE_TOKENS - if self.client_config["max_tokens"] >= MAX_GENERATE_TOKENS: - raise ValueError( - f"The parameter max_tokens needs to be smaller than " - f"{MAX_GENERATE_TOKENS}. Try using a smaller value." - ) - elif self.client_config["max_tokens"] > (MAX_GENERATE_TOKENS * 0.75): - warnings.warn( - "The parameter max_tokens has a value " - f"({self.client_config['max_tokens']}) close to the total allowed" - f" for prompt and prediction - {MAX_GENERATE_TOKENS} tokens" - ) - - @classmethod - def from_persona( - cls, - persona_name: str, - client: cohere.Client, - persona_dir: str = PERSONA_MODEL_DIRECTORY, - ): - """Initializes a PromptChatbot using a persona. - - Args: - persona (str): Name of persona, corresponding to a .json file. - client (cohere.Client): Cohere client for API - persona_dir (str): Path to where pre-defined personas are. - """ - # Load the persona from a local directory - persona_path = os.path.join(persona_dir, persona_name, "config.json") - if os.path.isfile(persona_path): - logging.info(f"loading persona from {persona_path}") - else: - raise FileNotFoundError(f"{persona_path} cannot be found.") - with open(persona_path) as f: - persona = json.load(f) - - # Validate that the persona follows our predefined schema - cls._validate_persona_dict(persona, persona_path) - return cls( - client=client, - prompt=ChatPrompt.from_dict(persona["chat_prompt_config"]), - persona_name=persona_name, - chatbot_config=persona["chatbot_config"], - client_config=persona["client_config"], - ) - - def to_dict(self) -> Dict[str, Any]: - """Serializes this instance into a Python dictionary. - - Returns: - Dict[str, Any]: Dictionary of attributes that defines this instance of a - PromptChatbot. - """ - attr_dict = {k: v for k, v in vars(self).items()} - attr_dict["prompt"] = attr_dict["prompt"].to_dict() - return attr_dict - - def _check_prompt_size(self) -> None: - - self.start_prompt_size = self.co.tokenize(self.prompt.to_string()).length - if self.start_prompt_size > self.max_prompt_size: - raise ValueError( - f"The prompt given to PromptChatbot has {self.start_prompt_size}" - " tokens. And the value of the parameter max_tokens is" - f" {self.client_config['max_tokens']}. Adding the two values " - f"the total cannot exceed {MAX_GENERATE_TOKENS}. " - "Try using a shorter preamble or less examples." 
- ) - elif self.start_prompt_size > (0.75 * self.max_prompt_size): - warnings.warn( - "The prompt given to PromptChatbot has " - f"{self.start_prompt_size} tokens. And the value of the parameter" - f" max_tokens is {self.client_config['max_tokens']}. " - "Adding the two together gives a value close to the total allowed" - f" for prompt and prediction - {MAX_GENERATE_TOKENS} tokens" - ) - - @staticmethod - def _validate_persona_dict(persona: Dict[str, Any], persona_path: str) -> None: - """Validates formatting of a persona defined as a dictionary. - - Args: - persona (Dict[str, Any]): A dictionary containing the persona. - persona_path: The path from which the persona was loaded. - """ - try: - jsonschema.validate(instance=persona, schema=PERSONA_JSON_SCHEMA) - except jsonschema.exceptions.ValidationError as e: - raise jsonschema.exceptions.ValidationError( - f"Type of values in given dictionary (persona from {persona_path}) do " - f"not match schema': {e}" - ) - except KeyError as e: - raise KeyError( - f"Invalid key in given dictionary (persona from {persona_path})': {e}" - ) - except Exception as e: - raise Exception( - "Failed to validate persona in given dictionary (persona from " - f"{persona_path}): {e}" - ) diff --git a/spaces/alamin655/websurfx/public/static/colorschemes/oceanic-next.css b/spaces/alamin655/websurfx/public/static/colorschemes/oceanic-next.css deleted file mode 100644 index 896bae13eca023fba05eb8b91500ceb51c3b34fd..0000000000000000000000000000000000000000 --- a/spaces/alamin655/websurfx/public/static/colorschemes/oceanic-next.css +++ /dev/null @@ -1,11 +0,0 @@ -:root { - --background-color: #1b2b34; - --foreground-color: #d8dee9; - --color-one: #343d46; - --color-two: #5FB3B3ff; - --color-three: #69Cf; - --color-four: #99c794; - --color-five: #69c; - --color-six: #c594c5; - --color-seven: #D8DEE9ff; -} diff --git a/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/Parser.pod b/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/Parser.pod deleted file mode 100644 index b8cd46ec91963eec25511df32d5e9d1f8aa1b5cb..0000000000000000000000000000000000000000 --- a/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/Parser.pod +++ /dev/null @@ -1,67 +0,0 @@ -=head1 NAME - -XML::DOM::Parser - An XML::Parser that builds XML::DOM document structures - -=head1 SYNOPSIS - - use XML::DOM; - - my $parser = new XML::DOM::Parser; - my $doc = $parser->parsefile ("file.xml"); - $doc->dispose; # Avoid memory leaks - cleanup circular references - -=head1 DESCRIPTION - -XML::DOM::Parser extends L - -The XML::Parser module was written by Clark Cooper and -is built on top of XML::Parser::Expat, -which is a lower level interface to James Clark's expat library. - -XML::DOM::Parser parses XML strings or files -and builds a data structure that conforms to the API of the Document Object -Model as described at L. -See the L manpage for other additional properties of the -XML::DOM::Parser class. -Note that the 'Style' property should not be used (it is set internally.) - -The XML::Parser B option is more or less supported, in that it will -generate EntityReference objects whenever an entity reference is encountered -in character data. I'm not sure how useful this is. Any comments are welcome. - -As described in the synopsis, when you create an XML::DOM::Parser object, -the parse and parsefile methods create an L object -from the specified input. 
This Document object can then be examined, modified and -written back out to a file or converted to a string. - -When using XML::DOM with XML::Parser version 2.19 and up, setting the -XML::DOM::Parser option B to 1 will store CDATASections in -CDATASection nodes, instead of converting them to Text nodes. -Subsequent CDATASection nodes will be merged into one. Let me know if this -is a problem. - -=head1 Using LWP to parse URLs - -The parsefile() method now also supports URLs, e.g. I. -It uses LWP to download the file and then calls parse() on the resulting string. -By default it will use a L that is created as follows: - - use LWP::UserAgent; - $LWP_USER_AGENT = LWP::UserAgent->new; - $LWP_USER_AGENT->env_proxy; - -Note that env_proxy reads proxy settings from environment variables, which is what I need to -do to get thru our firewall. If you want to use a different LWP::UserAgent, you can either set -it globally with: - - XML::DOM::Parser::set_LWP_UserAgent ($my_agent); - -or, you can specify it for a specific XML::DOM::Parser by passing it to the constructor: - - my $parser = new XML::DOM::Parser (LWP_UserAgent => $my_agent); - -Currently, LWP is used when the filename (passed to parsefile) starts with one of -the following URL schemes: http, https, ftp, wais, gopher, or file (followed by a colon.) -If I missed one, please let me know. - -The LWP modules are part of libwww-perl which is available at CPAN. diff --git a/spaces/aliabid94/crossword/README.md b/spaces/aliabid94/crossword/README.md deleted file mode 100644 index 185b8b452016e4322a709d8356e318ddccac934f..0000000000000000000000000000000000000000 --- a/spaces/aliabid94/crossword/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Crossword -emoji: 🧩 -colorFrom: indigo -colorTo: gray -sdk: gradio -sdk_version: 3.32.0 -app_file: run.py -pinned: false -license: mit ---- - diff --git a/spaces/allknowingroger/Image-Models-Test10/README.md b/spaces/allknowingroger/Image-Models-Test10/README.md deleted file mode 100644 index f4b76ae3d674676241555ac4d1f174a460c9c976..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test10/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: More Image Models -emoji: 😻 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: true -duplicated_from: allknowingroger/Image-Models-Test9 ---- - - \ No newline at end of file diff --git a/spaces/allknowingroger/Image-Models-Test39/README.md b/spaces/allknowingroger/Image-Models-Test39/README.md deleted file mode 100644 index 2cadaafbd0ac28469c87d116537d57a759150744..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test39/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Image Models -emoji: 👀 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: true -duplicated_from: allknowingroger/Image-Models-Test38 ---- - - \ No newline at end of file diff --git a/spaces/allknowingroger/Image-Models-Test63/app.py b/spaces/allknowingroger/Image-Models-Test63/app.py deleted file mode 100644 index 3ffcd571e3f2bbc44fa853595d2c112eeb93e30c..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test63/app.py +++ /dev/null @@ -1,144 +0,0 @@ -import gradio as gr -# import os -# import sys -# from pathlib import Path -import time - -models =[ - "govindkrishnan123/my-pet-dog", - "anjali0610/my-dog", - "MakAttack/6537c748746a76fc12454f8b", - "Yntec/CultClassic", - "oljike/all-kzkhs-lora", - "Yntec/photoMovieX", 
- "Yntec/YiffyMix", - "Yntec/CinematicReality", - "aigrils2/beautifulv6", -] - - -model_functions = {} -model_idx = 1 -for model_path in models: - try: - model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False) - except Exception as error: - def the_fn(txt): - return None - model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"]) - model_idx+=1 - - -def send_it_idx(idx): - def send_it_fn(prompt): - output = (model_functions.get(str(idx)) or model_functions.get(str(1)))(prompt) - return output - return send_it_fn - -def get_prompts(prompt_text): - return prompt_text - -def clear_it(val): - if int(val) != 0: - val = 0 - else: - val = 0 - pass - return val - -def all_task_end(cnt,t_stamp): - to = t_stamp + 60 - et = time.time() - if et > to and t_stamp != 0: - d = gr.update(value=0) - tog = gr.update(value=1) - #print(f'to: {to} et: {et}') - else: - if cnt != 0: - d = gr.update(value=et) - else: - d = gr.update(value=0) - tog = gr.update(value=0) - #print (f'passing: to: {to} et: {et}') - pass - return d, tog - -def all_task_start(): - print("\n\n\n\n\n\n\n") - t = time.gmtime() - t_stamp = time.time() - current_time = time.strftime("%H:%M:%S", t) - return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0) - -def clear_fn(): - nn = len(models) - return tuple([None, *[None for _ in range(nn)]]) - - - -with gr.Blocks(title="SD Models") as my_interface: - with gr.Column(scale=12): - # with gr.Row(): - # gr.Markdown("""- Primary prompt: 你想画的内容(英文单词,如 a cat, 加英文逗号效果更好;点 Improve 按钮进行完善)\n- Real prompt: 完善后的提示词,出现后再点右边的 Run 按钮开始运行""") - with gr.Row(): - with gr.Row(scale=6): - primary_prompt=gr.Textbox(label="Prompt", value="") - # real_prompt=gr.Textbox(label="Real prompt") - with gr.Row(scale=6): - # improve_prompts_btn=gr.Button("Improve") - with gr.Row(): - run=gr.Button("Run",variant="primary") - clear_btn=gr.Button("Clear") - with gr.Row(): - sd_outputs = {} - model_idx = 1 - for model_path in models: - with gr.Column(scale=3, min_width=320): - with gr.Box(): - sd_outputs[model_idx] = gr.Image(label=model_path) - pass - model_idx += 1 - pass - pass - - with gr.Row(visible=False): - start_box=gr.Number(interactive=False) - end_box=gr.Number(interactive=False) - tog_box=gr.Textbox(value=0,interactive=False) - - start_box.change( - all_task_end, - [start_box, end_box], - [start_box, tog_box], - every=1, - show_progress=False) - - primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box]) - run.click(all_task_start, None, [start_box, end_box, tog_box]) - runs_dict = {} - model_idx = 1 - for model_path in models: - runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]]) - model_idx += 1 - pass - pass - - # improve_prompts_btn_clicked=improve_prompts_btn.click( - # get_prompts, - # inputs=[primary_prompt], - # outputs=[primary_prompt], - # cancels=list(runs_dict.values())) - clear_btn.click( - clear_fn, - None, - [primary_prompt, *list(sd_outputs.values())], - cancels=[*list(runs_dict.values())]) - tog_box.change( - clear_it, - tog_box, - tog_box, - cancels=[*list(runs_dict.values())]) - -my_interface.queue(concurrency_count=600, status_update_rate=1) -my_interface.launch(inline=True, show_api=False) - \ No newline at end of file diff --git a/spaces/allknowingroger/Image-Models-Test75/app.py b/spaces/allknowingroger/Image-Models-Test75/app.py deleted file mode 100644 index 
92885a147fed06b8e5a4a7ff30c554c7cb79ce92..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test75/app.py +++ /dev/null @@ -1,144 +0,0 @@ -import gradio as gr -# import os -# import sys -# from pathlib import Path -import time - -models =[ - "Yntec/Toonify2", - "alkalinevk/testrep", - "hashu/my-pet-dog-hsq", - "hashu/my-pet-dog-xzh", - "skramd/db-sdxl-whitebg-product-photography", - "abin-regi/my-pet-dog-xzk", - "skshreyas714/lora-trained-xl-colab", - "Crosstyan/BPModel", - "NoCrypt/SomethingV2_2", -] - - -model_functions = {} -model_idx = 1 -for model_path in models: - try: - model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False) - except Exception as error: - def the_fn(txt): - return None - model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"]) - model_idx+=1 - - -def send_it_idx(idx): - def send_it_fn(prompt): - output = (model_functions.get(str(idx)) or model_functions.get(str(1)))(prompt) - return output - return send_it_fn - -def get_prompts(prompt_text): - return prompt_text - -def clear_it(val): - if int(val) != 0: - val = 0 - else: - val = 0 - pass - return val - -def all_task_end(cnt,t_stamp): - to = t_stamp + 60 - et = time.time() - if et > to and t_stamp != 0: - d = gr.update(value=0) - tog = gr.update(value=1) - #print(f'to: {to} et: {et}') - else: - if cnt != 0: - d = gr.update(value=et) - else: - d = gr.update(value=0) - tog = gr.update(value=0) - #print (f'passing: to: {to} et: {et}') - pass - return d, tog - -def all_task_start(): - print("\n\n\n\n\n\n\n") - t = time.gmtime() - t_stamp = time.time() - current_time = time.strftime("%H:%M:%S", t) - return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0) - -def clear_fn(): - nn = len(models) - return tuple([None, *[None for _ in range(nn)]]) - - - -with gr.Blocks(title="SD Models") as my_interface: - with gr.Column(scale=12): - # with gr.Row(): - # gr.Markdown("""- Primary prompt: 你想画的内容(英文单词,如 a cat, 加英文逗号效果更好;点 Improve 按钮进行完善)\n- Real prompt: 完善后的提示词,出现后再点右边的 Run 按钮开始运行""") - with gr.Row(): - with gr.Row(scale=6): - primary_prompt=gr.Textbox(label="Prompt", value="") - # real_prompt=gr.Textbox(label="Real prompt") - with gr.Row(scale=6): - # improve_prompts_btn=gr.Button("Improve") - with gr.Row(): - run=gr.Button("Run",variant="primary") - clear_btn=gr.Button("Clear") - with gr.Row(): - sd_outputs = {} - model_idx = 1 - for model_path in models: - with gr.Column(scale=3, min_width=320): - with gr.Box(): - sd_outputs[model_idx] = gr.Image(label=model_path) - pass - model_idx += 1 - pass - pass - - with gr.Row(visible=False): - start_box=gr.Number(interactive=False) - end_box=gr.Number(interactive=False) - tog_box=gr.Textbox(value=0,interactive=False) - - start_box.change( - all_task_end, - [start_box, end_box], - [start_box, tog_box], - every=1, - show_progress=False) - - primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box]) - run.click(all_task_start, None, [start_box, end_box, tog_box]) - runs_dict = {} - model_idx = 1 - for model_path in models: - runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]]) - model_idx += 1 - pass - pass - - # improve_prompts_btn_clicked=improve_prompts_btn.click( - # get_prompts, - # inputs=[primary_prompt], - # outputs=[primary_prompt], - # cancels=list(runs_dict.values())) - clear_btn.click( - clear_fn, - None, - [primary_prompt, 
*list(sd_outputs.values())], - cancels=[*list(runs_dict.values())]) - tog_box.change( - clear_it, - tog_box, - tog_box, - cancels=[*list(runs_dict.values())]) - -my_interface.queue(concurrency_count=600, status_update_rate=1) -my_interface.launch(inline=True, show_api=False) - \ No newline at end of file diff --git a/spaces/amankishore/sjc/sd1/ldm/__init__.py b/spaces/amankishore/sjc/sd1/ldm/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/andryMLOPS/ASTA-GPT-3.8_web_ui/g4f/Provider/Providers/Xiaor.py b/spaces/andryMLOPS/ASTA-GPT-3.8_web_ui/g4f/Provider/Providers/Xiaor.py deleted file mode 100644 index 5757f9971157116cbbfabbe5420e3b7e88fed4e7..0000000000000000000000000000000000000000 --- a/spaces/andryMLOPS/ASTA-GPT-3.8_web_ui/g4f/Provider/Providers/Xiaor.py +++ /dev/null @@ -1,39 +0,0 @@ -import requests -import os -import json -from ...typing import sha256, Dict, get_type_hints - -url = 'https://xiaor.eu.org' -model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k', - 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0613'] -supports_stream = True -needs_auth = False - - -def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs): - headers = { - 'Content-Type': 'application/json', - } - data = { - 'model': model, - 'temperature': 0.7, - 'presence_penalty': 0, - 'messages': messages, - } - response = requests.post(url + '/p1/v1/chat/completions', - json=data, stream=True) - - if stream: - for chunk in response.iter_content(chunk_size=None): - chunk = chunk.decode('utf-8') - if chunk.strip(): - message = json.loads(chunk)['choices'][0]['message']['content'] - yield message - else: - message = response.json()['choices'][0]['message']['content'] - yield message - - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join( - [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) diff --git a/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/prompt.py b/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/prompt.py deleted file mode 100644 index f81aa7c281376933c95c854ed2ecc8ae99ad92a7..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/stable-diffusion-webui/extensions/deforum/scripts/deforum_helpers/prompt.py +++ /dev/null @@ -1,113 +0,0 @@ -import re - -def check_is_number(value): - float_pattern = r'^(?=.)([+-]?([0-9]*)(\.([0-9]+))?)$' - return re.match(float_pattern, value) - -def parse_weight(match, frame = 0)->float: - import numexpr - w_raw = match.group("weight") - if w_raw == None: - return 1 - if check_is_number(w_raw): - return float(w_raw) - else: - t = frame - if len(w_raw) < 3: - print('the value inside `-characters cannot represent a math function') - return 1 - return float(numexpr.evaluate(w_raw[1:-1])) - -def split_weighted_subprompts(text, frame = 0): - """ - splits the prompt based on deforum webui implementation, moved from generate.py - """ - math_parser = re.compile(""" - (?P( - `[\S\s]*?`# a math function wrapped in `-characters - )) - """, re.VERBOSE) - - parsed_prompt = re.sub(math_parser, lambda m: str(parse_weight(m, frame)), text) - - negative_prompts = [] - positive_prompts = [] - - prompt_split = parsed_prompt.split("--neg") - if len(prompt_split) > 1: - positive_prompts, negative_prompts = parsed_prompt.split("--neg") #TODO: 
add --neg to vanilla Deforum for compat - else: - positive_prompts = prompt_split[0] - negative_prompts = "" - - return positive_prompts, negative_prompts - -def interpolate_prompts(animation_prompts, max_frames): - import numpy as np - import pandas as pd - # Get prompts sorted by keyframe - sorted_prompts = sorted(animation_prompts.items(), key=lambda item: int(item[0])) - - # Setup container for interpolated prompts - prompt_series = pd.Series([np.nan for a in range(max_frames)]) - - # For every keyframe prompt except the last - for i in range(0,len(sorted_prompts)-1): - - # Get current and next keyframe - current_frame = int(sorted_prompts[i][0]) - next_frame = int(sorted_prompts[i+1][0]) - - # Ensure there's no weird ordering issues or duplication in the animation prompts - # (unlikely because we sort above, and the json parser will strip dupes) - if current_frame>=next_frame: - print(f"WARNING: Sequential prompt keyframes {i}:{current_frame} and {i+1}:{next_frame} are not monotonously increasing; skipping interpolation.") - continue - - # Get current and next keyframes' positive and negative prompts (if any) - current_prompt = sorted_prompts[i][1] - next_prompt = sorted_prompts[i+1][1] - current_positive, current_negative, *_ = current_prompt.split("--neg") + [None] - next_positive, next_negative, *_ = next_prompt.split("--neg") + [None] - - # Calculate how much to shift the weight from current to next prompt at each frame - weight_step = 1/(next_frame-current_frame) - - # Apply weighted prompt interpolation for each frame between current and next keyframe - # using the syntax: prompt1 :weight1 AND prompt1 :weight2 --neg nprompt1 :weight1 AND nprompt1 :weight2 - # (See: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#composable-diffusion ) - for f in range(current_frame,next_frame): - next_weight = weight_step * (f-current_frame) - current_weight = 1 - next_weight - - # We will build the prompt incrementally depending on which prompts are present - prompt_series[f] = '' - - # Cater for the case where neither, either or both current & next have positive prompts: - if current_positive: - prompt_series[f] += f"{current_positive} :{current_weight}" - if current_positive and next_positive: - prompt_series[f] += f" AND " - if next_positive: - prompt_series[f] += f"{next_positive} :{next_weight}" - - # Cater for the case where neither, either or both current & next have negative prompts: - if current_negative or next_negative: - prompt_series[f] += " --neg " - if current_negative: - prompt_series[f] += f" {current_negative} :{current_weight}" - if current_negative and next_negative: - prompt_series[f] += f" AND " - if next_negative: - prompt_series[f] += f" {next_negative} :{next_weight}" - - # Set explicitly declared keyframe prompts (overwriting interpolated values at the keyframe idx). This ensures: - # - That final prompt is set, and - # - Gives us a chance to emit warnings if any keyframe prompts are already using composable diffusion - for i, prompt in animation_prompts.items(): - prompt_series[int(i)] = prompt - if ' AND ' in prompt: - print(f"WARNING: keyframe {i}'s prompt is using composable diffusion (aka the 'AND' keyword). This will cause unexpected behaviour with interpolation.") - - # Return the filled series, in case max_frames is greater than the last keyframe or any ranges were skipped. 
- return prompt_series.ffill().bfill() diff --git a/spaces/appl044/Chat-GPT-LangChain/polly_utils.py b/spaces/appl044/Chat-GPT-LangChain/polly_utils.py deleted file mode 100644 index 7cb38abff2aaac3c5b24f20914d464151173780d..0000000000000000000000000000000000000000 --- a/spaces/appl044/Chat-GPT-LangChain/polly_utils.py +++ /dev/null @@ -1,635 +0,0 @@ -# This class stores Polly voice data. Specifically, the class stores several records containing -# language, lang_code, gender, voice_id and engine. The class also has a method to return the -# voice_id, lang_code and engine given a language and gender. - -NEURAL_ENGINE = "neural" -STANDARD_ENGINE = "standard" - - -class PollyVoiceData: - def get_voice(self, language, gender): - for voice in self.voice_data: - if voice['language'] == language and voice['gender'] == gender: - if voice['neural'] == 'Yes': - return voice['voice_id'], voice['lang_code'], NEURAL_ENGINE - for voice in self.voice_data: - if voice['language'] == language and voice['gender'] == gender: - if voice['standard'] == 'Yes': - return voice['voice_id'], voice['lang_code'], STANDARD_ENGINE - return None, None, None - - def get_whisper_lang_code(self, language): - for voice in self.voice_data: - if voice['language'] == language: - return voice['whisper_lang_code'] - return "en" - - def __init__(self): - self.voice_data = [ - {'language': 'Arabic', - 'lang_code': 'arb', - 'whisper_lang_code': 'ar', - 'voice_id': 'Zeina', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Arabic (Gulf)', - 'lang_code': 'ar-AE', - 'whisper_lang_code': 'ar', - 'voice_id': 'Hala', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'No'}, - {'language': 'Catalan', - 'lang_code': 'ca-ES', - 'whisper_lang_code': 'ca', - 'voice_id': 'Arlet', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'No'}, - {'language': 'Chinese (Cantonese)', - 'lang_code': 'yue-CN', - 'whisper_lang_code': 'zh', - 'voice_id': 'Hiujin', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'No'}, - {'language': 'Chinese (Mandarin)', - 'lang_code': 'cmn-CN', - 'whisper_lang_code': 'zh', - 'voice_id': 'Zhiyu', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'No'}, - {'language': 'Danish', - 'lang_code': 'da-DK', - 'whisper_lang_code': 'da', - 'voice_id': 'Naja', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Danish', - 'lang_code': 'da-DK', - 'whisper_lang_code': 'da', - 'voice_id': 'Mads', - 'gender': 'Male', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Dutch', - 'lang_code': 'nl-NL', - 'whisper_lang_code': 'nl', - 'voice_id': 'Laura', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'No'}, - {'language': 'Dutch', - 'lang_code': 'nl-NL', - 'whisper_lang_code': 'nl', - 'voice_id': 'Lotte', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Dutch', - 'lang_code': 'nl-NL', - 'whisper_lang_code': 'nl', - 'voice_id': 'Ruben', - 'gender': 'Male', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'English (Australian)', - 'lang_code': 'en-AU', - 'whisper_lang_code': 'en', - 'voice_id': 'Nicole', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'English (Australian)', - 'lang_code': 'en-AU', - 'whisper_lang_code': 'en', - 'voice_id': 'Olivia', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'No'}, - {'language': 'English (Australian)', - 'lang_code': 'en-AU', - 'whisper_lang_code': 'en', - 'voice_id': 'Russell', - 'gender': 'Male', - 'neural': 'No', - 'standard': 'Yes'}, - 
{'language': 'English (British)', - 'lang_code': 'en-GB', - 'whisper_lang_code': 'en', - 'voice_id': 'Amy', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'English (British)', - 'lang_code': 'en-GB', - 'whisper_lang_code': 'en', - 'voice_id': 'Emma', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'English (British)', - 'lang_code': 'en-GB', - 'whisper_lang_code': 'en', - 'voice_id': 'Brian', - 'gender': 'Male', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'English (British)', - 'lang_code': 'en-GB', - 'whisper_lang_code': 'en', - 'voice_id': 'Arthur', - 'gender': 'Male', - 'neural': 'Yes', - 'standard': 'No'}, - {'language': 'English (Indian)', - 'lang_code': 'en-IN', - 'whisper_lang_code': 'en', - 'voice_id': 'Aditi', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'English (Indian)', - 'lang_code': 'en-IN', - 'whisper_lang_code': 'en', - 'voice_id': 'Raveena', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'English (Indian)', - 'lang_code': 'en-IN', - 'whisper_lang_code': 'en', - 'voice_id': 'Kajal', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'No'}, - {'language': 'English (New Zealand)', - 'lang_code': 'en-NZ', - 'whisper_lang_code': 'en', - 'voice_id': 'Aria', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'No'}, - {'language': 'English (South African)', - 'lang_code': 'en-ZA', - 'whisper_lang_code': 'en', - 'voice_id': 'Ayanda', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'No'}, - {'language': 'English (US)', - 'lang_code': 'en-US', - 'whisper_lang_code': 'en', - 'voice_id': 'Ivy', - 'gender': 'Female (child)', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'English (US)', - 'lang_code': 'en-US', - 'whisper_lang_code': 'en', - 'voice_id': 'Joanna', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'English (US)', - 'lang_code': 'en-US', - 'whisper_lang_code': 'en', - 'voice_id': 'Kendra', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'English (US)', - 'lang_code': 'en-US', - 'whisper_lang_code': 'en', - 'voice_id': 'Kimberly', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'English (US)', - 'lang_code': 'en-US', - 'whisper_lang_code': 'en', - 'voice_id': 'Salli', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'English (US)', - 'lang_code': 'en-US', - 'whisper_lang_code': 'en', - 'voice_id': 'Joey', - 'gender': 'Male', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'English (US)', - 'lang_code': 'en-US', - 'whisper_lang_code': 'en', - 'voice_id': 'Justin', - 'gender': 'Male (child)', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'English (US)', - 'lang_code': 'en-US', - 'whisper_lang_code': 'en', - 'voice_id': 'Kevin', - 'gender': 'Male (child)', - 'neural': 'Yes', - 'standard': 'No'}, - {'language': 'English (US)', - 'lang_code': 'en-US', - 'whisper_lang_code': 'en', - 'voice_id': 'Matthew', - 'gender': 'Male', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'English (Welsh)', - 'lang_code': 'en-GB-WLS', - 'whisper_lang_code': 'en', - 'voice_id': 'Geraint', - 'gender': 'Male', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Finnish', - 'lang_code': 'fi-FI', - 'whisper_lang_code': 'fi', - 'voice_id': 'Suvi', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'No'}, - {'language': 'French', - 'lang_code': 'fr-FR', - 'whisper_lang_code': 'fr', - 'voice_id': 
'Celine', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'French', - 'lang_code': 'fr-FR', - 'whisper_lang_code': 'fr', - 'voice_id': 'Lea', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'French', - 'lang_code': 'fr-FR', - 'whisper_lang_code': 'fr', - 'voice_id': 'Mathieu', - 'gender': 'Male', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'French (Canadian)', - 'lang_code': 'fr-CA', - 'whisper_lang_code': 'fr', - 'voice_id': 'Chantal', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'French (Canadian)', - 'lang_code': 'fr-CA', - 'whisper_lang_code': 'fr', - 'voice_id': 'Gabrielle', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'No'}, - {'language': 'French (Canadian)', - 'lang_code': 'fr-CA', - 'whisper_lang_code': 'fr', - 'voice_id': 'Liam', - 'gender': 'Male', - 'neural': 'Yes', - 'standard': 'No'}, - {'language': 'German', - 'lang_code': 'de-DE', - 'whisper_lang_code': 'de', - 'voice_id': 'Marlene', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'German', - 'lang_code': 'de-DE', - 'whisper_lang_code': 'de', - 'voice_id': 'Vicki', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'German', - 'lang_code': 'de-DE', - 'whisper_lang_code': 'de', - 'voice_id': 'Hans', - 'gender': 'Male', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'German', - 'lang_code': 'de-DE', - 'whisper_lang_code': 'de', - 'voice_id': 'Daniel', - 'gender': 'Male', - 'neural': 'Yes', - 'standard': 'No'}, - {'language': 'German (Austrian)', - 'lang_code': 'de-AT', - 'whisper_lang_code': 'de', - 'voice_id': 'Hannah', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'No'}, - {'language': 'Hindi', - 'lang_code': 'hi-IN', - 'whisper_lang_code': 'hi', - 'voice_id': 'Aditi', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Hindi', - 'lang_code': 'hi-IN', - 'whisper_lang_code': 'hi', - 'voice_id': 'Kajal', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'No'}, - {'language': 'Icelandic', - 'lang_code': 'is-IS', - 'whisper_lang_code': 'is', - 'voice_id': 'Dora', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Icelandic', - 'lang_code': 'is-IS', - 'whisper_lang_code': 'is', - 'voice_id': 'Karl', - 'gender': 'Male', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Italian', - 'lang_code': 'it-IT', - 'whisper_lang_code': 'it', - 'voice_id': 'Carla', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Italian', - 'lang_code': 'it-IT', - 'whisper_lang_code': 'it', - 'voice_id': 'Bianca', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'Japanese', - 'lang_code': 'ja-JP', - 'whisper_lang_code': 'ja', - 'voice_id': 'Mizuki', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Japanese', - 'lang_code': 'ja-JP', - 'whisper_lang_code': 'ja', - 'voice_id': 'Takumi', - 'gender': 'Male', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'Korean', - 'lang_code': 'ko-KR', - 'whisper_lang_code': 'ko', - 'voice_id': 'Seoyeon', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'Norwegian', - 'lang_code': 'nb-NO', - 'whisper_lang_code': 'no', - 'voice_id': 'Liv', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Norwegian', - 'lang_code': 'nb-NO', - 'whisper_lang_code': 'no', - 'voice_id': 'Ida', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'No'}, 
- {'language': 'Polish', - 'lang_code': 'pl-PL', - 'whisper_lang_code': 'pl', - 'voice_id': 'Ewa', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Polish', - 'lang_code': 'pl-PL', - 'whisper_lang_code': 'pl', - 'voice_id': 'Maja', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Polish', - 'lang_code': 'pl-PL', - 'whisper_lang_code': 'pl', - 'voice_id': 'Jacek', - 'gender': 'Male', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Polish', - 'lang_code': 'pl-PL', - 'whisper_lang_code': 'pl', - 'voice_id': 'Jan', - 'gender': 'Male', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Polish', - 'lang_code': 'pl-PL', - 'whisper_lang_code': 'pl', - 'voice_id': 'Ola', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'No'}, - {'language': 'Portuguese (Brazilian)', - 'lang_code': 'pt-BR', - 'whisper_lang_code': 'pt', - 'voice_id': 'Camila', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'Portuguese (Brazilian)', - 'lang_code': 'pt-BR', - 'whisper_lang_code': 'pt', - 'voice_id': 'Vitoria', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'Portuguese (Brazilian)', - 'lang_code': 'pt-BR', - 'whisper_lang_code': 'pt', - 'voice_id': 'Ricardo', - 'gender': 'Male', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Portuguese (European)', - 'lang_code': 'pt-PT', - 'whisper_lang_code': 'pt', - 'voice_id': 'Ines', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'Portuguese (European)', - 'lang_code': 'pt-PT', - 'whisper_lang_code': 'pt', - 'voice_id': 'Cristiano', - 'gender': 'Male', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Romanian', - 'lang_code': 'ro-RO', - 'whisper_lang_code': 'ro', - 'voice_id': 'Carmen', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Russian', - 'lang_code': 'ru-RU', - 'whisper_lang_code': 'ru', - 'voice_id': 'Tatyana', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Russian', - 'lang_code': 'ru-RU', - 'whisper_lang_code': 'ru', - 'voice_id': 'Maxim', - 'gender': 'Male', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Spanish (European)', - 'lang_code': 'es-ES', - 'whisper_lang_code': 'es', - 'voice_id': 'Conchita', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Spanish (European)', - 'lang_code': 'es-ES', - 'whisper_lang_code': 'es', - 'voice_id': 'Lucia', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'Spanish (European)', - 'lang_code': 'es-ES', - 'whisper_lang_code': 'es', - 'voice_id': 'Enrique', - 'gender': 'Male', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Spanish (Mexican)', - 'lang_code': 'es-MX', - 'whisper_lang_code': 'es', - 'voice_id': 'Mia', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'Spanish (US)', - 'lang_code': 'es-US', - 'whisper_lang_code': 'es', - 'voice_id': 'Lupe', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'Yes'}, - {'language': 'Spanish (US)', - 'lang_code': 'es-US', - 'whisper_lang_code': 'es', - 'voice_id': 'Penelope', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Spanish (US)', - 'lang_code': 'es-US', - 'whisper_lang_code': 'es', - 'voice_id': 'Miguel', - 'gender': 'Male', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Spanish (US)', - 'lang_code': 'es-US', - 'whisper_lang_code': 'es', - 'voice_id': 'Pedro', - 'gender': 'Male', - 'neural': 'Yes', 
- 'standard': 'No'}, - {'language': 'Swedish', - 'lang_code': 'sv-SE', - 'whisper_lang_code': 'sv', - 'voice_id': 'Astrid', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Swedish', - 'lang_code': 'sv-SE', - 'whisper_lang_code': 'sv', - 'voice_id': 'Elin', - 'gender': 'Female', - 'neural': 'Yes', - 'standard': 'No'}, - {'language': 'Turkish', - 'lang_code': 'tr-TR', - 'whisper_lang_code': 'tr', - 'voice_id': 'Filiz', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'}, - {'language': 'Welsh', - 'lang_code': 'cy-GB', - 'whisper_lang_code': 'cy', - 'voice_id': 'Gwyneth', - 'gender': 'Female', - 'neural': 'No', - 'standard': 'Yes'} - ] - - -# Run from the command-line -if __name__ == '__main__': - polly_voice_data = PollyVoiceData() - - voice_id, language_code, engine = polly_voice_data.get_voice('English (US)', 'Male') - print('English (US)', 'Male', voice_id, language_code, engine) - - voice_id, language_code, engine = polly_voice_data.get_voice('English (US)', 'Female') - print('English (US)', 'Female', voice_id, language_code, engine) - - voice_id, language_code, engine = polly_voice_data.get_voice('French', 'Female') - print('French', 'Female', voice_id, language_code, engine) - - voice_id, language_code, engine = polly_voice_data.get_voice('French', 'Male') - print('French', 'Male', voice_id, language_code, engine) - - voice_id, language_code, engine = polly_voice_data.get_voice('Japanese', 'Female') - print('Japanese', 'Female', voice_id, language_code, engine) - - voice_id, language_code, engine = polly_voice_data.get_voice('Japanese', 'Male') - print('Japanese', 'Male', voice_id, language_code, engine) - - voice_id, language_code, engine = polly_voice_data.get_voice('Hindi', 'Female') - print('Hindi', 'Female', voice_id, language_code, engine) - - voice_id, language_code, engine = polly_voice_data.get_voice('Hindi', 'Male') - print('Hindi', 'Male', voice_id, language_code, engine) - - whisper_lang_code = polly_voice_data.get_whisper_lang_code('English (US)') - print('English (US) whisper_lang_code:', whisper_lang_code) - - whisper_lang_code = polly_voice_data.get_whisper_lang_code('Chinese (Mandarin)') - print('Chinese (Mandarin) whisper_lang_code:', whisper_lang_code) - - whisper_lang_code = polly_voice_data.get_whisper_lang_code('Norwegian') - print('Norwegian whisper_lang_code:', whisper_lang_code) - - whisper_lang_code = polly_voice_data.get_whisper_lang_code('Dutch') - print('Dutch whisper_lang_code:', whisper_lang_code) - - whisper_lang_code = polly_voice_data.get_whisper_lang_code('Foo') - print('Foo whisper_lang_code:', whisper_lang_code) - - diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/LL1Analyzer.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/LL1Analyzer.py deleted file mode 100644 index 6b398fcd99db853b289eb57f5f0cbc1b880cf8ea..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/antlr4/LL1Analyzer.py +++ /dev/null @@ -1,172 +0,0 @@ -# -# Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. -# Use of this file is governed by the BSD 3-clause license that -# can be found in the LICENSE.txt file in the project root. 
-#/ -from antlr4.IntervalSet import IntervalSet -from antlr4.Token import Token -from antlr4.PredictionContext import PredictionContext, SingletonPredictionContext, PredictionContextFromRuleContext -from antlr4.RuleContext import RuleContext -from antlr4.atn.ATN import ATN -from antlr4.atn.ATNConfig import ATNConfig -from antlr4.atn.ATNState import ATNState, RuleStopState -from antlr4.atn.Transition import WildcardTransition, NotSetTransition, AbstractPredicateTransition, RuleTransition - - -class LL1Analyzer (object): - - #* Special value added to the lookahead sets to indicate that we hit - # a predicate during analysis if {@code seeThruPreds==false}. - #/ - HIT_PRED = Token.INVALID_TYPE - - def __init__(self, atn:ATN): - self.atn = atn - - #* - # Calculates the SLL(1) expected lookahead set for each outgoing transition - # of an {@link ATNState}. The returned array has one element for each - # outgoing transition in {@code s}. If the closure from transition - # i leads to a semantic predicate before matching a symbol, the - # element at index i of the result will be {@code null}. - # - # @param s the ATN state - # @return the expected symbols for each outgoing transition of {@code s}. - #/ - def getDecisionLookahead(self, s:ATNState): - if s is None: - return None - - count = len(s.transitions) - look = [] * count - for alt in range(0, count): - look[alt] = set() - lookBusy = set() - seeThruPreds = False # fail to get lookahead upon pred - self._LOOK(s.transition(alt).target, None, PredictionContext.EMPTY, - look[alt], lookBusy, set(), seeThruPreds, False) - # Wipe out lookahead for this alternative if we found nothing - # or we had a predicate when we !seeThruPreds - if len(look[alt])==0 or self.HIT_PRED in look[alt]: - look[alt] = None - return look - - #* - # Compute set of tokens that can follow {@code s} in the ATN in the - # specified {@code ctx}. - # - #
If {@code ctx} is {@code null} and the end of the rule containing - # {@code s} is reached, {@link Token#EPSILON} is added to the result set. - # If {@code ctx} is not {@code null} and the end of the outermost rule is - # reached, {@link Token#EOF} is added to the result set.
- # - # @param s the ATN state - # @param stopState the ATN state to stop at. This can be a - # {@link BlockEndState} to detect epsilon paths through a closure. - # @param ctx the complete parser context, or {@code null} if the context - # should be ignored - # - # @return The set of tokens that can follow {@code s} in the ATN in the - # specified {@code ctx}. - #/ - def LOOK(self, s:ATNState, stopState:ATNState=None, ctx:RuleContext=None): - r = IntervalSet() - seeThruPreds = True # ignore preds; get all lookahead - lookContext = PredictionContextFromRuleContext(s.atn, ctx) if ctx is not None else None - self._LOOK(s, stopState, lookContext, r, set(), set(), seeThruPreds, True) - return r - - #* - # Compute set of tokens that can follow {@code s} in the ATN in the - # specified {@code ctx}. - # - #
If {@code ctx} is {@code null} and {@code stopState} or the end of the - # rule containing {@code s} is reached, {@link Token#EPSILON} is added to - # the result set. If {@code ctx} is not {@code null} and {@code addEOF} is - # {@code true} and {@code stopState} or the end of the outermost rule is - # reached, {@link Token#EOF} is added to the result set.
- # - # @param s the ATN state. - # @param stopState the ATN state to stop at. This can be a - # {@link BlockEndState} to detect epsilon paths through a closure. - # @param ctx The outer context, or {@code null} if the outer context should - # not be used. - # @param look The result lookahead set. - # @param lookBusy A set used for preventing epsilon closures in the ATN - # from causing a stack overflow. Outside code should pass - # {@code new HashSet} for this argument. - # @param calledRuleStack A set used for preventing left recursion in the - # ATN from causing a stack overflow. Outside code should pass - # {@code new BitSet()} for this argument. - # @param seeThruPreds {@code true} to true semantic predicates as - # implicitly {@code true} and "see through them", otherwise {@code false} - # to treat semantic predicates as opaque and add {@link #HIT_PRED} to the - # result if one is encountered. - # @param addEOF Add {@link Token#EOF} to the result if the end of the - # outermost context is reached. This parameter has no effect if {@code ctx} - # is {@code null}. - #/ - def _LOOK(self, s:ATNState, stopState:ATNState , ctx:PredictionContext, look:IntervalSet, lookBusy:set, - calledRuleStack:set, seeThruPreds:bool, addEOF:bool): - c = ATNConfig(s, 0, ctx) - - if c in lookBusy: - return - lookBusy.add(c) - - if s == stopState: - if ctx is None: - look.addOne(Token.EPSILON) - return - elif ctx.isEmpty() and addEOF: - look.addOne(Token.EOF) - return - - if isinstance(s, RuleStopState ): - if ctx is None: - look.addOne(Token.EPSILON) - return - elif ctx.isEmpty() and addEOF: - look.addOne(Token.EOF) - return - - if ctx != PredictionContext.EMPTY: - # run thru all possible stack tops in ctx - for i in range(0, len(ctx)): - returnState = self.atn.states[ctx.getReturnState(i)] - removed = returnState.ruleIndex in calledRuleStack - try: - calledRuleStack.discard(returnState.ruleIndex) - self._LOOK(returnState, stopState, ctx.getParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - finally: - if removed: - calledRuleStack.add(returnState.ruleIndex) - return - - for t in s.transitions: - if type(t) == RuleTransition: - if t.target.ruleIndex in calledRuleStack: - continue - - newContext = SingletonPredictionContext.create(ctx, t.followState.stateNumber) - - try: - calledRuleStack.add(t.target.ruleIndex) - self._LOOK(t.target, stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - finally: - calledRuleStack.remove(t.target.ruleIndex) - elif isinstance(t, AbstractPredicateTransition ): - if seeThruPreds: - self._LOOK(t.target, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - else: - look.addOne(self.HIT_PRED) - elif t.isEpsilon: - self._LOOK(t.target, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - elif type(t) == WildcardTransition: - look.addRange( range(Token.MIN_USER_TOKEN_TYPE, self.atn.maxTokenType + 1) ) - else: - set_ = t.label - if set_ is not None: - if isinstance(t, NotSetTransition): - set_ = set_.complement(Token.MIN_USER_TOKEN_TYPE, self.atn.maxTokenType) - look.addSet(set_) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/click/_termui_impl.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/click/_termui_impl.py deleted file mode 100644 index 4b979bcc1edcb33feb9b385156492221d92253dc..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/click/_termui_impl.py +++ /dev/null @@ -1,717 +0,0 @@ -""" -This module contains 
implementations for the termui module. To keep the -import time of Click down, some infrequently used functionality is -placed in this module and only imported as needed. -""" -import contextlib -import math -import os -import sys -import time -import typing as t -from gettext import gettext as _ - -from ._compat import _default_text_stdout -from ._compat import CYGWIN -from ._compat import get_best_encoding -from ._compat import isatty -from ._compat import open_stream -from ._compat import strip_ansi -from ._compat import term_len -from ._compat import WIN -from .exceptions import ClickException -from .utils import echo - -V = t.TypeVar("V") - -if os.name == "nt": - BEFORE_BAR = "\r" - AFTER_BAR = "\n" -else: - BEFORE_BAR = "\r\033[?25l" - AFTER_BAR = "\033[?25h\n" - - -class ProgressBar(t.Generic[V]): - def __init__( - self, - iterable: t.Optional[t.Iterable[V]], - length: t.Optional[int] = None, - fill_char: str = "#", - empty_char: str = " ", - bar_template: str = "%(bar)s", - info_sep: str = " ", - show_eta: bool = True, - show_percent: t.Optional[bool] = None, - show_pos: bool = False, - item_show_func: t.Optional[t.Callable[[t.Optional[V]], t.Optional[str]]] = None, - label: t.Optional[str] = None, - file: t.Optional[t.TextIO] = None, - color: t.Optional[bool] = None, - update_min_steps: int = 1, - width: int = 30, - ) -> None: - self.fill_char = fill_char - self.empty_char = empty_char - self.bar_template = bar_template - self.info_sep = info_sep - self.show_eta = show_eta - self.show_percent = show_percent - self.show_pos = show_pos - self.item_show_func = item_show_func - self.label = label or "" - if file is None: - file = _default_text_stdout() - self.file = file - self.color = color - self.update_min_steps = update_min_steps - self._completed_intervals = 0 - self.width = width - self.autowidth = width == 0 - - if length is None: - from operator import length_hint - - length = length_hint(iterable, -1) - - if length == -1: - length = None - if iterable is None: - if length is None: - raise TypeError("iterable or length is required") - iterable = t.cast(t.Iterable[V], range(length)) - self.iter = iter(iterable) - self.length = length - self.pos = 0 - self.avg: t.List[float] = [] - self.start = self.last_eta = time.time() - self.eta_known = False - self.finished = False - self.max_width: t.Optional[int] = None - self.entered = False - self.current_item: t.Optional[V] = None - self.is_hidden = not isatty(self.file) - self._last_line: t.Optional[str] = None - - def __enter__(self) -> "ProgressBar": - self.entered = True - self.render_progress() - return self - - def __exit__(self, exc_type, exc_value, tb): # type: ignore - self.render_finish() - - def __iter__(self) -> t.Iterator[V]: - if not self.entered: - raise RuntimeError("You need to use progress bars in a with block.") - self.render_progress() - return self.generator() - - def __next__(self) -> V: - # Iteration is defined in terms of a generator function, - # returned by iter(self); use that to define next(). This works - # because `self.iter` is an iterable consumed by that generator, - # so it is re-entry safe. Calling `next(self.generator())` - # twice works and does "what you want". 
- return next(iter(self)) - - def render_finish(self) -> None: - if self.is_hidden: - return - self.file.write(AFTER_BAR) - self.file.flush() - - @property - def pct(self) -> float: - if self.finished: - return 1.0 - return min(self.pos / (float(self.length or 1) or 1), 1.0) - - @property - def time_per_iteration(self) -> float: - if not self.avg: - return 0.0 - return sum(self.avg) / float(len(self.avg)) - - @property - def eta(self) -> float: - if self.length is not None and not self.finished: - return self.time_per_iteration * (self.length - self.pos) - return 0.0 - - def format_eta(self) -> str: - if self.eta_known: - t = int(self.eta) - seconds = t % 60 - t //= 60 - minutes = t % 60 - t //= 60 - hours = t % 24 - t //= 24 - if t > 0: - return f"{t}d {hours:02}:{minutes:02}:{seconds:02}" - else: - return f"{hours:02}:{minutes:02}:{seconds:02}" - return "" - - def format_pos(self) -> str: - pos = str(self.pos) - if self.length is not None: - pos += f"/{self.length}" - return pos - - def format_pct(self) -> str: - return f"{int(self.pct * 100): 4}%"[1:] - - def format_bar(self) -> str: - if self.length is not None: - bar_length = int(self.pct * self.width) - bar = self.fill_char * bar_length - bar += self.empty_char * (self.width - bar_length) - elif self.finished: - bar = self.fill_char * self.width - else: - chars = list(self.empty_char * (self.width or 1)) - if self.time_per_iteration != 0: - chars[ - int( - (math.cos(self.pos * self.time_per_iteration) / 2.0 + 0.5) - * self.width - ) - ] = self.fill_char - bar = "".join(chars) - return bar - - def format_progress_line(self) -> str: - show_percent = self.show_percent - - info_bits = [] - if self.length is not None and show_percent is None: - show_percent = not self.show_pos - - if self.show_pos: - info_bits.append(self.format_pos()) - if show_percent: - info_bits.append(self.format_pct()) - if self.show_eta and self.eta_known and not self.finished: - info_bits.append(self.format_eta()) - if self.item_show_func is not None: - item_info = self.item_show_func(self.current_item) - if item_info is not None: - info_bits.append(item_info) - - return ( - self.bar_template - % { - "label": self.label, - "bar": self.format_bar(), - "info": self.info_sep.join(info_bits), - } - ).rstrip() - - def render_progress(self) -> None: - import shutil - - if self.is_hidden: - # Only output the label as it changes if the output is not a - # TTY. Use file=stderr if you expect to be piping stdout. - if self._last_line != self.label: - self._last_line = self.label - echo(self.label, file=self.file, color=self.color) - - return - - buf = [] - # Update width in case the terminal has been resized - if self.autowidth: - old_width = self.width - self.width = 0 - clutter_length = term_len(self.format_progress_line()) - new_width = max(0, shutil.get_terminal_size().columns - clutter_length) - if new_width < old_width: - buf.append(BEFORE_BAR) - buf.append(" " * self.max_width) # type: ignore - self.max_width = new_width - self.width = new_width - - clear_width = self.width - if self.max_width is not None: - clear_width = self.max_width - - buf.append(BEFORE_BAR) - line = self.format_progress_line() - line_len = term_len(line) - if self.max_width is None or self.max_width < line_len: - self.max_width = line_len - - buf.append(line) - buf.append(" " * (clear_width - line_len)) - line = "".join(buf) - # Render the line only if it changed. 
- - if line != self._last_line: - self._last_line = line - echo(line, file=self.file, color=self.color, nl=False) - self.file.flush() - - def make_step(self, n_steps: int) -> None: - self.pos += n_steps - if self.length is not None and self.pos >= self.length: - self.finished = True - - if (time.time() - self.last_eta) < 1.0: - return - - self.last_eta = time.time() - - # self.avg is a rolling list of length <= 7 of steps where steps are - # defined as time elapsed divided by the total progress through - # self.length. - if self.pos: - step = (time.time() - self.start) / self.pos - else: - step = time.time() - self.start - - self.avg = self.avg[-6:] + [step] - - self.eta_known = self.length is not None - - def update(self, n_steps: int, current_item: t.Optional[V] = None) -> None: - """Update the progress bar by advancing a specified number of - steps, and optionally set the ``current_item`` for this new - position. - - :param n_steps: Number of steps to advance. - :param current_item: Optional item to set as ``current_item`` - for the updated position. - - .. versionchanged:: 8.0 - Added the ``current_item`` optional parameter. - - .. versionchanged:: 8.0 - Only render when the number of steps meets the - ``update_min_steps`` threshold. - """ - if current_item is not None: - self.current_item = current_item - - self._completed_intervals += n_steps - - if self._completed_intervals >= self.update_min_steps: - self.make_step(self._completed_intervals) - self.render_progress() - self._completed_intervals = 0 - - def finish(self) -> None: - self.eta_known = False - self.current_item = None - self.finished = True - - def generator(self) -> t.Iterator[V]: - """Return a generator which yields the items added to the bar - during construction, and updates the progress bar *after* the - yielded block returns. - """ - # WARNING: the iterator interface for `ProgressBar` relies on - # this and only works because this is a simple generator which - # doesn't create or manage additional state. If this function - # changes, the impact should be evaluated both against - # `iter(bar)` and `next(bar)`. `next()` in particular may call - # `self.generator()` repeatedly, and this must remain safe in - # order for that interface to work. - if not self.entered: - raise RuntimeError("You need to use progress bars in a with block.") - - if self.is_hidden: - yield from self.iter - else: - for rv in self.iter: - self.current_item = rv - - # This allows show_item_func to be updated before the - # item is processed. Only trigger at the beginning of - # the update interval. 
- if self._completed_intervals == 0: - self.render_progress() - - yield rv - self.update(1) - - self.finish() - self.render_progress() - - -def pager(generator: t.Iterable[str], color: t.Optional[bool] = None) -> None: - """Decide what method to use for paging through text.""" - stdout = _default_text_stdout() - if not isatty(sys.stdin) or not isatty(stdout): - return _nullpager(stdout, generator, color) - pager_cmd = (os.environ.get("PAGER", None) or "").strip() - if pager_cmd: - if WIN: - return _tempfilepager(generator, pager_cmd, color) - return _pipepager(generator, pager_cmd, color) - if os.environ.get("TERM") in ("dumb", "emacs"): - return _nullpager(stdout, generator, color) - if WIN or sys.platform.startswith("os2"): - return _tempfilepager(generator, "more <", color) - if hasattr(os, "system") and os.system("(less) 2>/dev/null") == 0: - return _pipepager(generator, "less", color) - - import tempfile - - fd, filename = tempfile.mkstemp() - os.close(fd) - try: - if hasattr(os, "system") and os.system(f'more "{filename}"') == 0: - return _pipepager(generator, "more", color) - return _nullpager(stdout, generator, color) - finally: - os.unlink(filename) - - -def _pipepager(generator: t.Iterable[str], cmd: str, color: t.Optional[bool]) -> None: - """Page through text by feeding it to another program. Invoking a - pager through this might support colors. - """ - import subprocess - - env = dict(os.environ) - - # If we're piping to less we might support colors under the - # condition that - cmd_detail = cmd.rsplit("/", 1)[-1].split() - if color is None and cmd_detail[0] == "less": - less_flags = f"{os.environ.get('LESS', '')}{' '.join(cmd_detail[1:])}" - if not less_flags: - env["LESS"] = "-R" - color = True - elif "r" in less_flags or "R" in less_flags: - color = True - - c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, env=env) - stdin = t.cast(t.BinaryIO, c.stdin) - encoding = get_best_encoding(stdin) - try: - for text in generator: - if not color: - text = strip_ansi(text) - - stdin.write(text.encode(encoding, "replace")) - except (OSError, KeyboardInterrupt): - pass - else: - stdin.close() - - # Less doesn't respect ^C, but catches it for its own UI purposes (aborting - # search or other commands inside less). - # - # That means when the user hits ^C, the parent process (click) terminates, - # but less is still alive, paging the output and messing up the terminal. - # - # If the user wants to make the pager exit on ^C, they should set - # `LESS='-K'`. It's not our decision to make. - while True: - try: - c.wait() - except KeyboardInterrupt: - pass - else: - break - - -def _tempfilepager( - generator: t.Iterable[str], cmd: str, color: t.Optional[bool] -) -> None: - """Page through text by invoking a program on a temporary file.""" - import tempfile - - fd, filename = tempfile.mkstemp() - # TODO: This never terminates if the passed generator never terminates. - text = "".join(generator) - if not color: - text = strip_ansi(text) - encoding = get_best_encoding(sys.stdout) - with open_stream(filename, "wb")[0] as f: - f.write(text.encode(encoding)) - try: - os.system(f'{cmd} "{filename}"') - finally: - os.close(fd) - os.unlink(filename) - - -def _nullpager( - stream: t.TextIO, generator: t.Iterable[str], color: t.Optional[bool] -) -> None: - """Simply print unformatted text. 
This is the ultimate fallback.""" - for text in generator: - if not color: - text = strip_ansi(text) - stream.write(text) - - -class Editor: - def __init__( - self, - editor: t.Optional[str] = None, - env: t.Optional[t.Mapping[str, str]] = None, - require_save: bool = True, - extension: str = ".txt", - ) -> None: - self.editor = editor - self.env = env - self.require_save = require_save - self.extension = extension - - def get_editor(self) -> str: - if self.editor is not None: - return self.editor - for key in "VISUAL", "EDITOR": - rv = os.environ.get(key) - if rv: - return rv - if WIN: - return "notepad" - for editor in "sensible-editor", "vim", "nano": - if os.system(f"which {editor} >/dev/null 2>&1") == 0: - return editor - return "vi" - - def edit_file(self, filename: str) -> None: - import subprocess - - editor = self.get_editor() - environ: t.Optional[t.Dict[str, str]] = None - - if self.env: - environ = os.environ.copy() - environ.update(self.env) - - try: - c = subprocess.Popen(f'{editor} "{filename}"', env=environ, shell=True) - exit_code = c.wait() - if exit_code != 0: - raise ClickException( - _("{editor}: Editing failed").format(editor=editor) - ) - except OSError as e: - raise ClickException( - _("{editor}: Editing failed: {e}").format(editor=editor, e=e) - ) from e - - def edit(self, text: t.Optional[t.AnyStr]) -> t.Optional[t.AnyStr]: - import tempfile - - if not text: - data = b"" - elif isinstance(text, (bytes, bytearray)): - data = text - else: - if text and not text.endswith("\n"): - text += "\n" - - if WIN: - data = text.replace("\n", "\r\n").encode("utf-8-sig") - else: - data = text.encode("utf-8") - - fd, name = tempfile.mkstemp(prefix="editor-", suffix=self.extension) - f: t.BinaryIO - - try: - with os.fdopen(fd, "wb") as f: - f.write(data) - - # If the filesystem resolution is 1 second, like Mac OS - # 10.12 Extended, or 2 seconds, like FAT32, and the editor - # closes very fast, require_save can fail. Set the modified - # time to be 2 seconds in the past to work around this. - os.utime(name, (os.path.getatime(name), os.path.getmtime(name) - 2)) - # Depending on the resolution, the exact value might not be - # recorded, so get the new recorded value. 
- timestamp = os.path.getmtime(name) - - self.edit_file(name) - - if self.require_save and os.path.getmtime(name) == timestamp: - return None - - with open(name, "rb") as f: - rv = f.read() - - if isinstance(text, (bytes, bytearray)): - return rv - - return rv.decode("utf-8-sig").replace("\r\n", "\n") # type: ignore - finally: - os.unlink(name) - - -def open_url(url: str, wait: bool = False, locate: bool = False) -> int: - import subprocess - - def _unquote_file(url: str) -> str: - from urllib.parse import unquote - - if url.startswith("file://"): - url = unquote(url[7:]) - - return url - - if sys.platform == "darwin": - args = ["open"] - if wait: - args.append("-W") - if locate: - args.append("-R") - args.append(_unquote_file(url)) - null = open("/dev/null", "w") - try: - return subprocess.Popen(args, stderr=null).wait() - finally: - null.close() - elif WIN: - if locate: - url = _unquote_file(url.replace('"', "")) - args = f'explorer /select,"{url}"' - else: - url = url.replace('"', "") - wait_str = "/WAIT" if wait else "" - args = f'start {wait_str} "" "{url}"' - return os.system(args) - elif CYGWIN: - if locate: - url = os.path.dirname(_unquote_file(url).replace('"', "")) - args = f'cygstart "{url}"' - else: - url = url.replace('"', "") - wait_str = "-w" if wait else "" - args = f'cygstart {wait_str} "{url}"' - return os.system(args) - - try: - if locate: - url = os.path.dirname(_unquote_file(url)) or "." - else: - url = _unquote_file(url) - c = subprocess.Popen(["xdg-open", url]) - if wait: - return c.wait() - return 0 - except OSError: - if url.startswith(("http://", "https://")) and not locate and not wait: - import webbrowser - - webbrowser.open(url) - return 0 - return 1 - - -def _translate_ch_to_exc(ch: str) -> t.Optional[BaseException]: - if ch == "\x03": - raise KeyboardInterrupt() - - if ch == "\x04" and not WIN: # Unix-like, Ctrl+D - raise EOFError() - - if ch == "\x1a" and WIN: # Windows, Ctrl+Z - raise EOFError() - - return None - - -if WIN: - import msvcrt - - @contextlib.contextmanager - def raw_terminal() -> t.Iterator[int]: - yield -1 - - def getchar(echo: bool) -> str: - # The function `getch` will return a bytes object corresponding to - # the pressed character. Since Windows 10 build 1803, it will also - # return \x00 when called a second time after pressing a regular key. - # - # `getwch` does not share this probably-bugged behavior. Moreover, it - # returns a Unicode object by default, which is what we want. - # - # Either of these functions will return \x00 or \xe0 to indicate - # a special key, and you need to call the same function again to get - # the "rest" of the code. The fun part is that \u00e0 is - # "latin small letter a with grave", so if you type that on a French - # keyboard, you _also_ get a \xe0. - # E.g., consider the Up arrow. This returns \xe0 and then \x48. The - # resulting Unicode string reads as "a with grave" + "capital H". - # This is indistinguishable from when the user actually types - # "a with grave" and then "capital H". - # - # When \xe0 is returned, we assume it's part of a special-key sequence - # and call `getwch` again, but that means that when the user types - # the \u00e0 character, `getchar` doesn't return until a second - # character is typed. - # The alternative is returning immediately, but that would mess up - # cross-platform handling of arrow keys and others that start with - # \xe0. 
Another option is using `getch`, but then we can't reliably - # read non-ASCII characters, because return values of `getch` are - # limited to the current 8-bit codepage. - # - # Anyway, Click doesn't claim to do this Right(tm), and using `getwch` - # is doing the right thing in more situations than with `getch`. - func: t.Callable[[], str] - - if echo: - func = msvcrt.getwche # type: ignore - else: - func = msvcrt.getwch # type: ignore - - rv = func() - - if rv in ("\x00", "\xe0"): - # \x00 and \xe0 are control characters that indicate special key, - # see above. - rv += func() - - _translate_ch_to_exc(rv) - return rv - -else: - import tty - import termios - - @contextlib.contextmanager - def raw_terminal() -> t.Iterator[int]: - f: t.Optional[t.TextIO] - fd: int - - if not isatty(sys.stdin): - f = open("/dev/tty") - fd = f.fileno() - else: - fd = sys.stdin.fileno() - f = None - - try: - old_settings = termios.tcgetattr(fd) - - try: - tty.setraw(fd) - yield fd - finally: - termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) - sys.stdout.flush() - - if f is not None: - f.close() - except termios.error: - pass - - def getchar(echo: bool) -> str: - with raw_terminal() as fd: - ch = os.read(fd, 32).decode(get_best_encoding(sys.stdin), "replace") - - if echo and isatty(sys.stdout): - sys.stdout.write(ch) - - _translate_ch_to_exc(ch) - return ch diff --git a/spaces/avivdm1/AutoGPT/autogpt/speech/base.py b/spaces/avivdm1/AutoGPT/autogpt/speech/base.py deleted file mode 100644 index d74fa51be75b5078134c510b393a06deb0267b2a..0000000000000000000000000000000000000000 --- a/spaces/avivdm1/AutoGPT/autogpt/speech/base.py +++ /dev/null @@ -1,50 +0,0 @@ -"""Base class for all voice classes.""" -import abc -from threading import Lock - -from autogpt.config import AbstractSingleton - - -class VoiceBase(AbstractSingleton): - """ - Base class for all voice classes. - """ - - def __init__(self): - """ - Initialize the voice class. - """ - self._url = None - self._headers = None - self._api_key = None - self._voices = [] - self._mutex = Lock() - self._setup() - - def say(self, text: str, voice_index: int = 0) -> bool: - """ - Say the given text. - - Args: - text (str): The text to say. - voice_index (int): The index of the voice to use. - """ - with self._mutex: - return self._speech(text, voice_index) - - @abc.abstractmethod - def _setup(self) -> None: - """ - Setup the voices, API key, etc. - """ - pass - - @abc.abstractmethod - def _speech(self, text: str, voice_index: int = 0) -> bool: - """ - Play the given text. - - Args: - text (str): The text to play. - """ - pass diff --git a/spaces/awacke1/Bloom.Generative.Writer/generators/title_to_abstract.py b/spaces/awacke1/Bloom.Generative.Writer/generators/title_to_abstract.py deleted file mode 100644 index a5ff1dda8edc9a75e7befa4d8d7a16efe0722e67..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Bloom.Generative.Writer/generators/title_to_abstract.py +++ /dev/null @@ -1,5 +0,0 @@ - -from .model import model - -def title_to_abstract_generator(template): - return model('title', template) diff --git a/spaces/awacke1/Github-Create-Read-Update-Delete/app.py b/spaces/awacke1/Github-Create-Read-Update-Delete/app.py deleted file mode 100644 index e9f256549f71e2eb8bbab491cdb14477cad4bd3b..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Github-Create-Read-Update-Delete/app.py +++ /dev/null @@ -1,57 +0,0 @@ -#Write a streamlit program in short number of lines that has four buttons on a sidebar also with a textbox labeled Github URL. 
Create python code that performs the four functions with github : Pull & Read, Append and Write, Create a file on github, Delete a file on github. - -import streamlit as st - -st.markdown(""" - -https://pypi.org/project/github3api/ -github3api - -https://pypi.org/project/github-archive/ -# Install tool -pip3 install github-archive - -# Install locally -make install - -""") - - -def pull_files_from_github_url(url): - import os - - #client = GitHubAPI() - #client.get('/rate_limit')['resources']['core'] - - # Using GitPython library - # from git import Repo - - # Get the directory where the file will be downloaded - # dir_name = os.path.basename(url).split('.')[0] - - # Clone the repo - # - dir_name = 'c:\\users\\awacke1\\' - Repo.clone_from(url, dir_name) - - # Get the list of files - # repo = Repo(dir_name) - # return [item.a_path for item in repo.tree.traverse()] - -url = st.sidebar.text_input("GitHub URL") -url = 'https://github.com/AaronCWacker/Yggdrasil' - - -st.sidebar.title('GitHub Functions') - -if st.sidebar.button("Pull & Read"): - pull_files_from_github_url(url) - -if st.sidebar.button("Append & Write"): - pull_files_from_github_url(url) - -if st.sidebar.button("Create File"): - pull_files_from_github_url(url) - -if st.sidebar.button("Delete File"): - pull_files_from_github_url(url) diff --git a/spaces/awacke1/MistralGradioFast/README.md b/spaces/awacke1/MistralGradioFast/README.md deleted file mode 100644 index 60d82cb450ca9ad6e6c5f83cdcfd7fa545490a00..0000000000000000000000000000000000000000 --- a/spaces/awacke1/MistralGradioFast/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: MistralGradioFast -emoji: 👀 -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: 3.47.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/Named-entity-resolution/app.py b/spaces/awacke1/Named-entity-resolution/app.py deleted file mode 100644 index 7fb67b2c343f6cdd0ccd377aaa3956f5c18cb23a..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Named-entity-resolution/app.py +++ /dev/null @@ -1,26 +0,0 @@ -import streamlit as st -import transformers - -# Load the pre-trained NER model -model = transformers.pipeline('ner', model='dslim/bert-base-NER') - -# Define the Streamlit app -def app(): - # Set the app title - st.title('Named Entity Recognition with HuggingFace') - - # Define the input text area - text = st.text_area('Enter some text:', value='', height=200) - - # Define the button to submit the text - if st.button('Submit'): - # Run the NER model on the input text - entities = model(text) - - # Display the named entities in the input text - for entity in entities: - st.write(f'{entity["entity"]}: {entity["word"]}') - -# Run the Streamlit app -if __name__ == '__main__': - app() diff --git a/spaces/awacke1/Streamlit.ChatWikiwriter.Multiplayer/README.md b/spaces/awacke1/Streamlit.ChatWikiwriter.Multiplayer/README.md deleted file mode 100644 index a61338d67504d00a18374442b1768c7d02eb2a52..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Streamlit.ChatWikiwriter.Multiplayer/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Streamlit.ChatWikiwriter.Multiplayer -emoji: ⚡ -colorFrom: green -colorTo: gray -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git 
a/spaces/awacke1/StreamlitClipboardInteraction/app.py b/spaces/awacke1/StreamlitClipboardInteraction/app.py deleted file mode 100644 index 11e8fd22408a85bd542a83b7c1faf3dfce1bedb9..0000000000000000000000000000000000000000 --- a/spaces/awacke1/StreamlitClipboardInteraction/app.py +++ /dev/null @@ -1,22 +0,0 @@ -import streamlit as st -from bokeh.models.widgets import Button -from bokeh.models import CustomJS -from streamlit_bokeh_events import streamlit_bokeh_events -import pandas as pd - -df = pd.DataFrame({"x": [1, 2, 3, 4], "y": ["a", "b", "c", "d"]}) - -st.dataframe(df) - -copy_button = Button(label="Copy Dataframe as Text To Clipboard") -copy_button.js_on_event("button_click", CustomJS(args=dict(df=df.to_csv(sep='\t')), code=""" - navigator.clipboard.writeText(df); - """)) - -no_event = streamlit_bokeh_events( - copy_button, - events="GET_TEXT", - key="get_text", - refresh_on_update=True, - override_height=75, - debounce_time=0) \ No newline at end of file diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/misc/TextureCubeUVNode.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/misc/TextureCubeUVNode.js deleted file mode 100644 index e4f98189f819e82d8b8b36a72bc29afbdfbad6de..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/nodes/misc/TextureCubeUVNode.js +++ /dev/null @@ -1,209 +0,0 @@ -/** - * @author sunag / http://www.sunag.com.br/ - */ - -import { TempNode } from '../core/TempNode.js'; -import { ConstNode } from '../core/ConstNode.js'; -import { StructNode } from '../core/StructNode.js'; -import { FunctionNode } from '../core/FunctionNode.js'; -import { ReflectNode } from '../accessors/ReflectNode.js'; -import { FloatNode } from '../inputs/FloatNode.js'; -import { BlinnExponentToRoughnessNode } from '../bsdfs/BlinnExponentToRoughnessNode.js'; - -function TextureCubeUVNode( uv, textureSize, blinnExponentToRoughness ) { - - TempNode.call( this, 'TextureCubeUVData' ); // TextureCubeUVData is type as StructNode - - this.uv = uv || new ReflectNode( ReflectNode.VECTOR ); - this.textureSize = textureSize || new FloatNode( 1024 ); - this.blinnExponentToRoughness = blinnExponentToRoughness || new BlinnExponentToRoughnessNode(); - -} - -TextureCubeUVNode.Nodes = ( function () { - - var TextureCubeUVData = new StructNode( [ - "struct TextureCubeUVData {", - " vec2 uv_10;", - " vec2 uv_20;", - " float t;", - "}" - ].join( "\n" ) ); - - var getFaceFromDirection = new FunctionNode( [ - "int getFaceFromDirection(vec3 direction) {", - " vec3 absDirection = abs(direction);", - " int face = -1;", - " if( absDirection.x > absDirection.z ) {", - " if(absDirection.x > absDirection.y )", - " face = direction.x > 0.0 ? 0 : 3;", - " else", - " face = direction.y > 0.0 ? 1 : 4;", - " }", - " else {", - " if(absDirection.z > absDirection.y )", - " face = direction.z > 0.0 ? 2 : 5;", - " else", - " face = direction.y > 0.0 ? 
1 : 4;", - " }", - " return face;", - "}" - ].join( "\n" ) ); - - var cubeUV_maxLods1 = new ConstNode( "#define cubeUV_maxLods1 ( log2( cubeUV_textureSize * 0.25 ) - 1.0 )" ); - var cubeUV_rangeClamp = new ConstNode( "#define cubeUV_rangeClamp ( exp2( ( 6.0 - 1.0 ) * 2.0 ) )" ); - - var MipLevelInfo = new FunctionNode( [ - "vec2 MipLevelInfo( vec3 vec, float roughnessLevel, float roughness, in float cubeUV_textureSize ) {", - " float scale = exp2(cubeUV_maxLods1 - roughnessLevel);", - " float dxRoughness = dFdx(roughness);", - " float dyRoughness = dFdy(roughness);", - " vec3 dx = dFdx( vec * scale * dxRoughness );", - " vec3 dy = dFdy( vec * scale * dyRoughness );", - " float d = max( dot( dx, dx ), dot( dy, dy ) );", - // Clamp the value to the max mip level counts. hard coded to 6 mips" - " d = clamp(d, 1.0, cubeUV_rangeClamp);", - " float mipLevel = 0.5 * log2(d);", - " return vec2(floor(mipLevel), fract(mipLevel));", - "}" - ].join( "\n" ), [ cubeUV_maxLods1, cubeUV_rangeClamp ], { derivatives: true } ); - - var cubeUV_maxLods2 = new ConstNode( "#define cubeUV_maxLods2 ( log2( cubeUV_textureSize * 0.25 ) - 2.0 )" ); - var cubeUV_rcpTextureSize = new ConstNode( "#define cubeUV_rcpTextureSize ( 1.0 / cubeUV_textureSize )" ); - - var getCubeUV = new FunctionNode( [ - "vec2 getCubeUV( vec3 direction, float roughnessLevel, float mipLevel, in float cubeUV_textureSize ) {", - " mipLevel = roughnessLevel > cubeUV_maxLods2 - 3.0 ? 0.0 : mipLevel;", - " float a = 16.0 * cubeUV_rcpTextureSize;", - "", - " vec2 exp2_packed = exp2( vec2( roughnessLevel, mipLevel ) );", - " vec2 rcp_exp2_packed = vec2( 1.0 ) / exp2_packed;", - // float powScale = exp2(roughnessLevel + mipLevel);" - " float powScale = exp2_packed.x * exp2_packed.y;", - // float scale = 1.0 / exp2(roughnessLevel + 2.0 + mipLevel);" - " float scale = rcp_exp2_packed.x * rcp_exp2_packed.y * 0.25;", - // float mipOffset = 0.75*(1.0 - 1.0/exp2(mipLevel))/exp2(roughnessLevel);" - " float mipOffset = 0.75*(1.0 - rcp_exp2_packed.y) * rcp_exp2_packed.x;", - "", - " bool bRes = mipLevel == 0.0;", - " scale = bRes && (scale < a) ? a : scale;", - "", - " vec3 r;", - " vec2 offset;", - " int face = getFaceFromDirection(direction);", - "", - " float rcpPowScale = 1.0 / powScale;", - "", - " if( face == 0) {", - " r = vec3(direction.x, -direction.z, direction.y);", - " offset = vec2(0.0+mipOffset,0.75 * rcpPowScale);", - " offset.y = bRes && (offset.y < 2.0*a) ? a : offset.y;", - " }", - " else if( face == 1) {", - " r = vec3(direction.y, direction.x, direction.z);", - " offset = vec2(scale+mipOffset, 0.75 * rcpPowScale);", - " offset.y = bRes && (offset.y < 2.0*a) ? a : offset.y;", - " }", - " else if( face == 2) {", - " r = vec3(direction.z, direction.x, direction.y);", - " offset = vec2(2.0*scale+mipOffset, 0.75 * rcpPowScale);", - " offset.y = bRes && (offset.y < 2.0*a) ? a : offset.y;", - " }", - " else if( face == 3) {", - " r = vec3(direction.x, direction.z, direction.y);", - " offset = vec2(0.0+mipOffset,0.5 * rcpPowScale);", - " offset.y = bRes && (offset.y < 2.0*a) ? 0.0 : offset.y;", - " }", - " else if( face == 4) {", - " r = vec3(direction.y, direction.x, -direction.z);", - " offset = vec2(scale+mipOffset, 0.5 * rcpPowScale);", - " offset.y = bRes && (offset.y < 2.0*a) ? 0.0 : offset.y;", - " }", - " else {", - " r = vec3(direction.z, -direction.x, direction.y);", - " offset = vec2(2.0*scale+mipOffset, 0.5 * rcpPowScale);", - " offset.y = bRes && (offset.y < 2.0*a) ? 
0.0 : offset.y;", - " }", - " r = normalize(r);", - " float texelOffset = 0.5 * cubeUV_rcpTextureSize;", - " vec2 s = ( r.yz / abs( r.x ) + vec2( 1.0 ) ) * 0.5;", - " vec2 base = offset + vec2( texelOffset );", - " return base + s * ( scale - 2.0 * texelOffset );", - "}" - ].join( "\n" ), [ cubeUV_maxLods2, cubeUV_rcpTextureSize, getFaceFromDirection ] ); - - var cubeUV_maxLods3 = new ConstNode( "#define cubeUV_maxLods3 ( log2( cubeUV_textureSize * 0.25 ) - 3.0 )" ); - - var textureCubeUV = new FunctionNode( [ - "TextureCubeUVData textureCubeUV( vec3 reflectedDirection, float roughness, in float cubeUV_textureSize ) {", - " float roughnessVal = roughness * cubeUV_maxLods3;", - " float r1 = floor(roughnessVal);", - " float r2 = r1 + 1.0;", - " float t = fract(roughnessVal);", - " vec2 mipInfo = MipLevelInfo(reflectedDirection, r1, roughness, cubeUV_textureSize);", - " float s = mipInfo.y;", - " float level0 = mipInfo.x;", - " float level1 = level0 + 1.0;", - " level1 = level1 > 5.0 ? 5.0 : level1;", - "", - // round to nearest mipmap if we are not interpolating." - " level0 += min( floor( s + 0.5 ), 5.0 );", - "", - // Tri linear interpolation." - " vec2 uv_10 = getCubeUV(reflectedDirection, r1, level0, cubeUV_textureSize);", - " vec2 uv_20 = getCubeUV(reflectedDirection, r2, level0, cubeUV_textureSize);", - "", - " return TextureCubeUVData(uv_10, uv_20, t);", - "}" - ].join( "\n" ), [ TextureCubeUVData, cubeUV_maxLods3, MipLevelInfo, getCubeUV ] ); - - return { - TextureCubeUVData: TextureCubeUVData, - textureCubeUV: textureCubeUV - }; - -} )(); - -TextureCubeUVNode.prototype = Object.create( TempNode.prototype ); -TextureCubeUVNode.prototype.constructor = TextureCubeUVNode; -TextureCubeUVNode.prototype.nodeType = "TextureCubeUV"; - -TextureCubeUVNode.prototype.generate = function ( builder, output ) { - - if ( builder.isShader( 'fragment' ) ) { - - var textureCubeUV = builder.include( TextureCubeUVNode.Nodes.textureCubeUV ); - - return builder.format( textureCubeUV + '( ' + this.uv.build( builder, 'v3' ) + ', ' + - this.blinnExponentToRoughness.build( builder, 'f' ) + ', ' + - this.textureSize.build( builder, 'f' ) + ' )', this.getType( builder ), output ); - - } else { - - console.warn( "THREE.TextureCubeUVNode is not compatible with " + builder.shader + " shader." ); - - return builder.format( 'vec4( 0.0 )', this.getType( builder ), output ); - - } - -}; - -TextureCubeUVNode.prototype.toJSON = function ( meta ) { - - var data = this.getJSONNode( meta ); - - if ( ! 
data ) { - - data = this.createJSONNode( meta ); - - data.uv = this.uv.toJSON( meta ).uuid; - data.textureSize = this.textureSize.toJSON( meta ).uuid; - data.blinnExponentToRoughness = this.blinnExponentToRoughness.toJSON( meta ).uuid; - - } - - return data; - -}; - -export { TextureCubeUVNode }; diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/fog_pars_vertex.glsl.js b/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/fog_pars_vertex.glsl.js deleted file mode 100644 index 284d460f48543b3b1b192f9f28f46f0882ab7b11..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/fog_pars_vertex.glsl.js +++ /dev/null @@ -1,7 +0,0 @@ -export default /* glsl */` -#ifdef USE_FOG - - varying float fogDepth; - -#endif -`; diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327092728.py b/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327092728.py deleted file mode 100644 index dfc311232833ab0cc1dc8d7bc111303f1ff3bbea..0000000000000000000000000000000000000000 --- a/spaces/beihai/GFPGAN-V1.3-whole-image/.history/app_20220327092728.py +++ /dev/null @@ -1,63 +0,0 @@ -import os -#os.system("pip install gfpgan") - -#os.system("pip freeze") -#os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth -P .") -import random -import gradio as gr -from PIL import Image -import torch -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/a/ab/Abraham_Lincoln_O-77_matte_collodion_print.jpg/1024px-Abraham_Lincoln_O-77_matte_collodion_print.jpg', 'lincoln.jpg') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/5/50/Albert_Einstein_%28Nobel%29.png', 'einstein.png') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/9/9d/Thomas_Edison2.jpg/1024px-Thomas_Edison2.jpg', 'edison.jpg') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/a/a9/Henry_Ford_1888.jpg/1024px-Henry_Ford_1888.jpg', 'Henry.jpg') -# torch.hub.download_url_to_file('https://upload.wikimedia.org/wikipedia/commons/thumb/0/06/Frida_Kahlo%2C_by_Guillermo_Kahlo.jpg/800px-Frida_Kahlo%2C_by_Guillermo_Kahlo.jpg', 'Frida.jpg') - - -import cv2 -import glob -import numpy as np -from basicsr.utils import imwrite -from gfpgan import GFPGANer - -bg_upsampler = None - - - -# set up GFPGAN restorer -restorer = GFPGANer( - model_path='experiments/pretrained_models/GFPGANv1.3.pth', - upscale=2, - arch='clean', - channel_multiplier=2, - bg_upsampler=bg_upsampler) - - -def inference(img): - input_img = cv2.imread(img, cv2.IMREAD_COLOR) - cropped_faces, restored_faces, restored_img = restorer.enhance( - input_img, has_aligned=False, only_center_face=False, paste_back=True) - - #return Image.fromarray(restored_faces[0][:,:,::-1]) - return Image.fromarray(restored_img[:, :, ::-1]) - -title = "让美好回忆更清晰" -description = "上传老照片,点击Submit,稍等片刻,右侧Output框将照片另存为即可。" -article = "

| Github Repo | visitor badge
" -gr.Interface( - inference, - [gr.inputs.Image(type="filepath", label="Input")], - gr.outputs.Image(type="pil", label="Output"), - title=title, - description=description, - article=article, - examples=[ - ['lincoln.jpg'], - ['einstein.png'], - ['edison.jpg'], - ['Henry.jpg'], - ['Frida.jpg'] - ] - ).launch(enable_queue=True,cache_examples=True,share=True) - - diff --git a/spaces/beihai/PDF-Table-Extractor/.history/app_20220620160828.py b/spaces/beihai/PDF-Table-Extractor/.history/app_20220620160828.py deleted file mode 100644 index e5638ae2f7ce8d334984a3f6c44bb36bd3e2e77e..0000000000000000000000000000000000000000 --- a/spaces/beihai/PDF-Table-Extractor/.history/app_20220620160828.py +++ /dev/null @@ -1,42 +0,0 @@ -#-*- coding : utf-8-*- -import pandas as pd -import os,base64,subprocess -os.system("apt-get install -y ghostscript") -import streamlit as st - -from subprocess import STDOUT #os process manipuation - -@st.cache -def gh(): - """install ghostscript on the linux machine""" - proc = subprocess.Popen('apt-get install -y ghostscript', shell=True, stdin=None, stdout=open(os.devnull,"wb"), stderr=STDOUT, executable="/bin/bash") - proc.wait() - -gh() - -import camelot as cam - -st.title("PDF Table Extractor") - -input_pdf = st.file_uploader(label = "", type = 'pdf') - -page_number = st.text_input("请填写表格所在PDF页码,eg: 3", value = 1) - -if input_pdf is not None: - # byte object into a PDF file - with open("input.pdf", "wb") as f: - base64_pdf = base64.b64encode(input_pdf.read()).decode('utf-8') - f.write(base64.b64decode(base64_pdf)) - f.close() - - # read the pdf and parse it using stream - tables = cam.read_pdf("input.pdf", pages=page_number) - result = pd.ExcelWriter('result.xlsx', engine='xlsxwriter') - tables[0].to_excel(result,index=False) - # for i in range(0,len(tables)): - # table = tables[i].df - # sheetname = str(i) - # table.to_excel(result, sheetname,index=False) - - with open('result.xlsx','rb') as f: - st.download_button('提取完成,点击下载!', f,file_name='result.xlsx',mime="application/vnd.ms-excel") \ No newline at end of file diff --git a/spaces/belectron/Seen-Zan5/README.md b/spaces/belectron/Seen-Zan5/README.md deleted file mode 100644 index 1f74bcbc49522e49429a3925abbd1d5050460077..0000000000000000000000000000000000000000 --- a/spaces/belectron/Seen-Zan5/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Seen Zan5 -emoji: ⚡ -colorFrom: red -colorTo: green -sdk: streamlit -sdk_version: 1.15.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/bhanuprasad3245/mygenAIchatbot/app.py b/spaces/bhanuprasad3245/mygenAIchatbot/app.py deleted file mode 100644 index 5da3f3c9d079994a8bc59cd86ed5a8ba0c0db5ee..0000000000000000000000000000000000000000 --- a/spaces/bhanuprasad3245/mygenAIchatbot/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') - -template = """Meet sanju, your youthful and witty personal assistant! At 21 years old, he's full of energy and always eager to help. sanju's goal is to assist you with any questions or problems you might have. his enthusiasm shines through in every response, making interactions with his enjoyable and engaging. 
-{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -def get_text_response(user_message,history): - response = llm_chain.predict(user_message = user_message) - return response - -demo = gr.ChatInterface(get_text_response) - -if __name__ == "__main__": - demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. diff --git a/spaces/bioriAsaeru/text-to-voice/Advanced SystemCare Pro Crack 13.2.0 Keygen Full Download 2020 The Best PC Optimization Tool for Windows.md b/spaces/bioriAsaeru/text-to-voice/Advanced SystemCare Pro Crack 13.2.0 Keygen Full Download 2020 The Best PC Optimization Tool for Windows.md deleted file mode 100644 index 813ed6576eb8c2a64084b05f9c9f6240245b3e37..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Advanced SystemCare Pro Crack 13.2.0 Keygen Full Download 2020 The Best PC Optimization Tool for Windows.md +++ /dev/null @@ -1,6 +0,0 @@ -

Advanced SystemCare Pro Crack 13.2.0 Keygen Full Download 2020


Download File ✶✶✶ https://urloso.com/2uyPxa



- - aaccfb2cb3
-
-
-

diff --git a/spaces/bioriAsaeru/text-to-voice/Eyewitness Movie Free Download Hd.md b/spaces/bioriAsaeru/text-to-voice/Eyewitness Movie Free Download Hd.md deleted file mode 100644 index 43288ec26c10f06e3041aabdafe707c18b9825e3..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Eyewitness Movie Free Download Hd.md +++ /dev/null @@ -1,7 +0,0 @@ -
-

Recallingthis day in Something Like an Autobiography, Kurosawa explains the movieto them. The explanation is reprinted in the booklet that comes with the newCriterion DVD of "Rashomon." Two of the assistants are satisfied withhis explanation, but the third leaves looking puzzled. What he doesn'tunderstand is that while there is an explanation of the film's foureyewitness accounts of a murder, there is not a solution.

-

Eyewitness movie free download hd


Download ››››› https://urloso.com/2uyOP6



-

How should MB handle Internet-only releases? "Alternate Dimension Series 1" by "Steve Coleman and Five Elements" =82845 is available as a free-for-download CD release on the artists' home page -base.com/. Here are the issues:

-

download ?search=9 unlimited Movies and videos Download Here.?search=9 Hd,3gp. mp4 320p and More Videos You Can Download Easyly. tamilrockers and movierulz, tamilgun, filmywap, and pagalworld videos and Movies download.

aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/bradarrML/diffuse-the-rest/build/_app/immutable/chunks/index-032ac624.js b/spaces/bradarrML/diffuse-the-rest/build/_app/immutable/chunks/index-032ac624.js deleted file mode 100644 index 85296fbcdf93ad959ffc0963af842b8640cbb27e..0000000000000000000000000000000000000000 --- a/spaces/bradarrML/diffuse-the-rest/build/_app/immutable/chunks/index-032ac624.js +++ /dev/null @@ -1 +0,0 @@ -function N(){}function H(t,n){for(const e in n)t[e]=n[e];return t}function P(t){return t()}function T(){return Object.create(null)}function p(t){t.forEach(P)}function I(t){return typeof t=="function"}function ot(t,n){return t!=t?n==n:t!==n||t&&typeof t=="object"||typeof t=="function"}let g;function st(t,n){return g||(g=document.createElement("a")),g.href=n,t===g.href}function G(t){return Object.keys(t).length===0}function J(t,...n){if(t==null)return N;const e=t.subscribe(...n);return e.unsubscribe?()=>e.unsubscribe():e}function at(t,n,e){t.$$.on_destroy.push(J(n,e))}function ft(t,n,e,i){if(t){const r=B(t,n,e,i);return t[0](r)}}function B(t,n,e,i){return t[1]&&i?H(e.ctx.slice(),t[1](i(n))):e.ctx}function dt(t,n,e,i){if(t[2]&&i){const r=t[2](i(e));if(n.dirty===void 0)return r;if(typeof r=="object"){const s=[],c=Math.max(n.dirty.length,r.length);for(let o=0;o32){const n=[],e=t.ctx.length/32;for(let i=0;i>1);e(r)<=i?t=r+1:n=r}return t}function W(t){if(t.hydrate_init)return;t.hydrate_init=!0;let n=t.childNodes;if(t.nodeName==="HEAD"){const l=[];for(let u=0;u0&&n[e[r]].claim_order<=u?r+1:R(1,r,y=>n[e[y]].claim_order,u))-1;i[l]=e[f]+1;const a=f+1;e[a]=l,r=Math.max(a,r)}const s=[],c=[];let o=n.length-1;for(let l=e[r]+1;l!=0;l=i[l-1]){for(s.push(n[l-1]);o>=l;o--)c.push(n[o]);o--}for(;o>=0;o--)c.push(n[o]);s.reverse(),c.sort((l,u)=>l.claim_order-u.claim_order);for(let l=0,u=0;l=s[u].claim_order;)u++;const f=ut.removeEventListener(n,e,i)}function bt(t){return function(n){return n.preventDefault(),t.call(this,n)}}function xt(t){return function(n){return n.stopPropagation(),t.call(this,n)}}function vt(t,n,e){e==null?t.removeAttribute(n):t.getAttribute(n)!==e&&t.setAttribute(n,e)}function Z(t){return Array.from(t.childNodes)}function tt(t){t.claim_info===void 0&&(t.claim_info={last_index:0,total_claimed:0})}function D(t,n,e,i,r=!1){tt(t);const s=(()=>{for(let c=t.claim_info.last_index;c=0;c--){const o=t[c];if(n(o)){const l=e(o);return l===void 0?t.splice(c,1):t[c]=l,r?l===void 0&&t.claim_info.last_index--:t.claim_info.last_index=c,o}}return i()})();return s.claim_order=t.claim_info.total_claimed,t.claim_info.total_claimed+=1,s}function L(t,n,e,i){return D(t,r=>r.nodeName===n,r=>{const s=[];for(let c=0;cr.removeAttribute(c))},()=>i(n))}function wt(t,n,e){return L(t,n,e,X)}function Et(t,n,e){return L(t,n,e,Y)}function nt(t,n){return D(t,e=>e.nodeType===3,e=>{const i=""+n;if(e.data.startsWith(i)){if(e.data.length!==i.length)return e.splitText(i.length)}else e.data=i},()=>S(n),!0)}function $t(t){return nt(t," ")}function At(t,n){n=""+n,t.wholeText!==n&&(t.data=n)}function Nt(t,n,e,i){e===null?t.style.removeProperty(n):t.style.setProperty(n,e,i?"important":"")}function et(t,n,{bubbles:e=!1,cancelable:i=!1}={}){const r=document.createEvent("CustomEvent");return r.initCustomEvent(t,e,i,n),r}function St(t,n=document.body){return Array.from(n.querySelectorAll(t))}let m;function h(t){m=t}function j(){if(!m)throw new Error("Function called outside component initialization");return m}function jt(t){j().$$.on_mount.push(t)}function Ct(t){j().$$.after_update.push(t)}function kt(){const 
t=j();return(n,e,{cancelable:i=!1}={})=>{const r=t.$$.callbacks[n];if(r){const s=et(n,e,{cancelable:i});return r.slice().forEach(c=>{c.call(t,s)}),!s.defaultPrevented}return!0}}const _=[],q=[],x=[],M=[],O=Promise.resolve();let $=!1;function z(){$||($=!0,O.then(F))}function Tt(){return z(),O}function A(t){x.push(t)}const E=new Set;let b=0;function F(){const t=m;do{for(;b<_.length;){const n=_[b];b++,h(n),it(n.$$)}for(h(null),_.length=0,b=0;q.length;)q.pop()();for(let n=0;n{v.delete(t),i&&(e&&t.d(1),i())}),t.o(n)}else i&&i()}const Bt=typeof window<"u"?window:typeof globalThis<"u"?globalThis:global;function Dt(t){t&&t.c()}function Lt(t,n){t&&t.l(n)}function ct(t,n,e,i){const{fragment:r,on_mount:s,on_destroy:c,after_update:o}=t.$$;r&&r.m(n,e),i||A(()=>{const l=s.map(P).filter(I);c?c.push(...l):p(l),t.$$.on_mount=[]}),o.forEach(A)}function lt(t,n){const e=t.$$;e.fragment!==null&&(p(e.on_destroy),e.fragment&&e.fragment.d(n),e.on_destroy=e.fragment=null,e.ctx=[])}function ut(t,n){t.$$.dirty[0]===-1&&(_.push(t),z(),t.$$.dirty.fill(0)),t.$$.dirty[n/31|0]|=1<{const k=C.length?C[0]:y;return u.ctx&&r(u.ctx[a],u.ctx[a]=k)&&(!u.skip_bound&&u.bound[a]&&u.bound[a](k),f&&ut(t,a)),y}):[],u.update(),f=!0,p(u.before_update),u.fragment=i?i(u.ctx):!1,n.target){if(n.hydrate){K();const a=Z(n.target);u.fragment&&u.fragment.l(a),a.forEach(V)}else u.fragment&&u.fragment.c();n.intro&&rt(t.$$.fragment),ct(t,n.target,n.anchor,n.customElement),Q(),F()}h(l)}class zt{$destroy(){lt(this,1),this.$destroy=N}$on(n,e){const i=this.$$.callbacks[n]||(this.$$.callbacks[n]=[]);return i.push(e),()=>{const r=i.indexOf(e);r!==-1&&i.splice(r,1)}}$set(n){this.$$set&&!G(n)&&(this.$$.skip_bound=!0,this.$$set(n),this.$$.skip_bound=!1)}}export{N as A,ft as B,_t as C,ht as D,dt as E,U as F,at as G,Y as H,Et as I,kt as J,gt as K,St as L,st as M,xt as N,bt as O,p as P,Bt as Q,A as R,zt as S,q as T,pt as a,mt as b,$t as c,Mt as d,yt as e,rt as f,qt as g,V as h,Ot as i,Ct as j,X as k,wt as l,Z as m,vt as n,jt as o,Nt as p,S as q,nt as r,ot as s,Pt as t,At as u,Dt as v,Lt as w,ct as x,lt as y,Tt as z}; diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/evaluation/__init__.py b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/evaluation/__init__.py deleted file mode 100644 index e5ae1f20cdc822ebf3c870f1289a0ad210c57ae7..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/evaluation/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. - -from .evaluator import DensePoseCOCOEvaluator diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/doc/TOOL_QUERY_DB.md b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/doc/TOOL_QUERY_DB.md deleted file mode 100644 index b0a764b8740597c6af634127b80b53d28913726f..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/doc/TOOL_QUERY_DB.md +++ /dev/null @@ -1,105 +0,0 @@ - -# Query Dataset - -`query_db` is a tool to print or visualize DensePose data from a dataset. -It has two modes: `print` and `show` to output dataset entries to standard -output or to visualize them on images. - -## Print Mode - -The general command form is: -```bash -python query_db.py print [-h] [-v] [--max-entries N] -``` - -There are two mandatory arguments: - - ``, DensePose dataset specification, from which to select - the entries (e.g. `densepose_coco_2014_train`). 
- `<selector>`, dataset entry selector which can be a single specification, - or a comma-separated list of specifications of the form - `field[:type]=value` for exact match with the value - or `field[:type]=min-max` for a range of values - -One can additionally limit the maximum number of entries to output -by providing `--max-entries` argument. - -Examples: - -1. Output at most 10 first entries from the `densepose_coco_2014_train` dataset: -```bash -python query_db.py print densepose_coco_2014_train \* --max-entries 10 -v -``` - -2. Output all entries with `file_name` equal to `COCO_train2014_000000000036.jpg`: -```bash -python query_db.py print densepose_coco_2014_train file_name=COCO_train2014_000000000036.jpg -v -``` - -3. Output all entries with `image_id` between 36 and 156: -```bash -python query_db.py print densepose_coco_2014_train image_id:int=36-156 -v -``` - -## Visualization Mode - -The general command form is: -```bash -python query_db.py show [-h] [-v] [--max-entries N] [--output <output_file>] <dataset_name> <selector> <visualizations> -``` - -There are three mandatory arguments: - - `<dataset_name>`, DensePose dataset specification, from which to select - the entries (e.g. `densepose_coco_2014_train`). - - `<selector>`, dataset entry selector which can be a single specification, - or a comma-separated list of specifications of the form - `field[:type]=value` for exact match with the value - or `field[:type]=min-max` for a range of values - - `<visualizations>`, visualizations specifier; currently available visualizations are: - * `bbox` - bounding boxes of annotated persons; - * `dp_i` - annotated points colored according to the containing part; - * `dp_pts` - annotated points in green color; - * `dp_segm` - segmentation masks for annotated persons; - * `dp_u` - annotated points colored according to their U coordinate in part parameterization; - * `dp_v` - annotated points colored according to their V coordinate in part parameterization; - -One can additionally provide one of the two optional arguments: - - `--max-entries` to limit the maximum number of entries to visualize - - `--output` to provide visualization file name template, which defaults - to `output.png`. To distinguish file names for different dataset - entries, the tool appends 1-based entry index to the output file name, - e.g. output.0001.png, output.0002.png, etc. - -The following examples show how to output different visualizations for image with `id = 322` -from `densepose_coco_2014_train` dataset: - -1. Show bounding box and segmentation: -```bash -python query_db.py show densepose_coco_2014_train image_id:int=322 bbox,dp_segm -v -``` -![Bounding Box + Segmentation Visualization](images/vis_bbox_dp_segm.jpg) - -2. Show bounding box and points colored according to the containing part: -```bash -python query_db.py show densepose_coco_2014_train image_id:int=322 bbox,dp_i -v -``` -![Bounding Box + Point Label Visualization](images/vis_bbox_dp_i.jpg) - -3. Show bounding box and annotated points in green color: -```bash -python query_db.py show densepose_coco_2014_train image_id:int=322 bbox,dp_pts -v -``` -![Bounding Box + Point Visualization](images/vis_bbox_dp_pts.jpg) - -4. Show bounding box and annotated points colored according to their U coordinate in part parameterization: -```bash -python query_db.py show densepose_coco_2014_train image_id:int=322 bbox,dp_u -v -``` -![Bounding Box + Point U Visualization](images/vis_bbox_dp_u.jpg) - -5. 
Show bounding box and annotated points colored according to their V coordinate in part parameterization: -```bash -python query_db.py show densepose_coco_2014_train image_id:int=322 bbox,dp_v -v -``` -![Bounding Box + Point V Visualization](images/vis_bbox_dp_v.jpg) - - diff --git a/spaces/cactusAtSea/influencerGPT/README.md b/spaces/cactusAtSea/influencerGPT/README.md deleted file mode 100644 index 0a321b50b451d8e8a678c28f98ba0f9e28fe2a87..0000000000000000000000000000000000000000 --- a/spaces/cactusAtSea/influencerGPT/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: InfluencerGPT -emoji: 👁 -colorFrom: red -colorTo: pink -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: unknown ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/caffeinum/VToonify/vtoonify/model/stylegan/op_gpu/upfirdn2d.py b/spaces/caffeinum/VToonify/vtoonify/model/stylegan/op_gpu/upfirdn2d.py deleted file mode 100644 index 3a12f15b3c2347194e3bf0fdfda736415693775f..0000000000000000000000000000000000000000 --- a/spaces/caffeinum/VToonify/vtoonify/model/stylegan/op_gpu/upfirdn2d.py +++ /dev/null @@ -1,209 +0,0 @@ -from collections import abc -import os - -import torch -from torch.nn import functional as F -from torch.autograd import Function -from torch.utils.cpp_extension import load - - -module_path = os.path.dirname(__file__) -upfirdn2d_op = load( - "upfirdn2d", - sources=[ - os.path.join(module_path, "upfirdn2d.cpp"), - os.path.join(module_path, "upfirdn2d_kernel.cu"), - ], -) - - -class UpFirDn2dBackward(Function): - @staticmethod - def forward( - ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size - ): - - up_x, up_y = up - down_x, down_y = down - g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad - - grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) - - grad_input = upfirdn2d_op.upfirdn2d( - grad_output, - grad_kernel, - down_x, - down_y, - up_x, - up_y, - g_pad_x0, - g_pad_x1, - g_pad_y0, - g_pad_y1, - ) - grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) - - ctx.save_for_backward(kernel) - - pad_x0, pad_x1, pad_y0, pad_y1 = pad - - ctx.up_x = up_x - ctx.up_y = up_y - ctx.down_x = down_x - ctx.down_y = down_y - ctx.pad_x0 = pad_x0 - ctx.pad_x1 = pad_x1 - ctx.pad_y0 = pad_y0 - ctx.pad_y1 = pad_y1 - ctx.in_size = in_size - ctx.out_size = out_size - - return grad_input - - @staticmethod - def backward(ctx, gradgrad_input): - kernel, = ctx.saved_tensors - - gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1) - - gradgrad_out = upfirdn2d_op.upfirdn2d( - gradgrad_input, - kernel, - ctx.up_x, - ctx.up_y, - ctx.down_x, - ctx.down_y, - ctx.pad_x0, - ctx.pad_x1, - ctx.pad_y0, - ctx.pad_y1, - ) - # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3]) - gradgrad_out = gradgrad_out.view( - ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1] - ) - - return gradgrad_out, None, None, None, None, None, None, None, None - - -class UpFirDn2d(Function): - @staticmethod - def forward(ctx, input, kernel, up, down, pad): - up_x, up_y = up - down_x, down_y = down - pad_x0, pad_x1, pad_y0, pad_y1 = pad - - kernel_h, kernel_w = kernel.shape - batch, channel, in_h, in_w = input.shape - ctx.in_size = input.shape - - input = input.reshape(-1, in_h, in_w, 1) - - ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h + down_y) // 
down_y - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w + down_x) // down_x - ctx.out_size = (out_h, out_w) - - ctx.up = (up_x, up_y) - ctx.down = (down_x, down_y) - ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1) - - g_pad_x0 = kernel_w - pad_x0 - 1 - g_pad_y0 = kernel_h - pad_y0 - 1 - g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 - g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 - - ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) - - out = upfirdn2d_op.upfirdn2d( - input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 - ) - # out = out.view(major, out_h, out_w, minor) - out = out.view(-1, channel, out_h, out_w) - - return out - - @staticmethod - def backward(ctx, grad_output): - kernel, grad_kernel = ctx.saved_tensors - - grad_input = None - - if ctx.needs_input_grad[0]: - grad_input = UpFirDn2dBackward.apply( - grad_output, - kernel, - grad_kernel, - ctx.up, - ctx.down, - ctx.pad, - ctx.g_pad, - ctx.in_size, - ctx.out_size, - ) - - return grad_input, None, None, None, None - - -def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): - if not isinstance(up, abc.Iterable): - up = (up, up) - - if not isinstance(down, abc.Iterable): - down = (down, down) - - if len(pad) == 2: - pad = (pad[0], pad[1], pad[0], pad[1]) - - if input.device.type == "cpu": - out = upfirdn2d_native(input, kernel, *up, *down, *pad) - - else: - out = UpFirDn2d.apply(input, kernel, up, down, pad) - - return out - - -def upfirdn2d_native( - input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 -): - _, channel, in_h, in_w = input.shape - input = input.reshape(-1, in_h, in_w, 1) - - _, in_h, in_w, minor = input.shape - kernel_h, kernel_w = kernel.shape - - out = input.view(-1, in_h, 1, in_w, 1, minor) - out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) - out = out.view(-1, in_h * up_y, in_w * up_x, minor) - - out = F.pad( - out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)] - ) - out = out[ - :, - max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0), - max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0), - :, - ] - - out = out.permute(0, 3, 1, 2) - out = out.reshape( - [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1] - ) - w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) - out = F.conv2d(out, w) - out = out.reshape( - -1, - minor, - in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, - in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, - ) - out = out.permute(0, 2, 3, 1) - out = out[:, ::down_y, ::down_x, :] - - out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h + down_y) // down_y - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w + down_x) // down_x - - return out.view(-1, channel, out_h, out_w) diff --git a/spaces/cahya/image-search/app.py b/spaces/cahya/image-search/app.py deleted file mode 100644 index 600ebcd591453417fddcae680685330fda917c07..0000000000000000000000000000000000000000 --- a/spaces/cahya/image-search/app.py +++ /dev/null @@ -1,88 +0,0 @@ -import streamlit as st -import SessionState -from prompts import PROMPT_LIST -from wit_index import WitIndex -import random -import time - -# st.set_page_config(page_title="Image Search") - -# vector_length = 128 -wit_index_path = f"./models/wit_faiss.idx" -model_name = f"./models/distilbert-base-wit" -wit_dataset_path = "./models/wit_dataset.pkl" - - -@st.cache(suppress_st_warning=True, allow_output_mutation=True) -def get_wit_index(): - st.write("Loading the WIT index, dataset and the DistillBERT model..") - wit_index = WitIndex(wit_index_path, 
model_name, wit_dataset_path, gpu=False) - return wit_index - -# st.cache is disabled temporarily because the inference could take forever using newer streamlit version. -# @st.cache(suppress_st_warning=True) -def process(text: str, top_k: int = 10): - # st.write("Cache miss: process") - distance, index, image_info = wit_index.search(text, top_k=top_k) - return distance, index, image_info - - -st.title("Image Search") - -st.markdown( - """ - This application is a demo for sentence-based image search using - [WIT dataset](https://github.com/google-research-datasets/wit). We use DistillBert to encode the sentences - and Facebook's Faiss to search the vector embeddings. - """ -) -session_state = SessionState.get(prompt=None, prompt_box=None, text=None) -ALL_PROMPTS = list(PROMPT_LIST.keys())+["Custom"] -prompt = st.selectbox('Prompt', ALL_PROMPTS, index=len(ALL_PROMPTS)-1) -# Update prompt -if session_state.prompt is None: - session_state.prompt = prompt -elif session_state.prompt is not None and (prompt != session_state.prompt): - session_state.prompt = prompt - session_state.prompt_box = None - session_state.text = None -else: - session_state.prompt = prompt - -# Update prompt box -if session_state.prompt == "Custom": - session_state.prompt_box = "Enter your text here" -else: - if session_state.prompt is not None and session_state.prompt_box is None: - session_state.prompt_box = random.choice(PROMPT_LIST[session_state.prompt]) - -session_state.text = st.text_area("Enter text", session_state.prompt_box) - -top_k = st.sidebar.number_input( - "Top k", - value=6, - min_value=1, - max_value=10 -) - -wit_index = get_wit_index() -if st.button("Run"): - with st.spinner(text="Getting results..."): - st.subheader("Result") - time_start = time.time() - distances, index, image_info = process(text=session_state.text, top_k=int(top_k)) - time_end = time.time() - time_diff = time_end-time_start - print(f"Search in {time_diff} seconds") - st.markdown(f"*Search in {time_diff:.5f} seconds*") - for i, distance in enumerate(distances): - try: - st.image(image_info[i][0].replace("http:", "https:"), width=400) - except FileNotFoundError: - st.write(f"{image_info[i][0]} can't be displayed") - st.write(f"{image_info[i][1]}. 
(D: {distance:.2f})") - - # Reset state - session_state.prompt = None - session_state.prompt_box = None - session_state.text = None diff --git a/spaces/camillevanhoffelen/langchain-HuggingGPT/hugginggpt/model_selection.py b/spaces/camillevanhoffelen/langchain-HuggingGPT/hugginggpt/model_selection.py deleted file mode 100644 index 94f0b3584cba8e1cf248226c8f3c304392c09cd1..0000000000000000000000000000000000000000 --- a/spaces/camillevanhoffelen/langchain-HuggingGPT/hugginggpt/model_selection.py +++ /dev/null @@ -1,97 +0,0 @@ -import asyncio -import json -import logging - -import aiohttp -from langchain import LLMChain -from langchain.llms.base import BaseLLM -from langchain.output_parsers import OutputFixingParser, PydanticOutputParser -from langchain.prompts import load_prompt -from pydantic import BaseModel, Field - -from hugginggpt.exceptions import ModelSelectionException, async_wrap_exceptions -from hugginggpt.model_scraper import get_top_k_models -from hugginggpt.resources import get_prompt_resource -from hugginggpt.task_parsing import Task - -logger = logging.getLogger(__name__) - - -class Model(BaseModel): - id: str = Field(description="ID of the model") - reason: str = Field(description="Reason for selecting this model") - - -async def select_hf_models( - user_input: str, - tasks: list[Task], - model_selection_llm: BaseLLM, - output_fixing_llm: BaseLLM, -) -> dict[int, Model]: - """Use LLM agent to select the best available HuggingFace model for each task, given model metadata. - Runs concurrently.""" - async with aiohttp.ClientSession() as session: - async with asyncio.TaskGroup() as tg: - aio_tasks = [] - for task in tasks: - aio_tasks.append( - tg.create_task( - select_model( - user_input=user_input, - task=task, - model_selection_llm=model_selection_llm, - output_fixing_llm=output_fixing_llm, - session=session, - ) - ) - ) - results = await asyncio.gather(*aio_tasks) - return {task_id: model for task_id, model in results} - - -@async_wrap_exceptions(ModelSelectionException, "Failed to select model") -async def select_model( - user_input: str, - task: Task, - model_selection_llm: BaseLLM, - output_fixing_llm: BaseLLM, - session: aiohttp.ClientSession, -) -> (int, Model): - logger.info(f"Starting model selection for task: {task.task}") - - top_k_models = await get_top_k_models( - task=task.task, top_k=5, max_description_length=100, session=session - ) - - if task.task in [ - "summarization", - "translation", - "conversational", - "text-generation", - "text2text-generation", - ]: - model = Model( - id="openai", - reason="Text generation tasks are best handled by OpenAI models", - ) - else: - prompt_template = load_prompt( - get_prompt_resource("model-selection-prompt.json") - ) - llm_chain = LLMChain(prompt=prompt_template, llm=model_selection_llm) - # Need to replace double quotes with single quotes for correct response generation - task_str = task.json().replace('"', "'") - models_str = json.dumps(top_k_models).replace('"', "'") - output = await llm_chain.apredict( - user_input=user_input, task=task_str, models=models_str, stop=[""] - ) - logger.debug(f"Model selection raw output: {output}") - - parser = PydanticOutputParser(pydantic_object=Model) - fixing_parser = OutputFixingParser.from_llm( - parser=parser, llm=output_fixing_llm - ) - model = fixing_parser.parse(output) - - logger.info(f"For task: {task.task}, selected model: {model}") - return task.id, model diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/utils/transform.py 
b/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/utils/transform.py deleted file mode 100644 index 8dc4ae7be878302ec39b7f235e3ae1b7a3ca29ee..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/utils/transform.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from detectron2.data import MetadataCatalog -from detectron2.utils.file_io import PathManager - -from densepose import DensePoseTransformData - - -def load_for_dataset(dataset_name): - path = MetadataCatalog.get(dataset_name).densepose_transform_src - densepose_transform_data_fpath = PathManager.get_local_path(path) - return DensePoseTransformData.load(densepose_transform_data_fpath) - - -def load_from_cfg(cfg): - return load_for_dataset(cfg.DATASETS.TEST[0]) diff --git a/spaces/cc1799/vits-uma-genshin-honkai/models.py b/spaces/cc1799/vits-uma-genshin-honkai/models.py deleted file mode 100644 index 8353b867f441de7e4d05aef980e672899c3a8889..0000000000000000000000000000000000000000 --- a/spaces/cc1799/vits-uma-genshin-honkai/models.py +++ /dev/null @@ -1,533 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. 
- self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, 
in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - self.emb = nn.Embedding(n_vocab, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths): - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, 
resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - 
norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None): - - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = 
torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:,:,:max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers have to be larger than 0." - g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) - diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/codeparrot/examples/README.md b/spaces/chendl/compositional_test/transformers/examples/research_projects/codeparrot/examples/README.md deleted file mode 100644 index c1980262d8275b9e0a9abe3d5a00d3c955ddb73d..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/research_projects/codeparrot/examples/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# Examples -In this folder we showcase some examples to use code models for downstream tasks. 
- -## Complexity prediction -In this task we want to predict the complexity of Java programs in the [CodeComplex](https://huggingface.co/datasets/codeparrot/codecomplex) dataset. Using Hugging Face `trainer`, we finetuned [multilingual CodeParrot](https://huggingface.co/codeparrot/codeparrot-small-multi) and [UniXcoder](https://huggingface.co/microsoft/unixcoder-base-nine) on it, and we used the latter to build this Java complexity prediction [space](https://huggingface.co/spaces/codeparrot/code-complexity-predictor) on the Hugging Face Hub. - -To fine-tune a model on this dataset you can use the following commands: - -```bash -python train_complexity_predictor.py \ - --model_ckpt microsoft/unixcoder-base-nine \ - --num_epochs 60 \ - --num_warmup_steps 10 \ - --batch_size 8 \ - --learning_rate 5e-4 -``` - -## Code generation: text to Python -In this task we want to train a model to generate code from English text. We finetuned Codeparrot-small on [github-jupyter-text-to-code](https://huggingface.co/datasets/codeparrot/github-jupyter-text-to-code), a dataset where the samples are a succession of docstrings and their Python code, originally extracted from Jupyter notebooks parsed in this [dataset](https://huggingface.co/datasets/codeparrot/github-jupyter-parsed). - -To fine-tune a model on this dataset we use the same [script](https://github.com/huggingface/transformers/blob/main/examples/research_projects/codeparrot/scripts/codeparrot_training.py) as the pretraining of codeparrot: - -```bash -accelerate launch scripts/codeparrot_training.py \ - --model_ckpt codeparrot/codeparrot-small \ - --dataset_name_train codeparrot/github-jupyter-text-to-code \ - --dataset_name_valid codeparrot/github-jupyter-text-to-code \ - --train_batch_size 12 \ - --valid_batch_size 12 \ - --learning_rate 5e-4 \ - --num_warmup_steps 100 \ - --gradient_accumulation 1 \ - --gradient_checkpointing False \ - --max_train_steps 3000 \ - --save_checkpoint_steps 200 \ - --save_dir jupyter-text-to-python -``` - -## Code explanation: Python to text -In this task we want to train a model to explain Python code. We finetuned Codeparrot-small on [github-jupyter-code-to-text](https://huggingface.co/datasets/codeparrot/github-jupyter-code-to-text), a dataset where the samples are a succession of Python code and its explanation as a docstring. We just inverted the order of the text and code pairs in the github-jupyter-code-to-text dataset and added the delimiters "Explanation:" and "End of explanation" inside the docstrings.
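Before launching the fine-tuning command below, it can help to eyeball a couple of samples and confirm that the "Explanation:" / "End of explanation" delimiters are present. Here is a minimal sketch using the 🤗 `datasets` library; the `train` split name is an assumption, and we only print the keys rather than guessing at column names:

```python
from datasets import load_dataset

# Stream the dataset so nothing has to be downloaded in full; the "train" split
# name is an assumption and may need adjusting.
ds = load_dataset("codeparrot/github-jupyter-code-to-text", split="train", streaming=True)

# Peek at the first sample; printing the keys avoids assuming a particular schema.
sample = next(iter(ds))
print(sample.keys())
```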
- -To fine-tune a model on this dataset we use the same [script](https://github.com/huggingface/transformers/blob/main/examples/research_projects/codeparrot/scripts/codeparrot_training.py) as the pretraining of codeparrot: - -```bash -accelerate launch scripts/codeparrot_training.py \ - --model_ckpt codeparrot/codeparrot-small \ - --dataset_name_train codeparrot/github-jupyter-code-to-text \ - --dataset_name_valid codeparrot/github-jupyter-code-to-text \ - --train_batch_size 12 \ - --valid_batch_size 12 \ - --learning_rate 5e-4 \ - --num_warmup_steps 100 \ - --gradient_accumulation 1 \ - --gradient_checkpointing False \ - --max_train_steps 3000 \ - --save_checkpoint_steps 200 \ - --save_dir jupyter-python-to-text -``` \ No newline at end of file diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/longform-qa/README.md b/spaces/chendl/compositional_test/transformers/examples/research_projects/longform-qa/README.md deleted file mode 100644 index eaa29d4542260c553c2403f311c2ee45df6c3fd9..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/research_projects/longform-qa/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Long Form Question Answering - -Author: @yjernite - -This folder contains the code for the Long Form Question Answering [demo](http://35.226.96.115:8080/) as well as methods to train and use a fully end-to-end Long Form Question Answering system using the [🤗transformers](https://github.com/huggingface/transformers) and [🤗datasets](https://github.com/huggingface/datasets) libraries. - -You can use these methods to train your own system by following along with the associated [notebook](https://github.com/huggingface/notebooks/blob/master/longform-qa/Long_Form_Question_Answering_with_ELI5_and_Wikipedia.ipynb) or [blog post](https://yjernite.github.io/lfqa.html). diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/feaLib/parser.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/feaLib/parser.py deleted file mode 100644 index 49667f4503e15be8c00388a72cb0d428dc7dafe9..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/feaLib/parser.py +++ /dev/null @@ -1,2365 +0,0 @@ -from fontTools.feaLib.error import FeatureLibError -from fontTools.feaLib.lexer import Lexer, IncludingLexer, NonIncludingLexer -from fontTools.feaLib.variableScalar import VariableScalar -from fontTools.misc.encodingTools import getEncoding -from fontTools.misc.textTools import bytechr, tobytes, tostr -import fontTools.feaLib.ast as ast -import logging -import os -import re - - -log = logging.getLogger(__name__) - - -class Parser(object): - """Initializes a Parser object. - - Example: - - .. code:: python - - from fontTools.feaLib.parser import Parser - parser = Parser(file, font.getReverseGlyphMap()) - parsetree = parser.parse() - - Note: the ``glyphNames`` iterable serves a double role to help distinguish - glyph names from ranges in the presence of hyphens and to ensure that glyph - names referenced in a feature file are actually part of a font's glyph set. - If the iterable is left empty, no glyph name in glyph set checking takes - place, and all glyph tokens containing hyphens are treated as literal glyph - names, not as ranges. (Adding a space around the hyphen can, in any case, - help to disambiguate ranges from glyph names containing hyphens.)
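    A short illustration of the hyphen handling described above (a sketch for
    illustration only; the feature snippet and glyph set below are hypothetical):

    .. code:: python

        from io import StringIO
        from fontTools.feaLib.parser import Parser

        # With a glyph set, "a-z" inside a class is resolved as the range a..z.
        glyph_set = [chr(c) for c in range(ord("a"), ord("z") + 1)]
        doc = Parser(StringIO("@lower = [a-z];"), glyphNames=glyph_set).parse()
        print(doc.asFea())
        # With an empty glyph set, "a-z" would instead be treated as a literal
        # glyph name.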
- - By default, the parser will follow ``include()`` statements in the feature - file. To turn this off, pass ``followIncludes=False``. Pass a directory string as - ``includeDir`` to explicitly declare a directory to search included feature files - in. - """ - - extensions = {} - ast = ast - SS_FEATURE_TAGS = {"ss%02d" % i for i in range(1, 20 + 1)} - CV_FEATURE_TAGS = {"cv%02d" % i for i in range(1, 99 + 1)} - - def __init__( - self, featurefile, glyphNames=(), followIncludes=True, includeDir=None, **kwargs - ): - - if "glyphMap" in kwargs: - from fontTools.misc.loggingTools import deprecateArgument - - deprecateArgument("glyphMap", "use 'glyphNames' (iterable) instead") - if glyphNames: - raise TypeError( - "'glyphNames' and (deprecated) 'glyphMap' are " "mutually exclusive" - ) - glyphNames = kwargs.pop("glyphMap") - if kwargs: - raise TypeError( - "unsupported keyword argument%s: %s" - % ("" if len(kwargs) == 1 else "s", ", ".join(repr(k) for k in kwargs)) - ) - - self.glyphNames_ = set(glyphNames) - self.doc_ = self.ast.FeatureFile() - self.anchors_ = SymbolTable() - self.glyphclasses_ = SymbolTable() - self.lookups_ = SymbolTable() - self.valuerecords_ = SymbolTable() - self.symbol_tables_ = {self.anchors_, self.valuerecords_} - self.next_token_type_, self.next_token_ = (None, None) - self.cur_comments_ = [] - self.next_token_location_ = None - lexerClass = IncludingLexer if followIncludes else NonIncludingLexer - self.lexer_ = lexerClass(featurefile, includeDir=includeDir) - self.missing = {} - self.advance_lexer_(comments=True) - - def parse(self): - """Parse the file, and return a :class:`fontTools.feaLib.ast.FeatureFile` - object representing the root of the abstract syntax tree containing the - parsed contents of the file.""" - statements = self.doc_.statements - while self.next_token_type_ is not None or self.cur_comments_: - self.advance_lexer_(comments=True) - if self.cur_token_type_ is Lexer.COMMENT: - statements.append( - self.ast.Comment(self.cur_token_, location=self.cur_token_location_) - ) - elif self.is_cur_keyword_("include"): - statements.append(self.parse_include_()) - elif self.cur_token_type_ is Lexer.GLYPHCLASS: - statements.append(self.parse_glyphclass_definition_()) - elif self.is_cur_keyword_(("anon", "anonymous")): - statements.append(self.parse_anonymous_()) - elif self.is_cur_keyword_("anchorDef"): - statements.append(self.parse_anchordef_()) - elif self.is_cur_keyword_("languagesystem"): - statements.append(self.parse_languagesystem_()) - elif self.is_cur_keyword_("lookup"): - statements.append(self.parse_lookup_(vertical=False)) - elif self.is_cur_keyword_("markClass"): - statements.append(self.parse_markClass_()) - elif self.is_cur_keyword_("feature"): - statements.append(self.parse_feature_block_()) - elif self.is_cur_keyword_("conditionset"): - statements.append(self.parse_conditionset_()) - elif self.is_cur_keyword_("variation"): - statements.append(self.parse_feature_block_(variation=True)) - elif self.is_cur_keyword_("table"): - statements.append(self.parse_table_()) - elif self.is_cur_keyword_("valueRecordDef"): - statements.append(self.parse_valuerecord_definition_(vertical=False)) - elif ( - self.cur_token_type_ is Lexer.NAME - and self.cur_token_ in self.extensions - ): - statements.append(self.extensions[self.cur_token_](self)) - elif self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == ";": - continue - else: - raise FeatureLibError( - "Expected feature, languagesystem, lookup, markClass, " - 'table, or glyph class definition, got {} 
"{}"'.format( - self.cur_token_type_, self.cur_token_ - ), - self.cur_token_location_, - ) - # Report any missing glyphs at the end of parsing - if self.missing: - error = [ - " %s (first found at %s)" % (name, loc) - for name, loc in self.missing.items() - ] - raise FeatureLibError( - "The following glyph names are referenced but are missing from the " - "glyph set:\n" + ("\n".join(error)), - None, - ) - return self.doc_ - - def parse_anchor_(self): - # Parses an anchor in any of the four formats given in the feature - # file specification (2.e.vii). - self.expect_symbol_("<") - self.expect_keyword_("anchor") - location = self.cur_token_location_ - - if self.next_token_ == "NULL": # Format D - self.expect_keyword_("NULL") - self.expect_symbol_(">") - return None - - if self.next_token_type_ == Lexer.NAME: # Format E - name = self.expect_name_() - anchordef = self.anchors_.resolve(name) - if anchordef is None: - raise FeatureLibError( - 'Unknown anchor "%s"' % name, self.cur_token_location_ - ) - self.expect_symbol_(">") - return self.ast.Anchor( - anchordef.x, - anchordef.y, - name=name, - contourpoint=anchordef.contourpoint, - xDeviceTable=None, - yDeviceTable=None, - location=location, - ) - - x, y = self.expect_number_(variable=True), self.expect_number_(variable=True) - - contourpoint = None - if self.next_token_ == "contourpoint": # Format B - self.expect_keyword_("contourpoint") - contourpoint = self.expect_number_() - - if self.next_token_ == "<": # Format C - xDeviceTable = self.parse_device_() - yDeviceTable = self.parse_device_() - else: - xDeviceTable, yDeviceTable = None, None - - self.expect_symbol_(">") - return self.ast.Anchor( - x, - y, - name=None, - contourpoint=contourpoint, - xDeviceTable=xDeviceTable, - yDeviceTable=yDeviceTable, - location=location, - ) - - def parse_anchor_marks_(self): - # Parses a sequence of ``[ mark @MARKCLASS]*.`` - anchorMarks = [] # [(self.ast.Anchor, markClassName)*] - while self.next_token_ == "<": - anchor = self.parse_anchor_() - if anchor is None and self.next_token_ != "mark": - continue # without mark, eg. in GPOS type 5 - self.expect_keyword_("mark") - markClass = self.expect_markClass_reference_() - anchorMarks.append((anchor, markClass)) - return anchorMarks - - def parse_anchordef_(self): - # Parses a named anchor definition (`section 2.e.viii `_). - assert self.is_cur_keyword_("anchorDef") - location = self.cur_token_location_ - x, y = self.expect_number_(), self.expect_number_() - contourpoint = None - if self.next_token_ == "contourpoint": - self.expect_keyword_("contourpoint") - contourpoint = self.expect_number_() - name = self.expect_name_() - self.expect_symbol_(";") - anchordef = self.ast.AnchorDefinition( - name, x, y, contourpoint=contourpoint, location=location - ) - self.anchors_.define(name, anchordef) - return anchordef - - def parse_anonymous_(self): - # Parses an anonymous data block (`section 10 `_). 
- assert self.is_cur_keyword_(("anon", "anonymous")) - tag = self.expect_tag_() - _, content, location = self.lexer_.scan_anonymous_block(tag) - self.advance_lexer_() - self.expect_symbol_("}") - end_tag = self.expect_tag_() - assert tag == end_tag, "bad splitting in Lexer.scan_anonymous_block()" - self.expect_symbol_(";") - return self.ast.AnonymousBlock(tag, content, location=location) - - def parse_attach_(self): - # Parses a GDEF Attach statement (`section 9.b `_) - assert self.is_cur_keyword_("Attach") - location = self.cur_token_location_ - glyphs = self.parse_glyphclass_(accept_glyphname=True) - contourPoints = {self.expect_number_()} - while self.next_token_ != ";": - contourPoints.add(self.expect_number_()) - self.expect_symbol_(";") - return self.ast.AttachStatement(glyphs, contourPoints, location=location) - - def parse_enumerate_(self, vertical): - # Parse an enumerated pair positioning rule (`section 6.b.ii `_). - assert self.cur_token_ in {"enumerate", "enum"} - self.advance_lexer_() - return self.parse_position_(enumerated=True, vertical=vertical) - - def parse_GlyphClassDef_(self): - # Parses 'GlyphClassDef @BASE, @LIGATURES, @MARKS, @COMPONENTS;' - assert self.is_cur_keyword_("GlyphClassDef") - location = self.cur_token_location_ - if self.next_token_ != ",": - baseGlyphs = self.parse_glyphclass_(accept_glyphname=False) - else: - baseGlyphs = None - self.expect_symbol_(",") - if self.next_token_ != ",": - ligatureGlyphs = self.parse_glyphclass_(accept_glyphname=False) - else: - ligatureGlyphs = None - self.expect_symbol_(",") - if self.next_token_ != ",": - markGlyphs = self.parse_glyphclass_(accept_glyphname=False) - else: - markGlyphs = None - self.expect_symbol_(",") - if self.next_token_ != ";": - componentGlyphs = self.parse_glyphclass_(accept_glyphname=False) - else: - componentGlyphs = None - self.expect_symbol_(";") - return self.ast.GlyphClassDefStatement( - baseGlyphs, markGlyphs, ligatureGlyphs, componentGlyphs, location=location - ) - - def parse_glyphclass_definition_(self): - # Parses glyph class definitions such as '@UPPERCASE = [A-Z];' - location, name = self.cur_token_location_, self.cur_token_ - self.expect_symbol_("=") - glyphs = self.parse_glyphclass_(accept_glyphname=False) - self.expect_symbol_(";") - glyphclass = self.ast.GlyphClassDefinition(name, glyphs, location=location) - self.glyphclasses_.define(name, glyphclass) - return glyphclass - - def split_glyph_range_(self, name, location): - # Since v1.20, the OpenType Feature File specification allows - # for dashes in glyph names. A sequence like "a-b-c-d" could - # therefore mean a single glyph whose name happens to be - # "a-b-c-d", or it could mean a range from glyph "a" to glyph - # "b-c-d", or a range from glyph "a-b" to glyph "c-d", or a - # range from glyph "a-b-c" to glyph "d".Technically, this - # example could be resolved because the (pretty complex) - # definition of glyph ranges renders most of these splits - # invalid. But the specification does not say that a compiler - # should try to apply such fancy heuristics. To encourage - # unambiguous feature files, we therefore try all possible - # splits and reject the feature file if there are multiple - # splits possible. It is intentional that we don't just emit a - # warning; warnings tend to get ignored. To fix the problem, - # font designers can trivially add spaces around the intended - # split point, and we emit a compiler error that suggests - # how exactly the source should be rewritten to make things - # unambiguous. 
- parts = name.split("-") - solutions = [] - for i in range(len(parts)): - start, limit = "-".join(parts[0:i]), "-".join(parts[i:]) - if start in self.glyphNames_ and limit in self.glyphNames_: - solutions.append((start, limit)) - if len(solutions) == 1: - start, limit = solutions[0] - return start, limit - elif len(solutions) == 0: - raise FeatureLibError( - '"%s" is not a glyph in the font, and it can not be split ' - "into a range of known glyphs" % name, - location, - ) - else: - ranges = " or ".join(['"%s - %s"' % (s, l) for s, l in solutions]) - raise FeatureLibError( - 'Ambiguous glyph range "%s"; ' - "please use %s to clarify what you mean" % (name, ranges), - location, - ) - - def parse_glyphclass_(self, accept_glyphname, accept_null=False): - # Parses a glyph class, either named or anonymous, or (if - # ``bool(accept_glyphname)``) a glyph name. If ``bool(accept_null)`` then - # also accept the special NULL glyph. - if accept_glyphname and self.next_token_type_ in (Lexer.NAME, Lexer.CID): - if accept_null and self.next_token_ == "NULL": - # If you want a glyph called NULL, you should escape it. - self.advance_lexer_() - return self.ast.NullGlyph(location=self.cur_token_location_) - glyph = self.expect_glyph_() - self.check_glyph_name_in_glyph_set(glyph) - return self.ast.GlyphName(glyph, location=self.cur_token_location_) - if self.next_token_type_ is Lexer.GLYPHCLASS: - self.advance_lexer_() - gc = self.glyphclasses_.resolve(self.cur_token_) - if gc is None: - raise FeatureLibError( - "Unknown glyph class @%s" % self.cur_token_, - self.cur_token_location_, - ) - if isinstance(gc, self.ast.MarkClass): - return self.ast.MarkClassName(gc, location=self.cur_token_location_) - else: - return self.ast.GlyphClassName(gc, location=self.cur_token_location_) - - self.expect_symbol_("[") - location = self.cur_token_location_ - glyphs = self.ast.GlyphClass(location=location) - while self.next_token_ != "]": - if self.next_token_type_ is Lexer.NAME: - glyph = self.expect_glyph_() - location = self.cur_token_location_ - if "-" in glyph and self.glyphNames_ and glyph not in self.glyphNames_: - start, limit = self.split_glyph_range_(glyph, location) - self.check_glyph_name_in_glyph_set(start, limit) - glyphs.add_range( - start, limit, self.make_glyph_range_(location, start, limit) - ) - elif self.next_token_ == "-": - start = glyph - self.expect_symbol_("-") - limit = self.expect_glyph_() - self.check_glyph_name_in_glyph_set(start, limit) - glyphs.add_range( - start, limit, self.make_glyph_range_(location, start, limit) - ) - else: - if "-" in glyph and not self.glyphNames_: - log.warning( - str( - FeatureLibError( - f"Ambiguous glyph name that looks like a range: {glyph!r}", - location, - ) - ) - ) - self.check_glyph_name_in_glyph_set(glyph) - glyphs.append(glyph) - elif self.next_token_type_ is Lexer.CID: - glyph = self.expect_glyph_() - if self.next_token_ == "-": - range_location = self.cur_token_location_ - range_start = self.cur_token_ - self.expect_symbol_("-") - range_end = self.expect_cid_() - self.check_glyph_name_in_glyph_set( - f"cid{range_start:05d}", - f"cid{range_end:05d}", - ) - glyphs.add_cid_range( - range_start, - range_end, - self.make_cid_range_(range_location, range_start, range_end), - ) - else: - glyph_name = f"cid{self.cur_token_:05d}" - self.check_glyph_name_in_glyph_set(glyph_name) - glyphs.append(glyph_name) - elif self.next_token_type_ is Lexer.GLYPHCLASS: - self.advance_lexer_() - gc = self.glyphclasses_.resolve(self.cur_token_) - if gc is None: - raise 
FeatureLibError( - "Unknown glyph class @%s" % self.cur_token_, - self.cur_token_location_, - ) - if isinstance(gc, self.ast.MarkClass): - gc = self.ast.MarkClassName(gc, location=self.cur_token_location_) - else: - gc = self.ast.GlyphClassName(gc, location=self.cur_token_location_) - glyphs.add_class(gc) - else: - raise FeatureLibError( - "Expected glyph name, glyph range, " - f"or glyph class reference, found {self.next_token_!r}", - self.next_token_location_, - ) - self.expect_symbol_("]") - return glyphs - - def parse_glyph_pattern_(self, vertical): - # Parses a glyph pattern, including lookups and context, e.g.:: - # - # a b - # a b c' d e - # a b c' lookup ChangeC d e - prefix, glyphs, lookups, values, suffix = ([], [], [], [], []) - hasMarks = False - while self.next_token_ not in {"by", "from", ";", ","}: - gc = self.parse_glyphclass_(accept_glyphname=True) - marked = False - if self.next_token_ == "'": - self.expect_symbol_("'") - hasMarks = marked = True - if marked: - if suffix: - # makeotf also reports this as an error, while FontForge - # silently inserts ' in all the intervening glyphs. - # https://github.com/fonttools/fonttools/pull/1096 - raise FeatureLibError( - "Unsupported contextual target sequence: at most " - "one run of marked (') glyph/class names allowed", - self.cur_token_location_, - ) - glyphs.append(gc) - elif glyphs: - suffix.append(gc) - else: - prefix.append(gc) - - if self.is_next_value_(): - values.append(self.parse_valuerecord_(vertical)) - else: - values.append(None) - - lookuplist = None - while self.next_token_ == "lookup": - if lookuplist is None: - lookuplist = [] - self.expect_keyword_("lookup") - if not marked: - raise FeatureLibError( - "Lookups can only follow marked glyphs", - self.cur_token_location_, - ) - lookup_name = self.expect_name_() - lookup = self.lookups_.resolve(lookup_name) - if lookup is None: - raise FeatureLibError( - 'Unknown lookup "%s"' % lookup_name, self.cur_token_location_ - ) - lookuplist.append(lookup) - if marked: - lookups.append(lookuplist) - - if not glyphs and not suffix: # eg., "sub f f i by" - assert lookups == [] - return ([], prefix, [None] * len(prefix), values, [], hasMarks) - else: - if any(values[: len(prefix)]): - raise FeatureLibError( - "Positioning cannot be applied in the bactrack glyph sequence, " - "before the marked glyph sequence.", - self.cur_token_location_, - ) - marked_values = values[len(prefix) : len(prefix) + len(glyphs)] - if any(marked_values): - if any(values[len(prefix) + len(glyphs) :]): - raise FeatureLibError( - "Positioning values are allowed only in the marked glyph " - "sequence, or after the final glyph node when only one glyph " - "node is marked.", - self.cur_token_location_, - ) - values = marked_values - elif values and values[-1]: - if len(glyphs) > 1 or any(values[:-1]): - raise FeatureLibError( - "Positioning values are allowed only in the marked glyph " - "sequence, or after the final glyph node when only one glyph " - "node is marked.", - self.cur_token_location_, - ) - values = values[-1:] - elif any(values): - raise FeatureLibError( - "Positioning values are allowed only in the marked glyph " - "sequence, or after the final glyph node when only one glyph " - "node is marked.", - self.cur_token_location_, - ) - return (prefix, glyphs, lookups, values, suffix, hasMarks) - - def parse_ignore_glyph_pattern_(self, sub): - location = self.cur_token_location_ - prefix, glyphs, lookups, values, suffix, hasMarks = self.parse_glyph_pattern_( - vertical=False - ) - if any(lookups): 
- raise FeatureLibError( - f'No lookups can be specified for "ignore {sub}"', location - ) - if not hasMarks: - error = FeatureLibError( - f'Ambiguous "ignore {sub}", there should be least one marked glyph', - location, - ) - log.warning(str(error)) - suffix, glyphs = glyphs[1:], glyphs[0:1] - chainContext = (prefix, glyphs, suffix) - return chainContext - - def parse_ignore_context_(self, sub): - location = self.cur_token_location_ - chainContext = [self.parse_ignore_glyph_pattern_(sub)] - while self.next_token_ == ",": - self.expect_symbol_(",") - chainContext.append(self.parse_ignore_glyph_pattern_(sub)) - self.expect_symbol_(";") - return chainContext - - def parse_ignore_(self): - # Parses an ignore sub/pos rule. - assert self.is_cur_keyword_("ignore") - location = self.cur_token_location_ - self.advance_lexer_() - if self.cur_token_ in ["substitute", "sub"]: - chainContext = self.parse_ignore_context_("sub") - return self.ast.IgnoreSubstStatement(chainContext, location=location) - if self.cur_token_ in ["position", "pos"]: - chainContext = self.parse_ignore_context_("pos") - return self.ast.IgnorePosStatement(chainContext, location=location) - raise FeatureLibError( - 'Expected "substitute" or "position"', self.cur_token_location_ - ) - - def parse_include_(self): - assert self.cur_token_ == "include" - location = self.cur_token_location_ - filename = self.expect_filename_() - # self.expect_symbol_(";") - return ast.IncludeStatement(filename, location=location) - - def parse_language_(self): - assert self.is_cur_keyword_("language") - location = self.cur_token_location_ - language = self.expect_language_tag_() - include_default, required = (True, False) - if self.next_token_ in {"exclude_dflt", "include_dflt"}: - include_default = self.expect_name_() == "include_dflt" - if self.next_token_ == "required": - self.expect_keyword_("required") - required = True - self.expect_symbol_(";") - return self.ast.LanguageStatement( - language, include_default, required, location=location - ) - - def parse_ligatureCaretByIndex_(self): - assert self.is_cur_keyword_("LigatureCaretByIndex") - location = self.cur_token_location_ - glyphs = self.parse_glyphclass_(accept_glyphname=True) - carets = [self.expect_number_()] - while self.next_token_ != ";": - carets.append(self.expect_number_()) - self.expect_symbol_(";") - return self.ast.LigatureCaretByIndexStatement(glyphs, carets, location=location) - - def parse_ligatureCaretByPos_(self): - assert self.is_cur_keyword_("LigatureCaretByPos") - location = self.cur_token_location_ - glyphs = self.parse_glyphclass_(accept_glyphname=True) - carets = [self.expect_number_(variable=True)] - while self.next_token_ != ";": - carets.append(self.expect_number_(variable=True)) - self.expect_symbol_(";") - return self.ast.LigatureCaretByPosStatement(glyphs, carets, location=location) - - def parse_lookup_(self, vertical): - # Parses a ``lookup`` - either a lookup block, or a lookup reference - # inside a feature. 
- assert self.is_cur_keyword_("lookup") - location, name = self.cur_token_location_, self.expect_name_() - - if self.next_token_ == ";": - lookup = self.lookups_.resolve(name) - if lookup is None: - raise FeatureLibError( - 'Unknown lookup "%s"' % name, self.cur_token_location_ - ) - self.expect_symbol_(";") - return self.ast.LookupReferenceStatement(lookup, location=location) - - use_extension = False - if self.next_token_ == "useExtension": - self.expect_keyword_("useExtension") - use_extension = True - - block = self.ast.LookupBlock(name, use_extension, location=location) - self.parse_block_(block, vertical) - self.lookups_.define(name, block) - return block - - def parse_lookupflag_(self): - # Parses a ``lookupflag`` statement, either specified by number or - # in words. - assert self.is_cur_keyword_("lookupflag") - location = self.cur_token_location_ - - # format B: "lookupflag 6;" - if self.next_token_type_ == Lexer.NUMBER: - value = self.expect_number_() - self.expect_symbol_(";") - return self.ast.LookupFlagStatement(value, location=location) - - # format A: "lookupflag RightToLeft MarkAttachmentType @M;" - value_seen = False - value, markAttachment, markFilteringSet = 0, None, None - flags = { - "RightToLeft": 1, - "IgnoreBaseGlyphs": 2, - "IgnoreLigatures": 4, - "IgnoreMarks": 8, - } - seen = set() - while self.next_token_ != ";": - if self.next_token_ in seen: - raise FeatureLibError( - "%s can be specified only once" % self.next_token_, - self.next_token_location_, - ) - seen.add(self.next_token_) - if self.next_token_ == "MarkAttachmentType": - self.expect_keyword_("MarkAttachmentType") - markAttachment = self.parse_glyphclass_(accept_glyphname=False) - elif self.next_token_ == "UseMarkFilteringSet": - self.expect_keyword_("UseMarkFilteringSet") - markFilteringSet = self.parse_glyphclass_(accept_glyphname=False) - elif self.next_token_ in flags: - value_seen = True - value = value | flags[self.expect_name_()] - else: - raise FeatureLibError( - '"%s" is not a recognized lookupflag' % self.next_token_, - self.next_token_location_, - ) - self.expect_symbol_(";") - - if not any([value_seen, markAttachment, markFilteringSet]): - raise FeatureLibError( - "lookupflag must have a value", self.next_token_location_ - ) - - return self.ast.LookupFlagStatement( - value, - markAttachment=markAttachment, - markFilteringSet=markFilteringSet, - location=location, - ) - - def parse_markClass_(self): - assert self.is_cur_keyword_("markClass") - location = self.cur_token_location_ - glyphs = self.parse_glyphclass_(accept_glyphname=True) - if not glyphs.glyphSet(): - raise FeatureLibError( - "Empty glyph class in mark class definition", location - ) - anchor = self.parse_anchor_() - name = self.expect_class_name_() - self.expect_symbol_(";") - markClass = self.doc_.markClasses.get(name) - if markClass is None: - markClass = self.ast.MarkClass(name) - self.doc_.markClasses[name] = markClass - self.glyphclasses_.define(name, markClass) - mcdef = self.ast.MarkClassDefinition( - markClass, anchor, glyphs, location=location - ) - markClass.addDefinition(mcdef) - return mcdef - - def parse_position_(self, enumerated, vertical): - assert self.cur_token_ in {"position", "pos"} - if self.next_token_ == "cursive": # GPOS type 3 - return self.parse_position_cursive_(enumerated, vertical) - elif self.next_token_ == "base": # GPOS type 4 - return self.parse_position_base_(enumerated, vertical) - elif self.next_token_ == "ligature": # GPOS type 5 - return self.parse_position_ligature_(enumerated, vertical) - 
elif self.next_token_ == "mark": # GPOS type 6 - return self.parse_position_mark_(enumerated, vertical) - - location = self.cur_token_location_ - prefix, glyphs, lookups, values, suffix, hasMarks = self.parse_glyph_pattern_( - vertical - ) - self.expect_symbol_(";") - - if any(lookups): - # GPOS type 8: Chaining contextual positioning; explicit lookups - if any(values): - raise FeatureLibError( - 'If "lookup" is present, no values must be specified', location - ) - return self.ast.ChainContextPosStatement( - prefix, glyphs, suffix, lookups, location=location - ) - - # Pair positioning, format A: "pos V 10 A -10;" - # Pair positioning, format B: "pos V A -20;" - if not prefix and not suffix and len(glyphs) == 2 and not hasMarks: - if values[0] is None: # Format B: "pos V A -20;" - values.reverse() - return self.ast.PairPosStatement( - glyphs[0], - values[0], - glyphs[1], - values[1], - enumerated=enumerated, - location=location, - ) - - if enumerated: - raise FeatureLibError( - '"enumerate" is only allowed with pair positionings', location - ) - return self.ast.SinglePosStatement( - list(zip(glyphs, values)), - prefix, - suffix, - forceChain=hasMarks, - location=location, - ) - - def parse_position_cursive_(self, enumerated, vertical): - location = self.cur_token_location_ - self.expect_keyword_("cursive") - if enumerated: - raise FeatureLibError( - '"enumerate" is not allowed with ' "cursive attachment positioning", - location, - ) - glyphclass = self.parse_glyphclass_(accept_glyphname=True) - entryAnchor = self.parse_anchor_() - exitAnchor = self.parse_anchor_() - self.expect_symbol_(";") - return self.ast.CursivePosStatement( - glyphclass, entryAnchor, exitAnchor, location=location - ) - - def parse_position_base_(self, enumerated, vertical): - location = self.cur_token_location_ - self.expect_keyword_("base") - if enumerated: - raise FeatureLibError( - '"enumerate" is not allowed with ' - "mark-to-base attachment positioning", - location, - ) - base = self.parse_glyphclass_(accept_glyphname=True) - marks = self.parse_anchor_marks_() - self.expect_symbol_(";") - return self.ast.MarkBasePosStatement(base, marks, location=location) - - def parse_position_ligature_(self, enumerated, vertical): - location = self.cur_token_location_ - self.expect_keyword_("ligature") - if enumerated: - raise FeatureLibError( - '"enumerate" is not allowed with ' - "mark-to-ligature attachment positioning", - location, - ) - ligatures = self.parse_glyphclass_(accept_glyphname=True) - marks = [self.parse_anchor_marks_()] - while self.next_token_ == "ligComponent": - self.expect_keyword_("ligComponent") - marks.append(self.parse_anchor_marks_()) - self.expect_symbol_(";") - return self.ast.MarkLigPosStatement(ligatures, marks, location=location) - - def parse_position_mark_(self, enumerated, vertical): - location = self.cur_token_location_ - self.expect_keyword_("mark") - if enumerated: - raise FeatureLibError( - '"enumerate" is not allowed with ' - "mark-to-mark attachment positioning", - location, - ) - baseMarks = self.parse_glyphclass_(accept_glyphname=True) - marks = self.parse_anchor_marks_() - self.expect_symbol_(";") - return self.ast.MarkMarkPosStatement(baseMarks, marks, location=location) - - def parse_script_(self): - assert self.is_cur_keyword_("script") - location, script = self.cur_token_location_, self.expect_script_tag_() - self.expect_symbol_(";") - return self.ast.ScriptStatement(script, location=location) - - def parse_substitute_(self): - assert self.cur_token_ in {"substitute", "sub", 
"reversesub", "rsub"} - location = self.cur_token_location_ - reverse = self.cur_token_ in {"reversesub", "rsub"} - ( - old_prefix, - old, - lookups, - values, - old_suffix, - hasMarks, - ) = self.parse_glyph_pattern_(vertical=False) - if any(values): - raise FeatureLibError( - "Substitution statements cannot contain values", location - ) - new = [] - if self.next_token_ == "by": - keyword = self.expect_keyword_("by") - while self.next_token_ != ";": - gc = self.parse_glyphclass_(accept_glyphname=True, accept_null=True) - new.append(gc) - elif self.next_token_ == "from": - keyword = self.expect_keyword_("from") - new = [self.parse_glyphclass_(accept_glyphname=False)] - else: - keyword = None - self.expect_symbol_(";") - if len(new) == 0 and not any(lookups): - raise FeatureLibError( - 'Expected "by", "from" or explicit lookup references', - self.cur_token_location_, - ) - - # GSUB lookup type 3: Alternate substitution. - # Format: "substitute a from [a.1 a.2 a.3];" - if keyword == "from": - if reverse: - raise FeatureLibError( - 'Reverse chaining substitutions do not support "from"', location - ) - if len(old) != 1 or len(old[0].glyphSet()) != 1: - raise FeatureLibError('Expected a single glyph before "from"', location) - if len(new) != 1: - raise FeatureLibError( - 'Expected a single glyphclass after "from"', location - ) - return self.ast.AlternateSubstStatement( - old_prefix, old[0], old_suffix, new[0], location=location - ) - - num_lookups = len([l for l in lookups if l is not None]) - - is_deletion = False - if len(new) == 1 and isinstance(new[0], ast.NullGlyph): - new = [] # Deletion - is_deletion = True - - # GSUB lookup type 1: Single substitution. - # Format A: "substitute a by a.sc;" - # Format B: "substitute [one.fitted one.oldstyle] by one;" - # Format C: "substitute [a-d] by [A.sc-D.sc];" - if not reverse and len(old) == 1 and len(new) == 1 and num_lookups == 0: - glyphs = list(old[0].glyphSet()) - replacements = list(new[0].glyphSet()) - if len(replacements) == 1: - replacements = replacements * len(glyphs) - if len(glyphs) != len(replacements): - raise FeatureLibError( - 'Expected a glyph class with %d elements after "by", ' - "but found a glyph class with %d elements" - % (len(glyphs), len(replacements)), - location, - ) - return self.ast.SingleSubstStatement( - old, new, old_prefix, old_suffix, forceChain=hasMarks, location=location - ) - - # Glyph deletion, built as GSUB lookup type 2: Multiple substitution - # with empty replacement. - if is_deletion and len(old) == 1 and num_lookups == 0: - return self.ast.MultipleSubstStatement( - old_prefix, - old[0], - old_suffix, - (), - forceChain=hasMarks, - location=location, - ) - - # GSUB lookup type 2: Multiple substitution. 
- # Format: "substitute f_f_i by f f i;" - # - # GlyphsApp introduces two additional formats: - # Format 1: "substitute [f_i f_l] by [f f] [i l];" - # Format 2: "substitute [f_i f_l] by f [i l];" - # http://handbook.glyphsapp.com/en/layout/multiple-substitution-with-classes/ - if not reverse and len(old) == 1 and len(new) > 1 and num_lookups == 0: - count = len(old[0].glyphSet()) - for n in new: - if not list(n.glyphSet()): - raise FeatureLibError("Empty class in replacement", location) - if len(n.glyphSet()) != 1 and len(n.glyphSet()) != count: - raise FeatureLibError( - f'Expected a glyph class with 1 or {count} elements after "by", ' - f"but found a glyph class with {len(n.glyphSet())} elements", - location, - ) - return self.ast.MultipleSubstStatement( - old_prefix, - old[0], - old_suffix, - new, - forceChain=hasMarks, - location=location, - ) - - # GSUB lookup type 4: Ligature substitution. - # Format: "substitute f f i by f_f_i;" - if ( - not reverse - and len(old) > 1 - and len(new) == 1 - and len(new[0].glyphSet()) == 1 - and num_lookups == 0 - ): - return self.ast.LigatureSubstStatement( - old_prefix, - old, - old_suffix, - list(new[0].glyphSet())[0], - forceChain=hasMarks, - location=location, - ) - - # GSUB lookup type 8: Reverse chaining substitution. - if reverse: - if len(old) != 1: - raise FeatureLibError( - "In reverse chaining single substitutions, " - "only a single glyph or glyph class can be replaced", - location, - ) - if len(new) != 1: - raise FeatureLibError( - "In reverse chaining single substitutions, " - 'the replacement (after "by") must be a single glyph ' - "or glyph class", - location, - ) - if num_lookups != 0: - raise FeatureLibError( - "Reverse chaining substitutions cannot call named lookups", location - ) - glyphs = sorted(list(old[0].glyphSet())) - replacements = sorted(list(new[0].glyphSet())) - if len(replacements) == 1: - replacements = replacements * len(glyphs) - if len(glyphs) != len(replacements): - raise FeatureLibError( - 'Expected a glyph class with %d elements after "by", ' - "but found a glyph class with %d elements" - % (len(glyphs), len(replacements)), - location, - ) - return self.ast.ReverseChainSingleSubstStatement( - old_prefix, old_suffix, old, new, location=location - ) - - if len(old) > 1 and len(new) > 1: - raise FeatureLibError( - "Direct substitution of multiple glyphs by multiple glyphs " - "is not supported", - location, - ) - - # If there are remaining glyphs to parse, this is an invalid GSUB statement - if len(new) != 0 or is_deletion: - raise FeatureLibError("Invalid substitution statement", location) - - # GSUB lookup type 6: Chaining contextual substitution. - rule = self.ast.ChainContextSubstStatement( - old_prefix, old, old_suffix, lookups, location=location - ) - return rule - - def parse_subtable_(self): - assert self.is_cur_keyword_("subtable") - location = self.cur_token_location_ - self.expect_symbol_(";") - return self.ast.SubtableStatement(location=location) - - def parse_size_parameters_(self): - # Parses a ``parameters`` statement used in ``size`` features. See - # `section 8.b `_. 
- assert self.is_cur_keyword_("parameters") - location = self.cur_token_location_ - DesignSize = self.expect_decipoint_() - SubfamilyID = self.expect_number_() - RangeStart = 0.0 - RangeEnd = 0.0 - if self.next_token_type_ in (Lexer.NUMBER, Lexer.FLOAT) or SubfamilyID != 0: - RangeStart = self.expect_decipoint_() - RangeEnd = self.expect_decipoint_() - - self.expect_symbol_(";") - return self.ast.SizeParameters( - DesignSize, SubfamilyID, RangeStart, RangeEnd, location=location - ) - - def parse_size_menuname_(self): - assert self.is_cur_keyword_("sizemenuname") - location = self.cur_token_location_ - platformID, platEncID, langID, string = self.parse_name_() - return self.ast.FeatureNameStatement( - "size", platformID, platEncID, langID, string, location=location - ) - - def parse_table_(self): - assert self.is_cur_keyword_("table") - location, name = self.cur_token_location_, self.expect_tag_() - table = self.ast.TableBlock(name, location=location) - self.expect_symbol_("{") - handler = { - "GDEF": self.parse_table_GDEF_, - "head": self.parse_table_head_, - "hhea": self.parse_table_hhea_, - "vhea": self.parse_table_vhea_, - "name": self.parse_table_name_, - "BASE": self.parse_table_BASE_, - "OS/2": self.parse_table_OS_2_, - "STAT": self.parse_table_STAT_, - }.get(name) - if handler: - handler(table) - else: - raise FeatureLibError( - '"table %s" is not supported' % name.strip(), location - ) - self.expect_symbol_("}") - end_tag = self.expect_tag_() - if end_tag != name: - raise FeatureLibError( - 'Expected "%s"' % name.strip(), self.cur_token_location_ - ) - self.expect_symbol_(";") - return table - - def parse_table_GDEF_(self, table): - statements = table.statements - while self.next_token_ != "}" or self.cur_comments_: - self.advance_lexer_(comments=True) - if self.cur_token_type_ is Lexer.COMMENT: - statements.append( - self.ast.Comment(self.cur_token_, location=self.cur_token_location_) - ) - elif self.is_cur_keyword_("Attach"): - statements.append(self.parse_attach_()) - elif self.is_cur_keyword_("GlyphClassDef"): - statements.append(self.parse_GlyphClassDef_()) - elif self.is_cur_keyword_("LigatureCaretByIndex"): - statements.append(self.parse_ligatureCaretByIndex_()) - elif self.is_cur_keyword_("LigatureCaretByPos"): - statements.append(self.parse_ligatureCaretByPos_()) - elif self.cur_token_ == ";": - continue - else: - raise FeatureLibError( - "Expected Attach, LigatureCaretByIndex, " "or LigatureCaretByPos", - self.cur_token_location_, - ) - - def parse_table_head_(self, table): - statements = table.statements - while self.next_token_ != "}" or self.cur_comments_: - self.advance_lexer_(comments=True) - if self.cur_token_type_ is Lexer.COMMENT: - statements.append( - self.ast.Comment(self.cur_token_, location=self.cur_token_location_) - ) - elif self.is_cur_keyword_("FontRevision"): - statements.append(self.parse_FontRevision_()) - elif self.cur_token_ == ";": - continue - else: - raise FeatureLibError("Expected FontRevision", self.cur_token_location_) - - def parse_table_hhea_(self, table): - statements = table.statements - fields = ("CaretOffset", "Ascender", "Descender", "LineGap") - while self.next_token_ != "}" or self.cur_comments_: - self.advance_lexer_(comments=True) - if self.cur_token_type_ is Lexer.COMMENT: - statements.append( - self.ast.Comment(self.cur_token_, location=self.cur_token_location_) - ) - elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in fields: - key = self.cur_token_.lower() - value = self.expect_number_() - statements.append( - 
self.ast.HheaField(key, value, location=self.cur_token_location_) - ) - if self.next_token_ != ";": - raise FeatureLibError( - "Incomplete statement", self.next_token_location_ - ) - elif self.cur_token_ == ";": - continue - else: - raise FeatureLibError( - "Expected CaretOffset, Ascender, " "Descender or LineGap", - self.cur_token_location_, - ) - - def parse_table_vhea_(self, table): - statements = table.statements - fields = ("VertTypoAscender", "VertTypoDescender", "VertTypoLineGap") - while self.next_token_ != "}" or self.cur_comments_: - self.advance_lexer_(comments=True) - if self.cur_token_type_ is Lexer.COMMENT: - statements.append( - self.ast.Comment(self.cur_token_, location=self.cur_token_location_) - ) - elif self.cur_token_type_ is Lexer.NAME and self.cur_token_ in fields: - key = self.cur_token_.lower() - value = self.expect_number_() - statements.append( - self.ast.VheaField(key, value, location=self.cur_token_location_) - ) - if self.next_token_ != ";": - raise FeatureLibError( - "Incomplete statement", self.next_token_location_ - ) - elif self.cur_token_ == ";": - continue - else: - raise FeatureLibError( - "Expected VertTypoAscender, " - "VertTypoDescender or VertTypoLineGap", - self.cur_token_location_, - ) - - def parse_table_name_(self, table): - statements = table.statements - while self.next_token_ != "}" or self.cur_comments_: - self.advance_lexer_(comments=True) - if self.cur_token_type_ is Lexer.COMMENT: - statements.append( - self.ast.Comment(self.cur_token_, location=self.cur_token_location_) - ) - elif self.is_cur_keyword_("nameid"): - statement = self.parse_nameid_() - if statement: - statements.append(statement) - elif self.cur_token_ == ";": - continue - else: - raise FeatureLibError("Expected nameid", self.cur_token_location_) - - def parse_name_(self): - """Parses a name record. 
See `section 9.e `_.""" - platEncID = None - langID = None - if self.next_token_type_ in Lexer.NUMBERS: - platformID = self.expect_any_number_() - location = self.cur_token_location_ - if platformID not in (1, 3): - raise FeatureLibError("Expected platform id 1 or 3", location) - if self.next_token_type_ in Lexer.NUMBERS: - platEncID = self.expect_any_number_() - langID = self.expect_any_number_() - else: - platformID = 3 - location = self.cur_token_location_ - - if platformID == 1: # Macintosh - platEncID = platEncID or 0 # Roman - langID = langID or 0 # English - else: # 3, Windows - platEncID = platEncID or 1 # Unicode - langID = langID or 0x0409 # English - - string = self.expect_string_() - self.expect_symbol_(";") - - encoding = getEncoding(platformID, platEncID, langID) - if encoding is None: - raise FeatureLibError("Unsupported encoding", location) - unescaped = self.unescape_string_(string, encoding) - return platformID, platEncID, langID, unescaped - - def parse_stat_name_(self): - platEncID = None - langID = None - if self.next_token_type_ in Lexer.NUMBERS: - platformID = self.expect_any_number_() - location = self.cur_token_location_ - if platformID not in (1, 3): - raise FeatureLibError("Expected platform id 1 or 3", location) - if self.next_token_type_ in Lexer.NUMBERS: - platEncID = self.expect_any_number_() - langID = self.expect_any_number_() - else: - platformID = 3 - location = self.cur_token_location_ - - if platformID == 1: # Macintosh - platEncID = platEncID or 0 # Roman - langID = langID or 0 # English - else: # 3, Windows - platEncID = platEncID or 1 # Unicode - langID = langID or 0x0409 # English - - string = self.expect_string_() - encoding = getEncoding(platformID, platEncID, langID) - if encoding is None: - raise FeatureLibError("Unsupported encoding", location) - unescaped = self.unescape_string_(string, encoding) - return platformID, platEncID, langID, unescaped - - def parse_nameid_(self): - assert self.cur_token_ == "nameid", self.cur_token_ - location, nameID = self.cur_token_location_, self.expect_any_number_() - if nameID > 32767: - raise FeatureLibError( - "Name id value cannot be greater than 32767", self.cur_token_location_ - ) - platformID, platEncID, langID, string = self.parse_name_() - return self.ast.NameRecord( - nameID, platformID, platEncID, langID, string, location=location - ) - - def unescape_string_(self, string, encoding): - if encoding == "utf_16_be": - s = re.sub(r"\\[0-9a-fA-F]{4}", self.unescape_unichr_, string) - else: - unescape = lambda m: self.unescape_byte_(m, encoding) - s = re.sub(r"\\[0-9a-fA-F]{2}", unescape, string) - # We now have a Unicode string, but it might contain surrogate pairs. - # We convert surrogates to actual Unicode by round-tripping through - # Python's UTF-16 codec in a special mode. 
- utf16 = tobytes(s, "utf_16_be", "surrogatepass") - return tostr(utf16, "utf_16_be") - - @staticmethod - def unescape_unichr_(match): - n = match.group(0)[1:] - return chr(int(n, 16)) - - @staticmethod - def unescape_byte_(match, encoding): - n = match.group(0)[1:] - return bytechr(int(n, 16)).decode(encoding) - - def parse_table_BASE_(self, table): - statements = table.statements - while self.next_token_ != "}" or self.cur_comments_: - self.advance_lexer_(comments=True) - if self.cur_token_type_ is Lexer.COMMENT: - statements.append( - self.ast.Comment(self.cur_token_, location=self.cur_token_location_) - ) - elif self.is_cur_keyword_("HorizAxis.BaseTagList"): - horiz_bases = self.parse_base_tag_list_() - elif self.is_cur_keyword_("HorizAxis.BaseScriptList"): - horiz_scripts = self.parse_base_script_list_(len(horiz_bases)) - statements.append( - self.ast.BaseAxis( - horiz_bases, - horiz_scripts, - False, - location=self.cur_token_location_, - ) - ) - elif self.is_cur_keyword_("VertAxis.BaseTagList"): - vert_bases = self.parse_base_tag_list_() - elif self.is_cur_keyword_("VertAxis.BaseScriptList"): - vert_scripts = self.parse_base_script_list_(len(vert_bases)) - statements.append( - self.ast.BaseAxis( - vert_bases, - vert_scripts, - True, - location=self.cur_token_location_, - ) - ) - elif self.cur_token_ == ";": - continue - - def parse_table_OS_2_(self, table): - statements = table.statements - numbers = ( - "FSType", - "TypoAscender", - "TypoDescender", - "TypoLineGap", - "winAscent", - "winDescent", - "XHeight", - "CapHeight", - "WeightClass", - "WidthClass", - "LowerOpSize", - "UpperOpSize", - ) - ranges = ("UnicodeRange", "CodePageRange") - while self.next_token_ != "}" or self.cur_comments_: - self.advance_lexer_(comments=True) - if self.cur_token_type_ is Lexer.COMMENT: - statements.append( - self.ast.Comment(self.cur_token_, location=self.cur_token_location_) - ) - elif self.cur_token_type_ is Lexer.NAME: - key = self.cur_token_.lower() - value = None - if self.cur_token_ in numbers: - value = self.expect_number_() - elif self.is_cur_keyword_("Panose"): - value = [] - for i in range(10): - value.append(self.expect_number_()) - elif self.cur_token_ in ranges: - value = [] - while self.next_token_ != ";": - value.append(self.expect_number_()) - elif self.is_cur_keyword_("Vendor"): - value = self.expect_string_() - statements.append( - self.ast.OS2Field(key, value, location=self.cur_token_location_) - ) - elif self.cur_token_ == ";": - continue - - def parse_STAT_ElidedFallbackName(self): - assert self.is_cur_keyword_("ElidedFallbackName") - self.expect_symbol_("{") - names = [] - while self.next_token_ != "}" or self.cur_comments_: - self.advance_lexer_() - if self.is_cur_keyword_("name"): - platformID, platEncID, langID, string = self.parse_stat_name_() - nameRecord = self.ast.STATNameStatement( - "stat", - platformID, - platEncID, - langID, - string, - location=self.cur_token_location_, - ) - names.append(nameRecord) - else: - if self.cur_token_ != ";": - raise FeatureLibError( - f"Unexpected token {self.cur_token_} " f"in ElidedFallbackName", - self.cur_token_location_, - ) - self.expect_symbol_("}") - if not names: - raise FeatureLibError('Expected "name"', self.cur_token_location_) - return names - - def parse_STAT_design_axis(self): - assert self.is_cur_keyword_("DesignAxis") - names = [] - axisTag = self.expect_tag_() - if ( - axisTag not in ("ital", "opsz", "slnt", "wdth", "wght") - and not axisTag.isupper() - ): - log.warning(f"Unregistered axis tag {axisTag} should be 
uppercase.") - axisOrder = self.expect_number_() - self.expect_symbol_("{") - while self.next_token_ != "}" or self.cur_comments_: - self.advance_lexer_() - if self.cur_token_type_ is Lexer.COMMENT: - continue - elif self.is_cur_keyword_("name"): - location = self.cur_token_location_ - platformID, platEncID, langID, string = self.parse_stat_name_() - name = self.ast.STATNameStatement( - "stat", platformID, platEncID, langID, string, location=location - ) - names.append(name) - elif self.cur_token_ == ";": - continue - else: - raise FeatureLibError( - f'Expected "name", got {self.cur_token_}', self.cur_token_location_ - ) - - self.expect_symbol_("}") - return self.ast.STATDesignAxisStatement( - axisTag, axisOrder, names, self.cur_token_location_ - ) - - def parse_STAT_axis_value_(self): - assert self.is_cur_keyword_("AxisValue") - self.expect_symbol_("{") - locations = [] - names = [] - flags = 0 - while self.next_token_ != "}" or self.cur_comments_: - self.advance_lexer_(comments=True) - if self.cur_token_type_ is Lexer.COMMENT: - continue - elif self.is_cur_keyword_("name"): - location = self.cur_token_location_ - platformID, platEncID, langID, string = self.parse_stat_name_() - name = self.ast.STATNameStatement( - "stat", platformID, platEncID, langID, string, location=location - ) - names.append(name) - elif self.is_cur_keyword_("location"): - location = self.parse_STAT_location() - locations.append(location) - elif self.is_cur_keyword_("flag"): - flags = self.expect_stat_flags() - elif self.cur_token_ == ";": - continue - else: - raise FeatureLibError( - f"Unexpected token {self.cur_token_} " f"in AxisValue", - self.cur_token_location_, - ) - self.expect_symbol_("}") - if not names: - raise FeatureLibError('Expected "Axis Name"', self.cur_token_location_) - if not locations: - raise FeatureLibError('Expected "Axis location"', self.cur_token_location_) - if len(locations) > 1: - for location in locations: - if len(location.values) > 1: - raise FeatureLibError( - "Only one value is allowed in a " - "Format 4 Axis Value Record, but " - f"{len(location.values)} were found.", - self.cur_token_location_, - ) - format4_tags = [] - for location in locations: - tag = location.tag - if tag in format4_tags: - raise FeatureLibError( - f"Axis tag {tag} already " "defined.", self.cur_token_location_ - ) - format4_tags.append(tag) - - return self.ast.STATAxisValueStatement( - names, locations, flags, self.cur_token_location_ - ) - - def parse_STAT_location(self): - values = [] - tag = self.expect_tag_() - if len(tag.strip()) != 4: - raise FeatureLibError( - f"Axis tag {self.cur_token_} must be 4 " "characters", - self.cur_token_location_, - ) - - while self.next_token_ != ";": - if self.next_token_type_ is Lexer.FLOAT: - value = self.expect_float_() - values.append(value) - elif self.next_token_type_ is Lexer.NUMBER: - value = self.expect_number_() - values.append(value) - else: - raise FeatureLibError( - f'Unexpected value "{self.next_token_}". 
' - "Expected integer or float.", - self.next_token_location_, - ) - if len(values) == 3: - nominal, min_val, max_val = values - if nominal < min_val or nominal > max_val: - raise FeatureLibError( - f"Default value {nominal} is outside " - f"of specified range " - f"{min_val}-{max_val}.", - self.next_token_location_, - ) - return self.ast.AxisValueLocationStatement(tag, values) - - def parse_table_STAT_(self, table): - statements = table.statements - design_axes = [] - while self.next_token_ != "}" or self.cur_comments_: - self.advance_lexer_(comments=True) - if self.cur_token_type_ is Lexer.COMMENT: - statements.append( - self.ast.Comment(self.cur_token_, location=self.cur_token_location_) - ) - elif self.cur_token_type_ is Lexer.NAME: - if self.is_cur_keyword_("ElidedFallbackName"): - names = self.parse_STAT_ElidedFallbackName() - statements.append(self.ast.ElidedFallbackName(names)) - elif self.is_cur_keyword_("ElidedFallbackNameID"): - value = self.expect_number_() - statements.append(self.ast.ElidedFallbackNameID(value)) - self.expect_symbol_(";") - elif self.is_cur_keyword_("DesignAxis"): - designAxis = self.parse_STAT_design_axis() - design_axes.append(designAxis.tag) - statements.append(designAxis) - self.expect_symbol_(";") - elif self.is_cur_keyword_("AxisValue"): - axisValueRecord = self.parse_STAT_axis_value_() - for location in axisValueRecord.locations: - if location.tag not in design_axes: - # Tag must be defined in a DesignAxis before it - # can be referenced - raise FeatureLibError( - "DesignAxis not defined for " f"{location.tag}.", - self.cur_token_location_, - ) - statements.append(axisValueRecord) - self.expect_symbol_(";") - else: - raise FeatureLibError( - f"Unexpected token {self.cur_token_}", self.cur_token_location_ - ) - elif self.cur_token_ == ";": - continue - - def parse_base_tag_list_(self): - # Parses BASE table entries. 
(See `section 9.a `_) - assert self.cur_token_ in ( - "HorizAxis.BaseTagList", - "VertAxis.BaseTagList", - ), self.cur_token_ - bases = [] - while self.next_token_ != ";": - bases.append(self.expect_script_tag_()) - self.expect_symbol_(";") - return bases - - def parse_base_script_list_(self, count): - assert self.cur_token_ in ( - "HorizAxis.BaseScriptList", - "VertAxis.BaseScriptList", - ), self.cur_token_ - scripts = [(self.parse_base_script_record_(count))] - while self.next_token_ == ",": - self.expect_symbol_(",") - scripts.append(self.parse_base_script_record_(count)) - self.expect_symbol_(";") - return scripts - - def parse_base_script_record_(self, count): - script_tag = self.expect_script_tag_() - base_tag = self.expect_script_tag_() - coords = [self.expect_number_() for i in range(count)] - return script_tag, base_tag, coords - - def parse_device_(self): - result = None - self.expect_symbol_("<") - self.expect_keyword_("device") - if self.next_token_ == "NULL": - self.expect_keyword_("NULL") - else: - result = [(self.expect_number_(), self.expect_number_())] - while self.next_token_ == ",": - self.expect_symbol_(",") - result.append((self.expect_number_(), self.expect_number_())) - result = tuple(result) # make it hashable - self.expect_symbol_(">") - return result - - def is_next_value_(self): - return ( - self.next_token_type_ is Lexer.NUMBER - or self.next_token_ == "<" - or self.next_token_ == "(" - ) - - def parse_valuerecord_(self, vertical): - if ( - self.next_token_type_ is Lexer.SYMBOL and self.next_token_ == "(" - ) or self.next_token_type_ is Lexer.NUMBER: - number, location = ( - self.expect_number_(variable=True), - self.cur_token_location_, - ) - if vertical: - val = self.ast.ValueRecord( - yAdvance=number, vertical=vertical, location=location - ) - else: - val = self.ast.ValueRecord( - xAdvance=number, vertical=vertical, location=location - ) - return val - self.expect_symbol_("<") - location = self.cur_token_location_ - if self.next_token_type_ is Lexer.NAME: - name = self.expect_name_() - if name == "NULL": - self.expect_symbol_(">") - return self.ast.ValueRecord() - vrd = self.valuerecords_.resolve(name) - if vrd is None: - raise FeatureLibError( - 'Unknown valueRecordDef "%s"' % name, self.cur_token_location_ - ) - value = vrd.value - xPlacement, yPlacement = (value.xPlacement, value.yPlacement) - xAdvance, yAdvance = (value.xAdvance, value.yAdvance) - else: - xPlacement, yPlacement, xAdvance, yAdvance = ( - self.expect_number_(variable=True), - self.expect_number_(variable=True), - self.expect_number_(variable=True), - self.expect_number_(variable=True), - ) - - if self.next_token_ == "<": - xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = ( - self.parse_device_(), - self.parse_device_(), - self.parse_device_(), - self.parse_device_(), - ) - allDeltas = sorted( - [ - delta - for size, delta in (xPlaDevice if xPlaDevice else ()) - + (yPlaDevice if yPlaDevice else ()) - + (xAdvDevice if xAdvDevice else ()) - + (yAdvDevice if yAdvDevice else ()) - ] - ) - if allDeltas[0] < -128 or allDeltas[-1] > 127: - raise FeatureLibError( - "Device value out of valid range (-128..127)", - self.cur_token_location_, - ) - else: - xPlaDevice, yPlaDevice, xAdvDevice, yAdvDevice = (None, None, None, None) - - self.expect_symbol_(">") - return self.ast.ValueRecord( - xPlacement, - yPlacement, - xAdvance, - yAdvance, - xPlaDevice, - yPlaDevice, - xAdvDevice, - yAdvDevice, - vertical=vertical, - location=location, - ) - - def parse_valuerecord_definition_(self, vertical): - # Parses 
a named value record definition. (See section `2.e.v `_) - assert self.is_cur_keyword_("valueRecordDef") - location = self.cur_token_location_ - value = self.parse_valuerecord_(vertical) - name = self.expect_name_() - self.expect_symbol_(";") - vrd = self.ast.ValueRecordDefinition(name, value, location=location) - self.valuerecords_.define(name, vrd) - return vrd - - def parse_languagesystem_(self): - assert self.cur_token_ == "languagesystem" - location = self.cur_token_location_ - script = self.expect_script_tag_() - language = self.expect_language_tag_() - self.expect_symbol_(";") - return self.ast.LanguageSystemStatement(script, language, location=location) - - def parse_feature_block_(self, variation=False): - if variation: - assert self.cur_token_ == "variation" - else: - assert self.cur_token_ == "feature" - location = self.cur_token_location_ - tag = self.expect_tag_() - vertical = tag in {"vkrn", "vpal", "vhal", "valt"} - - stylisticset = None - cv_feature = None - size_feature = False - if tag in self.SS_FEATURE_TAGS: - stylisticset = tag - elif tag in self.CV_FEATURE_TAGS: - cv_feature = tag - elif tag == "size": - size_feature = True - - if variation: - conditionset = self.expect_name_() - - use_extension = False - if self.next_token_ == "useExtension": - self.expect_keyword_("useExtension") - use_extension = True - - if variation: - block = self.ast.VariationBlock( - tag, conditionset, use_extension=use_extension, location=location - ) - else: - block = self.ast.FeatureBlock( - tag, use_extension=use_extension, location=location - ) - self.parse_block_(block, vertical, stylisticset, size_feature, cv_feature) - return block - - def parse_feature_reference_(self): - assert self.cur_token_ == "feature", self.cur_token_ - location = self.cur_token_location_ - featureName = self.expect_tag_() - self.expect_symbol_(";") - return self.ast.FeatureReferenceStatement(featureName, location=location) - - def parse_featureNames_(self, tag): - """Parses a ``featureNames`` statement found in stylistic set features. - See section `8.c `_.""" - assert self.cur_token_ == "featureNames", self.cur_token_ - block = self.ast.NestedBlock( - tag, self.cur_token_, location=self.cur_token_location_ - ) - self.expect_symbol_("{") - for symtab in self.symbol_tables_: - symtab.enter_scope() - while self.next_token_ != "}" or self.cur_comments_: - self.advance_lexer_(comments=True) - if self.cur_token_type_ is Lexer.COMMENT: - block.statements.append( - self.ast.Comment(self.cur_token_, location=self.cur_token_location_) - ) - elif self.is_cur_keyword_("name"): - location = self.cur_token_location_ - platformID, platEncID, langID, string = self.parse_name_() - block.statements.append( - self.ast.FeatureNameStatement( - tag, platformID, platEncID, langID, string, location=location - ) - ) - elif self.cur_token_ == ";": - continue - else: - raise FeatureLibError('Expected "name"', self.cur_token_location_) - self.expect_symbol_("}") - for symtab in self.symbol_tables_: - symtab.exit_scope() - self.expect_symbol_(";") - return block - - def parse_cvParameters_(self, tag): - # Parses a ``cvParameters`` block found in Character Variant features. - # See section `8.d `_. 
- assert self.cur_token_ == "cvParameters", self.cur_token_ - block = self.ast.NestedBlock( - tag, self.cur_token_, location=self.cur_token_location_ - ) - self.expect_symbol_("{") - for symtab in self.symbol_tables_: - symtab.enter_scope() - - statements = block.statements - while self.next_token_ != "}" or self.cur_comments_: - self.advance_lexer_(comments=True) - if self.cur_token_type_ is Lexer.COMMENT: - statements.append( - self.ast.Comment(self.cur_token_, location=self.cur_token_location_) - ) - elif self.is_cur_keyword_( - { - "FeatUILabelNameID", - "FeatUITooltipTextNameID", - "SampleTextNameID", - "ParamUILabelNameID", - } - ): - statements.append(self.parse_cvNameIDs_(tag, self.cur_token_)) - elif self.is_cur_keyword_("Character"): - statements.append(self.parse_cvCharacter_(tag)) - elif self.cur_token_ == ";": - continue - else: - raise FeatureLibError( - "Expected statement: got {} {}".format( - self.cur_token_type_, self.cur_token_ - ), - self.cur_token_location_, - ) - - self.expect_symbol_("}") - for symtab in self.symbol_tables_: - symtab.exit_scope() - self.expect_symbol_(";") - return block - - def parse_cvNameIDs_(self, tag, block_name): - assert self.cur_token_ == block_name, self.cur_token_ - block = self.ast.NestedBlock(tag, block_name, location=self.cur_token_location_) - self.expect_symbol_("{") - for symtab in self.symbol_tables_: - symtab.enter_scope() - while self.next_token_ != "}" or self.cur_comments_: - self.advance_lexer_(comments=True) - if self.cur_token_type_ is Lexer.COMMENT: - block.statements.append( - self.ast.Comment(self.cur_token_, location=self.cur_token_location_) - ) - elif self.is_cur_keyword_("name"): - location = self.cur_token_location_ - platformID, platEncID, langID, string = self.parse_name_() - block.statements.append( - self.ast.CVParametersNameStatement( - tag, - platformID, - platEncID, - langID, - string, - block_name, - location=location, - ) - ) - elif self.cur_token_ == ";": - continue - else: - raise FeatureLibError('Expected "name"', self.cur_token_location_) - self.expect_symbol_("}") - for symtab in self.symbol_tables_: - symtab.exit_scope() - self.expect_symbol_(";") - return block - - def parse_cvCharacter_(self, tag): - assert self.cur_token_ == "Character", self.cur_token_ - location, character = self.cur_token_location_, self.expect_any_number_() - self.expect_symbol_(";") - if not (0xFFFFFF >= character >= 0): - raise FeatureLibError( - "Character value must be between " - "{:#x} and {:#x}".format(0, 0xFFFFFF), - location, - ) - return self.ast.CharacterStatement(character, tag, location=location) - - def parse_FontRevision_(self): - # Parses a ``FontRevision`` statement found in the head table. See - # `section 9.c `_. 
- assert self.cur_token_ == "FontRevision", self.cur_token_ - location, version = self.cur_token_location_, self.expect_float_() - self.expect_symbol_(";") - if version <= 0: - raise FeatureLibError("Font revision numbers must be positive", location) - return self.ast.FontRevisionStatement(version, location=location) - - def parse_conditionset_(self): - name = self.expect_name_() - - conditions = {} - self.expect_symbol_("{") - - while self.next_token_ != "}": - self.advance_lexer_() - if self.cur_token_type_ is not Lexer.NAME: - raise FeatureLibError("Expected an axis name", self.cur_token_location_) - - axis = self.cur_token_ - if axis in conditions: - raise FeatureLibError( - f"Repeated condition for axis {axis}", self.cur_token_location_ - ) - - if self.next_token_type_ is Lexer.FLOAT: - min_value = self.expect_float_() - elif self.next_token_type_ is Lexer.NUMBER: - min_value = self.expect_number_(variable=False) - - if self.next_token_type_ is Lexer.FLOAT: - max_value = self.expect_float_() - elif self.next_token_type_ is Lexer.NUMBER: - max_value = self.expect_number_(variable=False) - self.expect_symbol_(";") - - conditions[axis] = (min_value, max_value) - - self.expect_symbol_("}") - - finalname = self.expect_name_() - if finalname != name: - raise FeatureLibError('Expected "%s"' % name, self.cur_token_location_) - return self.ast.ConditionsetStatement(name, conditions) - - def parse_block_( - self, block, vertical, stylisticset=None, size_feature=False, cv_feature=None - ): - self.expect_symbol_("{") - for symtab in self.symbol_tables_: - symtab.enter_scope() - - statements = block.statements - while self.next_token_ != "}" or self.cur_comments_: - self.advance_lexer_(comments=True) - if self.cur_token_type_ is Lexer.COMMENT: - statements.append( - self.ast.Comment(self.cur_token_, location=self.cur_token_location_) - ) - elif self.cur_token_type_ is Lexer.GLYPHCLASS: - statements.append(self.parse_glyphclass_definition_()) - elif self.is_cur_keyword_("anchorDef"): - statements.append(self.parse_anchordef_()) - elif self.is_cur_keyword_({"enum", "enumerate"}): - statements.append(self.parse_enumerate_(vertical=vertical)) - elif self.is_cur_keyword_("feature"): - statements.append(self.parse_feature_reference_()) - elif self.is_cur_keyword_("ignore"): - statements.append(self.parse_ignore_()) - elif self.is_cur_keyword_("language"): - statements.append(self.parse_language_()) - elif self.is_cur_keyword_("lookup"): - statements.append(self.parse_lookup_(vertical)) - elif self.is_cur_keyword_("lookupflag"): - statements.append(self.parse_lookupflag_()) - elif self.is_cur_keyword_("markClass"): - statements.append(self.parse_markClass_()) - elif self.is_cur_keyword_({"pos", "position"}): - statements.append( - self.parse_position_(enumerated=False, vertical=vertical) - ) - elif self.is_cur_keyword_("script"): - statements.append(self.parse_script_()) - elif self.is_cur_keyword_({"sub", "substitute", "rsub", "reversesub"}): - statements.append(self.parse_substitute_()) - elif self.is_cur_keyword_("subtable"): - statements.append(self.parse_subtable_()) - elif self.is_cur_keyword_("valueRecordDef"): - statements.append(self.parse_valuerecord_definition_(vertical)) - elif stylisticset and self.is_cur_keyword_("featureNames"): - statements.append(self.parse_featureNames_(stylisticset)) - elif cv_feature and self.is_cur_keyword_("cvParameters"): - statements.append(self.parse_cvParameters_(cv_feature)) - elif size_feature and self.is_cur_keyword_("parameters"): - 
statements.append(self.parse_size_parameters_()) - elif size_feature and self.is_cur_keyword_("sizemenuname"): - statements.append(self.parse_size_menuname_()) - elif ( - self.cur_token_type_ is Lexer.NAME - and self.cur_token_ in self.extensions - ): - statements.append(self.extensions[self.cur_token_](self)) - elif self.cur_token_ == ";": - continue - else: - raise FeatureLibError( - "Expected glyph class definition or statement: got {} {}".format( - self.cur_token_type_, self.cur_token_ - ), - self.cur_token_location_, - ) - - self.expect_symbol_("}") - for symtab in self.symbol_tables_: - symtab.exit_scope() - - name = self.expect_name_() - if name != block.name.strip(): - raise FeatureLibError( - 'Expected "%s"' % block.name.strip(), self.cur_token_location_ - ) - self.expect_symbol_(";") - - # A multiple substitution may have a single destination, in which case - # it will look just like a single substitution. So if there are both - # multiple and single substitutions, upgrade all the single ones to - # multiple substitutions. - - # Check if we have a mix of non-contextual singles and multiples. - has_single = False - has_multiple = False - for s in statements: - if isinstance(s, self.ast.SingleSubstStatement): - has_single = not any([s.prefix, s.suffix, s.forceChain]) - elif isinstance(s, self.ast.MultipleSubstStatement): - has_multiple = not any([s.prefix, s.suffix, s.forceChain]) - - # Upgrade all single substitutions to multiple substitutions. - if has_single and has_multiple: - statements = [] - for s in block.statements: - if isinstance(s, self.ast.SingleSubstStatement): - glyphs = s.glyphs[0].glyphSet() - replacements = s.replacements[0].glyphSet() - if len(replacements) == 1: - replacements *= len(glyphs) - for i, glyph in enumerate(glyphs): - statements.append( - self.ast.MultipleSubstStatement( - s.prefix, - glyph, - s.suffix, - [replacements[i]], - s.forceChain, - location=s.location, - ) - ) - else: - statements.append(s) - block.statements = statements - - def is_cur_keyword_(self, k): - if self.cur_token_type_ is Lexer.NAME: - if isinstance(k, type("")): # basestring is gone in Python3 - return self.cur_token_ == k - else: - return self.cur_token_ in k - return False - - def expect_class_name_(self): - self.advance_lexer_() - if self.cur_token_type_ is not Lexer.GLYPHCLASS: - raise FeatureLibError("Expected @NAME", self.cur_token_location_) - return self.cur_token_ - - def expect_cid_(self): - self.advance_lexer_() - if self.cur_token_type_ is Lexer.CID: - return self.cur_token_ - raise FeatureLibError("Expected a CID", self.cur_token_location_) - - def expect_filename_(self): - self.advance_lexer_() - if self.cur_token_type_ is not Lexer.FILENAME: - raise FeatureLibError("Expected file name", self.cur_token_location_) - return self.cur_token_ - - def expect_glyph_(self): - self.advance_lexer_() - if self.cur_token_type_ is Lexer.NAME: - self.cur_token_ = self.cur_token_.lstrip("\\") - if len(self.cur_token_) > 63: - raise FeatureLibError( - "Glyph names must not be longer than 63 characters", - self.cur_token_location_, - ) - return self.cur_token_ - elif self.cur_token_type_ is Lexer.CID: - return "cid%05d" % self.cur_token_ - raise FeatureLibError("Expected a glyph name or CID", self.cur_token_location_) - - def check_glyph_name_in_glyph_set(self, *names): - """Adds a glyph name (just `start`) or glyph names of a - range (`start` and `end`) which are not in the glyph set - to the "missing list" for future error reporting. - - If no glyph set is present, does nothing. 
- """ - if self.glyphNames_: - for name in names: - if name in self.glyphNames_: - continue - if name not in self.missing: - self.missing[name] = self.cur_token_location_ - - def expect_markClass_reference_(self): - name = self.expect_class_name_() - mc = self.glyphclasses_.resolve(name) - if mc is None: - raise FeatureLibError( - "Unknown markClass @%s" % name, self.cur_token_location_ - ) - if not isinstance(mc, self.ast.MarkClass): - raise FeatureLibError( - "@%s is not a markClass" % name, self.cur_token_location_ - ) - return mc - - def expect_tag_(self): - self.advance_lexer_() - if self.cur_token_type_ is not Lexer.NAME: - raise FeatureLibError("Expected a tag", self.cur_token_location_) - if len(self.cur_token_) > 4: - raise FeatureLibError( - "Tags cannot be longer than 4 characters", self.cur_token_location_ - ) - return (self.cur_token_ + " ")[:4] - - def expect_script_tag_(self): - tag = self.expect_tag_() - if tag == "dflt": - raise FeatureLibError( - '"dflt" is not a valid script tag; use "DFLT" instead', - self.cur_token_location_, - ) - return tag - - def expect_language_tag_(self): - tag = self.expect_tag_() - if tag == "DFLT": - raise FeatureLibError( - '"DFLT" is not a valid language tag; use "dflt" instead', - self.cur_token_location_, - ) - return tag - - def expect_symbol_(self, symbol): - self.advance_lexer_() - if self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == symbol: - return symbol - raise FeatureLibError("Expected '%s'" % symbol, self.cur_token_location_) - - def expect_keyword_(self, keyword): - self.advance_lexer_() - if self.cur_token_type_ is Lexer.NAME and self.cur_token_ == keyword: - return self.cur_token_ - raise FeatureLibError('Expected "%s"' % keyword, self.cur_token_location_) - - def expect_name_(self): - self.advance_lexer_() - if self.cur_token_type_ is Lexer.NAME: - return self.cur_token_ - raise FeatureLibError("Expected a name", self.cur_token_location_) - - def expect_number_(self, variable=False): - self.advance_lexer_() - if self.cur_token_type_ is Lexer.NUMBER: - return self.cur_token_ - if variable and self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == "(": - return self.expect_variable_scalar_() - raise FeatureLibError("Expected a number", self.cur_token_location_) - - def expect_variable_scalar_(self): - self.advance_lexer_() # "(" - scalar = VariableScalar() - while True: - if self.cur_token_type_ == Lexer.SYMBOL and self.cur_token_ == ")": - break - location, value = self.expect_master_() - scalar.add_value(location, value) - return scalar - - def expect_master_(self): - location = {} - while True: - if self.cur_token_type_ is not Lexer.NAME: - raise FeatureLibError("Expected an axis name", self.cur_token_location_) - axis = self.cur_token_ - self.advance_lexer_() - if not (self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == "="): - raise FeatureLibError( - "Expected an equals sign", self.cur_token_location_ - ) - value = self.expect_number_() - location[axis] = value - if self.next_token_type_ is Lexer.NAME and self.next_token_[0] == ":": - # Lexer has just read the value as a glyph name. 
We'll correct it later - break - self.advance_lexer_() - if not (self.cur_token_type_ is Lexer.SYMBOL and self.cur_token_ == ","): - raise FeatureLibError( - "Expected an comma or an equals sign", self.cur_token_location_ - ) - self.advance_lexer_() - self.advance_lexer_() - value = int(self.cur_token_[1:]) - self.advance_lexer_() - return location, value - - def expect_any_number_(self): - self.advance_lexer_() - if self.cur_token_type_ in Lexer.NUMBERS: - return self.cur_token_ - raise FeatureLibError( - "Expected a decimal, hexadecimal or octal number", self.cur_token_location_ - ) - - def expect_float_(self): - self.advance_lexer_() - if self.cur_token_type_ is Lexer.FLOAT: - return self.cur_token_ - raise FeatureLibError( - "Expected a floating-point number", self.cur_token_location_ - ) - - def expect_decipoint_(self): - if self.next_token_type_ == Lexer.FLOAT: - return self.expect_float_() - elif self.next_token_type_ is Lexer.NUMBER: - return self.expect_number_() / 10 - else: - raise FeatureLibError( - "Expected an integer or floating-point number", self.cur_token_location_ - ) - - def expect_stat_flags(self): - value = 0 - flags = { - "OlderSiblingFontAttribute": 1, - "ElidableAxisValueName": 2, - } - while self.next_token_ != ";": - if self.next_token_ in flags: - name = self.expect_name_() - value = value | flags[name] - else: - raise FeatureLibError( - f"Unexpected STAT flag {self.cur_token_}", self.cur_token_location_ - ) - return value - - def expect_stat_values_(self): - if self.next_token_type_ == Lexer.FLOAT: - return self.expect_float_() - elif self.next_token_type_ is Lexer.NUMBER: - return self.expect_number_() - else: - raise FeatureLibError( - "Expected an integer or floating-point number", self.cur_token_location_ - ) - - def expect_string_(self): - self.advance_lexer_() - if self.cur_token_type_ is Lexer.STRING: - return self.cur_token_ - raise FeatureLibError("Expected a string", self.cur_token_location_) - - def advance_lexer_(self, comments=False): - if comments and self.cur_comments_: - self.cur_token_type_ = Lexer.COMMENT - self.cur_token_, self.cur_token_location_ = self.cur_comments_.pop(0) - return - else: - self.cur_token_type_, self.cur_token_, self.cur_token_location_ = ( - self.next_token_type_, - self.next_token_, - self.next_token_location_, - ) - while True: - try: - ( - self.next_token_type_, - self.next_token_, - self.next_token_location_, - ) = next(self.lexer_) - except StopIteration: - self.next_token_type_, self.next_token_ = (None, None) - if self.next_token_type_ != Lexer.COMMENT: - break - self.cur_comments_.append((self.next_token_, self.next_token_location_)) - - @staticmethod - def reverse_string_(s): - """'abc' --> 'cba'""" - return "".join(reversed(list(s))) - - def make_cid_range_(self, location, start, limit): - """(location, 999, 1001) --> ["cid00999", "cid01000", "cid01001"]""" - result = list() - if start > limit: - raise FeatureLibError( - "Bad range: start should be less than limit", location - ) - for cid in range(start, limit + 1): - result.append("cid%05d" % cid) - return result - - def make_glyph_range_(self, location, start, limit): - """(location, "a.sc", "d.sc") --> ["a.sc", "b.sc", "c.sc", "d.sc"]""" - result = list() - if len(start) != len(limit): - raise FeatureLibError( - 'Bad range: "%s" and "%s" should have the same length' % (start, limit), - location, - ) - - rev = self.reverse_string_ - prefix = os.path.commonprefix([start, limit]) - suffix = rev(os.path.commonprefix([rev(start), rev(limit)])) - if len(suffix) > 
0: - start_range = start[len(prefix) : -len(suffix)] - limit_range = limit[len(prefix) : -len(suffix)] - else: - start_range = start[len(prefix) :] - limit_range = limit[len(prefix) :] - - if start_range >= limit_range: - raise FeatureLibError( - "Start of range must be smaller than its end", location - ) - - uppercase = re.compile(r"^[A-Z]$") - if uppercase.match(start_range) and uppercase.match(limit_range): - for c in range(ord(start_range), ord(limit_range) + 1): - result.append("%s%c%s" % (prefix, c, suffix)) - return result - - lowercase = re.compile(r"^[a-z]$") - if lowercase.match(start_range) and lowercase.match(limit_range): - for c in range(ord(start_range), ord(limit_range) + 1): - result.append("%s%c%s" % (prefix, c, suffix)) - return result - - digits = re.compile(r"^[0-9]{1,3}$") - if digits.match(start_range) and digits.match(limit_range): - for i in range(int(start_range, 10), int(limit_range, 10) + 1): - number = ("000" + str(i))[-len(start_range) :] - result.append("%s%s%s" % (prefix, number, suffix)) - return result - - raise FeatureLibError('Bad range: "%s-%s"' % (start, limit), location) - - -class SymbolTable(object): - def __init__(self): - self.scopes_ = [{}] - - def enter_scope(self): - self.scopes_.append({}) - - def exit_scope(self): - self.scopes_.pop() - - def define(self, name, item): - self.scopes_[-1][name] = item - - def resolve(self, name): - for scope in reversed(self.scopes_): - item = scope.get(name) - if item: - return item - return None diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fsspec/implementations/cached.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fsspec/implementations/cached.py deleted file mode 100644 index 379cf04cffeedc85618952c0dcea152c9ebc6eaa..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fsspec/implementations/cached.py +++ /dev/null @@ -1,867 +0,0 @@ -from __future__ import annotations - -import contextlib -import hashlib -import inspect -import logging -import os -import pickle -import tempfile -import time -from shutil import rmtree -from typing import ClassVar - -from fsspec import AbstractFileSystem, filesystem -from fsspec.callbacks import _DEFAULT_CALLBACK -from fsspec.compression import compr -from fsspec.core import BaseCache, MMapCache -from fsspec.exceptions import BlocksizeMismatchError -from fsspec.spec import AbstractBufferedFile -from fsspec.utils import infer_compression - -logger = logging.getLogger("fsspec.cached") - - -class CachingFileSystem(AbstractFileSystem): - """Locally caching filesystem, layer over any other FS - - This class implements chunk-wise local storage of remote files, for quick - access after the initial download. The files are stored in a given - directory with hashes of URLs for the filenames. If no directory is given, - a temporary one is used, which should be cleaned up by the OS after the - process ends. The files themselves are sparse (as implemented in - :class:`~fsspec.caching.MMapCache`), so only the data which is accessed - takes up space. 
- - Restrictions: - - - the block-size must be the same for each access of a given file, unless - all blocks of the file have already been read - - caching can only be applied to file-systems which produce files - derived from fsspec.spec.AbstractBufferedFile ; LocalFileSystem is also - allowed, for testing - """ - - protocol: ClassVar[str | tuple[str, ...]] = ("blockcache", "cached") - - def __init__( - self, - target_protocol=None, - cache_storage="TMP", - cache_check=10, - check_files=False, - expiry_time=604800, - target_options=None, - fs=None, - same_names=False, - compression=None, - **kwargs, - ): - """ - - Parameters - ---------- - target_protocol: str (optional) - Target filesystem protocol. Provide either this or ``fs``. - cache_storage: str or list(str) - Location to store files. If "TMP", this is a temporary directory, - and will be cleaned up by the OS when this process ends (or later). - If a list, each location will be tried in the order given, but - only the last will be considered writable. - cache_check: int - Number of seconds between reload of cache metadata - check_files: bool - Whether to explicitly see if the UID of the remote file matches - the stored one before using. Warning: some file systems such as - HTTP cannot reliably give a unique hash of the contents of some - path, so be sure to set this option to False. - expiry_time: int - The time in seconds after which a local copy is considered useless. - Set to falsy to prevent expiry. The default is equivalent to one - week. - target_options: dict or None - Passed to the instantiation of the FS, if fs is None. - fs: filesystem instance - The target filesystem to run against. Provide this or ``protocol``. - same_names: bool (optional) - By default, target URLs are hashed, so that files from different backends - with the same basename do not conflict. If this is true, the original - basename is used. - compression: str (optional) - To decompress on download. Can be 'infer' (guess from the URL name), - one of the entries in ``fsspec.compression.compr``, or None for no - decompression. - """ - super().__init__(**kwargs) - if fs is None and target_protocol is None: - raise ValueError( - "Please provide filesystem instance(fs) or target_protocol" - ) - if not (fs is None) ^ (target_protocol is None): - raise ValueError( - "Both filesystems (fs) and target_protocol may not be both given." 
- ) - if cache_storage == "TMP": - storage = [tempfile.mkdtemp()] - else: - if isinstance(cache_storage, str): - storage = [cache_storage] - else: - storage = cache_storage - os.makedirs(storage[-1], exist_ok=True) - self.storage = storage - self.kwargs = target_options or {} - self.cache_check = cache_check - self.check_files = check_files - self.expiry = expiry_time - self.compression = compression - # TODO: same_names should allow for variable prefix, not only - # to keep the basename - self.same_names = same_names - self.target_protocol = ( - target_protocol - if isinstance(target_protocol, str) - else (fs.protocol if isinstance(fs.protocol, str) else fs.protocol[0]) - ) - self.load_cache() - self.fs = fs if fs is not None else filesystem(target_protocol, **self.kwargs) - - def _strip_protocol(path): - # acts as a method, since each instance has a difference target - return self.fs._strip_protocol(type(self)._strip_protocol(path)) - - self._strip_protocol = _strip_protocol - - def _mkcache(self): - os.makedirs(self.storage[-1], exist_ok=True) - - def load_cache(self): - """Read set of stored blocks from file""" - cached_files = [] - for storage in self.storage: - fn = os.path.join(storage, "cache") - if os.path.exists(fn): - with open(fn, "rb") as f: - # TODO: consolidate blocks here - loaded_cached_files = pickle.load(f) - for c in loaded_cached_files.values(): - if isinstance(c["blocks"], list): - c["blocks"] = set(c["blocks"]) - cached_files.append(loaded_cached_files) - else: - cached_files.append({}) - self._mkcache() - self.cached_files = cached_files or [{}] - self.last_cache = time.time() - - def save_cache(self): - """Save set of stored blocks from file""" - fn = os.path.join(self.storage[-1], "cache") - # TODO: a file lock could be used to ensure file does not change - # between re-read and write; but occasional duplicated reads ok. - cache = self.cached_files[-1] - if os.path.exists(fn): - with open(fn, "rb") as f: - cached_files = pickle.load(f) - for k, c in cached_files.items(): - if k in cache: - if c["blocks"] is True or cache[k]["blocks"] is True: - c["blocks"] = True - else: - # self.cached_files[*][*]["blocks"] must continue to - # point to the same set object so that updates - # performed by MMapCache are propagated back to - # self.cached_files. 
- blocks = cache[k]["blocks"] - blocks.update(c["blocks"]) - c["blocks"] = blocks - c["time"] = max(c["time"], cache[k]["time"]) - c["uid"] = cache[k]["uid"] - - # Files can be added to cache after it was written once - for k, c in cache.items(): - if k not in cached_files: - cached_files[k] = c - else: - cached_files = cache - cache = {k: v.copy() for k, v in cached_files.items()} - for c in cache.values(): - if isinstance(c["blocks"], set): - c["blocks"] = list(c["blocks"]) - self._mkcache() - with atomic_write(fn) as f: - pickle.dump(cache, f) - self.cached_files[-1] = cached_files - self.last_cache = time.time() - - def _check_cache(self): - """Reload caches if time elapsed or any disappeared""" - self._mkcache() - if not self.cache_check: - # explicitly told not to bother checking - return - timecond = time.time() - self.last_cache > self.cache_check - existcond = all(os.path.exists(storage) for storage in self.storage) - if timecond or not existcond: - self.load_cache() - - def _check_file(self, path): - """Is path in cache and still valid""" - path = self._strip_protocol(path) - - self._check_cache() - for storage, cache in zip(self.storage, self.cached_files): - if path not in cache: - continue - detail = cache[path].copy() - if self.check_files: - if detail["uid"] != self.fs.ukey(path): - continue - if self.expiry: - if time.time() - detail["time"] > self.expiry: - continue - fn = os.path.join(storage, detail["fn"]) - if os.path.exists(fn): - return detail, fn - return False - - def clear_cache(self): - """Remove all files and metadat from the cache - - In the case of multiple cache locations, this clears only the last one, - which is assumed to be the read/write one. - """ - rmtree(self.storage[-1]) - self.load_cache() - - def clear_expired_cache(self, expiry_time=None): - """Remove all expired files and metadata from the cache - - In the case of multiple cache locations, this clears only the last one, - which is assumed to be the read/write one. - - Parameters - ---------- - expiry_time: int - The time in seconds after which a local copy is considered useless. - If not defined the default is equivalent to the attribute from the - file caching instantiation. - """ - - if not expiry_time: - expiry_time = self.expiry - - self._check_cache() - - for path, detail in self.cached_files[-1].copy().items(): - if time.time() - detail["time"] > expiry_time: - if self.same_names: - basename = os.path.basename(detail["original"]) - fn = os.path.join(self.storage[-1], basename) - else: - fn = os.path.join(self.storage[-1], detail["fn"]) - if os.path.exists(fn): - os.remove(fn) - self.cached_files[-1].pop(path) - - if self.cached_files[-1]: - cache_path = os.path.join(self.storage[-1], "cache") - with atomic_write(cache_path) as fc: - pickle.dump(self.cached_files[-1], fc) - else: - rmtree(self.storage[-1]) - self.load_cache() - - def pop_from_cache(self, path): - """Remove cached version of given file - - Deletes local copy of the given (remote) path. 
If it is found in a cache - location which is not the last, it is assumed to be read-only, and - raises PermissionError - """ - path = self._strip_protocol(path) - details = self._check_file(path) - if not details: - return - _, fn = details - if fn.startswith(self.storage[-1]): - # is in in writable cache - os.remove(fn) - self.cached_files[-1].pop(path) - self.save_cache() - else: - raise PermissionError( - "Can only delete cached file in last, writable cache location" - ) - - def _open( - self, - path, - mode="rb", - block_size=None, - autocommit=True, - cache_options=None, - **kwargs, - ): - """Wrap the target _open - - If the whole file exists in the cache, just open it locally and - return that. - - Otherwise, open the file on the target FS, and make it have a mmap - cache pointing to the location which we determine, in our cache. - The ``blocks`` instance is shared, so as the mmap cache instance - updates, so does the entry in our ``cached_files`` attribute. - We monkey-patch this file, so that when it closes, we call - ``close_and_update`` to save the state of the blocks. - """ - path = self._strip_protocol(path) - - path = self.fs._strip_protocol(path) - if "r" not in mode: - return self.fs._open( - path, - mode=mode, - block_size=block_size, - autocommit=autocommit, - cache_options=cache_options, - **kwargs, - ) - detail = self._check_file(path) - if detail: - # file is in cache - detail, fn = detail - hash, blocks = detail["fn"], detail["blocks"] - if blocks is True: - # stored file is complete - logger.debug("Opening local copy of %s" % path) - return open(fn, mode) - # TODO: action where partial file exists in read-only cache - logger.debug("Opening partially cached copy of %s" % path) - else: - hash = self.hash_name(path, self.same_names) - fn = os.path.join(self.storage[-1], hash) - blocks = set() - detail = { - "original": path, - "fn": hash, - "blocks": blocks, - "time": time.time(), - "uid": self.fs.ukey(path), - } - self.cached_files[-1][path] = detail - logger.debug("Creating local sparse file for %s" % path) - - # call target filesystems open - self._mkcache() - f = self.fs._open( - path, - mode=mode, - block_size=block_size, - autocommit=autocommit, - cache_options=cache_options, - cache_type="none", - **kwargs, - ) - if self.compression: - comp = ( - infer_compression(path) - if self.compression == "infer" - else self.compression - ) - f = compr[comp](f, mode="rb") - if "blocksize" in detail: - if detail["blocksize"] != f.blocksize: - raise BlocksizeMismatchError( - "Cached file must be reopened with same block" - "size as original (old: %i, new %i)" - "" % (detail["blocksize"], f.blocksize) - ) - else: - detail["blocksize"] = f.blocksize - f.cache = MMapCache(f.blocksize, f._fetch_range, f.size, fn, blocks) - close = f.close - f.close = lambda: self.close_and_update(f, close) - self.save_cache() - return f - - def hash_name(self, path, same_name): - return hash_name(path, same_name=same_name) - - def close_and_update(self, f, close): - """Called when a file is closing, so store the set of blocks""" - if f.closed: - return - path = self._strip_protocol(f.path) - - c = self.cached_files[-1][path] - if c["blocks"] is not True and len(c["blocks"]) * f.blocksize >= f.size: - c["blocks"] = True - try: - logger.debug("going to save") - self.save_cache() - logger.debug("saved") - except OSError: - logger.debug("Cache saving failed while closing file") - except NameError: - logger.debug("Cache save failed due to interpreter shutdown") - close() - f.closed = True - - def 
__getattribute__(self, item): - if item in [ - "load_cache", - "_open", - "save_cache", - "close_and_update", - "__init__", - "__getattribute__", - "__reduce__", - "_make_local_details", - "open", - "cat", - "cat_file", - "get", - "read_block", - "tail", - "head", - "_check_file", - "_check_cache", - "_mkcache", - "clear_cache", - "clear_expired_cache", - "pop_from_cache", - "_mkcache", - "local_file", - "_paths_from_path", - "get_mapper", - "open_many", - "commit_many", - "hash_name", - "__hash__", - "__eq__", - "to_json", - ]: - # all the methods defined in this class. Note `open` here, since - # it calls `_open`, but is actually in superclass - return lambda *args, **kw: getattr(type(self), item).__get__(self)( - *args, **kw - ) - if item in ["__reduce_ex__"]: - raise AttributeError - if item in ["_cache"]: - # class attributes - return getattr(type(self), item) - if item == "__class__": - return type(self) - d = object.__getattribute__(self, "__dict__") - fs = d.get("fs", None) # fs is not immediately defined - if item in d: - return d[item] - elif fs is not None: - if item in fs.__dict__: - # attribute of instance - return fs.__dict__[item] - # attributed belonging to the target filesystem - cls = type(fs) - m = getattr(cls, item) - if (inspect.isfunction(m) or inspect.isdatadescriptor(m)) and ( - not hasattr(m, "__self__") or m.__self__ is None - ): - # instance method - return m.__get__(fs, cls) - return m # class method or attribute - else: - # attributes of the superclass, while target is being set up - return super().__getattribute__(item) - - def __eq__(self, other): - """Test for equality.""" - if self is other: - return True - if not isinstance(other, type(self)): - return False - return ( - self.storage == other.storage - and self.kwargs == other.kwargs - and self.cache_check == other.cache_check - and self.check_files == other.check_files - and self.expiry == other.expiry - and self.compression == other.compression - and self.same_names == other.same_names - and self.target_protocol == other.target_protocol - ) - - def __hash__(self): - """Calculate hash.""" - return ( - hash(tuple(self.storage)) - ^ hash(str(self.kwargs)) - ^ hash(self.cache_check) - ^ hash(self.check_files) - ^ hash(self.expiry) - ^ hash(self.compression) - ^ hash(self.same_names) - ^ hash(self.target_protocol) - ) - - def to_json(self): - """Calculate JSON representation. - - Not implemented yet for CachingFileSystem. - """ - raise NotImplementedError( - "CachingFileSystem JSON representation not implemented" - ) - - -class WholeFileCacheFileSystem(CachingFileSystem): - """Caches whole remote files on first access - - This class is intended as a layer over any other file system, and - will make a local copy of each file accessed, so that all subsequent - reads are local. This is similar to ``CachingFileSystem``, but without - the block-wise functionality and so can work even when sparse files - are not allowed. See its docstring for definition of the init - arguments. - - The class still needs access to the remote store for listing files, - and may refresh cached files. 
- """ - - protocol = "filecache" - local_file = True - - def open_many(self, open_files): - paths = [of.path for of in open_files] - if "r" in open_files.mode: - self._mkcache() - else: - return [ - LocalTempFile(self.fs, path, mode=open_files.mode) for path in paths - ] - - if self.compression: - raise NotImplementedError - details = [self._check_file(sp) for sp in paths] - downpath = [p for p, d in zip(paths, details) if not d] - downfn0 = [ - os.path.join(self.storage[-1], self.hash_name(p, self.same_names)) - for p, d in zip(paths, details) - ] # keep these path names for opening later - downfn = [fn for fn, d in zip(downfn0, details) if not d] - if downpath: - # skip if all files are already cached and up to date - self.fs.get(downpath, downfn) - - # update metadata - only happens when downloads are successful - newdetail = [ - { - "original": path, - "fn": self.hash_name(path, self.same_names), - "blocks": True, - "time": time.time(), - "uid": self.fs.ukey(path), - } - for path in downpath - ] - self.cached_files[-1].update( - {path: detail for path, detail in zip(downpath, newdetail)} - ) - self.save_cache() - - def firstpart(fn): - # helper to adapt both whole-file and simple-cache - return fn[1] if isinstance(fn, tuple) else fn - - return [ - open(firstpart(fn0) if fn0 else fn1, mode=open_files.mode) - for fn0, fn1 in zip(details, downfn0) - ] - - def commit_many(self, open_files): - self.fs.put([f.fn for f in open_files], [f.path for f in open_files]) - [f.close() for f in open_files] - for f in open_files: - # in case autocommit is off, and so close did not already delete - try: - os.remove(f.name) - except FileNotFoundError: - pass - - def _make_local_details(self, path): - hash = self.hash_name(path, self.same_names) - fn = os.path.join(self.storage[-1], hash) - detail = { - "original": path, - "fn": hash, - "blocks": True, - "time": time.time(), - "uid": self.fs.ukey(path), - } - self.cached_files[-1][path] = detail - logger.debug("Copying %s to local cache" % path) - return fn - - def cat( - self, - path, - recursive=False, - on_error="raise", - callback=_DEFAULT_CALLBACK, - **kwargs, - ): - paths = self.expand_path( - path, recursive=recursive, maxdepth=kwargs.get("maxdepth", None) - ) - getpaths = [] - storepaths = [] - fns = [] - out = {} - for p in paths.copy(): - try: - detail = self._check_file(p) - if not detail: - fn = self._make_local_details(p) - getpaths.append(p) - storepaths.append(fn) - else: - detail, fn = detail if isinstance(detail, tuple) else (None, detail) - fns.append(fn) - except Exception as e: - if on_error == "raise": - raise - if on_error == "return": - out[p] = e - paths.remove(p) - - if getpaths: - self.fs.get(getpaths, storepaths) - self.save_cache() - - callback.set_size(len(paths)) - for p, fn in zip(paths, fns): - with open(fn, "rb") as f: - out[p] = f.read() - callback.relative_update(1) - if isinstance(path, str) and len(paths) == 1 and recursive is False: - out = out[paths[0]] - return out - - def _open(self, path, mode="rb", **kwargs): - path = self._strip_protocol(path) - if "r" not in mode: - return LocalTempFile(self, path, mode=mode) - detail = self._check_file(path) - if detail: - detail, fn = detail - _, blocks = detail["fn"], detail["blocks"] - if blocks is True: - logger.debug("Opening local copy of %s" % path) - - # In order to support downstream filesystems to be able to - # infer the compression from the original filename, like - # the `TarFileSystem`, let's extend the `io.BufferedReader` - # fileobject protocol by adding a 
dedicated attribute - # `original`. - f = open(fn, mode) - f.original = detail.get("original") - return f - else: - raise ValueError( - "Attempt to open partially cached file %s" - "as a wholly cached file" % path - ) - else: - fn = self._make_local_details(path) - kwargs["mode"] = mode - - # call target filesystems open - self._mkcache() - if self.compression: - with self.fs._open(path, **kwargs) as f, open(fn, "wb") as f2: - if isinstance(f, AbstractBufferedFile): - # want no type of caching if just downloading whole thing - f.cache = BaseCache(0, f.cache.fetcher, f.size) - comp = ( - infer_compression(path) - if self.compression == "infer" - else self.compression - ) - f = compr[comp](f, mode="rb") - data = True - while data: - block = getattr(f, "blocksize", 5 * 2**20) - data = f.read(block) - f2.write(data) - else: - self.fs.get(path, fn) - self.save_cache() - return self._open(path, mode) - - -class SimpleCacheFileSystem(WholeFileCacheFileSystem): - """Caches whole remote files on first access - - This class is intended as a layer over any other file system, and - will make a local copy of each file accessed, so that all subsequent - reads are local. This implementation only copies whole files, and - does not keep any metadata about the download time or file details. - It is therefore safer to use in multi-threaded/concurrent situations. - - This is the only of the caching filesystems that supports write: you will - be given a real local open file, and upon close and commit, it will be - uploaded to the target filesystem; the writability or the target URL is - not checked until that time. - - """ - - protocol = "simplecache" - local_file = True - - def __init__(self, **kwargs): - kw = kwargs.copy() - for key in ["cache_check", "expiry_time", "check_files"]: - kw[key] = False - super().__init__(**kw) - for storage in self.storage: - if not os.path.exists(storage): - os.makedirs(storage, exist_ok=True) - self.cached_files = [{}] - - def _check_file(self, path): - self._check_cache() - sha = self.hash_name(path, self.same_names) - for storage in self.storage: - fn = os.path.join(storage, sha) - if os.path.exists(fn): - return fn - - def save_cache(self): - pass - - def load_cache(self): - pass - - def _open(self, path, mode="rb", **kwargs): - path = self._strip_protocol(path) - - if "r" not in mode: - return LocalTempFile(self, path, mode=mode) - fn = self._check_file(path) - if fn: - return open(fn, mode) - - sha = self.hash_name(path, self.same_names) - fn = os.path.join(self.storage[-1], sha) - logger.debug("Copying %s to local cache" % path) - kwargs["mode"] = mode - - self._mkcache() - if self.compression: - with self.fs._open(path, **kwargs) as f, open(fn, "wb") as f2: - if isinstance(f, AbstractBufferedFile): - # want no type of caching if just downloading whole thing - f.cache = BaseCache(0, f.cache.fetcher, f.size) - comp = ( - infer_compression(path) - if self.compression == "infer" - else self.compression - ) - f = compr[comp](f, mode="rb") - data = True - while data: - block = getattr(f, "blocksize", 5 * 2**20) - data = f.read(block) - f2.write(data) - else: - self.fs.get(path, fn) - return self._open(path, mode) - - -class LocalTempFile: - """A temporary local file, which will be uploaded on commit""" - - def __init__(self, fs, path, fn=None, mode="wb", autocommit=True, seek=0): - if fn: - self.fn = fn - self.fh = open(fn, mode) - else: - fd, self.fn = tempfile.mkstemp() - self.fh = open(fd, mode) - self.mode = mode - if seek: - self.fh.seek(seek) - self.path = path - 
self.fs = fs - self.closed = False - self.autocommit = autocommit - - def __reduce__(self): - # always open in rb+ to allow continuing writing at a location - return ( - LocalTempFile, - (self.fs, self.path, self.fn, "rb+", self.autocommit, self.tell()), - ) - - def __enter__(self): - return self.fh - - def __exit__(self, exc_type, exc_val, exc_tb): - self.close() - - def close(self): - if self.closed: - return - self.fh.close() - self.closed = True - if self.autocommit: - self.commit() - - def discard(self): - self.fh.close() - os.remove(self.fn) - - def commit(self): - self.fs.put(self.fn, self.path) - try: - os.remove(self.fn) - except (PermissionError, FileNotFoundError): - # file path may be held by new version of the file on windows - pass - - @property - def name(self): - return self.fn - - def __getattr__(self, item): - return getattr(self.fh, item) - - -def hash_name(path, same_name): - if same_name: - hash = os.path.basename(path) - else: - hash = hashlib.sha256(path.encode()).hexdigest() - return hash - - -@contextlib.contextmanager -def atomic_write(path, mode="wb"): - """ - A context manager that opens a temporary file next to `path` and, on exit, - replaces `path` with the temporary file, thereby updating `path` - atomically. - """ - fd, fn = tempfile.mkstemp( - dir=os.path.dirname(path), prefix=os.path.basename(path) + "-" - ) - try: - with open(fd, mode) as fp: - yield fp - except BaseException: - with contextlib.suppress(FileNotFoundError): - os.unlink(fn) - raise - else: - os.replace(fn, path) diff --git a/spaces/cihyFjudo/fairness-paper-search/Hete avonturen means hot adventures in Dutch and it is also the title of a song by Geovanny Reyes on SoundCloud[3]..md b/spaces/cihyFjudo/fairness-paper-search/Hete avonturen means hot adventures in Dutch and it is also the title of a song by Geovanny Reyes on SoundCloud[3]..md deleted file mode 100644 index e2c252e5a766aee11a870d111014198348afc6ee..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Hete avonturen means hot adventures in Dutch and it is also the title of a song by Geovanny Reyes on SoundCloud[3]..md +++ /dev/null @@ -1,6 +0,0 @@ -

jommeke hete avonturen


Download ---> https://tinurli.com/2uwj44



-
- aaccfb2cb3
-
-
-

diff --git a/spaces/cihyFjudo/fairness-paper-search/Why You Need DRM Converter 4 3 8 Keygen Mac OSX to Enjoy Your Media Files Without Limits.md b/spaces/cihyFjudo/fairness-paper-search/Why You Need DRM Converter 4 3 8 Keygen Mac OSX to Enjoy Your Media Files Without Limits.md deleted file mode 100644 index cb91a8d86eaa1c9834c0c0d714276f2d2543a264..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Why You Need DRM Converter 4 3 8 Keygen Mac OSX to Enjoy Your Media Files Without Limits.md +++ /dev/null @@ -1,5 +0,0 @@ -
-

Re:Chips for security
Hard to tell if chips are already compromised. One thing is for sure: the RNGs in them are weak (and probably backdoored). A good way to use these chips is to load a key instead of relying on the on-board keygen. Split the operations into tiny modules across multiple chips and vendors; this prevents any single collusion.

-

drm converter 4 3 8 keygen mac osx


Download ---> https://tinurli.com/2uwjyo



aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/contourpy/_version.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/contourpy/_version.py deleted file mode 100644 index 6849410aae0a8010e76d5f0a44ced13d750b0989..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/contourpy/_version.py +++ /dev/null @@ -1 +0,0 @@ -__version__ = "1.1.0" diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/misc/symfont.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/misc/symfont.py deleted file mode 100644 index 0bd69a386ec9f01c8951f0dfc8bc8c261718cf1f..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/misc/symfont.py +++ /dev/null @@ -1,251 +0,0 @@ -from fontTools.pens.basePen import BasePen -from functools import partial -from itertools import count -import sympy as sp -import sys - -n = 3 # Max Bezier degree; 3 for cubic, 2 for quadratic - -t, x, y = sp.symbols("t x y", real=True) -c = sp.symbols("c", real=False) # Complex representation instead of x/y - -X = tuple(sp.symbols("x:%d" % (n + 1), real=True)) -Y = tuple(sp.symbols("y:%d" % (n + 1), real=True)) -P = tuple(zip(*(sp.symbols("p:%d[%s]" % (n + 1, w), real=True) for w in "01"))) -C = tuple(sp.symbols("c:%d" % (n + 1), real=False)) - -# Cubic Bernstein basis functions -BinomialCoefficient = [(1, 0)] -for i in range(1, n + 1): - last = BinomialCoefficient[-1] - this = tuple(last[j - 1] + last[j] for j in range(len(last))) + (0,) - BinomialCoefficient.append(this) -BinomialCoefficient = tuple(tuple(item[:-1]) for item in BinomialCoefficient) -del last, this - -BernsteinPolynomial = tuple( - tuple(c * t**i * (1 - t) ** (n - i) for i, c in enumerate(coeffs)) - for n, coeffs in enumerate(BinomialCoefficient) -) - -BezierCurve = tuple( - tuple( - sum(P[i][j] * bernstein for i, bernstein in enumerate(bernsteins)) - for j in range(2) - ) - for n, bernsteins in enumerate(BernsteinPolynomial) -) -BezierCurveC = tuple( - sum(C[i] * bernstein for i, bernstein in enumerate(bernsteins)) - for n, bernsteins in enumerate(BernsteinPolynomial) -) - - -def green(f, curveXY): - f = -sp.integrate(sp.sympify(f), y) - f = f.subs({x: curveXY[0], y: curveXY[1]}) - f = sp.integrate(f * sp.diff(curveXY[0], t), (t, 0, 1)) - return f - - -class _BezierFuncsLazy(dict): - def __init__(self, symfunc): - self._symfunc = symfunc - self._bezfuncs = {} - - def __missing__(self, i): - args = ["p%d" % d for d in range(i + 1)] - f = green(self._symfunc, BezierCurve[i]) - f = sp.gcd_terms(f.collect(sum(P, ()))) # Optimize - return sp.lambdify(args, f) - - -class GreenPen(BasePen): - - _BezierFuncs = {} - - @classmethod - def _getGreenBezierFuncs(celf, func): - funcstr = str(func) - if not funcstr in celf._BezierFuncs: - celf._BezierFuncs[funcstr] = _BezierFuncsLazy(func) - return celf._BezierFuncs[funcstr] - - def __init__(self, func, glyphset=None): - BasePen.__init__(self, glyphset) - self._funcs = self._getGreenBezierFuncs(func) - self.value = 0 - - def _moveTo(self, p0): - self.__startPoint = p0 - - def _closePath(self): - p0 = self._getCurrentPoint() - if p0 != self.__startPoint: - self._lineTo(self.__startPoint) - - def _endPath(self): - p0 = self._getCurrentPoint() - if p0 != self.__startPoint: - # Green theorem is not defined on open contours. 
- raise NotImplementedError - - def _lineTo(self, p1): - p0 = self._getCurrentPoint() - self.value += self._funcs[1](p0, p1) - - def _qCurveToOne(self, p1, p2): - p0 = self._getCurrentPoint() - self.value += self._funcs[2](p0, p1, p2) - - def _curveToOne(self, p1, p2, p3): - p0 = self._getCurrentPoint() - self.value += self._funcs[3](p0, p1, p2, p3) - - -# Sample pens. -# Do not use this in real code. -# Use fontTools.pens.momentsPen.MomentsPen instead. -AreaPen = partial(GreenPen, func=1) -MomentXPen = partial(GreenPen, func=x) -MomentYPen = partial(GreenPen, func=y) -MomentXXPen = partial(GreenPen, func=x * x) -MomentYYPen = partial(GreenPen, func=y * y) -MomentXYPen = partial(GreenPen, func=x * y) - - -def printGreenPen(penName, funcs, file=sys.stdout, docstring=None): - - if docstring is not None: - print('"""%s"""' % docstring) - - print( - """from fontTools.pens.basePen import BasePen, OpenContourError -try: - import cython - - COMPILED = cython.compiled -except (AttributeError, ImportError): - # if cython not installed, use mock module with no-op decorators and types - from fontTools.misc import cython - - COMPILED = False - - -__all__ = ["%s"] - -class %s(BasePen): - - def __init__(self, glyphset=None): - BasePen.__init__(self, glyphset) -""" - % (penName, penName), - file=file, - ) - for name, f in funcs: - print(" self.%s = 0" % name, file=file) - print( - """ - def _moveTo(self, p0): - self.__startPoint = p0 - - def _closePath(self): - p0 = self._getCurrentPoint() - if p0 != self.__startPoint: - self._lineTo(self.__startPoint) - - def _endPath(self): - p0 = self._getCurrentPoint() - if p0 != self.__startPoint: - # Green theorem is not defined on open contours. - raise OpenContourError( - "Green theorem is not defined on open contours." - ) -""", - end="", - file=file, - ) - - for n in (1, 2, 3): - - subs = {P[i][j]: [X, Y][j][i] for i in range(n + 1) for j in range(2)} - greens = [green(f, BezierCurve[n]) for name, f in funcs] - greens = [sp.gcd_terms(f.collect(sum(P, ()))) for f in greens] # Optimize - greens = [f.subs(subs) for f in greens] # Convert to p to x/y - defs, exprs = sp.cse( - greens, - optimizations="basic", - symbols=(sp.Symbol("r%d" % i) for i in count()), - ) - - print() - for name, value in defs: - print(" @cython.locals(%s=cython.double)" % name, file=file) - if n == 1: - print( - """\ - @cython.locals(x0=cython.double, y0=cython.double) - @cython.locals(x1=cython.double, y1=cython.double) - def _lineTo(self, p1): - x0,y0 = self._getCurrentPoint() - x1,y1 = p1 -""", - file=file, - ) - elif n == 2: - print( - """\ - @cython.locals(x0=cython.double, y0=cython.double) - @cython.locals(x1=cython.double, y1=cython.double) - @cython.locals(x2=cython.double, y2=cython.double) - def _qCurveToOne(self, p1, p2): - x0,y0 = self._getCurrentPoint() - x1,y1 = p1 - x2,y2 = p2 -""", - file=file, - ) - elif n == 3: - print( - """\ - @cython.locals(x0=cython.double, y0=cython.double) - @cython.locals(x1=cython.double, y1=cython.double) - @cython.locals(x2=cython.double, y2=cython.double) - @cython.locals(x3=cython.double, y3=cython.double) - def _curveToOne(self, p1, p2, p3): - x0,y0 = self._getCurrentPoint() - x1,y1 = p1 - x2,y2 = p2 - x3,y3 = p3 -""", - file=file, - ) - for name, value in defs: - print(" %s = %s" % (name, value), file=file) - - print(file=file) - for name, value in zip([f[0] for f in funcs], exprs): - print(" self.%s += %s" % (name, value), file=file) - - print( - """ -if __name__ == '__main__': - from fontTools.misc.symfont import x, y, printGreenPen - 
printGreenPen('%s', [""" - % penName, - file=file, - ) - for name, f in funcs: - print(" ('%s', %s)," % (name, str(f)), file=file) - print(" ])", file=file) - - -if __name__ == "__main__": - pen = AreaPen() - pen.moveTo((100, 100)) - pen.lineTo((100, 200)) - pen.lineTo((200, 200)) - pen.curveTo((200, 250), (300, 300), (250, 350)) - pen.lineTo((200, 100)) - pen.closePath() - print(pen.value) diff --git a/spaces/cncn102/bingo1/src/components/voice.tsx b/spaces/cncn102/bingo1/src/components/voice.tsx deleted file mode 100644 index ab886394487445e4b0675770b76096bba0e61b0e..0000000000000000000000000000000000000000 --- a/spaces/cncn102/bingo1/src/components/voice.tsx +++ /dev/null @@ -1,52 +0,0 @@ -import React, { useEffect } from 'react' -import { useSetAtom } from 'jotai' -import { useBing } from '@/lib/hooks/use-bing' -import Image from 'next/image' -import VoiceIcon from '@/assets/images/voice.svg' -import VoiceButton from './ui/voice' -import { SR } from '@/lib/bots/bing/sr' -import { voiceListenAtom } from '@/state' - -const sr = new SR(['发送', '清空', '退出']) - -const Voice = ({ setInput, input, sendMessage, isSpeaking }: Pick, 'setInput' | 'sendMessage' | 'input' | 'isSpeaking'>) => { - const setListen = useSetAtom(voiceListenAtom) - useEffect(() => { - if (sr.listening) return - sr.transcript = !isSpeaking - }, [isSpeaking]) - - useEffect(() => { - sr.onchange = (msg: string, command?: string) => { - switch (command) { - case '退出': - sr.stop() - break; - case '发送': - sendMessage(input) - case '清空': - setInput('') - break; - default: - setInput(input + msg) - } - } - }, [input, setInput, sendMessage]) - - const switchSR = (enable: boolean = false) => { - setListen(enable) - if (enable) { - sr.start() - } else { - sr.stop() - } - } - - return sr.listening ? ( - switchSR(false)} /> - ) : ( - switchSR(true)} /> - ) -}; - -export default Voice; diff --git a/spaces/codes4aryan/LLMs-QandA-AI/app.py b/spaces/codes4aryan/LLMs-QandA-AI/app.py deleted file mode 100644 index af0c595dabfbbec7be7502256bdebec9f59e6d08..0000000000000000000000000000000000000000 --- a/spaces/codes4aryan/LLMs-QandA-AI/app.py +++ /dev/null @@ -1,35 +0,0 @@ - -import streamlit as st - - -from langchain.llms import OpenAI - -#Function to return the response -def load_answer(question): - llm = OpenAI(model_name="text-davinci-003",temperature=0) - answer=llm(question) - return answer - - -#App UI starts here -st.set_page_config(page_title="Aryan's Q&A Chatbot", page_icon=":robot:") -st.header("Aryan's Q&A Chatbot") - -#Gets the user input -def get_text(): - input_text = st.text_input("You: ", key="input") - return input_text - - -user_input=get_text() -response = load_answer(user_input) - -submit = st.button('Generate') - -#If generate button is clicked -if submit: - - st.subheader("Answer:") - - st.write(response) - diff --git a/spaces/colakin/video-generater/classes/OpenAI.php b/spaces/colakin/video-generater/classes/OpenAI.php deleted file mode 100644 index 0bd21d18ee68d77ffc8639a2d579c3b14f45fb8d..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/classes/OpenAI.php +++ /dev/null @@ -1,196 +0,0 @@ -api_key = $api_key; - $this->client = $client ?: new Client([ - 'base_uri' => 'https://api.openai.com/v1/', - 'headers' => [ - 'Content-Type' => 'application/json', - 'Authorization' => 'Bearer ' . 
$this->api_key - ] - ]); - $this->log = $log ?: new NullLogger(); - if (empty($api_key)) { - throw new RuntimeException('No valid API key was provided.'); - } - } - - /** - * Generate an image using DALL-E API and save it locally. - * - * @param string $prompt - * @param string $localDirectory - * @param string $size - * @param int $n - * @return array|null - */ - public function generateImage(string $prompt, string $localDirectory, string $size = '1024x1024', int $n = 4): ?array { - $data = [ - 'prompt' => $prompt, - 'n' => $n, - 'size' => $size - ]; - - try { - $response = $this->client->post('images/generations', ['json' => $data]); - $json = json_decode((string) $response->getBody(), true); - - if (json_last_error() === JSON_ERROR_NONE) { - $savedImages = []; - foreach ($json['data'] as $imageData) { - $imageUrl = $imageData['url']; - $localFilePath = $this->saveImage($imageUrl, $localDirectory); - $savedImages[] = $localFilePath; - } - - return $savedImages; - } else { - $this->log->error('Failed to decode JSON response', ['json_error' => json_last_error_msg()]); - - return null; - } - } catch (RequestException $e) { - $this->log->error('RequestException encountered', ['message' => $e->getMessage()]); - - return null; - } - } - - /** - * Save an image from a URL to a local file path. - * - * @param string $imageUrl - * @param string $localDirectory - * @return string - */ - private function saveImage(string $imageUrl, string $localDirectory): string { - $imageFileName = basename(parse_url($imageUrl, PHP_URL_PATH)); - $localFilePath = $localDirectory . '/' . $imageFileName; - - $client = new Client(); - $response = $client->get($imageUrl, ['sink' => $localFilePath]); - - if ($response->getStatusCode() == 200) { - $this->log->info('Image saved successfully', ['path' => $localFilePath]); - - return $localFilePath; - } else { - $this->log->error('Failed to save the image', ['status_code' => $response->getStatusCode()]); - - throw new RuntimeException('Failed to save the image: ' . $response->getStatusCode()); - } - } - - /** - * Generate a script using the OpenAI GPT-3.5 Turbo model. - * - * @param string $role - * @param string $prompt - * @param int $maxTokens - * @param float $temperature - * @return string|null - */ - public function generateScript(string $role, string $prompt, int $maxTokens = 3600, float $temperature = 1.0): ?string { - $data = [ - 'model' => 'gpt-3.5-turbo', - 'messages' => [ - ['role' => 'system', 'content' => $role], - ['role' => 'user', 'content' => $prompt], - ], - 'max_tokens' => $maxTokens, - 'temperature' => $temperature - ]; - - try { - $response = $this->client->post('chat/completions', ['json' => $data]); - $json = json_decode((string) $response->getBody(), true); - - if (json_last_error() === JSON_ERROR_NONE) { - $assistantResponse = $json['choices'][0]['message']['content']; - - return $assistantResponse; - } else { - $this->log->error('Failed to decode JSON response', ['json_error' => json_last_error_msg()]); - - return null; - } - } catch (RequestException $e) { - $this->log->error('RequestException encountered', ['message' => $e->getMessage()]); - - return null; - } - } - /** - * Generate image variations using DALL-E API and save them locally. 
- * - * @param string $imagePath - * @param string $localDirectory - * @param int $n - * @param string $size - * @return array|null - */ - public function generateImageVariations(string $imagePath, string $localDirectory, int $n = 4, string $size = '1024x1024'): ?array { - $data = [ - 'n' => $n, - 'size' => $size, - 'image' => curl_file_create($imagePath) - ]; - - try { - $response = $this->client->post('images/variations', ['multipart' => $data]); - $json = json_decode((string) $response->getBody(), true); - - if (json_last_error() === JSON_ERROR_NONE) { - $savedImages = []; - foreach ($json['data'] as $imageData) { - $imageUrl = $imageData['url']; - $localFilePath = $this->saveImage($imageUrl, $localDirectory); - $savedImages[] = $localFilePath; - } - - return $savedImages; - } else { - $this->log->error('Failed to decode JSON response', ['json_error' => json_last_error_msg()]); - - return null; - } - } catch (RequestException $e) { - $this->log->error('RequestException encountered', ['message' => $e->getMessage()]); - - return null; - } - } -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/vpx_arith.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/vpx_arith.h deleted file mode 100644 index 1ead2284c33c6609baf748fc332ef9ba412ca4fc..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/vpx_arith.h +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright (C) 2010 Mans Rullgard - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_ARM_VPX_ARITH_H -#define AVCODEC_ARM_VPX_ARITH_H - -#if CONFIG_THUMB -# define A(x) -# define T(x) x -#else -# define A(x) x -# define T(x) -#endif - -#if CONFIG_THUMB || defined __clang__ -# define L(x) -# define U(x) x -#else -# define L(x) x -# define U(x) -#endif - -#if HAVE_ARMV6_INLINE - -#define vpx_rac_get_prob vp56_rac_get_prob_armv6 -static inline int vp56_rac_get_prob_armv6(VPXRangeCoder *c, int pr) -{ - unsigned shift = ff_vpx_norm_shift[c->high]; - unsigned code_word = c->code_word << shift; - unsigned high = c->high << shift; - unsigned bit; - - __asm__ ("adds %3, %3, %0 \n" - "itt cs \n" - "cmpcs %7, %4 \n" - L("ldrcsh %2, [%4], #2 \n") - U("ldrhcs %2, [%4], #2 \n") - "rsb %0, %6, #256 \n" - "smlabb %0, %5, %6, %0 \n" - T("itttt cs \n") - "rev16cs %2, %2 \n" - T("lslcs %2, %2, %3 \n") - T("orrcs %1, %1, %2 \n") - A("orrcs %1, %1, %2, lsl %3 \n") - "subcs %3, %3, #16 \n" - "lsr %0, %0, #8 \n" - "cmp %1, %0, lsl #16 \n" - "ittte ge \n" - "subge %1, %1, %0, lsl #16 \n" - "subge %0, %5, %0 \n" - "movge %2, #1 \n" - "movlt %2, #0 \n" - : "=&r"(c->high), "=&r"(c->code_word), "=&r"(bit), - "+&r"(c->bits), "+&r"(c->buffer) - : "r"(high), "r"(pr), "r"(c->end - 1), - "0"(shift), "1"(code_word) - : "cc"); - - return bit; -} - -#define vpx_rac_get_prob_branchy vp56_rac_get_prob_branchy_armv6 -static inline int vp56_rac_get_prob_branchy_armv6(VPXRangeCoder *c, int pr) -{ - unsigned shift = ff_vpx_norm_shift[c->high]; - unsigned code_word = c->code_word << shift; - unsigned high = c->high << shift; - unsigned low; - unsigned tmp; - - __asm__ ("adds %3, %3, %0 \n" - "itt cs \n" - "cmpcs %7, %4 \n" - L("ldrcsh %2, [%4], #2 \n") - U("ldrhcs %2, [%4], #2 \n") - "rsb %0, %6, #256 \n" - "smlabb %0, %5, %6, %0 \n" - T("itttt cs \n") - "rev16cs %2, %2 \n" - T("lslcs %2, %2, %3 \n") - T("orrcs %1, %1, %2 \n") - A("orrcs %1, %1, %2, lsl %3 \n") - "subcs %3, %3, #16 \n" - "lsr %0, %0, #8 \n" - "lsl %2, %0, #16 \n" - : "=&r"(low), "+&r"(code_word), "=&r"(tmp), - "+&r"(c->bits), "+&r"(c->buffer) - : "r"(high), "r"(pr), "r"(c->end - 1), "0"(shift) - : "cc"); - - if (code_word >= tmp) { - c->high = high - low; - c->code_word = code_word - tmp; - return 1; - } - - c->high = low; - c->code_word = code_word; - return 0; -} - -#endif - -#endif /* AVCODEC_ARM_VPX_ARITH_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dcahuff.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dcahuff.h deleted file mode 100644 index fdfe7e922a6fcf5736bc71f0ccb3b7ab2ede3155..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dcahuff.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * DCA compatible decoder - huffman tables - * Copyright (C) 2004 Gildas Bazin - * Copyright (C) 2007 Konstantin Shishkov - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_DCAHUFF_H -#define AVCODEC_DCAHUFF_H - -#include - -#include "libavutil/attributes.h" - -#include "vlc.h" - -#define DCA_CODE_BOOKS 10 -#define DCA_BITALLOC_12_COUNT 5 -#define DCA_NUM_BITALLOC_CODES (1 * 3 + \ - 3 * (5 + 7 + 9 + 13) \ - + 7 * (17 + 25 + 33 + 65 + 129)) - -extern VLC ff_dca_vlc_bit_allocation[5]; -#define DCA_TMODE_VLC_BITS 3 -extern VLC ff_dca_vlc_transition_mode[4]; -#define DCA_SCALES_VLC_BITS 9 -extern VLC ff_dca_vlc_scale_factor[5]; -extern VLC ff_dca_vlc_quant_index[DCA_CODE_BOOKS][7]; - -#define DCA_TNL_GRP_VLC_BITS 9 -extern VLC ff_dca_vlc_tnl_grp[5]; -#define DCA_TNL_SCF_VLC_BITS 9 -extern VLC ff_dca_vlc_tnl_scf; -#define DCA_DAMP_VLC_BITS 6 -extern VLC ff_dca_vlc_damp; -#define DCA_DPH_VLC_BITS 6 -extern VLC ff_dca_vlc_dph; -#define DCA_FST_RSD_VLC_BITS 9 -extern VLC ff_dca_vlc_fst_rsd_amp; -#define DCA_RSD_APPRX_VLC_BITS 5 -extern VLC ff_dca_vlc_rsd_apprx; -#define DCA_RSD_AMP_VLC_BITS 9 -extern VLC ff_dca_vlc_rsd_amp; -#define DCA_AVG_G3_VLC_BITS 9 -extern VLC ff_dca_vlc_avg_g3; -#define DCA_ST_GRID_VLC_BITS 9 -extern VLC ff_dca_vlc_st_grid; -#define DCA_GRID_VLC_BITS 9 -extern VLC ff_dca_vlc_grid_2; -extern VLC ff_dca_vlc_grid_3; -#define DCA_RSD_VLC_BITS 6 -extern VLC ff_dca_vlc_rsd; - -extern const int8_t ff_dca_bitalloc_offsets[DCA_CODE_BOOKS]; -extern const uint8_t ff_dca_bitalloc_sizes[DCA_CODE_BOOKS]; -extern const uint8_t ff_dca_vlc_src_tables[][2]; - -av_cold void ff_dca_init_vlcs(void); - -#endif /* AVCODEC_DCAHUFF_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/vp9_idct_msa.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/vp9_idct_msa.c deleted file mode 100644 index 53bfbb476569287c8a87d1a179904a839c7a4671..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/mips/vp9_idct_msa.c +++ /dev/null @@ -1,2171 +0,0 @@ -/* - * Copyright (c) 2015 - 2017 Shivraj Patil (Shivraj.Patil@imgtec.com) - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include -#include "libavcodec/vp9dsp.h" -#include "libavutil/mips/generic_macros_msa.h" -#include "vp9dsp_mips.h" - -#define VP9_DCT_CONST_BITS 14 -#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n)) - -static const int32_t cospi_1_64 = 16364; -static const int32_t cospi_2_64 = 16305; -static const int32_t cospi_3_64 = 16207; -static const int32_t cospi_4_64 = 16069; -static const int32_t cospi_5_64 = 15893; -static const int32_t cospi_6_64 = 15679; -static const int32_t cospi_7_64 = 15426; -static const int32_t cospi_8_64 = 15137; -static const int32_t cospi_9_64 = 14811; -static const int32_t cospi_10_64 = 14449; -static const int32_t cospi_11_64 = 14053; -static const int32_t cospi_12_64 = 13623; -static const int32_t cospi_13_64 = 13160; -static const int32_t cospi_14_64 = 12665; -static const int32_t cospi_15_64 = 12140; -static const int32_t cospi_16_64 = 11585; -static const int32_t cospi_17_64 = 11003; -static const int32_t cospi_18_64 = 10394; -static const int32_t cospi_19_64 = 9760; -static const int32_t cospi_20_64 = 9102; -static const int32_t cospi_21_64 = 8423; -static const int32_t cospi_22_64 = 7723; -static const int32_t cospi_23_64 = 7005; -static const int32_t cospi_24_64 = 6270; -static const int32_t cospi_25_64 = 5520; -static const int32_t cospi_26_64 = 4756; -static const int32_t cospi_27_64 = 3981; -static const int32_t cospi_28_64 = 3196; -static const int32_t cospi_29_64 = 2404; -static const int32_t cospi_30_64 = 1606; -static const int32_t cospi_31_64 = 804; - -// 16384 * sqrt(2) * sin(kPi/9) * 2 / 3 -static const int32_t sinpi_1_9 = 5283; -static const int32_t sinpi_2_9 = 9929; -static const int32_t sinpi_3_9 = 13377; -static const int32_t sinpi_4_9 = 15212; - -#define VP9_DOTP_CONST_PAIR(reg0, reg1, cnst0, cnst1, out0, out1) \ -{ \ - v8i16 k0_m = __msa_fill_h(cnst0); \ - v4i32 s0_m, s1_m, s2_m, s3_m; \ - \ - s0_m = (v4i32) __msa_fill_h(cnst1); \ - k0_m = __msa_ilvev_h((v8i16) s0_m, k0_m); \ - \ - ILVRL_H2_SW((-reg1), reg0, s1_m, s0_m); \ - ILVRL_H2_SW(reg0, reg1, s3_m, s2_m); \ - DOTP_SH2_SW(s1_m, s0_m, k0_m, k0_m, s1_m, s0_m); \ - SRARI_W2_SW(s1_m, s0_m, VP9_DCT_CONST_BITS); \ - out0 = __msa_pckev_h((v8i16) s0_m, (v8i16) s1_m); \ - \ - DOTP_SH2_SW(s3_m, s2_m, k0_m, k0_m, s1_m, s0_m); \ - SRARI_W2_SW(s1_m, s0_m, VP9_DCT_CONST_BITS); \ - out1 = __msa_pckev_h((v8i16) s0_m, (v8i16) s1_m); \ -} - -#define VP9_DOT_ADD_SUB_SRARI_PCK(in0, in1, in2, in3, in4, in5, in6, in7, \ - dst0, dst1, dst2, dst3) \ -{ \ - v4i32 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m; \ - v4i32 tp5_m, tp6_m, tp7_m, tp8_m, tp9_m; \ - \ - DOTP_SH4_SW(in0, in1, in0, in1, in4, in4, in5, in5, \ - tp0_m, tp2_m, tp3_m, tp4_m); \ - DOTP_SH4_SW(in2, in3, in2, in3, in6, in6, in7, in7, \ - tp5_m, tp6_m, tp7_m, tp8_m); \ - BUTTERFLY_4(tp0_m, tp3_m, tp7_m, tp5_m, tp1_m, tp9_m, tp7_m, tp5_m); \ - BUTTERFLY_4(tp2_m, tp4_m, tp8_m, tp6_m, tp3_m, tp0_m, tp4_m, tp2_m); \ - SRARI_W4_SW(tp1_m, tp9_m, tp7_m, tp5_m, VP9_DCT_CONST_BITS); \ - SRARI_W4_SW(tp3_m, tp0_m, tp4_m, tp2_m, VP9_DCT_CONST_BITS); \ - PCKEV_H4_SH(tp1_m, tp3_m, tp9_m, tp0_m, tp7_m, tp4_m, tp5_m, tp2_m, \ - dst0, dst1, dst2, dst3); \ -} - -#define VP9_DOT_SHIFT_RIGHT_PCK_H(in0, in1, in2) \ -( { \ - v8i16 dst_m; \ - v4i32 tp0_m, tp1_m; \ - \ - DOTP_SH2_SW(in0, in1, in2, in2, tp1_m, tp0_m); \ - 
SRARI_W2_SW(tp1_m, tp0_m, VP9_DCT_CONST_BITS); \ - dst_m = __msa_pckev_h((v8i16) tp1_m, (v8i16) tp0_m); \ - \ - dst_m; \ -} ) - -#define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) \ -{ \ - v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m; \ - v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m; \ - v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64, \ - cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 }; \ - v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, \ - -cospi_16_64, cospi_24_64, -cospi_24_64, 0, 0 }; \ - \ - SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m); \ - cnst2_m = -cnst0_m; \ - ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \ - SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m); \ - cnst4_m = -cnst2_m; \ - ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \ - \ - ILVRL_H2_SH(in0, in7, vec1_m, vec0_m); \ - ILVRL_H2_SH(in4, in3, vec3_m, vec2_m); \ - VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \ - cnst1_m, cnst2_m, cnst3_m, in7, in0, \ - in4, in3); \ - \ - SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m); \ - cnst2_m = -cnst0_m; \ - ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \ - SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m); \ - cnst4_m = -cnst2_m; \ - ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \ - \ - ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \ - ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \ - \ - VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \ - cnst1_m, cnst2_m, cnst3_m, in5, in2, \ - in6, in1); \ - BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5); \ - out7 = -s0_m; \ - out0 = s1_m; \ - \ - SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5, \ - cnst0_m, cnst1_m, cnst2_m, cnst3_m); \ - \ - ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m); \ - cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ - cnst1_m = cnst0_m; \ - \ - ILVRL_H2_SH(in4, in3, vec1_m, vec0_m); \ - ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \ - VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \ - cnst2_m, cnst3_m, cnst1_m, out1, out6, \ - s0_m, s1_m); \ - \ - SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m); \ - cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \ - \ - ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \ - ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m); \ - out3 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \ - out4 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \ - out2 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m); \ - out5 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m); \ - \ - out1 = -out1; \ - out3 = -out3; \ - out5 = -out5; \ -} - -#define VP9_MADD_SHORT(m0, m1, c0, c1, res0, res1) \ -{ \ - v4i32 madd0_m, madd1_m, madd2_m, madd3_m; \ - v8i16 madd_s0_m, madd_s1_m; \ - \ - ILVRL_H2_SH(m1, m0, madd_s0_m, madd_s1_m); \ - DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s0_m, madd_s1_m, \ - c0, c0, c1, c1, madd0_m, madd1_m, madd2_m, madd3_m); \ - SRARI_W4_SW(madd0_m, madd1_m, madd2_m, madd3_m, VP9_DCT_CONST_BITS); \ - PCKEV_H2_SH(madd1_m, madd0_m, madd3_m, madd2_m, res0, res1); \ -} - -#define VP9_MADD_BF(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, \ - out0, out1, out2, out3) \ -{ \ - v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m; \ - v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m, m4_m, m5_m; \ - \ - ILVRL_H2_SH(inp1, inp0, madd_s0_m, madd_s1_m); \ - ILVRL_H2_SH(inp3, inp2, madd_s2_m, madd_s3_m); \ - DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m, \ - cst0, cst0, cst2, cst2, tmp0_m, 
tmp1_m, tmp2_m, tmp3_m); \ - BUTTERFLY_4(tmp0_m, tmp1_m, tmp3_m, tmp2_m, \ - m4_m, m5_m, tmp3_m, tmp2_m); \ - SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS); \ - PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out0, out1); \ - DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m, \ - cst1, cst1, cst3, cst3, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \ - BUTTERFLY_4(tmp0_m, tmp1_m, tmp3_m, tmp2_m, \ - m4_m, m5_m, tmp3_m, tmp2_m); \ - SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS); \ - PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out2, out3); \ -} - -#define VP9_SET_COSPI_PAIR(c0_h, c1_h) \ -( { \ - v8i16 out0_m, r0_m, r1_m; \ - \ - r0_m = __msa_fill_h(c0_h); \ - r1_m = __msa_fill_h(c1_h); \ - out0_m = __msa_ilvev_h(r1_m, r0_m); \ - \ - out0_m; \ -} ) - -#define VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3) \ -{ \ - uint8_t *dst_m = (uint8_t *) (dst); \ - v16u8 dst0_m, dst1_m, dst2_m, dst3_m; \ - v16i8 tmp0_m, tmp1_m; \ - v16i8 zero_m = { 0 }; \ - v8i16 res0_m, res1_m, res2_m, res3_m; \ - \ - LD_UB4(dst_m, dst_stride, dst0_m, dst1_m, dst2_m, dst3_m); \ - ILVR_B4_SH(zero_m, dst0_m, zero_m, dst1_m, zero_m, dst2_m, \ - zero_m, dst3_m, res0_m, res1_m, res2_m, res3_m); \ - ADD4(res0_m, in0, res1_m, in1, res2_m, in2, res3_m, in3, \ - res0_m, res1_m, res2_m, res3_m); \ - CLIP_SH4_0_255(res0_m, res1_m, res2_m, res3_m); \ - PCKEV_B2_SB(res1_m, res0_m, res3_m, res2_m, tmp0_m, tmp1_m); \ - ST_D4(tmp0_m, tmp1_m, 0, 1, 0, 1, dst_m, dst_stride); \ -} - -#define VP9_IDCT4x4(in0, in1, in2, in3, out0, out1, out2, out3) \ -{ \ - v8i16 c0_m, c1_m, c2_m, c3_m; \ - v8i16 step0_m, step1_m; \ - v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - v16i8 zeros = { 0 }; \ - \ - c0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); \ - c1_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); \ - step0_m = __msa_ilvr_h(in2, in0); \ - DOTP_SH2_SW(step0_m, step0_m, c0_m, c1_m, tmp0_m, tmp1_m); \ - \ - c2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \ - c3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \ - step1_m = __msa_ilvr_h(in3, in1); \ - DOTP_SH2_SW(step1_m, step1_m, c2_m, c3_m, tmp2_m, tmp3_m); \ - SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS); \ - \ - PCKEV_H2_SW(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp0_m, tmp2_m); \ - SLDI_B2_SW(zeros, tmp0_m, zeros, tmp2_m, 8, tmp1_m, tmp3_m); \ - BUTTERFLY_4((v8i16) tmp0_m, (v8i16) tmp1_m, \ - (v8i16) tmp2_m, (v8i16) tmp3_m, \ - out0, out1, out2, out3); \ -} - -#define VP9_IADST4x4(in0, in1, in2, in3, out0, out1, out2, out3) \ -{ \ - v8i16 res0_m, res1_m, c0_m, c1_m; \ - v8i16 k1_m, k2_m, k3_m, k4_m; \ - v8i16 zero_m = { 0 }; \ - v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - v4i32 int0_m, int1_m, int2_m, int3_m; \ - v8i16 mask_m = { sinpi_1_9, sinpi_2_9, sinpi_3_9, \ - sinpi_4_9, -sinpi_1_9, -sinpi_2_9, -sinpi_3_9, \ - -sinpi_4_9 }; \ - \ - SPLATI_H4_SH(mask_m, 3, 0, 1, 2, c0_m, c1_m, k1_m, k2_m); \ - ILVEV_H2_SH(c0_m, c1_m, k1_m, k2_m, c0_m, c1_m); \ - ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m); \ - DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp2_m, tmp1_m); \ - int0_m = tmp2_m + tmp1_m; \ - \ - SPLATI_H2_SH(mask_m, 4, 7, k4_m, k3_m); \ - ILVEV_H2_SH(k4_m, k1_m, k3_m, k2_m, c0_m, c1_m); \ - DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp0_m, tmp1_m); \ - int1_m = tmp0_m + tmp1_m; \ - \ - c0_m = __msa_splati_h(mask_m, 6); \ - ILVL_H2_SH(k2_m, c0_m, zero_m, k2_m, c0_m, c1_m); \ - ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m); \ - DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp0_m, tmp1_m); \ - int2_m = tmp0_m + tmp1_m; \ - \ - c0_m = __msa_splati_h(mask_m, 6); \ - 
c0_m = __msa_ilvev_h(c0_m, k1_m); \ - \ - res0_m = __msa_ilvr_h((in1), (in3)); \ - tmp0_m = __msa_dotp_s_w(res0_m, c0_m); \ - int3_m = tmp2_m + tmp0_m; \ - \ - res0_m = __msa_ilvr_h((in2), (in3)); \ - c1_m = __msa_ilvev_h(k4_m, k3_m); \ - \ - tmp2_m = __msa_dotp_s_w(res0_m, c1_m); \ - res1_m = __msa_ilvr_h((in0), (in2)); \ - c1_m = __msa_ilvev_h(k1_m, zero_m); \ - \ - tmp3_m = __msa_dotp_s_w(res1_m, c1_m); \ - int3_m += tmp2_m; \ - int3_m += tmp3_m; \ - \ - SRARI_W4_SW(int0_m, int1_m, int2_m, int3_m, VP9_DCT_CONST_BITS); \ - PCKEV_H2_SH(int0_m, int0_m, int1_m, int1_m, out0, out1); \ - PCKEV_H2_SH(int2_m, int2_m, int3_m, int3_m, out2, out3); \ -} - -#define TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) \ -{ \ - v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - v8i16 tmp0_n, tmp1_n, tmp2_n, tmp3_n; \ - v8i16 zero_m = { 0 }; \ - \ - ILVR_H4_SH(in1, in0, in3, in2, in5, in4, in7, in6, \ - tmp0_n, tmp1_n, tmp2_n, tmp3_n); \ - ILVRL_W2_SH(tmp1_n, tmp0_n, tmp0_m, tmp2_m); \ - ILVRL_W2_SH(tmp3_n, tmp2_n, tmp1_m, tmp3_m); \ - \ - out0 = (v8i16) __msa_ilvr_d((v2i64) tmp1_m, (v2i64) tmp0_m); \ - out1 = (v8i16) __msa_ilvl_d((v2i64) tmp1_m, (v2i64) tmp0_m); \ - out2 = (v8i16) __msa_ilvr_d((v2i64) tmp3_m, (v2i64) tmp2_m); \ - out3 = (v8i16) __msa_ilvl_d((v2i64) tmp3_m, (v2i64) tmp2_m); \ - \ - out4 = zero_m; \ - out5 = zero_m; \ - out6 = zero_m; \ - out7 = zero_m; \ -} - -static void vp9_idct4x4_1_add_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride) -{ - int16_t out; - v8i16 vec; - - out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS); - out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS); - out = ROUND_POWER_OF_TWO(out, 4); - vec = __msa_fill_h(out); - input[0] = 0; - - ADDBLK_ST4x4_UB(vec, vec, vec, vec, dst, dst_stride); -} - -static void vp9_idct4x4_colcol_addblk_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride) -{ - v8i16 in0, in1, in2, in3; - v8i16 zero = { 0 }; - - /* load vector elements of 4x4 block */ - in0 = LD_SH(input); - in2 = LD_SH(input + 8); - in1 = (v8i16) __msa_ilvl_d((v2i64) in0, (v2i64) in0); - in3 = (v8i16) __msa_ilvl_d((v2i64) in2, (v2i64) in2); - ST_SH2(zero, zero, input, 8); - /* rows */ - VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3); - /* columns */ - TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); - VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3); - /* rounding (add 2^3, divide by 2^4) */ - SRARI_H4_SH(in0, in1, in2, in3, 4); - ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride); -} - -static void vp9_iadst4x4_colcol_addblk_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride) -{ - v8i16 in0, in1, in2, in3; - v8i16 zero = { 0 }; - - /* load vector elements of 4x4 block */ - in0 = LD_SH(input); - in2 = LD_SH(input + 8); - in1 = (v8i16) __msa_ilvl_d((v2i64) in0, (v2i64) in0); - in3 = (v8i16) __msa_ilvl_d((v2i64) in2, (v2i64) in2); - ST_SH2(zero, zero, input, 8); - /* rows */ - VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3); - /* columns */ - TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); - VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3); - /* rounding (add 2^3, divide by 2^4) */ - SRARI_H4_SH(in0, in1, in2, in3, 4); - ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride); -} - -static void vp9_iadst_idct_4x4_add_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride, int32_t eob) -{ - v8i16 in0, in1, in2, in3; - v8i16 zero = { 0 }; - - /* load vector elements of 4x4 block */ - in0 = LD_SH(input); - in2 = LD_SH(input + 8); - in1 = 
(v8i16) __msa_ilvl_d((v2i64) in0, (v2i64) in0); - in3 = (v8i16) __msa_ilvl_d((v2i64) in2, (v2i64) in2); - ST_SH2(zero, zero, input, 8); - /* cols */ - VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3); - /* columns */ - TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); - VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3); - /* rounding (add 2^3, divide by 2^4) */ - SRARI_H4_SH(in0, in1, in2, in3, 4); - ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride); -} - -static void vp9_idct_iadst_4x4_add_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride, int32_t eob) -{ - v8i16 in0, in1, in2, in3; - v8i16 zero = { 0 }; - - /* load vector elements of 4x4 block */ - in0 = LD_SH(input); - in2 = LD_SH(input + 8); - in1 = (v8i16) __msa_ilvl_d((v2i64) in0, (v2i64) in0); - in3 = (v8i16) __msa_ilvl_d((v2i64) in2, (v2i64) in2); - ST_SH2(zero, zero, input, 8); - /* cols */ - VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3); - /* columns */ - TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3); - VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3); - /* rounding (add 2^3, divide by 2^4) */ - SRARI_H4_SH(in0, in1, in2, in3, 4); - ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride); -} - -#define VP9_SET_CONST_PAIR(mask_h, idx1_h, idx2_h) \ -( { \ - v8i16 c0_m, c1_m; \ - \ - SPLATI_H2_SH(mask_h, idx1_h, idx2_h, c0_m, c1_m); \ - c0_m = __msa_ilvev_h(c1_m, c0_m); \ - \ - c0_m; \ -} ) - -/* multiply and add macro */ -#define VP9_MADD(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, \ - out0, out1, out2, out3) \ -{ \ - v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m; \ - v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - \ - ILVRL_H2_SH(inp1, inp0, madd_s1_m, madd_s0_m); \ - ILVRL_H2_SH(inp3, inp2, madd_s3_m, madd_s2_m); \ - DOTP_SH4_SW(madd_s1_m, madd_s0_m, madd_s1_m, madd_s0_m, \ - cst0, cst0, cst1, cst1, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \ - SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS); \ - PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out1); \ - DOTP_SH4_SW(madd_s3_m, madd_s2_m, madd_s3_m, madd_s2_m, \ - cst2, cst2, cst3, cst3, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \ - SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS); \ - PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out2, out3); \ -} - -/* idct 8x8 macro */ -#define VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) \ -{ \ - v8i16 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m; \ - v8i16 k0_m, k1_m, k2_m, k3_m, res0_m, res1_m, res2_m, res3_m; \ - v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \ - v8i16 mask_m = { cospi_28_64, cospi_4_64, cospi_20_64, cospi_12_64, \ - cospi_16_64, -cospi_4_64, -cospi_20_64, -cospi_16_64 }; \ - \ - k0_m = VP9_SET_CONST_PAIR(mask_m, 0, 5); \ - k1_m = VP9_SET_CONST_PAIR(mask_m, 1, 0); \ - k2_m = VP9_SET_CONST_PAIR(mask_m, 6, 3); \ - k3_m = VP9_SET_CONST_PAIR(mask_m, 3, 2); \ - VP9_MADD(in1, in7, in3, in5, k0_m, k1_m, k2_m, k3_m, in1, in7, in3, in5); \ - SUB2(in1, in3, in7, in5, res0_m, res1_m); \ - k0_m = VP9_SET_CONST_PAIR(mask_m, 4, 7); \ - k1_m = __msa_splati_h(mask_m, 4); \ - \ - ILVRL_H2_SH(res0_m, res1_m, res2_m, res3_m); \ - DOTP_SH4_SW(res2_m, res3_m, res2_m, res3_m, k0_m, k0_m, k1_m, k1_m, \ - tmp0_m, tmp1_m, tmp2_m, tmp3_m); \ - SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS); \ - tp4_m = in1 + in3; \ - PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tp5_m, tp6_m); \ - tp7_m = in7 + in5; \ - k2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \ - k3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \ - 
VP9_MADD(in0, in4, in2, in6, k1_m, k0_m, k2_m, k3_m, \ - in0, in4, in2, in6); \ - BUTTERFLY_4(in0, in4, in2, in6, tp0_m, tp1_m, tp2_m, tp3_m); \ - BUTTERFLY_8(tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m, \ - out0, out1, out2, out3, out4, out5, out6, out7); \ -} - -#define VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, \ - out0, out1, out2, out3, out4, out5, out6, out7) \ -{ \ - v4i32 r0_m, r1_m, r2_m, r3_m, r4_m, r5_m, r6_m, r7_m; \ - v4i32 m0_m, m1_m, m2_m, m3_m, t0_m, t1_m; \ - v8i16 res0_m, res1_m, res2_m, res3_m, k0_m, k1_m, in_s0, in_s1; \ - v8i16 mask1_m = { cospi_2_64, cospi_30_64, -cospi_2_64, \ - cospi_10_64, cospi_22_64, -cospi_10_64, cospi_18_64, cospi_14_64 }; \ - v8i16 mask2_m = { cospi_14_64, -cospi_18_64, cospi_26_64, \ - cospi_6_64, -cospi_26_64, cospi_8_64, cospi_24_64, -cospi_8_64 }; \ - v8i16 mask3_m = { -cospi_24_64, cospi_8_64, cospi_16_64, \ - -cospi_16_64, 0, 0, 0, 0 }; \ - \ - k0_m = VP9_SET_CONST_PAIR(mask1_m, 0, 1); \ - k1_m = VP9_SET_CONST_PAIR(mask1_m, 1, 2); \ - ILVRL_H2_SH(in1, in0, in_s1, in_s0); \ - DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, \ - r0_m, r1_m, r2_m, r3_m); \ - k0_m = VP9_SET_CONST_PAIR(mask1_m, 6, 7); \ - k1_m = VP9_SET_CONST_PAIR(mask2_m, 0, 1); \ - ILVRL_H2_SH(in5, in4, in_s1, in_s0); \ - DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, \ - r4_m, r5_m, r6_m, r7_m); \ - ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, \ - m0_m, m1_m, m2_m, m3_m); \ - SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS); \ - PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, res0_m, res1_m); \ - SUB4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, \ - m0_m, m1_m, m2_m, m3_m); \ - SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS); \ - PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, t0_m, t1_m); \ - k0_m = VP9_SET_CONST_PAIR(mask1_m, 3, 4); \ - k1_m = VP9_SET_CONST_PAIR(mask1_m, 4, 5); \ - ILVRL_H2_SH(in3, in2, in_s1, in_s0); \ - DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, \ - r0_m, r1_m, r2_m, r3_m); \ - k0_m = VP9_SET_CONST_PAIR(mask2_m, 2, 3); \ - k1_m = VP9_SET_CONST_PAIR(mask2_m, 3, 4); \ - ILVRL_H2_SH(in7, in6, in_s1, in_s0); \ - DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, \ - r4_m, r5_m, r6_m, r7_m); \ - ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, \ - m0_m, m1_m, m2_m, m3_m); \ - SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS); \ - PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, res2_m, res3_m); \ - SUB4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, \ - m0_m, m1_m, m2_m, m3_m); \ - SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS); \ - PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, r2_m, r3_m); \ - ILVRL_H2_SW(r3_m, r2_m, m2_m, m3_m); \ - BUTTERFLY_4(res0_m, res1_m, res3_m, res2_m, out0, in7, in4, in3); \ - k0_m = VP9_SET_CONST_PAIR(mask2_m, 5, 6); \ - k1_m = VP9_SET_CONST_PAIR(mask2_m, 6, 7); \ - ILVRL_H2_SH(t1_m, t0_m, in_s1, in_s0); \ - DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, \ - r0_m, r1_m, r2_m, r3_m); \ - k1_m = VP9_SET_CONST_PAIR(mask3_m, 0, 1); \ - DOTP_SH4_SW(m2_m, m3_m, m2_m, m3_m, k0_m, k0_m, k1_m, k1_m, \ - r4_m, r5_m, r6_m, r7_m); \ - ADD4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m, \ - m0_m, m1_m, m2_m, m3_m); \ - SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS); \ - PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in1, out6); \ - SUB4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m, \ - m0_m, m1_m, m2_m, m3_m); \ - SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS); \ - PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in2, in5); \ - k0_m = 
VP9_SET_CONST_PAIR(mask3_m, 2, 2); \ - k1_m = VP9_SET_CONST_PAIR(mask3_m, 2, 3); \ - ILVRL_H2_SH(in4, in3, in_s1, in_s0); \ - DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, \ - m0_m, m1_m, m2_m, m3_m); \ - SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS); \ - PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in3, out4); \ - ILVRL_H2_SW(in5, in2, m2_m, m3_m); \ - DOTP_SH4_SW(m2_m, m3_m, m2_m, m3_m, k0_m, k0_m, k1_m, k1_m, \ - m0_m, m1_m, m2_m, m3_m); \ - SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS); \ - PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, out2, in5); \ - \ - out1 = -in1; \ - out3 = -in3; \ - out5 = -in5; \ - out7 = -in7; \ -} - -static void vp9_idct8x8_1_add_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride) -{ - int16_t out; - int32_t val; - v8i16 vec; - - out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS); - out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS); - val = ROUND_POWER_OF_TWO(out, 5); - vec = __msa_fill_h(val); - input[0] = 0; - - VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec); - dst += (4 * dst_stride); - VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec); -} - -static void vp9_idct8x8_12_colcol_addblk_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride) -{ - v8i16 in0, in1, in2, in3, in4, in5, in6, in7; - v8i16 s0, s1, s2, s3, s4, s5, s6, s7, k0, k1, k2, k3, m0, m1, m2, m3; - v4i32 tmp0, tmp1, tmp2, tmp3; - v8i16 zero = { 0 }; - - /* load vector elements of 8x8 block */ - LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7); - ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8); - ILVR_D2_SH(in1, in0, in3, in2, in0, in1); - ILVR_D2_SH(in5, in4, in7, in6, in2, in3); - - /* stage1 */ - ILVL_H2_SH(in3, in0, in2, in1, s0, s1); - k0 = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64); - k1 = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64); - k2 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64); - k3 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64); - DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3); - SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, VP9_DCT_CONST_BITS); - PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1); - PCKEV_H2_SH(zero, tmp2, zero, tmp3, s2, s3); - BUTTERFLY_4(s0, s1, s3, s2, s4, s7, s6, s5); - - /* stage2 */ - ILVR_H2_SH(in3, in1, in2, in0, s1, s0); - k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); - k1 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); - k2 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); - k3 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); - DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3); - SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, VP9_DCT_CONST_BITS); - PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1); - PCKEV_H2_SH(zero, tmp2, zero, tmp3, s2, s3); - BUTTERFLY_4(s0, s1, s2, s3, m0, m1, m2, m3); - - /* stage3 */ - s0 = __msa_ilvr_h(s6, s5); - - k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64); - DOTP_SH2_SW(s0, s0, k1, k0, tmp0, tmp1); - SRARI_W2_SW(tmp0, tmp1, VP9_DCT_CONST_BITS); - PCKEV_H2_SH(zero, tmp0, zero, tmp1, s2, s3); - - /* stage4 */ - BUTTERFLY_8(m0, m1, m2, m3, s4, s2, s3, s7, - in0, in1, in2, in3, in4, in5, in6, in7); - TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - - /* final rounding (add 2^4, divide by 2^5) and shift */ - SRARI_H4_SH(in0, in1, in2, in3, 5); - SRARI_H4_SH(in4, in5, in6, in7, 5); - - /* add block and store 8x8 */ - VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, 
in2, in3); - dst += (4 * dst_stride); - VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7); -} - -static void vp9_idct8x8_colcol_addblk_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride) -{ - v8i16 in0, in1, in2, in3, in4, in5, in6, in7; - v8i16 zero = { 0 }; - - /* load vector elements of 8x8 block */ - LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7); - ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8); - /* 1D idct8x8 */ - VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - /* columns transform */ - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - /* 1D idct8x8 */ - VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - /* final rounding (add 2^4, divide by 2^5) and shift */ - SRARI_H4_SH(in0, in1, in2, in3, 5); - SRARI_H4_SH(in4, in5, in6, in7, 5); - /* add block and store 8x8 */ - VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3); - dst += (4 * dst_stride); - VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7); -} - -static void vp9_iadst8x8_colcol_addblk_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride) -{ - v8i16 in0, in1, in2, in3, in4, in5, in6, in7; - v8i16 res0, res1, res2, res3, res4, res5, res6, res7; - v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7; - v8i16 out0, out1, out2, out3, out4, out5, out6, out7; - v8i16 cnst0, cnst1, cnst2, cnst3, cnst4; - v8i16 temp0, temp1, temp2, temp3, s0, s1; - v8i16 zero = { 0 }; - - /* load vector elements of 8x8 block */ - LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7); - ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8); - - /* 1D adst8x8 */ - VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - - /* columns transform */ - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - - cnst0 = __msa_fill_h(cospi_2_64); - cnst1 = __msa_fill_h(cospi_30_64); - cnst2 = -cnst0; - ILVEV_H2_SH(cnst0, cnst1, cnst1, cnst2, cnst0, cnst1); - cnst2 = __msa_fill_h(cospi_18_64); - cnst3 = __msa_fill_h(cospi_14_64); - cnst4 = -cnst2; - ILVEV_H2_SH(cnst2, cnst3, cnst3, cnst4, cnst2, cnst3); - - ILVRL_H2_SH(in0, in7, temp1, temp0); - ILVRL_H2_SH(in4, in3, temp3, temp2); - VP9_DOT_ADD_SUB_SRARI_PCK(temp0, temp1, temp2, temp3, cnst0, cnst1, cnst2, - cnst3, in7, in0, in4, in3); - - cnst0 = __msa_fill_h(cospi_10_64); - cnst1 = __msa_fill_h(cospi_22_64); - cnst2 = -cnst0; - ILVEV_H2_SH(cnst0, cnst1, cnst1, cnst2, cnst0, cnst1); - cnst2 = __msa_fill_h(cospi_26_64); - cnst3 = __msa_fill_h(cospi_6_64); - cnst4 = -cnst2; - ILVEV_H2_SH(cnst2, cnst3, cnst3, cnst4, cnst2, cnst3); - - ILVRL_H2_SH(in2, in5, temp1, temp0); - ILVRL_H2_SH(in6, in1, temp3, temp2); - VP9_DOT_ADD_SUB_SRARI_PCK(temp0, temp1, temp2, temp3, cnst0, cnst1, cnst2, - cnst3, in5, in2, in6, in1); - BUTTERFLY_4(in7, in0, in2, in5, s1, s0, in2, in5); - out7 = -s0; - out0 = s1; - SRARI_H2_SH(out0, out7, 5); - dst0 = LD_UB(dst + 0 * dst_stride); - dst7 = LD_UB(dst + 7 * dst_stride); - - res0 = (v8i16) __msa_ilvr_b((v16i8) zero, (v16i8) dst0); - res0 += out0; - CLIP_SH_0_255(res0); - res0 = (v8i16) __msa_pckev_b((v16i8) res0, (v16i8) res0); - ST_D1(res0, 0, dst); - - res7 = (v8i16) __msa_ilvr_b((v16i8) zero, (v16i8) dst7); - res7 += out7; - CLIP_SH_0_255(res7); - res7 = (v8i16) __msa_pckev_b((v16i8) res7, (v16i8) res7); - ST_D1(res7, 0, dst + 7 * dst_stride); - - cnst1 = __msa_fill_h(cospi_24_64); - 
cnst0 = __msa_fill_h(cospi_8_64); - cnst3 = -cnst1; - cnst2 = -cnst0; - - ILVEV_H2_SH(cnst3, cnst0, cnst1, cnst2, cnst3, cnst2); - cnst0 = __msa_ilvev_h(cnst1, cnst0); - cnst1 = cnst0; - - ILVRL_H2_SH(in4, in3, temp1, temp0); - ILVRL_H2_SH(in6, in1, temp3, temp2); - VP9_DOT_ADD_SUB_SRARI_PCK(temp0, temp1, temp2, temp3, cnst0, cnst2, cnst3, - cnst1, out1, out6, s0, s1); - out1 = -out1; - SRARI_H2_SH(out1, out6, 5); - dst1 = LD_UB(dst + 1 * dst_stride); - dst6 = LD_UB(dst + 6 * dst_stride); - ILVR_B2_SH(zero, dst1, zero, dst6, res1, res6); - ADD2(res1, out1, res6, out6, res1, res6); - CLIP_SH2_0_255(res1, res6); - PCKEV_B2_SH(res1, res1, res6, res6, res1, res6); - ST_D1(res1, 0, dst + dst_stride); - ST_D1(res6, 0, dst + 6 * dst_stride); - - cnst0 = __msa_fill_h(cospi_16_64); - cnst1 = -cnst0; - cnst1 = __msa_ilvev_h(cnst1, cnst0); - - ILVRL_H2_SH(in2, in5, temp1, temp0); - ILVRL_H2_SH(s0, s1, temp3, temp2); - out3 = VP9_DOT_SHIFT_RIGHT_PCK_H(temp0, temp1, cnst0); - out4 = VP9_DOT_SHIFT_RIGHT_PCK_H(temp0, temp1, cnst1); - out3 = -out3; - SRARI_H2_SH(out3, out4, 5); - dst3 = LD_UB(dst + 3 * dst_stride); - dst4 = LD_UB(dst + 4 * dst_stride); - ILVR_B2_SH(zero, dst3, zero, dst4, res3, res4); - ADD2(res3, out3, res4, out4, res3, res4); - CLIP_SH2_0_255(res3, res4); - PCKEV_B2_SH(res3, res3, res4, res4, res3, res4); - ST_D1(res3, 0, dst + 3 * dst_stride); - ST_D1(res4, 0, dst + 4 * dst_stride); - - out2 = VP9_DOT_SHIFT_RIGHT_PCK_H(temp2, temp3, cnst0); - out5 = VP9_DOT_SHIFT_RIGHT_PCK_H(temp2, temp3, cnst1); - out5 = -out5; - SRARI_H2_SH(out2, out5, 5); - dst2 = LD_UB(dst + 2 * dst_stride); - dst5 = LD_UB(dst + 5 * dst_stride); - ILVR_B2_SH(zero, dst2, zero, dst5, res2, res5); - ADD2(res2, out2, res5, out5, res2, res5); - CLIP_SH2_0_255(res2, res5); - PCKEV_B2_SH(res2, res2, res5, res5, res2, res5); - ST_D1(res2, 0, dst + 2 * dst_stride); - ST_D1(res5, 0, dst + 5 * dst_stride); -} - -static void vp9_iadst_idct_8x8_add_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride, int32_t eob) -{ - v8i16 in0, in1, in2, in3, in4, in5, in6, in7; - v8i16 zero = { 0 }; - - /* load vector elements of 8x8 block */ - LD_SH8(input, 8, in1, in6, in3, in4, in5, in2, in7, in0); - ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8); - /* 1D idct8x8 */ - VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - /* columns transform */ - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - /* 1D idct8x8 */ - VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - /* final rounding (add 2^4, divide by 2^5) and shift */ - SRARI_H4_SH(in0, in1, in2, in3, 5); - SRARI_H4_SH(in4, in5, in6, in7, 5); - /* add block and store 8x8 */ - VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3); - dst += (4 * dst_stride); - VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7); -} - -static void vp9_idct_iadst_8x8_add_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride, int32_t eob) -{ - v8i16 in0, in1, in2, in3, in4, in5, in6, in7; - v8i16 zero = { 0 }; - - /* load vector elements of 8x8 block */ - LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7); - ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8); - - /* 1D idct8x8 */ - VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - /* columns transform */ - TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, - in1, in6, in3, in4, in5, in2, in7, in0); - /* 1D 
idct8x8 */ - VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, - in0, in1, in2, in3, in4, in5, in6, in7); - /* final rounding (add 2^4, divide by 2^5) and shift */ - SRARI_H4_SH(in0, in1, in2, in3, 5); - SRARI_H4_SH(in4, in5, in6, in7, 5); - /* add block and store 8x8 */ - VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3); - dst += (4 * dst_stride); - VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7); -} - -#define VP9_IADST8x16_1D(r0, r1, r2, r3, r4, r5, r6, r7, r8, \ - r9, r10, r11, r12, r13, r14, r15, \ - out0, out1, out2, out3, out4, out5, \ - out6, out7, out8, out9, out10, out11, \ - out12, out13, out14, out15) \ -{ \ - v8i16 g0_m, g1_m, g2_m, g3_m, g4_m, g5_m, g6_m, g7_m; \ - v8i16 g8_m, g9_m, g10_m, g11_m, g12_m, g13_m, g14_m, g15_m; \ - v8i16 h0_m, h1_m, h2_m, h3_m, h4_m, h5_m, h6_m, h7_m; \ - v8i16 h8_m, h9_m, h10_m, h11_m; \ - v8i16 k0_m, k1_m, k2_m, k3_m; \ - \ - /* stage 1 */ \ - k0_m = VP9_SET_COSPI_PAIR(cospi_1_64, cospi_31_64); \ - k1_m = VP9_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64); \ - k2_m = VP9_SET_COSPI_PAIR(cospi_17_64, cospi_15_64); \ - k3_m = VP9_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64); \ - VP9_MADD_BF(r15, r0, r7, r8, k0_m, k1_m, k2_m, k3_m, \ - g0_m, g1_m, g2_m, g3_m); \ - k0_m = VP9_SET_COSPI_PAIR(cospi_5_64, cospi_27_64); \ - k1_m = VP9_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64); \ - k2_m = VP9_SET_COSPI_PAIR(cospi_21_64, cospi_11_64); \ - k3_m = VP9_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64); \ - VP9_MADD_BF(r13, r2, r5, r10, k0_m, k1_m, k2_m, k3_m, \ - g4_m, g5_m, g6_m, g7_m); \ - k0_m = VP9_SET_COSPI_PAIR(cospi_9_64, cospi_23_64); \ - k1_m = VP9_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64); \ - k2_m = VP9_SET_COSPI_PAIR(cospi_25_64, cospi_7_64); \ - k3_m = VP9_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64); \ - VP9_MADD_BF(r11, r4, r3, r12, k0_m, k1_m, k2_m, k3_m, \ - g8_m, g9_m, g10_m, g11_m); \ - k0_m = VP9_SET_COSPI_PAIR(cospi_13_64, cospi_19_64); \ - k1_m = VP9_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64); \ - k2_m = VP9_SET_COSPI_PAIR(cospi_29_64, cospi_3_64); \ - k3_m = VP9_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64); \ - VP9_MADD_BF(r9, r6, r1, r14, k0_m, k1_m, k2_m, k3_m, \ - g12_m, g13_m, g14_m, g15_m); \ - \ - /* stage 2 */ \ - k0_m = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64); \ - k1_m = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64); \ - k2_m = VP9_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64); \ - VP9_MADD_BF(g1_m, g3_m, g9_m, g11_m, k0_m, k1_m, k2_m, k0_m, \ - h0_m, h1_m, h2_m, h3_m); \ - k0_m = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64); \ - k1_m = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64); \ - k2_m = VP9_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64); \ - VP9_MADD_BF(g7_m, g5_m, g15_m, g13_m, k0_m, k1_m, k2_m, k0_m, \ - h4_m, h5_m, h6_m, h7_m); \ - BUTTERFLY_4(h0_m, h2_m, h6_m, h4_m, out8, out9, out11, out10); \ - BUTTERFLY_8(g0_m, g2_m, g4_m, g6_m, g14_m, g12_m, g10_m, g8_m, \ - h8_m, h9_m, h10_m, h11_m, h6_m, h4_m, h2_m, h0_m); \ - \ - /* stage 3 */ \ - BUTTERFLY_4(h8_m, h9_m, h11_m, h10_m, out0, out1, h11_m, h10_m); \ - k0_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \ - k1_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \ - k2_m = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64); \ - VP9_MADD_BF(h0_m, h2_m, h4_m, h6_m, k0_m, k1_m, k2_m, k0_m, \ - out4, out6, out5, out7); \ - VP9_MADD_BF(h1_m, h3_m, h5_m, h7_m, k0_m, k1_m, k2_m, k0_m, \ - out12, out14, out13, out15); \ - \ - /* stage 4 */ \ - k0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); \ - k1_m = VP9_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64); \ - k2_m = 
VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); \ - k3_m = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64); \ - VP9_MADD_SHORT(h10_m, h11_m, k1_m, k2_m, out2, out3); \ - VP9_MADD_SHORT(out6, out7, k0_m, k3_m, out6, out7); \ - VP9_MADD_SHORT(out10, out11, k0_m, k3_m, out10, out11); \ - VP9_MADD_SHORT(out14, out15, k1_m, k2_m, out14, out15); \ -} - -static void vp9_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride) -{ - v8i16 loc0, loc1, loc2, loc3; - v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14; - v8i16 reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15; - v8i16 tmp5, tmp6, tmp7; - v8i16 zero = { 0 }; - - /* load up 8x16 */ - LD_SH16(input, 16, - reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, - reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15); - - ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16); - input += 8 * 16; - ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16); - - VP9_DOTP_CONST_PAIR(reg2, reg14, cospi_28_64, cospi_4_64, reg2, reg14); - VP9_DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6); - BUTTERFLY_4(reg2, reg14, reg6, reg10, loc0, loc1, reg14, reg2); - VP9_DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3); - VP9_DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8); - VP9_DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12); - BUTTERFLY_4(reg8, reg0, reg4, reg12, reg2, reg6, reg10, reg14); - - reg0 = reg2 - loc1; - reg2 = reg2 + loc1; - reg12 = reg14 - loc0; - reg14 = reg14 + loc0; - reg4 = reg6 - loc3; - reg6 = reg6 + loc3; - reg8 = reg10 - loc2; - reg10 = reg10 + loc2; - - /* stage 2 */ - VP9_DOTP_CONST_PAIR(reg1, reg15, cospi_30_64, cospi_2_64, reg1, reg15); - VP9_DOTP_CONST_PAIR(reg9, reg7, cospi_14_64, cospi_18_64, loc2, loc3); - - reg9 = reg1 - loc2; - reg1 = reg1 + loc2; - reg7 = reg15 - loc3; - reg15 = reg15 + loc3; - - VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_22_64, cospi_10_64, reg5, reg11); - VP9_DOTP_CONST_PAIR(reg13, reg3, cospi_6_64, cospi_26_64, loc0, loc1); - BUTTERFLY_4(loc0, loc1, reg11, reg5, reg13, reg3, reg11, reg5); - - loc1 = reg15 + reg3; - reg3 = reg15 - reg3; - loc2 = reg2 + loc1; - reg15 = reg2 - loc1; - - loc1 = reg1 + reg13; - reg13 = reg1 - reg13; - loc0 = reg0 + loc1; - loc1 = reg0 - loc1; - tmp6 = loc0; - tmp7 = loc1; - reg0 = loc2; - - VP9_DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9); - VP9_DOTP_CONST_PAIR((-reg5), (-reg11), cospi_8_64, cospi_24_64, reg5, - reg11); - - loc0 = reg9 + reg5; - reg5 = reg9 - reg5; - reg2 = reg6 + loc0; - reg1 = reg6 - loc0; - - loc0 = reg7 + reg11; - reg11 = reg7 - reg11; - loc1 = reg4 + loc0; - loc2 = reg4 - loc0; - tmp5 = loc1; - - VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11); - BUTTERFLY_4(reg8, reg10, reg11, reg5, loc0, reg4, reg9, loc1); - - reg10 = loc0; - reg11 = loc1; - - VP9_DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13); - BUTTERFLY_4(reg12, reg14, reg13, reg3, reg8, reg6, reg7, reg5); - reg13 = loc2; - - /* Transpose and store the output */ - reg12 = tmp5; - reg14 = tmp6; - reg3 = tmp7; - - SRARI_H4_SH(reg0, reg2, reg4, reg6, 6); - VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg0, reg2, reg4, reg6); - dst += (4 * dst_stride); - SRARI_H4_SH(reg8, reg10, reg12, reg14, 6); - VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg8, reg10, reg12, reg14); - dst += (4 * dst_stride); - SRARI_H4_SH(reg3, reg13, reg11, reg5, 6); - VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg3, reg13, reg11, reg5); - dst += (4 * dst_stride); - 
SRARI_H4_SH(reg7, reg9, reg1, reg15, 6); - VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg7, reg9, reg1, reg15); -} - -static void vp9_idct16_1d_columns_msa(int16_t *input, int16_t *output) -{ - v8i16 loc0, loc1, loc2, loc3; - v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14; - v8i16 reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15; - v8i16 tmp5, tmp6, tmp7; - v8i16 zero = { 0 }; - - /* load up 8x16 */ - LD_SH16(input, 16, - reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, - reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15); - - ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16); - input += 16 * 8; - ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16); - - VP9_DOTP_CONST_PAIR(reg2, reg14, cospi_28_64, cospi_4_64, reg2, reg14); - VP9_DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6); - BUTTERFLY_4(reg2, reg14, reg6, reg10, loc0, loc1, reg14, reg2); - VP9_DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3); - VP9_DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8); - VP9_DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12); - BUTTERFLY_4(reg8, reg0, reg4, reg12, reg2, reg6, reg10, reg14); - - reg0 = reg2 - loc1; - reg2 = reg2 + loc1; - reg12 = reg14 - loc0; - reg14 = reg14 + loc0; - reg4 = reg6 - loc3; - reg6 = reg6 + loc3; - reg8 = reg10 - loc2; - reg10 = reg10 + loc2; - - /* stage 2 */ - VP9_DOTP_CONST_PAIR(reg1, reg15, cospi_30_64, cospi_2_64, reg1, reg15); - VP9_DOTP_CONST_PAIR(reg9, reg7, cospi_14_64, cospi_18_64, loc2, loc3); - - reg9 = reg1 - loc2; - reg1 = reg1 + loc2; - reg7 = reg15 - loc3; - reg15 = reg15 + loc3; - - VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_22_64, cospi_10_64, reg5, reg11); - VP9_DOTP_CONST_PAIR(reg13, reg3, cospi_6_64, cospi_26_64, loc0, loc1); - BUTTERFLY_4(loc0, loc1, reg11, reg5, reg13, reg3, reg11, reg5); - - loc1 = reg15 + reg3; - reg3 = reg15 - reg3; - loc2 = reg2 + loc1; - reg15 = reg2 - loc1; - - loc1 = reg1 + reg13; - reg13 = reg1 - reg13; - loc0 = reg0 + loc1; - loc1 = reg0 - loc1; - tmp6 = loc0; - tmp7 = loc1; - reg0 = loc2; - - VP9_DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9); - VP9_DOTP_CONST_PAIR((-reg5), (-reg11), cospi_8_64, cospi_24_64, reg5, - reg11); - - loc0 = reg9 + reg5; - reg5 = reg9 - reg5; - reg2 = reg6 + loc0; - reg1 = reg6 - loc0; - - loc0 = reg7 + reg11; - reg11 = reg7 - reg11; - loc1 = reg4 + loc0; - loc2 = reg4 - loc0; - - tmp5 = loc1; - - VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11); - BUTTERFLY_4(reg8, reg10, reg11, reg5, loc0, reg4, reg9, loc1); - - reg10 = loc0; - reg11 = loc1; - - VP9_DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13); - BUTTERFLY_4(reg12, reg14, reg13, reg3, reg8, reg6, reg7, reg5); - reg13 = loc2; - - /* Transpose and store the output */ - reg12 = tmp5; - reg14 = tmp6; - reg3 = tmp7; - - /* transpose block */ - TRANSPOSE8x8_SH_SH(reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14, - reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14); - ST_SH4(reg0, reg2, reg4, reg6, output, 16); - ST_SH4(reg8, reg10, reg12, reg14, (output + 4 * 16), 16); - - /* transpose block */ - TRANSPOSE8x8_SH_SH(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15, - reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15); - ST_SH4(reg3, reg13, reg11, reg5, (output + 8), 16); - ST_SH4(reg7, reg9, reg1, reg15, (output + 8 + 4 * 16), 16); -} - -static void vp9_idct16x16_1_add_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride) -{ - uint8_t i; - int16_t out; - v8i16 vec, res0, res1, res2, res3, 
res4, res5, res6, res7; - v16u8 dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3; - - out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS); - out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS); - out = ROUND_POWER_OF_TWO(out, 6); - input[0] = 0; - - vec = __msa_fill_h(out); - - for (i = 4; i--;) { - LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3); - UNPCK_UB_SH(dst0, res0, res4); - UNPCK_UB_SH(dst1, res1, res5); - UNPCK_UB_SH(dst2, res2, res6); - UNPCK_UB_SH(dst3, res3, res7); - ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, - res3); - ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6, - res7); - CLIP_SH8_0_255(res0, res1, res2, res3, res4, res5, res6, res7); - PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3, - tmp0, tmp1, tmp2, tmp3); - ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride); - dst += (4 * dst_stride); - } -} - -static void vp9_idct16x16_10_colcol_addblk_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride) -{ - int32_t i; - int16_t out_arr[16 * 16] ALLOC_ALIGNED(ALIGNMENT); - int16_t *out = out_arr; - - /* transform rows */ - vp9_idct16_1d_columns_msa(input, out); - - /* short case just considers top 4 rows as valid output */ - out += 4 * 16; - for (i = 12; i--;) { - __asm__ volatile ( - "sw $zero, 0(%[out]) \n\t" - "sw $zero, 4(%[out]) \n\t" - "sw $zero, 8(%[out]) \n\t" - "sw $zero, 12(%[out]) \n\t" - "sw $zero, 16(%[out]) \n\t" - "sw $zero, 20(%[out]) \n\t" - "sw $zero, 24(%[out]) \n\t" - "sw $zero, 28(%[out]) \n\t" - - : - : [out] "r" (out) - ); - - out += 16; - } - - out = out_arr; - - /* transform columns */ - for (i = 0; i < 2; i++) { - /* process 8 * 16 block */ - vp9_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)), - dst_stride); - } -} - -static void vp9_idct16x16_colcol_addblk_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride) -{ - int32_t i; - int16_t out_arr[16 * 16] ALLOC_ALIGNED(ALIGNMENT); - int16_t *out = out_arr; - - /* transform rows */ - for (i = 0; i < 2; i++) { - /* process 8 * 16 block */ - vp9_idct16_1d_columns_msa((input + (i << 3)), (out + (i << 7))); - } - - /* transform columns */ - for (i = 0; i < 2; i++) { - /* process 8 * 16 block */ - vp9_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)), - dst_stride); - } -} - -static void vp9_iadst16_1d_columns_msa(int16_t *input, int16_t *output) -{ - v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15; - v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15; - v8i16 zero = { 0 }; - - /* load input data */ - LD_SH16(input, 16, - l0, l1, l2, l3, l4, l5, l6, l7, - l8, l9, l10, l11, l12, l13, l14, l15); - - ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16); - input += 16 * 8; - ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16); - - /* ADST in horizontal */ - VP9_IADST8x16_1D(l0, l1, l2, l3, l4, l5, l6, l7, - l8, l9, l10, l11, l12, l13, l14, l15, - r0, r1, r2, r3, r4, r5, r6, r7, - r8, r9, r10, r11, r12, r13, r14, r15); - - l1 = -r8; - l3 = -r4; - l13 = -r13; - l15 = -r1; - - TRANSPOSE8x8_SH_SH(r0, l1, r12, l3, r6, r14, r10, r2, - l0, l1, l2, l3, l4, l5, l6, l7); - ST_SH8(l0, l1, l2, l3, l4, l5, l6, l7, output, 16); - TRANSPOSE8x8_SH_SH(r3, r11, r15, r7, r5, l13, r9, l15, - l8, l9, l10, l11, l12, l13, l14, l15); - ST_SH8(l8, l9, l10, l11, l12, l13, l14, l15, (output + 8), 16); -} - -static void vp9_iadst16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride) -{ - v8i16 v0, v2, v4, v6, k0, k1, k2, k3; - 
v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15; - v8i16 out0, out1, out2, out3, out4, out5, out6, out7; - v8i16 out8, out9, out10, out11, out12, out13, out14, out15; - v8i16 g0, g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, g11, g12, g13, g14, g15; - v8i16 h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11; - v8i16 res0, res1, res2, res3, res4, res5, res6, res7; - v8i16 res8, res9, res10, res11, res12, res13, res14, res15; - v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7; - v16u8 dst8, dst9, dst10, dst11, dst12, dst13, dst14, dst15; - v16i8 zero = { 0 }; - - r0 = LD_SH(input + 0 * 16); - r3 = LD_SH(input + 3 * 16); - r4 = LD_SH(input + 4 * 16); - r7 = LD_SH(input + 7 * 16); - r8 = LD_SH(input + 8 * 16); - r11 = LD_SH(input + 11 * 16); - r12 = LD_SH(input + 12 * 16); - r15 = LD_SH(input + 15 * 16); - - /* stage 1 */ - k0 = VP9_SET_COSPI_PAIR(cospi_1_64, cospi_31_64); - k1 = VP9_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64); - k2 = VP9_SET_COSPI_PAIR(cospi_17_64, cospi_15_64); - k3 = VP9_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64); - VP9_MADD_BF(r15, r0, r7, r8, k0, k1, k2, k3, g0, g1, g2, g3); - k0 = VP9_SET_COSPI_PAIR(cospi_9_64, cospi_23_64); - k1 = VP9_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64); - k2 = VP9_SET_COSPI_PAIR(cospi_25_64, cospi_7_64); - k3 = VP9_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64); - VP9_MADD_BF(r11, r4, r3, r12, k0, k1, k2, k3, g8, g9, g10, g11); - BUTTERFLY_4(g0, g2, g10, g8, h8, h9, v2, v0); - k0 = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64); - k1 = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64); - k2 = VP9_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64); - VP9_MADD_BF(g1, g3, g9, g11, k0, k1, k2, k0, h0, h1, h2, h3); - - r1 = LD_SH(input + 1 * 16); - r2 = LD_SH(input + 2 * 16); - r5 = LD_SH(input + 5 * 16); - r6 = LD_SH(input + 6 * 16); - r9 = LD_SH(input + 9 * 16); - r10 = LD_SH(input + 10 * 16); - r13 = LD_SH(input + 13 * 16); - r14 = LD_SH(input + 14 * 16); - - k0 = VP9_SET_COSPI_PAIR(cospi_5_64, cospi_27_64); - k1 = VP9_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64); - k2 = VP9_SET_COSPI_PAIR(cospi_21_64, cospi_11_64); - k3 = VP9_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64); - VP9_MADD_BF(r13, r2, r5, r10, k0, k1, k2, k3, g4, g5, g6, g7); - k0 = VP9_SET_COSPI_PAIR(cospi_13_64, cospi_19_64); - k1 = VP9_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64); - k2 = VP9_SET_COSPI_PAIR(cospi_29_64, cospi_3_64); - k3 = VP9_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64); - VP9_MADD_BF(r9, r6, r1, r14, k0, k1, k2, k3, g12, g13, g14, g15); - BUTTERFLY_4(g4, g6, g14, g12, h10, h11, v6, v4); - BUTTERFLY_4(h8, h9, h11, h10, out0, out1, h11, h10); - out1 = -out1; - SRARI_H2_SH(out0, out1, 6); - dst0 = LD_UB(dst + 0 * dst_stride); - dst1 = LD_UB(dst + 15 * dst_stride); - ILVR_B2_SH(zero, dst0, zero, dst1, res0, res1); - ADD2(res0, out0, res1, out1, res0, res1); - CLIP_SH2_0_255(res0, res1); - PCKEV_B2_SH(res0, res0, res1, res1, res0, res1); - ST_D1(res0, 0, dst); - ST_D1(res1, 0, dst + 15 * dst_stride); - - k0 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64); - k1 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64); - k2 = VP9_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64); - VP9_MADD_BF(g7, g5, g15, g13, k0, k1, k2, k0, h4, h5, h6, h7); - BUTTERFLY_4(h0, h2, h6, h4, out8, out9, out11, out10); - out8 = -out8; - - SRARI_H2_SH(out8, out9, 6); - dst8 = LD_UB(dst + 1 * dst_stride); - dst9 = LD_UB(dst + 14 * dst_stride); - ILVR_B2_SH(zero, dst8, zero, dst9, res8, res9); - ADD2(res8, out8, res9, out9, res8, res9); - CLIP_SH2_0_255(res8, res9); - PCKEV_B2_SH(res8, res8, res9, res9, res8, res9); - ST_D1(res8, 0, 
dst + dst_stride); - ST_D1(res9, 0, dst + 14 * dst_stride); - - k0 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); - k1 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); - k2 = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64); - VP9_MADD_BF(v0, v2, v4, v6, k0, k1, k2, k0, out4, out6, out5, out7); - out4 = -out4; - SRARI_H2_SH(out4, out5, 6); - dst4 = LD_UB(dst + 3 * dst_stride); - dst5 = LD_UB(dst + 12 * dst_stride); - ILVR_B2_SH(zero, dst4, zero, dst5, res4, res5); - ADD2(res4, out4, res5, out5, res4, res5); - CLIP_SH2_0_255(res4, res5); - PCKEV_B2_SH(res4, res4, res5, res5, res4, res5); - ST_D1(res4, 0, dst + 3 * dst_stride); - ST_D1(res5, 0, dst + 12 * dst_stride); - - VP9_MADD_BF(h1, h3, h5, h7, k0, k1, k2, k0, out12, out14, out13, out15); - out13 = -out13; - SRARI_H2_SH(out12, out13, 6); - dst12 = LD_UB(dst + 2 * dst_stride); - dst13 = LD_UB(dst + 13 * dst_stride); - ILVR_B2_SH(zero, dst12, zero, dst13, res12, res13); - ADD2(res12, out12, res13, out13, res12, res13); - CLIP_SH2_0_255(res12, res13); - PCKEV_B2_SH(res12, res12, res13, res13, res12, res13); - ST_D1(res12, 0, dst + 2 * dst_stride); - ST_D1(res13, 0, dst + 13 * dst_stride); - - k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); - k3 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64); - VP9_MADD_SHORT(out6, out7, k0, k3, out6, out7); - SRARI_H2_SH(out6, out7, 6); - dst6 = LD_UB(dst + 4 * dst_stride); - dst7 = LD_UB(dst + 11 * dst_stride); - ILVR_B2_SH(zero, dst6, zero, dst7, res6, res7); - ADD2(res6, out6, res7, out7, res6, res7); - CLIP_SH2_0_255(res6, res7); - PCKEV_B2_SH(res6, res6, res7, res7, res6, res7); - ST_D1(res6, 0, dst + 4 * dst_stride); - ST_D1(res7, 0, dst + 11 * dst_stride); - - VP9_MADD_SHORT(out10, out11, k0, k3, out10, out11); - SRARI_H2_SH(out10, out11, 6); - dst10 = LD_UB(dst + 6 * dst_stride); - dst11 = LD_UB(dst + 9 * dst_stride); - ILVR_B2_SH(zero, dst10, zero, dst11, res10, res11); - ADD2(res10, out10, res11, out11, res10, res11); - CLIP_SH2_0_255(res10, res11); - PCKEV_B2_SH(res10, res10, res11, res11, res10, res11); - ST_D1(res10, 0, dst + 6 * dst_stride); - ST_D1(res11, 0, dst + 9 * dst_stride); - - k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64); - k2 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); - VP9_MADD_SHORT(h10, h11, k1, k2, out2, out3); - SRARI_H2_SH(out2, out3, 6); - dst2 = LD_UB(dst + 7 * dst_stride); - dst3 = LD_UB(dst + 8 * dst_stride); - ILVR_B2_SH(zero, dst2, zero, dst3, res2, res3); - ADD2(res2, out2, res3, out3, res2, res3); - CLIP_SH2_0_255(res2, res3); - PCKEV_B2_SH(res2, res2, res3, res3, res2, res3); - ST_D1(res2, 0, dst + 7 * dst_stride); - ST_D1(res3, 0, dst + 8 * dst_stride); - - VP9_MADD_SHORT(out14, out15, k1, k2, out14, out15); - SRARI_H2_SH(out14, out15, 6); - dst14 = LD_UB(dst + 5 * dst_stride); - dst15 = LD_UB(dst + 10 * dst_stride); - ILVR_B2_SH(zero, dst14, zero, dst15, res14, res15); - ADD2(res14, out14, res15, out15, res14, res15); - CLIP_SH2_0_255(res14, res15); - PCKEV_B2_SH(res14, res14, res15, res15, res14, res15); - ST_D1(res14, 0, dst + 5 * dst_stride); - ST_D1(res15, 0, dst + 10 * dst_stride); -} - -static void vp9_iadst16x16_colcol_addblk_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride) -{ - int16_t out_arr[16 * 16] ALLOC_ALIGNED(ALIGNMENT); - int16_t *out = out_arr; - int32_t i; - - /* transform rows */ - for (i = 0; i < 2; i++) { - /* process 16 * 8 block */ - vp9_iadst16_1d_columns_msa((input + (i << 3)), (out + (i << 7))); - } - - /* transform columns */ - for (i = 0; i < 2; i++) { - /* process 8 * 16 block */ - 
vp9_iadst16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)), - dst_stride); - } -} - -static void vp9_iadst_idct_16x16_add_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride, int32_t eob) -{ - int32_t i; - int16_t out[16 * 16]; - int16_t *out_ptr = &out[0]; - - /* transform rows */ - for (i = 0; i < 2; i++) { - /* process 8 * 16 block */ - vp9_iadst16_1d_columns_msa((input + (i << 3)), (out_ptr + (i << 7))); - } - - /* transform columns */ - for (i = 0; i < 2; i++) { - /* process 8 * 16 block */ - vp9_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)), - (dst + (i << 3)), dst_stride); - } -} - -static void vp9_idct_iadst_16x16_add_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride, int32_t eob) -{ - int32_t i; - int16_t out[16 * 16]; - int16_t *out_ptr = &out[0]; - - /* transform rows */ - for (i = 0; i < 2; i++) { - /* process 8 * 16 block */ - vp9_idct16_1d_columns_msa((input + (i << 3)), (out_ptr + (i << 7))); - } - - /* transform columns */ - for (i = 0; i < 2; i++) { - /* process 8 * 16 block */ - vp9_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)), - (dst + (i << 3)), dst_stride); - } -} - -static void vp9_idct_butterfly_transpose_store(int16_t *tmp_buf, - int16_t *tmp_eve_buf, - int16_t *tmp_odd_buf, - int16_t *dst) -{ - v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; - v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7; - - /* FINAL BUTTERFLY : Dependency on Even & Odd */ - vec0 = LD_SH(tmp_odd_buf); - vec1 = LD_SH(tmp_odd_buf + 9 * 8); - vec2 = LD_SH(tmp_odd_buf + 14 * 8); - vec3 = LD_SH(tmp_odd_buf + 6 * 8); - loc0 = LD_SH(tmp_eve_buf); - loc1 = LD_SH(tmp_eve_buf + 8 * 8); - loc2 = LD_SH(tmp_eve_buf + 4 * 8); - loc3 = LD_SH(tmp_eve_buf + 12 * 8); - - ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6); - - ST_SH((loc0 - vec3), (tmp_buf + 31 * 8)); - ST_SH((loc1 - vec2), (tmp_buf + 23 * 8)); - ST_SH((loc2 - vec1), (tmp_buf + 27 * 8)); - ST_SH((loc3 - vec0), (tmp_buf + 19 * 8)); - - /* Load 8 & Store 8 */ - vec0 = LD_SH(tmp_odd_buf + 4 * 8); - vec1 = LD_SH(tmp_odd_buf + 13 * 8); - vec2 = LD_SH(tmp_odd_buf + 10 * 8); - vec3 = LD_SH(tmp_odd_buf + 3 * 8); - loc0 = LD_SH(tmp_eve_buf + 2 * 8); - loc1 = LD_SH(tmp_eve_buf + 10 * 8); - loc2 = LD_SH(tmp_eve_buf + 6 * 8); - loc3 = LD_SH(tmp_eve_buf + 14 * 8); - - ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7); - - ST_SH((loc0 - vec3), (tmp_buf + 29 * 8)); - ST_SH((loc1 - vec2), (tmp_buf + 21 * 8)); - ST_SH((loc2 - vec1), (tmp_buf + 25 * 8)); - ST_SH((loc3 - vec0), (tmp_buf + 17 * 8)); - - /* Load 8 & Store 8 */ - vec0 = LD_SH(tmp_odd_buf + 2 * 8); - vec1 = LD_SH(tmp_odd_buf + 11 * 8); - vec2 = LD_SH(tmp_odd_buf + 12 * 8); - vec3 = LD_SH(tmp_odd_buf + 7 * 8); - loc0 = LD_SH(tmp_eve_buf + 1 * 8); - loc1 = LD_SH(tmp_eve_buf + 9 * 8); - loc2 = LD_SH(tmp_eve_buf + 5 * 8); - loc3 = LD_SH(tmp_eve_buf + 13 * 8); - - ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6); - - ST_SH((loc0 - vec3), (tmp_buf + 30 * 8)); - ST_SH((loc1 - vec2), (tmp_buf + 22 * 8)); - ST_SH((loc2 - vec1), (tmp_buf + 26 * 8)); - ST_SH((loc3 - vec0), (tmp_buf + 18 * 8)); - - /* Load 8 & Store 8 */ - vec0 = LD_SH(tmp_odd_buf + 5 * 8); - vec1 = LD_SH(tmp_odd_buf + 15 * 8); - vec2 = LD_SH(tmp_odd_buf + 8 * 8); - vec3 = LD_SH(tmp_odd_buf + 1 * 8); - loc0 = LD_SH(tmp_eve_buf + 3 * 8); - loc1 = LD_SH(tmp_eve_buf + 11 * 8); - loc2 = LD_SH(tmp_eve_buf + 7 * 8); - loc3 = LD_SH(tmp_eve_buf + 15 * 8); - - ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7); - - ST_SH((loc0 - 
vec3), (tmp_buf + 28 * 8)); - ST_SH((loc1 - vec2), (tmp_buf + 20 * 8)); - ST_SH((loc2 - vec1), (tmp_buf + 24 * 8)); - ST_SH((loc3 - vec0), (tmp_buf + 16 * 8)); - - /* Transpose : 16 vectors */ - /* 1st & 2nd 8x8 */ - TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, - m0, n0, m1, n1, m2, n2, m3, n3); - ST_SH4(m0, n0, m1, n1, (dst + 0), 32); - ST_SH4(m2, n2, m3, n3, (dst + 4 * 32), 32); - - TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, - m4, n4, m5, n5, m6, n6, m7, n7); - ST_SH4(m4, n4, m5, n5, (dst + 8), 32); - ST_SH4(m6, n6, m7, n7, (dst + 8 + 4 * 32), 32); - - /* 3rd & 4th 8x8 */ - LD_SH8((tmp_buf + 8 * 16), 8, m0, n0, m1, n1, m2, n2, m3, n3); - LD_SH8((tmp_buf + 12 * 16), 8, m4, n4, m5, n5, m6, n6, m7, n7); - TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3, - m0, n0, m1, n1, m2, n2, m3, n3); - ST_SH4(m0, n0, m1, n1, (dst + 16), 32); - ST_SH4(m2, n2, m3, n3, (dst + 16 + 4 * 32), 32); - - TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7, - m4, n4, m5, n5, m6, n6, m7, n7); - ST_SH4(m4, n4, m5, n5, (dst + 24), 32); - ST_SH4(m6, n6, m7, n7, (dst + 24 + 4 * 32), 32); -} - -static void vp9_idct8x32_column_even_process_store(int16_t *tmp_buf, - int16_t *tmp_eve_buf) -{ - v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; - v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7; - v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7; - v8i16 zero = { 0 }; - - /* Even stage 1 */ - LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7); - ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, tmp_buf, (4 * 32)); - tmp_buf += (2 * 32); - - VP9_DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7); - VP9_DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3); - BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0); - VP9_DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3); - - loc1 = vec3; - loc0 = vec1; - - VP9_DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4); - VP9_DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6); - BUTTERFLY_4(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0); - BUTTERFLY_4(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4); - BUTTERFLY_4(vec2, vec3, loc3, loc2, stp2, stp1, stp6, stp5); - - /* Even stage 2 */ - /* Load 8 */ - LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7); - ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, tmp_buf, (4 * 32)); - - VP9_DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7); - VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3); - VP9_DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5); - VP9_DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1); - - vec0 = reg0 + reg4; - reg0 = reg0 - reg4; - reg4 = reg6 + reg2; - reg6 = reg6 - reg2; - reg2 = reg1 + reg5; - reg1 = reg1 - reg5; - reg5 = reg7 + reg3; - reg7 = reg7 - reg3; - reg3 = vec0; - - vec1 = reg2; - reg2 = reg3 + reg4; - reg3 = reg3 - reg4; - reg4 = reg5 - vec1; - reg5 = reg5 + vec1; - - VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7); - VP9_DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1); - - vec0 = reg0 - reg6; - reg0 = reg0 + reg6; - vec1 = reg7 - reg1; - reg7 = reg7 + reg1; - - VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1); - VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4); - - /* Even stage 3 : Dependency on Even stage 1 & Even stage 2 */ - /* Store 8 */ - BUTTERFLY_4(stp0, stp1, reg7, reg5, loc1, loc3, loc2, loc0); - 
ST_SH2(loc1, loc3, tmp_eve_buf, 8); - ST_SH2(loc2, loc0, (tmp_eve_buf + 14 * 8), 8); - - BUTTERFLY_4(stp2, stp3, reg4, reg1, loc1, loc3, loc2, loc0); - ST_SH2(loc1, loc3, (tmp_eve_buf + 2 * 8), 8); - ST_SH2(loc2, loc0, (tmp_eve_buf + 12 * 8), 8); - - /* Store 8 */ - BUTTERFLY_4(stp4, stp5, reg6, reg3, loc1, loc3, loc2, loc0); - ST_SH2(loc1, loc3, (tmp_eve_buf + 4 * 8), 8); - ST_SH2(loc2, loc0, (tmp_eve_buf + 10 * 8), 8); - - BUTTERFLY_4(stp6, stp7, reg2, reg0, loc1, loc3, loc2, loc0); - ST_SH2(loc1, loc3, (tmp_eve_buf + 6 * 8), 8); - ST_SH2(loc2, loc0, (tmp_eve_buf + 8 * 8), 8); -} - -static void vp9_idct8x32_column_odd_process_store(int16_t *tmp_buf, - int16_t *tmp_odd_buf) -{ - v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; - v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7; - v8i16 zero = { 0 }; - - /* Odd stage 1 */ - reg0 = LD_SH(tmp_buf + 32); - reg1 = LD_SH(tmp_buf + 7 * 32); - reg2 = LD_SH(tmp_buf + 9 * 32); - reg3 = LD_SH(tmp_buf + 15 * 32); - reg4 = LD_SH(tmp_buf + 17 * 32); - reg5 = LD_SH(tmp_buf + 23 * 32); - reg6 = LD_SH(tmp_buf + 25 * 32); - reg7 = LD_SH(tmp_buf + 31 * 32); - - ST_SH(zero, tmp_buf + 32); - ST_SH(zero, tmp_buf + 7 * 32); - ST_SH(zero, tmp_buf + 9 * 32); - ST_SH(zero, tmp_buf + 15 * 32); - ST_SH(zero, tmp_buf + 17 * 32); - ST_SH(zero, tmp_buf + 23 * 32); - ST_SH(zero, tmp_buf + 25 * 32); - ST_SH(zero, tmp_buf + 31 * 32); - - VP9_DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7); - VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4); - VP9_DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5); - VP9_DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6); - - vec0 = reg0 + reg3; - reg0 = reg0 - reg3; - reg3 = reg7 + reg4; - reg7 = reg7 - reg4; - reg4 = reg1 + reg2; - reg1 = reg1 - reg2; - reg2 = reg6 + reg5; - reg6 = reg6 - reg5; - reg5 = vec0; - - /* 4 Stores */ - ADD2(reg5, reg4, reg3, reg2, vec0, vec1); - ST_SH2(vec0, vec1, (tmp_odd_buf + 4 * 8), 8); - SUB2(reg5, reg4, reg3, reg2, vec0, vec1); - VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1); - ST_SH2(vec0, vec1, tmp_odd_buf, 8); - - /* 4 Stores */ - VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7); - VP9_DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6); - BUTTERFLY_4(reg0, reg7, reg6, reg1, vec0, vec1, vec2, vec3); - ST_SH2(vec0, vec1, (tmp_odd_buf + 6 * 8), 8); - VP9_DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3); - ST_SH2(vec2, vec3, (tmp_odd_buf + 2 * 8), 8); - - /* Odd stage 2 */ - /* 8 loads */ - reg0 = LD_SH(tmp_buf + 3 * 32); - reg1 = LD_SH(tmp_buf + 5 * 32); - reg2 = LD_SH(tmp_buf + 11 * 32); - reg3 = LD_SH(tmp_buf + 13 * 32); - reg4 = LD_SH(tmp_buf + 19 * 32); - reg5 = LD_SH(tmp_buf + 21 * 32); - reg6 = LD_SH(tmp_buf + 27 * 32); - reg7 = LD_SH(tmp_buf + 29 * 32); - - ST_SH(zero, tmp_buf + 3 * 32); - ST_SH(zero, tmp_buf + 5 * 32); - ST_SH(zero, tmp_buf + 11 * 32); - ST_SH(zero, tmp_buf + 13 * 32); - ST_SH(zero, tmp_buf + 19 * 32); - ST_SH(zero, tmp_buf + 21 * 32); - ST_SH(zero, tmp_buf + 27 * 32); - ST_SH(zero, tmp_buf + 29 * 32); - - VP9_DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6); - VP9_DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5); - VP9_DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4); - VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7); - - /* 4 Stores */ - SUB4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, - vec0, vec1, vec2, vec3); - VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, 
cospi_20_64, loc0, loc1); - VP9_DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3); - BUTTERFLY_4(loc2, loc3, loc1, loc0, vec0, vec1, vec3, vec2); - ST_SH2(vec0, vec1, (tmp_odd_buf + 12 * 8), 3 * 8); - VP9_DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1); - ST_SH2(vec0, vec1, (tmp_odd_buf + 10 * 8), 8); - - /* 4 Stores */ - ADD4(reg0, reg3, reg1, reg2, reg5, reg6, reg4, reg7, - vec0, vec1, vec2, vec3); - BUTTERFLY_4(vec0, vec3, vec2, vec1, reg0, reg1, reg3, reg2); - ST_SH2(reg0, reg1, (tmp_odd_buf + 13 * 8), 8); - VP9_DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1); - ST_SH2(reg0, reg1, (tmp_odd_buf + 8 * 8), 8); - - /* Odd stage 3 : Dependency on Odd stage 1 & Odd stage 2 */ - /* Load 8 & Store 8 */ - LD_SH4(tmp_odd_buf, 8, reg0, reg1, reg2, reg3); - LD_SH4((tmp_odd_buf + 8 * 8), 8, reg4, reg5, reg6, reg7); - - ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, - loc0, loc1, loc2, loc3); - ST_SH4(loc0, loc1, loc2, loc3, tmp_odd_buf, 8); - - SUB2(reg0, reg4, reg1, reg5, vec0, vec1); - VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1); - - SUB2(reg2, reg6, reg3, reg7, vec0, vec1); - VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3); - ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 8 * 8), 8); - - /* Load 8 & Store 8 */ - LD_SH4((tmp_odd_buf + 4 * 8), 8, reg1, reg2, reg0, reg3); - LD_SH4((tmp_odd_buf + 12 * 8), 8, reg4, reg5, reg6, reg7); - - ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, - loc0, loc1, loc2, loc3); - ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 4 * 8), 8); - - SUB2(reg0, reg4, reg3, reg7, vec0, vec1); - VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1); - - SUB2(reg1, reg5, reg2, reg6, vec0, vec1); - VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3); - ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 12 * 8), 8); -} - -static void vp9_idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf, - int16_t *tmp_odd_buf, - uint8_t *dst, - int32_t dst_stride) -{ - v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; - v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7; - - /* FINAL BUTTERFLY : Dependency on Even & Odd */ - vec0 = LD_SH(tmp_odd_buf); - vec1 = LD_SH(tmp_odd_buf + 9 * 8); - vec2 = LD_SH(tmp_odd_buf + 14 * 8); - vec3 = LD_SH(tmp_odd_buf + 6 * 8); - loc0 = LD_SH(tmp_eve_buf); - loc1 = LD_SH(tmp_eve_buf + 8 * 8); - loc2 = LD_SH(tmp_eve_buf + 4 * 8); - loc3 = LD_SH(tmp_eve_buf + 12 * 8); - - ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6); - SRARI_H4_SH(m0, m2, m4, m6, 6); - VP9_ADDBLK_ST8x4_UB(dst, (4 * dst_stride), m0, m2, m4, m6); - - SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m6, m2, m4, m0); - SRARI_H4_SH(m0, m2, m4, m6, 6); - VP9_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride), - m0, m2, m4, m6); - - /* Load 8 & Store 8 */ - vec0 = LD_SH(tmp_odd_buf + 4 * 8); - vec1 = LD_SH(tmp_odd_buf + 13 * 8); - vec2 = LD_SH(tmp_odd_buf + 10 * 8); - vec3 = LD_SH(tmp_odd_buf + 3 * 8); - loc0 = LD_SH(tmp_eve_buf + 2 * 8); - loc1 = LD_SH(tmp_eve_buf + 10 * 8); - loc2 = LD_SH(tmp_eve_buf + 6 * 8); - loc3 = LD_SH(tmp_eve_buf + 14 * 8); - - ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7); - SRARI_H4_SH(m1, m3, m5, m7, 6); - VP9_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride), - m1, m3, m5, m7); - - SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m7, m3, m5, m1); - SRARI_H4_SH(m1, m3, m5, m7, 6); - VP9_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride), - 
m1, m3, m5, m7); - - /* Load 8 & Store 8 */ - vec0 = LD_SH(tmp_odd_buf + 2 * 8); - vec1 = LD_SH(tmp_odd_buf + 11 * 8); - vec2 = LD_SH(tmp_odd_buf + 12 * 8); - vec3 = LD_SH(tmp_odd_buf + 7 * 8); - loc0 = LD_SH(tmp_eve_buf + 1 * 8); - loc1 = LD_SH(tmp_eve_buf + 9 * 8); - loc2 = LD_SH(tmp_eve_buf + 5 * 8); - loc3 = LD_SH(tmp_eve_buf + 13 * 8); - - ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6); - SRARI_H4_SH(n0, n2, n4, n6, 6); - VP9_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride), - n0, n2, n4, n6); - - SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n6, n2, n4, n0); - SRARI_H4_SH(n0, n2, n4, n6, 6); - VP9_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride), - n0, n2, n4, n6); - - /* Load 8 & Store 8 */ - vec0 = LD_SH(tmp_odd_buf + 5 * 8); - vec1 = LD_SH(tmp_odd_buf + 15 * 8); - vec2 = LD_SH(tmp_odd_buf + 8 * 8); - vec3 = LD_SH(tmp_odd_buf + 1 * 8); - loc0 = LD_SH(tmp_eve_buf + 3 * 8); - loc1 = LD_SH(tmp_eve_buf + 11 * 8); - loc2 = LD_SH(tmp_eve_buf + 7 * 8); - loc3 = LD_SH(tmp_eve_buf + 15 * 8); - - ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7); - SRARI_H4_SH(n1, n3, n5, n7, 6); - VP9_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * dst_stride), - n1, n3, n5, n7); - - SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n7, n3, n5, n1); - SRARI_H4_SH(n1, n3, n5, n7, 6); - VP9_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride), - n1, n3, n5, n7); -} - -static void vp9_idct8x32_1d_columns_addblk_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride) -{ - int16_t tmp_odd_buf[16 * 8] ALLOC_ALIGNED(ALIGNMENT); - int16_t tmp_eve_buf[16 * 8] ALLOC_ALIGNED(ALIGNMENT); - - vp9_idct8x32_column_even_process_store(input, &tmp_eve_buf[0]); - vp9_idct8x32_column_odd_process_store(input, &tmp_odd_buf[0]); - vp9_idct8x32_column_butterfly_addblk(&tmp_eve_buf[0], &tmp_odd_buf[0], - dst, dst_stride); -} - -static void vp9_idct8x32_1d_columns_msa(int16_t *input, int16_t *output, - int16_t *tmp_buf) -{ - int16_t tmp_odd_buf[16 * 8] ALLOC_ALIGNED(ALIGNMENT); - int16_t tmp_eve_buf[16 * 8] ALLOC_ALIGNED(ALIGNMENT); - - vp9_idct8x32_column_even_process_store(input, &tmp_eve_buf[0]); - vp9_idct8x32_column_odd_process_store(input, &tmp_odd_buf[0]); - vp9_idct_butterfly_transpose_store(tmp_buf, &tmp_eve_buf[0], - &tmp_odd_buf[0], output); -} - -static void vp9_idct32x32_1_add_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride) -{ - int32_t i; - int16_t out; - v16u8 dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3; - v8i16 res0, res1, res2, res3, res4, res5, res6, res7, vec; - - out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS); - out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS); - out = ROUND_POWER_OF_TWO(out, 6); - input[0] = 0; - - vec = __msa_fill_h(out); - - for (i = 16; i--;) { - LD_UB2(dst, 16, dst0, dst1); - LD_UB2(dst + dst_stride, 16, dst2, dst3); - - UNPCK_UB_SH(dst0, res0, res4); - UNPCK_UB_SH(dst1, res1, res5); - UNPCK_UB_SH(dst2, res2, res6); - UNPCK_UB_SH(dst3, res3, res7); - ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, - res3); - ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6, - res7); - CLIP_SH8_0_255(res0, res1, res2, res3, res4, res5, res6, res7); - PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3, - tmp0, tmp1, tmp2, tmp3); - - ST_UB2(tmp0, tmp1, dst, 16); - dst += dst_stride; - ST_UB2(tmp2, tmp3, dst, 16); - dst += dst_stride; - } -} - -static void vp9_idct32x32_34_colcol_addblk_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride) -{ - 
int32_t i; - int16_t out_arr[32 * 32] ALLOC_ALIGNED(ALIGNMENT); - int16_t *out_ptr = out_arr; - int16_t tmp_buf[8 * 32] ALLOC_ALIGNED(ALIGNMENT); - - for (i = 32; i--;) { - __asm__ volatile ( - "sw $zero, (%[out_ptr]) \n\t" - "sw $zero, 4(%[out_ptr]) \n\t" - "sw $zero, 8(%[out_ptr]) \n\t" - "sw $zero, 12(%[out_ptr]) \n\t" - "sw $zero, 16(%[out_ptr]) \n\t" - "sw $zero, 20(%[out_ptr]) \n\t" - "sw $zero, 24(%[out_ptr]) \n\t" - "sw $zero, 28(%[out_ptr]) \n\t" - "sw $zero, 32(%[out_ptr]) \n\t" - "sw $zero, 36(%[out_ptr]) \n\t" - "sw $zero, 40(%[out_ptr]) \n\t" - "sw $zero, 44(%[out_ptr]) \n\t" - "sw $zero, 48(%[out_ptr]) \n\t" - "sw $zero, 52(%[out_ptr]) \n\t" - "sw $zero, 56(%[out_ptr]) \n\t" - "sw $zero, 60(%[out_ptr]) \n\t" - - : - : [out_ptr] "r" (out_ptr) - ); - - out_ptr += 32; - } - - out_ptr = out_arr; - - /* process 8*32 block */ - vp9_idct8x32_1d_columns_msa(input, out_ptr, &tmp_buf[0]); - - /* transform columns */ - for (i = 0; i < 4; i++) { - /* process 8*32 block */ - vp9_idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)), - (dst + (i << 3)), dst_stride); - } -} - -static void vp9_idct32x32_colcol_addblk_msa(int16_t *input, uint8_t *dst, - int32_t dst_stride) -{ - int32_t i; - int16_t out_arr[32 * 32] ALLOC_ALIGNED(ALIGNMENT); - int16_t *out_ptr = out_arr; - int16_t tmp_buf[8 * 32] ALLOC_ALIGNED(ALIGNMENT); - - /* transform rows */ - for (i = 0; i < 4; i++) { - /* process 8*32 block */ - vp9_idct8x32_1d_columns_msa((input + (i << 3)), (out_ptr + (i << 8)), - &tmp_buf[0]); - } - - /* transform columns */ - for (i = 0; i < 4; i++) { - /* process 8*32 block */ - vp9_idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)), - (dst + (i << 3)), dst_stride); - } -} - -void ff_idct_idct_4x4_add_msa(uint8_t *dst, ptrdiff_t stride, - int16_t *block, int eob) -{ - if (eob > 1) { - vp9_idct4x4_colcol_addblk_msa(block, dst, stride); - } - else { - vp9_idct4x4_1_add_msa(block, dst, stride); - } -} - -void ff_idct_idct_8x8_add_msa(uint8_t *dst, ptrdiff_t stride, - int16_t *block, int eob) -{ - if (eob == 1) { - vp9_idct8x8_1_add_msa(block, dst, stride); - } - else if (eob <= 12) { - vp9_idct8x8_12_colcol_addblk_msa(block, dst, stride); - } - else { - vp9_idct8x8_colcol_addblk_msa(block, dst, stride); - } -} - -void ff_idct_idct_16x16_add_msa(uint8_t *dst, ptrdiff_t stride, - int16_t *block, int eob) -{ - if (eob == 1) { - /* DC only DCT coefficient. 
*/ - vp9_idct16x16_1_add_msa(block, dst, stride); - } - else if (eob <= 10) { - vp9_idct16x16_10_colcol_addblk_msa(block, dst, stride); - } - else { - vp9_idct16x16_colcol_addblk_msa(block, dst, stride); - } -} - -void ff_idct_idct_32x32_add_msa(uint8_t *dst, ptrdiff_t stride, - int16_t *block, int eob) -{ - if (eob == 1) { - vp9_idct32x32_1_add_msa(block, dst, stride); - } - else if (eob <= 34) { - vp9_idct32x32_34_colcol_addblk_msa(block, dst, stride); - } - else { - vp9_idct32x32_colcol_addblk_msa(block, dst, stride); - } -} - -void ff_iadst_iadst_4x4_add_msa(uint8_t *dst, ptrdiff_t stride, - int16_t *block, int eob) -{ - vp9_iadst4x4_colcol_addblk_msa(block, dst, stride); -} - -void ff_iadst_iadst_8x8_add_msa(uint8_t *dst, ptrdiff_t stride, - int16_t *block, int eob) -{ - vp9_iadst8x8_colcol_addblk_msa(block, dst, stride); -} - -void ff_iadst_iadst_16x16_add_msa(uint8_t *dst, ptrdiff_t stride, - int16_t *block, int eob) -{ - vp9_iadst16x16_colcol_addblk_msa(block, dst, stride); -} - -void ff_idct_iadst_4x4_add_msa(uint8_t *dst, ptrdiff_t stride, - int16_t *block, int eob) -{ - vp9_idct_iadst_4x4_add_msa(block, dst, stride, eob); -} - -void ff_idct_iadst_8x8_add_msa(uint8_t *dst, ptrdiff_t stride, - int16_t *block, int eob) -{ - vp9_idct_iadst_8x8_add_msa(block, dst, stride, eob); -} - -void ff_idct_iadst_16x16_add_msa(uint8_t *dst, ptrdiff_t stride, - int16_t *block, int eob) -{ - vp9_idct_iadst_16x16_add_msa(block, dst, stride, eob); -} - -void ff_iadst_idct_4x4_add_msa(uint8_t *dst, ptrdiff_t stride, - int16_t *block, int eob) -{ - vp9_iadst_idct_4x4_add_msa(block, dst, stride, eob); -} - -void ff_iadst_idct_8x8_add_msa(uint8_t *dst, ptrdiff_t stride, - int16_t *block, int eob) -{ - vp9_iadst_idct_8x8_add_msa(block, dst, stride, eob); -} - -void ff_iadst_idct_16x16_add_msa(uint8_t *dst, ptrdiff_t stride, - int16_t *block, int eob) -{ - vp9_iadst_idct_16x16_add_msa(block, dst, stride, eob); -} diff --git a/spaces/congsaPfin/Manga-OCR/logs/Barbie Dreamhouse Adventures APK (MOD Unlocked) - The Ultimate Game for Barbie Fans.md b/spaces/congsaPfin/Manga-OCR/logs/Barbie Dreamhouse Adventures APK (MOD Unlocked) - The Ultimate Game for Barbie Fans.md deleted file mode 100644 index a6438612e252857119a88455483e5427681b43f4..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Barbie Dreamhouse Adventures APK (MOD Unlocked) - The Ultimate Game for Barbie Fans.md +++ /dev/null @@ -1,80 +0,0 @@ -
-

Download Barbie Dreamhouse Adventures APK (MOD Unlocked)

-

Do you love playing with Barbie dolls and designing your own dream house? If yes, then you will love Barbie Dreamhouse Adventures, a simulation game where you can create your own room, join fun activities with Barbie and her friends, and explore the city. But what if you want to enjoy all the features of the game without spending any money? Well, you can do that by downloading Barbie Dreamhouse Adventures APK (MOD Unlocked), a modified version of the game that gives you unlimited access to everything. In this article, we will tell you what is Barbie Dreamhouse Adventures, why you should download the APK file, and how to install it on your device.

-

What is Barbie Dreamhouse Adventures?

-

Barbie Dreamhouse Adventures is an exciting simulation game developed by Budge Studios, a company that specializes in creating games for kids. The game is based on the popular animated series of the same name, where Barbie and her friends live in a dream house and have fun adventures together. The game allows you to design your own room with various furniture, decorations, and accessories. You can also join Barbie and her friends in different activities, such as baking, dancing, swimming, and more. You can customize your character and outfits, and explore the city and discover new places. The game is suitable for children of all ages, as it has colorful graphics, easy controls, and positive messages.

-




-

Features of Barbie Dreamhouse Adventures

-

Create your own dream room

-

One of the main features of the game is that you can create your own dream room with Barbie. You can choose from different themes, such as glam, sporty, or cozy. You can also add various furniture, such as beds, sofas, tables, chairs, lamps, and more. You can decorate your room with wallpapers, rugs, curtains, paintings, and other items. You can even add pets, such as dogs, cats, or horses. You can change the layout of your room anytime you want, and make it as unique as you are.

-

Join fun activities with Barbie and her friends

-

Another feature of the game is that you can join fun activities with Barbie and her friends. You can bake delicious cakes and cookies in the kitchen, dance to your favorite music in the living room, swim in the pool or relax in the hot tub, play with your pets in the backyard, or have a sleepover party in your room. You can also go on exciting adventures with Barbie and her friends, such as camping in the mountains, going to the beach, or visiting a carnival. You can interact with different characters and objects in the game, and have a lot of fun.

-

Customize your character and outfits

-

The game also lets you customize your character and outfits. You can choose from different hairstyles, eye colors, skin tones, and facial features. You can also dress up your character with various clothes, shoes, accessories, and jewelry. You can mix and match different items to create your own style. You can also change your outfits according to the occasion or mood. You can be a fashionista like Barbie or a sporty girl like Teresa.

-

Explore the city and discover new places

-

The game also allows you to explore the city and discover new places. You can visit different locations in the city, such as the mall, the park, the cinema, or the salon. You can also travel to different destinations, such as Paris, New York, or Tokyo. You can meet new people and make new friends. You can also learn about different cultures and traditions. You can have a lot of fun exploring the world with Barbie.

-

Why download Barbie Dreamhouse Adventures APK (MOD Unlocked)?

-

Barbie Dreamhouse Adventures is a free game that you can download from the Google Play Store or the App Store. However, the game has some limitations that may affect your gaming experience. For example, some of the rooms and activities are locked and require VIP membership to access. VIP membership costs $4.99 per month or $39.99 per year. Also, some of the items and outfits are expensive and require coins and gems to buy. Coins and gems are the in-game currencies that you can earn by playing the game or watching ads. However, earning enough coins and gems can be time-consuming and frustrating.

-

-

That's why you may want to download Barbie Dreamhouse Adventures APK (MOD Unlocked), a modified version of the game that gives you unlimited access to everything. By downloading this APK file, you can unlock VIP features for free, enjoy unlimited access to all rooms and activities, and get unlimited coins and gems to buy anything you want. You can also remove ads and enjoy a smoother gaming experience. You can have more fun and freedom playing the game with this APK file.

-

How to download and install Barbie Dreamhouse Adventures APK (MOD Unlocked)?

-

Downloading and installing Barbie Dreamhouse Adventures APK (MOD Unlocked) is easy and safe. Just follow these simple steps:

-

Step 1: Download the APK file from a trusted source

-

The first step is to download the APK file from a trusted source. You can find many websites that offer this APK file, but be careful, as some of them may contain viruses or malware that can harm your device. We recommend using this link to download the APK file, as it is verified and tested by us.

-

Step 2: Enable unknown sources on your device

-

The second step is to enable unknown sources on your device. This is necessary because your device may not allow you to install apps from sources other than the official app stores. To enable unknown sources, go to your device settings, then security, then unknown sources, and turn it on. On Android 8.0 and newer there is no single global toggle; instead, when you open the APK, the system asks you to allow installs from the specific app you are using (for example, your browser or file manager).
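For readers curious what this setting actually controls under the hood, here is a minimal, illustrative Kotlin sketch of the underlying Android permission flow; the class and function names are hypothetical, and it assumes an Android 8.0+ project that declares the REQUEST_INSTALL_PACKAGES permission in its manifest. You do not need it to follow the steps above.

```kotlin
import android.app.Activity
import android.content.Intent
import android.net.Uri
import android.os.Build
import android.provider.Settings

// Hypothetical helper class; assumes the manifest declares:
// <uses-permission android:name="android.permission.REQUEST_INSTALL_PACKAGES" />
class InstallPermissionHelper(private val activity: Activity) {

    // Checks whether this app may install APKs and, if not, opens the
    // system screen where the user grants "Install unknown apps" for it.
    fun ensureInstallPermission() {
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O &&
            !activity.packageManager.canRequestPackageInstalls()
        ) {
            val intent = Intent(
                Settings.ACTION_MANAGE_UNKNOWN_APP_SOURCES,
                Uri.parse("package:${activity.packageName}")
            )
            activity.startActivity(intent)
        }
    }
}
```

In other words, on newer devices the permission is granted per app, which is why the prompt appears when you open the APK from your browser or file manager rather than in one global settings menu.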

-

Step 3: Install the APK file and launch the game

-

The third step is to install the APK file and launch the game. To install the APK file, locate it in your device storage, tap on it, and follow the instructions on the screen. To launch the game, find its icon on your home screen or app drawer, tap on it, and enjoy.

-

Conclusion

-

Barbie Dreamhouse Adventures is a fun and creative simulation game that lets you design your own room, join fun activities with Barbie and her friends, and explore the city and the world. However, if you want to enjoy all the features of the game without spending any money, you can download Barbie Dreamhouse Adventures APK (MOD Unlocked), a modified version of the game that gives you unlimited access to everything. You can download this APK file from this link and install it on your device following these simple steps. You can have more fun and freedom playing the game with this APK file.

-

We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave them in the comments section below. Thank you for reading!

-

FAQs

-

Here are some frequently asked questions about Barbie Dreamhouse Adventures APK (MOD Unlocked):

-
    -
  • Is Barbie Dreamhouse Adventures APK (MOD Unlocked) safe?
  • -

    Yes, Barbie Dreamhouse Adventures APK (MOD Unlocked) is safe to download and install on your device. It does not contain any viruses or malware that can harm your device. However, make sure you download it from a trusted source like this link, as some websites may offer fake or corrupted files.

    -
  • Is Barbie Dreamhouse Adventures APK (MOD Unlocked) legal?
  • -

    No, Barbie Dreamhouse Adventures APK (MOD Unlocked) is not legal, as it violates the terms and conditions of the original game developer Budge Studios. By downloading this APK file, you are using an unauthorized version of the game that gives you an unfair advantage over other players. This may result in legal action or account suspension by Budge Studios.

    -
  • Will Barbie Dreamhouse Adventures APK (MOD Unlocked) work on my device?
  • -

    Barbie Dreamhouse Adventures APK (MOD Unlocked) will work on most Android devices that have Android 4.4 or higher. However, some devices may not be compatible with the game or the APK file, and may experience crashes or errors. To check if your device is compatible, you can visit the official Google Play Store page of the game and see if it is available for your device.

    -
  • Can I update Barbie Dreamhouse Adventures APK (MOD Unlocked)?
  • -

    No, you cannot update Barbie Dreamhouse Adventures APK (MOD Unlocked) from the Google Play Store or the App Store, as it is a modified version of the game that is not recognized by the official app stores. If you try to update it, you may lose all your progress and data, and revert back to the original version of the game. To get the latest version of the APK file, you need to visit this link again and download it manually.

    -
  • Can I play Barbie Dreamhouse Adventures APK (MOD Unlocked) online with other players?
  • -

    Yes, you can play Barbie Dreamhouse Adventures APK (MOD Unlocked) online with other players who have the same version of the game. However, you may not be able to play with players who have the original version of the game, as they may have different features and settings. Also, you may face some issues or bugs while playing online, as the game is not designed to support the APK file.

    -

-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Stickman Zombies How to Unlock All the Weapons and Levels.md b/spaces/congsaPfin/Manga-OCR/logs/Stickman Zombies How to Unlock All the Weapons and Levels.md deleted file mode 100644 index d166225b8add485d803a03ba90b2eaa9f45d7663..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Stickman Zombies How to Unlock All the Weapons and Levels.md +++ /dev/null @@ -1,91 +0,0 @@ -
-

Stickman Zombies: What Are They and How to Survive Them

-

If you are a fan of stickman games and zombie games, you might have come across the term "stickman zombies". But what are they exactly and how did they come to be? And more importantly, how can you survive them in the games and animations that feature them? In this article, we will answer these questions and more, as we explore the world of stickman zombies.

-

Introduction

-

Stickman zombies are a type of undead creature that resembles stick figures. They have thin limbs, round heads, and usually no facial features. They are often depicted as wearing clothes or carrying weapons, such as guns, knives, or axes. They are usually hostile and aggressive, and will attack any living being on sight.

-




-

What are stickman zombies?

-

Stickman zombies are a combination of two popular genres: stickman games and zombie games. Stickman games are games that feature simple graphics and animations of stick figures performing various actions, such as fighting, running, jumping, or shooting. Zombie games are games that involve surviving a zombie apocalypse, where the player has to fend off hordes of undead creatures that want to eat their brains.

-

Stickman zombies are a result of merging these two genres together, creating a unique and challenging gameplay experience. The player has to control a stick figure that has to survive in a world overrun by stick figure zombies. The player has to use various weapons and skills to kill the zombies and avoid getting bitten or infected.

-

How did they originate?

-

The origin of stickman zombies is not clear, but there are some possible explanations. One theory is that they are the result of a virus or a mutation that turned normal stick figures into mindless cannibals. Another theory is that they are the product of a scientific experiment gone wrong, where stick figures were exposed to radiation or chemicals that altered their DNA. A third theory is that they are the manifestation of a curse or a spell that brought the dead back to life as stick figures.

-

Whatever the cause, stickman zombies have become a menace to society and a threat to humanity. They have spread across the globe, infecting and killing anyone they encounter. They have no remorse, no mercy, and no reason. They only have one goal: to feed on the living.

-

Why are they dangerous?

-

Stickman zombies are dangerous for several reasons. First of all, they are fast and agile. They can run, jump, climb, and dodge with ease. They can also use weapons and objects to attack their prey. Second of all, they are relentless and persistent. They will not stop chasing their target until they catch it or kill it. They will also work together in groups or hordes to overwhelm their enemies. Third of all, they are infectious and contagious. If they bite or scratch someone, they will transmit their virus or curse to them, turning them into one of them.

-

Therefore, stickman zombies pose a serious challenge for anyone who wants to survive in their world. The player has to be careful, alert, and resourceful to avoid getting caught or killed by them.

-

Stickman Zombie Games

-

One way to experience the thrill and horror of stickman zombies is to play stickman zombie games. These are games that let you control a stick figure that has to fight against stick figure zombies in various scenarios and environments. Some of these games are:

What are some popular stickman zombie games?

-

Stickman Zombie 3D

-

This is a 3D shooting game that puts you in the role of a stickman soldier who has to survive a zombie outbreak in a military base. You have to use different guns and explosives to kill the zombies and find a way out of the base. The game has realistic graphics, sound effects, and physics, as well as multiple levels and modes to play.

-


-

Stickman vs Zombies

-

This is a 2D side-scrolling game that lets you control a stickman who has to run and jump through a city infested by zombies. You have to collect coins and power-ups, as well as avoid obstacles and traps, while shooting the zombies with your gun. The game has colorful graphics, catchy music, and simple controls, as well as various characters and weapons to unlock.

-

Stickman Zombie Annihilation

-

This is a 2D top-down game that allows you to drive a car and run over zombies in a post-apocalyptic world. You have to upgrade your car and weapons, as well as complete missions and challenges, while avoiding the zombies and other hazards. The game has pixelated graphics, retro music, and addictive gameplay, as well as a leaderboard and achievements to compete with other players.

-

What are some tips and tricks to play stickman zombie games?

-

Here are some general tips and tricks that can help you play stickman zombie games better:
- Aim for the head. This will deal more damage and kill the zombies faster.
- Keep moving. This will make you harder to hit and allow you to dodge the zombies' attacks.
- Use cover. This will protect you from the zombies' projectiles and give you time to reload or heal.
- Conserve ammo. This will prevent you from running out of bullets when you need them most.
- Upgrade your weapons. This will increase your firepower and effectiveness against the zombies.
- Explore the environment. This will help you find hidden items, secrets, and shortcuts that can aid you in your survival.

-

Stickman Zombie Animation

-

Another way to enjoy the fun and horror of stickman zombies is to watch stickman zombie animation. These are short videos that feature stick figures in various situations involving zombies. They are usually humorous, creative, and action-packed, and showcase the skills and talents of the animators.

-

What are some examples of stickman zombie animation?

-

"EVACUATION" THE WHITE

-

This is a stickman zombie animation by Alan Becker, the creator of the famous Animator vs Animation series. It tells the story of a white stick figure who has to escape from a building full of zombies with the help of his friends. The animation has smooth movements, detailed backgrounds, and clever references to other zombie movies and games.

-

Marbles VS Zombies Battle

-

This is a stickman zombie animation by Marbles Animations, a YouTube channel that specializes in marble racing videos. It shows a battle between marbles and zombies in a stadium, where the marbles have to use their speed and agility to avoid being eaten by the zombies. The animation has colorful visuals, dynamic camera angles, and suspenseful music.

-

Stickman vs Zombie - Epic Battle for Survival!

-

This is a stickman zombie animation by JZBoy Animations, another YouTube channel that produces stick figure videos. It depicts a fight between a stickman and a horde of zombies in an abandoned city, where the stickman has to use his martial arts skills and weapons to survive. The animation has fast-paced action, cool effects, and humorous moments.

-

What are some benefits of watching stickman zombie animation?

-

Watching stickman zombie animations can have several benefits for you, such as:

- Entertaining you. Stickman zombie animations can make you laugh, gasp, or cheer with their stories, characters, and humor.
- Inspiring you. Stickman zombie animations can motivate you to create your own animations or games with your imagination and creativity.
- Educating you. Stickman zombie animations can teach you about animation techniques, software, and tools that you can use to improve your skills or learn new ones.

-

Conclusion

-

In conclusion, stickman zombies are a fascinating phenomenon that combines two popular genres: stickman games and zombie games. They are undead creatures that resemble stick figures and attack any living being on sight. They can be found in various games and animations that challenge your skills and entertain your senses with their graphics, sound, and animation. If you are looking for a new and exciting way to have fun and test your abilities, you should try playing or watching stickman zombie games and animations. You will not regret it!

-

Summary of the main points

-

Here is a summary of the main points we covered in this article:

- Stickman zombies are undead creatures that resemble stick figures and attack any living being on sight.
- They originated from a virus, a mutation, or a curse that turned normal stick figures into mindless cannibals.
- They are dangerous because they are fast, agile, relentless, persistent, and infectious.
- Stickman zombie games let you control a stick figure that has to fight stick-figure zombies in various scenarios and environments.
- Stickman zombie animations are short videos that feature stick figures in various situations involving zombies.
- Playing or watching stickman zombie games and animations can entertain, inspire, and educate you.

-

Call to action

-

If you enjoyed this article, please share it with your friends and family who might also be interested in stickman zombies. You can also leave a comment below and tell us what you think about stickman zombies, or suggest other topics you would like us to write about. And don't forget to subscribe to our newsletter to get more articles like this one delivered to your inbox. Thank you for reading!

-

FAQs

-

Here are some frequently asked questions about stickman zombies:

-

What is the difference between stickman zombies and regular zombies?

-

The main difference between stickman zombies and regular zombies is their appearance. Stickman zombies look like stick figures, while regular zombies look like humans. However, they both share the same characteristics of being undead, mindless, and hungry for flesh.

-

Are stickman zombies real?

-

No, stickman zombies are not real. They are fictional creatures that only exist in games and animations. However, some people believe that zombies could become real in the future, due to a pandemic, a war, or a disaster.

-

How can I make my own stickman zombie game or animation?

-

If you want to make your own stickman zombie game or animation, you will need some software and tools to help you. For example, you can use Adobe Animate (the successor to Adobe Flash) to create 2D graphics and animations of stick figures and zombies, or a game engine such as Unity or Unreal Engine to build 3D games and simulations of stickman zombie scenarios. There are also platforms and apps that let you make stickman zombie games or animations without coding, such as Stick Nodes or Pivot Animator. A minimal code sketch of the basic game-loop idea follows.
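To make the game idea concrete, here is a minimal sketch of a top-down "dodge the zombies" loop written in Python with the pygame library (assuming pygame is installed). It is only an illustration under assumed values: the window size, speeds, radii, and circle-based "stickman" and "zombie" sprites are invented for this example and are not taken from any of the games described above.

```python
# A minimal sketch, not a full game: steer a "stickman" dot with the arrow
# keys while "zombie" dots chase it. Window size, speeds, and radii are
# made-up values chosen only for this illustration.
import random

import pygame

WIDTH, HEIGHT = 640, 480
PLAYER_SPEED = 4
ZOMBIE_SPEED = 1.5
TOUCH_DISTANCE = 16  # game over when a zombie gets this close


def main():
    pygame.init()
    screen = pygame.display.set_mode((WIDTH, HEIGHT))
    pygame.display.set_caption("Stickman vs Zombies (sketch)")
    clock = pygame.time.Clock()

    player = pygame.math.Vector2(WIDTH / 2, HEIGHT / 2)
    zombies = [
        pygame.math.Vector2(random.randrange(WIDTH), random.randrange(HEIGHT))
        for _ in range(5)
    ]

    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False

        # Move the stickman with the arrow keys.
        keys = pygame.key.get_pressed()
        player.x += (keys[pygame.K_RIGHT] - keys[pygame.K_LEFT]) * PLAYER_SPEED
        player.y += (keys[pygame.K_DOWN] - keys[pygame.K_UP]) * PLAYER_SPEED

        # Each zombie shuffles toward the stickman; touching one ends the game.
        for zombie in zombies:
            direction = player - zombie
            if direction.length() > 0:
                step = direction.normalize() * ZOMBIE_SPEED
                zombie.x += step.x
                zombie.y += step.y
            if player.distance_to(zombie) < TOUCH_DISTANCE:
                running = False

        # Draw everything as simple circles standing in for stick figures.
        screen.fill((30, 30, 30))
        pygame.draw.circle(screen, (255, 255, 255), (int(player.x), int(player.y)), 8)
        for zombie in zombies:
            pygame.draw.circle(screen, (0, 200, 0), (int(zombie.x), int(zombie.y)), 8)
        pygame.display.flip()
        clock.tick(60)

    pygame.quit()


if __name__ == "__main__":
    main()
```

Running it (for example with `pip install pygame` followed by `python stickman_zombies.py`, both hypothetical names here) opens a window where the white dot is steered with the arrow keys while the green dots close in; a real project would replace the circles with stick-figure animations and add shooting, health, and levels.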

-

What are some other genres that feature stickman zombies?

-

Besides games and animations, there are some other genres that feature stickman zombies, such as comics, books, movies, or TV shows. For example, there is a comic series called The Walking Dead: Stick Edition, which is a parody of the popular zombie comic and TV show The Walking Dead. There is also a book called Stick Z: The Zombie Apocalypse Begins Here!, which is a novel about a group of kids who have to survive a stickman zombie outbreak in their school. There is also a movie called Stick Zombie Massacre!, which is a horror comedy film about a college student who has to fight off an army of stick figure zombies.

-

Where can I find more information about stickman zombies?

-

If you want to find more information about stickman zombies, you can visit some websites or blogs that are dedicated to them, such as StickZombie.com or StickZombieBlog.com. You can also join some online communities or forums that discuss them, such as Reddit.com/r/StickZombies or StickZombies.net/forum. You can also follow some social media accounts or channels that post about them, such as @StickZombie on Twitter or Stick Zombie on YouTube.

-
-
\ No newline at end of file diff --git a/spaces/consciousAI/question_answering/README.md b/spaces/consciousAI/question_answering/README.md deleted file mode 100644 index e099bba8e2315aab4a57341a5fd46be2729ac536..0000000000000000000000000000000000000000 --- a/spaces/consciousAI/question_answering/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Question Answering Encoders vs Generative -emoji: 🔥 -colorFrom: green -colorTo: blue -sdk: gradio -sdk_version: 3.8.2 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/contluForse/HuggingGPT/assets/Ala A Little Agency Melissa Sets 001 026.md b/spaces/contluForse/HuggingGPT/assets/Ala A Little Agency Melissa Sets 001 026.md deleted file mode 100644 index a4b81360bd964d8e8baa0413bd42c7fdef562fa9..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Ala A Little Agency Melissa Sets 001 026.md +++ /dev/null @@ -1,6 +0,0 @@ -

-

diff --git a/spaces/cooelf/Multimodal-CoT/timm/models/layers/median_pool.py b/spaces/cooelf/Multimodal-CoT/timm/models/layers/median_pool.py deleted file mode 100644 index 40bd71a7a3840aaebefd2af0a99605b845054cd7..0000000000000000000000000000000000000000 --- a/spaces/cooelf/Multimodal-CoT/timm/models/layers/median_pool.py +++ /dev/null @@ -1,49 +0,0 @@ -""" Median Pool -Hacked together by / Copyright 2020 Ross Wightman -""" -import torch.nn as nn -import torch.nn.functional as F -from .helpers import to_2tuple, to_4tuple - - -class MedianPool2d(nn.Module): - """ Median pool (usable as median filter when stride=1) module. - - Args: - kernel_size: size of pooling kernel, int or 2-tuple - stride: pool stride, int or 2-tuple - padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad - same: override padding and enforce same padding, boolean - """ - def __init__(self, kernel_size=3, stride=1, padding=0, same=False): - super(MedianPool2d, self).__init__() - self.k = to_2tuple(kernel_size) - self.stride = to_2tuple(stride) - self.padding = to_4tuple(padding) # convert to l, r, t, b - self.same = same - - def _padding(self, x): - if self.same: - ih, iw = x.size()[2:] - if ih % self.stride[0] == 0: - ph = max(self.k[0] - self.stride[0], 0) - else: - ph = max(self.k[0] - (ih % self.stride[0]), 0) - if iw % self.stride[1] == 0: - pw = max(self.k[1] - self.stride[1], 0) - else: - pw = max(self.k[1] - (iw % self.stride[1]), 0) - pl = pw // 2 - pr = pw - pl - pt = ph // 2 - pb = ph - pt - padding = (pl, pr, pt, pb) - else: - padding = self.padding - return padding - - def forward(self, x): - x = F.pad(x, self._padding(x), mode='reflect') - x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1]) - x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] - return x diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/openpose/model.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/openpose/model.py deleted file mode 100644 index 5dfc80de827a17beccb9b0f3f7588545be78c9de..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/openpose/model.py +++ /dev/null @@ -1,219 +0,0 @@ -import torch -from collections import OrderedDict - -import torch -import torch.nn as nn - -def make_layers(block, no_relu_layers): - layers = [] - for layer_name, v in block.items(): - if 'pool' in layer_name: - layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], - padding=v[2]) - layers.append((layer_name, layer)) - else: - conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], - kernel_size=v[2], stride=v[3], - padding=v[4]) - layers.append((layer_name, conv2d)) - if layer_name not in no_relu_layers: - layers.append(('relu_'+layer_name, nn.ReLU(inplace=True))) - - return nn.Sequential(OrderedDict(layers)) - -class bodypose_model(nn.Module): - def __init__(self): - super(bodypose_model, self).__init__() - - # these layers have no relu layer - no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1',\ - 'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2',\ - 'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1',\ - 'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L1'] - blocks = {} - block0 = OrderedDict([ - ('conv1_1', [3, 64, 3, 1, 1]), - ('conv1_2', [64, 64, 3, 1, 1]), - ('pool1_stage1', [2, 2, 0]), - ('conv2_1', [64, 128, 3, 1, 1]), - ('conv2_2', [128, 128, 3, 1, 1]), - ('pool2_stage1', [2, 2, 0]), - ('conv3_1', [128, 256, 3, 1, 1]), - ('conv3_2', 
[256, 256, 3, 1, 1]), - ('conv3_3', [256, 256, 3, 1, 1]), - ('conv3_4', [256, 256, 3, 1, 1]), - ('pool3_stage1', [2, 2, 0]), - ('conv4_1', [256, 512, 3, 1, 1]), - ('conv4_2', [512, 512, 3, 1, 1]), - ('conv4_3_CPM', [512, 256, 3, 1, 1]), - ('conv4_4_CPM', [256, 128, 3, 1, 1]) - ]) - - - # Stage 1 - block1_1 = OrderedDict([ - ('conv5_1_CPM_L1', [128, 128, 3, 1, 1]), - ('conv5_2_CPM_L1', [128, 128, 3, 1, 1]), - ('conv5_3_CPM_L1', [128, 128, 3, 1, 1]), - ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]), - ('conv5_5_CPM_L1', [512, 38, 1, 1, 0]) - ]) - - block1_2 = OrderedDict([ - ('conv5_1_CPM_L2', [128, 128, 3, 1, 1]), - ('conv5_2_CPM_L2', [128, 128, 3, 1, 1]), - ('conv5_3_CPM_L2', [128, 128, 3, 1, 1]), - ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]), - ('conv5_5_CPM_L2', [512, 19, 1, 1, 0]) - ]) - blocks['block1_1'] = block1_1 - blocks['block1_2'] = block1_2 - - self.model0 = make_layers(block0, no_relu_layers) - - # Stages 2 - 6 - for i in range(2, 7): - blocks['block%d_1' % i] = OrderedDict([ - ('Mconv1_stage%d_L1' % i, [185, 128, 7, 1, 3]), - ('Mconv2_stage%d_L1' % i, [128, 128, 7, 1, 3]), - ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]), - ('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]), - ('Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]), - ('Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]), - ('Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0]) - ]) - - blocks['block%d_2' % i] = OrderedDict([ - ('Mconv1_stage%d_L2' % i, [185, 128, 7, 1, 3]), - ('Mconv2_stage%d_L2' % i, [128, 128, 7, 1, 3]), - ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]), - ('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]), - ('Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]), - ('Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]), - ('Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0]) - ]) - - for k in blocks.keys(): - blocks[k] = make_layers(blocks[k], no_relu_layers) - - self.model1_1 = blocks['block1_1'] - self.model2_1 = blocks['block2_1'] - self.model3_1 = blocks['block3_1'] - self.model4_1 = blocks['block4_1'] - self.model5_1 = blocks['block5_1'] - self.model6_1 = blocks['block6_1'] - - self.model1_2 = blocks['block1_2'] - self.model2_2 = blocks['block2_2'] - self.model3_2 = blocks['block3_2'] - self.model4_2 = blocks['block4_2'] - self.model5_2 = blocks['block5_2'] - self.model6_2 = blocks['block6_2'] - - - def forward(self, x): - - out1 = self.model0(x) - - out1_1 = self.model1_1(out1) - out1_2 = self.model1_2(out1) - out2 = torch.cat([out1_1, out1_2, out1], 1) - - out2_1 = self.model2_1(out2) - out2_2 = self.model2_2(out2) - out3 = torch.cat([out2_1, out2_2, out1], 1) - - out3_1 = self.model3_1(out3) - out3_2 = self.model3_2(out3) - out4 = torch.cat([out3_1, out3_2, out1], 1) - - out4_1 = self.model4_1(out4) - out4_2 = self.model4_2(out4) - out5 = torch.cat([out4_1, out4_2, out1], 1) - - out5_1 = self.model5_1(out5) - out5_2 = self.model5_2(out5) - out6 = torch.cat([out5_1, out5_2, out1], 1) - - out6_1 = self.model6_1(out6) - out6_2 = self.model6_2(out6) - - return out6_1, out6_2 - -class handpose_model(nn.Module): - def __init__(self): - super(handpose_model, self).__init__() - - # these layers have no relu layer - no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3',\ - 'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6'] - # stage 1 - block1_0 = OrderedDict([ - ('conv1_1', [3, 64, 3, 1, 1]), - ('conv1_2', [64, 64, 3, 1, 1]), - ('pool1_stage1', [2, 2, 0]), - ('conv2_1', [64, 128, 3, 1, 1]), - ('conv2_2', [128, 128, 3, 1, 1]), - ('pool2_stage1', [2, 2, 0]), - ('conv3_1', [128, 256, 3, 1, 1]), - ('conv3_2', [256, 256, 3, 1, 1]), - ('conv3_3', 
[256, 256, 3, 1, 1]), - ('conv3_4', [256, 256, 3, 1, 1]), - ('pool3_stage1', [2, 2, 0]), - ('conv4_1', [256, 512, 3, 1, 1]), - ('conv4_2', [512, 512, 3, 1, 1]), - ('conv4_3', [512, 512, 3, 1, 1]), - ('conv4_4', [512, 512, 3, 1, 1]), - ('conv5_1', [512, 512, 3, 1, 1]), - ('conv5_2', [512, 512, 3, 1, 1]), - ('conv5_3_CPM', [512, 128, 3, 1, 1]) - ]) - - block1_1 = OrderedDict([ - ('conv6_1_CPM', [128, 512, 1, 1, 0]), - ('conv6_2_CPM', [512, 22, 1, 1, 0]) - ]) - - blocks = {} - blocks['block1_0'] = block1_0 - blocks['block1_1'] = block1_1 - - # stage 2-6 - for i in range(2, 7): - blocks['block%d' % i] = OrderedDict([ - ('Mconv1_stage%d' % i, [150, 128, 7, 1, 3]), - ('Mconv2_stage%d' % i, [128, 128, 7, 1, 3]), - ('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]), - ('Mconv4_stage%d' % i, [128, 128, 7, 1, 3]), - ('Mconv5_stage%d' % i, [128, 128, 7, 1, 3]), - ('Mconv6_stage%d' % i, [128, 128, 1, 1, 0]), - ('Mconv7_stage%d' % i, [128, 22, 1, 1, 0]) - ]) - - for k in blocks.keys(): - blocks[k] = make_layers(blocks[k], no_relu_layers) - - self.model1_0 = blocks['block1_0'] - self.model1_1 = blocks['block1_1'] - self.model2 = blocks['block2'] - self.model3 = blocks['block3'] - self.model4 = blocks['block4'] - self.model5 = blocks['block5'] - self.model6 = blocks['block6'] - - def forward(self, x): - out1_0 = self.model1_0(x) - out1_1 = self.model1_1(out1_0) - concat_stage2 = torch.cat([out1_1, out1_0], 1) - out_stage2 = self.model2(concat_stage2) - concat_stage3 = torch.cat([out_stage2, out1_0], 1) - out_stage3 = self.model3(concat_stage3) - concat_stage4 = torch.cat([out_stage3, out1_0], 1) - out_stage4 = self.model4(concat_stage4) - concat_stage5 = torch.cat([out_stage4, out1_0], 1) - out_stage5 = self.model5(concat_stage5) - concat_stage6 = torch.cat([out_stage5, out1_0], 1) - out_stage6 = self.model6(concat_stage6) - return out_stage6 - - diff --git a/spaces/cscan/CodeFormer/README.md b/spaces/cscan/CodeFormer/README.md deleted file mode 100644 index 6fafbe6f03ca8588a58a159d4ab39fe2256c9d88..0000000000000000000000000000000000000000 --- a/spaces/cscan/CodeFormer/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: CodeFormer -emoji: 🐼 -colorFrom: blue -colorTo: green -sdk: gradio -sdk_version: 3.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: sczhou/CodeFormer ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/__init__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/__init__.py deleted file mode 100644 index 5d28506120e9e25c27858fb1d6b94bcdbcea095e..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -import logging -from fontTools.misc.loggingTools import configLogger - -log = logging.getLogger(__name__) - -version = __version__ = "4.42.0" - -__all__ = ["version", "log", "configLogger"] diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/merge/layout.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/merge/layout.py deleted file mode 100644 index 6b85cd503387291f326e937b36a5739b1de23ef1..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/merge/layout.py +++ /dev/null @@ -1,530 +0,0 @@ -# Copyright 
2013 Google, Inc. All Rights Reserved. -# -# Google Author(s): Behdad Esfahbod, Roozbeh Pournader - -from fontTools import ttLib -from fontTools.ttLib.tables.DefaultTable import DefaultTable -from fontTools.ttLib.tables import otTables -from fontTools.merge.base import add_method, mergeObjects -from fontTools.merge.util import * -import logging - - -log = logging.getLogger("fontTools.merge") - - -def mergeLookupLists(lst): - # TODO Do smarter merge. - return sumLists(lst) - - -def mergeFeatures(lst): - assert lst - self = otTables.Feature() - self.FeatureParams = None - self.LookupListIndex = mergeLookupLists( - [l.LookupListIndex for l in lst if l.LookupListIndex] - ) - self.LookupCount = len(self.LookupListIndex) - return self - - -def mergeFeatureLists(lst): - d = {} - for l in lst: - for f in l: - tag = f.FeatureTag - if tag not in d: - d[tag] = [] - d[tag].append(f.Feature) - ret = [] - for tag in sorted(d.keys()): - rec = otTables.FeatureRecord() - rec.FeatureTag = tag - rec.Feature = mergeFeatures(d[tag]) - ret.append(rec) - return ret - - -def mergeLangSyses(lst): - assert lst - - # TODO Support merging ReqFeatureIndex - assert all(l.ReqFeatureIndex == 0xFFFF for l in lst) - - self = otTables.LangSys() - self.LookupOrder = None - self.ReqFeatureIndex = 0xFFFF - self.FeatureIndex = mergeFeatureLists( - [l.FeatureIndex for l in lst if l.FeatureIndex] - ) - self.FeatureCount = len(self.FeatureIndex) - return self - - -def mergeScripts(lst): - assert lst - - if len(lst) == 1: - return lst[0] - langSyses = {} - for sr in lst: - for lsr in sr.LangSysRecord: - if lsr.LangSysTag not in langSyses: - langSyses[lsr.LangSysTag] = [] - langSyses[lsr.LangSysTag].append(lsr.LangSys) - lsrecords = [] - for tag, langSys_list in sorted(langSyses.items()): - lsr = otTables.LangSysRecord() - lsr.LangSys = mergeLangSyses(langSys_list) - lsr.LangSysTag = tag - lsrecords.append(lsr) - - self = otTables.Script() - self.LangSysRecord = lsrecords - self.LangSysCount = len(lsrecords) - dfltLangSyses = [s.DefaultLangSys for s in lst if s.DefaultLangSys] - if dfltLangSyses: - self.DefaultLangSys = mergeLangSyses(dfltLangSyses) - else: - self.DefaultLangSys = None - return self - - -def mergeScriptRecords(lst): - d = {} - for l in lst: - for s in l: - tag = s.ScriptTag - if tag not in d: - d[tag] = [] - d[tag].append(s.Script) - ret = [] - for tag in sorted(d.keys()): - rec = otTables.ScriptRecord() - rec.ScriptTag = tag - rec.Script = mergeScripts(d[tag]) - ret.append(rec) - return ret - - -otTables.ScriptList.mergeMap = { - "ScriptCount": lambda lst: None, # TODO - "ScriptRecord": mergeScriptRecords, -} -otTables.BaseScriptList.mergeMap = { - "BaseScriptCount": lambda lst: None, # TODO - # TODO: Merge duplicate entries - "BaseScriptRecord": lambda lst: sorted( - sumLists(lst), key=lambda s: s.BaseScriptTag - ), -} - -otTables.FeatureList.mergeMap = { - "FeatureCount": sum, - "FeatureRecord": lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag), -} - -otTables.LookupList.mergeMap = { - "LookupCount": sum, - "Lookup": sumLists, -} - -otTables.Coverage.mergeMap = { - "Format": min, - "glyphs": sumLists, -} - -otTables.ClassDef.mergeMap = { - "Format": min, - "classDefs": sumDicts, -} - -otTables.LigCaretList.mergeMap = { - "Coverage": mergeObjects, - "LigGlyphCount": sum, - "LigGlyph": sumLists, -} - -otTables.AttachList.mergeMap = { - "Coverage": mergeObjects, - "GlyphCount": sum, - "AttachPoint": sumLists, -} - -# XXX Renumber MarkFilterSets of lookups -otTables.MarkGlyphSetsDef.mergeMap = { - 
"MarkSetTableFormat": equal, - "MarkSetCount": sum, - "Coverage": sumLists, -} - -otTables.Axis.mergeMap = { - "*": mergeObjects, -} - -# XXX Fix BASE table merging -otTables.BaseTagList.mergeMap = { - "BaseTagCount": sum, - "BaselineTag": sumLists, -} - -otTables.GDEF.mergeMap = ( - otTables.GSUB.mergeMap -) = ( - otTables.GPOS.mergeMap -) = otTables.BASE.mergeMap = otTables.JSTF.mergeMap = otTables.MATH.mergeMap = { - "*": mergeObjects, - "Version": max, -} - -ttLib.getTableClass("GDEF").mergeMap = ttLib.getTableClass( - "GSUB" -).mergeMap = ttLib.getTableClass("GPOS").mergeMap = ttLib.getTableClass( - "BASE" -).mergeMap = ttLib.getTableClass( - "JSTF" -).mergeMap = ttLib.getTableClass( - "MATH" -).mergeMap = { - "tableTag": onlyExisting(equal), # XXX clean me up - "table": mergeObjects, -} - - -@add_method(ttLib.getTableClass("GSUB")) -def merge(self, m, tables): - assert len(tables) == len(m.duplicateGlyphsPerFont) - for i, (table, dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)): - if not dups: - continue - if table is None or table is NotImplemented: - log.warning( - "Have non-identical duplicates to resolve for '%s' but no GSUB. Are duplicates intended?: %s", - m.fonts[i]._merger__name, - dups, - ) - continue - - synthFeature = None - synthLookup = None - for script in table.table.ScriptList.ScriptRecord: - if script.ScriptTag == "DFLT": - continue # XXX - for langsys in [script.Script.DefaultLangSys] + [ - l.LangSys for l in script.Script.LangSysRecord - ]: - if langsys is None: - continue # XXX Create! - feature = [v for v in langsys.FeatureIndex if v.FeatureTag == "locl"] - assert len(feature) <= 1 - if feature: - feature = feature[0] - else: - if not synthFeature: - synthFeature = otTables.FeatureRecord() - synthFeature.FeatureTag = "locl" - f = synthFeature.Feature = otTables.Feature() - f.FeatureParams = None - f.LookupCount = 0 - f.LookupListIndex = [] - table.table.FeatureList.FeatureRecord.append(synthFeature) - table.table.FeatureList.FeatureCount += 1 - feature = synthFeature - langsys.FeatureIndex.append(feature) - langsys.FeatureIndex.sort(key=lambda v: v.FeatureTag) - - if not synthLookup: - subtable = otTables.SingleSubst() - subtable.mapping = dups - synthLookup = otTables.Lookup() - synthLookup.LookupFlag = 0 - synthLookup.LookupType = 1 - synthLookup.SubTableCount = 1 - synthLookup.SubTable = [subtable] - if table.table.LookupList is None: - # mtiLib uses None as default value for LookupList, - # while feaLib points to an empty array with count 0 - # TODO: make them do the same - table.table.LookupList = otTables.LookupList() - table.table.LookupList.Lookup = [] - table.table.LookupList.LookupCount = 0 - table.table.LookupList.Lookup.append(synthLookup) - table.table.LookupList.LookupCount += 1 - - if feature.Feature.LookupListIndex[:1] != [synthLookup]: - feature.Feature.LookupListIndex[:0] = [synthLookup] - feature.Feature.LookupCount += 1 - - DefaultTable.merge(self, m, tables) - return self - - -@add_method( - otTables.SingleSubst, - otTables.MultipleSubst, - otTables.AlternateSubst, - otTables.LigatureSubst, - otTables.ReverseChainSingleSubst, - otTables.SinglePos, - otTables.PairPos, - otTables.CursivePos, - otTables.MarkBasePos, - otTables.MarkLigPos, - otTables.MarkMarkPos, -) -def mapLookups(self, lookupMap): - pass - - -# Copied and trimmed down from subset.py -@add_method( - otTables.ContextSubst, - otTables.ChainContextSubst, - otTables.ContextPos, - otTables.ChainContextPos, -) -def __merge_classify_context(self): - class 
ContextHelper(object): - def __init__(self, klass, Format): - if klass.__name__.endswith("Subst"): - Typ = "Sub" - Type = "Subst" - else: - Typ = "Pos" - Type = "Pos" - if klass.__name__.startswith("Chain"): - Chain = "Chain" - else: - Chain = "" - ChainTyp = Chain + Typ - - self.Typ = Typ - self.Type = Type - self.Chain = Chain - self.ChainTyp = ChainTyp - - self.LookupRecord = Type + "LookupRecord" - - if Format == 1: - self.Rule = ChainTyp + "Rule" - self.RuleSet = ChainTyp + "RuleSet" - elif Format == 2: - self.Rule = ChainTyp + "ClassRule" - self.RuleSet = ChainTyp + "ClassSet" - - if self.Format not in [1, 2, 3]: - return None # Don't shoot the messenger; let it go - if not hasattr(self.__class__, "_merge__ContextHelpers"): - self.__class__._merge__ContextHelpers = {} - if self.Format not in self.__class__._merge__ContextHelpers: - helper = ContextHelper(self.__class__, self.Format) - self.__class__._merge__ContextHelpers[self.Format] = helper - return self.__class__._merge__ContextHelpers[self.Format] - - -@add_method( - otTables.ContextSubst, - otTables.ChainContextSubst, - otTables.ContextPos, - otTables.ChainContextPos, -) -def mapLookups(self, lookupMap): - c = self.__merge_classify_context() - - if self.Format in [1, 2]: - for rs in getattr(self, c.RuleSet): - if not rs: - continue - for r in getattr(rs, c.Rule): - if not r: - continue - for ll in getattr(r, c.LookupRecord): - if not ll: - continue - ll.LookupListIndex = lookupMap[ll.LookupListIndex] - elif self.Format == 3: - for ll in getattr(self, c.LookupRecord): - if not ll: - continue - ll.LookupListIndex = lookupMap[ll.LookupListIndex] - else: - assert 0, "unknown format: %s" % self.Format - - -@add_method(otTables.ExtensionSubst, otTables.ExtensionPos) -def mapLookups(self, lookupMap): - if self.Format == 1: - self.ExtSubTable.mapLookups(lookupMap) - else: - assert 0, "unknown format: %s" % self.Format - - -@add_method(otTables.Lookup) -def mapLookups(self, lookupMap): - for st in self.SubTable: - if not st: - continue - st.mapLookups(lookupMap) - - -@add_method(otTables.LookupList) -def mapLookups(self, lookupMap): - for l in self.Lookup: - if not l: - continue - l.mapLookups(lookupMap) - - -@add_method(otTables.Lookup) -def mapMarkFilteringSets(self, markFilteringSetMap): - if self.LookupFlag & 0x0010: - self.MarkFilteringSet = markFilteringSetMap[self.MarkFilteringSet] - - -@add_method(otTables.LookupList) -def mapMarkFilteringSets(self, markFilteringSetMap): - for l in self.Lookup: - if not l: - continue - l.mapMarkFilteringSets(markFilteringSetMap) - - -@add_method(otTables.Feature) -def mapLookups(self, lookupMap): - self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex] - - -@add_method(otTables.FeatureList) -def mapLookups(self, lookupMap): - for f in self.FeatureRecord: - if not f or not f.Feature: - continue - f.Feature.mapLookups(lookupMap) - - -@add_method(otTables.DefaultLangSys, otTables.LangSys) -def mapFeatures(self, featureMap): - self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex] - if self.ReqFeatureIndex != 65535: - self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex] - - -@add_method(otTables.Script) -def mapFeatures(self, featureMap): - if self.DefaultLangSys: - self.DefaultLangSys.mapFeatures(featureMap) - for l in self.LangSysRecord: - if not l or not l.LangSys: - continue - l.LangSys.mapFeatures(featureMap) - - -@add_method(otTables.ScriptList) -def mapFeatures(self, featureMap): - for s in self.ScriptRecord: - if not s or not s.Script: - continue - 
s.Script.mapFeatures(featureMap) - - -def layoutPreMerge(font): - # Map indices to references - - GDEF = font.get("GDEF") - GSUB = font.get("GSUB") - GPOS = font.get("GPOS") - - for t in [GSUB, GPOS]: - if not t: - continue - - if t.table.LookupList: - lookupMap = {i: v for i, v in enumerate(t.table.LookupList.Lookup)} - t.table.LookupList.mapLookups(lookupMap) - t.table.FeatureList.mapLookups(lookupMap) - - if ( - GDEF - and GDEF.table.Version >= 0x00010002 - and GDEF.table.MarkGlyphSetsDef - ): - markFilteringSetMap = { - i: v for i, v in enumerate(GDEF.table.MarkGlyphSetsDef.Coverage) - } - t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap) - - if t.table.FeatureList and t.table.ScriptList: - featureMap = {i: v for i, v in enumerate(t.table.FeatureList.FeatureRecord)} - t.table.ScriptList.mapFeatures(featureMap) - - # TODO FeatureParams nameIDs - - -def layoutPostMerge(font): - # Map references back to indices - - GDEF = font.get("GDEF") - GSUB = font.get("GSUB") - GPOS = font.get("GPOS") - - for t in [GSUB, GPOS]: - if not t: - continue - - if t.table.FeatureList and t.table.ScriptList: - # Collect unregistered (new) features. - featureMap = GregariousIdentityDict(t.table.FeatureList.FeatureRecord) - t.table.ScriptList.mapFeatures(featureMap) - - # Record used features. - featureMap = AttendanceRecordingIdentityDict( - t.table.FeatureList.FeatureRecord - ) - t.table.ScriptList.mapFeatures(featureMap) - usedIndices = featureMap.s - - # Remove unused features - t.table.FeatureList.FeatureRecord = [ - f - for i, f in enumerate(t.table.FeatureList.FeatureRecord) - if i in usedIndices - ] - - # Map back to indices. - featureMap = NonhashableDict(t.table.FeatureList.FeatureRecord) - t.table.ScriptList.mapFeatures(featureMap) - - t.table.FeatureList.FeatureCount = len(t.table.FeatureList.FeatureRecord) - - if t.table.LookupList: - # Collect unregistered (new) lookups. - lookupMap = GregariousIdentityDict(t.table.LookupList.Lookup) - t.table.FeatureList.mapLookups(lookupMap) - t.table.LookupList.mapLookups(lookupMap) - - # Record used lookups. - lookupMap = AttendanceRecordingIdentityDict(t.table.LookupList.Lookup) - t.table.FeatureList.mapLookups(lookupMap) - t.table.LookupList.mapLookups(lookupMap) - usedIndices = lookupMap.s - - # Remove unused lookups - t.table.LookupList.Lookup = [ - l for i, l in enumerate(t.table.LookupList.Lookup) if i in usedIndices - ] - - # Map back to indices. 
- lookupMap = NonhashableDict(t.table.LookupList.Lookup) - t.table.FeatureList.mapLookups(lookupMap) - t.table.LookupList.mapLookups(lookupMap) - - t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup) - - if GDEF and GDEF.table.Version >= 0x00010002: - markFilteringSetMap = NonhashableDict( - GDEF.table.MarkGlyphSetsDef.Coverage - ) - t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap) - - # TODO FeatureParams nameIDs diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/pens/boundsPen.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/pens/boundsPen.py deleted file mode 100644 index d833cc89b90b38937aa0e21c26bc7e7e84f5ee7d..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/pens/boundsPen.py +++ /dev/null @@ -1,100 +0,0 @@ -from fontTools.misc.arrayTools import updateBounds, pointInRect, unionRect -from fontTools.misc.bezierTools import calcCubicBounds, calcQuadraticBounds -from fontTools.pens.basePen import BasePen - - -__all__ = ["BoundsPen", "ControlBoundsPen"] - - -class ControlBoundsPen(BasePen): - - """Pen to calculate the "control bounds" of a shape. This is the - bounding box of all control points, so may be larger than the - actual bounding box if there are curves that don't have points - on their extremes. - - When the shape has been drawn, the bounds are available as the - ``bounds`` attribute of the pen object. It's a 4-tuple:: - - (xMin, yMin, xMax, yMax). - - If ``ignoreSinglePoints`` is True, single points are ignored. - """ - - def __init__(self, glyphSet, ignoreSinglePoints=False): - BasePen.__init__(self, glyphSet) - self.ignoreSinglePoints = ignoreSinglePoints - self.init() - - def init(self): - self.bounds = None - self._start = None - - def _moveTo(self, pt): - self._start = pt - if not self.ignoreSinglePoints: - self._addMoveTo() - - def _addMoveTo(self): - if self._start is None: - return - bounds = self.bounds - if bounds: - self.bounds = updateBounds(bounds, self._start) - else: - x, y = self._start - self.bounds = (x, y, x, y) - self._start = None - - def _lineTo(self, pt): - self._addMoveTo() - self.bounds = updateBounds(self.bounds, pt) - - def _curveToOne(self, bcp1, bcp2, pt): - self._addMoveTo() - bounds = self.bounds - bounds = updateBounds(bounds, bcp1) - bounds = updateBounds(bounds, bcp2) - bounds = updateBounds(bounds, pt) - self.bounds = bounds - - def _qCurveToOne(self, bcp, pt): - self._addMoveTo() - bounds = self.bounds - bounds = updateBounds(bounds, bcp) - bounds = updateBounds(bounds, pt) - self.bounds = bounds - - -class BoundsPen(ControlBoundsPen): - - """Pen to calculate the bounds of a shape. It calculates the - correct bounds even when the shape contains curves that don't - have points on their extremes. This is somewhat slower to compute - than the "control bounds". - - When the shape has been drawn, the bounds are available as the - ``bounds`` attribute of the pen object. 
It's a 4-tuple:: - - (xMin, yMin, xMax, yMax) - """ - - def _curveToOne(self, bcp1, bcp2, pt): - self._addMoveTo() - bounds = self.bounds - bounds = updateBounds(bounds, pt) - if not pointInRect(bcp1, bounds) or not pointInRect(bcp2, bounds): - bounds = unionRect( - bounds, calcCubicBounds(self._getCurrentPoint(), bcp1, bcp2, pt) - ) - self.bounds = bounds - - def _qCurveToOne(self, bcp, pt): - self._addMoveTo() - bounds = self.bounds - bounds = updateBounds(bounds, pt) - if not pointInRect(bcp, bounds): - bounds = unionRect( - bounds, calcQuadraticBounds(self._getCurrentPoint(), bcp, pt) - ) - self.bounds = bounds diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/latent_diffusion_uncond/__init__.py b/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/latent_diffusion_uncond/__init__.py deleted file mode 100644 index 1b9fc5270a62bbb18d1393263101d4b9f73b7511..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/src/diffusers/pipelines/latent_diffusion_uncond/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .pipeline_latent_diffusion_uncond import LDMPipeline diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/utils/dummy_flax_objects.py b/spaces/declare-lab/tango/diffusers/src/diffusers/utils/dummy_flax_objects.py deleted file mode 100644 index 2bb80d136f338d193c67773266355956afd1d98a..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/src/diffusers/utils/dummy_flax_objects.py +++ /dev/null @@ -1,197 +0,0 @@ -# This file is autogenerated by the command `make fix-copies`, do not edit. -from ..utils import DummyObject, requires_backends - - -class FlaxControlNetModel(metaclass=DummyObject): - _backends = ["flax"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["flax"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - -class FlaxModelMixin(metaclass=DummyObject): - _backends = ["flax"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["flax"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - -class FlaxUNet2DConditionModel(metaclass=DummyObject): - _backends = ["flax"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["flax"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - -class FlaxAutoencoderKL(metaclass=DummyObject): - _backends = ["flax"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["flax"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - -class FlaxDiffusionPipeline(metaclass=DummyObject): - _backends = ["flax"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["flax"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - -class FlaxDDIMScheduler(metaclass=DummyObject): - _backends = ["flax"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["flax"]) - - @classmethod - 
def from_config(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - -class FlaxDDPMScheduler(metaclass=DummyObject): - _backends = ["flax"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["flax"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - -class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject): - _backends = ["flax"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["flax"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - -class FlaxKarrasVeScheduler(metaclass=DummyObject): - _backends = ["flax"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["flax"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - -class FlaxLMSDiscreteScheduler(metaclass=DummyObject): - _backends = ["flax"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["flax"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - -class FlaxPNDMScheduler(metaclass=DummyObject): - _backends = ["flax"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["flax"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - -class FlaxSchedulerMixin(metaclass=DummyObject): - _backends = ["flax"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["flax"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - -class FlaxScoreSdeVeScheduler(metaclass=DummyObject): - _backends = ["flax"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["flax"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["flax"]) diff --git a/spaces/diacanFperku/AutoGPT/Marianas TrenchMasterpiece Theatre Full Album Zip REPACK.md b/spaces/diacanFperku/AutoGPT/Marianas TrenchMasterpiece Theatre Full Album Zip REPACK.md deleted file mode 100644 index d5b4181f757acce9b281a84415e5fbfcfc4edb2a..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Marianas TrenchMasterpiece Theatre Full Album Zip REPACK.md +++ /dev/null @@ -1,6 +0,0 @@ -

Marianas TrenchMasterpiece Theatre Full Album Zip


Download File 🆓 https://gohhs.com/2uFSYK



- -Zip-Lock (4 stars out of 5) ... Mariana Trench: MASTERPIECE THEATER. Painting. We really enjoyed this CD with their unique arrangements and harmonies. ... Sir Paul McCartney and his Beatles. Beatlemania. potpourri. ... The Beatles. Remix. This is a very good classical composition written in the style of the Beatles. And it's a very good choice for a CD. ... The Beatles. yellow submarine. This is another great performance on CD. ... Paul McCartney and his band Wings. Live at Wembley. ... Paul McCartney is back with his band. ... Michael Jackson. black and white. ... Michael Jackson and his band. ... Michael Jackson. Michael Jackson: Live in New York. ... 8a78ff9644
-
-
-

diff --git a/spaces/digitalxingtong/Bufeiyan-b-Bert-VITS2/README.md b/spaces/digitalxingtong/Bufeiyan-b-Bert-VITS2/README.md deleted file mode 100644 index 7904d4445f0da894113405b7b0b1c6ea9d0ba4c9..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Bufeiyan-b-Bert-VITS2/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 步非烟 Ver.b -emoji: 🌟 -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/digitalxingtong/Eileen-Bert-Vits2/text/symbols.py b/spaces/digitalxingtong/Eileen-Bert-Vits2/text/symbols.py deleted file mode 100644 index 9dfae4e633829f20c4fd767b1c7a9198911ed801..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Eileen-Bert-Vits2/text/symbols.py +++ /dev/null @@ -1,51 +0,0 @@ -punctuation = ['!', '?', '…', ",", ".", "'", '-'] -pu_symbols = punctuation + ["SP", "UNK"] -pad = '_' - -# chinese -zh_symbols = ['E', 'En', 'a', 'ai', 'an', 'ang', 'ao', 'b', 'c', 'ch', 'd', 'e', 'ei', 'en', 'eng', 'er', 'f', 'g', 'h', - 'i', 'i0', 'ia', 'ian', 'iang', 'iao', 'ie', 'in', 'ing', 'iong', 'ir', 'iu', 'j', 'k', 'l', 'm', 'n', 'o', - 'ong', - 'ou', 'p', 'q', 'r', 's', 'sh', 't', 'u', 'ua', 'uai', 'uan', 'uang', 'ui', 'un', 'uo', 'v', 'van', 've', 'vn', - 'w', 'x', 'y', 'z', 'zh', - "AA", "EE", "OO"] -num_zh_tones = 6 - -# japanese -ja_symbols = ['I', 'N', 'U', 'a', 'b', 'by', 'ch', 'cl', 'd', 'dy', 'e', 'f', 'g', 'gy', 'h', 'hy', 'i', 'j', 'k', 'ky', - 'm', 'my', 'n', 'ny', 'o', 'p', 'py', 'r', 'ry', 's', 'sh', 't', 'ts', 'u', 'V', 'w', 'y', 'z'] -num_ja_tones = 1 - -# English -en_symbols = ['aa', 'ae', 'ah', 'ao', 'aw', 'ay', 'b', 'ch', 'd', 'dh', 'eh', 'er', 'ey', 'f', 'g', 'hh', 'ih', 'iy', - 'jh', 'k', 'l', 'm', 'n', 'ng', 'ow', 'oy', 'p', 'r', 's', - 'sh', 't', 'th', 'uh', 'uw', 'V', 'w', 'y', 'z', 'zh'] -num_en_tones = 4 - -# combine all symbols -normal_symbols = sorted(set(zh_symbols + ja_symbols + en_symbols)) -symbols = [pad] + normal_symbols + pu_symbols -sil_phonemes_ids = [symbols.index(i) for i in pu_symbols] - -# combine all tones -num_tones = num_zh_tones + num_ja_tones + num_en_tones - -# language maps -language_id_map = { - 'ZH': 0, - "JA": 1, - "EN": 2 -} -num_languages = len(language_id_map.keys()) - -language_tone_start_map = { - 'ZH': 0, - "JA": num_zh_tones, - "EN": num_zh_tones + num_ja_tones -} - -if __name__ == '__main__': - a = set(zh_symbols) - b = set(en_symbols) - print(sorted(a&b)) - diff --git a/spaces/dineshreddy/WALT/mmdet/models/dense_heads/__init__.py b/spaces/dineshreddy/WALT/mmdet/models/dense_heads/__init__.py deleted file mode 100644 index f004dd95d97df16167f932587b3ce73b05b04a37..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/mmdet/models/dense_heads/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -from .anchor_free_head import AnchorFreeHead -from .anchor_head import AnchorHead -from .atss_head import ATSSHead -from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead -from .centripetal_head import CentripetalHead -from .corner_head import CornerHead -from .embedding_rpn_head import EmbeddingRPNHead -from .fcos_head import FCOSHead -from .fovea_head import FoveaHead -from .free_anchor_retina_head import FreeAnchorRetinaHead -from .fsaf_head import FSAFHead -from .ga_retina_head import GARetinaHead -from .ga_rpn_head import GARPNHead -from .gfl_head import GFLHead -from 
.guided_anchor_head import FeatureAdaption, GuidedAnchorHead -from .ld_head import LDHead -from .nasfcos_head import NASFCOSHead -from .paa_head import PAAHead -from .pisa_retinanet_head import PISARetinaHead -from .pisa_ssd_head import PISASSDHead -from .reppoints_head import RepPointsHead -from .retina_head import RetinaHead -from .retina_sepbn_head import RetinaSepBNHead -from .rpn_head import RPNHead -from .sabl_retina_head import SABLRetinaHead -from .ssd_head import SSDHead -from .transformer_head import TransformerHead -from .vfnet_head import VFNetHead -from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead -from .yolo_head import YOLOV3Head - -__all__ = [ - 'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption', - 'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead', - 'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead', - 'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead', - 'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead', - 'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', - 'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'TransformerHead', - 'StageCascadeRPNHead', 'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead' -] diff --git a/spaces/dorkai/pygmalion/README.md b/spaces/dorkai/pygmalion/README.md deleted file mode 100644 index 7aa040d767d6c8a53bb3db0cf8124b0dd0d1ee3b..0000000000000000000000000000000000000000 --- a/spaces/dorkai/pygmalion/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Pygmalion -emoji: 🐠 -colorFrom: blue -colorTo: blue -sdk: gradio -sdk_version: 3.28.3 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/dragonSwing/annotate-anything/GroundingDINO/demo/gradio_app.py b/spaces/dragonSwing/annotate-anything/GroundingDINO/demo/gradio_app.py deleted file mode 100644 index 15e08323f485291df8b53eefd4691c087d7863f7..0000000000000000000000000000000000000000 --- a/spaces/dragonSwing/annotate-anything/GroundingDINO/demo/gradio_app.py +++ /dev/null @@ -1,125 +0,0 @@ -import argparse -from functools import partial -import cv2 -import requests -import os -from io import BytesIO -from PIL import Image -import numpy as np -from pathlib import Path - - -import warnings - -import torch - -# prepare the environment -os.system("python setup.py build develop --user") -os.system("pip install packaging==21.3") -os.system("pip install gradio") - - -warnings.filterwarnings("ignore") - -import gradio as gr - -from groundingdino.models import build_model -from groundingdino.util.slconfig import SLConfig -from groundingdino.util.utils import clean_state_dict -from groundingdino.util.inference import annotate, load_image, predict -import groundingdino.datasets.transforms as T - -from huggingface_hub import hf_hub_download - - - -# Use this command for evaluate the GLIP-T model -config_file = "groundingdino/config/GroundingDINO_SwinT_OGC.py" -ckpt_repo_id = "ShilongLiu/GroundingDINO" -ckpt_filenmae = "groundingdino_swint_ogc.pth" - - -def load_model_hf(model_config_path, repo_id, filename, device='cpu'): - args = SLConfig.fromfile(model_config_path) - model = build_model(args) - args.device = device - - cache_file = hf_hub_download(repo_id=repo_id, filename=filename) - checkpoint = torch.load(cache_file, map_location='cpu') - log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False) - print("Model loaded from {} \n => {}".format(cache_file, log)) - _ 
= model.eval() - return model - -def image_transform_grounding(init_image): - transform = T.Compose([ - T.RandomResize([800], max_size=1333), - T.ToTensor(), - T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) - ]) - image, _ = transform(init_image, None) # 3, h, w - return init_image, image - -def image_transform_grounding_for_vis(init_image): - transform = T.Compose([ - T.RandomResize([800], max_size=1333), - ]) - image, _ = transform(init_image, None) # 3, h, w - return image - -model = load_model_hf(config_file, ckpt_repo_id, ckpt_filenmae) - -def run_grounding(input_image, grounding_caption, box_threshold, text_threshold): - init_image = input_image.convert("RGB") - original_size = init_image.size - - _, image_tensor = image_transform_grounding(init_image) - image_pil: Image = image_transform_grounding_for_vis(init_image) - - # run grounidng - boxes, logits, phrases = predict(model, image_tensor, grounding_caption, box_threshold, text_threshold, device='cpu') - annotated_frame = annotate(image_source=np.asarray(image_pil), boxes=boxes, logits=logits, phrases=phrases) - image_with_box = Image.fromarray(cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)) - - - return image_with_box - -if __name__ == "__main__": - - parser = argparse.ArgumentParser("Grounding DINO demo", add_help=True) - parser.add_argument("--debug", action="store_true", help="using debug mode") - parser.add_argument("--share", action="store_true", help="share the app") - args = parser.parse_args() - - block = gr.Blocks().queue() - with block: - gr.Markdown("# [Grounding DINO](https://github.com/IDEA-Research/GroundingDINO)") - gr.Markdown("### Open-World Detection with Grounding DINO") - - with gr.Row(): - with gr.Column(): - input_image = gr.Image(source='upload', type="pil") - grounding_caption = gr.Textbox(label="Detection Prompt") - run_button = gr.Button(label="Run") - with gr.Accordion("Advanced options", open=False): - box_threshold = gr.Slider( - label="Box Threshold", minimum=0.0, maximum=1.0, value=0.25, step=0.001 - ) - text_threshold = gr.Slider( - label="Text Threshold", minimum=0.0, maximum=1.0, value=0.25, step=0.001 - ) - - with gr.Column(): - gallery = gr.outputs.Image( - type="pil", - # label="grounding results" - ).style(full_width=True, full_height=True) - # gallery = gr.Gallery(label="Generated images", show_label=False).style( - # grid=[1], height="auto", container=True, full_width=True, full_height=True) - - run_button.click(fn=run_grounding, inputs=[ - input_image, grounding_caption, box_threshold, text_threshold], outputs=[gallery]) - - - block.launch(server_name='0.0.0.0', server_port=7579, debug=args.debug, share=args.share) - diff --git a/spaces/duycse1603/math2tex/ScanSSD/gtdb/split_annotations_per_page.py b/spaces/duycse1603/math2tex/ScanSSD/gtdb/split_annotations_per_page.py deleted file mode 100644 index 772cc9600ba477d8f31957369ab897ba8cddb615..0000000000000000000000000000000000000000 --- a/spaces/duycse1603/math2tex/ScanSSD/gtdb/split_annotations_per_page.py +++ /dev/null @@ -1,112 +0,0 @@ -# Author: Parag Mali -# This script generates page level annotations from the PDF level annotations -# provided in the dataset - -import sys -import os -from multiprocessing import Pool -import csv -import cv2 - -def split(args): - - gt_dir, pdf_name, out_dir, ext = args - - file_path = os.path.join(gt_dir, pdf_name + "." 
+ ext) - img_dir = '/home/psm2208/data/GTDB/images/' - - # create a map of page to list of math boxes - map = {} - - if ext == "math": - - file_ip = open(file_path, "r") - for line in file_ip: - entries = line.strip().split(",") - - # if entry is not in map - if entries[0] not in map: - map[entries[0]] = [] - - map[entries[0]].append(entries[1:]) - - for key in map: - - boxes = map[key] - key = float(key) - img_file = os.path.join(img_dir, pdf_name, str(int(key) + 1) + ".png") - img = cv2.imread(img_file) - - height, width, channels = img.shape - - #width_ratio = 512 / width - #height_ratio = 512 / height - - width_ratio = 1 - height_ratio = 1 - - # create processed math file - file_op = open(os.path.join(out_dir, pdf_name, str(int(key) + 1)) + ".p" + ext, "w") - - for box in boxes: - # xmin, ymin, xmax, ymax - - box[0] = float(box[0]) * width_ratio - box[1] = float(box[1]) * height_ratio - box[2] = float(box[2]) * width_ratio - box[3] = float(box[3]) * height_ratio - - file_op.write(','.join(str(e) for e in box) + "\n") - - file_op.close() - file_ip.close() - - elif ext == "char": - with open(file_path, 'r') as csvfile: - reader = csv.reader(csvfile, delimiter=',') - for row in reader: - # if entry is not in map - if row[0] not in map: - map[row[0]] = [] - - map[row[0]].append(row) - - for key in map: - - boxes = map[key] - - with open(os.path.join(out_dir, pdf_name, str(int(key) + 1)) + ".p" + ext, "w") as csvfile: - writer = csv.writer(csvfile, delimiter=',') - - for box in boxes: - writer.writerow(box) - -def test(): - - filename = sys.argv[1] # file names to be processed - out_dir = sys.argv[2] # output dir - gt_dir = sys.argv[3] # gt dir - ext = sys.argv[4] # file extension - - pdf_names_list = [] - pdf_names = open(filename, 'r') - - for pdf_name in pdf_names: - pdf_name = pdf_name.strip() - - if not os.path.exists(os.path.join(out_dir, pdf_name)): - os.mkdir(os.path.join(out_dir, pdf_name)) - - if pdf_name != '': - pdf_names_list.append((gt_dir, pdf_name, out_dir, ext)) - - pdf_names.close() - - pool = Pool(processes=32) - pool.map(split, pdf_names_list) - pool.close() - pool.join() - - -if __name__ == "__main__": - test() diff --git a/spaces/editing-images/ai-halloween-photobooth/cog_sdxl_dataset_and_utils.py b/spaces/editing-images/ai-halloween-photobooth/cog_sdxl_dataset_and_utils.py deleted file mode 100644 index d0f5bd01c9e535390b68a298db944ff4ecf986b9..0000000000000000000000000000000000000000 --- a/spaces/editing-images/ai-halloween-photobooth/cog_sdxl_dataset_and_utils.py +++ /dev/null @@ -1,422 +0,0 @@ -# dataset_and_utils.py file taken from https://github.com/replicate/cog-sdxl/blob/main/dataset_and_utils.py -import os -from typing import Dict, List, Optional, Tuple - -import numpy as np -import pandas as pd -import PIL -import torch -import torch.utils.checkpoint -from diffusers import AutoencoderKL, DDPMScheduler, UNet2DConditionModel -from PIL import Image -from safetensors import safe_open -from safetensors.torch import save_file -from torch.utils.data import Dataset -from transformers import AutoTokenizer, PretrainedConfig - - -def prepare_image( - pil_image: PIL.Image.Image, w: int = 512, h: int = 512 -) -> torch.Tensor: - pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1) - arr = np.array(pil_image.convert("RGB")) - arr = arr.astype(np.float32) / 127.5 - 1 - arr = np.transpose(arr, [2, 0, 1]) - image = torch.from_numpy(arr).unsqueeze(0) - return image - - -def prepare_mask( - pil_image: PIL.Image.Image, w: int = 512, h: int = 512 -) -> 
torch.Tensor: - pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1) - arr = np.array(pil_image.convert("L")) - arr = arr.astype(np.float32) / 255.0 - arr = np.expand_dims(arr, 0) - image = torch.from_numpy(arr).unsqueeze(0) - return image - - -class PreprocessedDataset(Dataset): - def __init__( - self, - csv_path: str, - tokenizer_1, - tokenizer_2, - vae_encoder, - text_encoder_1=None, - text_encoder_2=None, - do_cache: bool = False, - size: int = 512, - text_dropout: float = 0.0, - scale_vae_latents: bool = True, - substitute_caption_map: Dict[str, str] = {}, - ): - super().__init__() - - self.data = pd.read_csv(csv_path) - self.csv_path = csv_path - - self.caption = self.data["caption"] - # make it lowercase - self.caption = self.caption.str.lower() - for key, value in substitute_caption_map.items(): - self.caption = self.caption.str.replace(key.lower(), value) - - self.image_path = self.data["image_path"] - - if "mask_path" not in self.data.columns: - self.mask_path = None - else: - self.mask_path = self.data["mask_path"] - - if text_encoder_1 is None: - self.return_text_embeddings = False - else: - self.text_encoder_1 = text_encoder_1 - self.text_encoder_2 = text_encoder_2 - self.return_text_embeddings = True - assert ( - NotImplementedError - ), "Preprocessing Text Encoder is not implemented yet" - - self.tokenizer_1 = tokenizer_1 - self.tokenizer_2 = tokenizer_2 - - self.vae_encoder = vae_encoder - self.scale_vae_latents = scale_vae_latents - self.text_dropout = text_dropout - - self.size = size - - if do_cache: - self.vae_latents = [] - self.tokens_tuple = [] - self.masks = [] - - self.do_cache = True - - print("Captions to train on: ") - for idx in range(len(self.data)): - token, vae_latent, mask = self._process(idx) - self.vae_latents.append(vae_latent) - self.tokens_tuple.append(token) - self.masks.append(mask) - - del self.vae_encoder - - else: - self.do_cache = False - - @torch.no_grad() - def _process( - self, idx: int - ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], torch.Tensor, torch.Tensor]: - image_path = self.image_path[idx] - image_path = os.path.join(os.path.dirname(self.csv_path), image_path) - - image = PIL.Image.open(image_path).convert("RGB") - image = prepare_image(image, self.size, self.size).to( - dtype=self.vae_encoder.dtype, device=self.vae_encoder.device - ) - - caption = self.caption[idx] - - print(caption) - - # tokenizer_1 - ti1 = self.tokenizer_1( - caption, - padding="max_length", - max_length=77, - truncation=True, - add_special_tokens=True, - return_tensors="pt", - ).input_ids - - ti2 = self.tokenizer_2( - caption, - padding="max_length", - max_length=77, - truncation=True, - add_special_tokens=True, - return_tensors="pt", - ).input_ids - - vae_latent = self.vae_encoder.encode(image).latent_dist.sample() - - if self.scale_vae_latents: - vae_latent = vae_latent * self.vae_encoder.config.scaling_factor - - if self.mask_path is None: - mask = torch.ones_like( - vae_latent, dtype=self.vae_encoder.dtype, device=self.vae_encoder.device - ) - - else: - mask_path = self.mask_path[idx] - mask_path = os.path.join(os.path.dirname(self.csv_path), mask_path) - - mask = PIL.Image.open(mask_path) - mask = prepare_mask(mask, self.size, self.size).to( - dtype=self.vae_encoder.dtype, device=self.vae_encoder.device - ) - - mask = torch.nn.functional.interpolate( - mask, size=(vae_latent.shape[-2], vae_latent.shape[-1]), mode="nearest" - ) - mask = mask.repeat(1, vae_latent.shape[1], 1, 1) - - assert len(mask.shape) == 4 and len(vae_latent.shape) 
== 4 - - return (ti1.squeeze(), ti2.squeeze()), vae_latent.squeeze(), mask.squeeze() - - def __len__(self) -> int: - return len(self.data) - - def atidx( - self, idx: int - ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], torch.Tensor, torch.Tensor]: - if self.do_cache: - return self.tokens_tuple[idx], self.vae_latents[idx], self.masks[idx] - else: - return self._process(idx) - - def __getitem__( - self, idx: int - ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], torch.Tensor, torch.Tensor]: - token, vae_latent, mask = self.atidx(idx) - return token, vae_latent, mask - - -def import_model_class_from_model_name_or_path( - pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder" -): - text_encoder_config = PretrainedConfig.from_pretrained( - pretrained_model_name_or_path, subfolder=subfolder, revision=revision - ) - model_class = text_encoder_config.architectures[0] - - if model_class == "CLIPTextModel": - from transformers import CLIPTextModel - - return CLIPTextModel - elif model_class == "CLIPTextModelWithProjection": - from transformers import CLIPTextModelWithProjection - - return CLIPTextModelWithProjection - else: - raise ValueError(f"{model_class} is not supported.") - - -def load_models(pretrained_model_name_or_path, revision, device, weight_dtype): - tokenizer_one = AutoTokenizer.from_pretrained( - pretrained_model_name_or_path, - subfolder="tokenizer", - revision=revision, - use_fast=False, - ) - tokenizer_two = AutoTokenizer.from_pretrained( - pretrained_model_name_or_path, - subfolder="tokenizer_2", - revision=revision, - use_fast=False, - ) - - # Load scheduler and models - noise_scheduler = DDPMScheduler.from_pretrained( - pretrained_model_name_or_path, subfolder="scheduler" - ) - # import correct text encoder classes - text_encoder_cls_one = import_model_class_from_model_name_or_path( - pretrained_model_name_or_path, revision - ) - text_encoder_cls_two = import_model_class_from_model_name_or_path( - pretrained_model_name_or_path, revision, subfolder="text_encoder_2" - ) - text_encoder_one = text_encoder_cls_one.from_pretrained( - pretrained_model_name_or_path, subfolder="text_encoder", revision=revision - ) - text_encoder_two = text_encoder_cls_two.from_pretrained( - pretrained_model_name_or_path, subfolder="text_encoder_2", revision=revision - ) - - vae = AutoencoderKL.from_pretrained( - pretrained_model_name_or_path, subfolder="vae", revision=revision - ) - unet = UNet2DConditionModel.from_pretrained( - pretrained_model_name_or_path, subfolder="unet", revision=revision - ) - - vae.requires_grad_(False) - text_encoder_one.requires_grad_(False) - text_encoder_two.requires_grad_(False) - - unet.to(device, dtype=weight_dtype) - vae.to(device, dtype=torch.float32) - text_encoder_one.to(device, dtype=weight_dtype) - text_encoder_two.to(device, dtype=weight_dtype) - - return ( - tokenizer_one, - tokenizer_two, - noise_scheduler, - text_encoder_one, - text_encoder_two, - vae, - unet, - ) - - -def unet_attn_processors_state_dict(unet) -> Dict[str, torch.tensor]: - """ - Returns: - a state dict containing just the attention processor parameters. 
- """ - attn_processors = unet.attn_processors - - attn_processors_state_dict = {} - - for attn_processor_key, attn_processor in attn_processors.items(): - for parameter_key, parameter in attn_processor.state_dict().items(): - attn_processors_state_dict[ - f"{attn_processor_key}.{parameter_key}" - ] = parameter - - return attn_processors_state_dict - - -class TokenEmbeddingsHandler: - def __init__(self, text_encoders, tokenizers): - self.text_encoders = text_encoders - self.tokenizers = tokenizers - - self.train_ids: Optional[torch.Tensor] = None - self.inserting_toks: Optional[List[str]] = None - self.embeddings_settings = {} - - def initialize_new_tokens(self, inserting_toks: List[str]): - idx = 0 - for tokenizer, text_encoder in zip(self.tokenizers, self.text_encoders): - assert isinstance( - inserting_toks, list - ), "inserting_toks should be a list of strings." - assert all( - isinstance(tok, str) for tok in inserting_toks - ), "All elements in inserting_toks should be strings." - - self.inserting_toks = inserting_toks - special_tokens_dict = {"additional_special_tokens": self.inserting_toks} - tokenizer.add_special_tokens(special_tokens_dict) - text_encoder.resize_token_embeddings(len(tokenizer)) - - self.train_ids = tokenizer.convert_tokens_to_ids(self.inserting_toks) - - # random initialization of new tokens - - std_token_embedding = ( - text_encoder.text_model.embeddings.token_embedding.weight.data.std() - ) - - print(f"{idx} text encodedr's std_token_embedding: {std_token_embedding}") - - text_encoder.text_model.embeddings.token_embedding.weight.data[ - self.train_ids - ] = ( - torch.randn( - len(self.train_ids), text_encoder.text_model.config.hidden_size - ) - .to(device=self.device) - .to(dtype=self.dtype) - * std_token_embedding - ) - self.embeddings_settings[ - f"original_embeddings_{idx}" - ] = text_encoder.text_model.embeddings.token_embedding.weight.data.clone() - self.embeddings_settings[f"std_token_embedding_{idx}"] = std_token_embedding - - inu = torch.ones((len(tokenizer),), dtype=torch.bool) - inu[self.train_ids] = False - - self.embeddings_settings[f"index_no_updates_{idx}"] = inu - - print(self.embeddings_settings[f"index_no_updates_{idx}"].shape) - - idx += 1 - - def save_embeddings(self, file_path: str): - assert ( - self.train_ids is not None - ), "Initialize new tokens before saving embeddings." - tensors = {} - for idx, text_encoder in enumerate(self.text_encoders): - assert text_encoder.text_model.embeddings.token_embedding.weight.data.shape[ - 0 - ] == len(self.tokenizers[0]), "Tokenizers should be the same." - new_token_embeddings = ( - text_encoder.text_model.embeddings.token_embedding.weight.data[ - self.train_ids - ] - ) - tensors[f"text_encoders_{idx}"] = new_token_embeddings - - save_file(tensors, file_path) - - @property - def dtype(self): - return self.text_encoders[0].dtype - - @property - def device(self): - return self.text_encoders[0].device - - def _load_embeddings(self, loaded_embeddings, tokenizer, text_encoder): - # Assuming new tokens are of the format - self.inserting_toks = [f"" for i in range(loaded_embeddings.shape[0])] - special_tokens_dict = {"additional_special_tokens": self.inserting_toks} - tokenizer.add_special_tokens(special_tokens_dict) - text_encoder.resize_token_embeddings(len(tokenizer)) - - self.train_ids = tokenizer.convert_tokens_to_ids(self.inserting_toks) - assert self.train_ids is not None, "New tokens could not be converted to IDs." 
- text_encoder.text_model.embeddings.token_embedding.weight.data[ - self.train_ids - ] = loaded_embeddings.to(device=self.device).to(dtype=self.dtype) - - @torch.no_grad() - def retract_embeddings(self): - for idx, text_encoder in enumerate(self.text_encoders): - index_no_updates = self.embeddings_settings[f"index_no_updates_{idx}"] - text_encoder.text_model.embeddings.token_embedding.weight.data[ - index_no_updates - ] = ( - self.embeddings_settings[f"original_embeddings_{idx}"][index_no_updates] - .to(device=text_encoder.device) - .to(dtype=text_encoder.dtype) - ) - - # for the parts that were updated, we need to normalize them - # to have the same std as before - std_token_embedding = self.embeddings_settings[f"std_token_embedding_{idx}"] - - index_updates = ~index_no_updates - new_embeddings = ( - text_encoder.text_model.embeddings.token_embedding.weight.data[ - index_updates - ] - ) - off_ratio = std_token_embedding / new_embeddings.std() - - new_embeddings = new_embeddings * (off_ratio**0.1) - text_encoder.text_model.embeddings.token_embedding.weight.data[ - index_updates - ] = new_embeddings - - def load_embeddings(self, file_path: str): - with safe_open(file_path, framework="pt", device=self.device.type) as f: - for idx in range(len(self.text_encoders)): - text_encoder = self.text_encoders[idx] - tokenizer = self.tokenizers[idx] - - loaded_embeddings = f.get_tensor(f"text_encoders_{idx}") - self._load_embeddings(loaded_embeddings, tokenizer, text_encoder) \ No newline at end of file diff --git a/spaces/eliolio/yelp-reviews/app.py b/spaces/eliolio/yelp-reviews/app.py deleted file mode 100644 index edc4da72d090558ea739c631b9c28db5acb11d75..0000000000000000000000000000000000000000 --- a/spaces/eliolio/yelp-reviews/app.py +++ /dev/null @@ -1,133 +0,0 @@ -import torch -import transformers -from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification -import gradio as gr -import os - -model_name = 'eliolio/bart-finetuned-yelpreviews' -bert_model_name = 'eliolio/bert-correlation-yelpreviews' - -access_token = os.environ.get('private_token') - -model = AutoModelForSeq2SeqLM.from_pretrained( - model_name, use_auth_token=access_token -) -tokenizer = AutoTokenizer.from_pretrained( - model_name, use_auth_token=access_token -) - -bert_tokenizer = AutoTokenizer.from_pretrained( - bert_model_name, use_auth_token=access_token -) -bert_model = AutoModelForSequenceClassification.from_pretrained( - bert_model_name, use_auth_token=access_token -) - - -def correlation_score(table, review): - # Compute the correlation score - args = ((table, review)) - inputs = bert_tokenizer(*args, padding=True, max_length=128, truncation=True, return_tensors="pt") - logits = bert_model(**inputs).logits - probs = logits.softmax(dim=-1) - return { - "correlated": probs[:, 1].item(), - "uncorrelated": probs[:, 0].item() - } - -def create_prompt(stars, useful, funny, cool): - return f"Generate review: stars: {stars}, useful: {useful}, funny: {funny}, cool: {cool}" - - -def postprocess(review): - dot = review.rfind('.') - return review[:dot+1] - - -def generate_reviews(stars, useful, funny, cool): - text = create_prompt(stars, useful, funny, cool) - inputs = tokenizer(text, return_tensors='pt') - out = model.generate( - input_ids=inputs.input_ids, - attention_mask=inputs.attention_mask, - do_sample=True, - num_return_sequences=3, - temperature=1.2, - top_p=0.9 - ) - reviews = [] - scores = [] - for review in out: - reviews.append(postprocess(tokenizer.decode(review, 
skip_special_tokens=True))) - for review in reviews: - scores.append( - correlation_score(text[17:], review) - ) - - return reviews[0], reviews[1], reviews[2], scores[0], scores[1], scores[2] - - -css = """ - #ctr {text-align: center;} - #btn {color: white; background: linear-gradient( 90deg, rgba(255,166,0,1) 14.7%, rgba(255,99,97,1) 73% );} -""" - - -md_text = """

Generating Yelp reviews with BART-base ⭐⭐⭐

- -This space demonstrates how synthetic data generation can be performed on natural language columns, as found in the Yelp reviews dataset. - -| review id | stars | useful | funny | cool | text | -|:---:|:---:|:---:|:---:|:---:|:---:| -| 0 | 5 | 1 | 0 | 1 | "Wow! Yummy, different, delicious. Our favorite is the lamb curry and korma. With 10 different kinds of naan!!! Don't let the outside deter you (because we almost changed our minds)...go in and try something new! You'll be glad you did!" - - - - -The model is a fine-tuned version of [facebook/bart-base](https://huggingface.com/facebook/bart-base) on Yelp reviews with the following input-output pairs: - -- **Input**: "Generate review: stars: 5, useful: 1, funny: 0, cool: 1" -- **Output**: "Wow! Yummy, different, delicious. Our favorite is the lamb curry and korma. With 10 different kinds of naan!!! Don't let the outside deter you (because we almost changed our minds)...go in and try something new! You'll be glad you did!" -""" - -resources = """## Resources -- Code for training: [github repo](https://github.com/EliottZemour/yelp-reviews/) -- The Yelp reviews dataset can be found in json format [here](https://www.yelp.com/dataset).""" - -demo = gr.Blocks(css=css) -with demo: - with gr.Row(): - gr.Markdown(md_text) - - with gr.Row(): - stars = gr.inputs.Slider(minimum=0, maximum=5, - step=1, default=0, label="stars") - useful = gr.inputs.Slider( - minimum=0, maximum=5, step=1, default=0, label="useful") - funny = gr.inputs.Slider(minimum=0, maximum=5, - step=1, default=0, label="funny") - cool = gr.inputs.Slider(minimum=0, maximum=5, - step=1, default=0, label="cool") - with gr.Row(): - button = gr.Button("Generate reviews !", elem_id='btn') - - with gr.Row(): - output1 = gr.Textbox(label="Review #1") - output2 = gr.Textbox(label="Review #2") - output3 = gr.Textbox(label="Review #3") - - with gr.Row(): - score1 = gr.Label(label="Correlation score #1") - score2 = gr.Label(label="Correlation score #2") - score3 = gr.Label(label="Correlation score #3") - - with gr.Row(): - gr.Markdown(resources) - - button.click( - fn=generate_reviews, - inputs=[stars, useful, funny, cool], - outputs=[output1, output2, output3, score1, score2, score3] - ) - -demo.launch() diff --git a/spaces/elitecode/Detect_Emotions/app.py b/spaces/elitecode/Detect_Emotions/app.py deleted file mode 100644 index f68dfdbe8003c892e27a2a4064a5d0a179107abe..0000000000000000000000000000000000000000 --- a/spaces/elitecode/Detect_Emotions/app.py +++ /dev/null @@ -1,4 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/j-hartmann/emotion-english-distilroberta-base").launch() -# gr.Interface.load("models/elitecode/Detect_Emotions").launch() \ No newline at end of file diff --git a/spaces/exnav29/Real_Estate_Bot/app.py b/spaces/exnav29/Real_Estate_Bot/app.py deleted file mode 100644 index a4fae9fb19716c1f297abc0b922d6985780e9995..0000000000000000000000000000000000000000 --- a/spaces/exnav29/Real_Estate_Bot/app.py +++ /dev/null @@ -1,32 +0,0 @@ -import os -import openai -import gradio as gr - -openai.api_key = os.environ["OPENAI_API_KEY"] - -messages = [ - {"role": "system", "content": "You are an AI specialized in Real Estate Investing Do not answer anything other than Real Estate-related queries"}, -] -counter = 0 -def chatbot(input): - global counter - if input: - messages.append({"role": "user", "content": input}) - chat = openai.ChatCompletion.create( - model="gpt-3.5-turbo", messages=messages, temperature=0.2 - ) - reply = chat.choices[0].message.content - 
messages.append({"role": "assistant", "content": reply}) - counter += 1 - if counter >= 3: - gr.Interface.show_feedback_message("Thank you for using our chatbot! If you find our service helpful, please consider making a donation to support us: Donate Now") - counter = 0 - return reply - -inputs = gr.inputs.Textbox(lines=7, label="Ask your question") -outputs = gr.outputs.Textbox(label="Reply") - -app = gr.Interface(fn=chatbot, inputs=inputs, outputs=outputs, - title="JChat Real Estate Investing Chatbot", - description="Ask anything you want about investing in real estate") -app.launch() \ No newline at end of file diff --git a/spaces/failfast/2D-GameCreator/src/components/Examples.tsx b/spaces/failfast/2D-GameCreator/src/components/Examples.tsx deleted file mode 100644 index 6e5e870473a1b4f909e640d1f20dbe2ee2004b81..0000000000000000000000000000000000000000 --- a/spaces/failfast/2D-GameCreator/src/components/Examples.tsx +++ /dev/null @@ -1,62 +0,0 @@ -import { Link, Typography } from "@mui/material"; -import { DividerBox, SectionBox } from "./base/boxes"; -import ExamplesGrid, { Example } from "@/components/ExamplesGrid"; - -const examples: Example[] = [ - { - title: "Space Invaders Extreme", - creatorLink: "https://nerddis.co", - creatorName: "NERDDISCO", - image: "img/space_invaders_extreme.jpg", - playLink: "https://codesandbox.io/s/space-invaders-extreme-7xhp4v", - model: "GPT 3.5 Turbo", - iterations: 20, - controls: "Keyboard: Movement (Arrow Left, Arrow right), Shooting (Space), Restart (R)", - hints: "The bigger the window, the easier it gets to play", - }, - { - title: "Flappy Bird SPEED", - creatorLink: "https://nerddis.co", - creatorName: "NERDDISCO", - image: "img/flappy_bird_speed.jpg", - playLink: "https://codesandbox.io/s/flappy-bird-speed-rg9z6f", - model: "GPT 4.0", - iterations: 24, - controls: "Keyboard: Fly (space)", - hints: "Don't collide with the pipes ;)", - }, -]; - -export default function Examples() { - return ( - <> - - - - - Example Games - - - We re-created some of our favorite games! You want to share your game with the - community? Then head over to our{" "} - - - - , post it in #showcase and we might add it to this section as well! - - - - - - - - ); -} diff --git a/spaces/falterWliame/Face_Mask_Detection/148 Yoga Muthirai In Tamil.md b/spaces/falterWliame/Face_Mask_Detection/148 Yoga Muthirai In Tamil.md deleted file mode 100644 index 354665dcb59493ec1b1a37005abeb76c33f861c9..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/148 Yoga Muthirai In Tamil.md +++ /dev/null @@ -1,10 +0,0 @@ -
-

This mudra for the nose is performed while holding the pose. Place the tips of your right index finger and ring finger together, interlock the other fingers, and place your hands on your nose. This yoga mudra will give you relief from nasal congestion.

-

148 Yoga Muthirai In Tamil


DOWNLOAD ->->->-> https://urlca.com/2uDcYS



-

This yoga mudra for the stomach is performed while holding the pose. Cross your right index finger over your right thumb and place your right hand on your belly button. This mudra will aid digestion.

-

This mudra for the jaw is performed while holding the pose. Place your index and middle fingers together, interlock the other fingers, and place your hands along your jawline. This yoga mudra will help you calm your nerves and boost your confidence.

-

The two yoga mudras for the body are performed while holding the pose. For the first, cross your right index finger over your right thumb and place your right hand on your chest; this mudra will ease chest congestion. For the second, cross your right middle finger over your right index finger and place your right hand on your abdomen; this mudra will aid digestion.

-

This mudra for the feet is performed while holding the pose. Cross your left index finger over your left thumb and place your left hand on your left foot. This yoga mudra will calm your nerves and boost your confidence.

-

For many people, yoga is regarded as a type of religion. In fact, yoga has been practiced by monks, nuns, and other religious devotees for a long time. There are certain hand gestures, similar to prayer, that people perform as part of yoga or meditation. They are called mudras. The following mudras are used in yoga.

-
-
\ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/Beauty And The Beast (English) 3 Telugu Dubbed Movie Onlinel [UPD].md b/spaces/falterWliame/Face_Mask_Detection/Beauty And The Beast (English) 3 Telugu Dubbed Movie Onlinel [UPD].md deleted file mode 100644 index 927dac351685cfe7c9255693dbe892807757d1b6..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Beauty And The Beast (English) 3 Telugu Dubbed Movie Onlinel [UPD].md +++ /dev/null @@ -1,43 +0,0 @@ -
-

Beauty And The Beast (English) 3 Telugu Dubbed Movie Online

-

Beauty and the Beast is one of the most beloved fairy tales of all time. It tells the story of Belle, a young woman who is taken prisoner by a beast in his enchanted castle. Despite her fears, she learns to look beyond his appearance and discover the kind heart and soul of the true prince within.

-

There have been many adaptations of this classic story, but one of the most recent and popular ones is the 2017 live-action film starring Emma Watson and Dan Stevens. The film was a huge success, earning over $1 billion at the box office and receiving largely positive reviews from critics and audiences. It was also nominated for two Academy Awards, for Best Production Design and Best Costume Design.

-

Beauty And The Beast (English) 3 Telugu Dubbed Movie Online


Download Zip ✶✶✶ https://urlca.com/2uDcf1



-

If you are a fan of this movie and want to watch it in Telugu, you are in luck. There are several online platforms where you can stream or download Beauty And The Beast (English) 3 Telugu Dubbed Movie Online. Here are some of them:

-
    -
  • Disney+ Hotstar: This is the official streaming service of Disney, where you can watch all their movies and shows in various languages, including Telugu. You can watch Beauty And The Beast Full Movie on Disney+ Hotstar with a subscription or a VIP plan.
  • -
  • Google Play Movies: This is another option where you can rent or buy Beauty And The Beast (2014) - English Dub in Telugu. This is a different version of the movie, directed by Christophe Gans and starring Léa Seydoux and Vincent Cassel. It is a more faithful adaptation of the original French fairy tale, with a darker and more romantic tone.
  • -
  • Dailymotion: This is a video-sharing platform where you can find many movies and shows uploaded by users. You can watch Beauty And The Beast (2017) Full Movie on Dailymotion for free, but be aware that the quality may not be very good and there may be ads or pop-ups.
  • -
-

These are some of the best ways to watch Beauty And The Beast (English) 3 Telugu Dubbed Movie Online. Whether you prefer the modern or the classic version, you will surely enjoy this timeless tale of love and magic.

-

Another option to watch Beauty And The Beast (English) 3 Telugu Dubbed Movie Online is YouTube. YouTube is a popular video-sharing platform where you can find many movies and shows uploaded by users or official channels. You can watch Beauty And The Beast (2009) Telugu Dubbed Movie on YouTube for free, but be aware that the quality may not be very good and there may be ads or pop-ups.

-

Beauty And The Beast (2009) is a different version of the movie, directed by David Lister and starring Estella Warren, Rhett Giles, and Victor Parascos. It is a low-budget Australian film that follows the original story loosely, with some changes and additions. It is more of a horror film than a romance, with a darker and more violent tone.

-

Whether you prefer the modern or the classic version, you will surely enjoy this timeless tale of love and magic. Watch Beauty And The Beast (English) 3 Telugu Dubbed Movie Online and experience the enchantment for yourself.

-

Beauty And The Beast (English) 3 Telugu Dubbed Movie Online is a great choice for anyone who loves fairy tales, romance, and adventure. The movie has stunning visuals, amazing music, and a talented cast that brings the characters to life. The movie also has some important themes and messages, such as the power of love, the value of inner beauty, and the importance of being yourself.

-

-

Beauty And The Beast (English) 3 Telugu Dubbed Movie Online is also a great way to enjoy this movie in your own language. You can watch it with your family and friends, and share your thoughts and feelings about the story. You can also learn some new words and expressions in English, as well as appreciate the culture and history of France, where the story is set.

-

Beauty And The Beast (English) 3 Telugu Dubbed Movie Online is a movie that you will never forget. It is a movie that will make you laugh, cry, and dream. It is a movie that will touch your heart and inspire your imagination. Watch Beauty And The Beast (English) 3 Telugu Dubbed Movie Online today and discover the magic for yourself.

-

Beauty And The Beast (English) 3 Telugu Dubbed Movie Online is not only a movie, but also a musical. The movie features many songs that are catchy, emotional, and memorable. Some of the songs are original from the 1991 animated film, such as "Beauty and the Beast", "Be Our Guest", and "Gaston". Some of the songs are new additions, such as "Evermore", "Days in the Sun", and "How Does a Moment Last Forever". The songs are performed by the cast members, as well as famous singers like Celine Dion, Ariana Grande, and John Legend.

-

Beauty And The Beast (English) 3 Telugu Dubbed Movie Online is also a movie that has a lot of trivia and behind-the-scenes facts. For example, did you know that Emma Watson famously declined to wear a corset under her yellow dress so that Belle could move more freely? Or that Dan Stevens had to wear stilts and a motion-capture suit to play the Beast? Or that the movie was shot almost entirely on sets built in England, with interiors inspired by French landmarks such as the Palace of Versailles?

-

Beauty And The Beast (English) 3 Telugu Dubbed Movie Online is a movie that has something for everyone. It is a movie that will make you laugh, cry, and dream. It is a movie that will touch your heart and inspire your imagination. Watch Beauty And The Beast (English) 3 Telugu Dubbed Movie Online today and discover the magic for yourself.

-

Beauty And The Beast (English) 3 Telugu Dubbed Movie Online is also a movie that has a lot of fans and followers. The movie has inspired many people to create fan art, fan fiction, cosplay, and merchandise based on the characters and the story. You can find many of these online, and join the community of Beauty And The Beast lovers. You can also share your own creations and opinions, and interact with other fans.

-

Beauty And The Beast (English) 3 Telugu Dubbed Movie Online is also a movie that has a lot of history and trivia. The movie is based on a French fairy tale that dates back to the 18th century, written by Gabrielle-Suzanne Barbot de Villeneuve. The story has been adapted many times in different media, such as books, comics, plays, musicals, TV shows, and video games. The most famous adaptation is the 1991 animated film by Disney, which was the first animated film to be nominated for the Academy Award for Best Picture.

-

Beauty And The Beast (English) 3 Telugu Dubbed Movie Online is a movie that you will never regret watching. It is a movie that will make you happy, sad, and amazed. It is a movie that will make you fall in love with the characters and the story. Watch Beauty And The Beast (English) 3 Telugu Dubbed Movie Online today and discover the magic for yourself.

-

Beauty And The Beast (English) 3 Telugu Dubbed Movie Online is also a movie that has a lot of awards and nominations. The movie was nominated for two Academy Awards, Best Production Design and Best Costume Design, and went on to pick up a number of wins and nominations at audience-voted award shows.

-

Beauty And The Beast (English) 3 Telugu Dubbed Movie Online is also a movie that has a lot of fun and entertainment. The movie has many scenes that are humorous, thrilling, and romantic. Some of the scenes are iconic, such as the ballroom dance, the snowball fight, and the final battle. The movie also has many characters that are lovable, such as Lumiere, Cogsworth, Mrs. Potts, and Chip. The movie also has a villain that is despicable, such as Gaston.

-

Beauty And The Beast (English) 3 Telugu Dubbed Movie Online is a movie that you will never get bored of watching. It is a movie that will make you smile, cheer, and sing. It is a movie that will make you appreciate the beauty and the beast in everyone. Watch Beauty And The Beast (English) 3 Telugu Dubbed Movie Online today and discover the magic for yourself.

-

Beauty And The Beast (English) 3 Telugu Dubbed Movie Online is also a movie that has a lot of reviews and ratings. The movie has received mostly positive feedback from critics and audiences, who praised the performances, the visuals, the music, and the story. The movie has a score of 7.1 out of 10 on IMDb, 71% on Rotten Tomatoes, and 85% on Google Play. The movie also has many testimonials from fans who loved the movie and shared their opinions online.

-

Beauty And The Beast (English) 3 Telugu Dubbed Movie Online is also a movie that has a lot of influence and impact. The movie has inspired many people to learn more about the fairy tale, the culture, and the history behind it. The movie has also raised awareness and funds for various causes, such as women's education, animal welfare, and environmental protection. The movie has also contributed to the popularity and success of other Disney movies and products.

-


-

Beauty And The Beast (English) 3 Telugu Dubbed Movie Online is also a movie that has a lot of comparisons and contrasts. The movie has many similarities and differences with other versions of the story, such as the original fairy tale, the 1991 animated film, and the 2014 French film. Some of the comparisons and contrasts are:

-
    -
  • The 2017 film follows the plot of the 1991 film closely, with some changes and additions. For example, the 2017 film reveals more about the backstories of Belle and the Beast, and introduces new characters such as Agathe and Cadenza.
  • -
  • The 2017 film has more realistic and detailed visuals than the 1991 film, thanks to the advances in technology and CGI. For example, the 2017 film shows more details of the castle, the enchanted objects, and the transformation scenes.
  • -
  • The 2014 film is more faithful to the original fairy tale than the 2017 film, with some changes and additions. For example, the 2014 film has more elements of magic and fantasy, such as a garden of living statues, a forest of wolves, and a giant eagle.
  • -
  • The 2014 film has more dark and romantic visuals than the 2017 film, thanks to the artistic direction and cinematography. For example, the 2014 film has more contrast between light and shadow, more use of warm and cold colors, and more close-ups of the characters.
  • -
-

Beauty And The Beast (English) 3 Telugu Dubbed Movie Online is a movie that you will never get tired of watching. It is a movie that will make you think, feel, and wonder. It is a movie that will make you appreciate the beauty and the beast in everyone. Watch Beauty And The Beast (English) 3 Telugu Dubbed Movie Online today and discover the magic for yourself.

-

Conclusion

-

Beauty And The Beast (English) 3 Telugu Dubbed Movie Online is a movie that you should not miss. It is a movie that has everything you could ask for: a captivating story, a charming cast, a stunning soundtrack, and a beautiful message. It is a movie that will make you laugh, cry, and dream. It is a movie that will make you fall in love with the characters and the story. Watch Beauty And The Beast (English) 3 Telugu Dubbed Movie Online today and discover the magic for yourself.

-
-
\ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/Dhoom 3 Film Download Telugu.md b/spaces/falterWliame/Face_Mask_Detection/Dhoom 3 Film Download Telugu.md deleted file mode 100644 index 58b9dd0e22104f08c265ac15de2524b0752861ce..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Dhoom 3 Film Download Telugu.md +++ /dev/null @@ -1,6 +0,0 @@ -

dhoom 3 film download telugu


DOWNLOADhttps://urlca.com/2uDdym



- - 1fdad05405
-
-
-

diff --git a/spaces/falterWliame/Face_Mask_Detection/FREE Silca Key Programs 18.2.0.40 Free _BEST_ Download.md b/spaces/falterWliame/Face_Mask_Detection/FREE Silca Key Programs 18.2.0.40 Free _BEST_ Download.md deleted file mode 100644 index 9907da159dd2a031faeff1ab278daafe79f3a2a8..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/FREE Silca Key Programs 18.2.0.40 Free _BEST_ Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

FREE Silca Key Programs 18.2.0.40 Free Download


Download --->>> https://urlca.com/2uDdxH



- -By necsawasearch. FREE Silca Key Programs 18.2.0.40 Free Download. Container ... necsawasearch/calibri-body-font-free-downloadl. By necsawasearch. 1fdad05405
-
-
-

diff --git a/spaces/falterWliame/Face_Mask_Detection/MELSOFT GX Developer 8.78G Update 8.91V 2011 !!LINK!! Crack.md b/spaces/falterWliame/Face_Mask_Detection/MELSOFT GX Developer 8.78G Update 8.91V 2011 !!LINK!! Crack.md deleted file mode 100644 index d6dc28aba8207126b138a5b8482fee8e72ca9786..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/MELSOFT GX Developer 8.78G Update 8.91V 2011 !!LINK!! Crack.md +++ /dev/null @@ -1,5 +0,0 @@ -
-

Additional functional and security updates will be included in a future release. When multiple MELSOFT products have been installed, specify the installation destination of the first product.

-

MELSOFT GX Developer 8.78G Update 8.91V 2011 Crack


Download Zip ►►► https://urlca.com/2uDc9G



899543212b
-
-
\ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Blue Apron Home Cooking Made Easy and Fun - Download the App and Get 80 Off.md b/spaces/fatiXbelha/sd/Blue Apron Home Cooking Made Easy and Fun - Download the App and Get 80 Off.md deleted file mode 100644 index e804838ffe5b3093080d93bf14974523321d21db..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Blue Apron Home Cooking Made Easy and Fun - Download the App and Get 80 Off.md +++ /dev/null @@ -1,130 +0,0 @@ - -

Blue Apron Download: How to Get the App and Enjoy the Benefits

-

If you love cooking but hate grocery shopping and meal planning, you might want to try Blue Apron, one of the most popular meal kit delivery services in the US. And if you want to make your experience even more convenient and enjoyable, you can download the Blue Apron app on your smartphone or tablet. In this article, we'll tell you what Blue Apron is, why you should download the app, how to do it, and which alternatives are worth considering.

-

What is Blue Apron?

-

Blue Apron is a meal kit delivery service that delivers fresh ingredients and recipes to your door every week. You can choose from different plans and menus that feature chef-designed recipes, preportioned ingredients, and wine pairings. The recipes are suitable for various skill levels and take 20 to 55 minutes to prepare. The service aims to help you make healthy, home-cooked meals without the hassle of grocery shopping or meal planning.

-

blue apron download


Download Zip →→→ https://urllie.com/2uNDyi



-

Blue Apron is a meal kit delivery service that offers fresh ingredients and recipes to your door

-

With Blue Apron, you can enjoy cooking delicious meals at home with high-quality ingredients sourced from local farms and producers. You can also learn new cuisines, techniques, and flavors with each recipe. Some examples of recent recipes include:

-
    -
  • Cheesy Truffle Cavatappi with Roasted Broccoli
  • -
  • Pesto Chicken & Orzo with Zucchini & Tomatoes
  • -
  • Mafalda Pasta & Beef Ragu with Zucchini & Pecorino Cheese
  • -
  • Sheet Pan Cheesy Jalapeño Chicken with Roasted Potatoes & Carrots
  • -
  • Quinoa & Vegetable "Fried Rice" with Fried Eggs & Sriracha Mayo
  • -
-

Blue Apron has different plans and menus to suit your preferences and dietary needs

-

Blue Apron offers four different plans to suit your needs and preferences. You can choose from:

-
| Plan | Number of servings | Number of recipes per week | Price per serving |
|:---:|:---:|:---:|:---:|
| Signature | 2 or 4 | 2, 3, or 4 | $9.99 |
| Signature for 4 | 4 | 2, 3, or 4 | $8.99 |
| Vegetarian for 2 | 2 | 2 or 3 | $9.99 |
| Wellness for 2 | 2 | 2 or 3 | $9.99 |
-

The Signature plan offers a variety of meat, fish, and vegetarian recipes that are balanced and flavorful. The Signature for 4 plan is similar but designed for larger households. The Vegetarian for 2 plan features meat-free recipes that are rich in plant-based proteins and seasonal produce. The Wellness for 2 plan includes recipes that are nutritionist-approved and focus on lean proteins, whole grains, and fresh vegetables.
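For a rough idea of the weekly cost, multiply the per-serving prices above by the number of servings and recipes you pick (shipping fees and promotions, which vary, are not included in this estimate). For example, a Signature plan with 2 servings and 3 recipes works out to 2 × 3 × $9.99 = $59.94 per week, while a Signature for 4 plan with 3 recipes comes to 4 × 3 × $8.99 = $107.88 per week.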

-

You can also customize your menu by choosing from different categories such as:

-