diff --git a/spaces/0xJustin/0xJustin-Dungeons-and-Diffusion/app.py b/spaces/0xJustin/0xJustin-Dungeons-and-Diffusion/app.py deleted file mode 100644 index 969e30db2db42c563008db5cc67ba868c109abfc..0000000000000000000000000000000000000000 --- a/spaces/0xJustin/0xJustin-Dungeons-and-Diffusion/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/0xJustin/Dungeons-and-Diffusion").launch() \ No newline at end of file diff --git a/spaces/0xSynapse/LlamaGPT/README.md b/spaces/0xSynapse/LlamaGPT/README.md deleted file mode 100644 index f3bcd42363ab575bd7eb11eb535831511ced8d32..0000000000000000000000000000000000000000 --- a/spaces/0xSynapse/LlamaGPT/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: LlamaGPT -emoji: 📚 -colorFrom: green -colorTo: blue -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false -license: lgpl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Office 2021 64 Bit for Windows 10 Everything You Need to Know.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Office 2021 64 Bit for Windows 10 Everything You Need to Know.md deleted file mode 100644 index 0152a1eef5a8d3189f5da51c6248ad9b22fe9a29..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Office 2021 64 Bit for Windows 10 Everything You Need to Know.md +++ /dev/null @@ -1,27 +0,0 @@ - -

How to Download Office 2021 64 Bit for Windows 10

-

Office 2021 is the latest version of Microsoft's productivity suite, which includes Word, Excel, PowerPoint, Outlook, and more. If you want to download Office 2021 64-bit for Windows 10, you can follow these steps:

-

download office 2021 64 bit crack


Download Filehttps://byltly.com/2uKvx0



-
    -
  1. Go to https://www.microsoft.com/en-us/microsoft-365/buy/compare-all-microsoft-365-products and choose the plan that suits your needs.
  2. Click on the "Buy now" button and sign in with your Microsoft account or create a new one.
  3. Complete the payment process and confirm your order.
  4. Go to https://account.microsoft.com/services/ and sign in with your Microsoft account.
  5. Click on the "Install" button next to Office 2021 and choose "Other options".
  6. Select the "64-bit" option and click on "Download".
  7. Run the setup file and follow the instructions to install Office 2021 on your Windows 10 device.
-

Congratulations! You have successfully downloaded and installed Office 2021 64 bit for Windows 10. Enjoy using the latest features and enhancements of Microsoft's productivity suite.

Office 2021 is compatible with Windows 10 and Windows 11, as well as macOS. It offers several improvements and new features over the previous version, Office 2019. Some of the highlights include:

- -

Office 2021 also comes with enhanced security and privacy features, such as encryption, data loss prevention, and advanced threat protection. You can also access your files and documents from anywhere with OneDrive cloud storage and Office mobile apps.

If you want to try Office 2021 before buying it, you can download a free trial version from https://www.microsoft.com/en-us/evalcenter/evaluate-microsoft-365. The trial version will let you use Office 2021 for 30 days, after which you will need to purchase a subscription or a one-time license to continue using it.

-

Alternatively, you can also use Office Online, which is a free web-based version of Office that works in your browser. Office Online lets you create and edit documents, spreadsheets, and presentations online, as well as collaborate with others in real time. You can access Office Online from https://www.office.com/ or from your Microsoft account.

-

-

Whether you choose Office 2021 or Office Online, you will get the best of Microsoft's productivity tools for your personal and professional needs. Download Office 2021 64 bit for Windows 10 today and see the difference for yourself.

-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Easy Recovery Pro Crack BEST.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Easy Recovery Pro Crack BEST.md deleted file mode 100644 index 8ba22bfbd923319d33a300f103aed6be207ed1c7..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Easy Recovery Pro Crack BEST.md +++ /dev/null @@ -1,27 +0,0 @@ - -

How to Use Easy Recovery Pro to Restore Lost Data on Windows

-

Have you ever lost important files due to accidental deletion, formatting, virus attack, or system crash? If so, you know how frustrating and stressful it can be to recover your data. Fortunately, there is a powerful and easy-to-use software that can help you: Easy Recovery Pro.

-

Easy Recovery Pro is a data recovery software that supports all Windows PCs and laptops, including Windows 10 and Windows 11. It can recover data from various storage devices, such as hard drives, external drives, USB flash drives, memory cards, and more. It can also recover almost any file type, such as photos, videos, audio files, emails, documents, etc.

-

Easy Recovery Pro Crack


Download ————— https://byltly.com/2uKxj8



-

In this article, we will show you how to use Easy Recovery Pro to restore your lost data on Windows in three simple steps.

- -

Step 1: Download and Install Easy Recovery Pro

-

To get started, you need to download and install Easy Recovery Pro on a working computer. You can get it from the official website[^1^]. There are different editions available for different needs and budgets. You can choose the one that suits you best.

-

After downloading the software, run the setup file and follow the instructions to install it on your computer. Make sure you have enough disk space and administrator privileges.

- -

Step 2: Connect Your Storage Device and Scan for Lost Data

-

Next, you need to connect the storage device that contains your lost data to the computer where you installed Easy Recovery Pro. For example, if you want to recover data from an external hard drive, plug it into a USB port.

-

Then, launch Easy Recovery Pro and select the storage device from the list of available drives. Click "Scan" to start searching for lost data. The scanning process may take some time depending on the size and condition of your device.

-

During the scan, you can preview the found files by clicking on them. You can also pause or stop the scan at any time if you find what you need.

- -

Step 3: Recover Your Lost Data

-

When the scan is complete, you will see a list of recoverable files sorted by categories. You can filter them by file type, date, size, or name. You can also use the search box to find specific files.

-

-

To recover your lost data, simply select the files or folders that you want and click "Recover". You will be asked to choose a location to save the recovered data. It is recommended that you save them to a different drive than the original one to avoid overwriting.

-

After the recovery process is done, you can check your recovered data and use them as normal.

- -

Conclusion

-

Easy Recovery Pro is a reliable and easy-to-use data recovery tool that can help you restore your lost data on Windows in various scenarios. It has a user-friendly interface and powerful features that make data recovery a breeze. Whether you are a professional or a beginner, you can use Easy Recovery Pro to get back your precious data in minutes.

-

If you want to try Easy Recovery Pro for free, you can download the trial version from the official website[^1^]. The trial version allows you to scan and preview your lost data, but not recover them. To recover your data without limitations, you need to purchase a license key.

-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/FSX Steam Edition Air Hauler 2 Add-On Ativador Download [addons].md b/spaces/1gistliPinn/ChatGPT4/Examples/FSX Steam Edition Air Hauler 2 Add-On Ativador Download [addons].md deleted file mode 100644 index 50d334768204dbc6cb504a5ef0c168b7914bee02..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/FSX Steam Edition Air Hauler 2 Add-On Ativador Download [addons].md +++ /dev/null @@ -1,15 +0,0 @@ -

FSX Steam Edition: Air Hauler 2 Add-On Ativador download [addons]


DOWNLOAD ———>>> https://imgfil.com/2uxXox



- -https://coub.com/stories/3304017-fsx-steam-edition-air-hauler-2-add-on-ativador-download-addons-top-secret-games-pc-game-try-now-sonyi-playstation -3-station-4-playstation-3/-/ -#10 -Nothing, nothing. -I just play the game and enjoy it. -I have been doing this for a long time, and it has worked for me. -I am a very happy person and enjoy doing this all year long. -I know of no one who is still having problems with this. -But I have a few questions for you, so please give me some answers: -1. When you download the game, it says that it is an add-on. -Do you
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Facile Caisse Crack BEST Serial.md b/spaces/1gistliPinn/ChatGPT4/Examples/Facile Caisse Crack BEST Serial.md deleted file mode 100644 index 4250941eb56abf1e7687405d9c3d5233d189771c..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Facile Caisse Crack BEST Serial.md +++ /dev/null @@ -1,16 +0,0 @@ -

facile caisse Crack, Serial


Download File ••• https://imgfil.com/2uxZhy



- -…ities and friction. - -So we are facing a rather particular case, in which we have to distinguish between the system manager, the clients, and the beneficiaries. The client (the taxpayer, the insurance customer, the infrastructure builder, or the consulting engineer) is today more of a beneficiary under current policies, and that is what must be understood. - -Candidates gain credibility through their behavior, and the winners are smarter than their opponents. Controlling them is no simple matter, but they manage it; their expertise is broader and their ability to make decisions quickly is appreciated; they come close to hands-on intelligence. - -But if we looked beyond political color, this office has now decided to be wary of its collaborators. Intelligence is a game of distraction, and the people who listen to the "leaders" we are being asked to send back to school will not necessarily be the brightest. - -We have long felt attacked, and then assaulted, by this kind of idea, and of course these men were activists, living in a world of beasts (freshly assaulted by feminism, and above all the little ones who were called that way, the little ladies), and they reacted like machos, in truth. - -Activists are also men who are very
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/First Year Engineering Drawing By Ac Parkinson Pdf Free Download.md b/spaces/1gistliPinn/ChatGPT4/Examples/First Year Engineering Drawing By Ac Parkinson Pdf Free Download.md deleted file mode 100644 index 0d358ae8ffeabfa74e4574d18d5704ad9ecfc187..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/First Year Engineering Drawing By Ac Parkinson Pdf Free Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

first year engineering drawing by ac parkinson pdf free download


DOWNLOADhttps://imgfil.com/2uxYjf



- -Download File PDF Basic Engineering Drawing By A C Parkinson. Basic Engineering ... manak bhavan, 9 bahadur shah zafar marg new delhi 110002 . sp 46 : 1988 first published march ... Nadare BE In Mechanical Engineering One Year Experience in Bajaj Auto Ltd. ... 7.1 - Ten Basic Steps to Free Hand Sketching for .
-
-
-

diff --git a/spaces/1phancelerku/anime-remove-background/Bingo Holiday Download the Classic Special Bingo Games on Your Device.md b/spaces/1phancelerku/anime-remove-background/Bingo Holiday Download the Classic Special Bingo Games on Your Device.md deleted file mode 100644 index 5536bc42b241690a582b52cc8a854ccbdb9402c7..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Bingo Holiday Download the Classic Special Bingo Games on Your Device.md +++ /dev/null @@ -1,222 +0,0 @@ -
-

How to Download and Play Bingo Holiday: The Best Bingo Game on Your Device

-

If you are looking for a fun and exciting bingo game that will keep you entertained for hours, then you should try Bingo Holiday. This is a classic and special bingo game that offers you more than just bingo. You can explore over 110 appealing scenes, travel around the world, send and receive gifts with friends, challenge events, and collect epic collections. You can also enjoy various bingo styles, power-ups, tournaments, and jackpots. In this article, we will show you how to download and play Bingo Holiday on your Android or iOS device, or online on your browser.

-

What is Bingo Holiday?

-

A brief introduction to the game and its features

-

Bingo Holiday is a bingo game developed by AE Magwin, a company that specializes in casino and casual games. It was released in 2016 and has since gained over 5 million downloads on Google Play Store and over 13 thousand ratings on App Store. It is rated as one of the best bingo games on both platforms.

-

bingo holiday download


Download ✶✶✶ https://jinyurl.com/2uNLwN



-

Bingo Holiday has many features that make it stand out from other bingo games. Some of them are:

- -

Why you should play Bingo Holiday

-

Bingo Holiday is not only a bingo game but also a way to relax and have fun. Here are some reasons why you should play Bingo Holiday:

- -

How to Download Bingo Holiday for Android Devices

-

Step-by-step instructions with screenshots

-

If you have an Android device, you can download Bingo Holiday from the Google Play Store. Here are the steps to do it:

-
    -
  1. Open the Google Play Store app on your device and search for "Bingo Holiday" in the search bar.
  2. -
  3. Tap on the Bingo Holiday icon that appears in the search results. You will see the app's page with its description, ratings, reviews, screenshots, and more.
  4. -
  5. Tap on the green "Install" button to start downloading the app. You may need to grant some permissions for the app to access your device's storage, location, contacts, and other features.
  6. -
  7. Wait for the download and installation process to finish. You will see a notification when it is done.
  8. -
  9. Tap on the "Open" button to launch the app. You will see a welcome screen with some instructions and options. You can choose to log in with your Facebook account or play as a guest. You can also change the language of the app from English to other languages, such as Spanish, French, German, Portuguese, Italian, Russian, Turkish, Arabic, Chinese, Japanese, Korean, and more.
  10. -
  11. Enjoy playing Bingo Holiday on your Android device!
  12. -
-

Here are some screenshots of the app on an Android device:

- - - - - - - - - - - -
Bingo Holiday icon on Google Play Store | Bingo Holiday app page on Google Play Store | Bingo Holiday welcome screen
Bingo Holiday main menu | Bingo Holiday bingo room selection | Bingo Holiday bingo gameplay
-

Tips and tricks for playing Bingo Holiday on Android

-

Here are some tips and tricks that will help you play Bingo Holiday better on your Android device:

- -

How to Download Bingo Holiday for iOS Devices

-

Step-by-step instructions with screenshots

-

If you have an iOS device, you can download Bingo Holiday from the App Store. Here are the steps to do it:

-

bingo holiday free download
-bingo holiday app download
-bingo holiday game download
-bingo holiday apk download
-bingo holiday mod apk download
-bingo holiday for pc download
-bingo holiday for android download
-bingo holiday for ios download
-bingo holiday for mac download
-bingo holiday for windows download
-bingo holiday online download
-bingo holiday offline download
-bingo holiday latest version download
-bingo holiday update download
-bingo holiday new version download
-bingo holiday old version download
-bingo holiday hack download
-bingo holiday cheats download
-bingo holiday unlimited credits download
-bingo holiday free credits download
-bingo holiday free coins download
-bingo holiday free power ups download
-bingo holiday free gifts download
-bingo holiday free spins download
-bingo holiday free slots download
-bingo holiday classic bingo games download
-bingo holiday special bingo games download
-bingo holiday live bingo games download
-bingo holiday multiplayer bingo games download
-bingo holiday tournament bingo games download
-bingo holiday world tour bingo games download
-bingo holiday travel bingo games download
-bingo holiday adventure bingo games download
-bingo holiday party bingo games download
-bingo holiday fun bingo games download
-bingo holiday best bingo games download
-bingo holiday top rated bingo games download
-bingo holiday reviews and ratings download
-bingo holiday screenshots and videos download
-bingo holiday tips and tricks download
-how to play bingo holiday game download
-how to win in bingo holiday game download
-how to get more credits in bingo holiday game download
-how to get more coins in bingo holiday game download
-how to get more power ups in bingo holiday game download
-how to get more gifts in bingo holiday game download
-how to get more spins in bingo holiday game download
-how to get more slots in bingo holiday game download

-
    -
  1. Open the App Store app on your device and search for "Bingo Holiday" in the search bar.
  2. -
  3. Tap on the Bingo Holiday icon that appears in the search results. You will see the app's page with its description, ratings, reviews, screenshots, and more.
  4. -
  5. Tap on the blue "Get" button to start downloading the app. You may need to enter your Apple ID password or use Touch ID or Face ID to confirm your purchase.
  6. -
  7. Wait for the download and installation process to finish. You will see a notification when it is done.
  8. -
  9. Tap on the "Open" button to launch the app. You will see a welcome screen with some instructions and options. You can choose to log in with your Facebook account or play as a guest. You can also change the language of the app from English to other languages, such as Spanish, French, German, Portuguese, Italian, Russian, Turkish, Arabic, Chinese, Japanese, Korean, and more.
  10. -
  11. Enjoy playing Bingo Holiday on your iOS device!
  12. -
-

Here are some screenshots of the app on an iOS device:

- - - - - - - - - - - -
Bingo Holiday icon on App Store | Bingo Holiday app page on App Store | Bingo Holiday welcome screen
Bingo Holiday main menu | Bingo Holiday bingo room selection | Bingo Holiday bingo gameplay
-

Tips and tricks for playing Bingo Holiday on iOS

-

Here are some tips and tricks that will help you play Bingo Holiday better on your iOS device:

- -

How to Play Bingo Holiday Online

-

The benefits of playing Bingo Holiday online

-

If you don't have an Android or iOS device, or you don't want to download the app, you can still play Bingo Holiday online on your browser. There are some benefits of playing Bingo Holiday online, such as:

- -

How to access Bingo Holiday online and start playing

-

Here are the steps to access Bingo Holiday online and start playing:

-
    -
  1. Open your browser and go to https://www.bingoholiday.com/, which is the official website of Bingo Holiday.
  2. -
  3. You will see a landing page with some information and options about Bingo Holiday. You can choose to log in with your Facebook account or play as a guest. You can also change the language of the website from English to other languages, such as Spanish, French, German, Portuguese, Italian, Russian, Turkish, Arabic, Chinese, Japanese, Korean, and more.
  4. -
  5. After logging in or choosing to play as a guest, you will see a loading screen with some tips and hints about Bingo Holiday. Wait for the game to load completely.
  6. -
  7. You will see a main menu with different options and modes to play Bingo Holiday. You can choose from World Tour, Tournament, Jackpot, Collection, and more. You can also check your profile, settings, friends, gifts, and messages by tapping on the icons at the top of the screen.
  8. -
  9. Choose the mode or option you want to play and tap on it. You will see a selection of bingo rooms that have different themes, rules, prizes, and collections. You can also see the number of players, the entry fee, and the jackpot amount for each room.
  10. -
  11. Choose the room you want to play and tap on it. You will see a confirmation screen with some information and options about the room. You can choose the number of cards you want to play, the power-ups you want to use, and the auto-daub feature. You can also see the prize pool, the collection progress, and the chat room.
  12. -
  13. Tap on the green "Play" button to start playing bingo. You will see your bingo cards and the bingo caller at the bottom of the screen. You can also see the timer, the leaderboard, the power-ups, and the pause button at the top of the screen.
  14. -
  15. Daub the numbers on your cards as they are called by tapping on them. You can also use power-ups to help you win faster and easier. You can also chat with other players in the chat room or use emojis to express yourself.
  16. -
  17. When you have a bingo, tap on the "Bingo" button to claim it. You will see a celebration screen with your prize and rank. You can also see how many bingos are left in the room and how much time is left until the next game.
  18. -
  19. Enjoy playing Bingo Holiday online on your browser!
  20. -
-

Here are some screenshots of the website on a browser:

- - - - - - - - - - - -
Bingo Holiday landing page | Bingo Holiday main menu | Bingo Holiday bingo room selection
Bingo Holiday confirmation screen | Bingo Holiday bingo gameplay | Bingo Holiday celebration screen
-

Conclusion

-

A summary of the main points and a call to action

-

Bingo Holiday is a bingo game that offers you more than just bingo. You can explore over 110 appealing scenes, travel around the world, send and receive gifts with friends, challenge events, and collect epic collections. You can also enjoy various bingo styles, power-ups, tournaments, and jackpots.

-

You can download and play Bingo Holiday on your Android or iOS device, or online on your browser. We have shown you how to do it in this article with step-by-step instructions and screenshots. We have also given you some tips and tricks that will help you play Bingo Holiday better on your device or online.

-

If you are ready to join the fun and excitement of Bingo Holiday, don't wait any longer. Download or access Bingo Holiday today and start playing bingo like never before!

-

FAQs

-

Q1: Is Bingo Holiday free to play?

-

A1: Yes, Bingo Holiday is free to play. You don't need to pay anything to download or play Bingo Holiday. You can enjoy all the features and content without spending a dime.

-

Q2: How can I get more credits and power-ups in Bingo Holiday?

-

A2: There are many ways to get more credits and power-ups in Bingo Holiday. Some of them are:

- -

Q3: How can I play Bingo Holiday with my friends?

-

A3: There are two ways to play Bingo Holiday with your friends. One is to add them as your friends in the game and invite them to join your bingo room. The other is to join a public bingo room and chat with other players who are also your friends. Here are the steps to do both:

- -

Q4: What are the different bingo rooms and themes in Bingo Holiday?

-

A4: There are over 40 bingo rooms in Bingo Holiday that have different themes, rules, prizes, and collections. Some of them are:

- -

Q5: How can I contact the customer support of Bingo Holiday?

-

A5: If you have any questions, problems, suggestions, or feedback about Bingo Holiday, you can contact the customer support of Bingo Holiday by using one of these methods:

- -

The customer support team of Bingo Holiday is friendly and helpful. They will try their best to solve your issues and improve your gaming experience.

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Chicken Gun Private Server 1.3.0 Apk Join the Fun and Chaos.md b/spaces/1phancelerku/anime-remove-background/Chicken Gun Private Server 1.3.0 Apk Join the Fun and Chaos.md deleted file mode 100644 index 35756e79dfe0507674c31e92e7d3df64bdc73d26..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Chicken Gun Private Server 1.3.0 Apk Join the Fun and Chaos.md +++ /dev/null @@ -1,127 +0,0 @@ - -

Chicken Gun Private Server 1.3.0 APK: How to Download and Play

-

If you are looking for a hilarious and action-packed first-person shooter game where you can play as armed chickens, then you should check out Chicken Gun. This game lets you shoot and fight with other chickens in various modes and maps, using different weapons, beaks, sneakers, caps, and even explosive eggs. You can also customize your chicken from head to toe, making it look cool or funny or both.

-

But what if you want to have more fun and freedom with this game? What if you want to play with your friends or other players without any restrictions or limitations? What if you want to have more customization options, more maps, more modes, less lag, and more control over the game settings? Well, then you might want to try playing on a private server.

-

chicken gun private server 1.3.0 apk


Download Zip ⚙⚙⚙ https://jinyurl.com/2uNMO8



-

A private server is an unofficial mod that allows you to create or join a separate server from the official one. This way, you can play with whoever you want, whenever you want, however you want. You can also enjoy some features and benefits that are not available on the official server.

-

In this article, we will show you how to download and install Chicken Gun Private Server 1.3.0 APK on your Android device, how to join or create a private server in Chicken Gun, what are the features and benefits of playing on a private server, and some tips and tricks to improve your skills and enjoy the game more. So, without further ado, let's get started!

-

How to download and install Chicken Gun Private Server 1.3.0 APK on your device

-

Before you can play on a private server, you need to download and install Chicken Gun Private Server 1.3.0 APK on your Android device. This is an unofficial mod that is not endorsed by the developers of Chicken Gun, so you should use it at your own risk. Here are the steps to follow:

-

chicken gun mod apk private server 1.3.0
-chicken gun 1.3.0 private server download
-chicken gun private server apk 1.3.0 free
-chicken gun hack private server 1.3.0
-chicken gun private server version 1.3.0
-chicken gun private server apk mediafire 1.3.0
-chicken gun private server youtube 1.3.0
-chicken gun private server apkcombo 1.3.0
-chicken gun private server update 1.3.0
-chicken gun private server android 1.3.0
-chicken gun private server ios 1.3.0
-chicken gun private server unlimited money 1.3.0
-chicken gun private server all skins 1.3.0
-chicken gun private server gameplay 1.3.0
-chicken gun private server online 1.3.0
-chicken gun private server offline 1.3.0
-chicken gun private server new features 1.3.0
-chicken gun private server no root 1.3.0
-chicken gun private server no ads 1.3.0
-chicken gun private server latest version 1.3.0
-chicken gun private server how to install 1.3.0
-chicken gun private server how to play 1.3.0
-chicken gun private server how to join 1.3.0
-chicken gun private server how to create 1.3.0
-chicken gun private server how to download 1.3.0
-chicken gun private server review 1.3.0
-chicken gun private server tips and tricks 1.3.0
-chicken gun private server cheats and hacks 1.3.0
-chicken gun private server codes and coupons 1.3.0
-chicken gun private server glitches and bugs 1.3.0
-chicken gun private server fun and funny moments 1.3.0
-chicken gun private server best and worst weapons 1.3.0
-chicken gun private server best and worst maps 1.3.0
-chicken gun private server best and worst modes 1.3.0
-chicken gun private server best and worst skins 1..30
-chicken gun private server comparison and contrast 1..30
-chicken gun private server pros and cons 1..30
-chicken gun private server advantages and disadvantages 1..30
-chicken gun private server benefits and drawbacks 1..30
-chicken gun private server ratings and rankings 1..30

-
    -
  1. Go to this link: https://www.mediafire.com/file/8j9w9x5w7w8w9x5/Chicken_Gun_Private_Server_1.3.0.apk/file and click on the green download button to download the APK file.
  2. -
  3. Once the download is complete, locate the APK file in your device's file manager and tap on it to install it.
  4. -
  5. You might need to enable unknown sources in your device's settings to install the APK file. To do this, go to Settings > Security > Unknown Sources and toggle it on.
  6. -
  7. After the installation is done, you can launch the game from your app drawer or home screen.
  8. -
-

Congratulations! You have successfully installed Chicken Gun Private Server 1.3.0 APK on your device. Now you can join or create a private server and have fun.

-

How to join and create a private server in Chicken Gun

-

Now that you have installed Chicken Gun Private Server 1.3.0 APK on your device, you can join or create a private server in Chicken Gun. Here are the steps to follow:

-

How to join a private server in Chicken Gun

-
    -
  1. Launch the game and tap on the multiplayer button on the main menu.
  2. -
  3. Tap on the private server button on the top right corner of the screen.
  4. -
  5. You will see a list of available private servers that you can join. You can also use the search bar to find a specific server by name or password.
  6. -
  7. Tap on the server that you want to join and enter the password if required.
  8. -
  9. You will be taken to the lobby where you can see the server settings, such as the map, the mode, the number of players, and the time limit.
  10. -
  11. Tap on the ready button when you are ready to start the game.
  12. -
-

That's it! You have successfully joined a private server in Chicken Gun. Now you can play with other players and have fun.

-

How to create a private server in Chicken Gun

-
    -
  1. Launch the game and tap on the multiplayer button on the main menu.
  2. -
  3. Tap on the private server button on the top right corner of the screen.
  4. -
  5. Tap on the create button on the bottom right corner of the screen.
  6. -
  7. You will be taken to a screen where you can customize your private server settings, such as the map, the mode, the number of players, the time limit, and the password.
  8. -
  9. Tap on the create button when you are done with your settings.
  10. -
  11. You will be taken to the lobby where you can see your server name and password, as well as invite other players by tapping on the invite button.
  12. -
  13. Tap on the ready button when you are ready to start the game.
  14. -
-

That's it! You have successfully created a private server in Chicken Gun. Now you can play with your friends or other players and have fun.

-

What are the features and benefits of playing on a private server

-

Playing on a private server in Chicken Gun has some features and benefits that are not available on the official server. Here are some of them:

- -

As you can see, playing on a private server in Chicken Gun has many advantages that can make your gaming experience more enjoyable and satisfying. Of course, you should also respect the rules and etiquette of each server, and not abuse or exploit any features or benefits.

-

Tips and tricks to improve your skills and enjoy the game more

-

Playing on a private server in Chicken Gun is not only fun, but also challenging. You will face many skilled and competitive players who will test your abilities and strategies. If you want to improve your skills and enjoy the game more, here are some tips and tricks that you can use:

- -

Conclusion

-

Chicken Gun is a hilarious and action-packed first-person shooter game where you can play as armed chickens in various modes and maps. You can also customize your chicken from head to toe, making it look cool or funny or both.

-

If you want to have more fun and freedom with this game, you can try playing on a private server. A private server is an unofficial mod that allows you to create or join a separate server from the official one. This way, you can play with whoever you want, whenever you want, however you want.

-

You can also enjoy some features and benefits that are not available on the official server, such as more customization options, more maps and modes, less lag and better performance, more control over the game rules and settings, and more fun and freedom with your friends or other players.

-

In this article, we have shown you how to download and install Chicken Gun Private Server 1.3.0 APK on your Android device, how to join or create a private server in Chicken Gun, what are the features and benefits of playing on a private server, and some tips and tricks to improve your skills and enjoy the game more.

-

We hope that you have found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you.

-

Now, what are you waiting for? Go ahead and try out Chicken Gun Private Server 1.3.0 APK and have fun!

-

Thank you for reading this article.

-

FAQs

-

Here are some frequently asked questions about Chicken Gun Private Server 1.3.0 APK:

-

Q: Is Chicken Gun Private Server 1.3.0 APK safe to use?

-

A: Chicken Gun Private Server 1.3.0 APK is an unofficial mod that is not endorsed by the developers of Chicken Gun, so you should use it at your own risk. We do not take any responsibility for any damage or harm that may occur from using this mod. You should also be careful about downloading and installing any APK files from unknown sources, as they may contain viruses or malware.
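One practical way to act on that advice, if the site you download from publishes a checksum for the file, is to compare hashes before installing. The snippet below is only a minimal sketch: the filename is a placeholder for whatever your download is saved as, and it assumes Python is available on the computer holding the file. If no checksum is published, scanning the APK with an antivirus app or an online scanner is the fallback.

```python
import hashlib

# Placeholder filename -- use whatever name your download was actually saved under.
APK_PATH = "chicken-gun-private-server-1.3.0.apk"

def sha256_of(path: str) -> str:
    """Compute the SHA-256 digest of a file, reading it in chunks to keep memory use low."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b""):
            digest.update(chunk)
    return digest.hexdigest()

print(sha256_of(APK_PATH))
# Compare the printed value with the checksum published by the site you downloaded from;
# if they differ, the file was corrupted or tampered with and should not be installed.
```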

-

Q: Can I play on a private server with players who are on the official server?

-

A: No, you cannot play on a private server with players who are on the official server. You can only play with players who are also using the same mod as you. If you want to play with players who are on the official server, you need to uninstall the mod and reinstall the original game from the Google Play Store.

-

Q: Can I play on a private server offline?

-

A: No, you cannot play on a private server offline. You need an internet connection to join or create a private server in Chicken Gun. However, you can play the single-player mode offline if you want to practice your skills or have fun by yourself.

-

Q: How can I update Chicken Gun Private Server 1.3.0 APK?

-

A: To update Chicken Gun Private Server 1.3.0 APK, you need to download and install the latest version of the mod from the same source that you got it from. You should also check for updates regularly, as new features and bug fixes may be added in the future.

-

Q: How can I contact the developers of Chicken Gun Private Server 1.3.0 APK?

-

A: To contact the developers of Chicken Gun Private Server 1.3.0 APK, you can visit their website at https://chickengunmod.com/ or their Facebook page at https://www.facebook.com/chickengunmod/. You can also send them an email at chickengunmod@gmail.com. You can ask them any questions or give them any feedback or suggestions that you may have.

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Crafting and Building 1.18 APK and Start Your Adventure Today.md b/spaces/1phancelerku/anime-remove-background/Download Crafting and Building 1.18 APK and Start Your Adventure Today.md deleted file mode 100644 index dd7a7e42c69b55fc0a32da62dacf1160a95cf14f..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Crafting and Building 1.18 APK and Start Your Adventure Today.md +++ /dev/null @@ -1,84 +0,0 @@ - -

Crafting and Building 1.18 APK Download: A Guide for Beginners

-

Do you like building games? Do you want to unleash your creativity in a sandbox world? Do you want to play with your friends online or offline? If you answered yes to any of these questions, then you might want to try Crafting and Building, a free building game for Android devices.

-

In this article, we will tell you everything you need to know about Crafting and Building, including what it is, how to download it, what are its main features, and what are some tips and tricks for playing it.

-

crafting and building 1.18 apk download


DOWNLOAD ——— https://jinyurl.com/2uNSAQ



-

What is Crafting and Building?

-

Crafting and Building is a new free building game for Android devices that lets you create your own world with blocks. You can build anything you can imagine, from houses to castles to temples, and explore different biomes, such as forests, deserts, mountains, caves, and oceans. You can also play with your friends online or offline, visit their worlds, and help them with their constructions.

-

Crafting and Building is inspired by popular games like Minecraft and Terraria, but it has its own unique features and style. It has cool graphics, smooth controls, and a user-friendly interface that makes it easy to play for anyone. It also has no monsters or enemies, so you can focus on building and having fun.

-

How to download and install Crafting and Building 1.18 APK?

-

Crafting and Building 1.18 APK is the latest version of the game, released on April 19, 2023. It has some new features and improvements, such as new blocks, new animals, new skins, new sounds, and bug fixes.

-

Crafting and Building 1.18 APK can be downloaded from various websites that offer APK files, such as APKCombo, Aptoide, or MCPE Planet. However, you should always be careful when downloading APK files from unknown sources, as they may contain viruses or malware that can harm your device. You should always scan any APK file before installing it on your device.

-

Crafting and Building 1.18 APK requires Android 5.1 or higher and about 387 MB of storage space on your device. To install it, you need to enable unknown sources in your device settings. This will allow you to install apps that are not from the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. Then, follow the instructions on the screen to complete the installation process.

-

What are the main features of Crafting and Building 1.18 APK?

-

Crafting and Building 1.18 APK offers a fun and creative gameplay experience for the whole family. Here are some of the main features of the game:

-

crafting and building 1.18 apk free download
-crafting and building 1.18 mod apk download
-crafting and building 1.18 apk download for android
-crafting and building 1.18 apk download latest version
-crafting and building 1.18 apk download uptodown
-crafting and building 1.18 apk download apkpure
-crafting and building 1.18 apk download for pc
-crafting and building 1.18 apk download no ads
-crafting and building 1.18 apk download unlimited money
-crafting and building 1.18 apk download offline
-crafting and building 1.18 apk download hack
-crafting and building 1.18 apk download mediafıre
-crafting and building 1.18 apk download android 11
-crafting and building 1.18 apk download ios
-crafting and building 1.18 apk download windows 10
-crafting and building 1.18 apk download full version
-crafting and building 1.18 apk download online
-crafting and building 1.18 apk download without verification
-crafting and building 1.18 apk download mega
-crafting and building 1.18 apk download google drive
-crafting and building 1.18 apk download update
-crafting and building 1.18 apk download old version
-crafting and building 1.18 apk download cracked
-crafting and building 1.18 apk download premium
-crafting and building 1.18 apk download mod menu
-crafting and building 1.18 apk download original
-crafting and building 1.18 apk download android oyun club
-crafting and building 1.18 apk download rexdl
-crafting and building 1.18 apk download revdl
-crafting and building 1.18 apk download happy mod
-crafting and building 1.18 apk download an1.com
-crafting and building 1.18 apk download mob.org
-crafting and building 1.18 apk download malavida
-crafting and building 1.18 apk download softonic
-crafting and building 1.18 apk download appmirror.net
-crafting and building 1.18 apk download appvn.com
-crafting and building 1.18 apk download blackmod.net
-crafting and building 1.18 apk download platinmods.com
-crafting and building 1.18 apk download androidpolska.pl
-crafting and building 1.18 apk download apkmody.io

- -

What are some tips and tricks for playing Crafting and Building?

-

If you want to get the most out of Crafting and Building, here are some tips and tricks that might help you:

- -

Conclusion

-

Crafting and Building is a great game for anyone who loves building games. It is easy to download and install on your Android device, and it offers endless possibilities for creativity, exploration, and multiplayer fun. You can build anything you can imagine, play with your friends online or offline, explore different biomes, and interact with animals and villagers. The game has cool graphics, smooth controls, and a user-friendly interface that makes it suitable for all ages.

-

If you are looking for a new building game to try, you should definitely give Crafting and Building a chance. You will not regret it!

-

FAQs

-

Is Crafting and Building free to play?

-

Yes, Crafting and Building is completely free to play. However, it contains ads that can be removed by purchasing the Pro DLC.

-

Is Crafting and Building safe to download?

-

Yes, Crafting and Building is safe to download from reputable sources. However, you should always scan any APK file before installing it on your device.

-

Is Crafting and Building compatible with Minecraft?

-

No, Crafting and Building is not compatible with Minecraft. They are different games with different features.

-

How can I customize my character in Crafting and Building?

-

You can customize your character in Crafting and Building by choosing your gender, skin color, hair style, clothes, accessories, and more.

-

How can I contact the developers of Crafting and Building?

-

You can contact the developers of Crafting and Building by emailing them at protonmobile@gmail.com or visiting their website at https://protonmobile.com/.

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Rope Hero APK and Become a Superhero on Your Phone.md b/spaces/1phancelerku/anime-remove-background/Download Rope Hero APK and Become a Superhero on Your Phone.md deleted file mode 100644 index 549b1bf71d06f70a0aca6a33ae112357ed8afb0b..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Rope Hero APK and Become a Superhero on Your Phone.md +++ /dev/null @@ -1,124 +0,0 @@ - -

Rope Hero APK Download Game: A Guide for Android Users

-

If you are looking for a fun and action-packed game that lets you swing around the city like Spider-Man, fight against crime and injustice, and customize your hero with different outfits and weapons, then you might want to try Rope Hero. Rope Hero is a free game for Android devices that has been downloaded over 100 million times on Google Play Store. In this article, we will tell you what Rope Hero is, how to download and install it on your Android device, why you should play it, and some tips and tricks to help you enjoy it more.

-

rope hero apk download game


DOWNLOAD ►►► https://jinyurl.com/2uNUdf



-

What is Rope Hero?

-

Rope Hero is a 3D open-world action game developed by Naxeex Action & RPG Games. In this game, you play as a superhero who has a super rope and can perform mega jumps, climb buildings, and power landings. You can explore the city, complete missions, fight against gangs and criminals, drive various vehicles, and use different weapons. You can also level up your hero, upgrade your skills, and change your appearance. Rope Hero is a game that combines elements of adventure, simulation, shooting, and RPG.

-

Features of Rope Hero

-

Some of the features that make Rope Hero an exciting and addictive game are:

- -

How to download and install Rope Hero APK on Android

-

If you want to download and install Rope Hero APK on your Android device, you can follow these simple steps:

-
    -
  1. Go to Rope Hero APK (Android Game) - Free Download - APKCombo[^1^][^2^] in your browser.
  2. -
  3. Click on the "Download APK" button to download the latest version of Rope Hero APK file on your device.
  4. -
  5. Once the download is complete, locate the APK file on your device and tap on it to install it.
  6. -
  7. If you see a warning message that says "Install blocked", go to your device settings and enable "Unknown sources" or "Allow from this source" option.
  8. -
  9. Wait for the installation process to finish and then launch the game from your app drawer or home screen.
  10. -
-

Why play Rope Hero?

-

Rope Hero is a game that offers a lot of fun and entertainment for Android users who love action games. Here are some reasons why you should play Rope Hero:

-

Pros and cons of Rope Hero

-

Like any other game, Rope Hero has its pros and cons. Here are some of them:

- - - - - - - - - - - - - - - - - - - - - - - - - -
Pros | Cons
- Fun and addictive gameplay | - Some bugs and glitches
- Lots of content and variety | - Repetitive missions and enemies
- Cool and realistic graphics | - High battery and data consumption
- Easy and intuitive controls | - Ads and in-app purchases
- Free and offline mode available | - No multiplayer or online mode
-

Tips and tricks for playing Rope Hero

-

If you want to play Rope Hero like a pro, here are some tips and tricks that you can use:

-

rope hero vice town apk download game
-rope hero android game free download
-rope hero apk mod unlimited money download game
-rope hero 3 apk download game
-rope hero apk download latest version game
-rope hero apk combo download game
-rope hero apk offline download game
-rope hero apk update download game
-rope hero apk hack download game
-rope hero apk pure download game
-rope hero apk obb download game
-rope hero apk revdl download game
-rope hero apk rexdl download game
-rope hero apk uptodown download game
-rope hero apk mob.org download game
-rope hero apk mirror download game
-rope hero apk old version download game
-rope hero apk for pc download game
-rope hero apk for ios download game
-rope hero apk for windows 10 download game
-rope hero naxeex apk download game
-rope hero naxeex action rpg games apk download game
-rope hero naxeex studio apk download game
-rope hero naxeex llc apk download game
-rope hero naxeex mod apk download game
-rope hero naxeex unlimited money apk download game
-rope hero naxeex hack apk download game
-rope hero naxeex latest version apk download game
-rope hero naxeex offline apk download game
-rope hero naxeex online apk download game
-rope hero 3d open world city simulator apk download game
-rope hero 3d superhero simulator apk download game
-rope hero 3d action adventure apk download game
-rope hero 3d crime city battle apk download game
-rope hero 3d spider gangster crime city simulator apk download game
-rope hero 3d flying superhero simulator mod apk download game
-rope hero 3d flying superhero simulator hack apk download game
-rope hero 3d flying superhero simulator unlimited money apk download game
-rope hero 3d flying superhero simulator latest version apk download game
-rope hero 3d flying superhero simulator offline apk download game
-rope hero 3d flying superhero simulator online apk download game
-amazing spider stickman - super flying spiderman - spiderman games - spiderman games free - spiderman games for kids - spiderman games online - spiderman games offline - spiderman games 2023 - spiderman games 3d - spiderman games hd - spiderman games new - spiderman games best - spiderman games fun - spiderman games cool - spiderman games awesome - spiderman games amazing - spiderman games fantastic - spiderman games incredible - spiderman games ultimate - spiderman games epic - spiderman games legendary - spiderman games popular - spiderman games top - spiderman games classic - spiderman games original - spiderman games pro - spiderman games premium - spiderman games deluxe - spiderman games master - spiderman games expert - spiderman games genius - spiderman games super - spiderman games hyper - spiderman games mega - spiderman games ultra

- -

Conclusion

-

Rope Hero is a game that offers a lot of fun and entertainment for Android users who love action games. It is a game that lets you become a superhero who can swing around the city with a super rope, fight against crime and injustice, and customize your hero with different outfits and weapons. It is a game that has realistic physics and graphics, open-world gameplay, diverse missions and challenges, customization options, and multiple vehicles. It is a game that you can download and install for free on your Android device by following the simple steps we have provided in this article. It is a game that you should play if you want to experience the thrill and excitement of being a rope hero.

-

FAQs

-

Here are some frequently asked questions about Rope Hero:

-
    -
  1. What is the latest version of Rope Hero APK?
  2. -

    The latest version of Rope Hero APK is 4.1.1 which was released on June 14th 2023.

    -
  3. Is Rope Hero safe to download?
  4. -

    Rope Hero is safe to download as long as you download it from a trusted source like Rope Hero APK (Android Game) - Free Download - APKCombo. However, you should always scan the APK file with antivirus software before installing it on your device.

    -
  5. How much space does Rope Hero require on my device?
  6. -

    Rope Hero requires about 100 MB of free space on your device to install and run smoothly.

    -
  7. Can I play Rope Hero offline?
  8. -

    Yes, you can play Rope Hero offline without an internet connection. However, you will need an internet connection to access some features, such as ads, in-app purchases, and updates.

    -
  9. How can I contact the developer of Rope Hero?
  10. -

    You can contact the developer of Rope Hero by sending an email to naxeexaction@gmail.com or by visiting their website at [Naxeex Action & RPG Games].

    -
-

I hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy gaming!

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Erturul Gazi The Leader of Kayi Boyu and the Founder of a Civilization.md b/spaces/1phancelerku/anime-remove-background/Erturul Gazi The Leader of Kayi Boyu and the Founder of a Civilization.md deleted file mode 100644 index 3f7bd3f5bdae54663b6e90765cc7bde237abf256..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Erturul Gazi The Leader of Kayi Boyu and the Founder of a Civilization.md +++ /dev/null @@ -1,94 +0,0 @@ - -

Ertuğrul Gazi Oyunu: A Historical Adventure Game Based on Turkish Hero

-

If you are a fan of historical drama, action, and adventure, you might have heard of Ertuğrul Gazi Oyunu, a popular Turkish game based on the life of Ertuğrul Gazi, the father of Osman I, the founder of the Ottoman Empire. The game is a role-playing game that consists of 60 episodes, each with its own story, characters, and challenges. The game features realistic 3D graphics, professional music, high-resolution visuals, detailed scenes, multiplayer real characters, history-telling dialogues, and team directions. The game is available for Android and PC platforms, and you can download it for free from Google Play Store or Steam.

-

Who Was Ertuğrul Gazi?

-

Ertuğrul Gazi was a 13th-century bey (chief) of the Kayı tribe of Oghuz Turks, who migrated from Central Asia to Anatolia to escape the Mongol invasions. He was a brave warrior who fought against various enemies, such as the Byzantines, the Crusaders, and the Mongols. He was also a loyal ally of the Seljuks of Rum, who granted him lands in Söğüt, near Bilecik. He was the father of Osman I, who established the Ottoman Empire in 1299. Ertuğrul Gazi is considered a hero and a ghazi (a fighter for Islam) by many Turks and Muslims. He is also a popular subject of Turkish literature, art, and media.

-

ertuğrul gazi oyunu


Download 🆓 https://jinyurl.com/2uNUwa



-

How Did The Game Developers Get Inspired By His Story And Turkish Culture?

-

The game developers, UMURO, are a Turkish company that specializes in creating games with historical and cultural themes. They were inspired by the success of Diriliş: Ertuğrul, a Turkish TV series that dramatized the life of Ertuğrul Gazi and his tribe. They wanted to create a game that would allow players to experience the same adventure and excitement as the TV series. They also wanted to showcase the rich history and culture of Turkey, especially during the medieval period. They did extensive research on Ertuğrul Gazi's biography, Turkish history, geography, architecture, clothing, weapons, music, language, and customs. They also consulted with historians, experts, and consultants to ensure accuracy and authenticity.

-

What Are The Main Objectives And Challenges In The Game?

-

The game follows Ertuğrul Gazi's journey from his youth to his death. Each episode has its own plot, characters, missions, enemies, allies, locations, and rewards. The player can choose to play as Ertuğrul Gazi or one of his alps (warriors). The player can also customize their character's appearance, skills, weapons, armor, pets, etc. The main objectives of the game are to complete various tasks assigned by Ertuğrul Gazi or other characters; to fight against enemies using combat skills such as sword fighting, horse riding, archery, defense with sword and shield, direction finding with map, swimming, running fast, rolling, climbing, stealth, etc.; to explore different locations such as Söğüt, Aleppo, Karacahisar, etc.; to collect various items such as gold, silver, food, weapons, armor, etc.; to interact with other characters such as Halime Sultan, Bamsı Beyrek, Turgut Alp, etc.; and to make decisions that affect the outcome of the game.

-

How To Use Different Skills And Weapons In Combat, Horse Riding, Archery, Etc.?

-

The game has a simple and intuitive control system that allows the player to use different skills and weapons in combat, horse riding, archery, etc. The player can use the joystick on the left side of the screen to move their character; the buttons on the right side of the screen to attack, defend, jump, roll, etc.; and the icons on the top of the screen to access the map, inventory, settings, etc. The player can also switch between different weapons such as swords, axes, daggers, bows, etc. by tapping on their icons on the bottom of the screen. The player can also use their horse to travel faster and to fight enemies by using the horse icon on the bottom of the screen. The player can also use their pet (such as a wolf or an eagle) to assist them in combat by using the pet icon on the bottom of the screen.

-

What Are Some Of The Tips And Tricks To Succeed In The Game?

-

Some of the tips and tricks to succeed in the game are:
- Pay attention to the dialogues and instructions given by Ertuğrul Gazi or other characters. They will provide you with valuable information and hints about your missions and objectives.
- Explore your surroundings and collect items that can help you in your quests. You can find gold, silver, food, weapons, armor, etc. in chests, barrels, crates, tents, etc. You can also loot enemies after defeating them.
- Upgrade your skills and weapons regularly. You can use gold and silver to buy new skills and weapons from merchants or blacksmiths. You can also use food to heal yourself or your horse.
- Use your skills and weapons wisely. Different skills and weapons have different advantages and disadvantages depending on the situation. For example, swords are good for close-range combat but not for long-range combat; bows are good for long-range combat but not for close-range combat; axes are good for breaking shields but not for fast attacks; daggers are good for fast attacks but not for strong attacks; etc.
- Use your horse and pet effectively. Your horse can help you travel faster and fight enemies from a distance. Your pet can help you distract or attack enemies or find hidden items or paths.
- Make smart decisions that affect the outcome of the game. The game has multiple endings depending on your choices and actions. For example, you can choose to be loyal to or betray Ertuğrul Gazi; you can choose to spare or kill your enemies; you can choose to help or ignore your allies; etc.

-

What Are Some Of The Positive And Negative Aspects Of The Game According To Players And Critics?

-

Some of the positive aspects of the game according to players and critics:

- A captivating story based on real historical events and characters.
- Realistic 3D graphics that create an immersive atmosphere and environment.
- Professional music that enhances the mood and emotion of the game.
- High-resolution visuals that make the game look stunning and detailed.
- Detailed scenes that show the culture and lifestyle of the medieval Turks.
- Multiplayer real characters that allow players to interact with each other online.
- History-telling dialogues that educate players about Turkish history and culture.
- Team directions that allow players to cooperate with each other in missions.

Some of the negative aspects of the game according to players and critics:

- Bugs and glitches that affect gameplay and performance.
- Translation errors and grammatical mistakes that affect the quality and clarity of the text.
- Repetitive missions and objectives that limit variety and creativity.
- Unrealistic physics and animations that hurt realism and accuracy.
- Violent and graphic scenes that may not be suitable for younger or sensitive players.

-

How Does The Game Compare To Other Similar Games In The Market?

-

The game is similar to other historical adventure games on the market, such as Assassin's Creed, Prince of Persia, and Shadow of Mordor. What sets it apart is its focus on Turkish history and culture, especially the medieval period and the rise of the Ottoman Empire. It also stands out for its presentation and features, including realistic 3D graphics, professional music, high-resolution visuals, detailed scenes, multiplayer real characters, history-telling dialogues, and team directions. Its structure is distinctive as well: it is a role-playing game of 60 episodes, each with its own story, characters, and challenges, with a control system that combines combat, horse riding, archery, and other skills, and with multiple endings that depend on the player's choices and actions.

-

What Are Some Of The Suggestions And Requests For Improvement From The Players?

-

Some of the suggestions and requests for improvement from the players:

- Fix the bugs and glitches that affect the gameplay and performance of the game.
- Improve the translation and grammar of the game to make it clearer and more accurate.
- Add more variety and creativity to the missions and objectives to make the game more fun and challenging.
- Improve the physics and animations of the game to make it more realistic and accurate.
- Add more options and features for customizing the character's appearance, skills, weapons, armor, and pets to make it more personal and diverse.
- Add more historical and cultural content to the game to make it more educational and informative.
- Add more modes and levels to the game to make it more replayable and enjoyable.

-


-

Conclusion

-

Ertuğrul Gazi Oyunu is a historical adventure game based on Turkish hero Ertuğrul Gazi, the father of Osman I, the founder of the Ottoman Empire. The game is a role-playing game that consists of 60 episodes, each with its own story, characters, and challenges. The game features realistic 3D graphics, professional music, high-resolution visuals, detailed scenes, multiplayer real characters, history-telling dialogues, and team directions. The game is available for Android and PC platforms, and you can download it for free from Google Play Store or Steam. If you are interested in Turkish history and culture, or if you are looking for a thrilling and exciting game to play, you should definitely give Ertuğrul Gazi Oyunu a try. You will not regret it!

-

Do you have any questions or comments about Ertuğrul Gazi Oyunu? Do you want to share your experience or opinion about the game? Do you have any suggestions or requests for improvement for the game developers? If so, please feel free to leave a comment below or contact us through our website or social media. We would love to hear from you!

-

Thank you for reading this article. We hope you enjoyed it and learned something new. Please share this article with your friends and family who might be interested in Ertuğrul Gazi Oyunu or Turkish history and culture. And don't forget to check out our other articles on our website for more interesting and informative topics. See you next time!

-

FAQs

-

Here are some of the frequently asked questions about Ertuğrul Gazi Oyunu:

| Question | Answer |
| --- | --- |
| What is Ertuğrul Gazi Oyunu? | Ertuğrul Gazi Oyunu is a historical adventure game based on the Turkish hero Ertuğrul Gazi, the father of Osman I, the founder of the Ottoman Empire. |
| How can I download and play Ertuğrul Gazi Oyunu? | You can download it for free from the Google Play Store or Steam and play it on your Android device or PC. |
| How many episodes are there? | There are 60 episodes, each with its own story, characters, and challenges. |
| What skills and weapons can I use? | Skills such as sword fighting, horse riding, archery, defense with sword and shield, direction finding with a map, swimming, running, rolling, climbing, and stealth; weapons such as swords, axes, daggers, and bows. |
| Does the game have a multiplayer mode? | Yes, a multiplayer mode allows you to play with other players online; you can join or create a team and cooperate in missions. |

-
-
\ No newline at end of file diff --git a/spaces/1toTree/lora_test/ppdiffusers/loaders.py b/spaces/1toTree/lora_test/ppdiffusers/loaders.py deleted file mode 100644 index a201c67a19761eed73610946820b8450fe2a07c3..0000000000000000000000000000000000000000 --- a/spaces/1toTree/lora_test/ppdiffusers/loaders.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import os -from collections import defaultdict -from typing import Callable, Dict, Union - -import paddle -import paddle.nn as nn - -from .modeling_utils import _get_model_file, load_dict -from .models.cross_attention import LoRACrossAttnProcessor -from .utils import HF_CACHE, PPDIFFUSERS_CACHE, logging - -logger = logging.get_logger(__name__) - - -LORA_WEIGHT_NAME = "paddle_lora_weights.pdparams" - - -class AttnProcsLayers(nn.Layer): - def __init__(self, state_dict: Dict[str, paddle.Tensor]): - super().__init__() - self.layers = nn.LayerList(state_dict.values()) - self.mapping = {k: v for k, v in enumerate(state_dict.keys())} - self.rev_mapping = {v: k for k, v in enumerate(state_dict.keys())} - - # we add a hook to state_dict() and load_state_dict() so that the - # naming fits with `unet.attn_processors` - def map_to(state_dict, *args, **kwargs): - new_state_dict = {} - for key, value in state_dict.items(): - num = int(key.split(".")[1]) # 0 is always "layers" - new_key = key.replace(f"layers.{num}", self.mapping[num]) - new_state_dict[new_key] = value - - return new_state_dict - - def map_from(module, state_dict, *args, **kwargs): - all_keys = list(state_dict.keys()) - for key in all_keys: - replace_key = key.split(".processor")[0] + ".processor" - new_key = key.replace(replace_key, f"layers.{module.rev_mapping[replace_key]}") - state_dict[new_key] = state_dict[key] - del state_dict[key] - - self.register_state_dict_hook(map_to) - self.register_load_state_dict_pre_hook(map_from, with_module=True) - - -class UNet2DConditionLoadersMixin: - def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, paddle.Tensor]], **kwargs): - r""" - Load pretrained attention processor layers into `UNet2DConditionModel`. Attention processor layers have to be - defined in - [cross_attention.py](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py) - and be a `paddle.nn.Layer` class. - - This function is experimental and might change in the future - - Parameters: - pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`): - Can be either: - - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. - Valid model ids should have an organization name, like `google/ddpm-celebahq-256`. - - A path to a *directory* containing model weights saved using [`~ModelMixin.save_config`], e.g., - `./my_model_directory/`. - - A [paddle state - dict]. 
- from_hf_hub (bool, optional): whether to load from Huggingface Hub. - cache_dir (`Union[str, os.PathLike]`, *optional*): - Path to a directory in which a downloaded pretrained model configuration should be cached if the - standard cache should not be used. - subfolder (`str`, *optional*, defaults to `None`): - In case the relevant files are located inside a subfolder of the model repo (either remote in - huggingface.co or downloaded locally), you can specify the folder name here. - """ - - from_hf_hub = kwargs.pop("from_hf_hub", False) - if from_hf_hub: - cache_dir = kwargs.pop("cache_dir", HF_CACHE) - else: - cache_dir = kwargs.pop("cache_dir", PPDIFFUSERS_CACHE) - subfolder = kwargs.pop("subfolder", None) - weight_name = kwargs.pop("weight_name", LORA_WEIGHT_NAME) - - if not isinstance(pretrained_model_name_or_path_or_dict, dict): - model_file = _get_model_file( - pretrained_model_name_or_path_or_dict, - weights_name=weight_name, - cache_dir=cache_dir, - subfolder=subfolder, - from_hf_hub=from_hf_hub, - ) - state_dict = load_dict(model_file, map_location="cpu") - else: - state_dict = pretrained_model_name_or_path_or_dict - - # fill attn processors - attn_processors = {} - - is_lora = all("lora" in k for k in state_dict.keys()) - - if is_lora: - lora_grouped_dict = defaultdict(dict) - for key, value in state_dict.items(): - attn_processor_key, sub_key = ".".join(key.split(".")[:-3]), ".".join(key.split(".")[-3:]) - lora_grouped_dict[attn_processor_key][sub_key] = value - - for key, value_dict in lora_grouped_dict.items(): - rank = value_dict["to_k_lora.down.weight"].shape[1] # 0 -> 1, torch vs paddle nn.Linear - cross_attention_dim = value_dict["to_k_lora.down.weight"].shape[0] # 1 -> 0, torch vs paddle nn.Linear - hidden_size = value_dict["to_k_lora.up.weight"].shape[1] # 0 -> 1, torch vs paddle nn.Linear - - attn_processors[key] = LoRACrossAttnProcessor( - hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, rank=rank - ) - attn_processors[key].load_dict(value_dict) - - else: - raise ValueError(f"{model_file} does not seem to be in the correct format expected by LoRA training.") - - # set correct dtype & device - attn_processors = {k: v.to(dtype=self.dtype) for k, v in attn_processors.items()} - - # set layers - self.set_attn_processor(attn_processors) - - def save_attn_procs( - self, - save_directory: Union[str, os.PathLike], - is_main_process: bool = True, - weights_name: str = LORA_WEIGHT_NAME, - save_function: Callable = None, - ): - r""" - Save an attention procesor to a directory, so that it can be re-loaded using the - `[`~loaders.UNet2DConditionLoadersMixin.load_attn_procs`]` method. - Arguments: - save_directory (`str` or `os.PathLike`): - Directory to which to save. Will be created if it doesn't exist. - is_main_process (`bool`, *optional*, defaults to `True`): - Whether the process calling this is the main process or not. Useful when in distributed training like - TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on - the main process to avoid race conditions. - weights_name (`str`, *optional*, defaults to `LORA_WEIGHT_NAME`): - The name of weights. - save_function (`Callable`): - The function to use to save the state dictionary. Useful on distributed training like TPUs when one - need to replace `torch.save` by another method. Can be configured with the environment variable - `DIFFUSERS_SAVE_MODE`. 
- """ - if os.path.isfile(save_directory): - logger.error(f"Provided path ({save_directory}) should be a directory, not a file") - return - - if save_function is None: - save_function = paddle.save - - os.makedirs(save_directory, exist_ok=True) - - model_to_save = AttnProcsLayers(self.attn_processors) - - # Save the model - state_dict = model_to_save.state_dict() - - # Clean the folder from a previous save - for filename in os.listdir(save_directory): - full_filename = os.path.join(save_directory, filename) - # If we have a shard file that is not going to be replaced, we delete it, but only from the main process - # in distributed settings to avoid race conditions. - weights_no_suffix = weights_name.replace(".pdparams", "") - if filename.startswith(weights_no_suffix) and os.path.isfile(full_filename) and is_main_process: - os.remove(full_filename) - - # Save the model - save_function(state_dict, os.path.join(save_directory, weights_name)) - - logger.info(f"Model weights saved in {os.path.join(save_directory, weights_name)}") diff --git a/spaces/2ndelement/voicevox/voicevox_engine/setting/__init__.py b/spaces/2ndelement/voicevox/voicevox_engine/setting/__init__.py deleted file mode 100644 index ff399f92b662072737fe036b7c9832997a76a553..0000000000000000000000000000000000000000 --- a/spaces/2ndelement/voicevox/voicevox_engine/setting/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from .Setting import CorsPolicyMode, Setting -from .SettingLoader import USER_SETTING_PATH, SettingLoader - -__all__ = [ - "USER_SETTING_PATH", - "CorsPolicyMode", - "Setting", - "SettingLoader", -] diff --git a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/3millions.py b/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/3millions.py deleted file mode 100644 index c9edc2f1414e35f93abfd3dfe11a61f1f406580e..0000000000000000000000000000000000000000 --- a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/3millions.py +++ /dev/null @@ -1,23 +0,0 @@ -from easydict import EasyDict as edict - -# configs for test speed - -config = edict() -config.loss = "arcface" -config.network = "r50" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 1.0 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 128 -config.lr = 0.1 # batch size is 512 - -config.rec = "synthetic" -config.num_classes = 300 * 10000 -config.num_epoch = 30 -config.warmup_epoch = -1 -config.decay_epoch = [10, 16, 22] -config.val_targets = [] diff --git a/spaces/52Hz/HWMNet_lowlight_enhancement/model/HWMNet.py b/spaces/52Hz/HWMNet_lowlight_enhancement/model/HWMNet.py deleted file mode 100644 index b81b2d12d970dc8968942c3fcba85c7a1cd284b5..0000000000000000000000000000000000000000 --- a/spaces/52Hz/HWMNet_lowlight_enhancement/model/HWMNet.py +++ /dev/null @@ -1,283 +0,0 @@ -import torch -import torch.nn as nn -from WT.transform import DWT, IWT - -##---------- Basic Layers ---------- -def conv3x3(in_chn, out_chn, bias=True): - layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias) - return layer - -def conv(in_channels, out_channels, kernel_size, bias=False, stride=1): - return nn.Conv2d( - in_channels, out_channels, kernel_size, - padding=(kernel_size // 2), bias=bias, stride=stride) - -def bili_resize(factor): - return nn.Upsample(scale_factor=factor, mode='bilinear', align_corners=False) - -##---------- Basic Blocks ---------- -class UNetConvBlock(nn.Module): - def __init__(self, in_size, out_size, downsample): - 
super(UNetConvBlock, self).__init__() - self.downsample = downsample - self.body = [HWB(n_feat=in_size, o_feat=in_size, kernel_size=3, reduction=16, bias=False, act=nn.PReLU())]# for _ in range(wab)] - self.body = nn.Sequential(*self.body) - - if downsample: - self.downsample = PS_down(out_size, out_size, downscale=2) - - self.tail = nn.Conv2d(in_size, out_size, kernel_size=1) - - def forward(self, x): - out = self.body(x) - out = self.tail(out) - if self.downsample: - out_down = self.downsample(out) - return out_down, out - else: - return out - -class UNetUpBlock(nn.Module): - def __init__(self, in_size, out_size): - super(UNetUpBlock, self).__init__() - self.up = PS_up(in_size, out_size, upscale=2) - self.conv_block = UNetConvBlock(in_size, out_size, downsample=False) - - def forward(self, x, bridge): - up = self.up(x) - out = torch.cat([up, bridge], dim=1) - out = self.conv_block(out) - return out - -##---------- Resizing Modules (Pixel(Un)Shuffle) ---------- -class PS_down(nn.Module): - def __init__(self, in_size, out_size, downscale): - super(PS_down, self).__init__() - self.UnPS = nn.PixelUnshuffle(downscale) - self.conv1 = nn.Conv2d((downscale**2) * in_size, out_size, 1, 1, 0) - - def forward(self, x): - x = self.UnPS(x) # h/2, w/2, 4*c - x = self.conv1(x) - return x - -class PS_up(nn.Module): - def __init__(self, in_size, out_size, upscale): - super(PS_up, self).__init__() - - self.PS = nn.PixelShuffle(upscale) - self.conv1 = nn.Conv2d(in_size//(upscale**2), out_size, 1, 1, 0) - - def forward(self, x): - x = self.PS(x) # h/2, w/2, 4*c - x = self.conv1(x) - return x - -##---------- Selective Kernel Feature Fusion (SKFF) ---------- -class SKFF(nn.Module): - def __init__(self, in_channels, height=3, reduction=8, bias=False): - super(SKFF, self).__init__() - - self.height = height - d = max(int(in_channels / reduction), 4) - - self.avg_pool = nn.AdaptiveAvgPool2d(1) - self.conv_du = nn.Sequential(nn.Conv2d(in_channels, d, 1, padding=0, bias=bias), nn.PReLU()) - - self.fcs = nn.ModuleList([]) - for i in range(self.height): - self.fcs.append(nn.Conv2d(d, in_channels, kernel_size=1, stride=1, bias=bias)) - - self.softmax = nn.Softmax(dim=1) - - def forward(self, inp_feats): - batch_size, n_feats, H, W = inp_feats[1].shape - - inp_feats = torch.cat(inp_feats, dim=1) - inp_feats = inp_feats.view(batch_size, self.height, n_feats, inp_feats.shape[2], inp_feats.shape[3]) - - feats_U = torch.sum(inp_feats, dim=1) - feats_S = self.avg_pool(feats_U) - feats_Z = self.conv_du(feats_S) - - attention_vectors = [fc(feats_Z) for fc in self.fcs] - attention_vectors = torch.cat(attention_vectors, dim=1) - attention_vectors = attention_vectors.view(batch_size, self.height, n_feats, 1, 1) - - attention_vectors = self.softmax(attention_vectors) - feats_V = torch.sum(inp_feats * attention_vectors, dim=1) - - return feats_V - - -########################################################################## -# Spatial Attention Layer -class SALayer(nn.Module): - def __init__(self, kernel_size=5, bias=False): - super(SALayer, self).__init__() - self.conv_du = nn.Sequential( - nn.Conv2d(2, 1, kernel_size=kernel_size, stride=1, padding=(kernel_size - 1) // 2, bias=bias), - nn.Sigmoid() - ) - - def forward(self, x): - # torch.max will output 2 things, and we want the 1st one - max_pool, _ = torch.max(x, dim=1, keepdim=True) - avg_pool = torch.mean(x, 1, keepdim=True) - channel_pool = torch.cat([max_pool, avg_pool], dim=1) # [N,2,H,W] could add 1x1 conv -> [N,3,H,W] - y = self.conv_du(channel_pool) - - return x * y - 
-########################################################################## -# Channel Attention Layer -class CALayer(nn.Module): - def __init__(self, channel, reduction=16, bias=False): - super(CALayer, self).__init__() - # global average pooling: feature --> point - self.avg_pool = nn.AdaptiveAvgPool2d(1) - # feature channel downscale and upscale --> channel weight - self.conv_du = nn.Sequential( - nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=bias), - nn.ReLU(inplace=True), - nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=bias), - nn.Sigmoid() - ) - - def forward(self, x): - y = self.avg_pool(x) - y = self.conv_du(y) - return x * y - -########################################################################## -# Half Wavelet Dual Attention Block (HWB) -class HWB(nn.Module): - def __init__(self, n_feat, o_feat, kernel_size, reduction, bias, act): - super(HWB, self).__init__() - self.dwt = DWT() - self.iwt = IWT() - - modules_body = \ - [ - conv(n_feat*2, n_feat, kernel_size, bias=bias), - act, - conv(n_feat, n_feat*2, kernel_size, bias=bias) - ] - self.body = nn.Sequential(*modules_body) - - self.WSA = SALayer() - self.WCA = CALayer(n_feat*2, reduction, bias=bias) - - self.conv1x1 = nn.Conv2d(n_feat*4, n_feat*2, kernel_size=1, bias=bias) - self.conv3x3 = nn.Conv2d(n_feat, o_feat, kernel_size=3, padding=1, bias=bias) - self.activate = act - self.conv1x1_final = nn.Conv2d(n_feat, o_feat, kernel_size=1, bias=bias) - - def forward(self, x): - residual = x - - # Split 2 part - wavelet_path_in, identity_path = torch.chunk(x, 2, dim=1) - - # Wavelet domain (Dual attention) - x_dwt = self.dwt(wavelet_path_in) - res = self.body(x_dwt) - branch_sa = self.WSA(res) - branch_ca = self.WCA(res) - res = torch.cat([branch_sa, branch_ca], dim=1) - res = self.conv1x1(res) + x_dwt - wavelet_path = self.iwt(res) - - out = torch.cat([wavelet_path, identity_path], dim=1) - out = self.activate(self.conv3x3(out)) - out += self.conv1x1_final(residual) - - return out - - -########################################################################## -##---------- HWMNet-LOL ---------- -class HWMNet(nn.Module): - def __init__(self, in_chn=3, wf=64, depth=4): - super(HWMNet, self).__init__() - self.depth = depth - self.down_path = nn.ModuleList() - self.bili_down = bili_resize(0.5) - self.conv_01 = nn.Conv2d(in_chn, wf, 3, 1, 1) - - # encoder of UNet-64 - prev_channels = 0 - for i in range(depth): # 0,1,2,3 - downsample = True if (i + 1) < depth else False - self.down_path.append(UNetConvBlock(prev_channels + wf, (2 ** i) * wf, downsample)) - prev_channels = (2 ** i) * wf - - # decoder of UNet-64 - self.up_path = nn.ModuleList() - self.skip_conv = nn.ModuleList() - self.conv_up = nn.ModuleList() - self.bottom_conv = nn.Conv2d(prev_channels, wf, 3, 1, 1) - self.bottom_up = bili_resize(2 ** (depth-1)) - - for i in reversed(range(depth - 1)): - self.up_path.append(UNetUpBlock(prev_channels, (2 ** i) * wf)) - self.skip_conv.append(nn.Conv2d((2 ** i) * wf, (2 ** i) * wf, 3, 1, 1)) - self.conv_up.append(nn.Sequential(*[bili_resize(2 ** i), nn.Conv2d((2 ** i) * wf, wf, 3, 1, 1)])) - prev_channels = (2 ** i) * wf - - self.final_ff = SKFF(in_channels=wf, height=depth) - self.last = conv3x3(prev_channels, in_chn, bias=True) - - def forward(self, x): - img = x - scale_img = img - - ##### shallow conv ##### - x1 = self.conv_01(img) - encs = [] - ######## UNet-64 ######## - # Down-path (Encoder) - for i, down in enumerate(self.down_path): - if i == 0: - x1, x1_up = down(x1) - encs.append(x1_up) - elif (i + 
1) < self.depth: - scale_img = self.bili_down(scale_img) - left_bar = self.conv_01(scale_img) - x1 = torch.cat([x1, left_bar], dim=1) - x1, x1_up = down(x1) - encs.append(x1_up) - else: - scale_img = self.bili_down(scale_img) - left_bar = self.conv_01(scale_img) - x1 = torch.cat([x1, left_bar], dim=1) - x1 = down(x1) - - # Up-path (Decoder) - ms_result = [self.bottom_up(self.bottom_conv(x1))] - for i, up in enumerate(self.up_path): - x1 = up(x1, self.skip_conv[i](encs[-i - 1])) - ms_result.append(self.conv_up[i](x1)) - # Multi-scale selective feature fusion - msff_result = self.final_ff(ms_result) - - ##### Reconstruct ##### - out_1 = self.last(msff_result) + img - - return out_1 - -if __name__ == "__main__": - input = torch.ones(1, 3, 400, 592, dtype=torch.float, requires_grad=False).cuda() - - model = HWMNet(in_chn=3, wf=96, depth=4).cuda() - out = model(input) - flops, params = profile(model, inputs=(input,)) - - # RDBlayer = SK_RDB(in_channels=64, growth_rate=64, num_layers=3) - # print(RDBlayer) - # out = RDBlayer(input) - # flops, params = profile(RDBlayer, inputs=(input,)) - print('input shape:', input.shape) - print('parameters:', params/1e6) - print('flops', flops/1e9) - print('output shape', out.shape) diff --git a/spaces/801artistry/RVC801/infer/modules/ipex/__init__.py.py b/spaces/801artistry/RVC801/infer/modules/ipex/__init__.py.py deleted file mode 100644 index 9f53b2d3f7025b2d71369dababa4e6f2a4affc48..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/infer/modules/ipex/__init__.py.py +++ /dev/null @@ -1,165 +0,0 @@ -import os -import sys -import contextlib -import torch -import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import -from .hijacks import ipex_hijacks -from .attention import attention_init - -# pylint: disable=protected-access, missing-function-docstring, line-too-long - -def ipex_init(): # pylint: disable=too-many-statements - try: - #Replace cuda with xpu: - torch.cuda.current_device = torch.xpu.current_device - torch.cuda.current_stream = torch.xpu.current_stream - torch.cuda.device = torch.xpu.device - torch.cuda.device_count = torch.xpu.device_count - torch.cuda.device_of = torch.xpu.device_of - torch.cuda.getDeviceIdListForCard = torch.xpu.getDeviceIdListForCard - torch.cuda.get_device_name = torch.xpu.get_device_name - torch.cuda.get_device_properties = torch.xpu.get_device_properties - torch.cuda.init = torch.xpu.init - torch.cuda.is_available = torch.xpu.is_available - torch.cuda.is_initialized = torch.xpu.is_initialized - torch.cuda.is_current_stream_capturing = lambda: False - torch.cuda.set_device = torch.xpu.set_device - torch.cuda.stream = torch.xpu.stream - torch.cuda.synchronize = torch.xpu.synchronize - torch.cuda.Event = torch.xpu.Event - torch.cuda.Stream = torch.xpu.Stream - torch.cuda.FloatTensor = torch.xpu.FloatTensor - torch.Tensor.cuda = torch.Tensor.xpu - torch.Tensor.is_cuda = torch.Tensor.is_xpu - torch.cuda._initialization_lock = torch.xpu.lazy_init._initialization_lock - torch.cuda._initialized = torch.xpu.lazy_init._initialized - torch.cuda._lazy_seed_tracker = torch.xpu.lazy_init._lazy_seed_tracker - torch.cuda._queued_calls = torch.xpu.lazy_init._queued_calls - torch.cuda._tls = torch.xpu.lazy_init._tls - torch.cuda.threading = torch.xpu.lazy_init.threading - torch.cuda.traceback = torch.xpu.lazy_init.traceback - torch.cuda.Optional = torch.xpu.Optional - torch.cuda.__cached__ = torch.xpu.__cached__ - torch.cuda.__loader__ = torch.xpu.__loader__ - torch.cuda.ComplexFloatStorage 
= torch.xpu.ComplexFloatStorage - torch.cuda.Tuple = torch.xpu.Tuple - torch.cuda.streams = torch.xpu.streams - torch.cuda._lazy_new = torch.xpu._lazy_new - torch.cuda.FloatStorage = torch.xpu.FloatStorage - torch.cuda.Any = torch.xpu.Any - torch.cuda.__doc__ = torch.xpu.__doc__ - torch.cuda.default_generators = torch.xpu.default_generators - torch.cuda.HalfTensor = torch.xpu.HalfTensor - torch.cuda._get_device_index = torch.xpu._get_device_index - torch.cuda.__path__ = torch.xpu.__path__ - torch.cuda.Device = torch.xpu.Device - torch.cuda.IntTensor = torch.xpu.IntTensor - torch.cuda.ByteStorage = torch.xpu.ByteStorage - torch.cuda.set_stream = torch.xpu.set_stream - torch.cuda.BoolStorage = torch.xpu.BoolStorage - torch.cuda.os = torch.xpu.os - torch.cuda.torch = torch.xpu.torch - torch.cuda.BFloat16Storage = torch.xpu.BFloat16Storage - torch.cuda.Union = torch.xpu.Union - torch.cuda.DoubleTensor = torch.xpu.DoubleTensor - torch.cuda.ShortTensor = torch.xpu.ShortTensor - torch.cuda.LongTensor = torch.xpu.LongTensor - torch.cuda.IntStorage = torch.xpu.IntStorage - torch.cuda.LongStorage = torch.xpu.LongStorage - torch.cuda.__annotations__ = torch.xpu.__annotations__ - torch.cuda.__package__ = torch.xpu.__package__ - torch.cuda.__builtins__ = torch.xpu.__builtins__ - torch.cuda.CharTensor = torch.xpu.CharTensor - torch.cuda.List = torch.xpu.List - torch.cuda._lazy_init = torch.xpu._lazy_init - torch.cuda.BFloat16Tensor = torch.xpu.BFloat16Tensor - torch.cuda.DoubleStorage = torch.xpu.DoubleStorage - torch.cuda.ByteTensor = torch.xpu.ByteTensor - torch.cuda.StreamContext = torch.xpu.StreamContext - torch.cuda.ComplexDoubleStorage = torch.xpu.ComplexDoubleStorage - torch.cuda.ShortStorage = torch.xpu.ShortStorage - torch.cuda._lazy_call = torch.xpu._lazy_call - torch.cuda.HalfStorage = torch.xpu.HalfStorage - torch.cuda.random = torch.xpu.random - torch.cuda._device = torch.xpu._device - torch.cuda.classproperty = torch.xpu.classproperty - torch.cuda.__name__ = torch.xpu.__name__ - torch.cuda._device_t = torch.xpu._device_t - torch.cuda.warnings = torch.xpu.warnings - torch.cuda.__spec__ = torch.xpu.__spec__ - torch.cuda.BoolTensor = torch.xpu.BoolTensor - torch.cuda.CharStorage = torch.xpu.CharStorage - torch.cuda.__file__ = torch.xpu.__file__ - torch.cuda._is_in_bad_fork = torch.xpu.lazy_init._is_in_bad_fork - #torch.cuda.is_current_stream_capturing = torch.xpu.is_current_stream_capturing - - #Memory: - torch.cuda.memory = torch.xpu.memory - if 'linux' in sys.platform and "WSL2" in os.popen("uname -a").read(): - torch.xpu.empty_cache = lambda: None - torch.cuda.empty_cache = torch.xpu.empty_cache - torch.cuda.memory_stats = torch.xpu.memory_stats - torch.cuda.memory_summary = torch.xpu.memory_summary - torch.cuda.memory_snapshot = torch.xpu.memory_snapshot - torch.cuda.memory_allocated = torch.xpu.memory_allocated - torch.cuda.max_memory_allocated = torch.xpu.max_memory_allocated - torch.cuda.memory_reserved = torch.xpu.memory_reserved - torch.cuda.memory_cached = torch.xpu.memory_reserved - torch.cuda.max_memory_reserved = torch.xpu.max_memory_reserved - torch.cuda.max_memory_cached = torch.xpu.max_memory_reserved - torch.cuda.reset_peak_memory_stats = torch.xpu.reset_peak_memory_stats - torch.cuda.reset_max_memory_cached = torch.xpu.reset_peak_memory_stats - torch.cuda.reset_max_memory_allocated = torch.xpu.reset_peak_memory_stats - torch.cuda.memory_stats_as_nested_dict = torch.xpu.memory_stats_as_nested_dict - torch.cuda.reset_accumulated_memory_stats = 
torch.xpu.reset_accumulated_memory_stats - - #RNG: - torch.cuda.get_rng_state = torch.xpu.get_rng_state - torch.cuda.get_rng_state_all = torch.xpu.get_rng_state_all - torch.cuda.set_rng_state = torch.xpu.set_rng_state - torch.cuda.set_rng_state_all = torch.xpu.set_rng_state_all - torch.cuda.manual_seed = torch.xpu.manual_seed - torch.cuda.manual_seed_all = torch.xpu.manual_seed_all - torch.cuda.seed = torch.xpu.seed - torch.cuda.seed_all = torch.xpu.seed_all - torch.cuda.initial_seed = torch.xpu.initial_seed - - #AMP: - torch.cuda.amp = torch.xpu.amp - if not hasattr(torch.cuda.amp, "common"): - torch.cuda.amp.common = contextlib.nullcontext() - torch.cuda.amp.common.amp_definitely_not_available = lambda: False - try: - torch.cuda.amp.GradScaler = torch.xpu.amp.GradScaler - except Exception: # pylint: disable=broad-exception-caught - try: - from .gradscaler import gradscaler_init # pylint: disable=import-outside-toplevel, import-error - gradscaler_init() - torch.cuda.amp.GradScaler = torch.xpu.amp.GradScaler - except Exception: # pylint: disable=broad-exception-caught - torch.cuda.amp.GradScaler = ipex.cpu.autocast._grad_scaler.GradScaler - - #C - torch._C._cuda_getCurrentRawStream = ipex._C._getCurrentStream - ipex._C._DeviceProperties.major = 2023 - ipex._C._DeviceProperties.minor = 2 - - #Fix functions with ipex: - torch.cuda.mem_get_info = lambda device=None: [(torch.xpu.get_device_properties(device).total_memory - torch.xpu.memory_allocated(device)), torch.xpu.get_device_properties(device).total_memory] - torch._utils._get_available_device_type = lambda: "xpu" - torch.has_cuda = True - torch.cuda.has_half = True - torch.cuda.is_bf16_supported = lambda *args, **kwargs: True - torch.cuda.is_fp16_supported = lambda *args, **kwargs: True - torch.version.cuda = "11.7" - torch.cuda.get_device_capability = lambda *args, **kwargs: [11,7] - torch.cuda.get_device_properties.major = 11 - torch.cuda.get_device_properties.minor = 7 - torch.cuda.ipc_collect = lambda *args, **kwargs: None - torch.cuda.utilization = lambda *args, **kwargs: 0 - - ipex_hijacks() - attention_init() - except Exception as e: - return False, e - return True, None \ No newline at end of file diff --git a/spaces/A00001/bingothoo/src/components/ui/tooltip.tsx b/spaces/A00001/bingothoo/src/components/ui/tooltip.tsx deleted file mode 100644 index af1d48beb90dd5ae311796539843700871052cae..0000000000000000000000000000000000000000 --- a/spaces/A00001/bingothoo/src/components/ui/tooltip.tsx +++ /dev/null @@ -1,30 +0,0 @@ -'use client' - -import * as React from 'react' -import * as TooltipPrimitive from '@radix-ui/react-tooltip' - -import { cn } from '@/lib/utils' - -const TooltipProvider = TooltipPrimitive.Provider - -const Tooltip = TooltipPrimitive.Root - -const TooltipTrigger = TooltipPrimitive.Trigger - -const TooltipContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, sideOffset = 4, ...props }, ref) => ( - -)) -TooltipContent.displayName = TooltipPrimitive.Content.displayName - -export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider } diff --git a/spaces/AIBoy1993/segment_anything_webui/app.py b/spaces/AIBoy1993/segment_anything_webui/app.py deleted file mode 100644 index 2a9b8da777317329b6cbadc1560133de99d7ca07..0000000000000000000000000000000000000000 --- a/spaces/AIBoy1993/segment_anything_webui/app.py +++ /dev/null @@ -1,198 +0,0 @@ -import os -import cv2 -import numpy as np -import gradio as gr -from inference import run_inference - - -# points color and marker 
-colors = [(255, 0, 0), (0, 255, 0)] -markers = [1, 5] - -# image examples -# in each list, the first element is image path, -# the second is id (used for original_image State), -# the third is an empty list (used for selected_points State) -image_examples = [ - [os.path.join(os.path.dirname(__file__), "./images/53960-scaled.jpg"), 0, []], - [os.path.join(os.path.dirname(__file__), "./images/2388455-scaled.jpg"), 1, []], - [os.path.join(os.path.dirname(__file__), "./images/1.jpg"),2,[]], - [os.path.join(os.path.dirname(__file__), "./images/2.jpg"),3,[]], - [os.path.join(os.path.dirname(__file__), "./images/3.jpg"),4,[]], - [os.path.join(os.path.dirname(__file__), "./images/4.jpg"),5,[]], - [os.path.join(os.path.dirname(__file__), "./images/5.jpg"),6,[]], - [os.path.join(os.path.dirname(__file__), "./images/6.jpg"),7,[]], - [os.path.join(os.path.dirname(__file__), "./images/7.jpg"),8,[]], - [os.path.join(os.path.dirname(__file__), "./images/8.jpg"),9,[]] -] -# video examples -video_examples = [ - os.path.join(os.path.dirname(__file__), "./images/video1.mp4"), - os.path.join(os.path.dirname(__file__), "./images/video2.mp4") -] - - -with gr.Blocks() as demo: - with gr.Row(): - gr.Markdown( - '''# Segment Anything!🚀 - The Segment Anything Model (SAM) produces high quality object masks from input prompts such as points or boxes, and it can be used to generate masks for all objects in an image. More information can be found in [**Official Project**](https://segment-anything.com/). - [![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm.svg)](https://huggingface.co/spaces/AIBoy1993/segment_anything_webui?duplicate=true) - ''' - ) - with gr.Row(): - # select model - model_type = gr.Dropdown(["vit_b", "vit_l", "vit_h"], value='vit_b', label="Select Model") - # select device - device = gr.Dropdown(["cpu", "cuda"], value='cpu', label="Select Device") - - # SAM parameters - with gr.Accordion(label='Parameters', open=False): - with gr.Row(): - points_per_side = gr.Number(value=32, label="points_per_side", precision=0, - info='''The number of points to be sampled along one side of the image. The total - number of points is points_per_side**2.''') - pred_iou_thresh = gr.Slider(value=0.88, minimum=0, maximum=1.0, step=0.01, label="pred_iou_thresh", - info='''A filtering threshold in [0,1], using the model's predicted mask quality.''') - stability_score_thresh = gr.Slider(value=0.95, minimum=0, maximum=1.0, step=0.01, label="stability_score_thresh", - info='''A filtering threshold in [0,1], using the stability of the mask under - changes to the cutoff used to binarize the model's mask predictions.''') - min_mask_region_area = gr.Number(value=0, label="min_mask_region_area", precision=0, - info='''If >0, postprocessing will be applied to remove disconnected regions - and holes in masks with area smaller than min_mask_region_area.''') - with gr.Row(): - stability_score_offset = gr.Number(value=1, label="stability_score_offset", - info='''The amount to shift the cutoff when calculated the stability score.''') - box_nms_thresh = gr.Slider(value=0.7, minimum=0, maximum=1.0, step=0.01, label="box_nms_thresh", - info='''The box IoU cutoff used by non-maximal ression to filter duplicate masks.''') - crop_n_layers = gr.Number(value=0, label="crop_n_layers", precision=0, - info='''If >0, mask prediction will be run again on crops of the image. 
- Sets the number of layers to run, where each layer has 2**i_layer number of image crops.''') - crop_nms_thresh = gr.Slider(value=0.7, minimum=0, maximum=1.0, step=0.01, label="crop_nms_thresh", - info='''The box IoU cutoff used by non-maximal suppression to filter duplicate - masks between different crops.''') - - # Segment image - with gr.Tab(label='Image'): - with gr.Row().style(equal_height=True): - with gr.Column(): - # input image - original_image = gr.State(value=None) # store original image without points, default None - input_image = gr.Image(type="numpy") - # point prompt - with gr.Column(): - selected_points = gr.State([]) # store points - with gr.Row(): - gr.Markdown('You can click on the image to select points prompt. Default: foreground_point.') - undo_button = gr.Button('Undo point') - radio = gr.Radio(['foreground_point', 'background_point'], label='point labels') - # text prompt to generate box prompt - text = gr.Textbox(label='Text prompt(optional)', info= - 'If you type words, the OWL-ViT model will be used to detect the objects in the image, ' - 'and the boxes will be feed into SAM model to predict mask. Please use English.', - placeholder='Multiple words are separated by commas') - owl_vit_threshold = gr.Slider(value=0.1, minimum=0, maximum=1.0, step=0.01, label="OWL ViT Object Detection threshold", - info='''A small threshold will generate more objects, but may causing OOM. - A big threshold may not detect objects, resulting in an error ''') - # run button - button = gr.Button("Auto!") - # show the image with mask - with gr.Tab(label='Image+Mask'): - output_image = gr.Image(type='numpy') - # show only mask - with gr.Tab(label='Mask'): - output_mask = gr.Image(type='numpy') - def process_example(img, ori_img, sel_p): - return ori_img, [] - - example = gr.Examples( - examples=image_examples, - inputs=[input_image, original_image, selected_points], - outputs=[original_image, selected_points], - fn=process_example, - run_on_click=True - ) - - # Segment video - with gr.Tab(label='Video'): - with gr.Row().style(equal_height=True): - with gr.Column(): - input_video = gr.Video() - with gr.Row(): - button_video = gr.Button("Auto!") - output_video = gr.Video(format='mp4') - gr.Markdown(''' - **Note:** processing video will take a long time, please upload a short video. 
- ''') - gr.Examples( - examples=video_examples, - inputs=input_video, - outputs=output_video - ) - - # once user upload an image, the original image is stored in `original_image` - def store_img(img): - return img, [] # when new image is uploaded, `selected_points` should be empty - input_image.upload( - store_img, - [input_image], - [original_image, selected_points] - ) - - # user click the image to get points, and show the points on the image - def get_point(img, sel_pix, point_type, evt: gr.SelectData): - if point_type == 'foreground_point': - sel_pix.append((evt.index, 1)) # append the foreground_point - elif point_type == 'background_point': - sel_pix.append((evt.index, 0)) # append the background_point - else: - sel_pix.append((evt.index, 1)) # default foreground_point - # draw points - for point, label in sel_pix: - cv2.drawMarker(img, point, colors[label], markerType=markers[label], markerSize=20, thickness=5) - if img[..., 0][0, 0] == img[..., 2][0, 0]: # BGR to RGB - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - return img if isinstance(img, np.ndarray) else np.array(img) - input_image.select( - get_point, - [input_image, selected_points, radio], - [input_image], - ) - - # undo the selected point - def undo_points(orig_img, sel_pix): - if isinstance(orig_img, int): # if orig_img is int, the image if select from examples - temp = cv2.imread(image_examples[orig_img][0]) - temp = cv2.cvtColor(temp, cv2.COLOR_BGR2RGB) - else: - temp = orig_img.copy() - # draw points - if len(sel_pix) != 0: - sel_pix.pop() - for point, label in sel_pix: - cv2.drawMarker(temp, point, colors[label], markerType=markers[label], markerSize=20, thickness=5) - if temp[..., 0][0, 0] == temp[..., 2][0, 0]: # BGR to RGB - temp = cv2.cvtColor(temp, cv2.COLOR_BGR2RGB) - return temp if isinstance(temp, np.ndarray) else np.array(temp) - undo_button.click( - undo_points, - [original_image, selected_points], - [input_image] - ) - - # button image - button.click(run_inference, inputs=[device, model_type, points_per_side, pred_iou_thresh, stability_score_thresh, - min_mask_region_area, stability_score_offset, box_nms_thresh, crop_n_layers, - crop_nms_thresh, owl_vit_threshold, original_image, text, selected_points], - outputs=[output_image, output_mask]) - # button video - button_video.click(run_inference, inputs=[device, model_type, points_per_side, pred_iou_thresh, stability_score_thresh, - min_mask_region_area, stability_score_offset, box_nms_thresh, crop_n_layers, - crop_nms_thresh, owl_vit_threshold, input_video, text], - outputs=[output_video]) - - -demo.queue().launch(debug=True, enable_queue=True) - - - diff --git a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/inference/svs/ds_e2e.py b/spaces/AIGC-Audio/AudioGPT/NeuralSeq/inference/svs/ds_e2e.py deleted file mode 100644 index 7d69d49eff8076f97933538a1d1a59d80b553dd8..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/inference/svs/ds_e2e.py +++ /dev/null @@ -1,67 +0,0 @@ -import torch -# from inference.tts.fs import FastSpeechInfer -# from modules.tts.fs2_orig import FastSpeech2Orig -from inference.svs.base_svs_infer import BaseSVSInfer -from utils import load_ckpt -from utils.hparams import hparams -from modules.diff.shallow_diffusion_tts import GaussianDiffusion -from tasks.svs.diffsinger_task import DIFF_DECODERS -from modules.fastspeech.pe import PitchExtractor -import utils - - -class DiffSingerE2EInfer(BaseSVSInfer): - def build_model(self): - model = GaussianDiffusion( - phone_encoder=self.ph_encoder, - 
out_dims=hparams['audio_num_mel_bins'], denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams), - timesteps=hparams['timesteps'], - K_step=hparams['K_step'], - loss_type=hparams['diff_loss_type'], - spec_min=hparams['spec_min'], spec_max=hparams['spec_max'], - ) - model.eval() - load_ckpt(model, hparams['work_dir'], 'model') - - if hparams.get('pe_enable') is not None and hparams['pe_enable']: - self.pe = PitchExtractor().to(self.device) - utils.load_ckpt(self.pe, hparams['pe_ckpt'], 'model', strict=True) - self.pe.eval() - return model - - def forward_model(self, inp): - sample = self.input_to_batch(inp) - txt_tokens = sample['txt_tokens'] # [B, T_t] - spk_id = sample.get('spk_ids') - with torch.no_grad(): - output = self.model(txt_tokens, spk_id=spk_id, ref_mels=None, infer=True, - pitch_midi=sample['pitch_midi'], midi_dur=sample['midi_dur'], - is_slur=sample['is_slur']) - mel_out = output['mel_out'] # [B, T,80] - if hparams.get('pe_enable') is not None and hparams['pe_enable']: - f0_pred = self.pe(mel_out)['f0_denorm_pred'] # pe predict from Pred mel - else: - f0_pred = output['f0_denorm'] - wav_out = self.run_vocoder(mel_out, f0=f0_pred) - wav_out = wav_out.cpu().numpy() - return wav_out[0] - -if __name__ == '__main__': - inp = { - 'text': '小酒窝长睫毛AP是你最美的记号', - 'notes': 'C#4/Db4 | F#4/Gb4 | G#4/Ab4 | A#4/Bb4 F#4/Gb4 | F#4/Gb4 C#4/Db4 | C#4/Db4 | rest | C#4/Db4 | A#4/Bb4 | G#4/Ab4 | A#4/Bb4 | G#4/Ab4 | F4 | C#4/Db4', - 'notes_duration': '0.407140 | 0.376190 | 0.242180 | 0.509550 0.183420 | 0.315400 0.235020 | 0.361660 | 0.223070 | 0.377270 | 0.340550 | 0.299620 | 0.344510 | 0.283770 | 0.323390 | 0.360340', - 'input_type': 'word' - } # user input: Chinese characters - inp = { - 'text': '小酒窝长睫毛AP是你最美的记号', - 'ph_seq': 'x iao j iu w o ch ang ang j ie ie m ao AP sh i n i z ui m ei d e j i h ao', - 'note_seq': 'C#4/Db4 C#4/Db4 F#4/Gb4 F#4/Gb4 G#4/Ab4 G#4/Ab4 A#4/Bb4 A#4/Bb4 F#4/Gb4 F#4/Gb4 F#4/Gb4 C#4/Db4 C#4/Db4 C#4/Db4 rest C#4/Db4 C#4/Db4 A#4/Bb4 A#4/Bb4 G#4/Ab4 G#4/Ab4 A#4/Bb4 A#4/Bb4 G#4/Ab4 G#4/Ab4 F4 F4 C#4/Db4 C#4/Db4', - 'note_dur_seq': '0.407140 0.407140 0.376190 0.376190 0.242180 0.242180 0.509550 0.509550 0.183420 0.315400 0.315400 0.235020 0.361660 0.361660 0.223070 0.377270 0.377270 0.340550 0.340550 0.299620 0.299620 0.344510 0.344510 0.283770 0.283770 0.323390 0.323390 0.360340 0.360340', - 'is_slur_seq': '0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0', - 'input_type': 'phoneme' - } # input like Opencpop dataset. 
- DiffSingerE2EInfer.example_run(inp) - - -# CUDA_VISIBLE_DEVICES=3 python inference/svs/ds_e2e.py --config egs/egs_bases/svs/midi/e2e/opencpop/ds100_adj_rel.yaml --exp_name 0228_opencpop_ds100_rel \ No newline at end of file diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnetv1d50_8xb32_in1k.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnetv1d50_8xb32_in1k.py deleted file mode 100644 index 208bde470ad12407d7e56eddeddfc88529e3708b..0000000000000000000000000000000000000000 --- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnetv1d50_8xb32_in1k.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/resnetv1d50.py', - '../_base_/datasets/imagenet_bs32_pil_resize.py', - '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' -] diff --git a/spaces/Abhilashvj/planogram-compliance/models/tf.py b/spaces/Abhilashvj/planogram-compliance/models/tf.py deleted file mode 100644 index 65679cc5cfca83601fe33ca5cceab4cb87e71f09..0000000000000000000000000000000000000000 --- a/spaces/Abhilashvj/planogram-compliance/models/tf.py +++ /dev/null @@ -1,837 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -TensorFlow, Keras and TFLite versions of YOLOv5 -Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127 - -Usage: - $ python models/tf.py --weights yolov5s.pt - -Export: - $ python export.py --weights yolov5s.pt --include saved_model pb tflite tfjs -""" - -import argparse -import sys -from copy import deepcopy -from pathlib import Path - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -# ROOT = ROOT.relative_to(Path.cwd()) # relative - -import numpy as np -import tensorflow as tf -import torch -import torch.nn as nn -from tensorflow import keras - -from models.common import ( - C3, - SPP, - SPPF, - Bottleneck, - BottleneckCSP, - C3x, - Concat, - Conv, - CrossConv, - DWConv, - DWConvTranspose2d, - Focus, - autopad, -) -from models.experimental import MixConv2d, attempt_load -from models.yolo import Detect, Segment -from utils.activations import SiLU -from utils.general import LOGGER, make_divisible, print_args - - -class TFBN(keras.layers.Layer): - # TensorFlow BatchNormalization wrapper - def __init__(self, w=None): - super().__init__() - self.bn = keras.layers.BatchNormalization( - beta_initializer=keras.initializers.Constant(w.bias.numpy()), - gamma_initializer=keras.initializers.Constant(w.weight.numpy()), - moving_mean_initializer=keras.initializers.Constant( - w.running_mean.numpy() - ), - moving_variance_initializer=keras.initializers.Constant( - w.running_var.numpy() - ), - epsilon=w.eps, - ) - - def call(self, inputs): - return self.bn(inputs) - - -class TFPad(keras.layers.Layer): - # Pad inputs in spatial dimensions 1 and 2 - def __init__(self, pad): - super().__init__() - if isinstance(pad, int): - self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]]) - else: # tuple/list - self.pad = tf.constant( - [[0, 0], [pad[0], pad[0]], [pad[1], pad[1]], [0, 0]] - ) - - def call(self, inputs): - return tf.pad(inputs, self.pad, mode="constant", constant_values=0) - - -class TFConv(keras.layers.Layer): - # Standard convolution - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): - # ch_in, ch_out, weights, kernel, stride, 
padding, groups - super().__init__() - assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" - # TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding) - # see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch - conv = keras.layers.Conv2D( - filters=c2, - kernel_size=k, - strides=s, - padding="SAME" if s == 1 else "VALID", - use_bias=not hasattr(w, "bn"), - kernel_initializer=keras.initializers.Constant( - w.conv.weight.permute(2, 3, 1, 0).numpy() - ), - bias_initializer="zeros" - if hasattr(w, "bn") - else keras.initializers.Constant(w.conv.bias.numpy()), - ) - self.conv = ( - conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv]) - ) - self.bn = TFBN(w.bn) if hasattr(w, "bn") else tf.identity - self.act = activations(w.act) if act else tf.identity - - def call(self, inputs): - return self.act(self.bn(self.conv(inputs))) - - -class TFDWConv(keras.layers.Layer): - # Depthwise convolution - def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None): - # ch_in, ch_out, weights, kernel, stride, padding, groups - super().__init__() - assert ( - c2 % c1 == 0 - ), f"TFDWConv() output={c2} must be a multiple of input={c1} channels" - conv = keras.layers.DepthwiseConv2D( - kernel_size=k, - depth_multiplier=c2 // c1, - strides=s, - padding="SAME" if s == 1 else "VALID", - use_bias=not hasattr(w, "bn"), - depthwise_initializer=keras.initializers.Constant( - w.conv.weight.permute(2, 3, 1, 0).numpy() - ), - bias_initializer="zeros" - if hasattr(w, "bn") - else keras.initializers.Constant(w.conv.bias.numpy()), - ) - self.conv = ( - conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv]) - ) - self.bn = TFBN(w.bn) if hasattr(w, "bn") else tf.identity - self.act = activations(w.act) if act else tf.identity - - def call(self, inputs): - return self.act(self.bn(self.conv(inputs))) - - -class TFDWConvTranspose2d(keras.layers.Layer): - # Depthwise ConvTranspose2d - def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None): - # ch_in, ch_out, weights, kernel, stride, padding, groups - super().__init__() - assert ( - c1 == c2 - ), f"TFDWConv() output={c2} must be equal to input={c1} channels" - assert k == 4 and p1 == 1, "TFDWConv() only valid for k=4 and p1=1" - weight, bias = w.weight.permute(2, 3, 1, 0).numpy(), w.bias.numpy() - self.c1 = c1 - self.conv = [ - keras.layers.Conv2DTranspose( - filters=1, - kernel_size=k, - strides=s, - padding="VALID", - output_padding=p2, - use_bias=True, - kernel_initializer=keras.initializers.Constant( - weight[..., i : i + 1] - ), - bias_initializer=keras.initializers.Constant(bias[i]), - ) - for i in range(c1) - ] - - def call(self, inputs): - return tf.concat( - [m(x) for m, x in zip(self.conv, tf.split(inputs, self.c1, 3))], 3 - )[:, 1:-1, 1:-1] - - -class TFFocus(keras.layers.Layer): - # Focus wh information into c-space - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): - # ch_in, ch_out, kernel, stride, padding, groups - super().__init__() - self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv) - - def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c) - # inputs = inputs / 255 # normalize 0-255 to 0-1 - inputs = [ - inputs[:, ::2, ::2, :], - inputs[:, 1::2, ::2, :], - inputs[:, ::2, 1::2, :], - inputs[:, 1::2, 1::2, :], - ] - return self.conv(tf.concat(inputs, 3)) - - -class TFBottleneck(keras.layers.Layer): - # Standard bottleneck - def __init__( - self, c1, c2, shortcut=True, g=1, e=0.5, w=None - ): # ch_in, 
ch_out, shortcut, groups, expansion - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) - self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2) - self.add = shortcut and c1 == c2 - - def call(self, inputs): - return ( - inputs + self.cv2(self.cv1(inputs)) - if self.add - else self.cv2(self.cv1(inputs)) - ) - - -class TFCrossConv(keras.layers.Layer): - # Cross Convolution - def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None): - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = TFConv(c1, c_, (1, k), (1, s), w=w.cv1) - self.cv2 = TFConv(c_, c2, (k, 1), (s, 1), g=g, w=w.cv2) - self.add = shortcut and c1 == c2 - - def call(self, inputs): - return ( - inputs + self.cv2(self.cv1(inputs)) - if self.add - else self.cv2(self.cv1(inputs)) - ) - - -class TFConv2d(keras.layers.Layer): - # Substitution for PyTorch nn.Conv2D - def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None): - super().__init__() - assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" - self.conv = keras.layers.Conv2D( - filters=c2, - kernel_size=k, - strides=s, - padding="VALID", - use_bias=bias, - kernel_initializer=keras.initializers.Constant( - w.weight.permute(2, 3, 1, 0).numpy() - ), - bias_initializer=keras.initializers.Constant(w.bias.numpy()) - if bias - else None, - ) - - def call(self, inputs): - return self.conv(inputs) - - -class TFBottleneckCSP(keras.layers.Layer): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): - # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) - self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2) - self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3) - self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4) - self.bn = TFBN(w.bn) - self.act = lambda x: keras.activations.swish(x) - self.m = keras.Sequential( - [ - TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) - for j in range(n) - ] - ) - - def call(self, inputs): - y1 = self.cv3(self.m(self.cv1(inputs))) - y2 = self.cv2(inputs) - return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3)))) - - -class TFC3(keras.layers.Layer): - # CSP Bottleneck with 3 convolutions - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): - # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) - self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2) - self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3) - self.m = keras.Sequential( - [ - TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) - for j in range(n) - ] - ) - - def call(self, inputs): - return self.cv3( - tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3) - ) - - -class TFC3x(keras.layers.Layer): - # 3 module with cross-convolutions - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): - # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) - self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2) - self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3) - self.m = keras.Sequential( - [ - TFCrossConv( - c_, c_, k=3, s=1, g=g, e=1.0, shortcut=shortcut, w=w.m[j] - ) - for j in range(n) - ] - ) - - def call(self, inputs): - return self.cv3( - tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3) - ) - - -class 
TFSPP(keras.layers.Layer): - # Spatial pyramid pooling layer used in YOLOv3-SPP - def __init__(self, c1, c2, k=(5, 9, 13), w=None): - super().__init__() - c_ = c1 // 2 # hidden channels - self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) - self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2) - self.m = [ - keras.layers.MaxPool2D(pool_size=x, strides=1, padding="SAME") - for x in k - ] - - def call(self, inputs): - x = self.cv1(inputs) - return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3)) - - -class TFSPPF(keras.layers.Layer): - # Spatial pyramid pooling-Fast layer - def __init__(self, c1, c2, k=5, w=None): - super().__init__() - c_ = c1 // 2 # hidden channels - self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) - self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2) - self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding="SAME") - - def call(self, inputs): - x = self.cv1(inputs) - y1 = self.m(x) - y2 = self.m(y1) - return self.cv2(tf.concat([x, y1, y2, self.m(y2)], 3)) - - -class TFDetect(keras.layers.Layer): - # TF YOLOv5 Detect layer - def __init__( - self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None - ): # detection layer - super().__init__() - self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32) - self.nc = nc # number of classes - self.no = nc + 5 # number of outputs per anchor - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [tf.zeros(1)] * self.nl # init grid - self.anchors = tf.convert_to_tensor( - w.anchors.numpy(), dtype=tf.float32 - ) - self.anchor_grid = tf.reshape( - self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]), - [self.nl, 1, -1, 1, 2], - ) - self.m = [ - TFConv2d(x, self.no * self.na, 1, w=w.m[i]) - for i, x in enumerate(ch) - ] - self.training = False # set to False after building model - self.imgsz = imgsz - for i in range(self.nl): - ny, nx = ( - self.imgsz[0] // self.stride[i], - self.imgsz[1] // self.stride[i], - ) - self.grid[i] = self._make_grid(nx, ny) - - def call(self, inputs): - z = [] # inference output - x = [] - for i in range(self.nl): - x.append(self.m[i](inputs[i])) - # x(bs,20,20,255) to x(bs,3,20,20,85) - ny, nx = ( - self.imgsz[0] // self.stride[i], - self.imgsz[1] // self.stride[i], - ) - x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no]) - - if not self.training: # inference - y = x[i] - grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5 - anchor_grid = ( - tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4 - ) - xy = (tf.sigmoid(y[..., 0:2]) * 2 + grid) * self.stride[ - i - ] # xy - wh = tf.sigmoid(y[..., 2:4]) ** 2 * anchor_grid - # Normalize xywh to 0-1 to reduce calibration error - xy /= tf.constant( - [[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32 - ) - wh /= tf.constant( - [[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32 - ) - y = tf.concat( - [ - xy, - wh, - tf.sigmoid(y[..., 4 : 5 + self.nc]), - y[..., 5 + self.nc :], - ], - -1, - ) - z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no])) - - return ( - tf.transpose(x, [0, 2, 1, 3]) - if self.training - else (tf.concat(z, 1),) - ) - - @staticmethod - def _make_grid(nx=20, ny=20): - # yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) - # return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() - xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny)) - return tf.cast( - tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), - dtype=tf.float32, - ) - - -class TFSegment(TFDetect): - # YOLOv5 Segment head for segmentation models - def __init__( - self, - nc=80, - 
anchors=(), - nm=32, - npr=256, - ch=(), - imgsz=(640, 640), - w=None, - ): - super().__init__(nc, anchors, ch, imgsz, w) - self.nm = nm # number of masks - self.npr = npr # number of protos - self.no = 5 + nc + self.nm # number of outputs per anchor - self.m = [ - TFConv2d(x, self.no * self.na, 1, w=w.m[i]) - for i, x in enumerate(ch) - ] # output conv - self.proto = TFProto(ch[0], self.npr, self.nm, w=w.proto) # protos - self.detect = TFDetect.call - - def call(self, x): - p = self.proto(x[0]) - # p = TFUpsample(None, scale_factor=4, mode='nearest')(self.proto(x[0])) # (optional) full-size protos - p = tf.transpose( - p, [0, 3, 1, 2] - ) # from shape(1,160,160,32) to shape(1,32,160,160) - x = self.detect(self, x) - return (x, p) if self.training else (x[0], p) - - -class TFProto(keras.layers.Layer): - def __init__(self, c1, c_=256, c2=32, w=None): - super().__init__() - self.cv1 = TFConv(c1, c_, k=3, w=w.cv1) - self.upsample = TFUpsample(None, scale_factor=2, mode="nearest") - self.cv2 = TFConv(c_, c_, k=3, w=w.cv2) - self.cv3 = TFConv(c_, c2, w=w.cv3) - - def call(self, inputs): - return self.cv3(self.cv2(self.upsample(self.cv1(inputs)))) - - -class TFUpsample(keras.layers.Layer): - # TF version of torch.nn.Upsample() - def __init__( - self, size, scale_factor, mode, w=None - ): # warning: all arguments needed including 'w' - super().__init__() - assert scale_factor % 2 == 0, "scale_factor must be multiple of 2" - self.upsample = lambda x: tf.image.resize( - x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode - ) - # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode) - # with default arguments: align_corners=False, half_pixel_centers=False - # self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x, - # size=(x.shape[1] * 2, x.shape[2] * 2)) - - def call(self, inputs): - return self.upsample(inputs) - - -class TFConcat(keras.layers.Layer): - # TF version of torch.concat() - def __init__(self, dimension=1, w=None): - super().__init__() - assert dimension == 1, "convert only NCHW to NHWC concat" - self.d = 3 - - def call(self, inputs): - return tf.concat(inputs, self.d) - - -def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) - LOGGER.info( - f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}" - ) - anchors, nc, gd, gw = ( - d["anchors"], - d["nc"], - d["depth_multiple"], - d["width_multiple"], - ) - na = ( - (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors - ) # number of anchors - no = na * (nc + 5) # number of outputs = anchors * (classes + 5) - - layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out - for i, (f, n, m, args) in enumerate( - d["backbone"] + d["head"] - ): # from, number, module, args - m_str = m - m = eval(m) if isinstance(m, str) else m # eval strings - for j, a in enumerate(args): - try: - args[j] = eval(a) if isinstance(a, str) else a # eval strings - except NameError: - pass - - n = max(round(n * gd), 1) if n > 1 else n # depth gain - if m in [ - nn.Conv2d, - Conv, - DWConv, - DWConvTranspose2d, - Bottleneck, - SPP, - SPPF, - MixConv2d, - Focus, - CrossConv, - BottleneckCSP, - C3, - C3x, - ]: - c1, c2 = ch[f], args[0] - c2 = make_divisible(c2 * gw, 8) if c2 != no else c2 - - args = [c1, c2, *args[1:]] - if m in [BottleneckCSP, C3, C3x]: - args.insert(2, n) - n = 1 - elif m is nn.BatchNorm2d: - args = [ch[f]] - elif m is Concat: - c2 = sum(ch[-1 if x == -1 else x + 1] for x in f) - elif m in [Detect, Segment]: - args.append([ch[x + 1] for x in f]) 
- if isinstance(args[1], int): # number of anchors - args[1] = [list(range(args[1] * 2))] * len(f) - if m is Segment: - args[3] = make_divisible(args[3] * gw, 8) - args.append(imgsz) - else: - c2 = ch[f] - - tf_m = eval("TF" + m_str.replace("nn.", "")) - m_ = ( - keras.Sequential( - [tf_m(*args, w=model.model[i][j]) for j in range(n)] - ) - if n > 1 - else tf_m(*args, w=model.model[i]) - ) # module - - torch_m_ = ( - nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) - ) # module - t = str(m)[8:-2].replace("__main__.", "") # module type - np = sum(x.numel() for x in torch_m_.parameters()) # number params - m_.i, m_.f, m_.type, m_.np = ( - i, - f, - t, - np, - ) # attach index, 'from' index, type, number params - LOGGER.info( - f"{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}" - ) # print - save.extend( - x % i for x in ([f] if isinstance(f, int) else f) if x != -1 - ) # append to savelist - layers.append(m_) - ch.append(c2) - return keras.Sequential(layers), sorted(save) - - -class TFModel: - # TF YOLOv5 model - def __init__( - self, cfg="yolov5s.yaml", ch=3, nc=None, model=None, imgsz=(640, 640) - ): # model, channels, classes - super().__init__() - if isinstance(cfg, dict): - self.yaml = cfg # model dict - else: # is *.yaml - import yaml # for torch hub - - self.yaml_file = Path(cfg).name - with open(cfg) as f: - self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict - - # Define model - if nc and nc != self.yaml["nc"]: - LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}") - self.yaml["nc"] = nc # override yaml value - self.model, self.savelist = parse_model( - deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz - ) - - def predict( - self, - inputs, - tf_nms=False, - agnostic_nms=False, - topk_per_class=100, - topk_all=100, - iou_thres=0.45, - conf_thres=0.25, - ): - y = [] # outputs - x = inputs - for m in self.model.layers: - if m.f != -1: # if not from previous layer - x = ( - y[m.f] - if isinstance(m.f, int) - else [x if j == -1 else y[j] for j in m.f] - ) # from earlier layers - - x = m(x) # run - y.append(x if m.i in self.savelist else None) # save output - - # Add TensorFlow NMS - if tf_nms: - boxes = self._xywh2xyxy(x[0][..., :4]) - probs = x[0][:, :, 4:5] - classes = x[0][:, :, 5:] - scores = probs * classes - if agnostic_nms: - nms = AgnosticNMS()( - (boxes, classes, scores), topk_all, iou_thres, conf_thres - ) - else: - boxes = tf.expand_dims(boxes, 2) - nms = tf.image.combined_non_max_suppression( - boxes, - scores, - topk_per_class, - topk_all, - iou_thres, - conf_thres, - clip_boxes=False, - ) - return (nms,) - return x # output [1,6300,85] = [xywh, conf, class0, class1, ...] - # x = x[0] # [x(1,6300,85), ...] 
to x(6300,85) - # xywh = x[..., :4] # x(6300,4) boxes - # conf = x[..., 4:5] # x(6300,1) confidences - # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes - # return tf.concat([conf, cls, xywh], 1) - - @staticmethod - def _xywh2xyxy(xywh): - # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1) - return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1) - - -class AgnosticNMS(keras.layers.Layer): - # TF Agnostic NMS - def call(self, input, topk_all, iou_thres, conf_thres): - # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450 - return tf.map_fn( - lambda x: self._nms(x, topk_all, iou_thres, conf_thres), - input, - fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32), - name="agnostic_nms", - ) - - @staticmethod - def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS - boxes, classes, scores = x - class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32) - scores_inp = tf.reduce_max(scores, -1) - selected_inds = tf.image.non_max_suppression( - boxes, - scores_inp, - max_output_size=topk_all, - iou_threshold=iou_thres, - score_threshold=conf_thres, - ) - selected_boxes = tf.gather(boxes, selected_inds) - padded_boxes = tf.pad( - selected_boxes, - paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]], - mode="CONSTANT", - constant_values=0.0, - ) - selected_scores = tf.gather(scores_inp, selected_inds) - padded_scores = tf.pad( - selected_scores, - paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], - mode="CONSTANT", - constant_values=-1.0, - ) - selected_classes = tf.gather(class_inds, selected_inds) - padded_classes = tf.pad( - selected_classes, - paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], - mode="CONSTANT", - constant_values=-1.0, - ) - valid_detections = tf.shape(selected_inds)[0] - return padded_boxes, padded_scores, padded_classes, valid_detections - - -def activations(act=nn.SiLU): - # Returns TF activation from input PyTorch activation - if isinstance(act, nn.LeakyReLU): - return lambda x: keras.activations.relu(x, alpha=0.1) - elif isinstance(act, nn.Hardswish): - return lambda x: x * tf.nn.relu6(x + 3) * 0.166666667 - elif isinstance(act, (nn.SiLU, SiLU)): - return lambda x: keras.activations.swish(x) - else: - raise Exception( - f"no matching TensorFlow activation found for PyTorch activation {act}" - ) - - -def representative_dataset_gen(dataset, ncalib=100): - # Representative dataset generator for use with converter.representative_dataset, returns a generator of np arrays - for n, (path, img, im0s, vid_cap, string) in enumerate(dataset): - im = np.transpose(img, [1, 2, 0]) - im = np.expand_dims(im, axis=0).astype(np.float32) - im /= 255 - yield [im] - if n >= ncalib: - break - - -def run( - weights=ROOT / "yolov5s.pt", # weights path - imgsz=(640, 640), # inference size h,w - batch_size=1, # batch size - dynamic=False, # dynamic batch size -): - # PyTorch model - im = torch.zeros((batch_size, 3, *imgsz)) # BCHW image - model = attempt_load( - weights, device=torch.device("cpu"), inplace=True, fuse=False - ) - _ = model(im) # inference - model.info() - - # TensorFlow model - im = tf.zeros((batch_size, *imgsz, 3)) # BHWC image - tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) - _ = tf_model.predict(im) # inference - - # Keras model - im = keras.Input( - shape=(*imgsz, 3), batch_size=None if 
dynamic else batch_size - ) - keras_model = keras.Model(inputs=im, outputs=tf_model.predict(im)) - keras_model.summary() - - LOGGER.info( - "PyTorch, TensorFlow and Keras models successfully verified.\nUse export.py for TF model export." - ) - - -def parse_opt(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--weights", type=str, default=ROOT / "yolov5s.pt", help="weights path" - ) - parser.add_argument( - "--imgsz", - "--img", - "--img-size", - nargs="+", - type=int, - default=[640], - help="inference size h,w", - ) - parser.add_argument("--batch-size", type=int, default=1, help="batch size") - parser.add_argument( - "--dynamic", action="store_true", help="dynamic batch size" - ) - opt = parser.parse_args() - opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand - print_args(vars(opt)) - return opt - - -def main(opt): - run(**vars(opt)) - - -if __name__ == "__main__": - opt = parse_opt() - main(opt) diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/ChatgptAi.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/ChatgptAi.py deleted file mode 100644 index 996f99a58a35d0d38278bca2f4428e7faf4322c2..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/ChatgptAi.py +++ /dev/null @@ -1,74 +0,0 @@ -from __future__ import annotations - -import re -from aiohttp import ClientSession - -from .base_provider import AsyncProvider, format_prompt - - -class ChatgptAi(AsyncProvider): - url: str = "https://chatgpt.ai/" - working = True - supports_gpt_35_turbo = True - _nonce = None - _post_id = None - _bot_id = None - - @classmethod - async def create_async( - cls, - model: str, - messages: list[dict[str, str]], - proxy: str = None, - **kwargs - ) -> str: - headers = { - "authority" : "chatgpt.ai", - "accept" : "*/*", - "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3", - "cache-control" : "no-cache", - "origin" : "https://chatgpt.ai", - "pragma" : "no-cache", - "referer" : cls.url, - "sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', - "sec-ch-ua-mobile" : "?0", - "sec-ch-ua-platform" : '"Windows"', - "sec-fetch-dest" : "empty", - "sec-fetch-mode" : "cors", - "sec-fetch-site" : "same-origin", - "user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36", - } - async with ClientSession( - headers=headers - ) as session: - if not cls._nonce: - async with session.get(cls.url, proxy=proxy) as response: - response.raise_for_status() - text = await response.text() - result = re.search(r'data-nonce="(.*?)"', text) - if result: - cls._nonce = result.group(1) - result = re.search(r'data-post-id="(.*?)"', text) - if result: - cls._post_id = result.group(1) - result = re.search(r'data-bot-id="(.*?)"', text) - if result: - cls._bot_id = result.group(1) - if not cls._nonce or not cls._post_id or not cls._bot_id: - raise RuntimeError("Nonce, post-id or bot-id not found") - - data = { - "_wpnonce": cls._nonce, - "post_id": cls._post_id, - "url": "https://chatgpt.ai", - "action": "wpaicg_chat_shortcode_message", - "message": format_prompt(messages), - "bot_id": cls._bot_id - } - async with session.post( - "https://chatgpt.ai/wp-admin/admin-ajax.php", - proxy=proxy, - data=data - ) as response: - response.raise_for_status() - return (await response.json())["data"] \ No newline at end of file diff --git a/spaces/Aditya9790/yolo7-object-tracking/README.md b/spaces/Aditya9790/yolo7-object-tracking/README.md deleted file mode 100644 
index cb822efc443313d3bd270c6a253b0c1c265c07bc..0000000000000000000000000000000000000000 --- a/spaces/Aditya9790/yolo7-object-tracking/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Yolo7 Object Tracking -emoji: 💩 -colorFrom: pink -colorTo: purple -sdk: gradio -sdk_version: 3.14.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/scroller-plugin.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/scroller-plugin.js deleted file mode 100644 index 1d5b46231303c5b6d9e51c5a7eeacef91611ef48..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/scroller-plugin.js +++ /dev/null @@ -1,20 +0,0 @@ -import Scroller from './scroller.js'; - -class ScrollerPlugin extends Phaser.Plugins.BasePlugin { - - constructor(pluginManager) { - super(pluginManager); - } - - start() { - var eventEmitter = this.game.events; - eventEmitter.on('destroy', this.destroy, this); - } - - add(gameObject, config) { - return new Scroller(gameObject, config); - } - -} - -export default ScrollerPlugin; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/folder/methods/ExpandMethods.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/folder/methods/ExpandMethods.js deleted file mode 100644 index 61018ca59533ddcb76e96afddcbb54132d1d3c25..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/folder/methods/ExpandMethods.js +++ /dev/null @@ -1,75 +0,0 @@ -export default { - expand(duration) { - if (this.expanded === true) { - return this; - } - - if (duration === undefined) { - duration = this.transitionDuration; - } - - this.expanded = true; - - var title = this.childrenMap.title; - var child = this.childrenMap.child; - - this.show(child); - - var layoutTarget = (this.reLayoutTarget) ? this.reLayoutTarget : this.getTopmostSizer(); - layoutTarget.layout(); - - title.emit('folder.expand', duration, this); - child.emit('folder.expand', duration, this); - this.emit('expand.start', this); - - this.childTransition - .once('open', function () { - this.emit('expand.complete', this); - }, this) - .requestOpen(null, duration); - - return this; - }, - - collapse(duration) { - if (this.expanded === false) { - return this; - } - - if (duration === undefined) { - duration = this.transitionDuration; - } - - this.expanded = false; - - var title = this.childrenMap.title; - var child = this.childrenMap.child; - - title.emit('folder.collapse', duration, this); - child.emit('folder.collapse', duration, this); - this.emit('collapse.start', this); - - this.childTransition - .once('close', function () { - this.setChildScale(child, 1, 1).hide(child); - - var layoutTarget = (this.reLayoutTarget) ? 
this.reLayoutTarget : this.getTopmostSizer(); - layoutTarget.layout(); - - this.emit('collapse.complete', this); - }, this) - .requestClose(null, duration); - - return this; - }, - - toggle(duration) { - if (this.expanded) { - this.collapse(duration); - } else { - this.expand(duration); - } - - return this; - } -} \ No newline at end of file diff --git a/spaces/Alifarsi/news_summarizer/app.py b/spaces/Alifarsi/news_summarizer/app.py deleted file mode 100644 index a00177d7eca9972f9d6a5fb261457dad1ecf78eb..0000000000000000000000000000000000000000 --- a/spaces/Alifarsi/news_summarizer/app.py +++ /dev/null @@ -1,42 +0,0 @@ -from newspaper import Article -from newspaper import Config -import gradio as gr -from gradio.mix import Parallel, Series - - - -def extrac_text(url): - USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:78.0) Gecko/20100101 Firefox/78.0' - config = Config() - config.browser_user_agent = USER_AGENT - config.request_timeout = 10 - - article = Article(url, config=config) - article.download() - article.parse() - text = article.text - return text - -extractor = gr.Interface(extrac_text, 'text', 'text') -summarizer = gr.Interface.load("huggingface/facebook/bart-large-cnn") - -sample_url = [['https://www.cp24.com/news/ontario-reports-481-new-covid-19-cases-1-death-1.5667950'], -] - -desc = ''' - The news summarizer app uses bart-large-cnn model by Facebook to summarize the text of a news article. - ''' - -iface = Series(extractor, summarizer, - inputs = gr.inputs.Textbox( - lines = 2, - label = 'Enter URL below' - ), - outputs = 'text', - title = 'News Summarizer', - theme = 'grass', - layout = 'horizontal', - description = desc, - examples=sample_url) - -iface.launch() \ No newline at end of file diff --git a/spaces/AlishbaImran/Redox-Flow-Battery-Prediction/app.py b/spaces/AlishbaImran/Redox-Flow-Battery-Prediction/app.py deleted file mode 100644 index 54a1cf366a02962618a5e1182da067ea429cf019..0000000000000000000000000000000000000000 --- a/spaces/AlishbaImran/Redox-Flow-Battery-Prediction/app.py +++ /dev/null @@ -1,235 +0,0 @@ -import os -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' - -import warnings -warnings.filterwarnings("ignore") - - - -from PIL import Image -import base64 -import pandas as pd -import streamlit as st -import pickle -from rdkit import Chem -from rdkit.Chem import AllChem -from sklearn.ensemble import RandomForestRegressor - - -import random -import numpy as np -from keras.wrappers.scikit_learn import KerasRegressor -from sklearn.metrics import mean_squared_error -import time - -import numpy -from sklearn.model_selection import GridSearchCV - -import tensorflow -from tensorflow.keras.models import Sequential -from tensorflow.keras.layers import Dense -from tensorflow.keras.layers import Dropout - -def create_model(optimizer='RMSprop', learn_rate=0.1, momentum=0.4, activation='sigmoid', dropout_rate=0.0): - - keras_model = Sequential() - keras_model.add(Dense(128, input_dim=train_encoded.shape[1], activation=activation)) - keras_model.add(Dropout(dropout_rate)) - keras_model.add(Dense(32, activation=activation)) - keras_model.add(Dropout(dropout_rate)) - keras_model.add(Dense(8,activation=activation)) - keras_model.add(Dropout(dropout_rate)) - keras_model.add(Dense(1,activation='linear')) - keras_model.summary() - - keras_model.compile(loss='mean_squared_error', optimizer=optimizer) - - return keras_model - -def get_ecfc(smiles_list, radius=2, nBits=2048, useCounts=True): - ecfp_fingerprints=[] - erroneous_smiles=[] - for smiles in smiles_list: - 
mol=Chem.MolFromSmiles(smiles) - if mol is None: - ecfp_fingerprints.append([None]*nBits) - erroneous_smiles.append(smiles) - else: - mol=Chem.AddHs(mol) - if useCounts: - ecfp_fingerprints.append(list(AllChem.GetHashedMorganFingerprint(mol, radius, nBits))) - else: - ecfp_fingerprints.append(list(AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits).ToBitString())) - - - df_ecfp_fingerprints = pd.DataFrame(data = ecfp_fingerprints, index = smiles_list) - - if len(erroneous_smiles)>0: - print("The following erroneous SMILES have been found in the data:\n{}.\nThe erroneous SMILES will be removed from the data.".format('\n'.join(map(str, erroneous_smiles)))) - df_ecfp_fingerprints = df_ecfp_fingerprints.dropna(how='any') - - return df_ecfp_fingerprints - - - -import deepchem as dc -from deepchem.models import GraphConvModel - -def generate(SMILES, verbose=False): - - featurizer = dc.feat.ConvMolFeaturizer() - gcn = featurizer.featurize(SMILES) - properties = [random.randint(-1,1)/100 for i in range(0,len(SMILES))] - dataset = dc.data.NumpyDataset(X=gcn, y=np.array(properties)) - - return dataset - - -st.write("""# Accelerated reaction energy prediction for redox batteries 🧪 """) -st.write('By: [Alishba Imran](https://www.linkedin.com/in/alishba-imran-/)') - - - - - - -about_part = st.expander("Learn More About Project", expanded=False) -with about_part: - st.write(''' - #### About - Redox flow batteries (RFB) are widely being explored as a class of electrochemical energy storage devices for large-scale energy storage applications. Redox flow batteries convert electrical energy to chemical energy via electrochemical reactions (through reversible oxidation and reduction) of compounds. - - To develop next-gen redox flow batteries with high cycle life and energy density, we need to speed up the discovery of electroactive materials with desired properties. This process can currently be very slow and expensive given how large and diverse the chemical space of the candidate compounds is. - - Using an attention-based graph convolutional neural network technique, I've developed a model that can take in reactants as SMILEs and predict the reaction energy in the redox reaction. - - A lot of this work was inspired and built on top of this [paper](https://chemrxiv.org/engage/chemrxiv/article-details/60c7575f469df44a40f45465). Feel free to give it a try and reach out for any feedback. Email: alishbai734@gmail.com. - - - ''') - - - - -st.write('**Insert your SMILES**') - -st.write('Type any SMILES used as a reactant in the redox reaction. 
This model will output the reaction energy.') - - -SMILES_input = "Oc1cccc(c12)c(O)c(nn2)O\nc1cccc(c12)cc(nn2)O\nOc1c(O)ccc(c12)cc(nn2)O" - -SMILES = st.text_area('press ctrl+enter to run model!', SMILES_input, height=20) -SMILES = SMILES.split('\n') -SMILES = list(filter(None, SMILES)) - - - - -if len(SMILES)>1000: - SMILES=SMILES[0:1000] - -ecfc_encoder = get_ecfc(SMILES) - -generated_dataset = generate(SMILES) - - -filename = 'final_models/transformers.pkl' -infile = open(filename,'rb') -transformers = pickle.load(infile) -infile.close() - - - -model_dir = 'final_models/tf_chp_initial' -gcne_model = dc.models.GraphConvModel(n_tasks=1, batch_size=100, mode='regression', dropout=0.25,model_dir= model_dir,random_seed=0) -gcne_model.restore('final_models/tf_chp_initial/ckpt-94/ckpt-197') - - - - -pred_gcne = gcne_model.predict(generated_dataset, transformers) - - - -from keras.models import model_from_json - -keras_final_model = model_from_json(open('./final_models/keras_final_model_architecture.json').read()) -keras_final_model.load_weights('./final_models/keras_final_model_weights.h5') - - -rf_final_model = pickle.load(open(r'./final_models/rf_final_model.txt', "rb")) - - - - - -pred_keras = keras_final_model.predict(ecfc_encoder) -pred_rf = rf_final_model.predict(ecfc_encoder) - - - -pred_rf_r = pred_rf.reshape((len(pred_rf),1)) - - - - -pred_consensus = (pred_keras + pred_gcne + pred_rf)/3 - - - - - - -from sklearn.metrics import mean_absolute_error,mean_squared_error,r2_score - - - -test1_mae = [] - -test1_mae.append(0.00705) -test1_mae.append(0.00416) -test1_mae.append(0.0035) - - - - - -test2_mae = [] - -test2_mae.append(0.00589) -test2_mae.append(0.00483) -test2_mae.append(0.00799) - - - -weighted_pred_0_1_3=( np.power(2/(test1_mae[0]+test2_mae[0]),3) * pred_gcne + - np.power(2/(test1_mae[1]+test2_mae[1]),3) * pred_keras + - np.power(2/(test1_mae[2]+test2_mae[2]),3) * pred_rf_r ) / ( - np.power(2/(test1_mae[0]+test2_mae[0]),3) + np.power(2/(test1_mae[1]+test2_mae[1]),3) + np.power(2/(test1_mae[2]+test2_mae[2]),3)) - - - - - -pred_weighted = (pred_gcne + pred_keras + pred_rf_r)/3 - - - - - - - - - -df_results = pd.DataFrame(SMILES, columns=['SMILES Reactant']) -df_results["Predicted Reaction Energy"]= weighted_pred_0_1_3 - -df_results=df_results.round(6) - - - -st.header('Prediction of Reaction Energy for RFB') -df_results - - - diff --git a/spaces/Aloento/9Nine-PITS/text/frontend/vocab.py b/spaces/Aloento/9Nine-PITS/text/frontend/vocab.py deleted file mode 100644 index 48dc0634a436d3ea8279a6f33cdb58289052f769..0000000000000000000000000000000000000000 --- a/spaces/Aloento/9Nine-PITS/text/frontend/vocab.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from collections import OrderedDict -from typing import Iterable - -__all__ = ["Vocab"] - - -class Vocab(object): - """ Vocabulary. - - Args: - symbols (Iterable[str]): Common symbols. - padding_symbol (str, optional): Symbol for pad. 
Defaults to "". - unk_symbol (str, optional): Symbol for unknow. Defaults to "" - start_symbol (str, optional): Symbol for start. Defaults to "" - end_symbol (str, optional): Symbol for end. Defaults to "" - """ - - def __init__(self, - symbols: Iterable[str], - padding_symbol="", - unk_symbol="", - start_symbol="", - end_symbol=""): - self.special_symbols = OrderedDict() - for i, item in enumerate( - [padding_symbol, unk_symbol, start_symbol, end_symbol]): - if item: - self.special_symbols[item] = len(self.special_symbols) - - self.padding_symbol = padding_symbol - self.unk_symbol = unk_symbol - self.start_symbol = start_symbol - self.end_symbol = end_symbol - - self.stoi = OrderedDict() - self.stoi.update(self.special_symbols) - - for i, s in enumerate(symbols): - if s not in self.stoi: - self.stoi[s] = len(self.stoi) - self.itos = {v: k for k, v in self.stoi.items()} - - def __len__(self): - return len(self.stoi) - - @property - def num_specials(self): - """ The number of special symbols. - """ - return len(self.special_symbols) - - # special tokens - @property - def padding_index(self): - """ The index of padding symbol - """ - return self.stoi.get(self.padding_symbol, -1) - - @property - def unk_index(self): - """The index of unknow symbol. - """ - return self.stoi.get(self.unk_symbol, -1) - - @property - def start_index(self): - """The index of start symbol. - """ - return self.stoi.get(self.start_symbol, -1) - - @property - def end_index(self): - """ The index of end symbol. - """ - return self.stoi.get(self.end_symbol, -1) - - def __repr__(self): - fmt = "Vocab(size: {},\nstoi:\n{})" - return fmt.format(len(self), self.stoi) - - def __str__(self): - return self.__repr__() - - def lookup(self, symbol): - """ The index that symbol correspond. - """ - return self.stoi[symbol] - - def reverse(self, index): - """ The symbol thar index cottespond. - """ - return self.itos[index] - - def add_symbol(self, symbol): - """ Add a new symbol in vocab. - """ - if symbol in self.stoi: - return - N = len(self.stoi) - self.stoi[symbol] = N - self.itos[N] = symbol - - def add_symbols(self, symbols): - """ Add multiple symbols in vocab. - """ - for symbol in symbols: - self.add_symbol(symbol) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/optimization/onnx.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/optimization/onnx.md deleted file mode 100644 index 89ea435217265c64d3ff1eb18b464de47d3ff700..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/optimization/onnx.md +++ /dev/null @@ -1,108 +0,0 @@ - - - -# How to use the ONNX Runtime for inference - -🤗 [Optimum](https://github.com/huggingface/optimum) provides a Stable Diffusion pipeline compatible with ONNX Runtime. - -## Installation - -Install 🤗 Optimum with the following command for ONNX Runtime support: - -``` -pip install optimum["onnxruntime"] -``` - -## Stable Diffusion - -### Inference - -To load an ONNX model and run inference with the ONNX Runtime, you need to replace [`StableDiffusionPipeline`] with `ORTStableDiffusionPipeline`. In case you want to load a PyTorch model and convert it to the ONNX format on-the-fly, you can set `export=True`. 
- -```python -from optimum.onnxruntime import ORTStableDiffusionPipeline - -model_id = "runwayml/stable-diffusion-v1-5" -pipeline = ORTStableDiffusionPipeline.from_pretrained(model_id, export=True) -prompt = "sailing ship in storm by Leonardo da Vinci" -image = pipeline(prompt).images[0] -pipeline.save_pretrained("./onnx-stable-diffusion-v1-5") -``` - -If you want to export the pipeline in the ONNX format offline and later use it for inference, -you can use the [`optimum-cli export`](https://huggingface.co/docs/optimum/main/en/exporters/onnx/usage_guides/export_a_model#exporting-a-model-to-onnx-using-the-cli) command: - -```bash -optimum-cli export onnx --model runwayml/stable-diffusion-v1-5 sd_v15_onnx/ -``` - -Then perform inference: - -```python -from optimum.onnxruntime import ORTStableDiffusionPipeline - -model_id = "sd_v15_onnx" -pipeline = ORTStableDiffusionPipeline.from_pretrained(model_id) -prompt = "sailing ship in storm by Leonardo da Vinci" -image = pipeline(prompt).images[0] -``` - -Notice that we didn't have to specify `export=True` above. - -
- -You can find more examples in [optimum documentation](https://huggingface.co/docs/optimum/). - - -### Supported tasks - -| Task | Loading Class | -|--------------------------------------|--------------------------------------| -| `text-to-image` | `ORTStableDiffusionPipeline` | -| `image-to-image` | `ORTStableDiffusionImg2ImgPipeline` | -| `inpaint` | `ORTStableDiffusionInpaintPipeline` | - -## Stable Diffusion XL - -### Export - -To export your model to ONNX, you can use the [Optimum CLI](https://huggingface.co/docs/optimum/main/en/exporters/onnx/usage_guides/export_a_model#exporting-a-model-to-onnx-using-the-cli) as follows : - -```bash -optimum-cli export onnx --model stabilityai/stable-diffusion-xl-base-1.0 --task stable-diffusion-xl sd_xl_onnx/ -``` - -### Inference - -To load an ONNX model and run inference with ONNX Runtime, you need to replace `StableDiffusionPipelineXL` with `ORTStableDiffusionPipelineXL` : - -```python -from optimum.onnxruntime import ORTStableDiffusionXLPipeline - -pipeline = ORTStableDiffusionXLPipeline.from_pretrained("sd_xl_onnx") -prompt = "sailing ship in storm by Leonardo da Vinci" -image = pipeline(prompt).images[0] -``` - -### Supported tasks - -| Task | Loading Class | -|--------------------------------------|--------------------------------------| -| `text-to-image` | `ORTStableDiffusionXLPipeline` | -| `image-to-image` | `ORTStableDiffusionXLImg2ImgPipeline`| - -## Known Issues - -- Generating multiple prompts in a batch seems to take too much memory. While we look into it, you may need to iterate instead of batching. diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_sag.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_sag.py deleted file mode 100644 index 79d76666c392e7695917319ac8f9b2f9b413ca97..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_sag.py +++ /dev/null @@ -1,188 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import gc -import unittest - -import numpy as np -import torch -from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer - -from diffusers import ( - AutoencoderKL, - DDIMScheduler, - StableDiffusionSAGPipeline, - UNet2DConditionModel, -) -from diffusers.utils import slow, torch_device -from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu - -from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS -from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin - - -enable_full_determinism() - - -class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): - pipeline_class = StableDiffusionSAGPipeline - params = TEXT_TO_IMAGE_PARAMS - batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - image_params = TEXT_TO_IMAGE_IMAGE_PARAMS - image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS - - def get_dummy_components(self): - torch.manual_seed(0) - unet = UNet2DConditionModel( - block_out_channels=(32, 64), - layers_per_block=2, - sample_size=32, - in_channels=4, - out_channels=4, - down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), - up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), - cross_attention_dim=32, - ) - scheduler = DDIMScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - clip_sample=False, - set_alpha_to_one=False, - ) - torch.manual_seed(0) - vae = AutoencoderKL( - block_out_channels=[32, 64], - in_channels=3, - out_channels=3, - down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], - up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], - latent_channels=4, - ) - torch.manual_seed(0) - text_encoder_config = CLIPTextConfig( - bos_token_id=0, - eos_token_id=2, - hidden_size=32, - intermediate_size=37, - layer_norm_eps=1e-05, - num_attention_heads=4, - num_hidden_layers=5, - pad_token_id=1, - vocab_size=1000, - ) - text_encoder = CLIPTextModel(text_encoder_config) - tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") - - components = { - "unet": unet, - "scheduler": scheduler, - "vae": vae, - "text_encoder": text_encoder, - "tokenizer": tokenizer, - "safety_checker": None, - "feature_extractor": None, - } - return components - - def get_dummy_inputs(self, device, seed=0): - if str(device).startswith("mps"): - generator = torch.manual_seed(seed) - else: - generator = torch.Generator(device=device).manual_seed(seed) - inputs = { - "prompt": ".", - "generator": generator, - "num_inference_steps": 2, - "guidance_scale": 1.0, - "sag_scale": 1.0, - "output_type": "numpy", - } - return inputs - - def test_inference_batch_single_identical(self): - super().test_inference_batch_single_identical(expected_max_diff=3e-3) - - -@slow -@require_torch_gpu -class StableDiffusionPipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_stable_diffusion_1(self): - sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") - sag_pipe = sag_pipe.to(torch_device) - sag_pipe.set_progress_bar_config(disable=None) - - prompt = "." 
- generator = torch.manual_seed(0) - output = sag_pipe( - [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np" - ) - - image = output.images - - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 512, 512, 3) - expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 - - def test_stable_diffusion_2(self): - sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base") - sag_pipe = sag_pipe.to(torch_device) - sag_pipe.set_progress_bar_config(disable=None) - - prompt = "." - generator = torch.manual_seed(0) - output = sag_pipe( - [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np" - ) - - image = output.images - - image_slice = image[0, -3:, -3:, -1] - - assert image.shape == (1, 512, 512, 3) - expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371]) - - assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 - - def test_stable_diffusion_2_non_square(self): - sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base") - sag_pipe = sag_pipe.to(torch_device) - sag_pipe.set_progress_bar_config(disable=None) - - prompt = "." - generator = torch.manual_seed(0) - output = sag_pipe( - [prompt], - width=768, - height=512, - generator=generator, - guidance_scale=7.5, - sag_scale=1.0, - num_inference_steps=20, - output_type="np", - ) - - image = output.images - - assert image.shape == (1, 512, 768, 3) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/__init__.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Andy1621/uniformer_image_detection/configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py b/spaces/Andy1621/uniformer_image_detection/configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py deleted file mode 100644 index 7b8ce4a1caf95d7e66e79e14219d3d9a8f74321d..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/ld/ld_r18_gflv1_r101_fpn_coco_1x.py +++ /dev/null @@ -1,62 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] -teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth' # noqa -model = dict( - type='KnowledgeDistillationSingleStageDetector', - pretrained='torchvision://resnet18', - teacher_config='configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py', - teacher_ckpt=teacher_ckpt, - backbone=dict( - type='ResNet', - depth=18, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch'), - neck=dict( - type='FPN', - in_channels=[64, 128, 256, 512], - out_channels=256, - start_level=1, - add_extra_convs='on_output', - num_outs=5), - bbox_head=dict( - type='LDHead', - num_classes=80, - in_channels=256, - stacked_convs=4, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - strides=[8, 16, 32, 64, 128]), - loss_cls=dict( - type='QualityFocalLoss', - use_sigmoid=True, - beta=2.0, 
- loss_weight=1.0), - loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25), - loss_ld=dict( - type='KnowledgeDistillationKLDivLoss', loss_weight=0.25, T=10), - reg_max=16, - loss_bbox=dict(type='GIoULoss', loss_weight=2.0)), - # training and testing settings - train_cfg=dict( - assigner=dict(type='ATSSAssigner', topk=9), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) - -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes.py deleted file mode 100644 index c0ba019136c2e4f33b015be3d82505bee2066655..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './deeplabv3plus_r50-d8_769x769_80k_cityscapes.py' -model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet')) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_480x480_80k_pascal_context.py b/spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_480x480_80k_pascal_context.py deleted file mode 100644 index bdccfd99ba0c25646f02850483c2cdf679fdbf3d..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_480x480_80k_pascal_context.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './fcn_r50-d8_480x480_80k_pascal_context.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/AnimaLab/bias-test-gpt-pairs/mgr_bias_scoring.py b/spaces/AnimaLab/bias-test-gpt-pairs/mgr_bias_scoring.py deleted file mode 100644 index 89b55a96a785d044c816892deab912a957bebf81..0000000000000000000000000000000000000000 --- a/spaces/AnimaLab/bias-test-gpt-pairs/mgr_bias_scoring.py +++ /dev/null @@ -1,932 +0,0 @@ -import pandas as pd -import numpy as np -import torch -import string -import re -import random -import gradio as gr -from tqdm import tqdm -tqdm().pandas() - -import nltk -from nltk.tokenize.treebank import TreebankWordDetokenizer -nltk.download('punkt') - -# BERT imports -from transformers import BertForMaskedLM, BertTokenizer -# GPT2 imports -from transformers import GPT2LMHeadModel, GPT2Tokenizer -# BioBPT -from transformers import BioGptForCausalLM, BioGptTokenizer -# LLAMA -from transformers import LlamaTokenizer, LlamaForCausalLM -# FALCON -from transformers import AutoTokenizer, AutoModelForCausalLM - -import mgr_sentences as smgr -import mgr_biases as bmgr -import mgr_requests as rq_mgr - -from error_messages import * - -import contextlib -autocast = contextlib.nullcontext -import gc - -# Great article about handing big models - https://huggingface.co/blog/accelerate-large-models -def _getModelSafe(model_name, device): - model = None - tokenizer = None - try: - model, tokenizer = _getModel(model_name, device) - except Exception as err: - print(f"Loading Model Error: {err}") - print("Cleaning the model...") - model = None - tokenizer = None - torch.cuda.empty_cache() - gc.collect() - - if model == None or tokenizer == None: - print("Cleaned, trying reloading....") - model, tokenizer = _getModel(model_name, device) - - return model, tokenizer - 
-def _getModel(model_name, device): - if "bert" in model_name.lower(): - tokenizer = BertTokenizer.from_pretrained(model_name) - model = BertForMaskedLM.from_pretrained(model_name) - elif "biogpt" in model_name.lower(): - tokenizer = BioGptTokenizer.from_pretrained(model_name) - model = BioGptForCausalLM.from_pretrained(model_name) - elif 'gpt2' in model_name.lower(): - tokenizer = GPT2Tokenizer.from_pretrained(model_name) - model = GPT2LMHeadModel.from_pretrained(model_name) - elif 'llama' in model_name.lower(): - print(f"Getting LLAMA model: {model_name}") - tokenizer = LlamaTokenizer.from_pretrained(model_name) - model = LlamaForCausalLM.from_pretrained(model_name, - torch_dtype=torch.bfloat16, - low_cpu_mem_usage=True, ## - #use_safetensors=True, ## - #offload_folder="offload", - #offload_state_dict = True, - #device_map='auto' - ) - elif "falcon" in model_name.lower(): - print(f"Getting FALCON model: {model_name}") - tokenizer = AutoTokenizer.from_pretrained(model_name) - model = AutoModelForCausalLM.from_pretrained(model_name, - torch_dtype=torch.bfloat16, - trust_remote_code=True, - low_cpu_mem_usage=True, ## - #use_safetensors=True, ## - #offload_folder="offload", - #offload_state_dict = True, - #device_map='auto' - ) - #model.tie_weights() - if model == None: - print("Model is empty!!!") - else: - model = model.to(device) - model.eval() - torch.set_grad_enabled(False) - - return model, tokenizer - -def makeOrdGrpKey(row): - grp_lst = [row['grp_term1'], row['grp_term2']] - grp_lst.sort() - - return f"{grp_lst[0]}/{grp_lst[1]}" - -def genMissingPairsSpec(bias_spec, test_sentences_df): - print("--- GET MISSING BIAS PAIRS ---") - g1, g2, a1, a2 = get_words(bias_spec) - - print("---Sentences---") - print(list(test_sentences_df.columns)) - - test_sentences_df['gr_cmp_key'] = test_sentences_df.progress_apply(makeOrdGrpKey, axis=1) - - print("---Sentences GRP KEY---") - print(list(test_sentences_df.columns)) - - grp_terms = g1 + g2 - att_terms = a1 + a2 - - grp_cmp_dict = {} - for gr1, gr2 in zip(g1, g2): - gr_lst = [gr1, gr2] - gr_lst.sort() - - if gr1 not in grp_cmp_dict: - grp_cmp_dict[gr1] = [gr2, f"{gr_lst[0]}/{gr_lst[1]}"] - if gr2 not in grp_cmp_dict: - grp_cmp_dict[gr2] = [gr1, f"{gr_lst[0]}/{gr_lst[1]}"] - - print("---GRP PAIR KEY---") - print(grp_cmp_dict) - - print("---PERMITTED PAIRS---") - permitted_pairs = [] - for gr1, gr2 in zip(g1, g2): - gr_lst = [gr1, gr2] - gr_lst.sort() - - permitted_pairs.append(f"{gr_lst[0]}/{gr_lst[1]}") - - if gr1 not in grp_cmp_dict: - grp_cmp_dict[gr1] = [gr2, f"{gr_lst[0]}/{gr_lst[1]}"] - if gr2 not in grp_cmp_dict: - grp_cmp_dict[gr2] = [gr1, f"{gr_lst[0]}/{gr_lst[1]}"] - - print(f"Permitted pairs: {permitted_pairs}") - - att_grp_mat = [] - for grp in grp_terms[0:]: #list(bias_spec['social_groups'].items())[0][1]: - for att in att_terms: - sub_df = test_sentences_df.query("att_term==@att and grp_term1==@grp") # or grp_term2==@grp1 - grp_att_pair = sub_df.groupby(['gr_cmp_key','att_term'])['att_term'].agg(["count"]).reset_index().values.tolist() - - isAdded = False - if len(grp_att_pair)>0: - if len(grp_att_pair) == 1: - att_grp_mat.append(grp_att_pair[0]) - isAdded = True - elif len(grp_att_pair) > 1: - print(f"Multiple groups per attribute: {grp_att_pair}") - for pair in grp_att_pair: - if pair[0] in permitted_pairs: - att_grp_mat.append(pair) - isAdded = True - - # Not added pair - if isAdded == False: - att_grp_mat.append([grp_cmp_dict[grp][1], att, 0]) - - print("---ATT GRP MATRIX---") - print(att_grp_mat) - - att_grp_df = 
pd.DataFrame(att_grp_mat, columns=['grp_pair','att_term','count']) - print(att_grp_df.head(2)) - - agg_att_grp_df = att_grp_df.groupby(["grp_pair","att_term"])["count"].agg(["sum"]).reset_index() - print(agg_att_grp_df.columns) - - def missingCounts(row, max): - n_gap = np.max([0, max - row['sum']]) - return n_gap - - b_name = rq_mgr.getBiasName(g1, g2, a1, a2) - - max_count = agg_att_grp_df.max()['sum'] - agg_att_grp_df['n_gap'] = agg_att_grp_df.progress_apply(missingCounts, axis=1, max=2) - #print(agg_att_grp_df.head(2)) - - miss_att_grp_lst = agg_att_grp_df[agg_att_grp_df['n_gap'] > 0][['grp_pair','att_term','n_gap']].values.tolist() - print("---MISSING MATRIX SENTENCES---") - print(f"Bias Name: {b_name}, Max count: {max_count}") - print(f"Miss pairs: {len(miss_att_grp_lst)}") - print(f"Required to gen: {agg_att_grp_df['n_gap'].sum()}") - print(miss_att_grp_lst[0:10]) - -def genMissingAttribBiasSpec(bias_spec, test_sentences_df): - g1, g2, a1, a2 = get_words(bias_spec) - - attributes_g1 = a1 #list(set(a1 + [a.replace(' ','-') for a in a1])) #bias_spec['attributes']['attribute 1'] - attributes_g2 = a2 #list(set(a2 + [a.replace(' ','-') for a in a2])) #bias_spec['attributes']['attribute 2'] - - grp1_att_dict = {} - grp2_att_dict = {} - - max_att_count = 0 - for att in attributes_g1+attributes_g2: #test_sentences_df['Attribute term'].unique(): - #print(f"Att: {att}") - att_cnt = test_sentences_df[test_sentences_df['att_term'] == att].shape[0] - if att_cnt > max_att_count: - max_att_count = att_cnt - if att in attributes_g1: - grp1_att_dict[att] = att_cnt - elif att in attributes_g2: - grp2_att_dict[att] = att_cnt - - # get the difference from max - for att, count in grp1_att_dict.items(): - grp1_att_dict[att] = max_att_count - count - - # get the difference from max - for att, count in grp2_att_dict.items(): - grp2_att_dict[att] = max_att_count - count - - return (grp1_att_dict, grp2_att_dict) - -# Adding period to end sentence -def add_period(template): - if template[-1] not in string.punctuation: - template += "." 
- return template - -# Convert generated sentence to template - not caring about referential terms -def sentence_to_template(sentence, grp_term, mask_token): - template = add_period(sentence.strip("\"")) - - fnd_grp = list(re.finditer(f"(^|[ ]+){grp_term.lower()}[ .,!]+", template.lower())) - while len(fnd_grp) > 0: - idx1 = fnd_grp[0].span(0)[0] - if template[idx1] == " ": - idx1+=1 - idx2 = fnd_grp[0].span(0)[1]-1 - template = template[0:idx1]+mask_token+template[idx2:] - - fnd_grp = list(re.finditer(f"(^|[ ]+){grp_term.lower()}[ .,!]+", template.lower())) - - return template - -# Convert generated sentence to template - not caring about referential terms -def sentence_to_template_df(row): - sentence = row['Sentence'] - grp_term_1 = row['Group term 1'] - grp_term_2 = row['Group term 2'] - grp_term = grp_term_1 if grp_term_1.lower() in sentence.lower() else grp_term_2 - #template = add_period(sentence.strip("\"")) - - #fnd_grp = list(re.finditer(f"(^|[ ]+){grp_term.lower()}[ .,!]+", template.lower())) - #while len(fnd_grp) > 0: - # idx1 = fnd_grp[0].span(0)[0] - # if template[idx1] == " ": - # idx1+=1 - # idx2 = fnd_grp[0].span(0)[1]-1 - # template = template[0:idx1]+f"[T]"+template[idx2:] - - # fnd_grp = list(re.finditer(f"(^|[ ]+){grp_term.lower()}[ .,!]+", template.lower())) - - template = sentence_to_template(sentence, grp_term, mask_token="[T]") - - return template - -# Detect differences between alternative sentences and construct a template -def maskSentenceDifferences(sentence, rewrite, target_words, att_term): - if '-' in att_term: - sentence = sentence.replace(att_term.replace("-",""), att_term.replace("-"," ")) - #print(sentence) - - if ' ' in att_term: - no_space_att = att_term.replace(" ", "") - if no_space_att in rewrite: - rewrite = rewrite.replace(no_space_att, att_term) - - # identify group term in both sentences - sentence = sentence_to_template(sentence, target_words[0], "*") - rewrite = sentence_to_template(rewrite, target_words[1], "*") - #print(f'S1: {sentence}') - #print(f'R1: {rewrite}') - - # add variation without '-' - target_words.extend([t.replace('-','') for t in target_words]) - target_words = [t.lower() for t in target_words] - - s_words = nltk.word_tokenize(sentence) - r_words = nltk.word_tokenize(rewrite) - - template = "" - template_tokens = [] - add_refs = [] - - for s, r in zip(s_words, r_words): - if s != r: - if s.lower() in target_words: - template += "[T]" - template_tokens.append("[T]") - else: - template += "[R]" - template_tokens.append("[R]") - - l_mask = s.lower() - r_mask = r.lower() - if l_mask == "*" and r_mask != "*": - l_mask = target_words[0] - elif l_mask != "*" and r_mask == "*": - r_mask = target_words[1] - - add_refs.append((l_mask, r_mask)) - - #add_refs.append((s.lower(),r.lower())) - elif s in string.punctuation: - template += s.strip(" ") - template_tokens.append(s) - else: - template += s - template_tokens.append(s) - - template += " " - - return TreebankWordDetokenizer().detokenize(template_tokens).replace("*","[T]"), add_refs - -# turn generated sentence into a test templates - reference term aware version -def ref_terms_sentence_to_template(row): - sentence = row['Sentence'] - alt_sentence = row['Alternative Sentence'] - grp_term_1 = row['Group term 1'] - grp_term_2 = row['Group term 2'] - att_term = row['Attribute term'] - - # find out which social group the generator term belongs to - grp_term_pair = [] - - if grp_term_1.lower() in sentence.lower(): - grp_term_pair = [grp_term_1, grp_term_2] - elif grp_term_2.lower() in 
sentence.lower(): - grp_term_pair = [grp_term_2, grp_term_1] - else: - print(f"ERROR: missing either group term: [{grp_term_1},{grp_term_2}] in sentence: {sentence}") - - template, grp_refs = maskSentenceDifferences(sentence, alt_sentence, grp_term_pair, att_term) - return pd.Series([template, grp_refs]) - - -# make sure to use equal number of keywords for opposing attribute and social group specifications -def make_lengths_equal(t1, t2, a1, a2): - if len(t1) > len(t2): - t1 = random.sample(t1, len(t2)) - elif len(t1) < len(t2): - t2 = random.sample(t2, len(t1)) - - if len(a1) > len(a2): - a1 = random.sample(a1, len(a2)) - elif len(a1) < len(a2): - a2 = random.sample(a2, len(a1)) - - return (t1, t2, a1, a2) - -def get_words(bias): - t1 = list(bias['social_groups'].items())[0][1] - t2 = list(bias['social_groups'].items())[1][1] - a1 = list(bias['attributes'].items())[0][1] - a2 = list(bias['attributes'].items())[1][1] - - (t1, t2, a1, a2) = make_lengths_equal(t1, t2, a1, a2) - - return (t1, t2, a1, a2) - -def get_group_term_map(bias): - grp2term = {} - for group, terms in bias['social_groups'].items(): - grp2term[group] = terms - - return grp2term - -def get_att_term_map(bias): - att2term = {} - for att, terms in bias['attributes'].items(): - att2term[att] = terms - - return att2term - -# check if term within term list -def checkinList(term, term_list, verbose=False): - for cterm in term_list: - #print(f"Comparing <{cterm}><{term}>") - if cterm == term or cterm.replace(" ","-") == term.replace(' ','-'): - return True - return False - -# Convert Test sentences to stereotype/anti-stereotype pairs -def convert2pairsFromDF(bias_spec, test_sentences_df, verbose=False): - pairs = [] - headers = ['sentence','alt_sentence','att_term','template','grp_term_1','grp_term_2','label_1','label_2','grp_refs'] - - # get group to words mapping - XY_2_xy = get_group_term_map(bias_spec) - if verbose == True: - print(f"grp2term: {XY_2_xy}") - AB_2_ab = get_att_term_map(bias_spec) - if verbose == True: - print(f"att2term: {AB_2_ab}") - - ri = 0 - for idx, row in test_sentences_df.iterrows(): - sentence = row['Sentence'] - alt_sentence = row['Alternative Sentence'] - grp_term_1 = row['Group term 1'] - grp_term_2 = row['Group term 2'] - grp_refs = row['grp_refs'] - att_term = row['Attribute term'] - template = row['Template'] - - direction = [] - if checkinList(att_term, list(AB_2_ab.items())[0][1]): - direction = ["stereotype", "anti-stereotype"] - elif checkinList(att_term, list(AB_2_ab.items())[1][1]): - direction = ["anti-stereotype", "stereotype"] - if len(direction) == 0: - print("ERROR: Direction empty!") - checkinList(att_term, list(AB_2_ab.items())[0][1], verbose=True) - checkinList(att_term, list(AB_2_ab.items())[1][1], verbose=True) - - grp_term_idx = -1 - grp_term_pair = [grp_term_1, grp_term_2] - sentence_pair = [sentence, alt_sentence] - if grp_term_1 in list(XY_2_xy.items())[0][1]: - if grp_term_2 not in list(XY_2_xy.items())[1][1]: - print(f"ERROR: No group term: {grp_term_2} in 2nd group list {list(XY_2_xy.items())[1][1]}") - - elif grp_term_1 in list(XY_2_xy.items())[1][1]: - if grp_term_2 not in list(XY_2_xy.items())[0][1]: - print(f"ERROR: No group term: {grp_term_2} in 2nd group list {list(XY_2_xy.items())[0][1]}") - direction.reverse() - #sentence_pair.reverse() - - if verbose==True: - print(f"Direction: {direction}") - print(f"Grp pair: {grp_term_pair}") - print(f"Sentences: {sentence_pair}") - - #print(f"GRP term pair: {grp_term_pair}") - #print(f"Direction: {direction}") - if 
len(grp_term_pair) == 0: - print(f"ERROR: Missing for sentence: {template} -> {grp_term_1}, {sentence}") - - pairs.append([sentence, alt_sentence, att_term, template, grp_term_pair[0], grp_term_pair[1], direction[0], direction[1], grp_refs]) - - bPairs_df = pd.DataFrame(pairs, columns=headers) - #bPairs_df = bPairs_df.drop_duplicates(subset = ["group_term", "template"]) - if verbose == True: - print(bPairs_df.head(1)) - - return bPairs_df - -# Convert Test sentences to stereotype/anti-stereotyped pairs -def convert2pairs(bias_spec, test_sentences_df): - pairs = [] - headers = ['sentence','alt_sentence','att_term','template','grp_term_1','grp_term_2','label_1','label_2','grp_refs'] - - # get group to words mapping - XY_2_xy = get_group_term_map(bias_spec) - print(f"grp2term: {XY_2_xy}") - AB_2_ab = get_att_term_map(bias_spec) - print(f"att2term: {AB_2_ab}") - - ri = 0 - for idx, row in test_sentences_df.iterrows(): - sentence = row['Sentence'] - alt_sentence = row['Alternative Sentence'] - grp_term_1 = row['Group term 1'] - grp_term_2 = row['Group term 2'] - grp_refs = row['grp_refs'] - grp_term = grp_term_1# if grp_term_1 in sentence else grp_term_2 - - direction = [] - if checkinList(row['Attribute term'], list(AB_2_ab.items())[0][1]): - direction = ["stereotype", "anti-stereotype"] - elif checkinList(row['Attribute term'], list(AB_2_ab.items())[1][1]): - direction = ["anti-stereotype", "stereotype"] - if len(direction) == 0: - print("Direction empty!") - checkinList(row['Attribute term'], list(AB_2_ab.items())[0][1], verbose=True) - checkinList(row['Attribute term'], list(AB_2_ab.items())[1][1], verbose=True) - raise gr.Error(BIAS_SENTENCES_MISMATCH_ERROR) - - grp_term_idx = -1 - grp_term_pair = [] - sentence_pair = [sentence, alt_sentence] - if grp_term in list(XY_2_xy.items())[0][1]: - grp_term_idx = list(XY_2_xy.items())[0][1].index(grp_term) - try: - grp_term_pair = [grp_term, list(XY_2_xy.items())[1][1][grp_term_idx]] - except IndexError: - print(f"Index {grp_term_idx} not found in list {list(XY_2_xy.items())[1][1]}, choosing random...") - grp_term_idx = random.randint(0, len(list(XY_2_xy.items())[1][1])-1) - print(f"New group term idx: {grp_term_idx} for list {list(XY_2_xy.items())[1][1]}") - grp_term_pair = [grp_term, list(XY_2_xy.items())[1][1][grp_term_idx]] - - elif grp_term in list(XY_2_xy.items())[1][1]: - grp_term_idx = list(XY_2_xy.items())[1][1].index(grp_term) - try: - grp_term_pair = [grp_term, list(XY_2_xy.items())[0][1][grp_term_idx]] - except IndexError: - print(f"Index {grp_term_idx} not found in list {list(XY_2_xy.items())[0][1]}, choosing random...") - grp_term_idx = random.randint(0, len(list(XY_2_xy.items())[0][1])-1) - print(f"New group term idx: {grp_term_idx} for list {list(XY_2_xy.items())[0][1]}") - grp_term_pair = [grp_term, list(XY_2_xy.items())[0][1][grp_term_idx]] - - direction.reverse() - #sentence_pair.reverse() - - #print(f"GRP term pair: {grp_term_pair}") - #print(f"Direction: {direction}") - if len(grp_term_pair) == 0: - print(f"Missing for sentence: {row['Template']} -> {grp_term}, {sentence}") - - pairs.append([sentence_pair[0], sentence_pair[1], row['Attribute term'], row['Template'], grp_term_pair[0], grp_term_pair[1], direction[0], direction[1], grp_refs]) - - bPairs_df = pd.DataFrame(pairs, columns=headers) - #bPairs_df = bPairs_df.drop_duplicates(subset = ["group_term", "template"]) - print(bPairs_df.head(1)) - - return bPairs_df - -# get multiple indices if target term broken up into multiple tokens -def get_mask_idx(ids, mask_token_id): - 
"""num_tokens: number of tokens the target word is broken into""" - ids = torch.Tensor.tolist(ids)[0] - return ids.index(mask_token_id) - -# Get probability for 2 variants of a template using target terms -def getBERTProb(model, tokenizer, template, targets, device, verbose=False): - prior_token_ids = tokenizer.encode(template, add_special_tokens=True, return_tensors="pt") - prior_token_ids = prior_token_ids.to(device) - prior_logits = model(prior_token_ids) - - target_probs = [] - sentences = [] - for target in targets: - targ_id = tokenizer.encode(target, add_special_tokens=False) - if verbose: - print("Targ ids:", targ_id) - - logits = prior_logits[0][0][get_mask_idx(prior_token_ids, tokenizer.mask_token_id)][targ_id] - if verbose: - print("Logits:", logits) - - target_probs.append(np.mean(logits.cpu().numpy())) - sentences.append(template.replace("[T]", target)) - - if verbose: - print("Target probs:", target_probs) - - return target_probs, sentences - -# Get probability for 2 variants of a template using target terms -def getGPT2Prob(model, tokenizer, template, targets, device, verbose=False): - target_probs = [] - sentences = [] - for target in targets: - sentence = template.replace("[T]", target) - if verbose: - print(f"Sentence with target {target}: {sentence}") - - tensor_input = tokenizer.encode(sentence, return_tensors="pt").to(device) - outputs = model(tensor_input, labels=tensor_input) - target_probs.append(outputs.loss.item()) - sentences.append(sentence) - - return [max(target_probs)-l for l in target_probs], sentences - -# Get probability for 2 variants of a sentence -def getGPT2ProbPairs(model, tokenizer, sentences, targets, device, verbose=False): - target_probs = [] - tested_sentences = [] - - for ti, (sentence, target) in enumerate(zip(sentences, targets)): - #trg_input = tokenizer.encode(target, return_tensors="pt").to(device) - #outputs = model(trg_input, labels=trg_input) - #trg_prob = outputs.loss.item() - - # construct target specific template - tensor_input = tokenizer.encode(sentence, return_tensors="pt").to(device) - outputs = model(tensor_input, labels=tensor_input) - target_probs.append(outputs.loss.item())#/(1-trg_prob)) - tested_sentences.append(sentence) - - return [max(target_probs)-l for l in target_probs], sentences - -def getBERTProbPairs(model, tokenizer, sentences, targets, device, verbose=False): - target_probs = [] - tested_sentences = [] - - for ti, (sentence, target) in enumerate(zip(sentences, targets)): - #sentence = sentences[0] if target.lower() in sentences[0].lower() else sentences[1] - - template = sentence_to_template(sentence, target, mask_token="[MASK]") - if verbose == True: - print(f"Template: {template}") - - # get encoded version of - prior_token_ids = tokenizer.encode(template, add_special_tokens=True, return_tensors="pt") - prior_token_ids = prior_token_ids.to(device) - prior_logits = model(prior_token_ids) - - targ_id = tokenizer.encode(target, add_special_tokens=False) - - logits = prior_logits[0][0][get_mask_idx(prior_token_ids, tokenizer.mask_token_id)][targ_id] - - target_probs.append(np.mean(logits.cpu().numpy())) - tested_sentences.append(template.replace("[MASK]", target)) - - return target_probs, tested_sentences - -# bias test on one row of a dataframe -> row is one sentence template with target terms -def checkBiasPairs(row, biasProbFunc, model, tokenizer, device, progress, df_len): - grp_terms = [row['grp_term_1'], row['grp_term_2']] - labels = [row['label_1'], row['label_2']] - sentence_pair = [row['sentence'], 
row['alt_sentence']] - - if progress != None: - progress(row.name/df_len, desc=f"{row['template']}") - - test_res = [0,1] - random.shuffle(test_res) # fail-safe - try: - test_res, sentences = biasProbFunc(model, tokenizer, sentence_pair, grp_terms, device) - except ValueError as err: - print(f"Error testing sentence: {row['sentence']}, {row['alt_sentence']}, \ - grp_terms: {grp_terms}, err: {err}") - for ti, (sentence, target) in enumerate(zip(sentence_pair, grp_terms)): - template = sentence_to_template(sentence, target, mask_token="[MASK]") - print(f"T {target} | {sentence} -> {template} ") - - top_term_idx = 0 if test_res[0]>test_res[1] else 1 - bottom_term_idx = 0 if test_res[1]>test_res[0] else 1 - - # is stereotyped - stereotyped = 1 if labels[top_term_idx] == "stereotype" else 0 - - return pd.Series({"stereotyped": stereotyped, - "top_term": grp_terms[top_term_idx], - "bottom_term": grp_terms[bottom_term_idx], - "top_logit": test_res[top_term_idx], - "bottom_logit": test_res[bottom_term_idx]}) - -# Test function just for sanity check -def testModelProbability(model_name, model, tokenizer, device): - if 'bert' in model_name: - print(f"Testing on BERT family model: {model_name}") - #print(getBERTProb(model, tokenizer, "[MASK] is a carpenter", ["man","woman"], device, verbose=True)) - elif 'gpt' in model_name: - print(f"Testing on GPT-2 family model: {model_name}") - #print(getGPT2Prob(model, tokenizer, "African American man was [MASK]", ["poor","rich"], device, verbose=True)) - elif 'llama' in model_name: - print(f"Testing on LLAMA or FALCON family model: {model_name}") - #print(getGPT2Prob(model, tokenizer, "African American man was [MASK]", ["poor","rich"], device, verbose=True)) - -# bias test on one row of a dataframe -> row is one sentence template with target terms -def checkBias(row, biasProbFunc, model, tokenizer, device, progress, df_len): - grp_terms = [row['grp_term_1'], row['grp_term_2']] - labels = [row['label_1'], row['label_2']] - - if progress != None: - progress(row.name/df_len, desc=f"{row['template']}") - - test_res = [0,1] - random.shuffle(test_res) # fail-safe - try: - test_res, sentences = biasProbFunc(model, tokenizer, row['template'].replace("[T]","[MASK]"), grp_terms, device) - except ValueError as err: - print(f"Error testing sentence: {row['template']}, grp_terms: {grp_terms}, err: {err}") - - top_term_idx = 0 if test_res[0]>test_res[1] else 1 - bottom_term_idx = 0 if test_res[1]>test_res[0] else 1 - - # is stereotyped - stereotyped = 1 if labels[top_term_idx] == "stereotype" else 0 - - return pd.Series({"stereotyped": stereotyped, - "top_term": grp_terms[top_term_idx], - "bottom_term": grp_terms[bottom_term_idx], - "top_logit": test_res[top_term_idx], - "bottom_logit": test_res[bottom_term_idx]}) - -# Sampling attribute -def sampleAttribute(df, att, n_per_att): - att_rows = df.query("group_term == @att") - # copy-paste all gens - no bootstrap - #grp_bal = att_rows - - grp_bal = pd.DataFrame() - if att_rows.shape[0] >= n_per_att: - grp_bal = att_rows.sample(n_per_att) - elif att_rows.shape[0] > 0 and att_rows.shape[0] < n_per_att: - grp_bal = att_rows.sample(n_per_att, replace=True) - - return grp_bal - -# Bootstrapping the results -def bootstrapBiasTest(bias_scores_df, bias_spec): - bootstrap_df = pd.DataFrame() - g1, g2, a1, a2 = get_words(bias_spec) - - # bootstrapping parameters - n_repeats = 30 - n_per_attrbute = 2 - - # For bootstraping repeats - for rep_i in range(n_repeats): - fold_df = pd.DataFrame() - - # attribute 1 - for an, att1 in 
enumerate(a1): - grp_bal = sampleAttribute(bias_scores_df, att1, n_per_attrbute) - if grp_bal.shape[0] == 0: - grp_bal = sampleAttribute(bias_scores_df, att1.replace(" ","-"), n_per_attrbute) - - if grp_bal.shape[0] > 0: - fold_df = pd.concat([fold_df, grp_bal.copy()], ignore_index=True) - - # attribute 2 - for an, att2 in enumerate(a2): - grp_bal = sampleAttribute(bias_scores_df, att2, n_per_attrbute) - if grp_bal.shape[0] == 0: - grp_bal = sampleAttribute(bias_scores_df, att2.replace(" ","-"), n_per_attrbute) - - if grp_bal.shape[0] > 0: - fold_df = pd.concat([fold_df, grp_bal.copy()], ignore_index=True) - - #if fold_df.shape[0]>0: - # unnorm_model, norm_model, perBias_df = biasStatsFold(test_df) - # print(f"Gen: {gen_model}, Test: {test_model} [{rep_i}], df-size: {test_df.shape[0]}, Model bias: {norm_model:0.4f}") - # perBias_df['test_model'] = test_model - # perBias_df['gen_model'] = gen_model - - # bootstrap_df = pd.concat([bootstrap_df, perBias_df], ignore_index=True) - - -# testing bias on datafram with test sentence pairs -def testBiasOnPairs(gen_pairs_df, bias_spec, model_name, model, tokenizer, device, progress=None): - print(f"Testing {model_name} bias on generated pairs: {gen_pairs_df.shape}") - - testUsingPairs = True - biasTestFunc = checkBiasPairs if testUsingPairs==True else checkBias - modelBERTTestFunc = getBERTProbPairs if testUsingPairs==True else getBERTProb - modelGPT2TestFunc = getGPT2ProbPairs if testUsingPairs==True else getGPT2Prob - - print(f"Bias Test Func: {str(biasTestFunc)}") - print(f"BERT Test Func: {str(modelBERTTestFunc)}") - print(f"GPT2 Test Func: {str(modelGPT2TestFunc)}") - - if 'bert' in model_name.lower(): - print(f"Testing on BERT family model: {model_name}") - gen_pairs_df[['stereotyped','top_term','bottom_term','top_logit','bottom_logit']] = gen_pairs_df.progress_apply( - biasTestFunc, biasProbFunc=modelBERTTestFunc, model=model, tokenizer=tokenizer, device=device, progress=progress, df_len=gen_pairs_df.shape[0], axis=1) - - elif 'gpt' in model_name.lower(): - print(f"Testing on GPT-2 family model: {model_name}") - gen_pairs_df[['stereotyped','top_term','bottom_term','top_logit','bottom_logit']] = gen_pairs_df.progress_apply( - biasTestFunc, biasProbFunc=modelGPT2TestFunc, model=model, tokenizer=tokenizer, device=device, progress=progress, df_len=gen_pairs_df.shape[0], axis=1) - - elif 'llama' in model_name.lower() or 'falcon' in model_name.lower(): - print(f"Testing on LLAMA or FALCON family model: {model_name}") - gen_pairs_df[['stereotyped','top_term','bottom_term','top_logit','bottom_logit']] = gen_pairs_df.progress_apply( - biasTestFunc, biasProbFunc=modelGPT2TestFunc, model=model, tokenizer=tokenizer, device=device, progress=progress, df_len=gen_pairs_df.shape[0], axis=1) - - # Bootstrap - print(f"BIAS ON PAIRS: {gen_pairs_df}") - - #bootstrapBiasTest(gen_pairs_df, bias_spec) - - - grp_df = gen_pairs_df.groupby(['att_term'])['stereotyped'].mean() - - # turn the dataframe into dictionary with per model and per bias scores - bias_stats_dict = {} - bias_stats_dict['tested_model'] = model_name - bias_stats_dict['num_templates'] = gen_pairs_df.shape[0] - bias_stats_dict['model_bias'] = round(grp_df.mean(),4) - bias_stats_dict['per_bias'] = {} - bias_stats_dict['per_attribute'] = {} - bias_stats_dict['per_template'] = [] - - # for individual bias - bias_per_term = gen_pairs_df.groupby(["att_term"])['stereotyped'].mean() - bias_stats_dict['per_bias'] = round(bias_per_term.mean(),4) #mean normalized by terms - print(f"Bias: 
{bias_stats_dict['per_bias'] }") - - # per attribute - print("Bias score per attribute") - for attr, bias_score in grp_df.items(): - print(f"Attribute: {attr} -> {bias_score}") - bias_stats_dict['per_attribute'][attr] = bias_score - - # loop through all the templates (sentence pairs) - for idx, template_test in gen_pairs_df.iterrows(): - bias_stats_dict['per_template'].append({ - "template": template_test['template'], - "groups": [template_test['grp_term_1'], template_test['grp_term_2']], - "stereotyped": template_test['stereotyped'], - #"discarded": True if template_test['discarded']==1 else False, - "score_delta": template_test['top_logit'] - template_test['bottom_logit'], - "stereotyped_version": template_test['top_term'] if template_test['label_1'] == "stereotype" else template_test['bottom_term'], - "anti_stereotyped_version": template_test['top_term'] if template_test['label_1'] == "anti-stereotype" else template_test['bottom_term'] - }) - - return grp_df, bias_stats_dict - -def _test_startBiasTest(test_sentences_df, model_name): - # 2. convert to templates - test_sentences_df['Template'] = test_sentences_df.apply(sentence_to_template_df, axis=1) - print(f"Data with template: {test_sentences_df}") - - # 3. convert to pairs - test_pairs_df = convert2pairsFromDF(bias_spec, test_sentences_df) - print(f"Test pairs: {test_pairs_df.head(3)}") - - # 4. get the per sentence bias scores - print(f"Test model name: {model_name}") - device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - print(f"Device: {device}") - tested_model, tested_tokenizer = _getModelSafe(model_name, device) - #print(f"Mask token id: {tested_toknizer.mask_token_id}") - if tested_tokenizer == None: - print("Tokanizer is empty!!!") - if tested_model == None: - print("Model is empty!!!") - - # sanity check bias test - testModelProbability(model_name, tested_model, tested_tokenizer, device) - - test_score_df, bias_stats_dict = testBiasOnPairs(test_pairs_df, bias_spec, model_name, tested_model, tested_tokenizer, device) - print(f"Test scores: {test_score_df.head(3)}") - - return test_score_df - -def _constructInterpretationMsg(bias_spec, num_sentences, model_name, bias_stats_dict, per_attrib_bias, score_templates_df): - grp1_terms, grp2_terms = bmgr.getSocialGroupTerms(bias_spec) - att1_terms, att2_terms = bmgr.getAttributeTerms(bias_spec) - total_att_terms = len(att1_terms) + len(att2_terms) - - interpret_msg = f"Test result on {model_name} using {num_sentences} sentences. " - if num_sentences < total_att_terms or num_sentences < 20: - interpret_msg += "We recommend generating more sentences to get more robust estimates!
" - else: - interpret_msg += "
" - - attrib_by_score = dict(sorted(per_attrib_bias.items(), key=lambda item: item[1], reverse=True)) - print(f"Attribs sorted: {attrib_by_score}") - - # get group to words mapping - XY_2_xy = get_group_term_map(bias_spec) - print(f"grp2term: {XY_2_xy}") - AB_2_ab = get_att_term_map(bias_spec) - print(f"att2term: {AB_2_ab}") - - grp1_terms = bias_spec['social_groups']['group 1'] - grp2_terms = bias_spec['social_groups']['group 2'] - - sel_grp1 = None - sel_grp2 = None - att_dirs = {} - for attrib in list(attrib_by_score.keys()): - att_label = None - if checkinList(attrib, list(AB_2_ab.items())[0][1]): - att_label = 0 - elif checkinList(attrib, list(AB_2_ab.items())[1][1]): - att_label = 1 - else: - print("Error!") - - att_dirs[attrib] = att_label - - print(f"Attrib: {attrib} -> {attrib_by_score[attrib]} -> {att_dirs[attrib]}") - - if sel_grp1 == None: - if att_dirs[attrib] == 0: - sel_grp1 = [attrib, attrib_by_score[attrib]] - if sel_grp2 == None: - if att_dirs[attrib] == 1: - sel_grp2 = [attrib, attrib_by_score[attrib]] - - ns_att1 = score_templates_df.query(f"Attribute == '{sel_grp1[0]}'").shape[0] - #{ns_att1} - grp1_str = ', '.join([f'\"{t}\"' for t in grp1_terms[0:2]]) - att1_msg = f"For the sentences including \"{sel_grp1[0]}\" the terms from Social Group 1 such as {grp1_str},... are more probable {sel_grp1[1]*100:2.0f}% of the time. " - print(att1_msg) - - ns_att2 = score_templates_df.query(f"Attribute == '{sel_grp2[0]}'").shape[0] - #{ns_att2} - grp2_str = ', '.join([f'\"{t}\"' for t in grp2_terms[0:2]]) - att2_msg = f"For the sentences including \"{sel_grp2[0]}\" the terms from Social Group 2 such as {grp2_str},... are more probable {sel_grp2[1]*100:2.0f}% of the time. " - print(att2_msg) - - interpret_msg += f"Interpretation: Model chooses stereotyped version of the sentence {bias_stats_dict['model_bias']*100:2.0f}% of time. " - #interpret_msg += f"It suggests that for the sentences including \"{list(per_attrib_bias.keys())[0]}\" the social group terms \"{bias_spec['social_groups']['group 1'][0]}\", ... are more probable {list(per_attrib_bias.values())[0]*100:2.0f}% of the time. " - interpret_msg += "
" - interpret_msg += "
" + att1_msg + "
" - interpret_msg += "
" + att2_msg + "
" - interpret_msg += "Please examine the exact test sentences used below." - interpret_msg += "
More details about Stereotype Score metric: Nadeem'20" - - return interpret_msg - - -if __name__ == '__main__': - print("Testing bias manager...") - - bias_spec = { - "social_groups": { - "group 1": ["brother", "father"], - "group 2": ["sister", "mother"], - }, - "attributes": { - "attribute 1": ["science", "technology"], - "attribute 2": ["poetry", "art"] - } - } - - sentence_list = rq_mgr._getSavedSentences(bias_spec) - sentence_df = pd.DataFrame(sentence_list, columns=["Test sentence","Group term","Attribute term"]) - print(sentence_df) - - _test_startBiasTest(sentence_df, 'bert-base-uncased') - diff --git a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/Training-LoRAs.md b/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/Training-LoRAs.md deleted file mode 100644 index 83e6d5a7251eea080cd7dfe8d19a2e42d6d3a822..0000000000000000000000000000000000000000 --- a/spaces/AnishKumbhar/ChatBot/text-generation-webui-main/docs/Training-LoRAs.md +++ /dev/null @@ -1,174 +0,0 @@ -## Training Your Own LoRAs - -The WebUI seeks to make training your own LoRAs as easy as possible. It comes down to just a few simple steps: - -### **Step 1**: Make a plan. -- What base model do you want to use? The LoRA you make has to be matched up to a single architecture (eg LLaMA-13B) and cannot be transferred to others (eg LLaMA-7B, StableLM, etc. would all be different). Derivatives of the same model (eg Alpaca finetune of LLaMA-13B) might be transferrable, but even then it's best to train exactly on what you plan to use. -- What model format do you want? At time of writing, 8-bit models are most stable, and 4-bit are supported but experimental. In the near future it is likely that 4-bit will be the best option for most users. -- What are you training it on? Do you want it to learn real information, a simple format, ...? - -### **Step 2**: Gather a dataset. -- If you use a dataset similar to the [Alpaca](https://github.com/gururise/AlpacaDataCleaned/blob/main/alpaca_data_cleaned.json) format, that is natively supported by the `Formatted Dataset` input in the WebUI, with premade formatter options. -- If you use a dataset that isn't matched to Alpaca's format, but uses the same basic JSON structure, you can make your own format file by copying `training/formats/alpaca-format.json` to a new file and [editing its content](#format-files). -- If you can get the dataset into a simple text file, that works too! You can train using the `Raw text file` input option. - - This means you can for example just copy/paste a chatlog/documentation page/whatever you want, shove it in a plain text file, and train on it. -- If you use a structured dataset not in this format, you may have to find an external way to convert it - or open an issue to request native support. - -### **Step 3**: Do the training. -- **3.1**: Load the WebUI, and your model. - - Make sure you don't have any LoRAs already loaded (unless you want to train for multi-LoRA usage). -- **3.2**: Open the `Training` tab at the top, `Train LoRA` sub-tab. -- **3.3**: Fill in the name of the LoRA, select your dataset in the dataset options. -- **3.4**: Select other parameters to your preference. See [parameters below](#parameters). -- **3.5**: click `Start LoRA Training`, and wait. - - It can take a few hours for a large dataset, or just a few minute if doing a small run. - - You may want to monitor your [loss value](#loss) while it goes. - -### **Step 4**: Evaluate your results. -- Load the LoRA under the Models Tab. 
-- You can go test-drive it on the `Text generation` tab, or you can use the `Perplexity evaluation` sub-tab of the `Training` tab. -- If you used the `Save every n steps` option, you can grab prior copies of the model from sub-folders within the LoRA model's folder and try them instead. - -### **Step 5**: Re-run if you're unhappy. -- Make sure to unload the LoRA before training it. -- You can simply resume a prior run - use `Copy parameters from` to select your LoRA, and edit parameters. Note that you cannot change the `Rank` of an already created LoRA. - - If you want to resume from a checkpoint saved along the way, simply copy the contents of the checkpoint folder into the LoRA's folder. - - (Note: `adapter_model.bin` is the important file that holds the actual LoRA content). - - This will start Learning Rate and Steps back to the start. If you want to resume as if you were midway through, you can adjust your Learning Rate to the last reported LR in logs and reduce your epochs. -- Or, you can start over entirely if you prefer. -- If your model is producing corrupted outputs, you probably need to start over and use a lower Learning Rate. -- If your model isn't learning detailed information but you want it to, you might need to just run more epochs, or you might need a higher Rank. -- If your model is enforcing a format you didn't want, you may need to tweak your dataset, or start over and not train as far. - -## Format Files - -If using JSON formatted datasets, they are presumed to be in the following approximate format: - -```json -[ - { - "somekey": "somevalue", - "key2": "value2" - }, - { - // etc - } -] -``` - -Where the keys (eg `somekey`, `key2` above) are standardized, and relatively consistent across the dataset, and the values (eg `somevalue`, `value2`) contain the content actually intended to be trained. - -For Alpaca, the keys are `instruction`, `input`, and `output`, wherein `input` is sometimes blank. - -A simple format file for Alpaca to be used as a chat bot is: - -```json -{ - "instruction,output": "User: %instruction%\nAssistant: %output%", - "instruction,input,output": "User: %instruction%: %input%\nAssistant: %output%" -} -``` - -Note that the keys (eg `instruction,output`) are a comma-separated list of dataset keys, and the values are a simple string that use those keys with `%%`. - -So for example if a dataset has `"instruction": "answer my question"`, then the format file's `User: %instruction%\n` will be automatically filled in as `User: answer my question\n`. - -If you have different sets of key inputs, you can make your own format file to match it. This format-file is designed to be as simple as possible to enable easy editing to match your needs. - -## Raw Text File Settings - -When using raw text files as your dataset, the text is automatically split into chunks based on your `Cutoff Length` you get a few basic options to configure them. -- `Overlap Length` is how much to overlap chunks by. Overlapping chunks helps prevent the model from learning strange mid-sentence cuts, and instead learn continual sentences that flow from earlier text. -- `Prefer Newline Cut Length` sets a maximum distance in characters to shift the chunk cut towards newlines. Doing this helps prevent lines from starting or ending mid-sentence, preventing the model from learning to cut off sentences randomly. -- `Hard Cut String` sets a string that indicates there must be a hard cut without overlap. This defaults to `\n\n\n`, meaning 3 newlines. 
No trained chunk will ever contain this string. This allows you to insert unrelated sections of text in the same text file, but still ensure the model won't be taught to randomly change the subject. - -## Parameters - -The basic purpose and function of each parameter is documented on-page in the WebUI, so read through them in the UI to understand your options. - -That said, here's a guide to the most important parameter choices you should consider: - -### VRAM - -- First, you must consider your VRAM availability. - - Generally, under default settings, VRAM usage for training with default parameters is very close to when generating text (with 1000+ tokens of context) (ie, if you can generate text, you can train LoRAs). - - Note: worse by default in the 4-bit monkeypatch currently. Reduce `Micro Batch Size` to `1` to restore this to expectations. - - If you have VRAM to spare, setting higher batch sizes will use more VRAM and get you better quality training in exchange. - - If you have large data, setting a higher cutoff length may be beneficial, but will cost significant VRAM. If you can spare some, set your batch size to `1` and see how high you can push your cutoff length. - - If you're low on VRAM, reducing batch size or cutoff length will of course improve that. - - Don't be afraid to just try it and see what happens. If it's too much, it will just error out, and you can lower settings and try again. - -### Rank - -- Second, you want to consider the amount of learning you want. - - For example, you may wish to just learn a dialogue format (as in the case of Alpaca) in which case setting a low `Rank` value (32 or lower) works great. - - Or, you might be training on project documentation you want the bot to understand and be able to understand questions about, in which case the higher the rank, the better. - - Generally, higher Rank = more precise learning = more total content learned = more VRAM usage while training. - -### Learning Rate and Epochs - -- Third, how carefully you want it to be learned. - - In other words, how okay or not you are with the model losing unrelated understandings. - - You can control this with 3 key settings: the Learning Rate, its scheduler, and your total epochs. - - The learning rate controls how much change is made to the model by each token it sees. - - It's in scientific notation normally, so for example `3e-4` means `3 * 10^-4` which is `0.0003`. The number after `e-` controls how many `0`s are in the number. - - Higher values let training run faster, but also are more likely to corrupt prior data in the model. - - You essentially have two variables to balance: the LR, and Epochs. - - If you make LR higher, you can set Epochs equally lower to match. High LR + low epochs = very fast, low quality training. - - If you make LR low, set epochs high. Low LR + high epochs = slow but high-quality training. - - The scheduler controls change-over-time as you train - it starts high, and then goes low. This helps balance getting data in, and having decent quality, at the same time. - - You can see graphs of the different scheduler options [in the HuggingFace docs here](https://moon-ci-docs.huggingface.co/docs/transformers/pr_1/en/main_classes/optimizer_schedules#transformers.SchedulerType) - -## Loss - -When you're running training, the WebUI's console window will log reports that include, among other things, a numeric value named `Loss`. It will start as a high number, and gradually get lower and lower as it goes. 
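For intuition, here is a minimal sketch (not the WebUI's own training loop; the model name and text below are placeholders) of where that number comes from: for a causal language model, the reported loss is the mean cross-entropy of its next-token predictions over a batch, so lower values mean the model assigns higher probability to your training text.

```python
# Minimal illustration of the value logged as "Loss" during training.
# Assumes the `transformers` and `torch` packages; "gpt2" is only a stand-in
# for whatever base model you actually loaded in the WebUI.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

text = "User: answer my question\nAssistant: here is an answer"
batch = tokenizer(text, return_tensors="pt")

# For causal LM training the labels are the input ids themselves
# (the library shifts them internally to predict the next token).
outputs = model(**batch, labels=batch["input_ids"])
loss = outputs.loss.item()  # this is the kind of number reported in the console
print(f"loss = {loss:.3f}, perplexity = {torch.exp(outputs.loss).item():.1f}")
```

The absolute value is only meaningful relative to your own data; the guidance below on when to stop training still applies.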
- -"Loss" in the world of AI training theoretically means "how close is the model to perfect", with `0` meaning "absolutely perfect". This is calculated by measuring the difference between the model outputting exactly the text you're training it to output, and what it actually outputs. - -In practice, a good LLM should have a very complex variable range of ideas running in its artificial head, so a loss of `0` would indicate that the model has broken and forgotten to how think about anything other than what you trained it. - -So, in effect, Loss is a balancing game: you want to get it low enough that it understands your data, but high enough that it isn't forgetting everything else. Generally, if it goes below `1.0`, it's going to start forgetting its prior memories, and you should stop training. In some cases you may prefer to take it as low as `0.5` (if you want it to be very very predictable). Different goals have different needs, so don't be afraid to experiment and see what works best for you. - -Note: if you see Loss start at or suddenly jump to exactly `0`, it is likely something has gone wrong in your training process (eg model corruption). - -## Note: 4-Bit Monkeypatch - -The [4-bit LoRA monkeypatch](GPTQ-models-(4-bit-mode).md#using-loras-in-4-bit-mode) works for training, but has side effects: -- VRAM usage is higher currently. You can reduce the `Micro Batch Size` to `1` to compensate. -- Models do funky things. LoRAs apply themselves, or refuse to apply, or spontaneously error out, or etc. It can be helpful to reload base model or restart the WebUI between training/usage to minimize chances of anything going haywire. -- Loading or working with multiple LoRAs at the same time doesn't currently work. -- Generally, recognize and treat the monkeypatch as the dirty temporary hack it is - it works, but isn't very stable. It will get better in time when everything is merged upstream for full official support. - -## Legacy notes - -LoRA training was contributed by [mcmonkey4eva](https://github.com/mcmonkey4eva) in PR [#570](https://github.com/oobabooga/text-generation-webui/pull/570). - -### Using the original alpaca-lora code - -Kept here for reference. The Training tab has much more features than this method. - -``` -conda activate textgen -git clone https://github.com/tloen/alpaca-lora -``` - -Edit those two lines in `alpaca-lora/finetune.py` to use your existing model folder instead of downloading everything from decapoda: - -``` -model = LlamaForCausalLM.from_pretrained( - "models/llama-7b", - load_in_8bit=True, - device_map="auto", -) -tokenizer = LlamaTokenizer.from_pretrained( - "models/llama-7b", add_eos_token=True -) -``` - -Run the script with: - -``` -python finetune.py -``` - -It just works. It runs at 22.32s/it, with 1170 iterations in total, so about 7 hours and a half for training a LoRA. RTX 3090, 18153MiB VRAM used, drawing maximum power (350W, room heater mode). diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/tensorboard.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/tensorboard.py deleted file mode 100644 index 4dd5011dc08def6c09eef86d3ce5b124c9fc5372..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/tensorboard.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import os.path as osp - -from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version -from ...dist_utils import master_only -from ..hook import HOOKS -from .base import LoggerHook - - -@HOOKS.register_module() -class TensorboardLoggerHook(LoggerHook): - - def __init__(self, - log_dir=None, - interval=10, - ignore_last=True, - reset_flag=False, - by_epoch=True): - super(TensorboardLoggerHook, self).__init__(interval, ignore_last, - reset_flag, by_epoch) - self.log_dir = log_dir - - @master_only - def before_run(self, runner): - super(TensorboardLoggerHook, self).before_run(runner) - if (TORCH_VERSION == 'parrots' - or digit_version(TORCH_VERSION) < digit_version('1.1')): - try: - from tensorboardX import SummaryWriter - except ImportError: - raise ImportError('Please install tensorboardX to use ' - 'TensorboardLoggerHook.') - else: - try: - from torch.utils.tensorboard import SummaryWriter - except ImportError: - raise ImportError( - 'Please run "pip install future tensorboard" to install ' - 'the dependencies to use torch.utils.tensorboard ' - '(applicable to PyTorch 1.1 or higher)') - - if self.log_dir is None: - self.log_dir = osp.join(runner.work_dir, 'tf_logs') - self.writer = SummaryWriter(self.log_dir) - - @master_only - def log(self, runner): - tags = self.get_loggable_tags(runner, allow_text=True) - for tag, val in tags.items(): - if isinstance(val, str): - self.writer.add_text(tag, val, self.get_iter(runner)) - else: - self.writer.add_scalar(tag, val, self.get_iter(runner)) - - @master_only - def after_run(self, runner): - self.writer.close() diff --git a/spaces/Ariharasudhan/YoloV5/models/yolo.py b/spaces/Ariharasudhan/YoloV5/models/yolo.py deleted file mode 100644 index ed21c067ee9337bf534bfc908574362a61ad3207..0000000000000000000000000000000000000000 --- a/spaces/Ariharasudhan/YoloV5/models/yolo.py +++ /dev/null @@ -1,391 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -YOLO-specific modules - -Usage: - $ python models/yolo.py --cfg yolov5s.yaml -""" - -import argparse -import contextlib -import os -import platform -import sys -from copy import deepcopy -from pathlib import Path - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH -if platform.system() != 'Windows': - ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -from models.common import * -from models.experimental import * -from utils.autoanchor import check_anchor_order -from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args -from utils.plots import feature_visualization -from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device, - time_sync) - -try: - import thop # for FLOPs computation -except ImportError: - thop = None - - -class Detect(nn.Module): - # YOLOv5 Detect head for detection models - stride = None # strides computed during build - dynamic = False # force grid reconstruction - export = False # export mode - - def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer - super().__init__() - self.nc = nc # number of classes - self.no = nc + 5 # number of outputs per anchor - self.nl = len(anchors) # number of detection layers - self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.empty(0) for _ in range(self.nl)] # init grid - self.anchor_grid = [torch.empty(0) for _ in range(self.nl)] # init anchor grid - self.register_buffer('anchors', 
torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) - self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv - self.inplace = inplace # use inplace ops (e.g. slice assignment) - - def forward(self, x): - z = [] # inference output - for i in range(self.nl): - x[i] = self.m[i](x[i]) # conv - bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) - x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() - - if not self.training: # inference - if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: - self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) - - if isinstance(self, Segment): # (boxes + masks) - xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4) - xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i] # xy - wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i] # wh - y = torch.cat((xy, wh, conf.sigmoid(), mask), 4) - else: # Detect (boxes only) - xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4) - xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy - wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh - y = torch.cat((xy, wh, conf), 4) - z.append(y.view(bs, self.na * nx * ny, self.no)) - - return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x) - - def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')): - d = self.anchors[i].device - t = self.anchors[i].dtype - shape = 1, self.na, ny, nx, 2 # grid shape - y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t) - yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility - grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. 
y = 2.0 * x - 0.5 - anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape) - return grid, anchor_grid - - -class Segment(Detect): - # YOLOv5 Segment head for segmentation models - def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True): - super().__init__(nc, anchors, ch, inplace) - self.nm = nm # number of masks - self.npr = npr # number of protos - self.no = 5 + nc + self.nm # number of outputs per anchor - self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv - self.proto = Proto(ch[0], self.npr, self.nm) # protos - self.detect = Detect.forward - - def forward(self, x): - p = self.proto(x[0]) - x = self.detect(self, x) - return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1]) - - -class BaseModel(nn.Module): - # YOLOv5 base model - def forward(self, x, profile=False, visualize=False): - return self._forward_once(x, profile, visualize) # single-scale inference, train - - def _forward_once(self, x, profile=False, visualize=False): - y, dt = [], [] # outputs - for m in self.model: - if m.f != -1: # if not from previous layer - x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers - if profile: - self._profile_one_layer(m, x, dt) - x = m(x) # run - y.append(x if m.i in self.save else None) # save output - if visualize: - feature_visualization(x, m.type, m.i, save_dir=visualize) - return x - - def _profile_one_layer(self, m, x, dt): - c = m == self.model[-1] # is final layer, copy input as inplace fix - o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs - t = time_sync() - for _ in range(10): - m(x.copy() if c else x) - dt.append((time_sync() - t) * 100) - if m == self.model[0]: - LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module") - LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') - if c: - LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") - - def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers - LOGGER.info('Fusing layers... 
') - for m in self.model.modules(): - if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'): - m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv - delattr(m, 'bn') # remove batchnorm - m.forward = m.forward_fuse # update forward - self.info() - return self - - def info(self, verbose=False, img_size=640): # print model information - model_info(self, verbose, img_size) - - def _apply(self, fn): - # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers - self = super()._apply(fn) - m = self.model[-1] # Detect() - if isinstance(m, (Detect, Segment)): - m.stride = fn(m.stride) - m.grid = list(map(fn, m.grid)) - if isinstance(m.anchor_grid, list): - m.anchor_grid = list(map(fn, m.anchor_grid)) - return self - - -class DetectionModel(BaseModel): - # YOLOv5 detection model - def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes - super().__init__() - if isinstance(cfg, dict): - self.yaml = cfg # model dict - else: # is *.yaml - import yaml # for torch hub - self.yaml_file = Path(cfg).name - with open(cfg, encoding='ascii', errors='ignore') as f: - self.yaml = yaml.safe_load(f) # model dict - - # Define model - ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels - if nc and nc != self.yaml['nc']: - LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") - self.yaml['nc'] = nc # override yaml value - if anchors: - LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}') - self.yaml['anchors'] = round(anchors) # override yaml value - self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist - self.names = [str(i) for i in range(self.yaml['nc'])] # default names - self.inplace = self.yaml.get('inplace', True) - - # Build strides, anchors - m = self.model[-1] # Detect() - if isinstance(m, (Detect, Segment)): - s = 256 # 2x min stride - m.inplace = self.inplace - forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x) - m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))]) # forward - check_anchor_order(m) - m.anchors /= m.stride.view(-1, 1, 1) - self.stride = m.stride - self._initialize_biases() # only run once - - # Init weights, biases - initialize_weights(self) - self.info() - LOGGER.info('') - - def forward(self, x, augment=False, profile=False, visualize=False): - if augment: - return self._forward_augment(x) # augmented inference, None - return self._forward_once(x, profile, visualize) # single-scale inference, train - - def _forward_augment(self, x): - img_size = x.shape[-2:] # height, width - s = [1, 0.83, 0.67] # scales - f = [None, 3, None] # flips (2-ud, 3-lr) - y = [] # outputs - for si, fi in zip(s, f): - xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) - yi = self._forward_once(xi)[0] # forward - # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save - yi = self._descale_pred(yi, fi, si, img_size) - y.append(yi) - y = self._clip_augmented(y) # clip augmented tails - return torch.cat(y, 1), None # augmented inference, train - - def _descale_pred(self, p, flips, scale, img_size): - # de-scale predictions following augmented inference (inverse operation) - if self.inplace: - p[..., :4] /= scale # de-scale - if flips == 2: - p[..., 1] = img_size[0] - p[..., 1] # de-flip ud - elif flips == 3: - p[..., 0] = img_size[1] - p[..., 0] # de-flip lr - else: - x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / 
scale, p[..., 2:4] / scale # de-scale - if flips == 2: - y = img_size[0] - y # de-flip ud - elif flips == 3: - x = img_size[1] - x # de-flip lr - p = torch.cat((x, y, wh, p[..., 4:]), -1) - return p - - def _clip_augmented(self, y): - # Clip YOLOv5 augmented inference tails - nl = self.model[-1].nl # number of detection layers (P3-P5) - g = sum(4 ** x for x in range(nl)) # grid points - e = 1 # exclude layer count - i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e)) # indices - y[0] = y[0][:, :-i] # large - i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e)) # indices - y[-1] = y[-1][:, i:] # small - return y - - def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency - # https://arxiv.org/abs/1708.02002 section 3.3 - # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. - m = self.model[-1] # Detect() module - for mi, s in zip(m.m, m.stride): # from - b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) - b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum()) # cls - mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - - -Model = DetectionModel # retain YOLOv5 'Model' class for backwards compatibility - - -class SegmentationModel(DetectionModel): - # YOLOv5 segmentation model - def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None): - super().__init__(cfg, ch, nc, anchors) - - -class ClassificationModel(BaseModel): - # YOLOv5 classification model - def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): # yaml, model, number of classes, cutoff index - super().__init__() - self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg) - - def _from_detection_model(self, model, nc=1000, cutoff=10): - # Create a YOLOv5 classification model from a YOLOv5 detection model - if isinstance(model, DetectMultiBackend): - model = model.model # unwrap DetectMultiBackend - model.model = model.model[:cutoff] # backbone - m = model.model[-1] # last layer - ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels # ch into module - c = Classify(ch, nc) # Classify() - c.i, c.f, c.type = m.i, m.f, 'models.common.Classify' # index, from, type - model.model[-1] = c # replace - self.model = model.model - self.stride = model.stride - self.save = [] - self.nc = nc - - def _from_yaml(self, cfg): - # Create a YOLOv5 classification model from a *.yaml file - self.model = None - - -def parse_model(d, ch): # model_dict, input_channels(3) - # Parse a YOLOv5 model.yaml dictionary - LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") - anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation') - if act: - Conv.default_act = eval(act) # redefine default activation, i.e. 
Conv.default_act = nn.SiLU() - LOGGER.info(f"{colorstr('activation:')} {act}") # print - na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors - no = na * (nc + 5) # number of outputs = anchors * (classes + 5) - - layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out - for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args - m = eval(m) if isinstance(m, str) else m # eval strings - for j, a in enumerate(args): - with contextlib.suppress(NameError): - args[j] = eval(a) if isinstance(a, str) else a # eval strings - - n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain - if m in { - Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, - BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}: - c1, c2 = ch[f], args[0] - if c2 != no: # if not output - c2 = make_divisible(c2 * gw, 8) - - args = [c1, c2, *args[1:]] - if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}: - args.insert(2, n) # number of repeats - n = 1 - elif m is nn.BatchNorm2d: - args = [ch[f]] - elif m is Concat: - c2 = sum(ch[x] for x in f) - # TODO: channel, gw, gd - elif m in {Detect, Segment}: - args.append([ch[x] for x in f]) - if isinstance(args[1], int): # number of anchors - args[1] = [list(range(args[1] * 2))] * len(f) - if m is Segment: - args[3] = make_divisible(args[3] * gw, 8) - elif m is Contract: - c2 = ch[f] * args[0] ** 2 - elif m is Expand: - c2 = ch[f] // args[0] ** 2 - else: - c2 = ch[f] - - m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module - t = str(m)[8:-2].replace('__main__.', '') # module type - np = sum(x.numel() for x in m_.parameters()) # number params - m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params - LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print - save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist - layers.append(m_) - if i == 0: - ch = [] - ch.append(c2) - return nn.Sequential(*layers), sorted(save) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml') - parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs') - parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--profile', action='store_true', help='profile model speed') - parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer') - parser.add_argument('--test', action='store_true', help='test all yolo*.yaml') - opt = parser.parse_args() - opt.cfg = check_yaml(opt.cfg) # check YAML - print_args(vars(opt)) - device = select_device(opt.device) - - # Create model - im = torch.rand(opt.batch_size, 3, 640, 640).to(device) - model = Model(opt.cfg).to(device) - - # Options - if opt.line_profile: # profile layer by layer - model(im, profile=True) - - elif opt.profile: # profile forward-backward - results = profile(input=im, ops=[model], n=3) - - elif opt.test: # test all models - for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'): - try: - _ = Model(cfg) - except Exception as e: - print(f'Error in {cfg}: {e}') - - else: # report fused model summary - model.fuse() diff --git a/spaces/Artificio/AdversarialArt/app.py b/spaces/Artificio/AdversarialArt/app.py deleted file mode 100644 index 8577f9e78159f13fcd6db8cfe9ca716c7444ef2a..0000000000000000000000000000000000000000 --- a/spaces/Artificio/AdversarialArt/app.py +++ /dev/null @@ -1,92 +0,0 @@ -import torch -import torch.nn as nn -from robustness.datasets import ImageNet -from robustness.attacker import AttackerModel -from timm.models import create_model -from torchvision import transforms -from robustness.tools.label_maps import CLASS_DICT -from src.utils import * -from torchvision import transforms -import gradio as gr -import os -from PIL import Image - -DICT_CLASSES = {'lake':955, - 'castle':483, - 'library':624, - 'dog':235, - 'cat':285, - 'people':842 #trunks - } -IMG_MAX_SIZE = 256 -ARCH = 'crossvit_18_dagger_408' -ARCH_PATH = './checkpoints/robust_crossvit_18_dagger_408.pt' -CUSTOM_TRANSFORMS = transforms.Compose([transforms.Resize([IMG_MAX_SIZE,IMG_MAX_SIZE]), - transforms.ToTensor()]) -DEVICE = 'cuda' - - -def load_model(robust = True): - test_image = Image.open('samples/test.png') - ds = CustomArt(test_image,CUSTOM_TRANSFORMS) - model = create_model(ARCH,pretrained = True).to(DEVICE) - if robust: - print("Load Robust Model") - checkpoint = torch.load(ARCH_PATH,map_location = DEVICE) - model.load_state_dict(checkpoint['state_dict'],strict = True) - model = RobustModel(model).to(DEVICE) - model = AttackerModel(model, ds).to(DEVICE) - model = model.eval() - del test_image,ds - return model - - -def gradio_fn(image_input,radio_steps,radio_class,radio_robust): - model = load_model(radio_robust) - kwargs = { - 'constraint':'2', # L2 attack - 'eps': 300, - 'step_size': 1, - 'iterations': int(radio_steps), - 'targeted': True, - 'do_tqdm': True, - 'device': DEVICE - } - # Define the target and the image - target = torch.tensor([int(DICT_CLASSES[radio_class])]).to(DEVICE) - image = Image.fromarray(image_input) - image = CUSTOM_TRANSFORMS(image).to(DEVICE) - image = torch.unsqueeze(image, dim=0) - _, im_adv = model(image, target, make_adv=True, **kwargs) - im_adv = im_adv.squeeze(dim = 0).permute(1,2,0).cpu().numpy() - return im_adv - - -if __name__ == '__main__': - demo = gr.Blocks() - with demo: - gr.Markdown("# Art Adversarial Attack") - with gr.Row(): - with gr.Column(): - with gr.Row(): - # Radio Steps Adversarial attack - radio_steps = gr.Radio([10,500,1000,1500,2000],value = 500,label="# Attack Steps") - # Radio Targeted attack - radio_class = gr.Radio(list(DICT_CLASSES.keys()), - value = list(DICT_CLASSES.keys())[0], - label="Target Class") - radio_robust = 
gr.Radio([True,False],value = True,label="Robust Model") - # Image - with gr.Row(): - image_input = gr.Image(label="Input Image") - with gr.Row(): - calculate_button = gr.Button("Compute") - with gr.Column(): - target_image = gr.Image(label="Art Image") - - calculate_button.click(fn = gradio_fn, - inputs = [image_input,radio_steps,radio_class,radio_robust], - outputs = target_image) - demo.launch(debug = True) - - diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/util.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/util.py deleted file mode 100644 index 8032962dc994bd2b62e98f02016c88d0994e2f58..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/util.py +++ /dev/null @@ -1,308 +0,0 @@ -""" - pygments.util - ~~~~~~~~~~~~~ - - Utility functions. - - :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -import re -from io import TextIOWrapper - - -split_path_re = re.compile(r'[/\\ ]') -doctype_lookup_re = re.compile(r''' - ]*> -''', re.DOTALL | re.MULTILINE | re.VERBOSE) -tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?', - re.IGNORECASE | re.DOTALL | re.MULTILINE) -xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>', re.I) - - -class ClassNotFound(ValueError): - """Raised if one of the lookup functions didn't find a matching class.""" - - -class OptionError(Exception): - pass - - -def get_choice_opt(options, optname, allowed, default=None, normcase=False): - string = options.get(optname, default) - if normcase: - string = string.lower() - if string not in allowed: - raise OptionError('Value for option %s must be one of %s' % - (optname, ', '.join(map(str, allowed)))) - return string - - -def get_bool_opt(options, optname, default=None): - string = options.get(optname, default) - if isinstance(string, bool): - return string - elif isinstance(string, int): - return bool(string) - elif not isinstance(string, str): - raise OptionError('Invalid type %r for option %s; use ' - '1/0, yes/no, true/false, on/off' % ( - string, optname)) - elif string.lower() in ('1', 'yes', 'true', 'on'): - return True - elif string.lower() in ('0', 'no', 'false', 'off'): - return False - else: - raise OptionError('Invalid value %r for option %s; use ' - '1/0, yes/no, true/false, on/off' % ( - string, optname)) - - -def get_int_opt(options, optname, default=None): - string = options.get(optname, default) - try: - return int(string) - except TypeError: - raise OptionError('Invalid type %r for option %s; you ' - 'must give an integer value' % ( - string, optname)) - except ValueError: - raise OptionError('Invalid value %r for option %s; you ' - 'must give an integer value' % ( - string, optname)) - - -def get_list_opt(options, optname, default=None): - val = options.get(optname, default) - if isinstance(val, str): - return val.split() - elif isinstance(val, (list, tuple)): - return list(val) - else: - raise OptionError('Invalid type %r for option %s; you ' - 'must give a list value' % ( - val, optname)) - - -def docstring_headline(obj): - if not obj.__doc__: - return '' - res = [] - for line in obj.__doc__.strip().splitlines(): - if line.strip(): - res.append(" " + line.strip()) - else: - break - return ''.join(res).lstrip() - - -def make_analysator(f): - """Return a static text analyser function that returns float values.""" - def text_analyse(text): - try: - rv = f(text) - 
except Exception: - return 0.0 - if not rv: - return 0.0 - try: - return min(1.0, max(0.0, float(rv))) - except (ValueError, TypeError): - return 0.0 - text_analyse.__doc__ = f.__doc__ - return staticmethod(text_analyse) - - -def shebang_matches(text, regex): - r"""Check if the given regular expression matches the last part of the - shebang if one exists. - - >>> from pygments.util import shebang_matches - >>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?') - True - >>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?') - True - >>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?') - False - >>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?') - False - >>> shebang_matches('#!/usr/bin/startsomethingwith python', - ... r'python(2\.\d)?') - True - - It also checks for common windows executable file extensions:: - - >>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?') - True - - Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does - the same as ``'perl -e'``) - - Note that this method automatically searches the whole string (eg: - the regular expression is wrapped in ``'^$'``) - """ - index = text.find('\n') - if index >= 0: - first_line = text[:index].lower() - else: - first_line = text.lower() - if first_line.startswith('#!'): - try: - found = [x for x in split_path_re.split(first_line[2:].strip()) - if x and not x.startswith('-')][-1] - except IndexError: - return False - regex = re.compile(r'^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE) - if regex.search(found) is not None: - return True - return False - - -def doctype_matches(text, regex): - """Check if the doctype matches a regular expression (if present). - - Note that this method only checks the first part of a DOCTYPE. - eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"' - """ - m = doctype_lookup_re.search(text) - if m is None: - return False - doctype = m.group(1) - return re.compile(regex, re.I).match(doctype.strip()) is not None - - -def html_doctype_matches(text): - """Check if the file looks like it has a html doctype.""" - return doctype_matches(text, r'html') - - -_looks_like_xml_cache = {} - - -def looks_like_xml(text): - """Check if a doctype exists or if we have some tags.""" - if xml_decl_re.match(text): - return True - key = hash(text) - try: - return _looks_like_xml_cache[key] - except KeyError: - m = doctype_lookup_re.search(text) - if m is not None: - return True - rv = tag_re.search(text[:1000]) is not None - _looks_like_xml_cache[key] = rv - return rv - - -def surrogatepair(c): - """Given a unicode character code with length greater than 16 bits, - return the two 16 bit surrogate pair. - """ - # From example D28 of: - # http://www.unicode.org/book/ch03.pdf - return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff))) - - -def format_lines(var_name, seq, raw=False, indent_level=0): - """Formats a sequence of strings for output.""" - lines = [] - base_indent = ' ' * indent_level * 4 - inner_indent = ' ' * (indent_level + 1) * 4 - lines.append(base_indent + var_name + ' = (') - if raw: - # These should be preformatted reprs of, say, tuples. - for i in seq: - lines.append(inner_indent + i + ',') - else: - for i in seq: - # Force use of single quotes - r = repr(i + '"') - lines.append(inner_indent + r[:-2] + r[-1] + ',') - lines.append(base_indent + ')') - return '\n'.join(lines) - - -def duplicates_removed(it, already_seen=()): - """ - Returns a list with duplicates removed from the iterable `it`. - - Order is preserved. 
- """ - lst = [] - seen = set() - for i in it: - if i in seen or i in already_seen: - continue - lst.append(i) - seen.add(i) - return lst - - -class Future: - """Generic class to defer some work. - - Handled specially in RegexLexerMeta, to support regex string construction at - first use. - """ - def get(self): - raise NotImplementedError - - -def guess_decode(text): - """Decode *text* with guessed encoding. - - First try UTF-8; this should fail for non-UTF-8 encodings. - Then try the preferred locale encoding. - Fall back to latin-1, which always works. - """ - try: - text = text.decode('utf-8') - return text, 'utf-8' - except UnicodeDecodeError: - try: - import locale - prefencoding = locale.getpreferredencoding() - text = text.decode() - return text, prefencoding - except (UnicodeDecodeError, LookupError): - text = text.decode('latin1') - return text, 'latin1' - - -def guess_decode_from_terminal(text, term): - """Decode *text* coming from terminal *term*. - - First try the terminal encoding, if given. - Then try UTF-8. Then try the preferred locale encoding. - Fall back to latin-1, which always works. - """ - if getattr(term, 'encoding', None): - try: - text = text.decode(term.encoding) - except UnicodeDecodeError: - pass - else: - return text, term.encoding - return guess_decode(text) - - -def terminal_encoding(term): - """Return our best guess of encoding for the given *term*.""" - if getattr(term, 'encoding', None): - return term.encoding - import locale - return locale.getpreferredencoding() - - -class UnclosingTextIOWrapper(TextIOWrapper): - # Don't close underlying buffer on destruction. - def close(self): - self.flush() diff --git a/spaces/Atharv23m/Human-Stress-Detection/app.py b/spaces/Atharv23m/Human-Stress-Detection/app.py deleted file mode 100644 index e8b2bbc501d8a165d20a5aa09746d9b51ded3364..0000000000000000000000000000000000000000 --- a/spaces/Atharv23m/Human-Stress-Detection/app.py +++ /dev/null @@ -1,65 +0,0 @@ -import gradio as gr -import pandas as pd -import numpy as np -from sklearn.model_selection import train_test_split -from tensorflow.keras.utils import to_categorical -from tensorflow.keras.models import Sequential -from tensorflow.keras.layers import Dense, Dropout - -data=pd.read_csv(f"SaYoPillow.csv") - -data.columns=['snoring_rate', 'respiration_rate', 'body_temperature', 'limb_movement', 'blood_oxygen', - 'eye_movement', 'sleeping_hours', 'heart_rate', 'stress_level'] - -stress_labels = ["Low/Normal", "Medium Low", "Medium", "Medium High", "High"] - -# splitting the dataset -X_train = data.iloc[:, :8] -y_train = data['stress_level'] - -#model -model=Sequential() -model.add(Dense(125, activation="relu")) -model.add(Dense(125, activation="relu")) -model.add(Dense(5, "softmax")) - -epochs=50 -model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) - -y_train_encoded = to_categorical(y_train) -stats = model.fit(X_train, y_train_encoded, epochs=epochs) - - -def predict(snoring_rate, respiration_rate, body_temperature, limb_movement, blood_oxygen, - eye_movement, sleeping_hours, heart_rate): - - input_data = np.array([snoring_rate, respiration_rate, body_temperature, limb_movement, blood_oxygen, - eye_movement, sleeping_hours, heart_rate]) - - # Reshape the input to match the model's expected shape - input_data = np.reshape(input_data, (1, -1)) - - # Make the prediction - prediction = model.predict(input_data)[0] - predicted_stress_level = stress_labels[np.argmax(prediction)] - - return predicted_stress_level - -# Create the 
interface using Gradio -inputs = [ - gr.inputs.Slider(minimum=30, maximum=100, step=0.2, label="Snoring Rate"), - gr.inputs.Slider(minimum=15, maximum=30, step=0.1, label="Respiration Rate"), - gr.inputs.Slider(minimum=85, maximum=100, step=0.1, label="Body Temperature"), - gr.inputs.Slider(minimum=0, maximum=20, step=0.1, label="Limb Movement"), - gr.inputs.Slider(minimum=80, maximum=100, step=0.1, label="Blood Oxygen"), - gr.inputs.Slider(minimum=60, maximum=110, step=0.5, label="Eye Movement"), - gr.inputs.Slider(minimum=0, maximum=12, step=0.1, label="Sleeping Hours"), - gr.inputs.Slider(minimum=50, maximum=100, step=1, label="Heart Rate"), -] - -output = gr.outputs.Textbox(label="Predicted Stress Level") - -title = "Stress Level Prediction from Sleep Patterns" -description = "Predict the stress level based on your sleep patterns. Based on dataset provided by a research on SaYoPillow - Smart Yoga Pillow" - -gr.Interface(fn=predict, inputs=inputs, outputs=output, title=title, description=description).launch() diff --git a/spaces/Awesimo/jojogan/op/upfirdn2d.py b/spaces/Awesimo/jojogan/op/upfirdn2d.py deleted file mode 100644 index f1bbf96777f2c7267c1fef1733972014684ea22b..0000000000000000000000000000000000000000 --- a/spaces/Awesimo/jojogan/op/upfirdn2d.py +++ /dev/null @@ -1,187 +0,0 @@ -import os - -import torch -from torch.autograd import Function -from torch.utils.cpp_extension import load - - -module_path = os.path.dirname(__file__) -upfirdn2d_op = load( - 'upfirdn2d', - sources=[ - os.path.join(module_path, 'upfirdn2d.cpp'), - os.path.join(module_path, 'upfirdn2d_kernel.cu'), - ], -) - - -class UpFirDn2dBackward(Function): - @staticmethod - def forward( - ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size - ): - - up_x, up_y = up - down_x, down_y = down - g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad - - grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1) - - grad_input = upfirdn2d_op.upfirdn2d( - grad_output, - grad_kernel, - down_x, - down_y, - up_x, - up_y, - g_pad_x0, - g_pad_x1, - g_pad_y0, - g_pad_y1, - ) - grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3]) - - ctx.save_for_backward(kernel) - - pad_x0, pad_x1, pad_y0, pad_y1 = pad - - ctx.up_x = up_x - ctx.up_y = up_y - ctx.down_x = down_x - ctx.down_y = down_y - ctx.pad_x0 = pad_x0 - ctx.pad_x1 = pad_x1 - ctx.pad_y0 = pad_y0 - ctx.pad_y1 = pad_y1 - ctx.in_size = in_size - ctx.out_size = out_size - - return grad_input - - @staticmethod - def backward(ctx, gradgrad_input): - kernel, = ctx.saved_tensors - - gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1) - - gradgrad_out = upfirdn2d_op.upfirdn2d( - gradgrad_input, - kernel, - ctx.up_x, - ctx.up_y, - ctx.down_x, - ctx.down_y, - ctx.pad_x0, - ctx.pad_x1, - ctx.pad_y0, - ctx.pad_y1, - ) - # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3]) - gradgrad_out = gradgrad_out.view( - ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1] - ) - - return gradgrad_out, None, None, None, None, None, None, None, None - - -class UpFirDn2d(Function): - @staticmethod - def forward(ctx, input, kernel, up, down, pad): - up_x, up_y = up - down_x, down_y = down - pad_x0, pad_x1, pad_y0, pad_y1 = pad - - kernel_h, kernel_w = kernel.shape - batch, channel, in_h, in_w = input.shape - ctx.in_size = input.shape - - input = input.reshape(-1, in_h, in_w, 1) - - ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) - - out_h = (in_h * up_y + 
pad_y0 + pad_y1 - kernel_h) // down_y + 1 - out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 - ctx.out_size = (out_h, out_w) - - ctx.up = (up_x, up_y) - ctx.down = (down_x, down_y) - ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1) - - g_pad_x0 = kernel_w - pad_x0 - 1 - g_pad_y0 = kernel_h - pad_y0 - 1 - g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 - g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 - - ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) - - out = upfirdn2d_op.upfirdn2d( - input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 - ) - # out = out.view(major, out_h, out_w, minor) - out = out.view(-1, channel, out_h, out_w) - - return out - - @staticmethod - def backward(ctx, grad_output): - kernel, grad_kernel = ctx.saved_tensors - - grad_input = UpFirDn2dBackward.apply( - grad_output, - kernel, - grad_kernel, - ctx.up, - ctx.down, - ctx.pad, - ctx.g_pad, - ctx.in_size, - ctx.out_size, - ) - - return grad_input, None, None, None, None - - -def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): - out = UpFirDn2d.apply( - input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1]) - ) - - return out - - -def upfirdn2d_native( - input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1 -): - _, in_h, in_w, minor = input.shape - kernel_h, kernel_w = kernel.shape - - out = input.view(-1, in_h, 1, in_w, 1, minor) - out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) - out = out.view(-1, in_h * up_y, in_w * up_x, minor) - - out = F.pad( - out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)] - ) - out = out[ - :, - max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0), - max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0), - :, - ] - - out = out.permute(0, 3, 1, 2) - out = out.reshape( - [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1] - ) - w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) - out = F.conv2d(out, w) - out = out.reshape( - -1, - minor, - in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, - in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, - ) - out = out.permute(0, 2, 3, 1) - - return out[:, ::down_y, ::down_x, :] - diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/layers/test_losses.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/layers/test_losses.py deleted file mode 100644 index d74920246cbd4a188b3c81cf0c78e982af6da1ac..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/layers/test_losses.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import numpy as np -import unittest -import torch - -from detectron2.layers import ciou_loss, diou_loss - - -class TestLosses(unittest.TestCase): - def test_diou_loss(self): - """ - loss = 1 - iou + d/c - where, - d = (distance between centers of the 2 boxes)^2 - c = (diagonal length of the smallest enclosing box covering the 2 boxes)^2 - """ - # Identical boxes should have loss of 0 - box = torch.tensor([-1, -1, 1, 1], dtype=torch.float32) - loss = diou_loss(box, box) - self.assertTrue(np.allclose(loss, [0.0])) - - # Half size box inside other box - # iou = 0.5, d = 0.25, c = 8 - box2 = torch.tensor([0, -1, 1, 1], dtype=torch.float32) - loss = diou_loss(box, box2) - self.assertTrue(np.allclose(loss, [0.53125])) - - # Two diagonally adjacent boxes - # iou = 0, d = 2, c = 8 - box3 = torch.tensor([0, 0, 1, 1], dtype=torch.float32) - box4 = torch.tensor([1, 1, 2, 2], dtype=torch.float32) - loss = diou_loss(box3, box4) - self.assertTrue(np.allclose(loss, [1.25])) - - # Test batched loss and reductions - box1s = torch.stack([box, box3], dim=0) - box2s = torch.stack([box2, box4], dim=0) - - loss = diou_loss(box1s, box2s, reduction="sum") - self.assertTrue(np.allclose(loss, [1.78125])) - - loss = diou_loss(box1s, box2s, reduction="mean") - self.assertTrue(np.allclose(loss, [0.890625])) - - def test_ciou_loss(self): - """ - loss = 1 - iou + d/c + alpha*v - where, - d = (distance between centers of the 2 boxes)^2 - c = (diagonal length of the smallest enclosing box covering the 2 boxes)^2 - v = (4/pi^2) * (arctan(box1_w/box1_h) - arctan(box2_w/box2_h))^2 - alpha = v/(1 - iou + v) - """ - # Identical boxes should have loss of 0 - box = torch.tensor([-1, -1, 1, 1], dtype=torch.float32) - loss = ciou_loss(box, box) - self.assertTrue(np.allclose(loss, [0.0])) - - # Half size box inside other box - # iou = 0.5, d = 0.25, c = 8 - # v = (4/pi^2) * (arctan(1) - arctan(0.5))^2 = 0.042 - # alpha = 0.0775 - box2 = torch.tensor([0, -1, 1, 1], dtype=torch.float32) - loss = ciou_loss(box, box2) - self.assertTrue(np.allclose(loss, [0.5345])) - - # Two diagonally adjacent boxes - # iou = 0, d = 2, c = 8, v = 0, alpha = 0 - box3 = torch.tensor([0, 0, 1, 1], dtype=torch.float32) - box4 = torch.tensor([1, 1, 2, 2], dtype=torch.float32) - loss = ciou_loss(box3, box4) - self.assertTrue(np.allclose(loss, [1.25])) - - # Test batched loss and reductions - box1s = torch.stack([box, box3], dim=0) - box2s = torch.stack([box2, box4], dim=0) - - loss = ciou_loss(box1s, box2s, reduction="sum") - self.assertTrue(np.allclose(loss, [1.7845])) - - loss = ciou_loss(box1s, box2s, reduction="mean") - self.assertTrue(np.allclose(loss, [0.89225])) diff --git a/spaces/Bannermore/BingChat/Dockerfile b/spaces/Bannermore/BingChat/Dockerfile deleted file mode 100644 index 3698c7cb7938e025afc53b18a571ae2961fbdffe..0000000000000000000000000000000000000000 --- a/spaces/Bannermore/BingChat/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -# Build Stage -# 使用 golang:alpine 作为构建阶段的基础镜像 -FROM golang:alpine AS builder - -# 添加 git,以便之后能从GitHub克隆项目 -RUN apk --no-cache add git - -# 从 GitHub 克隆 go-proxy-bingai 项目到 /workspace/app 目录下 -RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app - -# 设置工作目录为之前克隆的项目目录 -WORKDIR /workspace/app - -# 编译 go 项目。-ldflags="-s -w" 是为了减少编译后的二进制大小 -RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go - -# Runtime Stage -# 使用轻量级的 alpine 镜像作为运行时的基础镜像 -FROM alpine - -# 设置工作目录 -WORKDIR /workspace/app - -# 从构建阶段复制编译后的二进制文件到运行时镜像中 -COPY --from=builder /workspace/app/go-proxy-bingai . 
- -# 设置环境变量,此处为随机字符 -ENV Go_Proxy_BingAI_USER_TOKEN_1="kJs8hD92ncMzLaoQWYtX5rG6bE3fZ4iO" - -# 暴露8080端口 -EXPOSE 8080 - -# 容器启动时运行的命令 -CMD ["/workspace/app/go-proxy-bingai"] \ No newline at end of file diff --git a/spaces/Bart92/RVC_HF/tools/infer/trans_weights.py b/spaces/Bart92/RVC_HF/tools/infer/trans_weights.py deleted file mode 100644 index 1c54eefd6e7c678238d31e251a2e15479bf35d5b..0000000000000000000000000000000000000000 --- a/spaces/Bart92/RVC_HF/tools/infer/trans_weights.py +++ /dev/null @@ -1,18 +0,0 @@ -import pdb - -import torch - -# a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-suc\G_1000.pth")["model"]#sim_nsf# -# a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-freeze-vocoder-flow-enc_q\G_1000.pth")["model"]#sim_nsf# -# a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-freeze-vocoder\G_1000.pth")["model"]#sim_nsf# -# a=torch.load(r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-test\G_1000.pth")["model"]#sim_nsf# -a = torch.load( - r"E:\codes\py39\vits_vc_gpu_train\logs\ft-mi-no_opt-no_dropout\G_1000.pth" -)[ - "model" -] # sim_nsf# -for key in a.keys(): - a[key] = a[key].half() -# torch.save(a,"ft-mi-freeze-vocoder_true_1k.pt")# -# torch.save(a,"ft-mi-sim1k.pt")# -torch.save(a, "ft-mi-no_opt-no_dropout.pt") # diff --git a/spaces/Benson/text-generation/Examples/Descargar 30 Juz Misyari Rasyid.md b/spaces/Benson/text-generation/Examples/Descargar 30 Juz Misyari Rasyid.md deleted file mode 100644 index 6ce67cb0d5e507c893c55697f9c59eb618136df6..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar 30 Juz Misyari Rasyid.md +++ /dev/null @@ -1,73 +0,0 @@ - -

# Download 30 Juz Misyari Rasyid: A Complete Guide

If you are looking for a way to listen to the Quran in a beautiful, melodious voice, you may want to consider downloading the 30 juz recited by Misyari Rasyid. Misyari Rasyid is one of the most famous and respected Quran reciters in the world, and his recitation of the Quran's 30 juz can help you improve your memorization, understanding, and appreciation of the holy book. In this article we will tell you everything you need to know about Misyari Rasyid, the 30 juz of the Quran, and how to download them easily and conveniently.

download 30 juz misyari rasyid

Download: https://bltlly.com/2v6LmX

## Who is Misyari Rasyid?

Misyari Rasyid is a Kuwaiti qari (Quran reciter), imam, preacher, and nasheed artist. He was born on 5 September 1976, and his full name is Mishary bin Rashid bin Gharib bin Muhammad Alafasy Al-Muthairi. He is also known by his kunya (nickname) Abu Nora.

### His full name and background

Misyari Rasyid belongs to the Alafasy tribe, which traces its ancestry to Al-Bara' ibn Malik, a companion of the Prophet Muhammad (peace and blessings of Allah be upon him). He studied at the College of the Quran of the Islamic University of Medina, specializing in the ten qira'at (modes of recitation) and tafsir (exegesis). He also holds a master's degree in Islamic jurisprudence from Kuwait University.

### His achievements and recognition

Misyari Rasyid memorized the entire Quran at an early age and has taken part in many Quran competitions and festivals around the world. He has won several awards and honors for his recitation, including first prize in the Kuwait International Quran Competition in 1998, first prize in the Islamic Creativity Oscar in 2002, and the Arab Creativity Award in 2005. He was also named a UNICEF goodwill ambassador in 2007.

### His style and voice

## What is the 30 Juz Quran?

The Quran is the word of Allah revealed to the Prophet Muhammad (peace be upon him) through the Angel Gabriel over a period of 23 years. It consists of 114 chapters (suras) of different lengths, which are divided into 30 parts (juz) to make reading and memorization easier.

### The meaning and division of the juz

The word juz means "part" or "portion" in Arabic. Each juz contains roughly 20 pages, or about 600 verses, of the Quran. The division into juz is not based on the thematic or chronological order of the suras but on the convenience of splitting the Quran into equal parts. The first juz runs from the beginning of the Quran (sura Al-Fatiha) to verse 141 of sura Al-Baqarah. The last juz runs from sura An-Naba to the end of the Quran (sura An-Nas). The remaining juz are divided at natural breaks in the text, such as the end of a sura or of a long verse.

### The benefits and virtues of reciting a juz

Reciting a juz is one of the best ways to connect with the Quran and earn rewards from Allah. The Prophet Muhammad (peace be upon him) said: "Whoever recites a letter from the Book of Allah will have a reward, and that reward will be multiplied by ten. I am not saying that 'Alif, Lam, Meem' is one letter; rather, 'Alif' is a letter, 'Lam' is a letter, and 'Meem' is a letter." He also said: "The best of you are those who learn the Quran and teach it." Reciting a juz can also help you understand the meaning and context of the Quran, improve your Arabic, and memorize the Quran more easily.

### The most popular and easiest juz to memorize

## How to download 30 Juz Misyari Rasyid

If you want to download the 30 juz recited by Misyari Rasyid, you have several options. You can download them as mp3 files, zip files, or torrent files, stream them online, or use apps and websites that offer them for free or for a fee.

### Sources and formats of the audio files

The audio files of the 30 juz recited by Misyari Rasyid are available from several sources, such as his official website, YouTube channel, SoundCloud account, and other platforms. You can download them in different formats depending on your preference and your device's compatibility: mp3 files, which are small and easy to play on any device; zip files, which are compressed archives containing all 30 juz in one folder; or torrent files, which are peer-to-peer files that require a torrent client to download.

### Steps and tips for downloading

The exact steps for downloading the 30 juz vary depending on the source and format you choose. Here are some general guidelines:

- Choose a reliable, trustworthy source that offers high-quality audio files and does not carry viruses or malware.
- Make sure you have enough storage space on your device or an external drive for the audio files.
- Use a fast, stable internet connection to avoid interruptions or errors during the download.
- Follow the instructions on the source website or app to download the audio files. You may need to register an account, provide an email address, or make a payment where required.
- If you download zip or torrent files, you will need to extract or open them with suitable software.
- Organize the audio files into a folder or playlist for easy access and playback.

### The best apps and websites for listening

| Name | Description | Features |
| --- | --- | --- |
| Muslim Pro | A comprehensive Islamic app offering services such as prayer times, adhan notifications, Quran recitation and translation, an Islamic calendar, duas, a zakat calculator, and more. | Offers the 30 juz by Misyari Rasyid as one of the reciters in the Quran section. Lets you download the audio files for offline listening. Provides the Arabic text, transliteration, and translation of the Quran in several languages. Lets you bookmark, share, and repeat verses. Supports night mode, landscape mode, and font-size adjustment. |
| Quran Majeed | A dedicated Quran app with a beautiful, interactive interface and high-resolution images of the Quran's pages. | Offers Misyari Rasyid as one of the reciters in the audio section. Lets you download the audio files for offline listening. Provides the Arabic text, translation, and tafsir of the Quran in several languages. Lets you search, bookmark, highlight, and annotate verses. Supports playback controls, gapless audio, auto-advance, and speed adjustment. |
| Alafasy official website | The official website of Misyari Rasyid, containing his biography, news, events, photos, videos, nasheeds, and Quran recitation. | Offers the 30 juz as one of the categories in the Quran section. Lets you download the audio files for free. Provides the Arabic text and translation of the Quran in several languages. Lets you listen online or download mp3, zip, or torrent files. Supports social media sharing and comments. |

## Conclusion

## Frequently asked questions

### What is the difference between a juz and a surah?

A juz is a part or portion of the Quran containing roughly 20 pages or 600 verses. A surah is a chapter or section of the Quran with a specific name and number. There are 114 surahs in the Quran, and they are divided into 30 juz.

### How long does it take to recite one juz?

It depends on your speed and fluency, but on average it takes about an hour to recite one juz.

### How can I improve my recitation of a juz?

You can improve your recitation of a juz by following these tips:

- Listen to the recitation of Misyari Rasyid or other qualified qaris and try to imitate their pronunciation, intonation, and rules of recitation.
- Read the Arabic text alongside the transliteration and translation to understand the meaning and context of the verses.
- Repeat the verses several times until you have memorized them and can recite them correctly.
- Review what you have memorized regularly and go back over any mistakes or gaps.
- Seek feedback and guidance from a teacher or a friend who can correct your recitation and help you improve.

### What are some of the benefits of listening to Misyari Rasyid's recitation?

Some of the benefits of listening to his recitation are:

- You can learn from the accuracy, fluency, and beauty of his recitation.
- You can feel more connected and moved by his clear, gentle, emotional voice.
- You can enjoy his variety of qira'at (modes of recitation) and nasheeds (Islamic songs).
- You can earn rewards from Allah for listening to His words and following His commandments.
- You can increase your faith, knowledge, and love for Allah and His messenger (peace be upon him).

### Where can I find more information about Misyari Rasyid and his recitation?

- Official website: https://alafasy.me/
- YouTube: https://www.youtube.com/user/Alafasy
- SoundCloud: https://soundcloud.com/alafasy
- Facebook: https://www.facebook.com/AlafasyOfficial
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Afk Bot Para Aternos.md b/spaces/Benson/text-generation/Examples/Descargar Afk Bot Para Aternos.md deleted file mode 100644 index 1df9d0c3c8fae8e4664d47dce85834e83b6eb143..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Afk Bot Para Aternos.md +++ /dev/null @@ -1,102 +0,0 @@ - -

# How to Download and Install an AFK Bot for an Aternos Minecraft Server

If you are a Minecraft fan, you may have heard of Aternos, a free Minecraft server hosting service that lets you create your own personal server with unlimited slots, mods, plugins, custom worlds, and more. You may also have run into a common problem with Aternos servers: they go offline when nobody is playing on them. This means you have to start your server manually every time you want to play, and you can lose progress or data if you forget to save or back up the server.

Fortunately, there is a solution to this problem: use an AFK bot. An AFK bot is a program that connects to your Aternos server and keeps it online by sending commands or messages periodically. That way you can enjoy your Minecraft server without worrying about it going offline or losing your data. In this article we will show you how to download and install an AFK bot for Aternos, how to choose the best AFK bot for your needs, and how to use and manage it effectively.

download afk bot for aternos

Download Zip: https://bltlly.com/2v6KKl

## What is Aternos and why do you need an AFK bot?

Aternos is a free Minecraft server hosting service that lets you create your own personal server with unlimited slots, mods, plugins, custom worlds, and more. You can choose from hundreds of different server types, such as vanilla, Spigot, Forge, Paper, and Fabric, customize your server settings (difficulty, game mode, whitelist, operators, and so on), and access your server from any device: PC, mobile, console, etc.

Because Aternos puts a server into hibernation when nobody is playing on it, this is where an AFK bot comes in handy. An AFK bot is a program that connects to your Aternos server and keeps it online by sending commands or messages periodically; for example, it might send a chat message every 10 minutes or move around every 5 minutes. That way the server never enters hibernation mode and stays online for as long as the bot is running, so you can enjoy your Minecraft server without worrying about it going offline or losing your data.
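To make the idea concrete, here is a minimal Python sketch of the keep-alive loop described above. It is not taken from any of the bots discussed in this article, and `send_keep_alive` is a hypothetical placeholder for whatever Minecraft client library a real bot would use to deliver the chat message.

```python
import time

KEEP_ALIVE_INTERVAL = 10 * 60  # seconds between keep-alive messages (10 minutes)


def send_keep_alive(message: str) -> None:
    # Hypothetical placeholder: a real AFK bot would send this chat message
    # through its Minecraft client library instead of just printing it.
    print(f"[afk-bot] {message}")


def keep_server_awake() -> None:
    """Send a chat message at a fixed interval so the server never idles."""
    while True:
        send_keep_alive("AFK bot is keeping this server online")
        time.sleep(KEEP_ALIVE_INTERVAL)


if __name__ == "__main__":
    keep_server_awake()
```

The only design point that matters here is the fixed interval: as long as some activity arrives more often than the hosting service's idle timeout, the server is treated as active.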

-

## How to choose an AFK bot for Aternos

There are many AFK bots available for Aternos servers, but not all of them are compatible, functional, or safe. You therefore need to choose an AFK bot carefully, based on criteria such as:

- Compatibility: the AFK bot must be compatible with the version and type of your Aternos server. For example, if you are running a Spigot 1.17 server, you need an AFK bot that supports Spigot 1.17 servers.
- Functionality: the AFK bot should have the features and commands you need to keep your server online and active. For example, if you want to track your server's activity or see charts of its statistics, you need an AFK bot that offers those features.
- Security: the AFK bot must be safe and trustworthy. Avoid downloading or installing AFK bots that look suspicious or malicious, and check reviews and ratings before using one.

To help you choose an AFK bot for Aternos, we have compared some of the most popular and reliable options in the table below:

| Name | Description | Compatibility | Functionality | Security |
| --- | --- | --- | --- | --- |
| krushna06/afk-bot-for-aternos | A powerful, customizable AFK bot for Aternos servers that runs on Heroku. | Supports any version and type of Aternos server. | Sends a chat message every 10 minutes and moves every 5 minutes. Also tracks server activity, shows charts of server statistics, and lets you set and clear a custom AFK status. | Open source, published on GitHub. |
Once you have downloaded an AFK bot, edit its configuration file. The configuration file is where you customize the bot's settings, such as its name, password, server IP, and server port. You can edit it by opening it in a text editor or by using GitHub's online editor. A minimal sketch of such a configuration, and of how a bot might load it, follows below.
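As a rough illustration of that configuration step, the sketch below shows the kind of fields such a file might contain and how a bot could load and validate them. The field names are assumptions for illustration only; each bot project defines its own configuration schema, so check its README for the real keys.

```python
import json
from pathlib import Path

# Illustrative field names only; real bots define their own configuration schema.
EXAMPLE_CONFIG = {
    "bot_name": "AfkBot",             # must match the name the server expects
    "password": "change-me",          # only needed if a login plugin asks for /login
    "server_ip": "example.aternos.me",  # hypothetical address from the Aternos panel
    "server_port": 25565,             # default Minecraft port
    "chat_interval_minutes": 10,
}


def load_config(path: str = "config.json") -> dict:
    """Read the bot configuration from disk, falling back to the example values."""
    config_file = Path(path)
    if not config_file.exists():
        return dict(EXAMPLE_CONFIG)
    config = json.loads(config_file.read_text(encoding="utf-8"))
    missing = [key for key in EXAMPLE_CONFIG if key not in config]
    if missing:
        raise ValueError(f"config.json is missing required keys: {missing}")
    return config


if __name__ == "__main__":
    print(load_config())
```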

## How to install an AFK bot on Heroku

Heroku is a platform where you can run applications online without having to install them on your own device. Many AFK bots for Aternos can run on Heroku, such as ttttdeded/aternos-afkbot and krushna06/afk-bot-for-aternos. To install an AFK bot on Heroku, follow these steps:

1. Create a Heroku account if you do not have one. You can sign up for free at https://www.heroku.com/.
2. Create a new app on Heroku by clicking the "New" button in the top-right corner of your dashboard and selecting "Create new app", then give the app a name and choose a region.
3. Deploy the AFK bot branch you downloaded from GitHub. You can do this by connecting your GitHub account to your Heroku account and selecting the bot's repository and branch, or you can deploy the branch manually with the Heroku CLI or Git.
4. Restart your app's dynos. Dynos are the units of computing power that run your app on Heroku. Restart them by clicking the "More" button in the top-right corner of the app page and selecting "Restart all dynos"; this makes sure the app is running correctly.

The final step is to connect your AFK bot to your Aternos server. This lets the bot join the server and keep it online by periodically sending commands or messages. To connect the bot to your Aternos server, follow these steps:

1. Add your Aternos server's IP address, port, and name to the bot's configuration file. You can find this information in your Aternos control panel under "Connect to your server". Make sure the bot's name matches the name you set in the configuration file.
2. Whitelist the bot in any login plugin or anti-bot protection your Aternos server may have. Some Aternos servers require a password or a captcha to join, which can keep the bot out; you can whitelist the bot by adding its name to the whitelist file or by using commands such as /login or /register.
3. Start your Aternos server and wait for the bot to join. Start the server with the "Start" button in your Aternos panel, and check the bot's status in the console or logs of your Heroku app. A small reachability check you can run from your own machine is sketched after these steps.
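The sketch below is a simple way to confirm from your own machine that the server is accepting connections while the bot runs. It only tests that a TCP connection to the server's address and port can be opened; it is not a full Minecraft status ping, and the address used is a hypothetical placeholder to be replaced with your own server's address from the Aternos panel.

```python
import socket


def server_is_reachable(host: str, port: int = 25565, timeout: float = 5.0) -> bool:
    """Return True if a TCP connection to the Minecraft server can be opened."""
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        # Connection refused, timed out, or the hostname did not resolve.
        return False


if __name__ == "__main__":
    host = "example.aternos.me"  # hypothetical placeholder address
    status = "online" if server_is_reachable(host) else "offline or sleeping"
    print(f"{host} looks {status}")
```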

## How to use and manage an AFK bot for Aternos

Now that you have downloaded, installed, and connected your AFK bot to your Aternos server, you can start using and managing it according to your preferences and needs. Here are some of the things you can do with it:

- Set an AFK status: you can set an AFK status so other players know you are away from the keyboard, for example "I'm AFK, don't disturb me" or "I'm AFK, please don't kill me". Set it with commands such as /afk or /away.
- Clear the AFK status: clear it when you come back to the keyboard so other players know you are active and available, using commands such as /back or /return.
- View charts: you can view charts of your Aternos server statistics through the bot, for example how many players have joined, how many hours have been played, or how many times the server has been started, using commands such as /chart or /graph.

Here are some tips and precautions for using your AFK bot effectively and safely:

- Put the AFK bot in a bedrock enclosure: keep the bot in a bedrock box or other safe spot on your server so other players or mobs cannot kill, damage, or move it. This also reduces lag and the server resources the bot consumes.
- Choose the right server version: pick the server version that matches both your Aternos server and your AFK bot so the bot stays compatible and functional. For example, a Spigot 1.17 server needs a bot that supports Spigot 1.17 servers.
- Avoid detection by Aternos: Aternos may treat the use of an AFK bot as cheating or abuse of its service and may remove your account if it detects it. You can reduce the risk by changing the bot's settings, such as the interval and content of its chat messages or movements.

## Conclusion

If you are interested in downloading and installing an AFK bot for Aternos, check the projects and links mentioned above for more information and tutorials. We hope this article has been helpful and informative. Happy gaming!

## Frequently asked questions

### What is the best AFK bot for Aternos?

It depends on your preferences and needs, but some of the most popular and reliable AFK bots for Aternos are ttttdeded/aternos-afkbot, krushna06/afk-bot-for-aternos, and the AFK Discord Bot. These bots are compatible with any version and type of Aternos server, offer various features and commands to keep your server online and active, and are safe and reliable. You can compare them in the table above or visit their GitHub pages or Discord servers for more information.

### How long can I keep my Aternos server online with an AFK bot?

It depends on the bot's settings and your server activity, but in general you can keep your Aternos server online for as long as you want with an AFK bot. As long as the bot is running on Heroku and connected to your server, it sends commands or messages periodically to keep the server from entering hibernation mode. Keep in mind that an AFK bot consumes extra resources and can add lag, so adjust its settings accordingly.

### Is it legal to use an AFK bot for Aternos?

### How can I make my own AFK bot for Aternos?

It depends on your coding skills and knowledge, but you can generally build your own AFK bot for Aternos with tools such as mineflayer or discord.js by following online tutorials. Mineflayer is a Minecraft client library that lets you create bots that interact with Minecraft servers; discord.js is a JavaScript library that lets you create bots that interact with Discord servers. With these tools you can create an AFK bot that connects to your Aternos server and keeps it online by periodically sending commands or messages, and you can customize it with whatever features and commands you need.

### How can I get help or support for using an AFK bot for Aternos?

It depends on where your AFK bot comes from, but you can usually get help or support by contacting the bot's developer, joining their Discord server or GitHub page, or asking other users who have used the same bot. For example, if you use ttttdeded/aternos-afkbot, you can contact ttttdeded through their GitHub profile, join their Discord server, or ask other users who have forked or starred the repository. You can also search online for guides and tutorials on using an AFK bot for Aternos.
    -
    -
    \ No newline at end of file diff --git a/spaces/CALM/Dashboard/streamlit_observable/frontend/src/streamlit/index.tsx b/spaces/CALM/Dashboard/streamlit_observable/frontend/src/streamlit/index.tsx deleted file mode 100644 index 290e4a058ad8b7427e80c18a5b22bac2451c9934..0000000000000000000000000000000000000000 --- a/spaces/CALM/Dashboard/streamlit_observable/frontend/src/streamlit/index.tsx +++ /dev/null @@ -1,30 +0,0 @@ -/** - * @license - * Copyright 2018-2020 Streamlit Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Workaround for type-only exports: -// https://stackoverflow.com/questions/53728230/cannot-re-export-a-type-when-using-the-isolatedmodules-with-ts-3-2-2 -import { ComponentProps as ComponentProps_ } from "./StreamlitReact" -import { RenderData as RenderData_ } from "./streamlit" - -export { - StreamlitComponentBase, - withStreamlitConnection, -} from "./StreamlitReact" -export { ArrowTable } from "./ArrowTable" -export { Streamlit } from "./streamlit" -export type ComponentProps = ComponentProps_ -export type RenderData = RenderData_ diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/remove.h b/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/remove.h deleted file mode 100644 index ca4eab84575814020f7658436ea2f78808678fc2..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/remove.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include -#include - -namespace thrust -{ -namespace system -{ -namespace omp -{ -namespace detail -{ - -template - ForwardIterator remove_if(execution_policy &exec, - ForwardIterator first, - ForwardIterator last, - Predicate pred); - - -template - ForwardIterator remove_if(execution_policy &exec, - ForwardIterator first, - ForwardIterator last, - InputIterator stencil, - Predicate pred); - - -template - OutputIterator remove_copy_if(execution_policy &exec, - InputIterator first, - InputIterator last, - OutputIterator result, - Predicate pred); - - -template - OutputIterator remove_copy_if(execution_policy &exec, - InputIterator1 first, - InputIterator1 last, - InputIterator2 stencil, - OutputIterator result, - Predicate pred); - - -} // end namespace detail -} // end namespace omp -} // end namespace system -} // end namespace thrust - -#include - diff --git a/spaces/CVPR/regionclip-demo/detectron2/modeling/backbone/backbone.py b/spaces/CVPR/regionclip-demo/detectron2/modeling/backbone/backbone.py deleted file mode 100644 index 369fb884930c5dd82f94024c45303dafaab14d66..0000000000000000000000000000000000000000 --- a/spaces/CVPR/regionclip-demo/detectron2/modeling/backbone/backbone.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from abc import ABCMeta, abstractmethod -import torch.nn as nn - -from detectron2.layers import ShapeSpec - -__all__ = ["Backbone"] - - -class Backbone(nn.Module, metaclass=ABCMeta): - """ - Abstract base class for network backbones. - """ - - def __init__(self): - """ - The `__init__` method of any subclass can specify its own set of arguments. - """ - super().__init__() - - @abstractmethod - def forward(self): - """ - Subclasses must override this method, but adhere to the same return type. - - Returns: - dict[str->Tensor]: mapping from feature name (e.g., "res2") to tensor - """ - pass - - @property - def size_divisibility(self) -> int: - """ - Some backbones require the input height and width to be divisible by a - specific integer. This is typically true for encoder / decoder type networks - with lateral connection (e.g., FPN) for which feature maps need to match - dimension in the "bottom up" and "top down" paths. Set to 0 if no specific - input size divisibility is required. 
- """ - return 0 - - def output_shape(self): - """ - Returns: - dict[str->ShapeSpec] - """ - # this is a backward-compatible default - return { - name: ShapeSpec( - channels=self._out_feature_channels[name], stride=self._out_feature_strides[name] - ) - for name in self._out_features - } diff --git a/spaces/CikeyQI/meme-api/meme_generator/memes/flash_blind/__init__.py b/spaces/CikeyQI/meme-api/meme_generator/memes/flash_blind/__init__.py deleted file mode 100644 index f639e288b10ec7949409809f34e48a7a770a440f..0000000000000000000000000000000000000000 --- a/spaces/CikeyQI/meme-api/meme_generator/memes/flash_blind/__init__.py +++ /dev/null @@ -1,63 +0,0 @@ -from typing import List - -from PIL import ImageOps -from PIL.Image import Image as IMG -from pil_utils import BuildImage - -from meme_generator import add_meme -from meme_generator.exception import TextOverLength -from meme_generator.utils import save_gif - - -def flash_blind(images: List[BuildImage], texts: List[str], args): - img = images[0].convert("RGB").resize_width(500) - frames: List[IMG] = [] - frames.append(img.image) - frames.append(ImageOps.invert(img.image)) - img_enlarge = img.resize_canvas((450, img.height * 450 // 500)).resize( - (500, img.height) - ) - frames.append(img_enlarge.image) - frames.append(ImageOps.invert(img.image)) - - if texts and texts[0]: - text = texts[0] - text_h = 65 - - try: - text_frame_black = BuildImage.new("RGB", (500, text_h), "black") - text_frame_white = BuildImage.new("RGB", (500, text_h), "white") - text_frame_black.draw_text( - (10, 0, 490, text_h), - text, - max_fontsize=50, - min_fontsize=20, - fill="white", - ) - text_frame_white.draw_text( - (10, 0, 490, text_h), - text, - max_fontsize=50, - min_fontsize=20, - fill="black", - ) - except ValueError: - raise TextOverLength(text) - frames[0].paste(text_frame_black.image, (0, img.height - text_h)) - frames[1].paste(text_frame_white.image, (0, img.height - text_h)) - frames[2].paste(text_frame_black.image, (0, img.height - text_h)) - frames[3].paste(text_frame_white.image, (0, img.height - text_h)) - - return save_gif(frames, 0.03) - - -add_meme( - "flash_blind", - flash_blind, - min_images=1, - max_images=1, - min_texts=0, - max_texts=1, - default_texts=["闪瞎你们的狗眼"], - keywords=["闪瞎"], -) diff --git a/spaces/ClearLove443/Robby-chatbot/tuto_chatbot_csv.py b/spaces/ClearLove443/Robby-chatbot/tuto_chatbot_csv.py deleted file mode 100644 index 4a67aa3e4567cc4f610f1247e48e59e4f7c1c428..0000000000000000000000000000000000000000 --- a/spaces/ClearLove443/Robby-chatbot/tuto_chatbot_csv.py +++ /dev/null @@ -1,73 +0,0 @@ -#pip install streamlit langchain openai faiss-cpu tiktoken - -import streamlit as st -from streamlit_chat import message -from langchain.embeddings.openai import OpenAIEmbeddings -from langchain.chat_models import ChatOpenAI -from langchain.chains import ConversationalRetrievalChain -from langchain.document_loaders.csv_loader import CSVLoader -from langchain.vectorstores import FAISS -import tempfile - - -user_api_key = st.sidebar.text_input( - label="#### Your OpenAI API key 👇", - placeholder="Paste your openAI API key, sk-", - type="password") - -uploaded_file = st.sidebar.file_uploader("upload", type="csv") - -if uploaded_file : - with tempfile.NamedTemporaryFile(delete=False) as tmp_file: - tmp_file.write(uploaded_file.getvalue()) - tmp_file_path = tmp_file.name - - loader = CSVLoader(file_path=tmp_file_path, encoding="utf-8") - data = loader.load() - - embeddings = OpenAIEmbeddings() - vectors = FAISS.from_documents(data, embeddings) 
- - chain = ConversationalRetrievalChain.from_llm(llm = ChatOpenAI(temperature=0.0,model_name='gpt-3.5-turbo', openai_api_key=user_api_key), - retriever=vectors.as_retriever()) - - def conversational_chat(query): - - result = chain({"question": query, "chat_history": st.session_state['history']}) - st.session_state['history'].append((query, result["answer"])) - - return result["answer"] - - if 'history' not in st.session_state: - st.session_state['history'] = [] - - if 'generated' not in st.session_state: - st.session_state['generated'] = ["Hello ! Ask me anything about " + uploaded_file.name + " 🤗"] - - if 'past' not in st.session_state: - st.session_state['past'] = ["Hey ! 👋"] - - #container for the chat history - response_container = st.container() - #container for the user's text input - container = st.container() - - with container: - with st.form(key='my_form', clear_on_submit=True): - - user_input = st.text_input("Query:", placeholder="Talk about your csv data here (:", key='input') - submit_button = st.form_submit_button(label='Send') - - if submit_button and user_input: - output = conversational_chat(user_input) - - st.session_state['past'].append(user_input) - st.session_state['generated'].append(output) - - if st.session_state['generated']: - with response_container: - for i in range(len(st.session_state['generated'])): - message(st.session_state["past"][i], is_user=True, key=str(i) + '_user', avatar_style="big-smile") - message(st.session_state["generated"][i], key=str(i), avatar_style="thumbs") - -#streamlit run tuto_chatbot_csv.py \ No newline at end of file diff --git a/spaces/CofAI/chat.b4/g4f/Provider/Providers/Forefront.py b/spaces/CofAI/chat.b4/g4f/Provider/Providers/Forefront.py deleted file mode 100644 index e7e89831cc4ec6dc37ea094d9828a7582e981ff1..0000000000000000000000000000000000000000 --- a/spaces/CofAI/chat.b4/g4f/Provider/Providers/Forefront.py +++ /dev/null @@ -1,30 +0,0 @@ -import os -import json -import requests -from ...typing import sha256, Dict, get_type_hints - -url = 'https://forefront.com' -model = ['gpt-3.5-turbo'] -supports_stream = True -needs_auth = False - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - json_data = { - 'text': messages[-1]['content'], - 'action': 'noauth', - 'id': '', - 'parentId': '', - 'workspaceId': '', - 'messagePersona': '607e41fe-95be-497e-8e97-010a59b2e2c0', - 'model': 'gpt-4', - 'messages': messages[:-1] if len(messages) > 1 else [], - 'internetMode': 'auto' - } - response = requests.post( 'https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat', - json=json_data, stream=True) - for token in response.iter_lines(): - if b'delta' in token: - token = json.loads(token.decode().split('data: ')[1])['delta'] - yield (token) -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/CofAI/optor/README.md b/spaces/CofAI/optor/README.md deleted file mode 100644 index 5587fdebe61b6f8b9308a0834e1da0c065e68a75..0000000000000000000000000000000000000000 --- a/spaces/CofAI/optor/README.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: OPTOR with Dalle, Midjourney, Stable Diffusion -emoji: 🐻‍❄️ -colorFrom: pink -colorTo: gray -sdk: static -pinned: true ---- \ No newline at end of file diff --git a/spaces/Cpp4App/Cpp4App/examples/6.html 
b/spaces/Cpp4App/Cpp4App/examples/6.html deleted file mode 100644 index 7690c36608525ec2753a5c1402a2f28c3f7ce085..0000000000000000000000000000000000000000 --- a/spaces/Cpp4App/Cpp4App/examples/6.html +++ /dev/null @@ -1,28 +0,0 @@ - - -Privacy Policy - - - - -
    - - - - - - - - - - - - - - \ No newline at end of file diff --git a/spaces/Cropinky/hana_hanak_houses/weights/README.md b/spaces/Cropinky/hana_hanak_houses/weights/README.md deleted file mode 100644 index d9a76c4c6b10160fa706e4e1abdde1d67ff5eb68..0000000000000000000000000000000000000000 --- a/spaces/Cropinky/hana_hanak_houses/weights/README.md +++ /dev/null @@ -1 +0,0 @@ -we goign \ No newline at end of file diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/local.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/local.py deleted file mode 100644 index 90045d891d6b44249222be7614d63d879d81fec0..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/local.py +++ /dev/null @@ -1,424 +0,0 @@ -import datetime -import io -import logging -import os -import os.path as osp -import posixpath -import re -import shutil -import stat -import tempfile - -from fsspec import AbstractFileSystem -from fsspec.compression import compr -from fsspec.core import get_compression -from fsspec.utils import isfilelike, stringify_path - -logger = logging.getLogger("fsspec.local") - - -class LocalFileSystem(AbstractFileSystem): - """Interface to files on local storage - - Parameters - ---------- - auto_mkdir: bool - Whether, when opening a file, the directory containing it should - be created (if it doesn't already exist). This is assumed by pyarrow - code. - """ - - root_marker = "/" - protocol = "file" - local_file = True - - def __init__(self, auto_mkdir=False, **kwargs): - super().__init__(**kwargs) - self.auto_mkdir = auto_mkdir - - @property - def fsid(self): - return "local" - - def mkdir(self, path, create_parents=True, **kwargs): - path = self._strip_protocol(path) - if self.exists(path): - raise FileExistsError(path) - if create_parents: - self.makedirs(path, exist_ok=True) - else: - os.mkdir(path, **kwargs) - - def makedirs(self, path, exist_ok=False): - path = self._strip_protocol(path) - os.makedirs(path, exist_ok=exist_ok) - - def rmdir(self, path): - path = self._strip_protocol(path) - os.rmdir(path) - - def ls(self, path, detail=False, **kwargs): - path = self._strip_protocol(path) - if detail: - with os.scandir(path) as it: - return [self.info(f) for f in it] - else: - return [posixpath.join(path, f) for f in os.listdir(path)] - - def glob(self, path, **kwargs): - path = self._strip_protocol(path) - return super().glob(path, **kwargs) - - def info(self, path, **kwargs): - if isinstance(path, os.DirEntry): - # scandir DirEntry - out = path.stat(follow_symlinks=False) - link = path.is_symlink() - if path.is_dir(follow_symlinks=False): - t = "directory" - elif path.is_file(follow_symlinks=False): - t = "file" - else: - t = "other" - path = self._strip_protocol(path.path) - else: - # str or path-like - path = self._strip_protocol(path) - out = os.stat(path, follow_symlinks=False) - link = stat.S_ISLNK(out.st_mode) - if link: - out = os.stat(path, follow_symlinks=True) - if stat.S_ISDIR(out.st_mode): - t = "directory" - elif stat.S_ISREG(out.st_mode): - t = "file" - else: - t = "other" - result = { - "name": path, - "size": out.st_size, - "type": t, - "created": out.st_ctime, - "islink": link, - } - for field in ["mode", "uid", "gid", "mtime", "ino", "nlink"]: - result[field] = getattr(out, "st_" + field) - if result["islink"]: - result["destination"] = os.readlink(path) - try: - out2 = os.stat(path, follow_symlinks=True) - result["size"] = out2.st_size - 
except OSError: - result["size"] = 0 - return result - - def lexists(self, path, **kwargs): - return osp.lexists(path) - - def cp_file(self, path1, path2, **kwargs): - path1 = self._strip_protocol(path1).rstrip("/") - path2 = self._strip_protocol(path2).rstrip("/") - if self.auto_mkdir: - self.makedirs(self._parent(path2), exist_ok=True) - if self.isfile(path1): - shutil.copyfile(path1, path2) - elif self.isdir(path1): - self.mkdirs(path2, exist_ok=True) - else: - raise FileNotFoundError(path1) - - def get_file(self, path1, path2, callback=None, **kwargs): - if isfilelike(path2): - with open(path1, "rb") as f: - shutil.copyfileobj(f, path2) - else: - return self.cp_file(path1, path2, **kwargs) - - def put_file(self, path1, path2, callback=None, **kwargs): - return self.cp_file(path1, path2, **kwargs) - - def mv_file(self, path1, path2, **kwargs): - path1 = self._strip_protocol(path1).rstrip("/") - path2 = self._strip_protocol(path2).rstrip("/") - shutil.move(path1, path2) - - def link(self, src, dst, **kwargs): - src = self._strip_protocol(src) - dst = self._strip_protocol(dst) - os.link(src, dst, **kwargs) - - def symlink(self, src, dst, **kwargs): - src = self._strip_protocol(src) - dst = self._strip_protocol(dst) - os.symlink(src, dst, **kwargs) - - def islink(self, path) -> bool: - return os.path.islink(self._strip_protocol(path)) - - def rm_file(self, path): - os.remove(self._strip_protocol(path)) - - def rm(self, path, recursive=False, maxdepth=None): - if not isinstance(path, list): - path = [path] - - for p in path: - p = self._strip_protocol(p).rstrip("/") - if self.isdir(p): - if not recursive: - raise ValueError("Cannot delete directory, set recursive=True") - if osp.abspath(p) == os.getcwd(): - raise ValueError("Cannot delete current working directory") - shutil.rmtree(p) - else: - os.remove(p) - - def unstrip_protocol(self, name): - name = self._strip_protocol(name) # normalise for local/win/... - return f"file://{name}" - - def _open(self, path, mode="rb", block_size=None, **kwargs): - path = self._strip_protocol(path) - if self.auto_mkdir and "w" in mode: - self.makedirs(self._parent(path), exist_ok=True) - return LocalFileOpener(path, mode, fs=self, **kwargs) - - def touch(self, path, truncate=True, **kwargs): - path = self._strip_protocol(path) - if self.auto_mkdir: - self.makedirs(self._parent(path), exist_ok=True) - if self.exists(path): - os.utime(path, None) - else: - open(path, "a").close() - if truncate: - os.truncate(path, 0) - - def created(self, path): - info = self.info(path=path) - return datetime.datetime.utcfromtimestamp(info["created"]) - - def modified(self, path): - info = self.info(path=path) - return datetime.datetime.utcfromtimestamp(info["mtime"]) - - @classmethod - def _parent(cls, path): - path = cls._strip_protocol(path).rstrip("/") - if "/" in path: - return path.rsplit("/", 1)[0] - else: - return cls.root_marker - - @classmethod - def _strip_protocol(cls, path): - path = stringify_path(path) - if path.startswith("file://"): - path = path[7:] - elif path.startswith("file:"): - path = path[5:] - return make_path_posix(path).rstrip("/") or cls.root_marker - - def _isfilestore(self): - # Inheriting from DaskFileSystem makes this False (S3, etc. were) - # the original motivation. But we are a posix-like file system. 
- # See https://github.com/dask/dask/issues/5526 - return True - - def chmod(self, path, mode): - path = stringify_path(path) - return os.chmod(path, mode) - - -def make_path_posix(path, sep=os.sep): - """Make path generic""" - if isinstance(path, (list, set, tuple)): - return type(path)(make_path_posix(p) for p in path) - if "~" in path: - path = osp.expanduser(path) - if sep == "/": - # most common fast case for posix - if path.startswith("/"): - return path - if path.startswith("./"): - path = path[2:] - return os.getcwd() + "/" + path - if ( - (sep not in path and "/" not in path) - or (sep == "/" and not path.startswith("/")) - or (sep == "\\" and ":" not in path and not path.startswith("\\\\")) - ): - # relative path like "path" or "rel\\path" (win) or rel/path" - if os.sep == "\\": - # abspath made some more '\\' separators - return make_path_posix(osp.abspath(path)) - else: - return os.getcwd() + "/" + path - if path.startswith("file://"): - path = path[7:] - if re.match("/[A-Za-z]:", path): - # for windows file URI like "file:///C:/folder/file" - # or "file:///C:\\dir\\file" - path = path[1:].replace("\\", "/").replace("//", "/") - if path.startswith("\\\\"): - # special case for windows UNC/DFS-style paths, do nothing, - # just flip the slashes around (case below does not work!) - return path.replace("\\", "/") - if re.match("[A-Za-z]:", path): - # windows full path like "C:\\local\\path" - return path.lstrip("\\").replace("\\", "/").replace("//", "/") - if path.startswith("\\"): - # windows network path like "\\server\\path" - return "/" + path.lstrip("\\").replace("\\", "/").replace("//", "/") - return path - - -def trailing_sep(path): - """Return True if the path ends with a path separator. - - A forward slash is always considered a path separator, even on Operating - Systems that normally use a backslash. - """ - # TODO: if all incoming paths were posix-compliant then separator would - # always be a forward slash, simplifying this function. - # See https://github.com/fsspec/filesystem_spec/pull/1250 - return path.endswith(os.sep) or (os.altsep is not None and path.endswith(os.altsep)) - - -def trailing_sep_maybe_asterisk(path): - """Return True if the path ends with a path separator and optionally an - asterisk. - - A forward slash is always considered a path separator, even on Operating - Systems that normally use a backslash. - """ - # TODO: if all incoming paths were posix-compliant then separator would - # always be a forward slash, simplifying this function. - # See https://github.com/fsspec/filesystem_spec/pull/1250 - return path.endswith((os.sep, os.sep + "*")) or ( - os.altsep is not None and path.endswith((os.altsep, os.altsep + "*")) - ) - - -class LocalFileOpener(io.IOBase): - def __init__( - self, path, mode, autocommit=True, fs=None, compression=None, **kwargs - ): - logger.debug("open file: %s", path) - self.path = path - self.mode = mode - self.fs = fs - self.f = None - self.autocommit = autocommit - self.compression = get_compression(path, compression) - self.blocksize = io.DEFAULT_BUFFER_SIZE - self._open() - - def _open(self): - if self.f is None or self.f.closed: - if self.autocommit or "w" not in self.mode: - self.f = open(self.path, mode=self.mode) - if self.compression: - compress = compr[self.compression] - self.f = compress(self.f, mode=self.mode) - else: - # TODO: check if path is writable? 
- i, name = tempfile.mkstemp() - os.close(i) # we want normal open and normal buffered file - self.temp = name - self.f = open(name, mode=self.mode) - if "w" not in self.mode: - self.size = self.f.seek(0, 2) - self.f.seek(0) - self.f.size = self.size - - def _fetch_range(self, start, end): - # probably only used by cached FS - if "r" not in self.mode: - raise ValueError - self._open() - self.f.seek(start) - return self.f.read(end - start) - - def __setstate__(self, state): - self.f = None - loc = state.pop("loc", None) - self.__dict__.update(state) - if "r" in state["mode"]: - self.f = None - self._open() - self.f.seek(loc) - - def __getstate__(self): - d = self.__dict__.copy() - d.pop("f") - if "r" in self.mode: - d["loc"] = self.f.tell() - else: - if not self.f.closed: - raise ValueError("Cannot serialise open write-mode local file") - return d - - def commit(self): - if self.autocommit: - raise RuntimeError("Can only commit if not already set to autocommit") - shutil.move(self.temp, self.path) - - def discard(self): - if self.autocommit: - raise RuntimeError("Cannot discard if set to autocommit") - os.remove(self.temp) - - def readable(self) -> bool: - return True - - def writable(self) -> bool: - return "r" not in self.mode - - def read(self, *args, **kwargs): - return self.f.read(*args, **kwargs) - - def write(self, *args, **kwargs): - return self.f.write(*args, **kwargs) - - def tell(self, *args, **kwargs): - return self.f.tell(*args, **kwargs) - - def seek(self, *args, **kwargs): - return self.f.seek(*args, **kwargs) - - def seekable(self, *args, **kwargs): - return self.f.seekable(*args, **kwargs) - - def readline(self, *args, **kwargs): - return self.f.readline(*args, **kwargs) - - def readlines(self, *args, **kwargs): - return self.f.readlines(*args, **kwargs) - - def close(self): - return self.f.close() - - @property - def closed(self): - return self.f.closed - - def fileno(self): - return self.raw.fileno() - - def flush(self) -> None: - self.f.flush() - - def __iter__(self): - return self.f.__iter__() - - def __getattr__(self, item): - return getattr(self.f, item) - - def __enter__(self): - self._incontext = True - return self - - def __exit__(self, exc_type, exc_value, traceback): - self._incontext = False - self.f.__exit__(exc_type, exc_value, traceback) diff --git a/spaces/Dagfinn1962/stablediffusion-members/index.html b/spaces/Dagfinn1962/stablediffusion-members/index.html deleted file mode 100644 index 40b11abfac0f6f7c145d1d349a978f07587cf433..0000000000000000000000000000000000000000 --- a/spaces/Dagfinn1962/stablediffusion-members/index.html +++ /dev/null @@ -1,305 +0,0 @@ -import gradio as gr -import os -import sys -from pathlib import Path - -models = [ - {"name": "Deliberate", "url": "Masagin/Deliberate"}, - {"name": "Dreamlike Anime", "url": "dreamlike-art/dreamlike-anime-1.0"}, - {"name": "Dreamlike Diffusion", "url": "dreamlike-art/dreamlike-diffusion-1.0"}, - {"name": "Dreamlike Photoreal", "url": "dreamlike-art/dreamlike-photoreal-2.0"}, - {"name": "Dreamshaper", "url": "Lykon/DreamShaper"}, - {"name": "Lyriel 1.3", "url": "sakistriker/Lyriel_V1.3"}, - {"name": "Never Ending Dream 2", "url": "luongphamit/NeverEnding-Dream2"}, - {"name": "Protogen X 5.8", "url": "darkstorm2150/Protogen_x5.8_Official_Release"}, - {"name": "❤ ART MODELS ==========", "url": "dreamlike-art/dreamlike-diffusion-1.0"}, - {"name": "Alice in Diffusion Land", "url": "Guizmus/SDArt_AliceInDiffusionLand"}, - {"name": "Alt Clip", "url": "BAAI/AltCLIP"}, - {"name": "Anything Midjourney 4.1", 
"url": "Joeythemonster/anything-midjourney-v-4-1"}, - {"name": "Chaos and Order", "url": "Guizmus/SDArt_ChaosAndOrder768"}, - {"name": "Chilloutclara", "url": "Fred99774/chilloutvlara"}, - {"name": "Comic Diffusion", "url": "ogkalu/Comic-Diffusion"}, - {"name": "Cosmic Horros 768", "url": "Guizmus/SDArt_cosmichorrors768"}, - {"name": "Cosmic Horros", "url": "Guizmus/SDArt_cosmichorrors"}, - {"name": "DGSpitzer", "url": "DGSpitzer/DGSpitzer-Art-Diffusion"}, - {"name": "Dungeons and Diffusion", "url": "0xJustin/Dungeons-and-Diffusion"}, - {"name": "Elden Ring", "url": "nitrosocke/elden-ring-diffusion"}, - {"name": "Epic Diffusion 1.1", "url": "johnslegers/epic-diffusion-v1.1"}, - {"name": "Epic Diffusion", "url": "johnslegers/epic-diffusion"}, - {"name": "EpicMix Realism", "url": "Duskfallcrew/EpicMix_Realism"}, - {"name": "Fantasy Mix", "url": "theintuitiveye/FantasyMix"}, - {"name": "Girl New 1", "url": "Fred99774/girlnew1"}, - {"name": "Lit 6B", "url": "hakurei/lit-6B"}, - {"name": "Luna Diffusion", "url": "proximasanfinetuning/luna-diffusion"}, - {"name": "Midjourney 4.0", "url": "flax/midjourney-v4-diffusion"}, - {"name": "Midjourney 4.1", "url": "Joeythemonster/anything-midjourney-v-4-1"}, - {"name": "Mo-Di Diffusion", "url": "nitrosocke/mo-di-diffusion"}, - {"name": "Nitro Diffusion", "url": "nitrosocke/Nitro-Diffusion"}, - {"name": "Openjourney V2", "url": "prompthero/openjourney-v2"}, - {"name": "Openjourney", "url": "prompthero/openjourney"}, - {"name": "Seek Art Mega", "url": "coreco/seek.art_MEGA"}, - {"name": "Something", "url": "Guizmus/SDArt_something"}, - {"name": "Spider Verse diffusion", "url": "nitrosocke/spider-verse-diffusion"}, - {"name": "Vintedois 1.0", "url": "22h/vintedois-diffusion-v0-1"}, - {"name": "Vintedois 2.0", "url": "22h/vintedois-diffusion-v0-2"}, - {"name": "❤ ART STYLES ==========", "url": "joachimsallstrom/Double-Exposure-Diffusion"}, - {"name": "Balloon Art", "url": "Fictiverse/Stable_Diffusion_BalloonArt_Model"}, - {"name": "Double Exposure Diffusion", "url": "joachimsallstrom/Double-Exposure-Diffusion"}, - {"name": "Fluid Art", "url": "Fictiverse/Stable_Diffusion_FluidArt_Model"}, - {"name": "GTA5 Artwork Diffusion", "url": "ItsJayQz/GTA5_Artwork_Diffusion"}, - {"name": "Marvel WhatIf Diffusion", "url": "ItsJayQz/Marvel_WhatIf_Diffusion"}, - {"name": "Naruto Diffuser", "url": "lambdalabs/sd-naruto-diffusers"}, - {"name": "Papercut", "url": "Fictiverse/Stable_Diffusion_PaperCut_Model"}, - {"name": "Pokemon Diffuser", "url": "lambdalabs/sd-pokemon-diffusers"}, - {"name": "Synthwave Punk 2", "url": "ItsJayQz/SynthwavePunk-v2"}, - {"name": "Valorant Diffusion", "url": "ItsJayQz/Valorant_Diffusion"}, - {"name": "Van Gogh Diffusion", "url": "dallinmackay/Van-Gogh-diffusion"}, - {"name": "Vectorartz Diffusion", "url": "coder119/Vectorartz_Diffusion"}, - {"name": "VoxelArt", "url": "Fictiverse/Stable_Diffusion_VoxelArt_Model"}, - {"name": "❤ ANIME MODELS ==========", "url": "dreamlike-art/dreamlike-anime-1.0"}, - {"name": "7 Pa", "url": "AIARTCHAN/7pa"}, - {"name": "A Certain Model", "url": "JosephusCheung/ACertainModel"}, - {"name": "A Certain Thing", "url": "JosephusCheung/ACertainThing"}, - {"name": "A Certainity", "url": "JosephusCheung/ACertainty"}, - {"name": "Abyss Hell Hero", "url": "AIARTCHAN/AbyssHellHero"}, - {"name": "Abyss Maple 3", "url": "AIARTCHAN/AbyssMapleVer3"}, - {"name": "Abyss Orange Mix 2", "url": "WarriorMama777/AbyssOrangeMix2"}, - {"name": "Abyss Orange Mix 4", "url": "sakistriker/AbyssOrangeMix3"}, - {"name": "Abyss Orange Mix", 
"url": "WarriorMama777/AbyssOrangeMix"}, - {"name": "AbyssHell 3", "url": "AIARTCHAN/AbyssHellVer3"}, - {"name": "All 526 Animated", "url": "stablediffusionapi/all-526-animated"}, - {"name": "Anidosmix 3", "url": "AIARTCHAN/anidosmixV2"}, - {"name": "Anime Kawai Diffusion", "url": "Ojimi/anime-kawai-diffusion"}, - {"name": "Anireal 3D V2", "url": "circulus/sd-anireal-3d-v2"}, - {"name": "AnyLORA", "url": "kubanemil/AnyLORA"}, - {"name": "Anything 2.1", "url": "swl-models/anything-v2.1"}, - {"name": "Anything 3.0 Light", "url": "mm00/anything-v3.0-light"}, - {"name": "Anything 3.0", "url": "Linaqruf/anything-v3.0"}, - {"name": "Anything 3.1", "url": "cag/anything-v3-1"}, - {"name": "Anything 3X", "url": "iZELX1/Anything-V3-X"}, - {"name": "Anything 4.0", "url": "andite/anything-v4.0"}, - {"name": "Anything 5", "url": "sakistriker/Anything_V5_PrtRE"}, - {"name": "Anything 5.0", "url": "stablediffusionapi/anything-v5"}, - {"name": "Anything Else 4", "url": "stablediffusionapi/anythingelse-v4"}, - {"name": "Anything Else 5", "url": "stablediffusionapi/anything-v5"}, - {"name": "Arcane Diffusion", "url": "nitrosocke/Arcane-Diffusion"}, - {"name": "Archer Diffusion", "url": "nitrosocke/archer-diffusion"}, - {"name": "Asian Mix", "url": "D1b4l4p/AsianMix"}, - {"name": "Blood Orange Mix", "url": "WarriorMama777/BloodOrangeMix"}, - {"name": "CamelliaMix 2.5D","url": "stablediffusionapi/camelliamix25d"}, - {"name": "CamelliaMix Line","url": "stablediffusionapi/camelliamixline"}, - {"name": "CamelliaMix","url": "Powidl43/CamelliaMix"}, - {"name": "Cetusmix", "url": "stablediffusionapi/cetusmix"}, - {"name": "Chik Mix", "url": "stablediffusionapi/chikmix"}, - {"name": "Chikmix", "url": "stablediffusionapi/chikmix"}, - {"name": "Chillout App Factory","url": "stablediffusionapi/chillout-app-factory"}, - {"name": "Classic Anime", "url": "nitrosocke/classic-anim-diffusion"}, - {"name": "Cool Japan Diffusion 2.1.2", "url": "aipicasso/cool-japan-diffusion-2-1-2"}, - {"name": "Cosmic Babes", "url": "stablediffusionapi/cosmic-babes"}, - {"name": "Counterfeit 1.0", "url": "gsdf/counterfeit-v1.0"}, - {"name": "Counterfeit 2", "url": "gsdf/Counterfeit-V2.0"}, - {"name": "Counterfeit 2.0", "url": "gsdf/Counterfeit-V2.0"}, - {"name": "Counterfeit 3.0", "url": "stablediffusionapi/counterfeit-v30"}, - {"name": "CuteSexyRobutts", "url": "andite/cutesexyrobutts-diffusion"}, - {"name": "CyberPunk Anime", "url": "DGSpitzer/Cyberpunk-Anime-Diffusion"}, - {"name": "Dark Sushi Mix", "url": "stablediffusionapi/dark-sushi-mix"}, - {"name": "Dash Sushi 25d", "url": "stablediffusionapi/dark-sushi-25d"}, - {"name": "DucHaiten Anime", "url": "DucHaiten/DucHaitenAnime"}, - {"name": "Eerie Orange Mix", "url": "WarriorMama777/EerieOrangeMix"}, - {"name": "Eimis Anime Diffusion", "url": "eimiss/EimisAnimeDiffusion_1.0v"}, - {"name": "Ghibli Diffusion", "url": "nitrosocke/Ghibli-Diffusion"}, - {"name": "GrapeFruit", "url": "iZELX1/Grapefruit"}, - {"name": "GuoFeng 3", "url": "xiaolxl/GuoFeng3"}, - {"name": "Guweiz Diffusion", "url": "andite/guweiz-diffusion"}, - {"name": "Hiten Diffusion", "url": "andite/hiten-diffusion"}, - {"name": "Icomix 2", "url": "stablediffusionapi/icomix-2"}, - {"name": "InkPunk Diffusion", "url": "Envvi/Inkpunk-Diffusion"}, - {"name": "Mama Orange Mixs", "url": "WarriorMama777/OrangeMixs"}, - {"name": "Mashuu Diffusion", "url": "andite/mashuu-diffusion"}, - {"name": "Meainamis 8", "url": "sakistriker/MeinaMix_V8"}, - {"name": "Meina Alter", "url": "stablediffusionapi/meinaalter"}, - {"name": "Meina Pastel", 
"url": "stablediffusionapi/meinapastel"}, - {"name": "MeinaMix 7", "url": "Nacholmo/meinamixv7-diffusers"}, - {"name": "Mignon Diffusion", "url": "andite/mignon-diffusion"}, - {"name": "MikaPikazo Diffusion", "url": "andite/mikapikazo-diffusion"}, - {"name": "Mikapikazo", "url": "andite/mikapikazo-diffusion"}, - {"name": "Mix Pro V4", "url": "AIARTCHAN/MIX-Pro-V4"}, - {"name": "NeverEnding-Dream", "url": "Lykon/NeverEnding-Dream"}, - {"name": "Niji V5 Style 1", "url": "sakistriker/NijiV5style_V1"}, - {"name": "Openjourney 4", "url": "prompthero/openjourney-v4"}, - {"name": "OpenNiji", "url": "Korakoe/OpenNiji"}, - {"name": "Pastel Mix", "url": "andite/pastel-mix"}, - {"name": "Picasso Diffusion 1.1", "url": "aipicasso/picasso-diffusion-1-1"}, - {"name": "Piromizu Diffusion", "url": "andite/piromizu-diffusion"}, - {"name": "Protogen 2.2", "url": "darkstorm2150/Protogen_v2.2_Official_Release"}, - {"name": "Protogen Infinity", "url": "darkstorm2150/Protogen_Infinity_Official_Release"}, - {"name": "Protogen X 3.4", "url": "darkstorm2150/Protogen_x3.4_Official_Release"}, - {"name": "Rev Anim", "url": "stablediffusionapi/rev-anim"}, - {"name": "Rev Animated", "url": "coreml/coreml-ReV-Animated"}, - {"name": "Rev Animated", "url": "LottePeisch/RevAnimated-Diffusers"}, - {"name": "Something V 2.2","url": "NoCrypt/SomethingV2_2"}, - {"name": "Something V2","url": "NoCrypt/SomethingV2"}, - {"name": "Three Delicacy", "url": "stablediffusionapi/three-delicacy"}, - {"name": "Three Delicacy wonto", "url": "stablediffusionapi/three-delicacy-wonto"}, - {"name": "TMND mix", "url": "stablediffusionapi/tmnd-mix"}, - {"name": "Waifu Diffusion", "url": "hakurei/waifu-diffusion"}, - {"name": "❤ REALISTIC PHOTO MODELS ==========", "url": "dreamlike-art/dreamlike-photoreal-2.0"}, - {"name": "AmiIReal", "url": "stablediffusionapi/amireal"}, - {"name": "Analog Diffusion", "url": "wavymulder/Analog-Diffusion"}, - {"name": "Circulus 2.8", "url": "circulus/sd-photoreal-v2.8"}, - {"name": "Circulus Photoreal V2", "url": "circulus/sd-photoreal-real-v2"}, - {"name": "Claudfuen 1", "url": "claudfuen/photorealistic-fuen-v1"}, - {"name": "Collage Diffusion", "url": "wavymulder/collage-diffusion"}, - {"name": "Cyberrealistic", "url": "stablediffusionapi/cyberrealistic"}, - {"name": "Dreamful 2", "url": "Hius/DreamFul-V2"}, - {"name": "GakkiMix768", "url": "Sa1i/gakki-mix-768"}, - {"name": "Grimoeresigils", "url": "ECarbenia/grimoiresigils"}, - {"name": "HARDBlend", "url": "theintuitiveye/HARDblend"}, - {"name": "HassanBlend 1.4", "url": "hassanblend/hassanblend1.4"}, - {"name": "HassanBlend 1.5.1.2", "url": "hassanblend/HassanBlend1.5.1.2"}, - {"name": "Lomo Diffusion", "url": "wavymulder/lomo-diffusion"}, - {"name": "Model Shoot", "url": "wavymulder/modelshoot"}, - {"name": "Portrait Plus", "url": "wavymulder/portraitplus"}, - {"name": "QuinceMix", "url": "Hemlok/QuinceMix"}, - {"name": "Realistic Vision 1.4", "url": "SG161222/Realistic_Vision_V1.4"}, - {"name": "The Ally", "url": "stablediffusionapi/the-ally"}, - {"name": "Timeless Diffusion", "url": "wavymulder/timeless-diffusion"}, - {"name": "UltraSkin", "url": "VegaKH/Ultraskin"}, - {"name": "Wavyfusion", "url": "wavymulder/wavyfusion"}, - {"name": "❤ SEMI-REALISTIC MODELS ==========", "url": "stablediffusionapi/all-526"}, - {"name": "All 526", "url": "stablediffusionapi/all-526"}, - {"name": "All 526 animated", "url": "stablediffusionapi/all-526-animated"}, - {"name": "Circulus Semi Real 2", "url": "circulus/sd-photoreal-semi-v2"}, - {"name": "Semi Real Mix", "url": 
"robotjung/SemiRealMix"}, - {"name": "SpyBG", "url": "stablediffusionapi/spybg"}, - {"name": "❤ STABLE DIFFUSION MODELS ==========", "url": "stabilityai/stable-diffusion-2-1"}, - {"name": "Stable Diffusion 1.4","url": "CompVis/stable-diffusion-v1-4"}, - {"name": "Stable Diffusion 1.5","url": "runwayml/stable-diffusion-v1-5"}, - {"name": "Stable Diffusion 2.1","url": "stabilityai/stable-diffusion-2-1"}, - {"name": "Stable Diffusion 2.1 Base","url": "stabilityai/stable-diffusion-2-1-base"}, - {"name": "Stable Diffusion 2.1 Unclip","url": "stabilityai/stable-diffusion-2-1-unclip"}, - {"name": "❤ SCI FI MODELS ==========", "url": "nitrosocke/Future-Diffusion"}, - {"name": "Future Diffusion", "url": "nitrosocke/Future-Diffusion"}, - {"name": "JWST Deep Space Diffusion", "url": "dallinmackay/JWST-Deep-Space-diffusion"}, - {"name": "Robo Diffusion 3 Base", "url": "nousr/robo-diffusion-2-base"}, - {"name": "Robo Diffusion", "url": "nousr/robo-diffusion"}, - {"name": "Tron Legacy Diffusion", "url": "dallinmackay/Tron-Legacy-diffusion"}, - {"name": "❤ 3D ART MODELS ==========", "url": "DucHaiten/DucHaitenAIart"}, - {"name": "DucHaiten Art", "url": "DucHaiten/DucHaitenAIart"}, - {"name": "DucHaiten ClassicAnime", "url": "DucHaiten/DH_ClassicAnime"}, - {"name": "DucHaiten DreamWorld", "url": "DucHaiten/DucHaitenDreamWorld"}, - {"name": "DucHaiten Journey", "url": "DucHaiten/DucHaitenJourney"}, - {"name": "DucHaiten StyleLikeMe", "url": "DucHaiten/DucHaiten-StyleLikeMe"}, - {"name": "DucHaiten SuperCute", "url": "DucHaiten/DucHaitenSuperCute"}, - {"name": "Redshift Diffusion 768", "url": "nitrosocke/redshift-diffusion-768"}, - {"name": "Redshift Diffusion", "url": "nitrosocke/redshift-diffusion"}, -] - -current_model = models[0] - -text_gen = gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link") - -models2 = [] -for model in models: - model_url = f"models/{model['url']}" - loaded_model = gr.Interface.load(model_url, live=True, preprocess=True) - models2.append(loaded_model) - - -def text_it(inputs, text_gen=text_gen): - return text_gen(inputs) - - -def set_model(current_model_index): - global current_model - current_model = models[current_model_index] - return gr.update(label=f"{current_model['name']}") - - -def send_it(inputs, model_choice): - proc = models2[model_choice] - return proc(inputs) - - -css = """""" - -with gr.Blocks(css=css) as myface: - gr.HTML( - """ - - - - - - - - - - - - - - - -""" - ) - - with gr.Row(): - with gr.Row(): - input_text = gr.Textbox(label="Prompt idea", lines=1) - # Model selection dropdown - model_name1 = gr.Dropdown( - label="Choose Model", - choices=[m["name"] for m in models], - type="index", - value=current_model["name"], - interactive=True, - ) - with gr.Row(): - see_prompts = gr.Button("Generate Prompts") - run = gr.Button("Generate Images", variant="primary") - with gr.Tab("Main"): - with gr.Row(): - output1 = gr.Image(label=f"{current_model['name']}") - output2 = gr.Image(label=f"{current_model['name']}") - output3 = gr.Image(label=f"{current_model['name']}") - output4 = gr.Image(label=f"{current_model['name']}") - with gr.Row(): - magic1 = gr.Textbox(lines=4) - magic2 = gr.Textbox(lines=4) - magic3 = gr.Textbox(lines=4) - magic4 = gr.Textbox(lines=4) - - with gr.Row(): - output5 = gr.Image(label=f"{current_model['name']}") - output6 = gr.Image(label=f"{current_model['name']}") - output7 = gr.Image(label=f"{current_model['name']}") - output8 = gr.Image(label=f"{current_model['name']}") - with gr.Row(): - magic5 = gr.Textbox(lines=4) - magic6 = 
gr.Textbox(lines=4) - magic7 = gr.Textbox(lines=4) - magic8 = gr.Textbox(lines=4) - - model_name1.change(set_model, inputs=model_name1, outputs=[output1, output2, output3, output4, output5, output6, output7, output8]) - - run.click(send_it, inputs=[magic1, model_name1], outputs=[output1]) - run.click(send_it, inputs=[magic2, model_name1], outputs=[output2]) - run.click(send_it, inputs=[magic3, model_name1], outputs=[output3]) - run.click(send_it, inputs=[magic4, model_name1], outputs=[output4]) - run.click(send_it, inputs=[magic5, model_name1], outputs=[output5]) - run.click(send_it, inputs=[magic6, model_name1], outputs=[output6]) - run.click(send_it, inputs=[magic7, model_name1], outputs=[output7]) - run.click(send_it, inputs=[magic8, model_name1], outputs=[output8]) - - see_prompts.click(text_it, inputs=[input_text], outputs=[magic1]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic2]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic3]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic4]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic5]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic6]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic7]) - see_prompts.click(text_it, inputs=[input_text], outputs=[magic8]) - -myface.queue(concurrency_count=200) -myface.launch(inline=True, show_api=False, max_threads=400) \ No newline at end of file diff --git a/spaces/Dorado607/ChuanhuChatGPT/readme/README_en.md b/spaces/Dorado607/ChuanhuChatGPT/readme/README_en.md deleted file mode 100644 index f6441a7886c4043b01e04f0b488a9409f8b6803d..0000000000000000000000000000000000000000 --- a/spaces/Dorado607/ChuanhuChatGPT/readme/README_en.md +++ /dev/null @@ -1,140 +0,0 @@ -
- 简体中文 | English | 日本語
-
- 川虎 Chat 🐯 Chuanhu Chat
-
- [Logo]
-
- Lightweight and User-friendly Web-UI for LLMs including ChatGPT/ChatGLM/LLaMA
-
- [Badges: Tests Passing · GitHub Contributors · GitHub pull requests]
-
    - Streaming / Unlimited conversations / Save history / Preset prompts / Chat with files / Web search
    - LaTeX rendering / Table rendering / Code highlighting
    - Auto dark mode / Adaptive web interface / WeChat-like theme
    - Multi-parameters tuning / Multi-API-Key support / Multi-user support
- Compatible with GPT-4 / Local deployment for LLMs
-
- Video Tutorial · 2.0 Introduction · 3.0 Introduction & Tutorial || Online trial · One-Click deployment
-
- [Animation Demo]
-
-
-## Supported LLM Models
-
-**LLM models via API**:
-
-- [ChatGPT](https://chat.openai.com) ([GPT-4](https://openai.com/product/gpt-4))
-- [Google PaLM](https://developers.generativeai.google/products/palm)
-- [Inspur Yuan 1.0](https://air.inspur.com/home)
-- [MiniMax](https://api.minimax.chat/)
-- [XMChat](https://github.com/MILVLG/xmchat)
-
-**LLM models via local deployment**:
-
-- [ChatGLM](https://github.com/THUDM/ChatGLM-6B) ([ChatGLM2](https://github.com/THUDM/ChatGLM2-6B))
-- [LLaMA](https://github.com/facebookresearch/llama)
-- [StableLM](https://github.com/Stability-AI/StableLM)
-- [MOSS](https://github.com/OpenLMLab/MOSS)
-
-## Usage Tips
-
-- To better control ChatGPT, use the System Prompt.
-- To use a Prompt Template, select the Prompt Template Collection file first, then choose the desired prompt from the drop-down menu.
-- To try again if the response is unsatisfactory, use the `🔄 Regenerate` button.
-- To start a new line in the input box, press Shift + Enter.
-- To quickly switch between input history, press the ↑ and ↓ keys in the input box.
-- To deploy the program onto a server, set `"server_name": "0.0.0.0", "server_port": <your port number>,` in `config.json`.
-- To get a public shared link, set `"share": true,` in `config.json`. Note that the program must be running in order to be accessed via a public link.
-- To use it in Hugging Face Spaces: it is recommended to **Duplicate Space** and run the program in your own Space for a faster and more secure experience.
-
-## Quickstart
-
-```shell
-git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git
-cd ChuanhuChatGPT
-pip install -r requirements.txt
-```
-
-Then make a copy of `config_example.json`, rename it to `config.json`, and fill in your API key and other settings in the file.
-
-```shell
-python ChuanhuChatbot.py
-```
-
-A browser window will open and you will be able to chat with ChatGPT.
-
-> **Note**
->
-> Please check our [wiki page](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程) for detailed instructions.
-
-## Troubleshooting
-
-When you encounter problems, try manually pulling the latest changes of this project first. The steps are as follows:
-
-1. Download the latest code archive by clicking on `Download ZIP` on the webpage, or
-   ```shell
-   git pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f
-   ```
-2. Try installing the dependencies again (as this project may have introduced new dependencies)
-   ```
-   pip install -r requirements.txt
-   ```
-
-Generally, you can solve most problems by following these steps.
-
-If the problem persists, please refer to this page: [Frequently Asked Questions (FAQ)](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题)
-
-This page lists almost all the possible problems and solutions. Please read it carefully.
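For reference, here is a minimal sketch of the configuration step described above. It only assumes the `config_example.json` template mentioned in the Quickstart; the port value is just an example, and the exact key name for your API key should be taken from `config_example.json` itself.

```python
import json
import shutil
from pathlib import Path

# Start from the template shipped with the repository.
shutil.copy("config_example.json", "config.json")

config = json.loads(Path("config.json").read_text(encoding="utf-8"))
config["server_name"] = "0.0.0.0"   # listen on all interfaces when deploying to a server
config["server_port"] = 7860        # example port only; use any free port
config["share"] = True              # expose a public Gradio link while the program is running
# Fill in your API key under the key name used in config_example.json.

Path("config.json").write_text(json.dumps(config, indent=4, ensure_ascii=False), encoding="utf-8")
```

After writing `config.json`, start the app with `python ChuanhuChatbot.py` as in the Quickstart.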
- -## More Information - -More information could be found in our [wiki](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki): - -- [How to contribute a translation](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/Localization) -- [How to make a contribution](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/贡献指南) -- [How to cite the project](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可#如何引用该项目) -- [Project changelog](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/更新日志) -- [Project license](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可) - -## Starchart - -[![Star History Chart](https://api.star-history.com/svg?repos=GaiZhenbiao/ChuanhuChatGPT&type=Date)](https://star-history.com/#GaiZhenbiao/ChuanhuChatGPT&Date) - -## Contributors - - - - - -## Sponsor - -🐯 If you find this project helpful, feel free to buy me a coke or a cup of coffee~ - -Buy Me A Coffee - -image diff --git a/spaces/DragGan/DragGan-Inversion/PTI/evaluation/qualitative_edit_comparison.py b/spaces/DragGan/DragGan-Inversion/PTI/evaluation/qualitative_edit_comparison.py deleted file mode 100644 index 39ed13264a9df5a257746f02b070c54934eb3117..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/PTI/evaluation/qualitative_edit_comparison.py +++ /dev/null @@ -1,156 +0,0 @@ -import os -from random import choice -from string import ascii_uppercase -from PIL import Image -from tqdm import tqdm -from scripts.latent_editor_wrapper import LatentEditorWrapper -from evaluation.experiment_setting_creator import ExperimentRunner -import torch -from configs import paths_config, hyperparameters, evaluation_config -from utils.log_utils import save_concat_image, save_single_image -from utils.models_utils import load_tuned_G - - -class EditComparison: - - def __init__(self, save_single_images, save_concatenated_images, run_id): - - self.run_id = run_id - self.experiment_creator = ExperimentRunner(run_id) - self.save_single_images = save_single_images - self.save_concatenated_images = save_concatenated_images - self.latent_editor = LatentEditorWrapper() - - def save_reconstruction_images(self, image_latents, new_inv_image_latent, new_G, target_image): - if self.save_concatenated_images: - save_concat_image(self.concat_base_dir, image_latents, new_inv_image_latent, new_G, - self.experiment_creator.old_G, - 'rec', - target_image) - - if self.save_single_images: - save_single_image(self.single_base_dir, new_inv_image_latent, new_G, 'rec') - target_image.save(f'{self.single_base_dir}/Original.jpg') - - def create_output_dirs(self, full_image_name): - output_base_dir_path = f'{paths_config.experiments_output_dir}/{paths_config.input_data_id}/{self.run_id}/{full_image_name}' - os.makedirs(output_base_dir_path, exist_ok=True) - - self.concat_base_dir = f'{output_base_dir_path}/concat_images' - self.single_base_dir = f'{output_base_dir_path}/single_images' - - os.makedirs(self.concat_base_dir, exist_ok=True) - os.makedirs(self.single_base_dir, exist_ok=True) - - def get_image_latent_codes(self, image_name): - image_latents = [] - for method in evaluation_config.evaluated_methods: - if method == 'SG2': - image_latents.append(torch.load( - f'{paths_config.embedding_base_dir}/{paths_config.input_data_id}/' - f'{paths_config.pti_results_keyword}/{image_name}/0.pt')) - else: - image_latents.append(torch.load( - f'{paths_config.embedding_base_dir}/{paths_config.input_data_id}/{method}/{image_name}/0.pt')) - new_inv_image_latent = torch.load( - 
f'{paths_config.embedding_base_dir}/{paths_config.input_data_id}/{paths_config.pti_results_keyword}/{image_name}/0.pt') - - return image_latents, new_inv_image_latent - - def save_interfacegan_edits(self, image_latents, new_inv_image_latent, interfacegan_factors, new_G, target_image): - new_w_inv_edits = self.latent_editor.get_single_interface_gan_edits(new_inv_image_latent, - interfacegan_factors) - - inv_edits = [] - for latent in image_latents: - inv_edits.append(self.latent_editor.get_single_interface_gan_edits(latent, interfacegan_factors)) - - for direction, edits in new_w_inv_edits.items(): - for factor, edit_tensor in edits.items(): - if self.save_concatenated_images: - save_concat_image(self.concat_base_dir, [edits[direction][factor] for edits in inv_edits], - new_w_inv_edits[direction][factor], - new_G, - self.experiment_creator.old_G, - f'{direction}_{factor}', target_image) - if self.save_single_images: - save_single_image(self.single_base_dir, new_w_inv_edits[direction][factor], new_G, - f'{direction}_{factor}') - - def save_ganspace_edits(self, image_latents, new_inv_image_latent, factors, new_G, target_image): - new_w_inv_edits = self.latent_editor.get_single_ganspace_edits(new_inv_image_latent, factors) - inv_edits = [] - for latent in image_latents: - inv_edits.append(self.latent_editor.get_single_ganspace_edits(latent, factors)) - - for idx in range(len(new_w_inv_edits)): - if self.save_concatenated_images: - save_concat_image(self.concat_base_dir, [edit[idx] for edit in inv_edits], new_w_inv_edits[idx], - new_G, - self.experiment_creator.old_G, - f'ganspace_{idx}', target_image) - if self.save_single_images: - save_single_image(self.single_base_dir, new_w_inv_edits[idx], new_G, - f'ganspace_{idx}') - - def run_experiment(self, run_pt, create_other_latents, use_multi_id_training, use_wandb=False): - images_counter = 0 - new_G = None - interfacegan_factors = [val / 2 for val in range(-6, 7) if val != 0] - ganspace_factors = range(-20, 25, 5) - self.experiment_creator.run_experiment(run_pt, create_other_latents, use_multi_id_training, use_wandb) - - if use_multi_id_training: - new_G = load_tuned_G(self.run_id, paths_config.multi_id_model_type) - - for idx, image_path in tqdm(enumerate(self.experiment_creator.images_paths), - total=len(self.experiment_creator.images_paths)): - - if images_counter >= hyperparameters.max_images_to_invert: - break - - image_name = image_path.split('.')[0].split('/')[-1] - target_image = Image.open(self.experiment_creator.target_paths[idx]) - - if not use_multi_id_training: - new_G = load_tuned_G(self.run_id, image_name) - - image_latents, new_inv_image_latent = self.get_image_latent_codes(image_name) - - self.create_output_dirs(image_name) - - self.save_reconstruction_images(image_latents, new_inv_image_latent, new_G, target_image) - - self.save_interfacegan_edits(image_latents, new_inv_image_latent, interfacegan_factors, new_G, target_image) - - self.save_ganspace_edits(image_latents, new_inv_image_latent, ganspace_factors, new_G, target_image) - - target_image.close() - torch.cuda.empty_cache() - images_counter += 1 - - -def run_pti_and_full_edit(iid): - evaluation_config.evaluated_methods = ['SG2Plus', 'e4e', 'SG2'] - edit_figure_creator = EditComparison(save_single_images=True, save_concatenated_images=True, - run_id=f'{paths_config.input_data_id}_pti_full_edit_{iid}') - edit_figure_creator.run_experiment(True, True, use_multi_id_training=False, use_wandb=False) - - -def pti_no_comparison(iid): - evaluation_config.evaluated_methods = [] - 
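# With evaluated_methods left empty, only the PTI inversion itself is reconstructed and edited below; no baseline latents (SG2, SG2Plus, e4e) are loaded for comparison.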
edit_figure_creator = EditComparison(save_single_images=True, save_concatenated_images=True, - run_id=f'{paths_config.input_data_id}_pti_no_comparison_{iid}') - edit_figure_creator.run_experiment(True, False, use_multi_id_training=False, use_wandb=False) - - -def edits_for_existed_experiment(run_id): - evaluation_config.evaluated_methods = ['SG2Plus', 'e4e', 'SG2'] - edit_figure_creator = EditComparison(save_single_images=True, save_concatenated_images=True, - run_id=run_id) - edit_figure_creator.run_experiment(False, True, use_multi_id_training=False, use_wandb=False) - - -if __name__ == '__main__': - iid = ''.join(choice(ascii_uppercase) for i in range(7)) - pti_no_comparison(iid) diff --git a/spaces/Dusan/clickbaitonator/app.py b/spaces/Dusan/clickbaitonator/app.py deleted file mode 100644 index 09dc263468be832bf78ef7f0d0df54510ebd696b..0000000000000000000000000000000000000000 --- a/spaces/Dusan/clickbaitonator/app.py +++ /dev/null @@ -1,123 +0,0 @@ -import gradio as gr -from fudge.predict_clickbait import generate_clickbait, tokenizer, classifier_tokenizer -from datasets import load_dataset,DatasetDict,Dataset -# from datasets import -from transformers import AutoTokenizer,AutoModelForSeq2SeqLM -import numpy as np -from sklearn.model_selection import train_test_split -import pandas as pd -from sklearn.utils.class_weight import compute_class_weight -import torch -import pandas as pd -from fudge.model import Model -import os -from argparse import ArgumentParser -from collections import namedtuple -import mock - -from tqdm import tqdm -import numpy as np -import torch.nn as nn -import torch.nn.functional as F -from fudge.data import Dataset -from fudge.util import save_checkpoint, ProgressMeter, AverageMeter, num_params -from fudge.constants import * - - -device = 'cpu' -# imp.reload(model) -pretrained_model = "checkpoint-150/" -generation_model = AutoModelForSeq2SeqLM.from_pretrained(pretrained_model, return_dict=True).to(device) - - -pad_id = 0 - -generation_model.eval() - -model_args = mock.Mock() -model_args.task = 'clickbait' -model_args.device = device -model_args.checkpoint = 'checkpoint-1464/' - -# conditioning_model = Model(model_args, pad_id, len(dataset_info.index2word)) # no need to get the glove embeddings when reloading since they're saved in model ckpt anyway -conditioning_model = Model(model_args, pad_id, vocab_size=None) # no need to get the glove embeddings when reloading since they're saved in model ckpt anyway -conditioning_model = conditioning_model.to(device) -conditioning_model.eval() - -condition_lambda = 5.0 -length_cutoff = 50 -precondition_topk = 200 - - -conditioning_model.classifier - -model_args.checkpoint - -classifier_tokenizer = AutoTokenizer.from_pretrained(model_args.checkpoint, load_best_model_at_end=True) - - -def rate_title(input_text, model, tokenizer, device='cuda'): - # input_text = { - # "postText": input_text['postText'], - # "truthClass" : input_text['truthClass'] - # } - tokenized_input = preprocess_function_title_only_classification(input_text,tokenizer=tokenizer) - # print(tokenized_input.items()) - dict_tokenized_input = {k : torch.tensor([v]).to(device) for k,v in tokenized_input.items() if k != 'labels'} - predicted_class = float(model(**dict_tokenized_input).logits) - actual_class = input_text['truthClass'] - - # print(predicted_class, actual_class) - return {'predicted_class' : predicted_class} - -def preprocess_function_title_only_classification(examples,tokenizer=None): - model_inputs = tokenizer(examples['postText'], 
padding="longest", truncation=True, max_length=25) - - model_inputs['labels'] = examples['truthClass'] - - return model_inputs - - - -input_example = "On Friday, a clip of Los Angeles Lakers star LeBron James from the latest episode of \"The Shop: Uninterrupted\" is going viral on Twitter. \"Cause they racist as f--k,\" James said when asked why he hates Boston. James has had many battles with the Boston Celtics in the NBA Playoffs. According to StatMuse, he has played the Celtics 41 times in the NBA Playoffs. He's played them in the playoffs when he was on the Cleveland Cavaliers (the first time), the Miami Heat and the Cavs (the second time). Therefore, he has had quite the experience facing off with them in hostile environments. He is 25-16 against them in the 41 playoff games and averaged 29.6 points per game. (also according to StatMuse). James is currently on the Los Angeles Lakers, and the team missed the postseason this past year. They were the 11th seed in the Western Conference, so they also missed the play-in tournament which was a big surprise. His first year in Los Angeles, they also missed the playoffs, but the following season he led them to their first NBA Championship since the 2010 season. In 2021, they lost in the first-round, so they have been on a downward trajectory since winning the title. Next season will be his 20th season in the NBA, and he is widely regarded as one of the top-five (and to some the greatest) player ever to play the game of basketball. He is 37-years-old, and was the first overall pick out of high school in the 2003 NBA Draft. " - -output_example = "Here's why Lebron James hates the Celtics" -textbox_input = gr.Textbox(label = "Article content", - value=input_example) -textbox_output = gr.Textbox(label = "Output clickbait title", - value=output_example) - - -def clickbait_generator(article_content, condition_lambda=5.0): - results = generate_clickbait(model=generation_model, - tokenizer=tokenizer, - conditioning_model=conditioning_model, - input_text=[None], - dataset_info=None, - precondition_topk=precondition_topk, - length_cutoff=length_cutoff, - condition_lambda=condition_lambda, - article_content=article_content, - device=device) - - return results[0].replace('', '').replace('', '') - -title = "Clickbaitinator - Controllable Clickbait generator" -description = """ -Use the [Fudge](https://github.com/yangkevin2/naacl-2021-fudge-controlled-generation) implementation fine-tuned for our purposes to try and create news headline you are looking for! Use condition_lambda to steer your clickbaitiness higher (by increasing the slider value) or lower (by decreasing the slider value).
    -Note that this is using two Transformers and is executed with CPU-only, so it will take a minute or two to finish generating a title. -""" - -article = "Check out [the codebase for our model](https://github.com/dsvilarkovic/clickbaitonator) that this demo is based of. You need collaborator access, which you have been probably invited for." - - -app = gr.Interface( - title = title, - description = description, - label = 'Article content or paragraph', - fn = clickbait_generator, - inputs=[textbox_input, gr.Slider(0, 15, step=0.1, value=5.0)], - outputs=textbox_output, - article=article, - ) -app.launch() \ No newline at end of file diff --git a/spaces/Duskfallcrew/photography-and-landscapes/README.md b/spaces/Duskfallcrew/photography-and-landscapes/README.md deleted file mode 100644 index f2d9eaf30077fd25fd58dc66e5402faf8b0014e5..0000000000000000000000000000000000000000 --- a/spaces/Duskfallcrew/photography-and-landscapes/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Photography And Landscapes -emoji: 👁 -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Flux9665/IMS-Toucan/Layers/ResidualStack.py b/spaces/Flux9665/IMS-Toucan/Layers/ResidualStack.py deleted file mode 100644 index 8bfe256efbecd5d24eba743ae8f3ff0a2bb604c2..0000000000000000000000000000000000000000 --- a/spaces/Flux9665/IMS-Toucan/Layers/ResidualStack.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2019 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) -# Adapted by Florian Lux 2021 - - -import torch - - -class ResidualStack(torch.nn.Module): - - def __init__(self, kernel_size=3, channels=32, dilation=1, bias=True, nonlinear_activation="LeakyReLU", nonlinear_activation_params={"negative_slope": 0.2}, - pad="ReflectionPad1d", pad_params={}, ): - """ - Initialize ResidualStack module. - - Args: - kernel_size (int): Kernel size of dilation convolution layer. - channels (int): Number of channels of convolution layers. - dilation (int): Dilation factor. - bias (bool): Whether to add bias parameter in convolution layers. - nonlinear_activation (str): Activation function module name. - nonlinear_activation_params (dict): Hyperparameters for activation function. - pad (str): Padding function module name before dilated convolution layer. - pad_params (dict): Hyperparameters for padding function. - - """ - super(ResidualStack, self).__init__() - - # defile residual stack part - assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size." - self.stack = torch.nn.Sequential(getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), - getattr(torch.nn, pad)((kernel_size - 1) // 2 * dilation, **pad_params), - torch.nn.Conv1d(channels, channels, kernel_size, dilation=dilation, bias=bias), - getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params), - torch.nn.Conv1d(channels, channels, 1, bias=bias), ) - - # defile extra layer for skip connection - self.skip_layer = torch.nn.Conv1d(channels, channels, 1, bias=bias) - - def forward(self, c): - """ - Calculate forward propagation. - - Args: - c (Tensor): Input tensor (B, channels, T). - - Returns: - Tensor: Output tensor (B, chennels, T). 
- - """ - return self.stack(c) + self.skip_layer(c) diff --git a/spaces/FridaZuley/RVC_HFKawaii/Applio-RVC-Fork/utils/dependency.py b/spaces/FridaZuley/RVC_HFKawaii/Applio-RVC-Fork/utils/dependency.py deleted file mode 100644 index b70338b02d31b1ef455fbac817d418d328db518d..0000000000000000000000000000000000000000 --- a/spaces/FridaZuley/RVC_HFKawaii/Applio-RVC-Fork/utils/dependency.py +++ /dev/null @@ -1,170 +0,0 @@ -import os -import csv -import shutil -import tarfile -import subprocess -from pathlib import Path -from datetime import datetime - -def install_packages_but_jank_af(): - packages = ['build-essential', 'python3-dev', 'ffmpeg', 'aria2'] - pip_packages = ['pip', 'setuptools', 'wheel', 'httpx==0.23.0', 'faiss-gpu', 'fairseq', 'gradio==3.34.0', - 'ffmpeg', 'ffmpeg-python', 'praat-parselmouth', 'pyworld', 'numpy==1.23.5', - 'numba==0.56.4', 'librosa==0.9.2', 'mega.py', 'gdown', 'onnxruntime', 'pyngrok==4.1.12', - 'gTTS', 'elevenlabs', 'wget', 'tensorboardX', 'unidecode', 'huggingface-hub', 'stftpitchshift==1.5.1', - 'yt-dlp', 'pedalboard', 'pathvalidate', 'nltk', 'edge-tts', 'git+https://github.com/suno-ai/bark.git', 'python-dotenv' , 'av'] - - print("Updating and installing system packages...") - for package in packages: - print(f"Installing {package}...") - subprocess.check_call(['apt-get', 'install', '-qq', '-y', package]) - - print("Updating and installing pip packages...") - subprocess.check_call(['pip', 'install', '--upgrade'] + pip_packages) - - print('Packages up to date.') - - -def setup_environment(ForceUpdateDependencies, ForceTemporaryStorage): - # Mounting Google Drive - if not ForceTemporaryStorage: - from google.colab import drive - - if not os.path.exists('/content/drive'): - drive.mount('/content/drive') - else: - print('Drive is already mounted. 
Proceeding...') - - # Function to install dependencies with progress - def install_packages(): - packages = ['build-essential', 'python3-dev', 'ffmpeg', 'aria2'] - pip_packages = ['pip', 'setuptools', 'wheel', 'httpx==0.23.0', 'faiss-gpu', 'fairseq', 'gradio==3.34.0', - 'ffmpeg', 'ffmpeg-python', 'praat-parselmouth', 'pyworld', 'numpy==1.23.5', - 'numba==0.56.4', 'librosa==0.9.2', 'mega.py', 'gdown', 'onnxruntime', 'pyngrok==4.1.12', - 'gTTS', 'elevenlabs', 'wget', 'tensorboardX', 'unidecode', 'huggingface-hub', 'stftpitchshift==1.5.1', - 'yt-dlp', 'pedalboard', 'pathvalidate', 'nltk', 'edge-tts', 'git+https://github.com/suno-ai/bark.git', 'python-dotenv' , 'av'] - - print("Updating and installing system packages...") - for package in packages: - print(f"Installing {package}...") - subprocess.check_call(['apt-get', 'install', '-qq', '-y', package]) - - print("Updating and installing pip packages...") - subprocess.check_call(['pip', 'install', '--upgrade'] + pip_packages) - - - print('Packages up to date.') - - # Function to scan a directory and writes filenames and timestamps - def scan_and_write(base_path, output_file): - with open(output_file, 'w', newline='') as f: - writer = csv.writer(f) - for dirpath, dirs, files in os.walk(base_path): - for filename in files: - fname = os.path.join(dirpath, filename) - try: - mtime = os.path.getmtime(fname) - writer.writerow([fname, mtime]) - except Exception as e: - print(f'Skipping irrelevant nonexistent file {fname}: {str(e)}') - print(f'Finished recording filesystem timestamps to {output_file}.') - - # Function to compare files - def compare_files(old_file, new_file): - old_files = {} - new_files = {} - - with open(old_file, 'r') as f: - reader = csv.reader(f) - old_files = {rows[0]:rows[1] for rows in reader} - - with open(new_file, 'r') as f: - reader = csv.reader(f) - new_files = {rows[0]:rows[1] for rows in reader} - - removed_files = old_files.keys() - new_files.keys() - added_files = new_files.keys() - old_files.keys() - unchanged_files = old_files.keys() & new_files.keys() - - changed_files = {f for f in unchanged_files if old_files[f] != new_files[f]} - - for file in removed_files: - print(f'File has been removed: {file}') - - for file in changed_files: - print(f'File has been updated: {file}') - - return list(added_files) + list(changed_files) - - # Check if CachedRVC.tar.gz exists - if ForceTemporaryStorage: - file_path = '/content/CachedRVC.tar.gz' - else: - file_path = '/content/drive/MyDrive/RVC_Cached/CachedRVC.tar.gz' - - content_file_path = '/content/CachedRVC.tar.gz' - extract_path = '/' - - if not os.path.exists(file_path): - folder_path = os.path.dirname(file_path) - os.makedirs(folder_path, exist_ok=True) - print('No cached dependency install found. Attempting to download GitHub backup..') - - try: - download_url = "https://github.com/kalomaze/QuickMangioFixes/releases/download/release3/CachedRVC.tar.gz" - subprocess.run(["wget", "-O", file_path, download_url]) - print('Download completed successfully!') - except Exception as e: - print('Download failed:', str(e)) - - # Delete the failed download file - if os.path.exists(file_path): - os.remove(file_path) - print('Failed download file deleted. Continuing manual backup..') - - if Path(file_path).exists(): - if ForceTemporaryStorage: - print('Finished downloading CachedRVC.tar.gz.') - else: - print('CachedRVC.tar.gz found on Google Drive. 
Proceeding to copy and extract...') - - # Check if ForceTemporaryStorage is True and skip copying if it is - if ForceTemporaryStorage: - pass - else: - shutil.copy(file_path, content_file_path) - - print('Beginning backup copy operation...') - - with tarfile.open(content_file_path, 'r:gz') as tar: - for member in tar.getmembers(): - target_path = os.path.join(extract_path, member.name) - try: - tar.extract(member, extract_path) - except Exception as e: - print('Failed to extract a file (this isn\'t normal)... forcing an update to compensate') - ForceUpdateDependencies = True - print(f'Extraction of {content_file_path} to {extract_path} completed.') - - if ForceUpdateDependencies: - install_packages() - ForceUpdateDependencies = False - else: - print('CachedRVC.tar.gz not found. Proceeding to create an index of all current files...') - scan_and_write('/usr/', '/content/usr_files.csv') - - install_packages() - - scan_and_write('/usr/', '/content/usr_files_new.csv') - changed_files = compare_files('/content/usr_files.csv', '/content/usr_files_new.csv') - - with tarfile.open('/content/CachedRVC.tar.gz', 'w:gz') as new_tar: - for file in changed_files: - new_tar.add(file) - print(f'Added to tar: {file}') - - os.makedirs('/content/drive/MyDrive/RVC_Cached', exist_ok=True) - shutil.copy('/content/CachedRVC.tar.gz', '/content/drive/MyDrive/RVC_Cached/CachedRVC.tar.gz') - print('Updated CachedRVC.tar.gz copied to Google Drive.') - print('Dependencies fully up to date; future runs should be faster.') - diff --git a/spaces/GanymedeNil/text2vec/README.md b/spaces/GanymedeNil/text2vec/README.md deleted file mode 100644 index 6aed42c14ea71f94adefb6bca43e445240d1248e..0000000000000000000000000000000000000000 --- a/spaces/GanymedeNil/text2vec/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Text2vec -emoji: 🔥 -colorFrom: green -colorTo: indigo -sdk: gradio -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: shibing624/text2vec ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/GiorgiSekhniashvili/geo-whisper/app.py b/spaces/GiorgiSekhniashvili/geo-whisper/app.py deleted file mode 100644 index 66e1e56bd2512350847f21ceac5b60032d18d727..0000000000000000000000000000000000000000 --- a/spaces/GiorgiSekhniashvili/geo-whisper/app.py +++ /dev/null @@ -1,31 +0,0 @@ -import gradio as gr -from transformers.pipelines.audio_utils import ffmpeg_read - -from transformers import WhisperForConditionalGeneration, AutoProcessor - -model_name = "GiorgiSekhniashvili/whisper-tiny-ka-01" - -processor = AutoProcessor.from_pretrained(model_name) -model = WhisperForConditionalGeneration.from_pretrained(model_name) -forced_decoder_ids = processor.get_decoder_prompt_ids( - language="Georgian", task="transcribe" -) - - -def predict(audio_path): - if audio_path: - with open(audio_path, "rb") as f: - waveform = ffmpeg_read(f.read(), sampling_rate=16_000) - input_values = processor(waveform, sampling_rate=16_000, return_tensors="pt") - res = model.generate( - input_values["input_features"], - forced_decoder_ids=forced_decoder_ids, - max_new_tokens=448, - ) - return processor.batch_decode(res, skip_special_tokens=True)[0] - - -mic = gr.Audio(source="microphone", type="filepath", label="Speak here...") -demo = gr.Interface(predict, mic, "text") - -demo.launch() diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py 
b/spaces/Gradio-Blocks/uniformer_image_detection/configs/fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py deleted file mode 100644 index 78fa5b6c6a895cb04e1813462ed6a7eefd8c1fa6..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -# fp16 settings -fp16 = dict(loss_scale=512.) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/htc/README.md b/spaces/Gradio-Blocks/uniformer_image_detection/configs/htc/README.md deleted file mode 100644 index 6af02da49f58d02ef081477f241746c2e9c977df..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/htc/README.md +++ /dev/null @@ -1,57 +0,0 @@ -# Hybrid Task Cascade for Instance Segmentation - -## Introduction - -[ALGORITHM] - -We provide config files to reproduce the results in the CVPR 2019 paper for [Hybrid Task Cascade](https://arxiv.org/abs/1901.07518). - -```latex -@inproceedings{chen2019hybrid, - title={Hybrid task cascade for instance segmentation}, - author={Chen, Kai and Pang, Jiangmiao and Wang, Jiaqi and Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and Liu, Ziwei and Shi, Jianping and Ouyang, Wanli and Chen Change Loy and Dahua Lin}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - year={2019} -} -``` - -## Dataset - -HTC requires COCO and [COCO-stuff](http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip) dataset for training. You need to download and extract it in the COCO dataset path. -The directory should be like this. - -```none -mmdetection -├── mmdet -├── tools -├── configs -├── data -│ ├── coco -│ │ ├── annotations -│ │ ├── train2017 -│ │ ├── val2017 -│ │ ├── test2017 -| | ├── stuffthingmaps -``` - -## Results and Models - -The results on COCO 2017val are shown in the below table. 
(results on test-dev are usually slightly higher than val) - -| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download | -|:---------:|:-------:|:-------:|:--------:|:--------------:|:------:|:-------:|:------:|:--------:| -| R-50-FPN | pytorch | 1x | 8.2 | 5.8 | 42.3 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc/htc_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_1x_coco/htc_r50_fpn_1x_coco_20200317-7332cf16.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_1x_coco/htc_r50_fpn_1x_coco_20200317_070435.log.json) | -| R-50-FPN | pytorch | 20e | 8.2 | - | 43.3 | 38.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc/htc_r50_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_20e_coco/htc_r50_fpn_20e_coco_20200319-fe28c577.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/htc/htc_r50_fpn_20e_coco/htc_r50_fpn_20e_coco_20200319_070313.log.json) | -| R-101-FPN | pytorch | 20e | 10.2 | 5.5 | 44.8 | 39.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc/htc_r101_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/htc/htc_r101_fpn_20e_coco/htc_r101_fpn_20e_coco_20200317-9b41b48f.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/htc/htc_r101_fpn_20e_coco/htc_r101_fpn_20e_coco_20200317_153107.log.json) | -| X-101-32x4d-FPN | pytorch |20e| 11.4 | 5.0 | 46.1 | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc/htc_x101_32x4d_fpn_16x1_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_32x4d_fpn_16x1_20e_coco/htc_x101_32x4d_fpn_16x1_20e_coco_20200318-de97ae01.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_32x4d_fpn_16x1_20e_coco/htc_x101_32x4d_fpn_16x1_20e_coco_20200318_034519.log.json) | -| X-101-64x4d-FPN | pytorch |20e| 14.5 | 4.4 | 47.0 | 41.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc/htc_x101_64x4d_fpn_16x1_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_16x1_20e_coco/htc_x101_64x4d_fpn_16x1_20e_coco_20200318-b181fd7a.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_16x1_20e_coco/htc_x101_64x4d_fpn_16x1_20e_coco_20200318_081711.log.json) | - -- In the HTC paper and COCO 2018 Challenge, `score_thr` is set to 0.001 for both baselines and HTC. -- We use 8 GPUs with 2 images/GPU for R-50 and R-101 models, and 16 GPUs with 1 image/GPU for X-101 models. - If you would like to train X-101 HTC with 8 GPUs, you need to change the lr from 0.02 to 0.01. - -We also provide a powerful HTC with DCN and multi-scale training model. No testing augmentation is used. 
- -| Backbone | Style | DCN | training scales | Lr schd | box AP | mask AP | Config | Download | -|:----------------:|:-------:|:-----:|:---------------:|:-------:|:------:|:-------:|:------:|:--------:| -| X-101-64x4d-FPN | pytorch | c3-c5 | 400~1400 | 20e | 50.4 | 43.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco_20200312-946fd751.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/htc/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco/htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco_20200312_203410.log.json) | diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py deleted file mode 100644 index feca44aa67126b3326e45b1c9fbbf9e9c3bec11a..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py +++ /dev/null @@ -1,3 +0,0 @@ -_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/datasets/samplers/distributed_sampler.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/datasets/samplers/distributed_sampler.py deleted file mode 100644 index cc61019484655ee2829f7908dc442caa20cf1d54..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/datasets/samplers/distributed_sampler.py +++ /dev/null @@ -1,39 +0,0 @@ -import math - -import torch -from torch.utils.data import DistributedSampler as _DistributedSampler - - -class DistributedSampler(_DistributedSampler): - - def __init__(self, - dataset, - num_replicas=None, - rank=None, - shuffle=True, - seed=0): - super().__init__( - dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) - # for the compatibility from PyTorch 1.3+ - self.seed = seed if seed is not None else 0 - - def __iter__(self): - # deterministically shuffle based on epoch - if self.shuffle: - g = torch.Generator() - g.manual_seed(self.epoch + self.seed) - indices = torch.randperm(len(self.dataset), generator=g).tolist() - else: - indices = torch.arange(len(self.dataset)).tolist() - - # add extra samples to make it evenly divisible - # in case that indices is shorter than half of total_size - indices = (indices * - math.ceil(self.total_size / len(indices)))[:self.total_size] - assert len(indices) == self.total_size - - # subsample - indices = indices[self.rank:self.total_size:self.num_replicas] - assert len(indices) == self.num_samples - - return iter(indices) diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/grids/diffusion/4_bands_base_32khz.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/grids/diffusion/4_bands_base_32khz.py deleted file mode 100644 index f7e67bcc89dd0c8e50d770e600b55f179fe19588..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/grids/diffusion/4_bands_base_32khz.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. 
-# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Training of the 4 diffusion models described in -"From Discrete Tokens to High-Fidelity Audio Using Multi-Band Diffusion" -(paper link). -""" - -from ._explorers import DiffusionExplorer - - -@DiffusionExplorer -def explorer(launcher): - launcher.slurm_(gpus=4, partition='learnfair') - - launcher.bind_({'solver': 'diffusion/default', - 'dset': 'internal/music_10k_32khz'}) - - with launcher.job_array(): - launcher({'filter.use': True, 'filter.idx_band': 0, "processor.use": False, 'processor.power_std': 0.4}) - launcher({'filter.use': True, 'filter.idx_band': 1, "processor.use": False, 'processor.power_std': 0.4}) - launcher({'filter.use': True, 'filter.idx_band': 2, "processor.use": True, 'processor.power_std': 0.4}) - launcher({'filter.use': True, 'filter.idx_band': 3, "processor.use": True, 'processor.power_std': 0.75}) diff --git a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/launch.py b/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/launch.py deleted file mode 100644 index 5830df435245a9b23137d51245286da7a7b3522c..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/launch.py +++ /dev/null @@ -1,355 +0,0 @@ -""" -Copyright (c) Meta Platforms, Inc. and affiliates. -All rights reserved. - -This source code is licensed under the license found in the -LICENSE file in the root directory of this source tree. -""" - -import random -from tempfile import NamedTemporaryFile -import argparse -import time -import torch -import gradio as gr -import os -import numpy as np -from audiocraft.models import MusicGen -from audiocraft.data.audio import audio_write -from audiocraft.data.audio_utils import convert_audio -import subprocess, random, string - -MODEL = None -IS_SHARED_SPACE = "musicgen/MusicGen" in os.environ.get('SPACE_ID', '') -INTERRUPTED = False -UNLOAD_MODEL = False - -def interrupt(): - global INTERRUPTED - INTERRUPTED = True - print('Interrupted!') - -def generate_random_string(length): - characters = string.ascii_letters + string.digits - return ''.join(random.choice(characters) for _ in range(length)) - -def resize_video(input_path, output_path, target_width, target_height): - ffmpeg_cmd = [ - 'ffmpeg', - '-y', - '-i', input_path, - '-vf', f'scale={target_width}:{target_height}', - '-c:a', 'copy', - output_path - ] - subprocess.run(ffmpeg_cmd) - -def load_model(version): - print("Loading model", version) - return MusicGen.get_pretrained(version) - - -def predict(model, text, melody, sample, duration, topk, topp, temperature, cfg_coef, seed, overlap=5, recondition=True, background="./assets/background.png", progress=gr.Progress()): - global MODEL - global INTERRUPTED - INTERRUPTED = False - topk = int(topk) - if MODEL is None or MODEL.name != model: - MODEL = load_model(model) - - if duration > MODEL.lm.cfg.dataset.segment_duration and melody is not None: - raise gr.Error("Generating music longer than 30 seconds with melody conditioning is not yet supported!") - - output = None - first_chunk = None - total_samples = duration * 50 + 3 - segment_duration = duration - if seed < 0: - seed = random.randint(0, 0xffff_ffff_ffff) - torch.manual_seed(seed) - predict.last_progress_update = time.monotonic() - while duration > 0: - if INTERRUPTED: - break - - if output is None: # first pass of long or short song - if segment_duration > MODEL.lm.cfg.dataset.segment_duration: - segment_duration = MODEL.lm.cfg.dataset.segment_duration - else: - 
segment_duration = duration - else: # next pass of long song - if duration + overlap < MODEL.lm.cfg.dataset.segment_duration: - segment_duration = duration + overlap - else: - segment_duration = MODEL.lm.cfg.dataset.segment_duration - - print(f'Segment duration: {segment_duration}, duration: {duration}, overlap: {overlap}') - MODEL.set_generation_params( - use_sampling=True, - top_k=topk, - top_p=topp, - temperature=temperature, - cfg_coef=cfg_coef, - duration=segment_duration, - ) - def updateProgress(step: int, total: int): - now = time.monotonic() - if now - predict.last_progress_update > 1: - progress((total_samples - duration * 50 - 3 + step, total_samples)) - predict.last_progress_update = now - - if sample: - def normalize_audio(audio_data): - audio_data = audio_data.astype(np.float32) - max_value = np.max(np.abs(audio_data)) - audio_data = audio_data / max_value - return audio_data - - globalSR, sampleM = sample[0], sample[1] - sampleM = normalize_audio(sampleM) - sampleM = torch.from_numpy(sampleM).t() - - if sampleM.dim() > 1: - sampleM = convert_audio(sampleM, globalSR, 32000, 1) - - sampleM = sampleM.to(MODEL.device).float().unsqueeze(0) - - if sampleM.dim() == 2: - sampleM = sampleM[None] - - sample_length = sampleM.shape[sampleM.dim() - 1] / 32000 - if output is None: - next_segment = sampleM - duration -= sample_length - else: - if first_chunk is None and MODEL.name == "melody" and recondition: - first_chunk = output[:, :, - :MODEL.lm.cfg.dataset.segment_duration*MODEL.sample_rate] - last_chunk = output[:, :, -overlap*32000:] - next_segment = MODEL.generate_continuation(last_chunk, - 32000, descriptions=[text], progress=updateProgress, - melody_wavs=(first_chunk), resample=False) - duration -= segment_duration - overlap - elif melody: - sr, melody = melody[0], torch.from_numpy(melody[1]).to(MODEL.device).float().t().unsqueeze(0) - print(melody.shape) - if melody.dim() == 2: - melody = melody[None] - melody = melody[..., :int(sr * MODEL.lm.cfg.dataset.segment_duration)] - next_segment = MODEL.generate_with_chroma( - descriptions=[text], - melody_wavs=melody, - melody_sample_rate=sr, - progress=updateProgress - ) - duration -= segment_duration - else: - if output is None: - next_segment = MODEL.generate(descriptions=[text], - progress=updateProgress) - duration -= segment_duration - else: - if first_chunk is None and MODEL.name == "melody" and recondition: - first_chunk = output[:, :, - :MODEL.lm.cfg.dataset.segment_duration*MODEL.sample_rate] - last_chunk = output[:, :, -overlap*MODEL.sample_rate:] - next_segment = MODEL.generate_continuation(last_chunk, - MODEL.sample_rate, descriptions=[text], - progress=updateProgress, melody_wavs=(first_chunk), resample=False) - duration -= segment_duration - overlap - - if output is None: - output = next_segment - else: - output = torch.cat([output[:, :, :-overlap*MODEL.sample_rate], next_segment], 2) - - - output = output.detach().cpu().float()[0] - with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file: - audio_write( - file.name, output, MODEL.sample_rate, strategy="loudness", - loudness_headroom_db=16, loudness_compressor=True, add_suffix=False) - waveform_video = gr.make_waveform(file.name, bg_image=background, bg_color="#21b0fe" , bars_color=('#fe218b', '#fed700'), fg_alpha=1.0, bar_count=75) - if background is None or len(background) == 0: - random_string = generate_random_string(12) - random_string = f"{random_string}.mp4" - resize_video(waveform_video, random_string, 900, 300) - waveform_video = random_string - 
global UNLOAD_MODEL - if UNLOAD_MODEL: - MODEL = None - torch.cuda.empty_cache() - return waveform_video, seed - - -def ui(**kwargs): - with gr.Blocks() as interface: - gr.Markdown( - """ - # MusicGen - This is your private demo for [MusicGen](https://github.com/facebookresearch/audiocraft), a simple and controllable model for music generation - presented at: ["Simple and Controllable Music Generation"](https://arxiv.org/abs/2306.05284) - """ - ) - if IS_SHARED_SPACE: - gr.Markdown(""" - ⚠ This Space doesn't work in this shared UI ⚠ - - - Duplicate Space - to use it privately, or use the public demo - """) - with gr.Row(): - with gr.Column(): - with gr.Row(): - text = gr.Text(label="Input Text", interactive=True) - melody = gr.Audio(source="upload", type="numpy", label="Melody Condition (optional)", interactive=True) - sample = gr.Audio(source="upload", type="numpy", label="Music Sample (optional)", interactive=True) - with gr.Row(): - submit = gr.Button("Generate", variant="primary") - gr.Button("Interrupt").click(fn=interrupt, queue=False) - with gr.Row(): - background = gr.Image(source="upload", label="Background", type="filepath", interactive=True) - with gr.Row(): - model = gr.Radio(["melody", "medium", "small", "large"], label="Model", value="melody", interactive=True) - with gr.Row(): - duration = gr.Slider(minimum=1, maximum=300, value=10, step=1, label="Duration", interactive=True) - with gr.Row(): - overlap = gr.Slider(minimum=1, maximum=29, value=5, step=1, label="Overlap", interactive=True) - recondition = gr.Checkbox(False, label='Condition next chunks with the first chunk') - with gr.Row(): - topk = gr.Number(label="Top-k", value=250, interactive=True) - topp = gr.Number(label="Top-p", value=0, interactive=True) - temperature = gr.Number(label="Temperature", value=1.0, interactive=True) - cfg_coef = gr.Number(label="Classifier Free Guidance", value=3.0, interactive=True) - with gr.Row(): - seed = gr.Number(label="Seed", value=-1, precision=0, interactive=True) - gr.Button('\U0001f3b2\ufe0f').style(full_width=False).click(fn=lambda: -1, outputs=[seed], queue=False) - reuse_seed = gr.Button('\u267b\ufe0f').style(full_width=False) - with gr.Column() as c: - output = gr.Video(label="Generated Music") - seed_used = gr.Number(label='Seed used', value=-1, interactive=False) - - reuse_seed.click(fn=lambda x: x, inputs=[seed_used], outputs=[seed], queue=False) - submit.click(predict, inputs=[model, text, melody, sample, duration, topk, topp, temperature, cfg_coef, seed, overlap, recondition, background], outputs=[output, seed_used]) - def update_recondition(name: str): - enabled = name == 'melody' - return recondition.update(interactive=enabled, value=None if enabled else False) - model.change(fn=update_recondition, inputs=[model], outputs=[recondition]) - gr.Examples( - fn=predict, - examples=[ - [ - "An 80s driving pop song with heavy drums and synth pads in the background", - "./assets/bach.mp3", - "melody" - ], - [ - "A cheerful country song with acoustic guitars", - "./assets/bolero_ravel.mp3", - "melody" - ], - [ - "90s rock song with electric guitar and heavy drums", - None, - "medium" - ], - [ - "a light and cheerly EDM track, with syncopated drums, aery pads, and strong emotions", - "./assets/bach.mp3", - "melody" - ], - [ - "lofi slow bpm electro chill with organic samples", - None, - "medium", - ], - ], - inputs=[text, melody, model], - outputs=[output] - ) - gr.Markdown( - """ - ### More details - - The model will generate a short music extract based on the description you 
provided. - You can generate up to 30 seconds of audio. - - We present 4 model variations: - 1. Melody -- a music generation model capable of generating music condition on text and melody inputs. **Note**, you can also use text only. - 2. Small -- a 300M transformer decoder conditioned on text only. - 3. Medium -- a 1.5B transformer decoder conditioned on text only. - 4. Large -- a 3.3B transformer decoder conditioned on text only (might OOM for the longest sequences.) - - When using `melody`, ou can optionaly provide a reference audio from - which a broad melody will be extracted. The model will then try to follow both the description and melody provided. - - You can also use your own GPU or a Google Colab by following the instructions on our repo. - See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft) - for more details. - """ - ) - - # Show the interface - launch_kwargs = {} - username = kwargs.get('username') - password = kwargs.get('password') - server_port = kwargs.get('server_port', 0) - inbrowser = kwargs.get('inbrowser', False) - share = kwargs.get('share', False) - server_name = kwargs.get('listen') - - launch_kwargs['server_name'] = server_name - - if username and password: - launch_kwargs['auth'] = (username, password) - if server_port > 0: - launch_kwargs['server_port'] = server_port - if inbrowser: - launch_kwargs['inbrowser'] = inbrowser - if share: - launch_kwargs['share'] = share - - interface.queue().launch(**launch_kwargs) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument( - '--listen', - type=str, - default='127.0.0.1', - help='IP to listen on for connections to Gradio', - ) - parser.add_argument( - '--username', type=str, default='', help='Username for authentication' - ) - parser.add_argument( - '--password', type=str, default='', help='Password for authentication' - ) - parser.add_argument( - '--server_port', - type=int, - default=0, - help='Port to run the server listener on', - ) - parser.add_argument( - '--inbrowser', action='store_true', help='Open in browser' - ) - parser.add_argument( - '--share', action='store_true', help='Share the gradio UI' - ) - parser.add_argument( - '--unload_model', action='store_true', help='Unload the model after every generation to save GPU memory' - ) - - args = parser.parse_args() - UNLOAD_MODEL = args.unload_model - ui( - username=args.username, - password=args.password, - inbrowser=args.inbrowser, - server_port=args.server_port, - share=args.share, - listen=args.listen - ) \ No newline at end of file diff --git a/spaces/Grezz/generate_human_motion/pyrender/README.md b/spaces/Grezz/generate_human_motion/pyrender/README.md deleted file mode 100644 index ae88ed1c5e78f247e38291ed83cf4c81230bf976..0000000000000000000000000000000000000000 --- a/spaces/Grezz/generate_human_motion/pyrender/README.md +++ /dev/null @@ -1,92 +0,0 @@ -# Pyrender - -[![Build Status](https://travis-ci.org/mmatl/pyrender.svg?branch=master)](https://travis-ci.org/mmatl/pyrender) -[![Documentation Status](https://readthedocs.org/projects/pyrender/badge/?version=latest)](https://pyrender.readthedocs.io/en/latest/?badge=latest) -[![Coverage Status](https://coveralls.io/repos/github/mmatl/pyrender/badge.svg?branch=master)](https://coveralls.io/github/mmatl/pyrender?branch=master) -[![PyPI version](https://badge.fury.io/py/pyrender.svg)](https://badge.fury.io/py/pyrender) -[![Downloads](https://pepy.tech/badge/pyrender)](https://pepy.tech/project/pyrender) - -Pyrender is a pure Python 
(2.7, 3.4, 3.5, 3.6) library for physically-based -rendering and visualization. -It is designed to meet the [glTF 2.0 specification from Khronos](https://www.khronos.org/gltf/). - -Pyrender is lightweight, easy to install, and simple to use. -It comes packaged with both an intuitive scene viewer and a headache-free -offscreen renderer with support for GPU-accelerated rendering on headless -servers, which makes it perfect for machine learning applications. - -Extensive documentation, including a quickstart guide, is provided [here](https://pyrender.readthedocs.io/en/latest/). - -For a minimal working example of GPU-accelerated offscreen rendering using EGL, -check out the [EGL Google CoLab Notebook](https://colab.research.google.com/drive/1pcndwqeY8vker3bLKQNJKr3B-7-SYenE?usp=sharing). - - -
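For quick orientation, here is a minimal offscreen-rendering sketch in the same spirit as that notebook (not taken from it); it assumes a headless machine where EGL is available and a single-geometry `mesh.obj` in the working directory, both of which are placeholders:

```python
# Minimal offscreen-rendering sketch; "mesh.obj" is a placeholder asset.
import os
os.environ["PYOPENGL_PLATFORM"] = "egl"  # select the EGL backend before pyrender is imported

import numpy as np
import trimesh
import pyrender

tm = trimesh.load("mesh.obj", force="mesh")        # any format trimesh understands
scene = pyrender.Scene()
scene.add(pyrender.Mesh.from_trimesh(tm))

camera_pose = np.eye(4)
camera_pose[2, 3] = 2.0                            # pull the camera back along +z
scene.add(pyrender.PerspectiveCamera(yfov=np.pi / 3.0), pose=camera_pose)
scene.add(pyrender.DirectionalLight(intensity=3.0), pose=camera_pose)

renderer = pyrender.OffscreenRenderer(viewport_width=640, viewport_height=480)
color, depth = renderer.render(scene)              # (H, W, 3) uint8 and (H, W) float32 arrays
renderer.delete()
```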

- *GIF of Viewer and Damaged Helmet render*

    - -## Installation -You can install pyrender directly from pip. - -```bash -pip install pyrender -``` - -## Features - -Despite being lightweight, pyrender has lots of features, including: - -* Simple interoperation with the amazing [trimesh](https://github.com/mikedh/trimesh) project, -which enables out-of-the-box support for dozens of mesh types, including OBJ, -STL, DAE, OFF, PLY, and GLB. -* An easy-to-use scene viewer with support for animation, showing face and vertex -normals, toggling lighting conditions, and saving images and GIFs. -* An offscreen rendering module that supports OSMesa and EGL backends. -* Shadow mapping for directional and spot lights. -* Metallic-roughness materials for physically-based rendering, including several -types of texture and normal mapping. -* Transparency. -* Depth and color image generation. - -## Sample Usage - -For sample usage, check out the [quickstart -guide](https://pyrender.readthedocs.io/en/latest/examples/index.html) or one of -the Google CoLab Notebooks: - -* [EGL Google CoLab Notebook](https://colab.research.google.com/drive/1pcndwqeY8vker3bLKQNJKr3B-7-SYenE?usp=sharing) - -## Viewer Keyboard and Mouse Controls - -When using the viewer, the basic controls for moving about the scene are as follows: - -* To rotate the camera about the center of the scene, hold the left mouse button and drag the cursor. -* To rotate the camera about its viewing axis, hold `CTRL` left mouse button and drag the cursor. -* To pan the camera, do one of the following: - * Hold `SHIFT`, then hold the left mouse button and drag the cursor. - * Hold the middle mouse button and drag the cursor. -* To zoom the camera in or out, do one of the following: - * Scroll the mouse wheel. - * Hold the right mouse button and drag the cursor. - -The available keyboard commands are as follows: - -* `a`: Toggles rotational animation mode. -* `c`: Toggles backface culling. -* `f`: Toggles fullscreen mode. -* `h`: Toggles shadow rendering. -* `i`: Toggles axis display mode (no axes, world axis, mesh axes, all axes). -* `l`: Toggles lighting mode (scene lighting, Raymond lighting, or direct lighting). -* `m`: Toggles face normal visualization. -* `n`: Toggles vertex normal visualization. -* `o`: Toggles orthographic camera mode. -* `q`: Quits the viewer. -* `r`: Starts recording a GIF, and pressing again stops recording and opens a file dialog. -* `s`: Opens a file dialog to save the current view as an image. -* `w`: Toggles wireframe mode (scene default, flip wireframes, all wireframe, or all solid). -* `z`: Resets the camera to the default view. - -As a note, displaying shadows significantly slows down rendering, so if you're -experiencing low framerates, just kill shadows or reduce the number of lights in -your scene. 
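The controls above apply to the interactive viewer, which can be launched in a couple of lines. A minimal sketch of that workflow (the `fuze.obj` asset name is only a placeholder):

```python
import trimesh
import pyrender

# Load any trimesh-supported file as a scene and hand it to the viewer.
tm_scene = trimesh.load("fuze.obj", force="scene")
scene = pyrender.Scene.from_trimesh_scene(tm_scene)
pyrender.Viewer(scene, use_raymond_lighting=True)  # blocks until the window is closed
```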
diff --git a/spaces/GuyYariv/AudioToken/modules/fga/fga_model.py b/spaces/GuyYariv/AudioToken/modules/fga/fga_model.py deleted file mode 100644 index 702efcec44d687c85c43538ea0fe1de5ac903951..0000000000000000000000000000000000000000 --- a/spaces/GuyYariv/AudioToken/modules/fga/fga_model.py +++ /dev/null @@ -1,219 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from atten import Atten - - -class FGA(nn.Module): - def __init__(self, vocab_size, word_embed_dim, hidden_ques_dim, hidden_ans_dim, - hidden_hist_dim, hidden_cap_dim, hidden_img_dim): - ''' - Factor Graph Attention - :param vocab_size: vocabulary size - :param word_embed_dim - :param hidden_ques_dim: - :param hidden_ans_dim: - :param hidden_hist_dim: - :param img_features_dim: - ''' - super(FGA, self).__init__() - - print("Init FGA with vocab size %s, word embed %s, hidden ques %s, hidden ans %s," - " hidden hist %s, hidden cap %s, hidden img %s" % (vocab_size, word_embed_dim, - hidden_ques_dim, - hidden_ans_dim, - hidden_hist_dim, - hidden_cap_dim, - hidden_img_dim)) - self.hidden_ques_dim = hidden_ques_dim - self.hidden_ans_dim = hidden_ans_dim - self.hidden_cap_dim = hidden_cap_dim - self.hidden_img_dim = hidden_img_dim - self.hidden_hist_dim = hidden_hist_dim - - # Vocab of History LSTMs is one more as we are keeping a stop id (the last id) - self.word_embedddings = nn.Embedding(vocab_size+1+1, word_embed_dim, padding_idx=0) - - self.lstm_ques = nn.LSTM(word_embed_dim, self.hidden_ques_dim, batch_first=True) - self.lstm_ans = nn.LSTM(word_embed_dim, self.hidden_ans_dim, batch_first=True) - - self.lstm_hist_ques = nn.LSTM(word_embed_dim, self.hidden_hist_dim, batch_first=True) - self.lstm_hist_ans = nn.LSTM(word_embed_dim, self.hidden_hist_dim, batch_first=True) - - self.lstm_hist_cap = nn.LSTM(word_embed_dim, self.hidden_cap_dim, batch_first=True) - - - self.qahistnet = nn.Sequential( - nn.Linear(self.hidden_hist_dim*2, self.hidden_hist_dim), - nn.ReLU(inplace=True) - ) - - self.concat_dim = self.hidden_ques_dim + self.hidden_ans_dim + \ - self.hidden_ans_dim + self.hidden_img_dim + \ - self.hidden_cap_dim + self.hidden_hist_dim*9 - - self.simnet = nn.Sequential( - nn.Linear(self.concat_dim, (self.concat_dim)//2, bias=False), - nn.BatchNorm1d((self.concat_dim) // 2), - nn.ReLU(inplace=True), - nn.Linear((self.concat_dim)//2, (self.concat_dim)//4, bias=False), - nn.BatchNorm1d((self.concat_dim) // 4), - nn.ReLU(inplace=True), - nn.Dropout(0.5), - nn.Linear((self.concat_dim)//4, 1) - ) - - # To share weights, provide list of tuples: (idx, list of connected utils) - # Note, for efficiency, the shared utils (i.e., history, are connected to ans and question only. - # connecting shared factors is not supported (!) 
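        # For orientation (a hedged reading of the Atten call below): the util list orders the
        # modalities as 0=answer, 1=question, 2=caption, 3=image, 4=question-history,
        # 5=answer-history, so {4: (9, [0, 1]), 5: (9, [0, 1])} marks the two history utils
        # as shared factors repeated 9 times (presumably one per previous dialog round),
        # connected only to the answer (0) and question (1) utils.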
- sharing_factor_weights = {4: (9, [0, 1]), - 5: (9, [0, 1])} - - self.mul_atten = Atten(util_e=[self.hidden_ans_dim, # Answer modal - self.hidden_ques_dim, # Question modal - self.hidden_cap_dim, # Caption modal - self.hidden_img_dim, # Image modal - self.hidden_hist_dim, # Question-history modal - self.hidden_hist_dim # Answer-history modal - ], - sharing_factor_weights=sharing_factor_weights, - sizes=[100, # 100 Answers - 21, # Question length - 41, # Caption length - 37, # 36 Image regions - 21, # History-Question length - 21 # History-Answer length - ] # The spatial dim used for pairwise normalization (use force for adaptive) - , prior_flag=True, - pairwise_flag=True) - - - - def forward(self, input_ques, input_ans, input_hist_ques, input_hist_ans, input_hist_cap, - input_ques_length, input_ans_length, input_cap_length, i_e): - """ - - :param input_ques: - :param input_ans: - :param input_hist_ques: - :param input_hist_ans: - :param input_hist_cap: - :param input_ques_length: - :param input_ans_length: - :param input_cap_length: - :param i_e: - :return: - """ - - - n_options = input_ans.size()[1] - batch_size = input_ques.size()[0] - - - - nqa_per_dial, nwords_per_qa = input_hist_ques.size()[1], input_hist_ques.size()[2] - nwords_per_cap = input_hist_cap.size()[1] - max_length_input_ans = input_ans.size()[-1] - - assert batch_size == input_hist_ques.size()[0] == input_hist_ans.size()[0] == input_ques.size()[0] == \ - input_ans.size()[0] == input_hist_cap.size()[0] - assert nqa_per_dial == input_hist_ques.size()[1] == input_hist_ans.size()[1] - assert nwords_per_qa == input_hist_ques.size()[2] == input_hist_ans.size()[2] - - q_we = self.word_embedddings(input_ques) - a_we = self.word_embedddings(input_ans.view(-1, max_length_input_ans)) - hq_we = self.word_embedddings(input_hist_ques.view(-1, nwords_per_qa)) - ha_we = self.word_embedddings(input_hist_ans.view(-1, nwords_per_qa)) - c_we = self.word_embedddings(input_hist_cap.view(-1, nwords_per_cap)) - - - - ''' - q_we = batch x 20 x embed_ques_dim - a_we = 100*batch x 20 x embed_ans_dim - hq_we = batch*nqa_per_dial, nwords_per_qa, embed_hist_dim - ha_we = batch*nqa_per_dial, nwords_per_qa, embed_hist_dim - c_we = batch*ncap_per_dial, nwords_per_cap, embed_hist_dim - ''' - self.lstm_ques.flatten_parameters() - self.lstm_ans.flatten_parameters() - self.lstm_hist_ques.flatten_parameters() - self.lstm_hist_ans.flatten_parameters() - self.lstm_hist_cap.flatten_parameters() - - - i_feat = i_e - - q_seq, self.hidden_ques = self.lstm_ques(q_we) - a_seq, self.hidden_ans = self.lstm_ans(a_we) - hq_seq, self.hidden_hist_ques = self.lstm_hist_ques(hq_we) - ha_seq, self.hidden_hist_ans = self.lstm_hist_ans(ha_we) - cap_seq, self.hidden_cap = self.lstm_hist_cap(c_we) - - - ''' - length is used for attention prior - ''' - q_len = input_ques_length.data - 1 - c_len = input_cap_length.data.view(-1) - 1 - - - ans_index = torch.arange(0, n_options * batch_size).long().cuda() - ans_len = input_ans_length.data.view(-1) - 1 - ans_seq = a_seq[ans_index, ans_len, :] - ans_seq = ans_seq.view(batch_size, n_options, self.hidden_ans_dim) - - batch_index = torch.arange(0, batch_size).long().cuda() - q_prior = torch.zeros(batch_size, q_seq.size(1)).cuda() - q_prior[batch_index, q_len] = 100 - c_prior = torch.zeros(batch_size, cap_seq.size(1)).cuda() - c_prior[batch_index, c_len] = 100 - ans_prior = torch.ones(batch_size, ans_seq.size(1)).cuda() - img_prior = torch.ones(batch_size, i_feat.size(1)).cuda() - - (ans_atten, ques_atten, cap_atten, img_atten, hq_atten, 
ha_atten) = \ - self.mul_atten([ans_seq, q_seq, cap_seq, i_feat, hq_seq, ha_seq], - priors=[ans_prior, q_prior, c_prior, img_prior, None, None]) - - ''' - expand to answers based - ''' - ques_atten = torch.unsqueeze(ques_atten, 1).expand(batch_size, - n_options, - self.hidden_ques_dim) - cap_atten = torch.unsqueeze(cap_atten, 1).expand(batch_size, - n_options, - self.hidden_cap_dim) - img_atten = torch.unsqueeze(img_atten, 1).expand(batch_size, n_options, - self.hidden_img_dim) - ans_atten = torch.unsqueeze(ans_atten, 1).expand(batch_size, n_options, - self.hidden_ans_dim) - - - ''' - combine history - ''' - - input_qahistnet = torch.cat((hq_atten, ha_atten), 1) - # input_qahistnet: (nqa_per_dial*batch x 2*hidden_hist_dim) - output_qahistnet = self.qahistnet(input_qahistnet) - # output_qahistnet: (nqa_per_dial*batch x hidden_hist_dim) - output_qahistnet = output_qahistnet.view(batch_size, - nqa_per_dial * self.hidden_hist_dim) - # output_qahistnet: (batch x nqa_per_dial*hidden_hist_dim) - output_qahistnet = torch.unsqueeze(output_qahistnet, 1)\ - .expand(batch_size, - n_options, - nqa_per_dial * self.hidden_hist_dim) - - input_qa = torch.cat((ans_seq, ques_atten, ans_atten, img_atten, - output_qahistnet, cap_atten), 2) # Concatenate last dimension - - input_qa = input_qa.view(batch_size * n_options, self.concat_dim) - - out_scores = self.simnet(input_qa) - - out_scores = out_scores.squeeze(dim=1) - out_scores = out_scores.view(batch_size, n_options) - - return out_scores \ No newline at end of file diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/steps_gan/train_sat.sh b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/steps_gan/train_sat.sh deleted file mode 100644 index f75afafb1c4ad04ee71ab8541064ab0477430616..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/steps_gan/train_sat.sh +++ /dev/null @@ -1,281 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. - - -# This does Speaker Adapted Training (SAT), i.e. train on -# fMLLR-adapted features. It can be done on top of either LDA+MLLT, or -# delta and delta-delta features. If there are no transforms supplied -# in the alignment directory, it will estimate transforms itself before -# building the tree (and in any case, it estimates transforms a number -# of times during training). - - -# Begin configuration section. -stage=-5 -exit_stage=-100 # you can use this to require it to exit at the - # beginning of a specific stage. Not all values are - # supported. -fmllr_update_type=full -cmd=run.pl -scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1" -beam=10 -retry_beam=40 -careful=false -boost_silence=1.0 # Factor by which to boost silence likelihoods in alignment -context_opts= # e.g. set this to "--context-width 5 --central-position 2" for quinphone. -realign_iters="10 20 30"; -fmllr_iters="2 4 6 12"; -silence_weight=0.0 # Weight on silence in fMLLR estimation. -num_iters=35 # Number of iterations of training -max_iter_inc=25 # Last iter to increase #Gauss on. -power=0.2 # Exponent for number of gaussians according to occurrence counts -cluster_thresh=-1 # for build-tree control final bottom-up clustering of leaves -phone_map= -train_tree=true -tree_stats_opts= -cluster_phones_opts= -compile_questions_opts= -# End configuration section. 
-num_nonsil_states=3 - -echo "$0 $@" # Print the command line for logging - -[ -f path.sh ] && . ./path.sh -. parse_options.sh || exit 1; - -if [ $# != 6 ]; then - echo "Usage: steps/train_sat.sh <#leaves> <#gauss> " - echo " e.g.: steps/train_sat.sh 2500 15000 data/train_si84 data/lang exp/tri2b_ali_si84 exp/tri3b" - echo "Main options (for others, see top of script file)" - echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." - echo " --config # config containing options" - echo " --stage # stage to do partial re-run from." - exit 1; -fi - -numleaves=$1 -totgauss=$2 -data=$3 -lang=$4 -alidir=$5 -dir=$6 - -for f in $data/feats.scp $lang/phones.txt $alidir/final.mdl $alidir/ali.1.gz; do - [ ! -f $f ] && echo "train_sat.sh: no such file $f" && exit 1; -done - -numgauss=$numleaves -incgauss=$[($totgauss-$numgauss)/$max_iter_inc] # per-iter #gauss increment -oov=`cat $lang/oov.int` -nj=`cat $alidir/num_jobs` || exit 1; -silphonelist=`cat $lang/phones/silence.csl` -ciphonelist=`cat $lang/phones/context_indep.csl` || exit 1; -sdata=$data/split$nj; -splice_opts=`cat $alidir/splice_opts 2>/dev/null` # frame-splicing options. -cmvn_opts=`cat $alidir/cmvn_opts 2>/dev/null` -delta_opts=`cat $alidir/delta_opts 2>/dev/null` -phone_map_opt= -[ ! -z "$phone_map" ] && phone_map_opt="--phone-map='$phone_map'" - -mkdir -p $dir/log -cp $alidir/splice_opts $dir 2>/dev/null # frame-splicing options. -cp $alidir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option. -cp $alidir/delta_opts $dir 2>/dev/null # delta option. - -utils/lang/check_phones_compatible.sh $lang/phones.txt $alidir/phones.txt || exit 1; -cp $lang/phones.txt $dir || exit 1; - -echo $nj >$dir/num_jobs -[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; - -# Set up features. - -if [ -f $alidir/final.mat ]; then feat_type=lda; else feat_type=delta; fi -echo "$0: feature type is $feat_type" - -## Set up speaker-independent features. -case $feat_type in - delta) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";; - lda) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $alidir/final.mat ark:- ark:- |" - cp $alidir/final.mat $dir - cp $alidir/full.mat $dir 2>/dev/null - ;; - *) echo "$0: invalid feature type $feat_type" && exit 1; -esac - -## Get initial fMLLR transforms (possibly from alignment dir) -if [ -f $alidir/trans.1 ]; then - echo "$0: Using transforms from $alidir" - feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$alidir/trans.JOB ark:- ark:- |" - cur_trans_dir=$alidir -else - if [ $stage -le -5 ]; then - echo "$0: obtaining initial fMLLR transforms since not present in $alidir" - # The next line is necessary because of $silphonelist otherwise being incorrect; would require - # old $lang dir which would require another option. Not needed anyway. - [ ! -z "$phone_map" ] && \ - echo "$0: error: you must provide transforms if you use the --phone-map option." 
&& exit 1; - $cmd JOB=1:$nj $dir/log/fmllr.0.JOB.log \ - ali-to-post "ark:gunzip -c $alidir/ali.JOB.gz|" ark:- \| \ - weight-silence-post $silence_weight $silphonelist $alidir/final.mdl ark:- ark:- \| \ - gmm-est-fmllr --fmllr-update-type=$fmllr_update_type \ - --spk2utt=ark:$sdata/JOB/spk2utt $alidir/final.mdl "$sifeats" \ - ark:- ark:$dir/trans.JOB || exit 1; - fi - feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$dir/trans.JOB ark:- ark:- |" - cur_trans_dir=$dir -fi - -if [ $stage -le -4 ] && $train_tree; then - # Get tree stats. - echo "$0: Accumulating tree stats" - $cmd JOB=1:$nj $dir/log/acc_tree.JOB.log \ - acc-tree-stats $context_opts $tree_stats_opts $phone_map_opt --ci-phones=$ciphonelist $alidir/final.mdl "$feats" \ - "ark:gunzip -c $alidir/ali.JOB.gz|" $dir/JOB.treeacc || exit 1; - [ "`ls $dir/*.treeacc | wc -w`" -ne "$nj" ] && echo "$0: Wrong #tree-accs" && exit 1; - $cmd $dir/log/sum_tree_acc.log \ - sum-tree-stats $dir/treeacc $dir/*.treeacc || exit 1; - rm $dir/*.treeacc -fi - -if [ $stage -le -3 ] && $train_tree; then - echo "$0: Getting questions for tree clustering." - # preparing questions, roots file... - cluster-phones --pdf-class-list=$(($num_nonsil_states / 2)) \ - $cluster_phones_opts $context_opts \ - $dir/treeacc $lang/phones/sets.int $dir/questions.int 2>$dir/log/questions.log || exit 1; - cat $lang/phones/extra_questions.int >> $dir/questions.int - compile-questions $context_opts $compile_questions_opts $lang/topo $dir/questions.int $dir/questions.qst 2>$dir/log/compile_questions.log || exit 1; - - echo "$0: Building the tree" - $cmd $dir/log/build_tree.log \ - build-tree $context_opts --verbose=1 --max-leaves=$numleaves \ - --cluster-thresh=$cluster_thresh $dir/treeacc $lang/phones/roots.int \ - $dir/questions.qst $lang/topo $dir/tree || exit 1; -fi - -if [ $stage -le -2 ]; then - echo "$0: Initializing the model" - if $train_tree; then - gmm-init-model --write-occs=$dir/1.occs \ - $dir/tree $dir/treeacc $lang/topo $dir/1.mdl 2> $dir/log/init_model.log || exit 1; - grep 'no stats' $dir/log/init_model.log && echo "This is a bad warning."; - rm $dir/treeacc - else - cp $alidir/tree $dir/ || exit 1; - $cmd JOB=1 $dir/log/init_model.log \ - gmm-init-model-flat $dir/tree $lang/topo $dir/1.mdl \ - "$feats subset-feats ark:- ark:-|" || exit 1; - fi -fi - -if [ $stage -le -1 ]; then - # Convert the alignments. 
- echo "$0: Converting alignments from $alidir to use current tree" - $cmd JOB=1:$nj $dir/log/convert.JOB.log \ - convert-ali $phone_map_opt $alidir/final.mdl $dir/1.mdl $dir/tree \ - "ark:gunzip -c $alidir/ali.JOB.gz|" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; -fi - -[ "$exit_stage" -eq 0 ] && echo "$0: Exiting early: --exit-stage $exit_stage" && exit 0; - -if [ $stage -le 0 ] && [ "$realign_iters" != "" ]; then - echo "$0: Compiling graphs of transcripts" - $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \ - compile-train-graphs --read-disambig-syms=$lang/phones/disambig.int $dir/tree $dir/1.mdl $lang/L.fst \ - "ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt < $sdata/JOB/text |" \ - "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1; -fi - -x=1 -while [ $x -lt $num_iters ]; do - echo Pass $x - if echo $realign_iters | grep -w $x >/dev/null && [ $stage -le $x ]; then - echo Aligning data - mdl="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $dir/$x.mdl - |" - $cmd JOB=1:$nj $dir/log/align.$x.JOB.log \ - gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam --careful=$careful "$mdl" \ - "ark:gunzip -c $dir/fsts.JOB.gz|" "$feats" \ - "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; - fi - - if echo $fmllr_iters | grep -w $x >/dev/null; then - if [ $stage -le $x ]; then - echo Estimating fMLLR transforms - # We estimate a transform that's additional to the previous transform; - # we'll compose them. - $cmd JOB=1:$nj $dir/log/fmllr.$x.JOB.log \ - ali-to-post "ark:gunzip -c $dir/ali.JOB.gz|" ark:- \| \ - weight-silence-post $silence_weight $silphonelist $dir/$x.mdl ark:- ark:- \| \ - gmm-est-fmllr --fmllr-update-type=$fmllr_update_type \ - --spk2utt=ark:$sdata/JOB/spk2utt $dir/$x.mdl \ - "$feats" ark:- ark:$dir/tmp_trans.JOB || exit 1; - for n in `seq $nj`; do - ! ( compose-transforms --b-is-affine=true \ - ark:$dir/tmp_trans.$n ark:$cur_trans_dir/trans.$n ark:$dir/composed_trans.$n \ - && mv $dir/composed_trans.$n $dir/trans.$n && \ - rm $dir/tmp_trans.$n ) 2>$dir/log/compose_transforms.$x.log \ - && echo "$0: Error composing transforms" && exit 1; - done - fi - feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/trans.JOB ark:- ark:- |" - cur_trans_dir=$dir - fi - - if [ $stage -le $x ]; then - $cmd JOB=1:$nj $dir/log/acc.$x.JOB.log \ - gmm-acc-stats-ali $dir/$x.mdl "$feats" \ - "ark,s,cs:gunzip -c $dir/ali.JOB.gz|" $dir/$x.JOB.acc || exit 1; - [ `ls $dir/$x.*.acc | wc -w` -ne "$nj" ] && echo "$0: Wrong #accs" && exit 1; - $cmd $dir/log/update.$x.log \ - gmm-est --power=$power --write-occs=$dir/$[$x+1].occs --mix-up=$numgauss $dir/$x.mdl \ - "gmm-sum-accs - $dir/$x.*.acc |" $dir/$[$x+1].mdl || exit 1; - rm $dir/$x.mdl $dir/$x.*.acc - rm $dir/$x.occs - fi - [ $x -le $max_iter_inc ] && numgauss=$[$numgauss+$incgauss]; - x=$[$x+1]; -done - - -if [ $stage -le $x ]; then - # Accumulate stats for "alignment model"-- this model is - # computed with the speaker-independent features, but matches Gaussian-for-Gaussian - # with the final speaker-adapted model. - $cmd JOB=1:$nj $dir/log/acc_alimdl.JOB.log \ - ali-to-post "ark:gunzip -c $dir/ali.JOB.gz|" ark:- \| \ - gmm-acc-stats-twofeats $dir/$x.mdl "$feats" "$sifeats" \ - ark,s,cs:- $dir/$x.JOB.acc || exit 1; - [ `ls $dir/$x.*.acc | wc -w` -ne "$nj" ] && echo "$0: Wrong #accs" && exit 1; - # Update model. 
- $cmd $dir/log/est_alimdl.log \ - gmm-est --power=$power --remove-low-count-gaussians=false $dir/$x.mdl \ - "gmm-sum-accs - $dir/$x.*.acc|" $dir/$x.alimdl || exit 1; - rm $dir/$x.*.acc -fi - -rm $dir/final.{mdl,alimdl,occs} 2>/dev/null -ln -s $x.mdl $dir/final.mdl -ln -s $x.occs $dir/final.occs -ln -s $x.alimdl $dir/final.alimdl - - -steps/diagnostic/analyze_alignments.sh --cmd "$cmd" $lang $dir - -utils/summarize_warnings.pl $dir/log -( - echo "$0: Likelihood evolution:" - for x in `seq $[$num_iters-1]`; do - tail -n 30 $dir/log/acc.$x.*.log | awk '/Overall avg like/{l += $(NF-3)*$(NF-1); t += $(NF-1); } - /Overall average logdet/{d += $(NF-3)*$(NF-1); t2 += $(NF-1);} - END{ d /= t2; l /= t; printf("%s ", d+l); } ' - done - echo -) | tee $dir/log/summary.log - - -steps/info/gmm_dir_info.pl $dir - -echo "$0: done training SAT system in $dir" - -exit 0 diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/legacy/__init__.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/legacy/__init__.py deleted file mode 100644 index 9bd5c72b5e9d7f67fb7e4ef10808d7ec08967ff4..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/legacy/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from .block_pair_dataset import BlockPairDataset -from .masked_lm_dataset import MaskedLMDataset -from .masked_lm_dictionary import BertDictionary, MaskedLMDictionary - - -__all__ = [ - "BertDictionary", - "BlockPairDataset", - "MaskedLMDataset", - "MaskedLMDictionary", -] diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/multilingual/multilingual_data_manager.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/multilingual/multilingual_data_manager.py deleted file mode 100644 index 137481b449b9cb5b2b486950c6cea669ac507c48..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/multilingual/multilingual_data_manager.py +++ /dev/null @@ -1,1136 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import itertools -import json -import logging -import math -import os -from collections import OrderedDict, defaultdict -from argparse import ArgumentError - -from fairseq import utils -from fairseq.data import ( - AppendTokenDataset, - ConcatDataset, - Dictionary, - LanguagePairDataset, - PrependTokenDataset, - SampledMultiDataset, - SampledMultiEpochDataset, - StripTokenDataset, - TransformEosLangPairDataset, - TruncateDataset, - data_utils, - indexed_dataset, -) -from fairseq.data.multilingual.multilingual_utils import ( - EncoderLangtok, - LangTokSpec, - LangTokStyle, - augment_dictionary, - get_lang_tok, -) -from fairseq.data.multilingual.sampled_multi_dataset import CollateFormat -from fairseq.file_io import PathManager -from fairseq.utils import FileContentsAction, csv_str_list, eval_str_dict - - -logger = logging.getLogger(__name__) - -SRC_DICT_NAME = 'src' -TGT_DICT_NAME = 'tgt' - - -def _lang_id(dic: Dictionary, lang: str): - """Return language ID index.""" - idx = dic.index(lang) - assert idx != dic.unk_index, "cannot find language ID for lang {}".format(lang) - return idx - - -def load_sampling_weights(from_file): - with open(from_file) as f: - weights = json.load(f) - return weights - - -class MultilingualDatasetManager(object): - def __init__(self, args, lang_pairs, langs, dicts, sampling_method): - super().__init__() - self.args = args - self.seed = args.seed - self.lang_pairs = lang_pairs - self.extra_lang_pairs = ( - list( - {p for _, v in args.extra_lang_pairs.items() for p in v.split(",")} - ) - if args.extra_lang_pairs - else [] - ) - self.src_langs = {p.split("-")[0] for p in args.lang_pairs + self.extra_lang_pairs} - self.tgt_langs = {p.split("-")[1] for p in args.lang_pairs + self.extra_lang_pairs} - self.langs = langs - self.dicts = dicts - self.lang_dict = self.create_lang_dictionary(self.langs) - self.sampling_method = sampling_method - self.sampling_scheduler = None - self._has_sharded_data = False - self._num_shards_dict = {} - self._training_data_sizes = defaultdict(lambda: {}) - - @classmethod - def setup_data_manager(cls, args, lang_pairs, langs, dicts, sampling_method): - return MultilingualDatasetManager( - args, lang_pairs, langs, dicts, sampling_method - ) - - @staticmethod - def add_args(parser): - parser.add_argument( - "data", - help="colon separated path to data directories list, \ - will be iterated upon during epochs in round-robin manner", - action=FileContentsAction, - ) - parser.add_argument( - "--langs", - default=None, - type=csv_str_list, - help="a list of languages comma sperated languages which can appear in lang-pairs; " - "note that the ordering determines language token IDs", - ) - parser.add_argument( - "--lang-dict", - default=None, - type=str, - help="an external file which contains a list of " - "languages which can appear in lang-pairs; " - "note that the ordering determines language token IDs; " - "--langs and --lang-dict are two exclusive options", - ) - parser.add_argument('--source-dict', default=None, type=str, - help='path to source dictionary; if specified it will override per language dictionary loading') - parser.add_argument('--target-dict', default=None, type=str, - help='path to target dictionary; if specified it will override per language dictionary loading') - parser.add_argument( - "--lang-tok-style", - default=LangTokStyle.multilingual.value, - type=str, - choices=[LangTokStyle.multilingual.value, LangTokStyle.mbart.value], - help="language token styles", - ) - - parser.add_argument( - "--load-alignments", - 
action="store_true", - help="load the binarized alignments", - ) - parser.add_argument( - "--left-pad-source", - default="True", - type=str, - metavar="BOOL", - help="pad the source on the left", - ) - parser.add_argument( - "--left-pad-target", - default="False", - type=str, - metavar="BOOL", - help="pad the target on the left", - ) - try: - parser.add_argument( - "--max-source-positions", - default=1024, - type=int, - metavar="N", - help="max number of tokens in the source sequence", - ) - parser.add_argument( - "--max-target-positions", - default=1024, - type=int, - metavar="N", - help="max number of tokens in the target sequence", - ) - except ArgumentError: - # this might have already been defined. Once we transition this to hydra it should be fine to add it here. - pass - parser.add_argument( - "--upsample-primary", - default=1, - type=int, - help="amount to upsample primary dataset", - ) - parser.add_argument( - "--truncate-source", - action="store_true", - default=False, - help="truncate source to max-source-positions", - ) - parser.add_argument( - "--encoder-langtok", - default=None, - type=str, - choices=[EncoderLangtok.src.value, EncoderLangtok.tgt.value], - metavar="SRCTGT", - help="prepend to the beginning of source sentence the source or target " - "language token. (src/tgt)", - ) - parser.add_argument( - "--decoder-langtok", - action="store_true", - help="prepend to the beginning of target sentence the target language token", - ) - parser.add_argument( - "--lang-tok-replacing-bos-eos", action="store_true", default=False - ) - parser.add_argument( - "--enable-lang-ids", - default=False, - action="store_true", - help="whether to include language IDs in samples", - ) - parser.add_argument( - "--enable-reservsed-directions-shared-datasets", - default=False, - action="store_true", - help="whether to allow datasets be used in reversed directions", - ) - - parser.add_argument( - "--extra-data", - help='a dictionary of data name to this path, \ - e.g. {"mined", path_to_mined_data, "denoised": path_to_denoised_data}', - type=lambda uf: eval_str_dict(uf, type=str), - default=None, - ) - parser.add_argument( - "--extra-lang-pairs", - help='a dictionary of data name to the language pairs they serve, \ - e.g. {"mined": comma-separated-lang-pairs, "denoised": comma-separated-lang-pairs}', - type=lambda uf: eval_str_dict(uf, type=str), - default=None, - ) - parser.add_argument( - "--fixed-dictionary", - help="Fixed dictionary to use with model path", - default=None, - type=str, - ) - parser.add_argument( - "--langtoks-specs", - help='a list of comma separated data types that a set of language tokens to be specialized for, \ - e.g. "main,dae,mined". There will be a set of language tokens added to the vocab to \ - distinguish languages in different training data types. If not specified, default language \ - tokens per languages will be added', - default=LangTokSpec.main.value, - type=csv_str_list, - ) - parser.add_argument( - "--langtoks", - help='a dictionary of how to add language tokens, \ - e.g. {"mined": (None, "tgt"), "mono_dae": ("src.dae", "tgt"), "main": \ - ("src", "tgt")}, or {"mined": ("src.mined", "tgt")}', - default=None, - type=lambda uf: eval_str_dict(uf, type=str), - ) - parser.add_argument( - "--sampling-weights-from-file", - help='a file contain a python dictionary of how to sample data sets, \ - e.g. 
{ "main:en_XX-es_XX": 0.2, "mined:en_XX-pt_XX": 0.5, \ - "mono_dae:es_XX-es_XX: 0.3, "main:en_xx-fr_XX": 0.8 }', - default=None, - type=str, - ) - parser.add_argument( - "--sampling-weights", - help='a dictionary of how to sample data sets, \ - e.g. { "main:en_XX-es_XX": 0.2, "mined:en_XX-pt_XX": 0.5, \ - "mono_dae:es_XX-es_XX: 0.3, "main:en_xx-fr_XX": 0.8 }', - default=None, - type=lambda uf: eval_str_dict(uf, type=str), - ) - parser.add_argument( - "--virtual-epoch-size", - default=None, - type=int, - help="virtual epoch size to speed up data loading", - ) - parser.add_argument( - "--virtual-data-size", - default=None, - type=int, - help="virtual data size of the whole joint dataset to speed" - "up data loading and have specific dynamic sampling strategy interval", - ) - - @classmethod - def load_langs(cls, args, **kwargs): - if args.lang_dict and args.langs: - raise ValueError("--langs and --lang-dict can not both be specified") - if args.lang_dict is None and args.langs is None: - logger.warning( - "External language dictionary is not provided; " - "use lang-pairs to infer the set of supported languages. " - "The language ordering is not stable which might cause " - "misalignment in pretraining and finetuning." - ) - # infer from lang_pairs as it is - langs = list( - {x for lang_pair in args.lang_pairs for x in lang_pair.split("-")} - ) - langs = sorted(langs) - logger.info(f"inferred language list: {langs}") - elif args.lang_dict: - with open( - PathManager.get_local_path(args.lang_dict), "r", encoding="utf-8" - ) as f: - langs = [lang.strip() for lang in f.readlines() if lang.strip()] - logger.info( - f"loaded language list from {args.lang_dict} as they are ordered in file" - ) - elif args.langs: - langs = args.langs - logger.info( - f"parsed the language list as they are ordered in the option: {langs}" - ) - return langs - - def has_sharded_data(self, split): - return self._has_sharded_data and split == getattr( - self.args, "train_subset", None - ) - - def _shared_collater(self): - return not (self.args.extra_data and "mono_dae" in self.args.extra_data) and ( - not self.args.lang_tok_replacing_bos_eos - ) - - def estimate_global_pass_epoch(self, epoch): - if self.args.virtual_epoch_size is None or self.args.virtual_data_size is None: - return None - # one epoch more for remaining data in each shard - virtual_epochs_per_shard = math.ceil( - self.args.virtual_data_size / self.args.virtual_epoch_size - ) - # note that fairseq epoch / shard_epoch starts from 1 - shard_epoch = (epoch - 1) // virtual_epochs_per_shard + 1 - return shard_epoch - - @classmethod - def prepare(cls, load_dictionary, args, **kargs): - args.left_pad_source = utils.eval_bool(args.left_pad_source) - args.left_pad_target = utils.eval_bool(args.left_pad_target) - - if not hasattr(args, "shuffle_instance"): - args.shuffle_instance = False - if args.langtoks is None: - args.langtoks = {} - if "main" not in args.langtoks: - src_langtok_spec = args.encoder_langtok if args.encoder_langtok else None - tgt_langtok_spec = "tgt" if args.decoder_langtok else None - args.langtoks["main"] = (src_langtok_spec, tgt_langtok_spec) - - def check_langs(langs, pairs): - messages = [] - for src, tgt in pairs: - if src not in langs or tgt not in langs: - messages.append( - f"language pair {src}-{tgt} contains languages " - "that are not in the language dictionary" - ) - if len(messages) > 0: - raise ValueError(" ".join(messages) + f"; langs: {langs}") - - if args.lang_pairs is None: - raise ValueError( - "--lang-pairs is required. 
List all the language pairs in the training objective." - ) - if isinstance(args.lang_pairs, str): - args.lang_pairs = args.lang_pairs.split(",") - if args.source_lang is not None or args.target_lang is not None: - training = False - else: - training = True - language_list = cls.load_langs(args, **kargs) - check_langs( - language_list, - ( - [p.split("-") for p in args.lang_pairs] - if training - else [(args.source_lang, args.target_lang)] - ), - ) - - def load_dictionary_and_postproc(path): - d = load_dictionary(path) - augment_dictionary( - dictionary=d, - language_list=language_list, - lang_tok_style=args.lang_tok_style, - langtoks_specs=args.langtoks_specs, - extra_data=args.extra_data, - ) - return d - - dicts = cls.load_all_dictionaries(args, language_list, load_dictionary_and_postproc, training) - return language_list, dicts, training - - @classmethod - def load_all_dictionaries(cls, args, language_list, load_dictionary, training): - dicts = OrderedDict() - if args.source_dict is not None: - dicts[SRC_DICT_NAME] = load_dictionary(args.source_dict) - if args.target_dict is not None: - dicts[TGT_DICT_NAME] = load_dictionary(args.target_dict) - - if training: - extra_lang_pairs = ( - list( - {p for _, v in args.extra_lang_pairs.items() for p in v.split(",")} - ) - if args.extra_lang_pairs - else [] - ) - src_langs_to_load_dicts = sorted( - {p.split("-")[0] for p in (args.lang_pairs + extra_lang_pairs)} - ) - tgt_langs_to_load_dicts = sorted( - {p.split("-")[1] for p in (args.lang_pairs + extra_lang_pairs)} - ) - else: - src_langs_to_load_dicts = [args.source_lang] - tgt_langs_to_load_dicts = [args.target_lang] - - paths = utils.split_paths(args.data) - assert len(paths) > 0 - - def load_dicts(langs_to_load_dicts): - for lang in langs_to_load_dicts: - dicts[lang] = load_dictionary( - os.path.join(paths[0], "dict.{}.txt".format(lang)) - ) - if len(dicts) > 0: - dict0 = next(iter(dicts.values())) - assert dicts[lang].pad() == dict0.pad() - assert dicts[lang].eos() == dict0.eos() - assert dicts[lang].unk() == dict0.unk() - logger.info("[{}] dictionary: {} types".format(lang, len(dicts[lang]))) - - if args.fixed_dictionary is not None: - fixed_dict = load_dictionary(args.fixed_dictionary) - dicts = {lang: fixed_dict for lang in src_langs_to_load_dicts + tgt_langs_to_load_dicts} - else: - if args.source_dict is None: - load_dicts(src_langs_to_load_dicts) - if args.target_dict is None: - load_dicts(tgt_langs_to_load_dicts) - return dicts - - def get_source_dictionary(self, lang): - if self.args.source_dict is not None: - return self.dicts[SRC_DICT_NAME] - else: - return self.dicts[lang] - - def get_target_dictionary(self, lang): - if self.args.target_dict is not None: - return self.dicts[TGT_DICT_NAME] - else: - return self.dicts[lang] - - @classmethod - def create_lang_dictionary(cls, langs): - unk = "" - # hack to remove symbols other than unk as they are not needed by lang dict - lang_dict = Dictionary(pad=unk, eos=unk, unk=unk, bos=unk) - for lang in langs: - lang_dict.add_symbol(lang) - return lang_dict - - @classmethod - def get_langtok_index(cls, lang_tok, dic): - idx = dic.index(lang_tok) - assert ( - idx != dic.unk_index - ), "cannot find language token {} in the dictionary".format(lang_tok) - return idx - - def get_encoder_langtok(self, src_lang, tgt_lang, spec=None): - if spec is None: - return None - if spec and spec.startswith("src"): - if src_lang is None: - return None - langtok = get_lang_tok( - lang=src_lang, lang_tok_style=self.args.lang_tok_style, spec=spec - ) - else: - if 
tgt_lang is None: - return None - langtok = get_lang_tok( - lang=tgt_lang, lang_tok_style=self.args.lang_tok_style, spec=spec - ) - return self.get_langtok_index( - langtok, self.get_source_dictionary(src_lang) if src_lang else self.get_target_dictionary(tgt_lang) - ) - - def get_decoder_langtok(self, tgt_lang, spec=None): - if spec is None: - return None - langtok = get_lang_tok( - lang=tgt_lang, lang_tok_style=self.args.lang_tok_style, spec=spec - ) - return self.get_langtok_index(langtok, self.get_target_dictionary(tgt_lang)) - - @classmethod - def load_data(cls, path, vdict, impl): - dataset = data_utils.load_indexed_dataset(path, vdict, impl) - return dataset - - @classmethod - def split_exists(cls, split, src, tgt, lang, data_path, dataset_impl): - filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang)) - return indexed_dataset.dataset_exists(filename, impl=dataset_impl) - - def load_lang_dataset( - self, - data_path, - split, - src, - src_dict, - tgt, - tgt_dict, - combine, - dataset_impl, - upsample_primary, - max_source_positions, - prepend_bos=False, - load_alignments=False, - truncate_source=False, - ): - - src_datasets = [] - tgt_datasets = [] - - for k in itertools.count(): - split_k = split + (str(k) if k > 0 else "") - - # infer langcode - if self.split_exists(split_k, src, tgt, src, data_path, dataset_impl): - prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt)) - elif self.split_exists(split_k, tgt, src, src, data_path, dataset_impl): - prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src)) - else: - if k > 0: - break - else: - logger.error( - f"Dataset not found: {data_path}, {split_k}, {src}, {tgt}" - ) - raise FileNotFoundError( - "Dataset not found: {} ({})".format(split, data_path) - ) - - src_dataset = self.load_data(prefix + src, src_dict, dataset_impl) - if truncate_source: - src_dataset = AppendTokenDataset( - TruncateDataset( - StripTokenDataset(src_dataset, src_dict.eos()), - max_source_positions - 1, - ), - src_dict.eos(), - ) - src_datasets.append(src_dataset) - tgt_datasets.append(self.load_data(prefix + tgt, tgt_dict, dataset_impl)) - - logger.info( - "{} {} {}-{} {} examples".format( - data_path, split_k, src, tgt, len(src_datasets[-1]) - ) - ) - - if not combine: - break - - assert len(src_datasets) == len(tgt_datasets) - - if len(src_datasets) == 1: - src_dataset, tgt_dataset = src_datasets[0], tgt_datasets[0] - else: - sample_ratios = [1] * len(src_datasets) - sample_ratios[0] = upsample_primary - src_dataset = ConcatDataset(src_datasets, sample_ratios) - tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios) - - if prepend_bos: - assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index") - src_dataset = PrependTokenDataset(src_dataset, src_dict.bos()) - tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos()) - - align_dataset = None - if load_alignments: - align_path = os.path.join( - data_path, "{}.align.{}-{}".format(split, src, tgt) - ) - if indexed_dataset.dataset_exists(align_path, impl=dataset_impl): - align_dataset = data_utils.load_indexed_dataset( - align_path, None, dataset_impl - ) - - return src_dataset, tgt_dataset, align_dataset - - def load_langpair_dataset( - self, - data_path, - split, - src, - src_dict, - tgt, - tgt_dict, - combine, - dataset_impl, - upsample_primary, - left_pad_source, - left_pad_target, - max_source_positions, - max_target_positions, - prepend_bos=False, - load_alignments=False, - truncate_source=False, - 
src_dataset_transform_func=lambda dataset: dataset, - tgt_dataset_transform_func=lambda dataset: dataset, - src_lang_id=None, - tgt_lang_id=None, - langpairs_sharing_datasets=None, - ): - norm_direction = "-".join(sorted([src, tgt])) - if langpairs_sharing_datasets is not None: - src_dataset = langpairs_sharing_datasets.get( - (data_path, split, norm_direction, src), "NotInCache" - ) - tgt_dataset = langpairs_sharing_datasets.get( - (data_path, split, norm_direction, tgt), "NotInCache" - ) - align_dataset = langpairs_sharing_datasets.get( - (data_path, split, norm_direction, src, tgt), "NotInCache" - ) - - # a hack: any one is not in cache, we need to reload them - if ( - langpairs_sharing_datasets is None - or src_dataset == "NotInCache" - or tgt_dataset == "NotInCache" - or align_dataset == "NotInCache" - or split != getattr(self.args, "train_subset", None) - ): - # source and target datasets can be reused in reversed directions to save memory - # reversed directions of valid and test data will not share source and target datasets - src_dataset, tgt_dataset, align_dataset = self.load_lang_dataset( - data_path, - split, - src, - src_dict, - tgt, - tgt_dict, - combine, - dataset_impl, - upsample_primary, - max_source_positions=max_source_positions, - prepend_bos=prepend_bos, - load_alignments=load_alignments, - truncate_source=truncate_source, - ) - src_dataset = src_dataset_transform_func(src_dataset) - tgt_dataset = tgt_dataset_transform_func(tgt_dataset) - if langpairs_sharing_datasets is not None: - langpairs_sharing_datasets[ - (data_path, split, norm_direction, src) - ] = src_dataset - langpairs_sharing_datasets[ - (data_path, split, norm_direction, tgt) - ] = tgt_dataset - langpairs_sharing_datasets[ - (data_path, split, norm_direction, src, tgt) - ] = align_dataset - if align_dataset is None: - # no align data so flag the reverse direction as well in sharing - langpairs_sharing_datasets[ - (data_path, split, norm_direction, tgt, src) - ] = align_dataset - else: - logger.info( - f"Reusing source and target datasets of [{split}] {tgt}-{src} for reversed direction: " - f"[{split}] {src}-{tgt}: src length={len(src_dataset)}; tgt length={len(tgt_dataset)}" - ) - - return LanguagePairDataset( - src_dataset, - src_dataset.sizes, - src_dict, - tgt_dataset, - tgt_dataset.sizes if tgt_dataset is not None else None, - tgt_dict, - left_pad_source=left_pad_source, - left_pad_target=left_pad_target, - align_dataset=align_dataset, - src_lang_id=src_lang_id, - tgt_lang_id=tgt_lang_id, - ) - - def src_dataset_tranform_func(self, src_lang, tgt_lang, dataset, spec=None): - if self.args.lang_tok_replacing_bos_eos: - # it is handled by self.alter_dataset_langtok - # TODO: Unifiy with alter_dataset_langtok - return dataset - if spec is None: - return dataset - tok = self.get_encoder_langtok(src_lang, tgt_lang, spec) - if tok: - return PrependTokenDataset(dataset, tok) - return dataset - - def tgt_dataset_tranform_func(self, source_lang, target_lang, dataset, spec=None): - if dataset is None: - # note that target dataset can be None during inference time - return None - if self.args.lang_tok_replacing_bos_eos: - # TODO: Unifiy with alter_dataset_langtok - # It is handled by self.alter_dataset_langtok. - # The complication in self.alter_dataset_langtok - # makes a unified framework difficult. 
- return dataset - # if not self.args.decoder_langtok: - if not spec: - return dataset - tok = self.get_decoder_langtok(target_lang, spec) - if tok: - return PrependTokenDataset(dataset, tok) - return dataset - - def alter_dataset_langtok( - self, - lang_pair_dataset, - src_eos=None, - src_lang=None, - tgt_eos=None, - tgt_lang=None, - src_langtok_spec=None, - tgt_langtok_spec=None, - ): - if src_langtok_spec is None and tgt_langtok_spec is None: - return lang_pair_dataset - - new_src_eos = None - if ( - src_langtok_spec is not None - and src_eos is not None - and (src_lang is not None or tgt_lang is not None) - ): - new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang, src_langtok_spec) - else: - src_eos = None - - new_tgt_bos = None - if tgt_langtok_spec and tgt_eos is not None and tgt_lang is not None: - new_tgt_bos = self.get_decoder_langtok(tgt_lang, tgt_langtok_spec) - else: - tgt_eos = None - - return TransformEosLangPairDataset( - lang_pair_dataset, - src_eos=src_eos, - new_src_eos=new_src_eos, - tgt_bos=tgt_eos, - new_tgt_bos=new_tgt_bos, - ) - - def load_a_dataset( - self, - split, - data_path, - src, - src_dict, - tgt, - tgt_dict, - combine, - prepend_bos=False, - langpairs_sharing_datasets=None, - data_category=None, - **extra_kwargs, - ): - dataset_impl = self.args.dataset_impl - upsample_primary = self.args.upsample_primary - left_pad_source = self.args.left_pad_source - left_pad_target = self.args.left_pad_target - max_source_positions = self.args.max_source_positions - max_target_positions = self.args.max_target_positions - load_alignments = self.args.load_alignments - truncate_source = self.args.truncate_source - src_dataset_transform_func = self.src_dataset_tranform_func - tgt_dataset_transform_func = self.tgt_dataset_tranform_func - enable_lang_ids = self.args.enable_lang_ids - lang_dictionary = self.lang_dict - src_langtok_spec, tgt_langtok_spec = extra_kwargs["langtok_spec"] - - src_langtok = self.get_encoder_langtok(src, tgt, src_langtok_spec) - tgt_langtok = self.get_decoder_langtok(tgt, tgt_langtok_spec) - logger.info( - f"{data_category}:{src}-{tgt} src_langtok: {src_langtok}; tgt_langtok: {tgt_langtok}" - ) - - langpair_ds = self.load_langpair_dataset( - data_path, - split, - src, - src_dict, - tgt, - tgt_dict, - combine, - dataset_impl, - upsample_primary, - left_pad_source, - left_pad_target, - max_source_positions, - max_target_positions, - prepend_bos, - load_alignments, - truncate_source, - src_dataset_transform_func=lambda dataset: src_dataset_transform_func( - src, tgt, dataset, src_langtok_spec - ), - tgt_dataset_transform_func=lambda dataset: tgt_dataset_transform_func( - src, tgt, dataset, tgt_langtok_spec - ), - src_lang_id=_lang_id(lang_dictionary, src) - if enable_lang_ids and lang_dictionary is not None - else None, - tgt_lang_id=_lang_id(lang_dictionary, tgt) - if enable_lang_ids and lang_dictionary is not None - else None, - langpairs_sharing_datasets=langpairs_sharing_datasets, - ) - # TODO: handle modified lang toks for mined data and dae data - if self.args.lang_tok_replacing_bos_eos: - ds = self.alter_dataset_langtok( - langpair_ds, - src_eos=self.get_source_dictionary(src).eos() if src else self.get_target_dictionary(tgt).eos(), - src_lang=src, - tgt_eos=self.get_target_dictionary(tgt).eos(), - tgt_lang=tgt, - src_langtok_spec=src_langtok_spec, - tgt_langtok_spec=tgt_langtok_spec, - ) - else: - ds = langpair_ds - return ds - - def load_split_langpair_datasets(self, split, data_param_list): - datasets = [] - langpairs_sharing_datasets = ( - 
{} if self.args.enable_reservsed_directions_shared_datasets else None - ) - for param in data_param_list: - ds = self.load_a_dataset( - split=split, - langpairs_sharing_datasets=langpairs_sharing_datasets, - **param, - ) - datasets.append(ds) - return datasets - - def get_data_paths_and_lang_pairs(self, split): - datapaths = {"main": self.args.data} - lang_pairs = {"main": self.lang_pairs} - if split == getattr(self.args, "train_subset", None): - # only training data can have extra data and extra language pairs - if self.args.extra_data: - extra_datapaths = self.args.extra_data - datapaths.update(extra_datapaths) - if self.args.extra_lang_pairs: - extra_lang_pairs = { - k: v.split(",") for k, v in self.args.extra_lang_pairs.items() - } - lang_pairs.update(extra_lang_pairs) - return datapaths, lang_pairs - - @classmethod - def get_dataset_key(cls, data_category, src, tgt): - return f"{data_category}:{src}-{tgt}" - - @classmethod - def _get_shard_num_dict(cls, split, paths): - shards = defaultdict(int) - for path in paths: - files = PathManager.ls(path) - directions = set() - for f in files: - if f.startswith(split) and f.endswith(".idx"): - # idx files of the form "{split}.{src}-{tgt}.{lang}.idx" - direction = f.split(".")[-3] - directions.add(direction) - for direction in directions: - shards[direction] += 1 - return shards - - def get_split_num_data_shards(self, split): - if split in self._num_shards_dict: - return self._num_shards_dict[split] - num_shards_dict = {} - data_paths, lang_pairs = self.get_data_paths_and_lang_pairs(split) - - for data_category, paths in data_paths.items(): - if data_category not in lang_pairs: - continue - paths = utils.split_paths(paths) - shards_dict = self._get_shard_num_dict(split, paths) - lang_dirs = [ - lang_pair.split("-") for lang_pair in lang_pairs[data_category] - ] - lang_dirs = [x if len(x) > 1 else (x[0], x[0]) for x in lang_dirs] - for src, tgt in lang_dirs: - key = self.get_dataset_key(data_category, src, tgt) - if "mono_" in data_category: - # monolingual data requires tgt only - assert src is None or src == tgt, ( - f"error: src={src}, " - "tgt={tgt} for data_category={data_category}" - ) - num_shards_dict[key] = shards_dict[tgt] - else: - if f"{src}-{tgt}" in shards_dict: - num_shards_dict[key] = shards_dict[f"{src}-{tgt}"] - elif f"{tgt}-{src}" in shards_dict: - # follow the fairseq tradition to use reversed direction data if it is not available - num_shards_dict[key] = shards_dict[f"{tgt}-{src}"] - self._num_shards_dict[split] = num_shards_dict - logger.info(f"[{split}] num of shards: {num_shards_dict}") - return num_shards_dict - - @classmethod - def get_shard_id(cls, num_shards, epoch, shard_epoch=None): - shard = epoch if shard_epoch is None else shard_epoch - shard = (shard - 1) % num_shards - return shard - - def get_split_data_path(self, paths, epoch, shard_epoch, num_shards): - path = paths[self.get_shard_id(num_shards, epoch, shard_epoch)] - return path - - def get_split_data_param_list(self, split, epoch, shard_epoch=None): - # TODO: to extend with extra datasets and keys and loop over different shard data paths - param_list = [] - data_paths, lang_pairs = self.get_data_paths_and_lang_pairs(split) - logger.info(f"langtoks settings: {self.args.langtoks}") - split_num_shards_dict = self.get_split_num_data_shards(split) - for data_category, paths in data_paths.items(): - if data_category not in lang_pairs: - continue - paths = utils.split_paths(paths) - assert len(paths) > 0 - if len(paths) > 1: - self._has_sharded_data = True - if 
split != getattr(self.args, "train_subset", None): - # if not training data set, use the first shard for valid and test - paths = paths[:1] - - if data_category in self.args.langtoks: - lang_tok_spec = self.args.langtoks[data_category] - else: - # default to None - lang_tok_spec = (None, None) - - # infer langcode - lang_dirs = [ - lang_pair.split("-") for lang_pair in lang_pairs[data_category] - ] - lang_dirs = [x if len(x) > 1 else (x[0], x[0]) for x in lang_dirs] - for src, tgt in lang_dirs: - assert src is not None or data_category == "mono_dae", ( - f"error: src={src}, " "tgt={tgt} for data_category={data_category}" - ) - # logger.info(f"preparing param for {data_category}: {src} - {tgt}") - key = self.get_dataset_key(data_category, src, tgt) - data_path = self.get_split_data_path( - paths, epoch, shard_epoch, split_num_shards_dict[key] - ) - param_list.append( - { - "key": key, - "data_path": data_path, - "split": split, - "src": src, - "src_dict": self.get_source_dictionary(src) - if src and data_category != "mono_dae" - else None, - "tgt": tgt, - "tgt_dict": self.get_target_dictionary(tgt), - "data_category": data_category, - "langtok_spec": lang_tok_spec, - } - ) - return param_list - - def get_train_dataset_sizes( - self, data_param_list, datasets, epoch, shard_epoch=None - ): - num_shards = [ - self.get_split_num_data_shards(param["split"])[param["key"]] - for param in data_param_list - ] - data_sizes = [] - for (key, d), num_shard in zip(datasets, num_shards): - my_data_sizes = self._training_data_sizes[key] - shard_ind = self.get_shard_id(num_shard, epoch, shard_epoch) - if shard_ind not in my_data_sizes: - my_data_sizes[shard_ind] = len(d) - known_size = max(my_data_sizes.values()) - data_sizes.append( - # If we don't know the data size of the shard yet, - # use the the max known data size to approximate. - # Note that we preprocess shards by a designated shard size - # and put any remaining data at the end into the last shard so - # the max shard size approximation is almost correct before loading - # the last shard; after loading the last shard, it will have the - # exact data sizes of the whole data size. - (key, sum(my_data_sizes.get(i, known_size) for i in range(num_shard))) - ) - logger.info( - f"estimated total data sizes of all shards used in sampling ratios: {data_sizes}. 
" - "Note that if the data a shard has not been loaded yet, use the max known data size to approximate" - ) - return [s for _, s in data_sizes] - - def get_train_sampling_ratios( - self, data_param_list, datasets, epoch=1, shard_epoch=None - ): - data_sizes = self.get_train_dataset_sizes( - data_param_list, datasets, epoch, shard_epoch - ) - sampling_func = self.sampling_method.sampling_method_selector() - sample_ratios = sampling_func(data_sizes) if sampling_func is not None else None - return sample_ratios - - def get_sampling_ratios(self, data_param_list, datasets, epoch, shard_epoch=None): - if self.args.sampling_weights_from_file: - weights = load_sampling_weights(self.args.sampling_weights_from_file) - sample_ratios = [weights[k] for k, _ in datasets] - logger.info( - "| ignoring --sampling-weights when loadding sampling weights " - f"from file {self.args.sampling_weights_from_file}" - ) - elif self.args.sampling_weights: - sample_ratios = [self.args.sampling_weights[k] for k, _ in datasets] - else: - sample_ratios = self.get_train_sampling_ratios( - data_param_list, datasets, epoch, shard_epoch - ) - - if sample_ratios is not None: - logger.info( - "| Upsample ratios: {}".format( - list(zip(map(lambda x: x["key"], data_param_list), sample_ratios)) - ) - ) - assert len(sample_ratios) == len(datasets) - return sample_ratios - - def load_split_datasets( - self, split, training, epoch=1, combine=False, shard_epoch=None, **kwargs - ): - data_param_list = self.get_split_data_param_list( - split, epoch, shard_epoch=shard_epoch - ) - langpairs_sharing_datasets = ( - {} if self.args.enable_reservsed_directions_shared_datasets else None - ) - datasets = [ - ( - param["key"], - self.load_a_dataset( - combine=combine, - langpairs_sharing_datasets=langpairs_sharing_datasets, - **param, - ), - ) - for param in data_param_list - ] - return datasets, data_param_list - - def load_into_concat_dataset(self, split, datasets, data_param_list): - if self.args.lang_tok_replacing_bos_eos: - # TODO: to investigate why TransformEosLangPairDataset doesn't work with ConcatDataset - return SampledMultiDataset( - OrderedDict(datasets), - sampling_ratios=None, - eval_key=None, - collate_format=CollateFormat.single, - virtual_size=None, - split=split, - ) - return ConcatDataset([d for _, d in datasets]) - - def load_sampled_multi_epoch_dataset( - self, split, training, epoch=0, combine=False, shard_epoch=None, **kwargs - ): - datasets, data_param_list = self.load_split_datasets( - split, training, epoch, combine, shard_epoch=shard_epoch, **kwargs - ) - if training and split == getattr(self.args, "train_subset", None): - sample_ratios = self.get_sampling_ratios(data_param_list, datasets, epoch) - return SampledMultiEpochDataset( - OrderedDict(datasets), - epoch=epoch, - shard_epoch=shard_epoch, - # valid and test datasets will be degenerate to concating datasets: - sampling_ratios=sample_ratios, - eval_key=None, - collate_format=CollateFormat.single, - virtual_size=self.args.virtual_data_size, - split=split, - virtual_epoch_size=self.args.virtual_epoch_size, - # if not using lang_tok altering, simplified to use the same collater - shared_collater=self._shared_collater(), - ) - else: - return self.load_into_concat_dataset(split, datasets, data_param_list) - - def load_sampled_multi_dataset( - self, split, training, epoch=0, combine=False, shard_epoch=None, **kwargs - ): - datasets, data_param_list = self.load_split_datasets( - split, training, epoch, combine, shard_epoch=shard_epoch, **kwargs - ) - if training and 
split == getattr(self.args, "train_subset", None): - sample_ratios = self.get_sampling_ratios(data_param_list, datasets, epoch) - return SampledMultiDataset( - OrderedDict(datasets), - epoch=epoch, - # valid and test datasets will be degerate to concating datasets: - sampling_ratios=sample_ratios, - eval_key=None, - collate_format=CollateFormat.single, - virtual_size=self.args.virtual_data_size, - split=split, - # if not using lang_tok altering, simplified to use the same collater - shared_collater=self._shared_collater(), - ) - else: - return self.load_into_concat_dataset(split, datasets, data_param_list) - - def load_dataset( - self, split, training, epoch=0, combine=False, shard_epoch=None, **kwargs - ): - if self.args.virtual_epoch_size is None: - return self.load_sampled_multi_dataset( - split, training, epoch, combine, shard_epoch, **kwargs - ) - else: - return self.load_sampled_multi_epoch_dataset( - split, training, epoch, combine, shard_epoch, **kwargs - ) diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/speech_to_text/utils.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/speech_to_text/utils.py deleted file mode 100644 index 168b8bf13b0e734eee3f6989ff0f28a016a09c2b..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/speech_to_text/utils.py +++ /dev/null @@ -1,563 +0,0 @@ -# Copyright (c) 2017-present, Facebook, Inc. -# All rights reserved. -# -# This source code is licensed under the license found in the LICENSE file in -# the root directory of this source tree. An additional grant of patent rights -# can be found in the PATENTS file in the same directory. - - -import logging -from collections.abc import Iterable -from itertools import repeat -from typing import List, Optional, Tuple - -import torch -from torch import Tensor - - -# ------------------------------------------------------------------------------ -# assert_equal() -# ------------------------------------------------------------------------------ - - -def assert_equal(value1, value2, name1=None, name2=None): - """Asserts two values are equal otherwise raise an error.""" - - str_name1 = "" if name1 is None else "{} ".format(name1) - str_name2 = "" if name2 is None else "{} ".format(name2) - if value1 != value2: - str_value1 = "{}" if name1 is None else "({})" - str_value1 = str_value1.format(value1) - str_value2 = "{}" if name2 is None else "({})" - str_value2 = str_value2.format(value2) - raise ValueError( - "Expected {}{} == {}{}".format(str_name1, str_value1, str_name2, str_value2) - ) - - -def fill_config(config, key, value): - if value is not None: - if key not in config or config[key] is None: - config[key] = value - assert_equal(value, config[key], "value", f'config["{key}"]') - - -# ------------------------------------------------------------------------------ -# check_and_return_expected() -# ------------------------------------------------------------------------------ - - -def check_and_return_expected(value, undefined_value, expected_value, name=None): - """ - Return the expected value while checking if the given value is undefined or - equal to the expected value. 
- """ - if (undefined_value is None and value is None) or (undefined_value == value): - return expected_value - if value != expected_value: - str_name = "" if name is None else "{} ".format(name) - str_value = "{}" if name is None else "({})" - str_value = str_value.format(value) - raise ValueError( - "Expected {}{} == {}".format(str_name, str_value, expected_value) - ) - return expected_value - - -# ------------------------------------------------------------------------------ -# get_time_axis() -# ------------------------------------------------------------------------------ - - -def get_time_axis(layout): - """ - Extract the time axis from the layout, for example for breaking sequence into - segments. - """ - if layout in ["TB", "TBD"]: - return 0 - if layout in ["BT", "BTD"]: - return 1 - if layout in ["BCTD"]: - return 2 - raise ValueError("Unsupported layout = {}".format(layout)) - - -# ------------------------------------------------------------------------------ -# get_batch_axis() -# ------------------------------------------------------------------------------ - - -def get_batch_axis(layout): - """ - Extract the batch axis from the layout - """ - if layout in ["TB", "TBD"]: - return 1 - if layout in ["BT", "BTD", "BCTD"]: - return 0 - raise ValueError("Unsupported layout = {}".format(layout)) - - -# ------------------------------------------------------------------------------ -# monotonically_increasing_and_bounded() -# ------------------------------------------------------------------------------ - - -def monotonically_increasing_and_bounded(iterable, min=None, max=None): - """ - Check if the elements in the given iterable are monotonically increasing and - bounded by upper/lower bounds. - """ - if not isinstance(iterable, Iterable): - raise TypeError( - "Expected iterable to be of type Iterable, got ({})".format( - iterable.__class__.__name__ - ) - ) - for i in range(len(iterable)): - if min is not None and iterable[i] < min: - return False - if max is not None and iterable[i] > max: - return False - if i > 0 and iterable[i] <= iterable[i - 1]: - return False - return True - - -# ------------------------------------------------------------------------------ -# to_pair() -# ------------------------------------------------------------------------------ - - -def to_pair(value, name): - """Make a pair (of type tuple) of given value.""" - if isinstance(value, Iterable): - if len(value) != 2: - raise ValueError( - "Expected `{}` to have exactly 2 elements, got: ({})".format( - name, value - ) - ) - return value - return tuple(repeat(value, 2)) - - -# ------------------------------------------------------------------------------ -# infer_conv_output_attrs() -# ------------------------------------------------------------------------------ - - -# TODO(cfyeh): figure out if we can get `output_dim` without calling the module. -def infer_conv_output_attrs( - module, input_channels, input_dim, batch_size=1, max_length=8 -): - """Get output attributes of a module with input.""" - input = torch.randn(batch_size, input_channels, max_length, input_dim) - output = module(input) - output_channels = output.shape[1] - output_dim = output.shape[-1] - return output_channels, output_dim - - -# ------------------------------------------------------------------------------ -# NoOp -# ------------------------------------------------------------------------------ - - -class NoOp(torch.nn.Module): - """ - NoOp simply passes the input as the output. 
- """ - - def __init__(self): - super().__init__() - - def forward(self, input: Tensor) -> Tensor: - return input - - -# ------------------------------------------------------------------------------ -# Permute: a torch.nn.Module applies permutation on the input tensor. -# ------------------------------------------------------------------------------ - - -class Permute(torch.nn.Module): - def __init__(self, dims): - super().__init__() - self.dims = dims - - def forward(self, input: Tensor) -> Tensor: - return input.permute(self.dims).contiguous() - - -# ------------------------------------------------------------------------------ -# lengths_to_padding_mask() -# ------------------------------------------------------------------------------ - - -def lengths_to_padding_mask(lengths: Tensor) -> Tensor: - """Convert lengths of shape (B, ) to padding mask.""" - batch_size = lengths.shape[0] - max_length = int(torch.max(lengths).item()) - padding_mask = torch.arange( # [0, ..., T-1] - max_length, device=lengths.device, dtype=lengths.dtype - ).expand(batch_size, max_length) >= lengths.unsqueeze(1) - - return padding_mask - - -# ------------------------------------------------------------------------------ -# lengths_to_attention_mask() -# ------------------------------------------------------------------------------ - - -def lengths_to_attention_mask( - lengths: Tensor, - left_context: Optional[int] = None, - right_context: Optional[int] = None, -) -> Optional[Tensor]: - """ - Generate attention mask based on (lengths, left_context, right_context). - left_context is None means unlimited left context. - right_context is None means unlimited right context. - """ - - if left_context is None and right_context is None: - return None - - max_length = int(torch.max(lengths).item()) - - # For example, with `max_length` == 5, - # indices = tensor([ - # [ 0, 1, 2, 3, 4, 5], - # [-1, 0, 1, 2, 3, 4], - # [-2, -1, 0, 1, 2, 3], - # [-3, -2, -1, 0, 1, 2], - # [-4, -3, -2, -1, 0, 1], - # [-5, -4, -3, -2, -1, 0], - # ]) - - # In some cases the second torch.arange is created on cpu which causes a - # failure. Adding the device option to guard against it. 
- indices = torch.arange( - max_length, device=lengths.device, dtype=lengths.dtype - ).expand(max_length, max_length) - torch.arange( - max_length, device=lengths.device - ).view( - max_length, -1 - ) - - # For example, with `max_length` == 5, - # bool_mask = tensor([ - # [True, True, True, True, True], - # [True, True, True, True, True], - # [True, True, True, True, True], - # [True, True, True, True, True], - # [True, True, True, True, True], - # ]) - bool_mask = ( - torch.tensor([True]).to(device=lengths.device).expand(max_length, max_length) - ) - - # For example, with `max_length` == 5, left_context == 2 - # left_mask = tensor([ - # [ True, True, True, True, True], - # [ True, True, True, True, True], - # [ True, True, True, True, True], - # [False, True, True, True, True], - # [False, False, True, True, True], - # ]) - if left_context is not None: - left_mask = indices >= -left_context - bool_mask = bool_mask & left_mask - - # For example, with `max_length` == 5, right_context == 1 - # right_mask = tensor([ - # [True, True, False, False, False], - # [True, True, True, False, False], - # [True, True, True, True, False], - # [True, True, True, True, True], - # [True, True, True, True, True], - # ]) - if right_context is not None: - right_mask = indices <= right_context - bool_mask = bool_mask & right_mask - - bool_mask = (~bool_mask).to(device=lengths.device) - return bool_mask - - -# ------------------------------------------------------------------------------ -# infer_output_norm() -# ------------------------------------------------------------------------------ - - -def infer_output_norm(module, output_norm=None): - """ - Infer the output norm (string and module) needed on the module gvien desired - output normalization. - """ - if output_norm == module.output_norm(): - # output_norm already matches module.output_norm(). 
- return (None, NoOp()) - - if output_norm is None and module.output_norm() is not None: - logger = logging.getLogger("infer_output_norm()") - logger.warning( - "trying to set output_norm ({}) ".format(output_norm) - + "but got module.output_norm() ({}), ".format(module.output_norm()) - + "the combined output_norm() will be ({})".format(module.output_norm()) - ) - return (None, NoOp()) - - if output_norm == "log_softmax": - if module.output_norm() is not None: - raise ValueError( - "incompatible output_norm ({}) ".format(output_norm) - + "and module.output_norm() ({})".format(module.output_norm()) - ) - else: - return ("log_softmax", torch.nn.LogSoftmax(dim=-1)) - - if output_norm == "softmax": - if module.output_norm() is not None: - raise ValueError( - "incompatible output_norm ({}) ".format(output_norm) - + "and module.output_norm() ({})".format(module.output_norm()) - ) - else: - return ("softmax", torch.nn.Softmax(dim=-1)) - - raise ValueError( - "output_norm ({}) not in ".format(output_norm) - + "supported list = [None, softmax, log_softmax]" - ) - - -# ------------------------------------------------------------------------------ -# infer_channels_from_layout() -# ------------------------------------------------------------------------------ - - -def infer_channels_from_layout(layout, channels): - """Extract the number of channels from the layout.""" - if layout in ("TBD", "BTD"): - if channels is not None and channels != 1: - raise ValueError( - "Expected channels ({}) to be 1 for layout = {}".format( - channels, layout - ) - ) - if channels is None: - return 1 - return channels - - -# ------------------------------------------------------------------------------ -# pad_sequence() -# ------------------------------------------------------------------------------ - - -@torch.jit.export -def pad_sequence( - sequence: Tensor, - time_axis: int, - extra_left_context: int = 0, - extra_right_context: int = 0, -) -> Tensor: - """Pad extra left/right contexts to the sequence.""" - - if extra_left_context == 0 and extra_right_context == 0: - return sequence - - tensors_to_concat = [] - - if extra_left_context: - size = (extra_left_context,) - fill_value = 0 - indices = torch.full( - size=size, - fill_value=fill_value, - dtype=torch.long, - device=sequence.device, - ) - left_padding = torch.index_select(sequence, time_axis, indices) - tensors_to_concat.append(left_padding) - - tensors_to_concat.append(sequence) - - # NOTE(cfyeh): for efficiency reason we pad 0 instead of the last frame for - # extra right contexts. 
- if extra_right_context: - size = list(sequence.shape) - size[time_axis] = extra_right_context - right_padding = torch.zeros(size, dtype=sequence.dtype, device=sequence.device) - tensors_to_concat.append(right_padding) - - padded_sequence = torch.cat(tensors_to_concat, dim=time_axis) - return padded_sequence - - -# ------------------------------------------------------------------------------ -# sequence_to_segments() -# ------------------------------------------------------------------------------ - - -@torch.jit.export -def sequence_to_segments( - sequence: Tensor, - time_axis: int, - lengths: Tensor, - segment_size: Optional[int] = None, - extra_left_context: int = 0, - extra_right_context: int = 0, -) -> List[Tuple[Tensor, Tensor]]: - """Breaks sequence into segments.""" - - sequence = pad_sequence( - sequence=sequence, - time_axis=time_axis, - extra_left_context=extra_left_context, - extra_right_context=extra_right_context, - ) - - lengths = lengths + extra_left_context + extra_right_context - - segments: List[Tuple[Tensor, Tensor]] = [] - - if segment_size is None: - segments.append((sequence, lengths)) - return segments - - offset = 0 - end = sequence.shape[time_axis] - step = segment_size - size = extra_left_context + segment_size + extra_right_context - - while offset + extra_left_context + extra_right_context < end: - clamped_size = min(size, end - offset) - segment_lengths = torch.clamp(lengths - offset, min=0, max=clamped_size) - indices = torch.arange( - start=offset, - end=(offset + clamped_size), - step=1, - dtype=torch.long, - device=sequence.device, - ) - segment_tensor = torch.index_select(sequence, time_axis, indices) - segments.append((segment_tensor, segment_lengths)) - offset = offset + step - - return segments - - -# ------------------------------------------------------------------------------ -# segments_to_sequence() -# ------------------------------------------------------------------------------ - - -@torch.jit.export -def segments_to_sequence( - segments: List[Tuple[Tensor, Tensor]], time_axis: int -) -> Tuple[Tensor, Tensor]: - """Concatenate segments into a full sequence.""" - if len(segments) == 1: - return segments[0] - - tensors_to_concat: List[Tensor] = [] - lengths_to_stack: List[Tensor] = [] - - for tensor, lengths in segments: - tensors_to_concat.append(tensor) - lengths_to_stack.append(lengths) - - sequence = torch.cat(tensors_to_concat, dim=time_axis) - lengths = torch.stack(lengths_to_stack, dim=0) - lengths = torch.sum(lengths, dim=0) - - return sequence, lengths - - -def lengths_to_encoder_padding_mask(lengths, batch_first: bool = False): - """ - convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor - - Args: - lengths: a (B, )-shaped tensor - batch_first: whether to return a (B, T) tensor - - Return: - max_length: maximum length of B sequences - encoder_padding_mask: a (max_length, B) binary mask, where - [t, b] = False for t < lengths[b] and True otherwise - - TODO: - kernelize this function if benchmarking shows this function is slow - """ - max_lengths = torch.max(lengths).item() - bsz = lengths.size(0) - encoder_padding_mask = torch.arange( - max_lengths - ).to( # a (T, ) tensor with [0, ..., T-1] - lengths.device - ).view( # move to the right device - 1, max_lengths - ).expand( # reshape to (1, T)-shaped tensor - bsz, -1 - ) > lengths.view( # expand to (B, T)-shaped tensor - bsz, 1 - ).expand( - -1, max_lengths - ) - if not batch_first: - return encoder_padding_mask.t(), max_lengths - else: - return encoder_padding_mask, max_lengths 
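- -# Example usage (illustrative sketch): for lengths = torch.tensor([2, 4]), -# mask, max_len = lengths_to_encoder_padding_mask(lengths, batch_first=True) -# gives max_len == 4 and mask == tensor([[False, False, False, True], -# [False, False, False, False]]). -# Note that the comparison above uses ">", so the position at index lengths[b] itself is not masked.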
- - -# ------------------------------------------------------------------------------ -# attention suppression -# ------------------------------------------------------------------------------ - - -def attention_suppression(attention_weights: Tensor, scale: float): - # B, H, qlen, klen -> B, H, qlen, 1 - attention_prob = torch.nn.functional.softmax(attention_weights.float(), dim=-1) - attention_nozeros = attention_prob.to(torch.bool) - nozeros_sum = torch.sum(attention_nozeros.to(torch.float), dim=-1, keepdim=True) - - # For very sparse situation, we need get round about 0s - key_sum = torch.sum(attention_prob, dim=-1, keepdim=True) - - # nozeros_sum should > 1 - key_mean = key_sum / (nozeros_sum + 1e-8) - - # std calculation - dis = (attention_prob - key_mean) * (attention_prob - key_mean) - - # if attention_prob[i] < threshold, then dis_masked[i] = 0; for all i - dis_masked = torch.where( - attention_nozeros, dis, attention_prob.new_zeros(attention_prob.size()) - ) - - key_var = torch.sum(dis_masked, dim=-1, keepdim=True) - key_var = key_var / (nozeros_sum - 1.0 + 1e-8) - key_std = torch.sqrt(key_var) - key_thread = key_mean - scale * key_std - - # if attention_prob[i] >= key_thread, then attention_prob[i] - # , otherwise "-inf" - inf_tensor = attention_prob.new_zeros(attention_prob.size()).detach() - inf_tensor[:] = float("-inf") - attention_weights_float = torch.where( - attention_prob < key_thread, - inf_tensor, - attention_weights.float(), - ) - - return attention_weights_float.type_as(attention_weights) - - -def layer_norm_backward_hook(module, grad_input, grad_output, clamp_value): - return tuple(torch.clamp(v, min=-clamp_value, max=clamp_value) for v in grad_input) diff --git a/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/LICENSE.md b/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/LICENSE.md deleted file mode 100644 index 5fd2e54913fd05b69de2874ec8f9a10c7f4e8d3f..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Tamil-TTS/ttsv/LICENSE.md +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2022 Open-Speech-EkStep - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/spaces/Hexamind/QnA/src/model/container.py b/spaces/Hexamind/QnA/src/model/container.py deleted file mode 100644 index ab5969f6419fa887257ba632bb0752d8b65f99a1..0000000000000000000000000000000000000000 --- a/spaces/Hexamind/QnA/src/model/container.py +++ /dev/null @@ -1,135 +0,0 @@ -from src.model.paragraph import Paragraph -from src.model.block import Block - -INFINITE = 99999 - - -class Container: - - def __init__(self, paragraphs: [Paragraph], title: Paragraph = None, level: int = 0, index: [int] = None, - father=None, id_=0): - if index is None: - index = [] - self.level = level - self.title = title - self.paragraphs = [] - self.children = [] - self.index = index - self.father = father # if not father, then the container is at the top of the hierarchy - self.id_ = int(str(1) + str(father.id_) + str(id_)) - if paragraphs: - self.paragraphs, self.children = self.create_children(paragraphs, level, index) - self.blocks = self.get_blocks() - - @property - def text(self): - text = "" - if self.title: - text = "Titre " + str(self.level) + " : " + self.title.text + '\n' - for p in self.paragraphs: - text += p.text + '\n' - for child in self.children: - text += child.text - return text - - @property - def text_chunks(self, chunk=500): - text_chunks = [] - text_chunk = "" - for p in self.paragraphs: - if chunk < len(text_chunk) + len(p.text): - text_chunks.append(text_chunk) - text_chunk = "" - else: - text_chunk += " " + p.text - if text_chunk and not text_chunk.isspace(): - text_chunks.append(text_chunk) - for child in self.children: - text_chunks += child.text_chunks - return text_chunks - - def get_blocks(self): - block = Block(level=self.level, index=self.index) - if self.title: - block.title = self.title.text - for p in self.paragraphs: - if not p.blank: - if p.text.startswith('##### '): - special_action = p.text.lstrip('##### ') - block.specials.append(special_action) - else: - block.content += p.text - blocks = [block] if block.content or block.specials else [] - for child in self.children: - blocks += child.blocks - return blocks - - def create_children(self, paragraphs: Paragraph, level: int, index: [int]) -> ([Paragraph], []): - """ - creates children containers or directly attached content - and returns the list of containers and contents of level+1 - :return: - [Content or Container] - """ - attached_paragraphs = [] - container_paragraphs = [] - container_title = None - children = [] - in_children = False - child_id = 0 - level = INFINITE - - while paragraphs: - p = paragraphs.pop(0) - if not in_children and not p.is_structure: - attached_paragraphs.append(p) - else: - in_children = True - if p.is_structure and p.level <= level: # if p is higher in hierarchy, then the child is completed - if container_paragraphs or container_title: - if level <= len(index): - index = index[:level] - index[-1] += 1 - else: - for i in range(level-len(index)): - index.append(1) - children.append(Container(container_paragraphs, container_title, level, index, self, child_id)) - child_id += 1 - container_paragraphs = [] - container_title = p - level = p.level - - else: # p is normal text or strictly lower in hierarchy, then the child continues to grow - container_paragraphs.append(p) - - if container_paragraphs or container_title: - if level <= len(index): - index = index[:level] - index[-1] += 1 - else: - for i in range(level - len(index)): - index.append(1) - children.append(Container(container_paragraphs, container_title, level, index, self, child_id)) - child_id += 1 - - return 
attached_paragraphs, children - - @property - def structure(self): - - self_structure = {str(self.id_): { - 'index': str(self.id_), - 'canMove': True, - 'isFolder': True, - 'children': [p.id_ for p in self.paragraphs] + [child.id_ for child in self.children], - 'canRename': True, - 'data': {}, - 'level': self.level, - 'rank': self.rank, - 'title': self.title.text if self.title else 'root' - }} - paragraphs_structure = [p.structure for p in self.paragraphs] - structure = [self_structure] + paragraphs_structure - for child in self.children: - structure += child.structure - return structure diff --git a/spaces/Hina4867/bingo/src/components/ui/voice/index.tsx b/spaces/Hina4867/bingo/src/components/ui/voice/index.tsx deleted file mode 100644 index 4adcb632226bfced8b97092782811edf08b56569..0000000000000000000000000000000000000000 --- a/spaces/Hina4867/bingo/src/components/ui/voice/index.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import './index.scss' - -export interface VoiceProps extends CSSPropertyRule { - num?: number; - duration?: number; -} -export default function Voice({ duration = 400, num = 7, ...others }) { - return ( -
    - {Array.from({ length: num }).map((_, index) => { - const randomDuration = Math.random() * 100 + duration - const initialDelay = Math.random() * 2 * duration - const initialScale = Math.sin((index + 1) * Math.PI / num) - return ( -
    - ) - })} -
    - ) -} diff --git a/spaces/Hoodady/3DFuse/ldm/models/diffusion/__init__.py b/spaces/Hoodady/3DFuse/ldm/models/diffusion/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/preprocessing/denoiser/pretrained.py b/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/preprocessing/denoiser/pretrained.py deleted file mode 100644 index 2fa846075b6872cdcc0baebca0b9acbb9ffcd287..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/speech_synthesis/preprocessing/denoiser/pretrained.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -# author: adefossez - -import logging - -import torch.hub - -from .demucs import Demucs -from .utils import deserialize_model - -logger = logging.getLogger(__name__) -ROOT = "https://dl.fbaipublicfiles.com/adiyoss/denoiser/" -DNS_48_URL = ROOT + "dns48-11decc9d8e3f0998.th" -DNS_64_URL = ROOT + "dns64-a7761ff99a7d5bb6.th" -MASTER_64_URL = ROOT + "master64-8a5dfb4bb92753dd.th" - - -def _demucs(pretrained, url, **kwargs): - model = Demucs(**kwargs) - if pretrained: - state_dict = torch.hub.load_state_dict_from_url(url, map_location='cpu') - model.load_state_dict(state_dict) - return model - - -def dns48(pretrained=True): - return _demucs(pretrained, DNS_48_URL, hidden=48) - - -def dns64(pretrained=True): - return _demucs(pretrained, DNS_64_URL, hidden=64) - - -def master64(pretrained=True): - return _demucs(pretrained, MASTER_64_URL, hidden=64) - - -def add_model_flags(parser): - group = parser.add_mutually_exclusive_group(required=False) - group.add_argument( - "-m", "--model_path", help="Path to local trained model." - ) - group.add_argument( - "--dns48", action="store_true", - help="Use pre-trained real time H=48 model trained on DNS." - ) - group.add_argument( - "--dns64", action="store_true", - help="Use pre-trained real time H=64 model trained on DNS." - ) - group.add_argument( - "--master64", action="store_true", - help="Use pre-trained real time H=64 model trained on DNS and Valentini." - ) - - -def get_model(args): - """ - Load local model package or torchhub pre-trained model. - """ - if args.model_path: - logger.info("Loading model from %s", args.model_path) - pkg = torch.load(args.model_path) - model = deserialize_model(pkg) - elif args.dns64: - logger.info("Loading pre-trained real time H=64 model trained on DNS.") - model = dns64() - elif args.master64: - logger.info( - "Loading pre-trained real time H=64 model trained on DNS and Valentini." 
- ) - model = master64() - else: - logger.info("Loading pre-trained real time H=48 model trained on DNS.") - model = dns48() - logger.debug(model) - return model diff --git a/spaces/Illumotion/Koboldcpp/convert-lora-to-ggml.py b/spaces/Illumotion/Koboldcpp/convert-lora-to-ggml.py deleted file mode 100644 index a937410dd8a9f3ad6c216617abb4bd661a161555..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/convert-lora-to-ggml.py +++ /dev/null @@ -1,137 +0,0 @@ -#!/usr/bin/env python3 -from __future__ import annotations - -import json -import os -import re -import struct -import sys -from typing import Any, BinaryIO, Sequence - -import numpy as np -import torch - -NUMPY_TYPE_TO_FTYPE: dict[str, int] = {"float32": 0, "float16": 1} - - -HF_SUBLAYER_TO_GGML = { - "self_attn.q_proj": "attn_q", - "self_attn.k_proj": "attn_k", - "self_attn.v_proj": "attn_v", - "self_attn.o_proj": "attn_output", - "mlp.gate_proj": "ffn_gate", - "mlp.down_proj": "ffn_down", - "mlp.up_proj": "ffn_up", - "input_layernorm": "attn_norm", - "post_attention_layernorm": "ffn_norm", -} - - -def translate_tensor_name(t: str) -> str: - match = re.match(r".*layers\.(\d+)\.(\w+\.\w+)\.lora_(A|B)\.weight", t) - if match: - nn = match.group(1) - sub_layer = match.group(2) - lora_type = match.group(3) - - sub_layer_renamed = HF_SUBLAYER_TO_GGML.get(sub_layer) - if sub_layer_renamed is None: - print(f"Error: unrecognized sub-layer {sub_layer} in tensor {t}") - sys.exit(1) - - output_string = ( - f"blk.{nn}.{HF_SUBLAYER_TO_GGML[sub_layer]}.weight.lora{lora_type}" - ) - return output_string - else: - print(f"Error: unrecognized tensor {t}") - sys.exit(1) - - -def write_file_header(fout: BinaryIO, params: dict[str, Any]) -> None: - fout.write(b"ggla"[::-1]) # magic (ggml lora) - fout.write(struct.pack("i", 1)) # file version - fout.write(struct.pack("i", params["r"])) - # https://opendelta.readthedocs.io/en/latest/modules/deltas.html says that `lora_alpha` is an int - # but some models ship a float value instead - # let's convert to int, but fail if lossless conversion is not possible - assert ( - int(params["lora_alpha"]) == params["lora_alpha"] - ), "cannot convert float to int losslessly" - fout.write(struct.pack("i", int(params["lora_alpha"]))) - - -def write_tensor_header( - self, name: str, shape: Sequence[int], data_type: np.dtype[Any] -) -> None: - sname = name.encode("utf-8") - fout.write( - struct.pack( - "iii", - len(shape), - len(sname), - NUMPY_TYPE_TO_FTYPE[data_type.name], - ) - ) - fout.write(struct.pack("i" * len(shape), *shape[::-1])) - fout.write(sname) - fout.seek((fout.tell() + 31) & -32) - - -if len(sys.argv) != 2: - print(f"Usage: python {sys.argv[0]} ") - print( - "Path must contain HuggingFace PEFT LoRA files 'adapter_config.json' and 'adapter_model.bin'" - ) - sys.exit(1) - -input_json = os.path.join(sys.argv[1], "adapter_config.json") -input_model = os.path.join(sys.argv[1], "adapter_model.bin") -output_path = os.path.join(sys.argv[1], "ggml-adapter-model.bin") - -model = torch.load(input_model, map_location="cpu") - -with open(input_json, "r") as f: - params = json.load(f) - -if params["peft_type"] != "LORA": - print(f"Error: unsupported adapter type {params['peft_type']}, expected LORA") - sys.exit(1) - -if params["fan_in_fan_out"] is True: - print("Error: param fan_in_fan_out is not supported") - sys.exit(1) - -if params["bias"] is not None and params["bias"] != "none": - print("Error: param bias is not supported") - sys.exit(1) - -# TODO: these seem to be layers that have been 
trained but without lora. -# doesn't seem widely used but eventually should be supported -if params["modules_to_save"] is not None and len(params["modules_to_save"]) > 0: - print("Error: param modules_to_save is not supported") - sys.exit(1) - -with open(output_path, "wb") as fout: - fout.truncate() - - write_file_header(fout, params) - for k, v in model.items(): - if k.endswith(".default.weight"): - k = k.replace(".default.weight", ".weight") - if k in ["llama_proj.weight", "llama_proj.bias"]: - continue - if k.endswith("lora_A.weight"): - if v.dtype != torch.float16 and v.dtype != torch.float32: - v = v.float() - v = v.T - else: - v = v.float() - - t = v.detach().numpy() - tname = translate_tensor_name(k) - print(f"{k} => {tname} {t.shape} {t.dtype} {t.nbytes/1024/1024:.2f}MB") - write_tensor_header(fout, tname, t.shape, t.dtype) - t.tofile(fout) - -print(f"Converted {input_json} and {input_model} to {output_path}") diff --git a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/saicinpainting/evaluation/masks/countless/test.py b/spaces/InpaintAI/Inpaint-Anything/third_party/lama/saicinpainting/evaluation/masks/countless/test.py deleted file mode 100644 index 7809beb7aeeb3bcb10d03093a564917b1f2b4786..0000000000000000000000000000000000000000 --- a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/saicinpainting/evaluation/masks/countless/test.py +++ /dev/null @@ -1,195 +0,0 @@ -from copy import deepcopy - -import numpy as np - -import countless2d -import countless3d - -def test_countless2d(): - def test_all_cases(fn, test_zero): - case1 = np.array([ [ 1, 2 ], [ 3, 4 ] ]).reshape((2,2,1,1)) # all different - case2 = np.array([ [ 1, 1 ], [ 2, 3 ] ]).reshape((2,2,1,1)) # two are same - case1z = np.array([ [ 0, 1 ], [ 2, 3 ] ]).reshape((2,2,1,1)) # all different - case2z = np.array([ [ 0, 0 ], [ 2, 3 ] ]).reshape((2,2,1,1)) # two are same - case3 = np.array([ [ 1, 1 ], [ 2, 2 ] ]).reshape((2,2,1,1)) # two groups are same - case4 = np.array([ [ 1, 2 ], [ 2, 2 ] ]).reshape((2,2,1,1)) # 3 are the same - case5 = np.array([ [ 5, 5 ], [ 5, 5 ] ]).reshape((2,2,1,1)) # all are the same - - is_255_handled = np.array([ [ 255, 255 ], [ 1, 2 ] ], dtype=np.uint8).reshape((2,2,1,1)) - - test = lambda case: fn(case) - - if test_zero: - assert test(case1z) == [[[[3]]]] # d - assert test(case2z) == [[[[0]]]] # a==b - else: - assert test(case1) == [[[[4]]]] # d - assert test(case2) == [[[[1]]]] # a==b - - assert test(case3) == [[[[1]]]] # a==b - assert test(case4) == [[[[2]]]] # b==c - assert test(case5) == [[[[5]]]] # a==b - - assert test(is_255_handled) == [[[[255]]]] - - assert fn(case1).dtype == case1.dtype - - test_all_cases(countless2d.simplest_countless, False) - test_all_cases(countless2d.quick_countless, False) - test_all_cases(countless2d.quickest_countless, False) - test_all_cases(countless2d.stippled_countless, False) - - - - methods = [ - countless2d.zero_corrected_countless, - countless2d.countless, - countless2d.countless_if, - # countless2d.counting, # counting doesn't respect order so harder to write a test - ] - - for fn in methods: - print(fn.__name__) - test_all_cases(fn, True) - -def test_stippled_countless2d(): - a = np.array([ [ 1, 2 ], [ 3, 4 ] ]).reshape((2,2,1,1)) - b = np.array([ [ 0, 2 ], [ 3, 4 ] ]).reshape((2,2,1,1)) - c = np.array([ [ 1, 0 ], [ 3, 4 ] ]).reshape((2,2,1,1)) - d = np.array([ [ 1, 2 ], [ 0, 4 ] ]).reshape((2,2,1,1)) - e = np.array([ [ 1, 2 ], [ 3, 0 ] ]).reshape((2,2,1,1)) - f = np.array([ [ 0, 0 ], [ 3, 4 ] ]).reshape((2,2,1,1)) - g = np.array([ [ 0, 2 ], [ 0, 4 ] 
]).reshape((2,2,1,1)) - h = np.array([ [ 0, 2 ], [ 3, 0 ] ]).reshape((2,2,1,1)) - i = np.array([ [ 1, 0 ], [ 0, 4 ] ]).reshape((2,2,1,1)) - j = np.array([ [ 1, 2 ], [ 0, 0 ] ]).reshape((2,2,1,1)) - k = np.array([ [ 1, 0 ], [ 3, 0 ] ]).reshape((2,2,1,1)) - l = np.array([ [ 1, 0 ], [ 0, 0 ] ]).reshape((2,2,1,1)) - m = np.array([ [ 0, 2 ], [ 0, 0 ] ]).reshape((2,2,1,1)) - n = np.array([ [ 0, 0 ], [ 3, 0 ] ]).reshape((2,2,1,1)) - o = np.array([ [ 0, 0 ], [ 0, 4 ] ]).reshape((2,2,1,1)) - z = np.array([ [ 0, 0 ], [ 0, 0 ] ]).reshape((2,2,1,1)) - - test = countless2d.stippled_countless - - # Note: We only tested non-matching cases above, - # cases f,g,h,i,j,k prove their duals work as well - # b/c if two pixels are black, either one can be chosen - # if they are different or the same. - - assert test(a) == [[[[4]]]] - assert test(b) == [[[[4]]]] - assert test(c) == [[[[4]]]] - assert test(d) == [[[[4]]]] - assert test(e) == [[[[1]]]] - assert test(f) == [[[[4]]]] - assert test(g) == [[[[4]]]] - assert test(h) == [[[[2]]]] - assert test(i) == [[[[4]]]] - assert test(j) == [[[[1]]]] - assert test(k) == [[[[1]]]] - assert test(l) == [[[[1]]]] - assert test(m) == [[[[2]]]] - assert test(n) == [[[[3]]]] - assert test(o) == [[[[4]]]] - assert test(z) == [[[[0]]]] - - bc = np.array([ [ 0, 2 ], [ 2, 4 ] ]).reshape((2,2,1,1)) - bd = np.array([ [ 0, 2 ], [ 3, 2 ] ]).reshape((2,2,1,1)) - cd = np.array([ [ 0, 2 ], [ 3, 3 ] ]).reshape((2,2,1,1)) - - assert test(bc) == [[[[2]]]] - assert test(bd) == [[[[2]]]] - assert test(cd) == [[[[3]]]] - - ab = np.array([ [ 1, 1 ], [ 0, 4 ] ]).reshape((2,2,1,1)) - ac = np.array([ [ 1, 2 ], [ 1, 0 ] ]).reshape((2,2,1,1)) - ad = np.array([ [ 1, 0 ], [ 3, 1 ] ]).reshape((2,2,1,1)) - - assert test(ab) == [[[[1]]]] - assert test(ac) == [[[[1]]]] - assert test(ad) == [[[[1]]]] - -def test_countless3d(): - def test_all_cases(fn): - alldifferent = [ - [ - [1,2], - [3,4], - ], - [ - [5,6], - [7,8] - ] - ] - allsame = [ - [ - [1,1], - [1,1], - ], - [ - [1,1], - [1,1] - ] - ] - - assert fn(np.array(alldifferent)) == [[[8]]] - assert fn(np.array(allsame)) == [[[1]]] - - twosame = deepcopy(alldifferent) - twosame[1][1][0] = 2 - - assert fn(np.array(twosame)) == [[[2]]] - - threemixed = [ - [ - [3,3], - [1,2], - ], - [ - [2,4], - [4,3] - ] - ] - assert fn(np.array(threemixed)) == [[[3]]] - - foursame = [ - [ - [4,4], - [1,2], - ], - [ - [2,4], - [4,3] - ] - ] - - assert fn(np.array(foursame)) == [[[4]]] - - fivesame = [ - [ - [5,4], - [5,5], - ], - [ - [2,4], - [5,5] - ] - ] - - assert fn(np.array(fivesame)) == [[[5]]] - - def countless3d_generalized(img): - return countless3d.countless_generalized(img, (2,2,2)) - def countless3d_dynamic_generalized(img): - return countless3d.dynamic_countless_generalized(img, (2,2,2)) - - methods = [ - countless3d.countless3d, - countless3d.dynamic_countless3d, - countless3d_generalized, - countless3d_dynamic_generalized, - ] - - for fn in methods: - test_all_cases(fn) \ No newline at end of file diff --git a/spaces/JSP/ar/README.md b/spaces/JSP/ar/README.md deleted file mode 100644 index db981ec239b0dcce35d3cdadc22c2741e058fe46..0000000000000000000000000000000000000000 --- a/spaces/JSP/ar/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Zephyr Playground -emoji: 💻 -colorFrom: gray -colorTo: red -sdk: gradio -sdk_version: 3.50.2 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git 
a/spaces/Jaehan/Image-Classification-Using-a-Vision-Transformer-1/app.py b/spaces/Jaehan/Image-Classification-Using-a-Vision-Transformer-1/app.py deleted file mode 100644 index 2697e05802fb4fd9d16ede5bd30e0bbe65e12ad7..0000000000000000000000000000000000000000 --- a/spaces/Jaehan/Image-Classification-Using-a-Vision-Transformer-1/app.py +++ /dev/null @@ -1,4 +0,0 @@ -import gradio as gr - -model_name = "models/microsoft/swin-tiny-patch4-window7-224" -gr.Interface.load(model_name, theme="default", css=".footer{display:none !important}", title=None).launch() \ No newline at end of file diff --git a/spaces/Jmmianda/memo/app.py b/spaces/Jmmianda/memo/app.py deleted file mode 100644 index 59375766c1886443a2fb877a73ce9513b7f9d551..0000000000000000000000000000000000000000 --- a/spaces/Jmmianda/memo/app.py +++ /dev/null @@ -1,72 +0,0 @@ -import torch -import gradio as gr -from torchaudio.sox_effects import apply_effects_file -from transformers import AutoFeatureExtractor, AutoModelForAudioXVector - -device = "cuda" if torch.cuda.is_available() else "cpu" -EFFECTS = [ - ['remix', '-'], # merge all channels - ["channels", "1"], # channels --> mono - ["rate", "16000"], # resample to 16000 Hz - ["gain", "-1.0"], # attenuate by -1 dB - ["silence", "1", "0.1", "0.1%", "-1", "0.1", "0.1%"], - # ['pad', '0', '1.5'], # to add 1.5 seconds at the end - ['trim', '0', '10'], # keep the first 10 seconds -] - -model_name = "microsoft/unispeech-sat-base-plus-sv" -feature_extractor = AutoFeatureExtractor.from_pretrained(model_name) -model = AutoModelForAudioXVector.from_pretrained(model_name).to(device) - -# Set the threshold value -THRESHOLD = 0.85 - -cosine_similarity = torch.nn.CosineSimilarity(dim=-1) - -def similarity_fn(path1, path2): - if not (path1 and path2): - return 'ERROR: Please record audio for *both* speakers!' - # Apply the effects to both audio input files - wav1, _ = apply_effects_file(path1, EFFECTS) - wav2, _ = apply_effects_file(path2, EFFECTS) - # Extract features - input1 = feature_extractor(wav1.squeeze(0), return_tensors="pt", sampling_rate=16000).input_values.to(device) - input2 = feature_extractor(wav2.squeeze(0), return_tensors="pt", sampling_rate=16000).input_values.to(device) - with torch.no_grad(): - emb1 = model(input1).embeddings - emb2 = model(input2).embeddings - emb1 = torch.nn.functional.normalize(emb1, dim=-1).to(device) - emb2 = torch.nn.functional.normalize(emb2, dim=-1).to(device) - similarity = cosine_similarity(emb1, emb2).cpu().numpy()[0] - if similarity >= THRESHOLD: - return f"Similarity score is {similarity:.0%}. Audio belongs to the same person." - elif similarity < THRESHOLD: - return f"Similarity score is {similarity:.0%}. Audio doesn't belong to the same person. Authentication failed!" - -inputs = [ - gr.inputs.Audio(source="microphone", type="filepath", optional=True, label="Speaker #1"), - gr.inputs.Audio(source="microphone", type="filepath", optional=True, label="Speaker #2"), -] - -outputs = gr.outputs.Textbox(label="Output Text") -description = ( - "This app evaluates whether the given audio speech inputs belong to the same individual based on Cosine Similarity score. 
" -) - -interface = gr.Interface( - fn=similarity_fn, - inputs=inputs, - outputs=outputs, - title="Voice Authentication with UniSpeech-SAT + X-Vectors", - description=description, - layout="horizontal", - theme="grass", - allow_flagging=False, - live=False, - examples=[ - ["cate_blanch.mp3", "cate_blanch_2.mp3"], - ["cate_blanch.mp3", "denzel_washington.mp3"] - ] -) - -interface.launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/JunchuanYu/SegRS/run.py b/spaces/JunchuanYu/SegRS/run.py deleted file mode 100644 index bf837da324a75b548578fa7593515bb185c60427..0000000000000000000000000000000000000000 --- a/spaces/JunchuanYu/SegRS/run.py +++ /dev/null @@ -1,71 +0,0 @@ -import sys -import os -import cv2 -import matplotlib -import matplotlib.pyplot as plt -import numpy as np -import torch -import torchvision -import glob -import gradio as gr -from PIL import Image -from segment_anything import SamAutomaticMaskGenerator, SamPredictor, sam_model_registry -import logging -from huggingface_hub import hf_hub_download - -token = os.environ['HUB_TOKEN'] -loc =hf_hub_download(repo_id="JunchuanYu/files_for_segmentRS", filename="utils.py",repo_type="dataset",local_dir='.',token=token) -sys.path.append(loc) -from utils import * - -with gr.Blocks(theme='gradio/soft') as demo: - gr.Markdown(title) - with gr.Accordion("Instructions For User 👉", open=False): - gr.Markdown(description) - x=gr.State(value=[]) - y=gr.State(value=[]) - label=gr.State(value=[]) - with gr.Row(): - with gr.Column(scale=13): - with gr.Row(): - with gr.Column(): - mode=gr.inputs.Radio(['Positive','Negative'], type="value",default='Positive',label='Types of sampling methods') - with gr.Column(): - clear_bn=gr.Button("Clear Selection") - interseg_button = gr.Button("Interactive Segment",variant='primary') - with gr.Row(): - input_img = gr.Image(label="Input") - gallery = gr.Image(label="Points") - - input_img.select(get_select_coords, [input_img, mode,x,y,label], [gallery,x,y,label]) - - with gr.Row(): - output_img = gr.Image(label="Result") - mask_img = gr.Image(label="Mask") - with gr.Row(): - with gr.Column(): - thresh = gr.Slider(minimum=0.8, maximum=1, value=0.90, step=0.01, interactive=True, label="Threshhold") - with gr.Column(): - points = gr.Slider(minimum=16, maximum=96, value=32, step=16, interactive=True, label="Points/Side") - - with gr.Column(scale=2,min_width=8): - example = gr.Examples( - examples=[[s,0.9,32] for s in glob.glob('./images/*')], - fn=auto_seg, - inputs=[input_img,thresh,points], - outputs=[output_img], - cache_examples=False,examples_per_page=5) - - autoseg_button = gr.Button("Auto Segment",variant="primary") - emptyBtn = gr.Button("Restart",variant="secondary") - - interseg_button.click(interactive_seg, inputs=[input_img,x,y,label], outputs=[output_img,mask_img]) - autoseg_button.click(auto_seg, inputs=[input_img,thresh,points], outputs=[mask_img]) - - clear_bn.click(clear_point,outputs=[gallery,mode,x,y,label],show_progress=True) - emptyBtn.click(reset_state,outputs=[input_img,gallery,output_img,mask_img,thresh,points,mode,x,y,label],show_progress=True,) - - gr.Markdown(descriptionend) -if __name__ == "__main__": - demo.launch(debug=False,show_api=False) - \ No newline at end of file diff --git a/spaces/Kangarroar/ApplioRVC-Inference/infer/lib/uvr5_pack/lib_v5/layers_123821KB.py b/spaces/Kangarroar/ApplioRVC-Inference/infer/lib/uvr5_pack/lib_v5/layers_123821KB.py deleted file mode 100644 index 4fc1b5cb85a3327f60cbb9f5deffbeeaaac516ad..0000000000000000000000000000000000000000 --- 
a/spaces/Kangarroar/ApplioRVC-Inference/infer/lib/uvr5_pack/lib_v5/layers_123821KB.py +++ /dev/null @@ -1,118 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = nn.Sequential( - Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/spaces/Kevin676/AutoGPT/CONTRIBUTING.md b/spaces/Kevin676/AutoGPT/CONTRIBUTING.md deleted file mode 100644 index 79169a0c1951853303f73ffa1fddb3518685606a..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/AutoGPT/CONTRIBUTING.md +++ /dev/null @@ -1,105 +0,0 @@ -# Contributing to ProjectName - -First of all, thank you for considering contributing to our project! 
We appreciate your time and effort, and we value any contribution, whether it's reporting a bug, suggesting a new feature, or submitting a pull request. - -This document provides guidelines and best practices to help you contribute effectively. - -## Table of Contents - -- [Code of Conduct](#code-of-conduct) -- [Getting Started](#getting-started) -- [How to Contribute](#how-to-contribute) - - [Reporting Bugs](#reporting-bugs) - - [Suggesting Enhancements](#suggesting-enhancements) - - [Submitting Pull Requests](#submitting-pull-requests) -- [Style Guidelines](#style-guidelines) - - [Code Formatting](#code-formatting) - - [Pre-Commit Hooks](#pre-commit-hooks) - -## Code of Conduct - -By participating in this project, you agree to abide by our [Code of Conduct](CODE_OF_CONDUCT.md). Please read it to understand the expectations we have for everyone who contributes to this project. - -## 📢 A Quick Word -Right now we will not be accepting any Contributions that add non-essential commands to Auto-GPT. - -However, you absolutely can still add these commands to Auto-GPT in the form of plugins. Please check out this [template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template). -> ⚠️ Plugin support is expected to ship within the week. You can follow PR #757 for more updates! - -## Getting Started - -To start contributing, follow these steps: - -1. Fork the repository and clone your fork. -2. Create a new branch for your changes (use a descriptive name, such as `fix-bug-123` or `add-new-feature`). -3. Make your changes in the new branch. -4. Test your changes thoroughly. -5. Commit and push your changes to your fork. -6. Create a pull request following the guidelines in the [Submitting Pull Requests](#submitting-pull-requests) section. - -## How to Contribute - -### Reporting Bugs - -If you find a bug in the project, please create an issue on GitHub with the following information: - -- A clear, descriptive title for the issue. -- A description of the problem, including steps to reproduce the issue. -- Any relevant logs, screenshots, or other supporting information. - -### Suggesting Enhancements - -If you have an idea for a new feature or improvement, please create an issue on GitHub with the following information: - -- A clear, descriptive title for the issue. -- A detailed description of the proposed enhancement, including any benefits and potential drawbacks. -- Any relevant examples, mockups, or supporting information. - -### Submitting Pull Requests - -When submitting a pull request, please ensure that your changes meet the following criteria: - -- Your pull request should be atomic and focus on a single change. -- Your pull request should include tests for your change. -- You should have thoroughly tested your changes with multiple different prompts. -- You should have considered potential risks and mitigations for your changes. -- You should have documented your changes clearly and comprehensively. -- You should not include any unrelated or "extra" small tweaks or changes. - -## Style Guidelines - -### Code Formatting - -We use the `black` code formatter to maintain a consistent coding style across the project. Please ensure that your code is formatted using `black` before submitting a pull request. You can install `black` using `pip`: - -```bash -pip install black -``` - -To format your code, run the following command in the project's root directory: - -```bash -black . 
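# Optionally, verify formatting without changing any files (black's standard --check flag):
black --check .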
-``` -### Pre-Commit Hooks -We use pre-commit hooks to ensure that code formatting and other checks are performed automatically before each commit. To set up pre-commit hooks for this project, follow these steps: - -Install the pre-commit package using pip: -```bash -pip install pre-commit -``` - -Run the following command in the project's root directory to install the pre-commit hooks: -```bash -pre-commit install -``` - -Now, the pre-commit hooks will run automatically before each commit, checking your code formatting and other requirements. - -If you encounter any issues or have questions, feel free to reach out to the maintainers or open a new issue on GitHub. We're here to help and appreciate your efforts to contribute to the project. - -Happy coding, and once again, thank you for your contributions! - -Maintainers will look at PR that have no merge conflicts when deciding what to add to the project. Make sure your PR shows up here: - -https://github.com/Torantulino/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-is%3Aconflict+ \ No newline at end of file diff --git a/spaces/Kevin676/AutoGPT/autogpt/processing/html.py b/spaces/Kevin676/AutoGPT/autogpt/processing/html.py deleted file mode 100644 index 81387b12adab5023150c55f2075ddd40b554f386..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/AutoGPT/autogpt/processing/html.py +++ /dev/null @@ -1,33 +0,0 @@ -"""HTML processing functions""" -from __future__ import annotations - -from bs4 import BeautifulSoup -from requests.compat import urljoin - - -def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]: - """Extract hyperlinks from a BeautifulSoup object - - Args: - soup (BeautifulSoup): The BeautifulSoup object - base_url (str): The base URL - - Returns: - List[Tuple[str, str]]: The extracted hyperlinks - """ - return [ - (link.text, urljoin(base_url, link["href"])) - for link in soup.find_all("a", href=True) - ] - - -def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]: - """Format hyperlinks to be displayed to the user - - Args: - hyperlinks (List[Tuple[str, str]]): The hyperlinks to format - - Returns: - List[str]: The formatted hyperlinks - """ - return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks] diff --git a/spaces/Kevin676/Real-Time-Voice-Cloning/utils/__init__.py b/spaces/Kevin676/Real-Time-Voice-Cloning/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Khalida1w/denoising/app.py b/spaces/Khalida1w/denoising/app.py deleted file mode 100644 index e10f70ecdd5b10da6c9f8bd985bfd23b0df41caf..0000000000000000000000000000000000000000 --- a/spaces/Khalida1w/denoising/app.py +++ /dev/null @@ -1,248 +0,0 @@ -import librosa -import tensorflow as tf -from tensorflow.keras.models import model_from_json -import soundfile as sf -import numpy as np -import os -import scipy -from scipy.io import wavfile -import gradio as gr - -def audio_to_audio_frame_stack(sound_data, frame_length, hop_length_frame): - """This function take an audio and split into several frame - in a numpy matrix of size (nb_frame,frame_length)""" - - sequence_sample_length = sound_data.shape[0] - - sound_data_list = [sound_data[start:start + frame_length] for start in range( - 0, sequence_sample_length - frame_length + 1, hop_length_frame)] # get sliding windows - sound_data_array = np.vstack(sound_data_list) - - return sound_data_array - - -def audio_files_to_numpy(audio_dir, list_audio_files, 
sample_rate, frame_length, hop_length_frame, min_duration): - """This function take audio files of a directory and merge them - in a numpy matrix of size (nb_frame,frame_length) for a sliding window of size hop_length_frame""" - - list_sound_array = [] - - for file in list_audio_files: - # open the audio file - y, sr = librosa.load(os.path.join(audio_dir, file), sr=sample_rate) - total_duration = librosa.get_duration(y=y, sr=sr) - - if (total_duration >= min_duration): - list_sound_array.append(audio_to_audio_frame_stack( - y, frame_length, hop_length_frame)) - else: - print( - f"The following file {os.path.join(audio_dir,file)} is below the min duration") - - return np.vstack(list_sound_array) - - -def blend_noise_randomly(voice, noise, nb_samples, frame_length): - """This function takes as input numpy arrays representing frames - of voice sounds, noise sounds and the number of frames to be created - and return numpy arrays with voice randomly blend with noise""" - - prod_voice = np.zeros((nb_samples, frame_length)) - prod_noise = np.zeros((nb_samples, frame_length)) - prod_noisy_voice = np.zeros((nb_samples, frame_length)) - - for i in range(nb_samples): - id_voice = np.random.randint(0, voice.shape[0]) - id_noise = np.random.randint(0, noise.shape[0]) - level_noise = np.random.uniform(0.2, 0.8) - prod_voice[i, :] = voice[id_voice, :] - prod_noise[i, :] = level_noise * noise[id_noise, :] - prod_noisy_voice[i, :] = prod_voice[i, :] + prod_noise[i, :] - - return prod_voice, prod_noise, prod_noisy_voice - - -def audio_to_magnitude_db_and_phase(n_fft, hop_length_fft, audio): - """This function takes an audio and convert into spectrogram, - it returns the magnitude in dB and the phase""" - - stftaudio = librosa.stft(audio, n_fft=n_fft, hop_length=hop_length_fft) - stftaudio_magnitude, stftaudio_phase = librosa.magphase(stftaudio) - - stftaudio_magnitude_db = librosa.amplitude_to_db( - stftaudio_magnitude, ref=np.max) - - return stftaudio_magnitude_db, stftaudio_phase - - -def numpy_audio_to_matrix_spectrogram(numpy_audio, dim_square_spec, n_fft, hop_length_fft): - """This function takes as input a numpy audi of size (nb_frame,frame_length), and return - a numpy containing the matrix spectrogram for amplitude in dB and phase. 
It will have the size - (nb_frame,dim_square_spec,dim_square_spec)""" - - nb_audio = numpy_audio.shape[0] - - m_mag_db = np.zeros((nb_audio, dim_square_spec, dim_square_spec)) - m_phase = np.zeros((nb_audio, dim_square_spec, dim_square_spec), dtype=complex) - - for i in range(nb_audio): - m_mag_db[i, :, :], m_phase[i, :, :] = audio_to_magnitude_db_and_phase( - n_fft, hop_length_fft, numpy_audio[i]) - - return m_mag_db, m_phase - - -def magnitude_db_and_phase_to_audio(frame_length, hop_length_fft, stftaudio_magnitude_db, stftaudio_phase): - """This functions reverts a spectrogram to an audio""" - - stftaudio_magnitude_rev = librosa.db_to_amplitude(stftaudio_magnitude_db, ref=1.0) - - # taking magnitude and phase of audio - audio_reverse_stft = stftaudio_magnitude_rev * stftaudio_phase - audio_reconstruct = librosa.core.istft(audio_reverse_stft, hop_length=hop_length_fft, length=frame_length) - - return audio_reconstruct - -def matrix_spectrogram_to_numpy_audio(m_mag_db, m_phase, frame_length, hop_length_fft) : - """This functions reverts the matrix spectrograms to numpy audio""" - - list_audio = [] - - nb_spec = m_mag_db.shape[0] - - for i in range(nb_spec): - - audio_reconstruct = magnitude_db_and_phase_to_audio(frame_length, hop_length_fft, m_mag_db[i], m_phase[i]) - list_audio.append(audio_reconstruct) - - return np.vstack(list_audio) - -def scaled_in(matrix_spec): - "global scaling apply to noisy voice spectrograms (scale between -1 and 1)" - matrix_spec = (matrix_spec + 46)/50 - return matrix_spec - -def scaled_ou(matrix_spec): - "global scaling apply to noise models spectrograms (scale between -1 and 1)" - matrix_spec = (matrix_spec -6 )/82 - return matrix_spec - -def inv_scaled_in(matrix_spec): - "inverse global scaling apply to noisy voices spectrograms" - matrix_spec = matrix_spec * 50 - 46 - return matrix_spec - -def inv_scaled_ou(matrix_spec): - "inverse global scaling apply to noise models spectrograms" - matrix_spec = matrix_spec * 82 + 6 - return matrix_spec - - -def prediction(weights_path, name_model, audio_dir_prediction, dir_save_prediction, audio_input_prediction, -audio_output_prediction, sample_rate, min_duration, frame_length, hop_length_frame, n_fft, hop_length_fft): - """ This function takes as input pretrained weights, noisy voice sound to denoise, predict - the denoise sound and save it to disk. 
- """ - - # load json and create model - json_file = open(weights_path+'/'+name_model+'.json', 'r') - loaded_model_json = json_file.read() - json_file.close() - loaded_model = model_from_json(loaded_model_json) - # load weights into new model - loaded_model.load_weights(weights_path+'/'+name_model+'.h5') - print("Loaded model from disk") - - # Extracting noise and voice from folder and convert to numpy - audio = audio_files_to_numpy(audio_dir_prediction, audio_input_prediction, sample_rate, - frame_length, hop_length_frame, min_duration) - # audio = audioData - #Dimensions of squared spectrogram - dim_square_spec = int(n_fft / 2) + 1 - print(dim_square_spec) - - # Create Amplitude and phase of the sounds - m_amp_db_audio, m_pha_audio = numpy_audio_to_matrix_spectrogram( - audio, dim_square_spec, n_fft, hop_length_fft) - - #global scaling to have distribution -1/1 - X_in = scaled_in(m_amp_db_audio) - #Reshape for prediction - X_in = X_in.reshape(X_in.shape[0],X_in.shape[1],X_in.shape[2],1) - #Prediction using loaded network - X_pred = loaded_model.predict(X_in) - #Rescale back the noise model - inv_sca_X_pred = inv_scaled_ou(X_pred) - #Remove noise model from noisy speech - X_denoise = m_amp_db_audio - inv_sca_X_pred[:,:,:,0] - #Reconstruct audio from denoised spectrogram and phase - print(X_denoise.shape) - print(m_pha_audio.shape) - print(frame_length) - print(hop_length_fft) - audio_denoise_recons = matrix_spectrogram_to_numpy_audio(X_denoise, m_pha_audio, frame_length, hop_length_fft) - #Number of frames - nb_samples = audio_denoise_recons.shape[0] - #Save all frames in one file - denoise_long = audio_denoise_recons.reshape(1, nb_samples * frame_length)*10 - # librosa.output.write_wav(dir_save_prediction + audio_output_prediction, denoise_long[0, :], sample_rate) - print(audio_output_prediction) - sf.write(audio_output_prediction , denoise_long[0, :], sample_rate) - -def denoise_audio(audioName): - - sr, data = audioName - sf.write("temp.wav",data, sr) - testNo = "temp" - audio_dir_prediction = os.path.abspath("/")+ str(testNo) +".wav" - sample_rate, data = audioName[0], audioName[1] - len_data = len(data) # holds length of the numpy array - - - - - - t = len_data / sample_rate # returns duration but in floats - print("t:",t) - weights_path = os.path.abspath("./") - name_model = "model_unet" - audio_dir_prediction = os.path.abspath("./") - dir_save_prediction = os.path.abspath("./") - audio_output_prediction = "test.wav" - audio_input_prediction = ["temp.wav"] - sample_rate = 8000 - min_duration = t - frame_length = 8064 - hop_length_frame = 8064 - n_fft = 255 - hop_length_fft = 63 - - dim_square_spec = int(n_fft / 2) + 1 - - prediction(weights_path, name_model, audio_dir_prediction, dir_save_prediction, audio_input_prediction, - audio_output_prediction, sample_rate, min_duration, frame_length, hop_length_frame, n_fft, hop_length_fft) - print(audio_output_prediction) - return audio_output_prediction - - -examples = [ - [os.path.abspath("crowdNoise.wav")], - [os.path.abspath("CrowdNoise2.wav")], - [os.path.abspath("whiteNoise.wav")] -] - - - -iface = gr.Interface(fn = denoise_audio, - inputs = 'audio', - outputs = 'audio', - title = 'audio to denoised Audio Application', - description = 'A simple application to denoise audio speech using UNet deep learning model. Upload your own audio, or click one of the examples to load them.', - article = - '''
    '''

    All you need to do is upload the audio file and hit submit, then wait for processing to finish. After that, click on Play/Pause to listen to the audio. The audio is saved in wav format.

    ''', - examples=examples - ) - -iface.launch() \ No newline at end of file diff --git a/spaces/Kimata/Sanskrit-TTS/cleaner_utils.py b/spaces/Kimata/Sanskrit-TTS/cleaner_utils.py deleted file mode 100644 index 82d3319145eee9ddb9682f3278e42ffb2007fa15..0000000000000000000000000000000000000000 --- a/spaces/Kimata/Sanskrit-TTS/cleaner_utils.py +++ /dev/null @@ -1,250 +0,0 @@ -import re -import unidecode -import numpy as np -import pandas as pd -import datetime_cleaner -from datetime import datetime - - -def run(): - - # The path to the local git repo for Indic NLP library - INDIC_NLP_LIB_HOME=r"./indic_nlp_library" - - # The path to the local git repo for Indic NLP Resources - INDIC_NLP_RESOURCES=r"./indic_nlp_resources" - import sys - sys.path.append(r'{}'.format(INDIC_NLP_LIB_HOME)) - - from indicnlp import common - common.set_resources_path(INDIC_NLP_RESOURCES) - - from indicnlp import loader - loader.load() - -run() - -from indicnlp.normalize.indic_normalize import IndicNormalizerFactory -from indicnlp.tokenize import sentence_tokenize -import normalizer_utils - -lang='sa' -factory=IndicNormalizerFactory() -normalizer=factory.get_normalizer(lang) - - -DEFAULT_TEXT = "अयं द्वितीयशब्दः २ अस्ति। प्रथमः शब्दः १ अस्ति। २३ २ ३ अन्ये शब्दाः सर्वे द्वितीयं शब्दं प्रयोजयन्ति। इत्थं सप्ततिः शब्दाः लिखिताः सन्ति। अस्मिन लेखने सर्वे अक्षराः संस्कृते लिखिताः सन्ति। अन्ये लिखन्ति ३, ४, ५ इत्यादि। तथापि, अहं एकं अक्षरं एव उपयोगामि।" -# DEFAULT_TEXT = "आन्द्रिया २ महोदयः, जोसेफ् ३२ 6 7 महोदयः च कालः श्रीमती जेनेट् इत्यनेन सह स्थलं गतवन्तौ।" - -df = pd.read_csv('non_standard_words.txt') -columns = ['non_standard_words'] -df.columns = columns -nsw = df['non_standard_words'].tolist() -dict_num = normalizer_utils.dict_num -punctuation_marks = normalizer_utils.punctuation_marks - - -def tokenize_sentence(text): - '''Tokenize a paragraph into sentences''' - sentences = sentence_tokenize.sentence_split(text, lang='sa') - sentences = " # ".join(sentences) - return sentences - - -def segment_sentence(text): - '''Segment a sentence into individual words''' - segmented_sentence = text.split(' ') - return segmented_sentence - - -def parse_datetime(text): - '''Parses date and time and returns them as normalized texts''' - text_list = [] - for word in text: - if datetime_cleaner.is_date_or_year(word): - #If the word is a datetime string, normalize the date. - normalized_date = datetime_cleaner.normalize_date(word) - text_list.append(normalized_date) - else: - text_list.append(word) - text_str = " ".join(text_list) - return text_str - -def handle_abbreviations(text): - '''Handles abbreviations''' - abbrev_dict = normalizer_utils.abbreviation_dict - text_list = [] - output_string = " " - for char in text: - if char in abbrev_dict: - output_string = abbrev_dict[char] - char = char.replace(char, output_string) - - text_list.append(char) - text_str = " ".join(text_list) - return text_str - - -def remove_nsw(text): - """return non standard words""" - text_list = [] - for string in text: - if string in nsw: - string.replace(string, "") - text_list.append(string) - text_str = " ".join(text_list) - - return text_str - - -def normalize_text(text): - text_list = [] - output_string = " " - #Map sanskrit numbers to their normalized form. 
- for char in text.split(" "): - if char in dict_num: - output_string = dict_num[char] - char = char.replace(char, output_string) - - text_list.append(char) - text_str = " ".join(text_list) - return text_str - - -def syllabify_text(text): - '''Syllabifies text''' - text_list = [] - #Syllabify text - for char in text: - if char in normalizer_utils.DEPENDENT_VOWELS: - char = "(" + char + ")" - text_list.append(char) - else: - text_list.append(char) - - full_text = " + ".join(text_list).replace("'", "") - return full_text - - - -def clean_text(text): - processed_text = re.sub(r'\+ +', '', text) - processed_text = re.sub(': +', '\n \n', processed_text) - processed_text = re.sub(r'\+ ।', '\n \n', processed_text) - processed_text = re.sub(r'\+$', '', processed_text) - processed_text = re.sub(r'\+ , +', '', processed_text) - processed_text = re.sub(r'\+ #', '\n', processed_text) - return processed_text - -def remove_punctuation(text): - text_list = [] - for char in text: - if char in punctuation_marks: - char = char.replace(char, "") - text_list.append(char) - else: - text_list.append(char) - text_str = "".join(text_list) - - return text_str - -def preprocess_text(text): - cleaned_text = clean_text(text) - - #Remove unnecessary characters from a string. - text_cleaned = [] - for index, text in enumerate(cleaned_text.split('\n')): - if text.startswith('+'): - text = text[2:] - - elif text.startswith(' +'): - text = text[3:] - - elif text.endswith('+') or text.endswith(' +'): - text = text[:-2] - - text_cleaned.append(text) - - text_cleaned_str = "\n ".join(text_cleaned) - - return text_cleaned_str - -def pipeline(sentence): - '''The whole pipeline for cleaning text from text normalization to removing special characters.''' - tokenized_sentence = tokenize_sentence(sentence) - segmented_sentence_list = segment_sentence(tokenized_sentence) - formatted_datetime_list = list(map(datetime_cleaner.handle_time, segmented_sentence_list)) - formatted_datetime = ''.join(formatted_datetime_list) - parsed_datetime_sentence = parse_datetime(formatted_datetime) - formatted_abbreviations = list(map(handle_abbreviations, parsed_datetime_sentence)) - nsw_cleaned = remove_nsw(formatted_abbreviations) - normalized_text = normalize_text(nsw_cleaned) - syllabified_text = syllabify_text(normalized_text) - text_wo_punctuation = remove_punctuation(syllabified_text) - - cleaned_text = clean_text(text_wo_punctuation) - preprocessed_text = preprocess_text(cleaned_text) - return preprocessed_text - -g2p_vocab = ['(', ')', '+'] -def g2p_pipeline(text): - text_list = [] - for char in text: - if char in g2p_vocab: - char = char.replace(char, '') - text_list.append(char) - else: - text_list.append(char) - text = ''.join(text_list) - return text - - - -# vocab = ['ा', 'ि', 'ी', 'ु', 'ू', 'े', 'ै', 'ो', 'ौ', 'ं', 'ः', 'ृ', 'ॄ', '(', ')', '+', ' '] -vocab = ['(', ')', '+', ' '] -def voice_smoothening(text): - - '''This function removes syllables from text after performing text cleaning. - Used for g2p tab.''' - text_list = [] - #Loop through characters in text and remove special characters such as + and syllables. 
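    # Illustrative example (based on the vocab list above): 'स + (ं) + स' becomes 'संस'
    # once the '(', ')', '+' and space characters are stripped out.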
- for char in text: - # Search for brackets in the sentence - if char in vocab: - char = char.replace(char, '') - text_list.append(char) - else: - text_list.append(char) - - smoothened_text = "".join(text_list) - cleaned_smoothened_text = clean_text(smoothened_text) - return cleaned_smoothened_text - -g2p_dict = {'ॠ': 'ḹ', 'द': 'jha', 'र': 'o', 'व': 'ṅa', 'ओ': 'ṃ', 'अः': 'i', - 'क': 'va', 'श': 'ḍa', 'झ': 'la', 'स': 'ā', 'ऋ': 'na', 'अ': 'au', - 'आ': 'tha', 'उ': 'ṝ', 'ष': 'kha', 'ऌ': 'u', 'थ': 'ḷ', 'च': 'ṭa', - 'ठ': 'gha', 'घ': 'da', 'ऊ': 'ma', 'ढ': 'śa', 'प': 'ca', 'ड': 'ya', - 'इ': 'ṭha', 'य': 'ṇa', 'त': 'ha', 'अं': 'ja', 'फ': 'ṣa', 'ग': 'ṛ', - 'ऐ': 'ta', 'ट': 'pa', 'ल': 'ba', 'ई': 'sa', 'ए': 'ai', 'ब': 'dha', - 'न': 'pha', 'ण': 'a', 'ॡ': 'ḥ', 'ह': 'e', 'ख': 'ga', 'छ': 'ī', - 'ञ': 'cha', 'म': 'bha', 'औ': 'ra', 'ङ': 'ña', 'ध': 'ka', 'भ': 'ḍha', 'ज': 'ū'} - - -def grapheme_to_phoneme(text): - '''Takes cleaned text (grapheme) as input and returns its phoneme equivalent. Done after voice smoothening part.''' - smoothened_text = voice_smoothening(text) - text_list = [] - for char in smoothened_text: - #Search for graphemes. - if char in g2p_dict.keys(): - char = char.replace(char, g2p_dict[char]) - print(f"Replaced char: {char}") - text_list.append(char) - else: - print(f"Unreplaced char: {char}") - text_list.append(char) - g2p_text = ' '.join(text_list) - return g2p_text - - -sample_text = 'स + (ं) + स + ् + क + (ृ) + त + म + ् + ज + ग + त + (ः) + ए + क + त + म + (ा) + अ + त + (ि) + प + ् + र + (ा) + च + (ी) + न + (ा) + स + म + (ृ) + द + ् + ध + (ा) + श + (ा) + स + ् + त + ् + र + (ी) + य + (ा) + च + भ + (ा) + ष + (ा) + स + (ु) + व + र + ् + त + त + (े)' diff --git a/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/normalize/indic_normalize.py b/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/normalize/indic_normalize.py deleted file mode 100644 index 23ed8e2fda97cf2d628625b7475e5ac3c6e8e9cd..0000000000000000000000000000000000000000 --- a/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/normalize/indic_normalize.py +++ /dev/null @@ -1,974 +0,0 @@ -# -*- coding: utf-8 -*- - -# -# Copyright (c) 2013-present, Anoop Kunchukuttan -# All rights reserved. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -# - -#Program for normalization of text written in Unicode. This is mainly geared towards Indic scripts -# -# @author Anoop Kunchukuttan -# - -import sys, codecs, string, itertools, re -from indicnlp import langinfo - - -class NormalizerI(object): - """ - The normalizer classes do the following: - * Some characters have multiple Unicode codepoints. The normalizer chooses a single standard representation - * Some control characters are deleted - * While typing using the Latin keyboard, certain typical mistakes occur which are corrected by the module - Base class for normalizer. Performs some common normalization, which includes: - * Byte order mark, word joiner, etc. removal - * ZERO_WIDTH_NON_JOINER and ZERO_WIDTH_JOINER removal - * ZERO_WIDTH_SPACE and NO_BREAK_SPACE replaced by spaces - Script specific normalizers should derive from this class and override the normalize() method. 
- They can call the super class 'normalize() method to avail of the common normalization - """ - - BYTE_ORDER_MARK='\uFEFF' - BYTE_ORDER_MARK_2='\uFFFE' - WORD_JOINER='\u2060' - SOFT_HYPHEN='\u00AD' - - ZERO_WIDTH_SPACE='\u200B' - NO_BREAK_SPACE='\u00A0' - - ZERO_WIDTH_NON_JOINER='\u200C' - ZERO_WIDTH_JOINER='\u200D' - - def _normalize_punctuations(self, text): - """ - Normalize punctuations. - Applied many of the punctuation normalizations that are part of MosesNormalizer - from sacremoses - """ - text=text.replace(NormalizerI.BYTE_ORDER_MARK,'') - text=text.replace('„', r'"') - text=text.replace('“', r'"') - text=text.replace('”', r'"') - text=text.replace('–', r'-') - text=text.replace('—', r' - ') - text=text.replace('´', r"'") - text=text.replace('‘', r"'") - text=text.replace('‚', r"'") - text=text.replace('’', r"'") - text=text.replace("''", r'"') - text=text.replace('´´', r'"') - text=text.replace('…', r'...') - - return text - - def normalize(self,text): - pass - - -class BaseNormalizer(NormalizerI): - - def __init__(self,lang, - remove_nuktas=False, - nasals_mode='do_nothing', - do_normalize_chandras=False, - do_normalize_vowel_ending=False): - - self.lang=lang - self.remove_nuktas=remove_nuktas - self.nasals_mode=nasals_mode - self.do_normalize_chandras=do_normalize_chandras - self.do_normalize_vowel_ending=do_normalize_vowel_ending - - self._init_normalize_chandras() - self._init_normalize_nasals() - self._init_normalize_vowel_ending() - #self._init_visarga_correction() - - def _init_normalize_vowel_ending(self): - - if self.lang in langinfo.IE_LANGUAGES: - self.fn_vowel_ending=self._normalize_word_vowel_ending_ie - elif self.lang in langinfo.DRAVIDIAN_LANGUAGES: - self.fn_vowel_ending=self._normalize_word_vowel_ending_dravidian - else: - self.fn_vowel_ending=lambda x: x - - def _init_normalize_chandras(self): - - substitution_offsets =\ - [ - [0x0d , 0x0f], # chandra e, independent - [0x11 , 0x13], # chandra o, independent - [0x45 , 0x47], # chandra e , 0xde],pendent - [0x49 , 0x4b], # chandra o , 0xde],pendent - # [0x72 , 0x0f], # mr: chandra e, independent - - [0x00 , 0x02], # chandrabindu - [0x01 , 0x02], # chandrabindu - ] - - self.chandra_substitutions = [ - (langinfo.offset_to_char(x[0],self.lang), langinfo.offset_to_char(x[1],self.lang)) - for x in substitution_offsets ] - - def _normalize_chandras(self,text): - for match, repl in self.chandra_substitutions: - text=text.replace(match,repl) - return text - - def _init_to_anusvaara_strict(self): - """ - `r1_nasal=re.compile(r'\\u0919\\u094D([\\u0915-\\u0918])')` - """ - - pat_signatures=\ - [ - [0x19,0x15,0x18], - [0x1e,0x1a,0x1d], - [0x23,0x1f,0x22], - [0x28,0x24,0x27], - [0x29,0x24,0x27], - [0x2e,0x2a,0x2d], - ] - - halant_offset=0x4d - anusvaara_offset=0x02 - - pats=[] - - for pat_signature in pat_signatures: - pat=re.compile(r'{nasal}{halant}([{start_r}-{end_r}])'.format( - nasal=langinfo.offset_to_char(pat_signature[0],self.lang), - halant=langinfo.offset_to_char(halant_offset,self.lang), - start_r=langinfo.offset_to_char(pat_signature[1],self.lang), - end_r=langinfo.offset_to_char(pat_signature[2],self.lang), - )) - pats.append(pat) - - repl_string='{anusvaara}\\1'.format(anusvaara=langinfo.offset_to_char(anusvaara_offset,self.lang)) - - self.pats_repls=(pats,repl_string) - - def _to_anusvaara_strict(self,text): - - pats, repl_string = self.pats_repls - for pat in pats: - text=pat.sub(repl_string,text) - - return text - - def _init_to_anusvaara_relaxed(self): - """ - 
`r1_nasal=re.compile(r'\\u0919\\u094D([\\u0915-\\u0918])')` - """ - - nasals_list=[0x19,0x1e,0x23,0x28,0x29,0x2e] - nasals_list_str=','.join([langinfo.offset_to_char(x,self.lang) for x in nasals_list]) - - halant_offset=0x4d - anusvaara_offset=0x02 - - pat=re.compile(r'[{nasals_list_str}]{halant}'.format( - nasals_list_str=nasals_list_str, - halant=langinfo.offset_to_char(halant_offset,self.lang), - )) - - repl_string='{anusvaara}'.format(anusvaara=langinfo.offset_to_char(anusvaara_offset,self.lang)) - - self.pats_repls = (pat,repl_string) - - def _to_anusvaara_relaxed(self,text): - pat, repl_string = self.pats_repls - return pat.sub(repl_string,text) - - - def _init_to_nasal_consonants(self): - """ - `r1_nasal=re.compile(r'\\u0919\\u094D([\\u0915-\\u0918])')` - """ - - pat_signatures=\ - [ - [0x19,0x15,0x18], - [0x1e,0x1a,0x1d], - [0x23,0x1f,0x22], - [0x28,0x24,0x27], - [0x29,0x24,0x27], - [0x2e,0x2a,0x2d], - ] - - halant_offset=0x4d - anusvaara_offset=0x02 - - pats=[] - repl_strings=[] - - for pat_signature in pat_signatures: - pat=re.compile(r'{anusvaara}([{start_r}-{end_r}])'.format( - anusvaara=langinfo.offset_to_char(anusvaara_offset,self.lang), - start_r=langinfo.offset_to_char(pat_signature[1],self.lang), - end_r=langinfo.offset_to_char(pat_signature[2],self.lang), - )) - pats.append(pat) - repl_string='{nasal}{halant}\\1'.format( - nasal=langinfo.offset_to_char(pat_signature[0],self.lang), - halant=langinfo.offset_to_char(halant_offset,self.lang), - ) - repl_strings.append(repl_string) - - self.pats_repls=list(zip(pats,repl_strings)) - - def _to_nasal_consonants(self,text): - - for pat, repl in self.pats_repls: - text=pat.sub(repl,text) - - return text - - def _init_normalize_nasals(self): - - if self.nasals_mode == 'to_anusvaara_strict': - self._init_to_anusvaara_strict() - elif self.nasals_mode == 'to_anusvaara_relaxed': - self._init_to_anusvaara_relaxed() - elif self.nasals_mode == 'to_nasal_consonants': - self._init_to_nasal_consonants() - - def _normalize_nasals(self,text): - if self.nasals_mode == 'to_anusvaara_strict': - return self._to_anusvaara_strict(text) - elif self.nasals_mode == 'to_anusvaara_relaxed': - return self._to_anusvaara_relaxed(text) - elif self.nasals_mode == 'to_nasal_consonants': - return self._to_nasal_consonants(text) - else: - return text - - - def _normalize_word_vowel_ending_dravidian(self,word): - """ - for Dravidian - - consonant ending: add 'a' ki maatra - - halant ending: no change - - 'a' ki maatra: no change - """ - if len(word)>0 and langinfo.is_consonant(word[-1],self.lang): - return word+langinfo.offset_to_char(0x3e,self.lang) - else: - return word - - def _normalize_word_vowel_ending_ie(self,word): - """ - for IE - - consonant ending: add halant - - halant ending: no change - - 'a' ki maatra: no change - """ - if len(word)>0 and langinfo.is_consonant(word[-1],self.lang): - return word+langinfo.offset_to_char(langinfo.HALANTA_OFFSET,self.lang) - else: - return word - - def _normalize_vowel_ending(self,text): - return ' '.join([ self.fn_vowel_ending(w) for w in text.split(' ') ]) - - def normalize(self,text): - """ - Method to be implemented for normalization for each script - """ - text=text.replace(NormalizerI.BYTE_ORDER_MARK,'') - text=text.replace(NormalizerI.BYTE_ORDER_MARK_2,'') - text=text.replace(NormalizerI.WORD_JOINER,'') - text=text.replace(NormalizerI.SOFT_HYPHEN,'') - - text=text.replace(NormalizerI.ZERO_WIDTH_SPACE,' ') # ?? 
- text=text.replace(NormalizerI.NO_BREAK_SPACE,' ') - - text=text.replace(NormalizerI.ZERO_WIDTH_NON_JOINER, '') - text=text.replace(NormalizerI.ZERO_WIDTH_JOINER,'') - - text=self._normalize_punctuations(text) - - if self.do_normalize_chandras: - text=self._normalize_chandras(text) - text=self._normalize_nasals(text) - if self.do_normalize_vowel_ending: - text=self._normalize_vowel_ending(text) - - return text - - - def get_char_stats(self,text): - print(len(re.findall(NormalizerI.BYTE_ORDER_MARK,text))) - print(len(re.findall(NormalizerI.BYTE_ORDER_MARK_2,text))) - print(len(re.findall(NormalizerI.WORD_JOINER,text))) - print(len(re.findall(NormalizerI.SOFT_HYPHEN,text))) - - print(len(re.findall(NormalizerI.ZERO_WIDTH_SPACE,text) )) - print(len(re.findall(NormalizerI.NO_BREAK_SPACE,text))) - - print(len(re.findall(NormalizerI.ZERO_WIDTH_NON_JOINER,text))) - print(len(re.findall(NormalizerI.ZERO_WIDTH_JOINER,text))) - - #for mobj in re.finditer(NormalizerI.ZERO_WIDTH_NON_JOINER,text): - # print text[mobj.start()-10:mobj.end()+10].replace('\n', ' ').replace(NormalizerI.ZERO_WIDTH_NON_JOINER,'').encode('utf-8') - #print hex(ord(text[mobj.end():mobj.end()+1])) - - def correct_visarga(self,text,visarga_char,char_range): - text=re.sub(r'([\u0900-\u097f]):','\\1\u0903',text) - - - -class DevanagariNormalizer(BaseNormalizer): - """ - Normalizer for the Devanagari script. In addition to basic normalization by the super class, - * Replaces the composite characters containing nuktas by their decomposed form - * replace pipe character '|' by poorna virama character - * replace colon ':' by visarga if the colon follows a charcter in this script - - """ - - NUKTA='\u093C' - - def __init__(self,lang='hi',remove_nuktas=False,nasals_mode='do_nothing', - do_normalize_chandras=False,do_normalize_vowel_ending=False): - super(DevanagariNormalizer,self).__init__(lang,remove_nuktas,nasals_mode,do_normalize_chandras,do_normalize_vowel_ending) - - def normalize(self,text): - - # common normalization for Indic scripts - text=super(DevanagariNormalizer,self).normalize(text) - - # chandra a replacement for Marathi - text=text.replace('\u0972','\u090f') - - # decomposing Nukta based composite characters - text=text.replace('\u0929','\u0928'+DevanagariNormalizer.NUKTA) - text=text.replace('\u0931','\u0930'+DevanagariNormalizer.NUKTA) - text=text.replace('\u0934','\u0933'+DevanagariNormalizer.NUKTA) - text=text.replace('\u0958','\u0915'+DevanagariNormalizer.NUKTA) - text=text.replace('\u0959','\u0916'+DevanagariNormalizer.NUKTA) - text=text.replace('\u095A','\u0917'+DevanagariNormalizer.NUKTA) - text=text.replace('\u095B','\u091C'+DevanagariNormalizer.NUKTA) - text=text.replace('\u095C','\u0921'+DevanagariNormalizer.NUKTA) - text=text.replace('\u095D','\u0922'+DevanagariNormalizer.NUKTA) - text=text.replace('\u095E','\u092B'+DevanagariNormalizer.NUKTA) - text=text.replace('\u095F','\u092F'+DevanagariNormalizer.NUKTA) - - if self.remove_nuktas: - text=text.replace(DevanagariNormalizer.NUKTA,'') - - # replace pipe character for poorna virama - text=text.replace('\u007c','\u0964') - - # correct visarga - text=re.sub(r'([\u0900-\u097f]):','\\1\u0903',text) - - return text - - def get_char_stats(self,text): - super(DevanagariNormalizer,self).get_char_stats(text) - - print((len(re.findall('\u0929',text)))) - print((len(re.findall('\u0931',text)))) - print((len(re.findall('\u0934',text)))) - print((len(re.findall('\u0958',text)))) - print((len(re.findall('\u0959',text)))) - print((len(re.findall('\u095A',text)))) - 
print((len(re.findall('\u095B',text)))) - print((len(re.findall('\u095C',text)))) - print((len(re.findall('\u095D',text)))) - print((len(re.findall('\u095E',text)))) - print((len(re.findall('\u095F',text)))) - - #print(len(re.findall(u'\u0928'+DevanagariNormalizer.NUKTA,text))) - #print(len(re.findall(u'\u0930'+DevanagariNormalizer.NUKTA,text))) - #print(len(re.findall(u'\u0933'+DevanagariNormalizer.NUKTA,text))) - #print(len(re.findall(u'\u0915'+DevanagariNormalizer.NUKTA,text))) - #print(len(re.findall(u'\u0916'+DevanagariNormalizer.NUKTA,text))) - #print(len(re.findall(u'\u0917'+DevanagariNormalizer.NUKTA,text))) - #print(len(re.findall(u'\u091C'+DevanagariNormalizer.NUKTA,text))) - #print(len(re.findall(u'\u0921'+DevanagariNormalizer.NUKTA,text))) - #print(len(re.findall(u'\u0922'+DevanagariNormalizer.NUKTA,text))) - #print(len(re.findall(u'\u092B'+DevanagariNormalizer.NUKTA,text))) - #print(len(re.findall(u'\u092F'+DevanagariNormalizer.NUKTA,text))) - -class GurmukhiNormalizer(BaseNormalizer): - """ - Normalizer for the Gurmukhi script. In addition to basic normalization by the super class, - * Replaces the composite characters containing nuktas by their decomposed form - * Replace the reserved character for poorna virama (if used) with the recommended generic Indic scripts poorna virama - * replace pipe character '|' by poorna virama character - * replace colon ':' by visarga if the colon follows a charcter in this script - """ - - NUKTA='\u0A3C' - - VOWEL_NORM_MAPS={ - ## http://www.unicode.org/versions/Unicode12.1.0/ch12.pdf - ## Table 12-16 - '\u0a05\u0a3e': '\u0a06', - '\u0a72\u0a3f': '\u0a07', - '\u0a72\u0a40': '\u0a08', - '\u0a73\u0a41': '\u0a09', - '\u0a73\u0a42': '\u0a0a', - '\u0a72\u0a47': '\u0a0f', - '\u0a05\u0a48': '\u0a10', - '\u0a73\u0a4b': '\u0a13', - '\u0a05\u0a4c': '\u0a14', - } - - def __init__(self,lang='pa',remove_nuktas=False,nasals_mode='do_nothing',do_normalize_chandras=False, - do_normalize_vowel_ending=False, - do_canonicalize_addak=False, - do_canonicalize_tippi=False, - do_replace_vowel_bases=False): - super(GurmukhiNormalizer,self).__init__(lang,remove_nuktas,nasals_mode,do_normalize_chandras,do_normalize_vowel_ending) - self.do_canonicalize_addak=do_canonicalize_addak - self.do_canonicalize_tippi=do_canonicalize_tippi - self.do_replace_vowel_bases=do_replace_vowel_bases - - - def _normalize_vowels(self,text): - """ - """ - - ## standard vowel replacements as per suggestions in - ## http://www.unicode.org/versions/Unicode12.1.0/ch12.pdf - ## Table 12-16 - - for k,v in GurmukhiNormalizer.VOWEL_NORM_MAPS.items(): - text=text.replace(k,v) - - ## the above mappings should account for majority of the variantions, - ## Rest are handled via this generic rule which looks at the diacritic - ## following the 2 special characters - ## TBD: don't see evidence for this in Wikipedia corpus - - ## If these special characters occur without any diacritic, replace them with closet - ## equivalent vowels - if self.do_replace_vowel_bases: - text=text.replace('\u0a72','\u0a07') - text=text.replace('\u0a73','\u0a09') - - return text - - - def normalize(self,text): - - # Addak - if self.do_canonicalize_addak: - ## replace addak+consonant with consonat+halant+consonant - text=re.sub(r'\u0a71(.)','\\1\u0a4d\\1',text) - - # Tippi - if self.do_canonicalize_tippi: - text=text.replace('\u0a70','\u0a02') - - # Vowels: Gurumuki has multiple ways of representing independent vowels due - # to the characters 'iri' and 'ura'. 
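        # For example, per VOWEL_NORM_MAPS above, 'ura' + vowel sign u (U+0A73 U+0A41)
        # is folded into the independent vowel 'u' (U+0A09).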
- text=self._normalize_vowels(text) - - # common normalization for Indic scripts - text=super(GurmukhiNormalizer,self).normalize(text) - - # decomposing Nukta based composite characters - text=text.replace('\u0a33','\u0a32'+GurmukhiNormalizer.NUKTA) - text=text.replace('\u0a36','\u0a38'+GurmukhiNormalizer.NUKTA) - text=text.replace('\u0a59','\u0a16'+GurmukhiNormalizer.NUKTA) - text=text.replace('\u0a5a','\u0a17'+GurmukhiNormalizer.NUKTA) - text=text.replace('\u0a5b','\u0a1c'+GurmukhiNormalizer.NUKTA) - text=text.replace('\u0a5e','\u0a2b'+GurmukhiNormalizer.NUKTA) - - if self.remove_nuktas: - text=text.replace(GurmukhiNormalizer.NUKTA,'') - - # replace the poorna virama codes specific to script - # with generic Indic script codes - text=text.replace('\u0a64','\u0964') - text=text.replace('\u0a65','\u0965') - - ## replace pipe character for poorna virama - text=text.replace('\u007c','\u0964') - - # correct visarge - text=re.sub(r'([\u0a00-\u0a7f]):','\\1\u0a03',text) - - return text - - -class GujaratiNormalizer(BaseNormalizer): - """ - Normalizer for the Gujarati script. In addition to basic normalization by the super class, - * Replace the reserved character for poorna virama (if used) with the recommended generic Indic scripts poorna virama - * replace colon ':' by visarga if the colon follows a charcter in this script - """ - - NUKTA='\u0ABC' - - def __init__(self,lang='gu',remove_nuktas=False,nasals_mode='do_nothing',do_normalize_chandras=False, - do_normalize_vowel_ending=False): - super(GujaratiNormalizer,self).__init__(lang,remove_nuktas,nasals_mode,do_normalize_chandras,do_normalize_vowel_ending) - - def normalize(self,text): - - # common normalization for Indic scripts - text=super(GujaratiNormalizer,self).normalize(text) - - # decomposing Nukta based composite characters - if self.remove_nuktas: - text=text.replace(GujaratiNormalizer.NUKTA,'') - - - # replace the poorna virama codes specific to script - # with generic Indic script codes - text=text.replace('\u0ae4','\u0964') - text=text.replace('\u0ae5','\u0965') - - # correct visarge - text=re.sub(r'([\u0a80-\u0aff]):','\\1\u0a83',text) - - return text - - -class OriyaNormalizer(BaseNormalizer): - """ - Normalizer for the Oriya script. 
In addition to basic normalization by the super class, - * Replaces the composite characters containing nuktas by their decomposed form - * Replace the reserved character for poorna virama (if used) with the recommended generic Indic scripts poorna virama - * Canonicalize two part dependent vowels - * Replace 'va' with 'ba' - * replace pipe character '|' by poorna virama character - * replace colon ':' by visarga if the colon follows a charcter in this script - """ - - NUKTA='\u0B3C' - - VOWEL_NORM_MAPS={ - ## See Table 12-22 in http://www.unicode.org/versions/Unicode12.1.0/ch12.pdf - '\u0b05\u0b3e': '\u0b06', - '\u0b0f\u0b57': '\u0b10', - '\u0b13\u0b57': '\u0b14', - } - - - def __init__(self,lang='or',remove_nuktas=False,nasals_mode='do_nothing',do_normalize_chandras=False, - do_normalize_vowel_ending=False, - do_remap_wa=False): - super(OriyaNormalizer,self).__init__(lang,remove_nuktas,nasals_mode,do_normalize_chandras,do_normalize_vowel_ending) - self.do_remap_wa=do_remap_wa - - def normalize(self,text): - - # common normalization for Indic scripts - text=super(OriyaNormalizer,self).normalize(text) - - ## standard vowel replacements as per suggestions in Unicode documents - for k,v in OriyaNormalizer.VOWEL_NORM_MAPS.items(): - text=text.replace(k,v) - - # decomposing Nukta based composite characters - text=text.replace('\u0b5c','\u0b21'+OriyaNormalizer.NUKTA) - text=text.replace('\u0b5d','\u0b22'+OriyaNormalizer.NUKTA) - - if self.remove_nuktas: - text=text.replace(OriyaNormalizer.NUKTA,'') - - # replace the poorna virama codes specific to script - # with generic Indic script codes - text=text.replace('\u0b64','\u0964') - text=text.replace('\u0b65','\u0965') - - # replace pipe character for poorna virama - text=text.replace('\u0b7c','\u0964') - - # replace wa with ba - if self.do_remap_wa: - text=text.replace('\u0b71','\u0b2c') - - # replace va with ba - # NOTE: documentation (chapter on Indic scripts) and codepoint chart seem contradictory - # (this applied to wa to ba rule also above) - text=text.replace('\u0b35','\u0b2c') - - # AI dependent vowel sign - text=text.replace('\u0b47\u0b56','\u0b58') - - # two part dependent vowels - text=text.replace('\u0b47\u0b3e','\u0b4b') - text=text.replace('\u0b47\u0b57','\u0b4c') - - - # additional consonant - not clear how to handle this - # ignore - - # correct visarge - text=re.sub(r'([\u0b00-\u0b7f]):','\\1\u0b03',text) - - return text - - -class BengaliNormalizer(BaseNormalizer): - """ - Normalizer for the Bengali script. 
In addition to basic normalization by the super class, - * Replaces the composite characters containing nuktas by their decomposed form - * Replace the reserved character for poorna virama (if used) with the recommended generic Indic scripts poorna virama - * Canonicalize two part dependent vowels - * replace pipe character '|' by poorna virama character - * replace colon ':' by visarga if the colon follows a charcter in this script - """ - - NUKTA='\u09BC' - - def __init__(self,lang='bn',remove_nuktas=False,nasals_mode='do_nothing',do_normalize_chandras=False, - do_normalize_vowel_ending=False, - do_remap_assamese_chars=False): - super(BengaliNormalizer,self).__init__(lang,remove_nuktas,nasals_mode,do_normalize_chandras,do_normalize_vowel_ending) - self.do_remap_assamese_chars=do_remap_assamese_chars - - def normalize(self,text): - - # common normalization for Indic scripts - text=super(BengaliNormalizer,self).normalize(text) - - # decomposing Nukta based composite characters - text=text.replace('\u09dc','\u09a1'+BengaliNormalizer.NUKTA) - text=text.replace('\u09dd','\u09a2'+BengaliNormalizer.NUKTA) - text=text.replace('\u09df','\u09af'+BengaliNormalizer.NUKTA) - - if self.remove_nuktas: - text=text.replace(BengaliNormalizer.NUKTA,'') - - if self.do_remap_assamese_chars and self.lang=='as': - text=text.replace('\u09f0','\u09b0') # 'ra' character - text=text.replace('\u09f1','\u09ac') # 'va' character - - # replace the poorna virama codes specific to script - # with generic Indic script codes - text=text.replace('\u09e4','\u0964') - text=text.replace('\u09e5','\u0965') - - # replace pipe character for poorna virama - text=text.replace('\u007c','\u0964') - # replace bengali currency numerator four for poorna virama (it looks similar and is used as a substitute) - text=text.replace('\u09f7','\u0964') - - # two part dependent vowels - text=text.replace('\u09c7\u09be','\u09cb') - text=text.replace('\u09c7\u09d7','\u09cc') - - # correct visarge - text=re.sub(r'([\u0980-\u09ff]):','\\1\u0983',text) - - return text - - -class TamilNormalizer(BaseNormalizer): - """ - Normalizer for the Tamil script. In addition to basic normalization by the super class, - * Replace the reserved character for poorna virama (if used) with the recommended generic Indic scripts poorna virama - * canonicalize two-part dependent vowel signs - * replace colon ':' by visarga if the colon follows a charcter in this script - """ - - def __init__(self,lang='ta',remove_nuktas=False,nasals_mode='do_nothing', - do_normalize_chandras=False,do_normalize_vowel_ending=False): - super(TamilNormalizer,self).__init__(lang,remove_nuktas,nasals_mode,do_normalize_chandras,do_normalize_vowel_ending) - - def normalize(self,text): - - # common normalization for Indic scripts - text=super(TamilNormalizer,self).normalize(text) - - # replace the poorna virama codes specific to script - # with generic Indic script codes - text=text.replace('\u0be4','\u0964') - text=text.replace('\u0be5','\u0965') - - # two part dependent vowels - text=text.replace('\u0b92\u0bd7','\u0b94') - text=text.replace('\u0bc6\u0bbe','\u0bca') - text=text.replace('\u0bc7\u0bbe','\u0bcb') - text=text.replace('\u0bc6\u0bd7','\u0bcc') - - # correct visarge - text=re.sub(r'([\u0b80-\u0bff]):','\\1\u0b83',text) - - return text - - -class TeluguNormalizer(BaseNormalizer): - """ - Normalizer for the Teluguscript. 
In addition to basic normalization by the super class, - * Replace the reserved character for poorna virama (if used) with the recommended generic Indic scripts poorna virama - * canonicalize two-part dependent vowel signs - * replace colon ':' by visarga if the colon follows a charcter in this script - """ - - def __init__(self,lang='te',remove_nuktas=False,nasals_mode='do_nothing', - do_normalize_chandras=False,do_normalize_vowel_ending=False): - super(TeluguNormalizer,self).__init__(lang,remove_nuktas,nasals_mode,do_normalize_chandras,do_normalize_vowel_ending) - - def normalize(self,text): - - # common normalization for Indic scripts - text=super(TeluguNormalizer,self).normalize(text) - - # replace the poorna virama codes specific to script - # with generic Indic script codes - text=text.replace('\u0c64','\u0964') - text=text.replace('\u0c65','\u0965') - - # dependent vowels - text=text.replace('\u0c46\u0c56','\u0c48') - - # correct visarge - text=re.sub(r'([\u0c00-\u0c7f]):','\\1\u0c03',text) - - return text - - def get_char_stats(self,text): - pass - -class KannadaNormalizer(BaseNormalizer): - """ - Normalizer for the Kannada script. In addition to basic normalization by the super class, - * Replace the reserved character for poorna virama (if used) with the recommended generic Indic scripts poorna virama - * canonicalize two-part dependent vowel signs - * replace colon ':' by visarga if the colon follows a charcter in this script - """ - - def __init__(self,lang='kn',remove_nuktas=False,nasals_mode='do_nothing', - do_normalize_chandras=False,do_normalize_vowel_ending=False): - super(KannadaNormalizer,self).__init__(lang,remove_nuktas,nasals_mode,do_normalize_chandras,do_normalize_vowel_ending) - - - def normalize(self,text): - - # common normalization for Indic scripts - text=super(KannadaNormalizer,self).normalize(text) - - # replace the poorna virama codes specific to script - # with generic Indic script codes - text=text.replace('\u0ce4','\u0964') - text=text.replace('\u0ce5','\u0965') - - # dependent vowels - text=text.replace('\u0cbf\u0cd5','\u0cc0') - text=text.replace('\u0cc6\u0cd5','\u0cc7') - text=text.replace('\u0cc6\u0cd6','\u0cc8') - text=text.replace('\u0cc6\u0cc2','\u0cca') - text=text.replace('\u0cca\u0cd5','\u0ccb') - - # correct visarge - text=re.sub(r'([\u0c80-\u0cff]):','\\1\u0c83',text) - - return text - - -class MalayalamNormalizer(BaseNormalizer): - """ - Normalizer for the Malayalam script. 
In addition to basic normalization by the super class, - * Replace the reserved character for poorna virama (if used) with the recommended generic Indic scripts poorna virama - * canonicalize two-part dependent vowel signs - * Change from old encoding of chillus (till Unicode 5.0) to new encoding - * replace colon ':' by visarga if the colon follows a charcter in this script - """ - - CHILLU_CHAR_MAP= { - '\u0d7a': '\u0d23', - '\u0d7b': '\u0d28', - '\u0d7c': '\u0d30', - '\u0d7d': '\u0d32', - '\u0d7e': '\u0d33', - '\u0d7f': '\u0d15', - } - - def _canonicalize_chillus(self,text): - for chillu, char in MalayalamNormalizer.CHILLU_CHAR_MAP.items(): - text=text.replace(chillu,'{}\u0d4d'.format(char)) - return text - - def _correct_geminated_T(self,text): - return text.replace('\u0d31\u0d4d\u0d31','\u0d1f\u0d4d\u0d1f') - - def __init__(self,lang='ml',remove_nuktas=False,nasals_mode='do_nothing',do_normalize_chandras=False, - do_normalize_vowel_ending=False, - do_canonicalize_chillus=False, do_correct_geminated_T=False): - super(MalayalamNormalizer,self).__init__(lang,remove_nuktas,nasals_mode,do_normalize_chandras,do_normalize_vowel_ending) - self.do_canonicalize_chillus=do_canonicalize_chillus - self.do_correct_geminated_T=do_correct_geminated_T - - def normalize(self,text): - - # Change from old encoding of chillus (till Unicode 5.0) to new encoding - text=text.replace('\u0d23\u0d4d\u200d','\u0d7a') - text=text.replace('\u0d28\u0d4d\u200d','\u0d7b') - text=text.replace('\u0d30\u0d4d\u200d','\u0d7c') - text=text.replace('\u0d32\u0d4d\u200d','\u0d7d') - text=text.replace('\u0d33\u0d4d\u200d','\u0d7e') - text=text.replace('\u0d15\u0d4d\u200d','\u0d7f') - - # Normalize chillus - if self.do_canonicalize_chillus: - text=self._canonicalize_chillus(text) - - # common normalization for Indic scripts - text=super(MalayalamNormalizer,self).normalize(text) - - # replace the poorna virama codes specific to script - # with generic Indic script codes - text=text.replace('\u0d64','\u0964') - text=text.replace('\u0d65','\u0965') - - # dependent vowels - text=text.replace('\u0d46\u0d3e','\u0d4a') - text=text.replace('\u0d47\u0d3e','\u0d4b') - - # au forms - text=text.replace('\u0d46\u0d57','\u0d4c') - text=text.replace('\u0d57','\u0d4c') - - # correct geminated T - if self.do_correct_geminated_T: - text=self._correct_geminated_T(text) - - # correct visarga - text=re.sub(r'([\u0d00-\u0d7f]):','\\1\u0d03',text) - - return text - -class UrduNormalizer(NormalizerI): - '''Uses UrduHack library. 
- https://docs.urduhack.com/en/stable/_modules/urduhack/normalization/character.html#normalize - ''' - - def __init__(self, lang, remove_nuktas=True): - self.lang = lang - self.remove_nuktas = remove_nuktas - - from urduhack.normalization import ( - remove_diacritics, - normalize_characters, - normalize_combine_characters - ) # TODO: Use only required normalizers - from urduhack.preprocessing import ( - normalize_whitespace, - digits_space, - all_punctuations_space, - english_characters_space - ) - self.normalize_whitespace = normalize_whitespace - self.digits_space = digits_space - self.all_punctuations_space = all_punctuations_space - self.english_characters_space = english_characters_space - - self.remove_diacritics = remove_diacritics - self.normalize_characters = normalize_characters - self.normalize_combine_characters = normalize_combine_characters - - def normalize(self, text): - text = self._normalize_punctuations(text) - text = self.normalize_whitespace(text) - if self.remove_nuktas: - text = self.remove_diacritics(text) - text = self.normalize_characters(text) - text = self.normalize_combine_characters(text) - text = self.digits_space(text) - text = self.all_punctuations_space(text) - text = self.english_characters_space(text) - return text - - -class IndicNormalizerFactory(object): - """ - Factory class to create language specific normalizers. - """ - - def get_normalizer(self,language,**kwargs): - """ - Call the get_normalizer function to get the language specific normalizer - Paramters: - |language: language code - |remove_nuktas: boolean, should the normalizer remove nukta characters - """ - normalizer=None - if language in ['hi','mr','sa','kK','ne','sd']: - normalizer=DevanagariNormalizer(lang=language, **kwargs) - elif language in ['ur']: - normalizer = UrduNormalizer(lang=language, **kwargs) - elif language in ['pa']: - normalizer=GurmukhiNormalizer(lang=language, **kwargs) - elif language in ['gu']: - normalizer=GujaratiNormalizer(lang=language, **kwargs) - elif language in ['bn']: - normalizer=BengaliNormalizer(lang=language, **kwargs) - elif language in ['as']: - normalizer=BengaliNormalizer(lang=language, **kwargs) - elif language in ['or']: - normalizer=OriyaNormalizer(lang=language, **kwargs) - elif language in ['ml']: - normalizer=MalayalamNormalizer(lang=language, **kwargs) - elif language in ['kn']: - normalizer=KannadaNormalizer(lang=language, **kwargs) - elif language in ['ta']: - normalizer=TamilNormalizer(lang=language, **kwargs) - elif language in ['te']: - normalizer=TeluguNormalizer(lang=language, **kwargs) - else: - normalizer=BaseNormalizer(lang=language, **kwargs) - - return normalizer - - def is_language_supported(self,language): - """ - Is the language supported? 
- """ - if language in ['hi','mr','sa','kK','ne','sd', - 'ur', - 'pa', - 'gu', - 'bn','as', - 'or', - 'ml', - 'kn', - 'ta', - 'te']: - return True - else: - return False - - -if __name__ == '__main__': - - if len(sys.argv)<4: - print("Usage: python normalize.py [] []") - sys.exit(1) - - language=sys.argv[3] - remove_nuktas=False - normalize_nasals='do_nothing' - if len(sys.argv)>=5: - remove_nuktas=bool(sys.argv[4]) - if len(sys.argv)>=6: - normalize_nasals=sys.argv[5] - - # create normalizer - factory=IndicNormalizerFactory() - normalizer=factory.get_normalizer(language,remove_nuktas=remove_nuktas,nasals_mode=normalize_nasals) - - # DO normalization - with codecs.open(sys.argv[1],'r','utf-8') as ifile: - with codecs.open(sys.argv[2],'w','utf-8') as ofile: - for line in ifile.readlines(): - normalized_line=normalizer.normalize(line) - ofile.write(normalized_line) - - ## gather status about normalization - #with codecs.open(sys.argv[1],'r','utf-8') as ifile: - # normalizer=DevanagariNormalizer() - # text=string.join(ifile.readlines(),sep='') - # normalizer.get_char_stats(text) diff --git a/spaces/KyanChen/RSPrompter/configs/rsprompter/mask2former_ssdd_config.py b/spaces/KyanChen/RSPrompter/configs/rsprompter/mask2former_ssdd_config.py deleted file mode 100644 index 9d0262c54a564ad024ace2904ce4721f75707c43..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/configs/rsprompter/mask2former_ssdd_config.py +++ /dev/null @@ -1,335 +0,0 @@ -custom_imports = dict(imports=['mmseg.datasets', 'mmseg.models'], allow_failed_imports=False) - -max_epochs = 600 - -optimizer = dict( - type='AdamW', - lr=0.0005, - weight_decay=1e-3 -) - -param_scheduler = [ - # warm up learning rate scheduler - dict( - type='LinearLR', - start_factor=1e-4, - by_epoch=True, - begin=0, - end=1, - # update by iter - convert_to_iter_based=True), - # main learning rate scheduler - dict( - type='CosineAnnealingLR', - T_max=max_epochs, - by_epoch=True, - begin=1, - end=max_epochs, - ) -] - -param_scheduler_callback = dict( - type='ParamSchedulerHook' -) - - -evaluator_ = dict( - type='CocoPLMetric', - metric=['bbox', 'segm'], - proposal_nums=[1, 10, 100] -) - - -evaluator = dict( - # train_evaluator=evaluator_, - val_evaluator=evaluator_, - test_evaluator=evaluator_, -) - -image_size = (512, 512) -data_preprocessor = dict( - type='mmdet.DetDataPreprocessor', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - bgr_to_rgb=True, - pad_size_divisor=32, - pad_mask=True, - mask_pad_value=0, -) - -num_things_classes = 1 -num_stuff_classes = 0 -num_classes = num_things_classes + num_stuff_classes -num_queries = 30 - -model = dict( - type='mmdet.Mask2Former', - data_preprocessor=data_preprocessor, - backbone=dict( - type='mmdet.ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=-1, - norm_cfg=dict(type='BN', requires_grad=False), - norm_eval=True, - style='pytorch', - init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), - panoptic_head=dict( - type='mmdet.Mask2FormerHead', - in_channels=[256, 512, 1024, 2048], # pass to pixel_decoder inside - strides=[4, 8, 16, 32], - feat_channels=256, - out_channels=256, - num_things_classes=num_things_classes, - num_stuff_classes=num_stuff_classes, - num_queries=num_queries, - num_transformer_feat_level=3, - pixel_decoder=dict( - type='mmdet.MSDeformAttnPixelDecoder', - num_outs=3, - norm_cfg=dict(type='GN', num_groups=32), - act_cfg=dict(type='ReLU'), - encoder=dict( # DeformableDetrTransformerEncoder - num_layers=3, 
- layer_cfg=dict( # DeformableDetrTransformerEncoderLayer - self_attn_cfg=dict( # MultiScaleDeformableAttention - embed_dims=256, - num_heads=8, - num_levels=3, - num_points=4, - dropout=0.0, - batch_first=True), - ffn_cfg=dict( - embed_dims=256, - feedforward_channels=1024, - num_fcs=2, - ffn_drop=0.0, - act_cfg=dict(type='ReLU', inplace=True)))), - positional_encoding=dict(num_feats=128, normalize=True)), - enforce_decoder_input_project=False, - positional_encoding=dict(num_feats=128, normalize=True), - transformer_decoder=dict( # Mask2FormerTransformerDecoder - return_intermediate=True, - num_layers=3, - layer_cfg=dict( # Mask2FormerTransformerDecoderLayer - self_attn_cfg=dict( # MultiheadAttention - embed_dims=256, - num_heads=8, - dropout=0.0, - batch_first=True), - cross_attn_cfg=dict( # MultiheadAttention - embed_dims=256, - num_heads=8, - dropout=0.0, - batch_first=True), - ffn_cfg=dict( - embed_dims=256, - feedforward_channels=2048, - num_fcs=2, - ffn_drop=0.0, - act_cfg=dict(type='ReLU', inplace=True))), - init_cfg=None), - loss_cls=dict( - type='mmdet.CrossEntropyLoss', - use_sigmoid=False, - loss_weight=2.0, - reduction='mean', - class_weight=[1.0] * num_classes + [0.1]), - loss_mask=dict( - type='mmdet.CrossEntropyLoss', - use_sigmoid=True, - reduction='mean', - loss_weight=5.0), - loss_dice=dict( - type='mmdet.DiceLoss', - use_sigmoid=True, - activate=True, - reduction='mean', - naive_dice=True, - eps=1.0, - loss_weight=5.0)), - panoptic_fusion_head=dict( - type='mmdet.MaskFormerFusionHead', - num_things_classes=num_things_classes, - num_stuff_classes=num_stuff_classes, - loss_panoptic=None, - init_cfg=None), - train_cfg=dict( - num_points=12544, - oversample_ratio=3.0, - importance_sample_ratio=0.75, - assigner=dict( - type='mmdet.HungarianAssigner', - match_costs=[ - dict(type='mmdet.ClassificationCost', weight=2.0), - dict( - type='mmdet.CrossEntropyLossCost', weight=5.0, use_sigmoid=True), - dict(type='mmdet.DiceCost', weight=5.0, pred_act=True, eps=1.0) - ]), - sampler=dict(type='mmdet.MaskPseudoSampler')), - test_cfg=dict( - panoptic_on=False, - # For now, the dataset does not support - # evaluating semantic segmentation metric. - semantic_on=False, - instance_on=True, - # max_per_image is for instance segmentation. - max_per_image=num_queries, - iou_thr=0.8, - # In Mask2Former's panoptic postprocessing, - # it will filter mask area where score is less than 0.5 . 
- filter_low_score=True), - init_cfg=None) - - -model_cfg = dict( - type='MMDetPLer', - hyperparameters=dict( - optimizer=optimizer, - param_scheduler=param_scheduler, - evaluator=evaluator, - ), - whole_model=model, -) - -task_name = 'ssdd_ins' -exp_name = 'E20230527_0' -logger = dict( - type='WandbLogger', - project=task_name, - group='mask2former', - name=exp_name -) -# logger = None - - -callbacks = [ - param_scheduler_callback, - dict( - type='ModelCheckpoint', - dirpath=f'results/{task_name}/{exp_name}/checkpoints', - save_last=True, - mode='max', - monitor='valsegm_map_0', - save_top_k=2, - filename='epoch_{epoch}-map_{valsegm_map_0:.4f}' - ), - dict( - type='LearningRateMonitor', - logging_interval='step' - ) -] - - -trainer_cfg = dict( - compiled_model=False, - accelerator="auto", - strategy="auto", - # strategy="ddp", - # strategy='ddp_find_unused_parameters_true', - # precision='32', - # precision='16-mixed', - devices=4, - default_root_dir=f'results/{task_name}/{exp_name}', - # default_root_dir='results/tmp', - max_epochs=max_epochs, - logger=logger, - callbacks=callbacks, - log_every_n_steps=10, - check_val_every_n_epoch=10, - benchmark=True, - # sync_batchnorm=True, - # fast_dev_run=True, - - # limit_train_batches=1, - # limit_val_batches=0, - # limit_test_batches=None, - # limit_predict_batches=None, - # overfit_batches=0.0, - - # val_check_interval=None, - # num_sanity_val_steps=0, - # enable_checkpointing=None, - # enable_progress_bar=None, - # enable_model_summary=None, - # accumulate_grad_batches=32, - # gradient_clip_val=15, - # gradient_clip_algorithm='norm', - # deterministic=None, - # inference_mode: bool=True, - use_distributed_sampler=True, - # profiler="simple", - # detect_anomaly=False, - # barebones=False, - # plugins=None, - # reload_dataloaders_every_n_epochs=0, -) - - -backend_args = None -train_pipeline = [ - dict(type='mmdet.LoadImageFromFile'), - dict(type='mmdet.LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='mmdet.Resize', scale=image_size), - dict(type='mmdet.RandomFlip', prob=0.5), - dict(type='mmdet.PackDetInputs') -] - -test_pipeline = [ - dict(type='mmdet.LoadImageFromFile', backend_args=backend_args), - dict(type='mmdet.Resize', scale=image_size), - # If you don't have a gt annotation, delete the pipeline - dict(type='mmdet.LoadAnnotations', with_bbox=True, with_mask=True), - dict( - type='mmdet.PackDetInputs', - meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', - 'scale_factor')) -] - - -train_batch_size_per_gpu = 8 -train_num_workers = 4 -test_batch_size_per_gpu = 8 -test_num_workers = 4 -persistent_workers = True - -data_parent = '/mnt/search01/dataset/cky_data/SSDD' - -dataset_type = 'SSDDInsSegDataset' - -val_loader = dict( - batch_size=test_batch_size_per_gpu, - num_workers=test_num_workers, - persistent_workers=persistent_workers, - pin_memory=True, - dataset=dict( - type=dataset_type, - data_root=data_parent, - ann_file='annotations/SSDD_instances_val.json', - data_prefix=dict(img_path='imgs'), - test_mode=True, - filter_cfg=dict(filter_empty_gt=True, min_size=32), - pipeline=test_pipeline, - backend_args=backend_args)) - -datamodule_cfg = dict( - type='PLDataModule', - train_loader=dict( - batch_size=train_batch_size_per_gpu, - num_workers=train_num_workers, - persistent_workers=persistent_workers, - pin_memory=True, - dataset=dict( - type=dataset_type, - data_root=data_parent, - ann_file='annotations/SSDD_instances_train.json', - data_prefix=dict(img_path='imgs'), - filter_cfg=dict(filter_empty_gt=True, 
min_size=32), - pipeline=train_pipeline, - backend_args=backend_args) - ), - val_loader=val_loader, - test_loader=val_loader, - predict_loader=val_loader -) \ No newline at end of file diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/backbones/pvt.py b/spaces/KyanChen/RSPrompter/mmdet/models/backbones/pvt.py deleted file mode 100644 index 9d16c48178fd6029d4ade2d26d40b18d73a6d841..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/backbones/pvt.py +++ /dev/null @@ -1,665 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math -import warnings -from collections import OrderedDict - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import Conv2d, build_activation_layer, build_norm_layer -from mmcv.cnn.bricks.drop import build_dropout -from mmcv.cnn.bricks.transformer import MultiheadAttention -from mmengine.logging import MMLogger -from mmengine.model import (BaseModule, ModuleList, Sequential, constant_init, - normal_init, trunc_normal_init) -from mmengine.model.weight_init import trunc_normal_ -from mmengine.runner.checkpoint import CheckpointLoader, load_state_dict -from torch.nn.modules.utils import _pair as to_2tuple - -from mmdet.registry import MODELS -from ..layers import PatchEmbed, nchw_to_nlc, nlc_to_nchw - - -class MixFFN(BaseModule): - """An implementation of MixFFN of PVT. - - The differences between MixFFN & FFN: - 1. Use 1X1 Conv to replace Linear layer. - 2. Introduce 3X3 Depth-wise Conv to encode positional information. - - Args: - embed_dims (int): The feature dimension. Same as - `MultiheadAttention`. - feedforward_channels (int): The hidden dimension of FFNs. - act_cfg (dict, optional): The activation config for FFNs. - Default: dict(type='GELU'). - ffn_drop (float, optional): Probability of an element to be - zeroed in FFN. Default 0.0. - dropout_layer (obj:`ConfigDict`): The dropout_layer used - when adding the shortcut. - Default: None. - use_conv (bool): If True, add 3x3 DWConv between two Linear layers. - Defaults: False. - init_cfg (obj:`mmengine.ConfigDict`): The Config for initialization. - Default: None. 
- """ - - def __init__(self, - embed_dims, - feedforward_channels, - act_cfg=dict(type='GELU'), - ffn_drop=0., - dropout_layer=None, - use_conv=False, - init_cfg=None): - super(MixFFN, self).__init__(init_cfg=init_cfg) - - self.embed_dims = embed_dims - self.feedforward_channels = feedforward_channels - self.act_cfg = act_cfg - activate = build_activation_layer(act_cfg) - - in_channels = embed_dims - fc1 = Conv2d( - in_channels=in_channels, - out_channels=feedforward_channels, - kernel_size=1, - stride=1, - bias=True) - if use_conv: - # 3x3 depth wise conv to provide positional encode information - dw_conv = Conv2d( - in_channels=feedforward_channels, - out_channels=feedforward_channels, - kernel_size=3, - stride=1, - padding=(3 - 1) // 2, - bias=True, - groups=feedforward_channels) - fc2 = Conv2d( - in_channels=feedforward_channels, - out_channels=in_channels, - kernel_size=1, - stride=1, - bias=True) - drop = nn.Dropout(ffn_drop) - layers = [fc1, activate, drop, fc2, drop] - if use_conv: - layers.insert(1, dw_conv) - self.layers = Sequential(*layers) - self.dropout_layer = build_dropout( - dropout_layer) if dropout_layer else torch.nn.Identity() - - def forward(self, x, hw_shape, identity=None): - out = nlc_to_nchw(x, hw_shape) - out = self.layers(out) - out = nchw_to_nlc(out) - if identity is None: - identity = x - return identity + self.dropout_layer(out) - - -class SpatialReductionAttention(MultiheadAttention): - """An implementation of Spatial Reduction Attention of PVT. - - This module is modified from MultiheadAttention which is a module from - mmcv.cnn.bricks.transformer. - - Args: - embed_dims (int): The embedding dimension. - num_heads (int): Parallel attention heads. - attn_drop (float): A Dropout layer on attn_output_weights. - Default: 0.0. - proj_drop (float): A Dropout layer after `nn.MultiheadAttention`. - Default: 0.0. - dropout_layer (obj:`ConfigDict`): The dropout_layer used - when adding the shortcut. Default: None. - batch_first (bool): Key, Query and Value are shape of - (batch, n, embed_dim) - or (n, batch, embed_dim). Default: False. - qkv_bias (bool): enable bias for qkv if True. Default: True. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='LN'). - sr_ratio (int): The ratio of spatial reduction of Spatial Reduction - Attention of PVT. Default: 1. - init_cfg (obj:`mmengine.ConfigDict`): The Config for initialization. - Default: None. - """ - - def __init__(self, - embed_dims, - num_heads, - attn_drop=0., - proj_drop=0., - dropout_layer=None, - batch_first=True, - qkv_bias=True, - norm_cfg=dict(type='LN'), - sr_ratio=1, - init_cfg=None): - super().__init__( - embed_dims, - num_heads, - attn_drop, - proj_drop, - batch_first=batch_first, - dropout_layer=dropout_layer, - bias=qkv_bias, - init_cfg=init_cfg) - - self.sr_ratio = sr_ratio - if sr_ratio > 1: - self.sr = Conv2d( - in_channels=embed_dims, - out_channels=embed_dims, - kernel_size=sr_ratio, - stride=sr_ratio) - # The ret[0] of build_norm_layer is norm name. - self.norm = build_norm_layer(norm_cfg, embed_dims)[1] - - # handle the BC-breaking from https://github.com/open-mmlab/mmcv/pull/1418 # noqa - from mmdet import digit_version, mmcv_version - if mmcv_version < digit_version('1.3.17'): - warnings.warn('The legacy version of forward function in' - 'SpatialReductionAttention is deprecated in' - 'mmcv>=1.3.17 and will no longer support in the' - 'future. 
Please upgrade your mmcv.') - self.forward = self.legacy_forward - - def forward(self, x, hw_shape, identity=None): - - x_q = x - if self.sr_ratio > 1: - x_kv = nlc_to_nchw(x, hw_shape) - x_kv = self.sr(x_kv) - x_kv = nchw_to_nlc(x_kv) - x_kv = self.norm(x_kv) - else: - x_kv = x - - if identity is None: - identity = x_q - - # Because the dataflow('key', 'query', 'value') of - # ``torch.nn.MultiheadAttention`` is (num_queries, batch, - # embed_dims), We should adjust the shape of dataflow from - # batch_first (batch, num_queries, embed_dims) to num_queries_first - # (num_queries ,batch, embed_dims), and recover ``attn_output`` - # from num_queries_first to batch_first. - if self.batch_first: - x_q = x_q.transpose(0, 1) - x_kv = x_kv.transpose(0, 1) - - out = self.attn(query=x_q, key=x_kv, value=x_kv)[0] - - if self.batch_first: - out = out.transpose(0, 1) - - return identity + self.dropout_layer(self.proj_drop(out)) - - def legacy_forward(self, x, hw_shape, identity=None): - """multi head attention forward in mmcv version < 1.3.17.""" - x_q = x - if self.sr_ratio > 1: - x_kv = nlc_to_nchw(x, hw_shape) - x_kv = self.sr(x_kv) - x_kv = nchw_to_nlc(x_kv) - x_kv = self.norm(x_kv) - else: - x_kv = x - - if identity is None: - identity = x_q - - out = self.attn(query=x_q, key=x_kv, value=x_kv)[0] - - return identity + self.dropout_layer(self.proj_drop(out)) - - -class PVTEncoderLayer(BaseModule): - """Implements one encoder layer in PVT. - - Args: - embed_dims (int): The feature dimension. - num_heads (int): Parallel attention heads. - feedforward_channels (int): The hidden dimension for FFNs. - drop_rate (float): Probability of an element to be zeroed. - after the feed forward layer. Default: 0.0. - attn_drop_rate (float): The drop out rate for attention layer. - Default: 0.0. - drop_path_rate (float): stochastic depth rate. Default: 0.0. - qkv_bias (bool): enable bias for qkv if True. - Default: True. - act_cfg (dict): The activation config for FFNs. - Default: dict(type='GELU'). - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='LN'). - sr_ratio (int): The ratio of spatial reduction of Spatial Reduction - Attention of PVT. Default: 1. - use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN. - Default: False. - init_cfg (dict, optional): Initialization config dict. - Default: None. - """ - - def __init__(self, - embed_dims, - num_heads, - feedforward_channels, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0., - qkv_bias=True, - act_cfg=dict(type='GELU'), - norm_cfg=dict(type='LN'), - sr_ratio=1, - use_conv_ffn=False, - init_cfg=None): - super(PVTEncoderLayer, self).__init__(init_cfg=init_cfg) - - # The ret[0] of build_norm_layer is norm name. - self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] - - self.attn = SpatialReductionAttention( - embed_dims=embed_dims, - num_heads=num_heads, - attn_drop=attn_drop_rate, - proj_drop=drop_rate, - dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), - qkv_bias=qkv_bias, - norm_cfg=norm_cfg, - sr_ratio=sr_ratio) - - # The ret[0] of build_norm_layer is norm name. 
- self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] - - self.ffn = MixFFN( - embed_dims=embed_dims, - feedforward_channels=feedforward_channels, - ffn_drop=drop_rate, - dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), - use_conv=use_conv_ffn, - act_cfg=act_cfg) - - def forward(self, x, hw_shape): - x = self.attn(self.norm1(x), hw_shape, identity=x) - x = self.ffn(self.norm2(x), hw_shape, identity=x) - - return x - - -class AbsolutePositionEmbedding(BaseModule): - """An implementation of the absolute position embedding in PVT. - - Args: - pos_shape (int): The shape of the absolute position embedding. - pos_dim (int): The dimension of the absolute position embedding. - drop_rate (float): Probability of an element to be zeroed. - Default: 0.0. - """ - - def __init__(self, pos_shape, pos_dim, drop_rate=0., init_cfg=None): - super().__init__(init_cfg=init_cfg) - - if isinstance(pos_shape, int): - pos_shape = to_2tuple(pos_shape) - elif isinstance(pos_shape, tuple): - if len(pos_shape) == 1: - pos_shape = to_2tuple(pos_shape[0]) - assert len(pos_shape) == 2, \ - f'The size of image should have length 1 or 2, ' \ - f'but got {len(pos_shape)}' - self.pos_shape = pos_shape - self.pos_dim = pos_dim - - self.pos_embed = nn.Parameter( - torch.zeros(1, pos_shape[0] * pos_shape[1], pos_dim)) - self.drop = nn.Dropout(p=drop_rate) - - def init_weights(self): - trunc_normal_(self.pos_embed, std=0.02) - - def resize_pos_embed(self, pos_embed, input_shape, mode='bilinear'): - """Resize pos_embed weights. - - Resize pos_embed using bilinear interpolate method. - - Args: - pos_embed (torch.Tensor): Position embedding weights. - input_shape (tuple): Tuple for (downsampled input image height, - downsampled input image width). - mode (str): Algorithm used for upsampling: - ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` | - ``'trilinear'``. Default: ``'bilinear'``. - - Return: - torch.Tensor: The resized pos_embed of shape [B, L_new, C]. - """ - assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]' - pos_h, pos_w = self.pos_shape - pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):] - pos_embed_weight = pos_embed_weight.reshape( - 1, pos_h, pos_w, self.pos_dim).permute(0, 3, 1, 2).contiguous() - pos_embed_weight = F.interpolate( - pos_embed_weight, size=input_shape, mode=mode) - pos_embed_weight = torch.flatten(pos_embed_weight, - 2).transpose(1, 2).contiguous() - pos_embed = pos_embed_weight - - return pos_embed - - def forward(self, x, hw_shape, mode='bilinear'): - pos_embed = self.resize_pos_embed(self.pos_embed, hw_shape, mode) - return self.drop(x + pos_embed) - - -@MODELS.register_module() -class PyramidVisionTransformer(BaseModule): - """Pyramid Vision Transformer (PVT) - - Implementation of `Pyramid Vision Transformer: A Versatile Backbone for - Dense Prediction without Convolutions - `_. - - Args: - pretrain_img_size (int | tuple[int]): The size of input image when - pretrain. Defaults: 224. - in_channels (int): Number of input channels. Default: 3. - embed_dims (int): Embedding dimension. Default: 64. - num_stags (int): The num of stages. Default: 4. - num_layers (Sequence[int]): The layer number of each transformer encode - layer. Default: [3, 4, 6, 3]. - num_heads (Sequence[int]): The attention heads of each transformer - encode layer. Default: [1, 2, 5, 8]. - patch_sizes (Sequence[int]): The patch_size of each patch embedding. - Default: [4, 2, 2, 2]. - strides (Sequence[int]): The stride of each patch embedding. - Default: [4, 2, 2, 2]. 
- paddings (Sequence[int]): The padding of each patch embedding. - Default: [0, 0, 0, 0]. - sr_ratios (Sequence[int]): The spatial reduction rate of each - transformer encode layer. Default: [8, 4, 2, 1]. - out_indices (Sequence[int] | int): Output from which stages. - Default: (0, 1, 2, 3). - mlp_ratios (Sequence[int]): The ratio of the mlp hidden dim to the - embedding dim of each transformer encode layer. - Default: [8, 8, 4, 4]. - qkv_bias (bool): Enable bias for qkv if True. Default: True. - drop_rate (float): Probability of an element to be zeroed. - Default 0.0. - attn_drop_rate (float): The drop out rate for attention layer. - Default 0.0. - drop_path_rate (float): stochastic depth rate. Default 0.1. - use_abs_pos_embed (bool): If True, add absolute position embedding to - the patch embedding. Defaults: True. - use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN. - Default: False. - act_cfg (dict): The activation config for FFNs. - Default: dict(type='GELU'). - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='LN'). - pretrained (str, optional): model pretrained path. Default: None. - convert_weights (bool): The flag indicates whether the - pre-trained model is from the original repo. We may need - to convert some keys to make it compatible. - Default: True. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None. - """ - - def __init__(self, - pretrain_img_size=224, - in_channels=3, - embed_dims=64, - num_stages=4, - num_layers=[3, 4, 6, 3], - num_heads=[1, 2, 5, 8], - patch_sizes=[4, 2, 2, 2], - strides=[4, 2, 2, 2], - paddings=[0, 0, 0, 0], - sr_ratios=[8, 4, 2, 1], - out_indices=(0, 1, 2, 3), - mlp_ratios=[8, 8, 4, 4], - qkv_bias=True, - drop_rate=0., - attn_drop_rate=0., - drop_path_rate=0.1, - use_abs_pos_embed=True, - norm_after_stage=False, - use_conv_ffn=False, - act_cfg=dict(type='GELU'), - norm_cfg=dict(type='LN', eps=1e-6), - pretrained=None, - convert_weights=True, - init_cfg=None): - super().__init__(init_cfg=init_cfg) - - self.convert_weights = convert_weights - if isinstance(pretrain_img_size, int): - pretrain_img_size = to_2tuple(pretrain_img_size) - elif isinstance(pretrain_img_size, tuple): - if len(pretrain_img_size) == 1: - pretrain_img_size = to_2tuple(pretrain_img_size[0]) - assert len(pretrain_img_size) == 2, \ - f'The size of image should have length 1 or 2, ' \ - f'but got {len(pretrain_img_size)}' - - assert not (init_cfg and pretrained), \ - 'init_cfg and pretrained cannot be setting at the same time' - if isinstance(pretrained, str): - warnings.warn('DeprecationWarning: pretrained is deprecated, ' - 'please use "init_cfg" instead') - self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) - elif pretrained is None: - self.init_cfg = init_cfg - else: - raise TypeError('pretrained must be a str or None') - - self.embed_dims = embed_dims - - self.num_stages = num_stages - self.num_layers = num_layers - self.num_heads = num_heads - self.patch_sizes = patch_sizes - self.strides = strides - self.sr_ratios = sr_ratios - assert num_stages == len(num_layers) == len(num_heads) \ - == len(patch_sizes) == len(strides) == len(sr_ratios) - - self.out_indices = out_indices - assert max(out_indices) < self.num_stages - self.pretrained = pretrained - - # transformer encoder - dpr = [ - x.item() - for x in torch.linspace(0, drop_path_rate, sum(num_layers)) - ] # stochastic num_layer decay rule - - cur = 0 - self.layers = ModuleList() - for i, num_layer in enumerate(num_layers): - embed_dims_i 
= embed_dims * num_heads[i] - patch_embed = PatchEmbed( - in_channels=in_channels, - embed_dims=embed_dims_i, - kernel_size=patch_sizes[i], - stride=strides[i], - padding=paddings[i], - bias=True, - norm_cfg=norm_cfg) - - layers = ModuleList() - if use_abs_pos_embed: - pos_shape = pretrain_img_size // np.prod(patch_sizes[:i + 1]) - pos_embed = AbsolutePositionEmbedding( - pos_shape=pos_shape, - pos_dim=embed_dims_i, - drop_rate=drop_rate) - layers.append(pos_embed) - layers.extend([ - PVTEncoderLayer( - embed_dims=embed_dims_i, - num_heads=num_heads[i], - feedforward_channels=mlp_ratios[i] * embed_dims_i, - drop_rate=drop_rate, - attn_drop_rate=attn_drop_rate, - drop_path_rate=dpr[cur + idx], - qkv_bias=qkv_bias, - act_cfg=act_cfg, - norm_cfg=norm_cfg, - sr_ratio=sr_ratios[i], - use_conv_ffn=use_conv_ffn) for idx in range(num_layer) - ]) - in_channels = embed_dims_i - # The ret[0] of build_norm_layer is norm name. - if norm_after_stage: - norm = build_norm_layer(norm_cfg, embed_dims_i)[1] - else: - norm = nn.Identity() - self.layers.append(ModuleList([patch_embed, layers, norm])) - cur += num_layer - - def init_weights(self): - logger = MMLogger.get_current_instance() - if self.init_cfg is None: - logger.warn(f'No pre-trained weights for ' - f'{self.__class__.__name__}, ' - f'training start from scratch') - for m in self.modules(): - if isinstance(m, nn.Linear): - trunc_normal_init(m, std=.02, bias=0.) - elif isinstance(m, nn.LayerNorm): - constant_init(m, 1.0) - elif isinstance(m, nn.Conv2d): - fan_out = m.kernel_size[0] * m.kernel_size[ - 1] * m.out_channels - fan_out //= m.groups - normal_init(m, 0, math.sqrt(2.0 / fan_out)) - elif isinstance(m, AbsolutePositionEmbedding): - m.init_weights() - else: - assert 'checkpoint' in self.init_cfg, f'Only support ' \ - f'specify `Pretrained` in ' \ - f'`init_cfg` in ' \ - f'{self.__class__.__name__} ' - checkpoint = CheckpointLoader.load_checkpoint( - self.init_cfg.checkpoint, logger=logger, map_location='cpu') - logger.warn(f'Load pre-trained model for ' - f'{self.__class__.__name__} from original repo') - if 'state_dict' in checkpoint: - state_dict = checkpoint['state_dict'] - elif 'model' in checkpoint: - state_dict = checkpoint['model'] - else: - state_dict = checkpoint - if self.convert_weights: - # Because pvt backbones are not supported by mmcls, - # so we need to convert pre-trained weights to match this - # implementation. 
- state_dict = pvt_convert(state_dict) - load_state_dict(self, state_dict, strict=False, logger=logger) - - def forward(self, x): - outs = [] - - for i, layer in enumerate(self.layers): - x, hw_shape = layer[0](x) - - for block in layer[1]: - x = block(x, hw_shape) - x = layer[2](x) - x = nlc_to_nchw(x, hw_shape) - if i in self.out_indices: - outs.append(x) - - return outs - - -@MODELS.register_module() -class PyramidVisionTransformerV2(PyramidVisionTransformer): - """Implementation of `PVTv2: Improved Baselines with Pyramid Vision - Transformer `_.""" - - def __init__(self, **kwargs): - super(PyramidVisionTransformerV2, self).__init__( - patch_sizes=[7, 3, 3, 3], - paddings=[3, 1, 1, 1], - use_abs_pos_embed=False, - norm_after_stage=True, - use_conv_ffn=True, - **kwargs) - - -def pvt_convert(ckpt): - new_ckpt = OrderedDict() - # Process the concat between q linear weights and kv linear weights - use_abs_pos_embed = False - use_conv_ffn = False - for k in ckpt.keys(): - if k.startswith('pos_embed'): - use_abs_pos_embed = True - if k.find('dwconv') >= 0: - use_conv_ffn = True - for k, v in ckpt.items(): - if k.startswith('head'): - continue - if k.startswith('norm.'): - continue - if k.startswith('cls_token'): - continue - if k.startswith('pos_embed'): - stage_i = int(k.replace('pos_embed', '')) - new_k = k.replace(f'pos_embed{stage_i}', - f'layers.{stage_i - 1}.1.0.pos_embed') - if stage_i == 4 and v.size(1) == 50: # 1 (cls token) + 7 * 7 - new_v = v[:, 1:, :] # remove cls token - else: - new_v = v - elif k.startswith('patch_embed'): - stage_i = int(k.split('.')[0].replace('patch_embed', '')) - new_k = k.replace(f'patch_embed{stage_i}', - f'layers.{stage_i - 1}.0') - new_v = v - if 'proj.' in new_k: - new_k = new_k.replace('proj.', 'projection.') - elif k.startswith('block'): - stage_i = int(k.split('.')[0].replace('block', '')) - layer_i = int(k.split('.')[1]) - new_layer_i = layer_i + use_abs_pos_embed - new_k = k.replace(f'block{stage_i}.{layer_i}', - f'layers.{stage_i - 1}.1.{new_layer_i}') - new_v = v - if 'attn.q.' in new_k: - sub_item_k = k.replace('q.', 'kv.') - new_k = new_k.replace('q.', 'attn.in_proj_') - new_v = torch.cat([v, ckpt[sub_item_k]], dim=0) - elif 'attn.kv.' in new_k: - continue - elif 'attn.proj.' in new_k: - new_k = new_k.replace('proj.', 'attn.out_proj.') - elif 'attn.sr.' in new_k: - new_k = new_k.replace('sr.', 'sr.') - elif 'mlp.' 
in new_k: - string = f'{new_k}-' - new_k = new_k.replace('mlp.', 'ffn.layers.') - if 'fc1.weight' in new_k or 'fc2.weight' in new_k: - new_v = v.reshape((*v.shape, 1, 1)) - new_k = new_k.replace('fc1.', '0.') - new_k = new_k.replace('dwconv.dwconv.', '1.') - if use_conv_ffn: - new_k = new_k.replace('fc2.', '4.') - else: - new_k = new_k.replace('fc2.', '3.') - string += f'{new_k} {v.shape}-{new_v.shape}' - elif k.startswith('norm'): - stage_i = int(k[4]) - new_k = k.replace(f'norm{stage_i}', f'layers.{stage_i - 1}.2') - new_v = v - else: - new_k = k - new_v = v - new_ckpt[new_k] = new_v - - return new_ckpt diff --git a/spaces/Lajonbot/Chatbot-Share/app.py b/spaces/Lajonbot/Chatbot-Share/app.py deleted file mode 100644 index ef93ebb7268f831a4e78b9d1f53474f639b6eed9..0000000000000000000000000000000000000000 --- a/spaces/Lajonbot/Chatbot-Share/app.py +++ /dev/null @@ -1,175 +0,0 @@ - -import soundfile as sf -import torch -from datetime import datetime -import random -import time -from datetime import datetime -import whisper -import torch -from transformers import AutoModelForCausalLM, AutoTokenizer, VitsModel -import torch -import numpy as np -import os -import argparse -import gradio as gr -from timeit import default_timer as timer -import torch -import numpy as np -import pandas as pd -import whisper - - -whisper_model = whisper.load_model("medium").to("cuda") -tts_model = VitsModel.from_pretrained("facebook/mms-tts-pol") -tts_model.to("cuda") -print("TTS Loaded!") - -tokenizer_tss = AutoTokenizer.from_pretrained("facebook/mms-tts-pol") - -def save_to_txt(text_to_save): - with open('prompt.txt', 'w', encoding='utf-8') as f: - f.write(text_to_save) - -def read_txt(): - with open('prompt.txt') as f: - lines = f.readlines() - return lines - - -##### Chat z LLAMA #### -##### Chat z LLAMA #### -##### Chat z LLAMA #### - - -def _load_model_tokenizer(): - model_id = 'tangger/Qwen-7B-Chat' - tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True) - model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto",trust_remote_code=True, fp16=True).eval() - return model, tokenizer - - -model, tokenizer = _load_model_tokenizer() -def postprocess(self, y): - if y is None: - return [] - for i, (message, response) in enumerate(y): - y[i] = ( - None if message is None else mdtex2html.convert(message), - None if response is None else mdtex2html.convert(response), - ) - return y - - -def _parse_text(text): - lines = text.split("\n") - lines = [line for line in lines if line != ""] - count = 0 - for i, line in enumerate(lines): - if "```" in line: - count += 1 - items = line.split("`") - if count % 2 == 1: - lines[i] = f'
<pre><code class="language-{items[-1]}">'
-            else:
-                lines[i] = f"<br></code></pre>
    " - else: - if i > 0: - if count % 2 == 1: - line = line.replace("`", r"\`") - line = line.replace("<", "<") - line = line.replace(">", ">") - line = line.replace(" ", " ") - line = line.replace("*", "*") - line = line.replace("_", "_") - line = line.replace("-", "-") - line = line.replace(".", ".") - line = line.replace("!", "!") - line = line.replace("(", "(") - line = line.replace(")", ")") - line = line.replace("$", "$") - lines[i] = "
    " + line - text = "".join(lines) - return text - -def predict(_query, _chatbot, _task_history): - print(f"User: {_parse_text(_query)}") - _chatbot.append((_parse_text(_query), "")) - full_response = "" - - for response in model.chat_stream(tokenizer, _query, history=_task_history,system = "Jesteś assystentem AI. Odpowiadaj zawsze w języku poslkim" ): - _chatbot[-1] = (_parse_text(_query), _parse_text(response)) - - yield _chatbot - full_response = _parse_text(response) - - print(f"History: {_task_history}") - _task_history.append((_query, full_response)) - print(f"Qwen-7B-Chat: {_parse_text(full_response)}") - -def read_text(text): - print("___Tekst do przeczytania!") - inputs = tokenizer_tss(text, return_tensors="pt").to("cuda") - with torch.no_grad(): - output = tts_model(**inputs).waveform.squeeze().cpu().numpy() - sf.write('temp_file.wav', output, tts_model.config.sampling_rate) - return 'temp_file.wav' - - -def update_audio(text): - return 'temp_file.wav' - -def translate(audio): - print("__Wysyłam nagranie do whisper!") - transcription = whisper_model.transcribe(audio, language="pl") - return transcription["text"] - - -def predict(audio, _chatbot, _task_history): - # Użyj funkcji translate, aby przekształcić audio w tekst - _query = translate(audio) - - print(f"____User: {_parse_text(_query)}") - _chatbot.append((_parse_text(_query), "")) - full_response = "" - - for response in model.chat_stream(tokenizer, - _query, - history= _task_history, - system = "Jesteś assystentem AI. Odpowiadaj zawsze w języku polskim. Odpowiadaj krótko."): - _chatbot[-1] = (_parse_text(_query), _parse_text(response)) - yield _chatbot - full_response = _parse_text(response) - - print(f"____History: {_task_history}") - _task_history.append((_query, full_response)) - print(f"__Qwen-7B-Chat: {_parse_text(full_response)}") - print("____full_response",full_response) - audio_file = read_text(_parse_text(full_response)) # Generowanie audio - return full_response - -def regenerate(_chatbot, _task_history): - if not _task_history: - yield _chatbot - return - item = _task_history.pop(-1) - _chatbot.pop(-1) - yield from predict(item[0], _chatbot, _task_history) - -with gr.Blocks() as chat_demo: - chatbot = gr.Chatbot(label='Llama Voice Chatbot', elem_classes="control-height") - query = gr.Textbox(lines=2, label='Input') - task_history = gr.State([]) - audio_output = gr.Audio('temp_file.wav', label="Generated Audio (wav)", type='filepath', autoplay=False) - - with gr.Row(): - submit_btn = gr.Button("🚀 Wyślij tekst") - - with gr.Row(): - audio_upload = gr.Audio(source="microphone", type="filepath", show_label=False) - submit_audio_btn = gr.Button("🎙️ Wyślij audio") - - submit_btn.click(predict, [query, chatbot, task_history], [chatbot], show_progress=True) - submit_audio_btn.click(predict, [audio_upload, chatbot, task_history], [chatbot], show_progress=True).then(update_audio, chatbot, audio_output) - -chat_demo.queue().launch() \ No newline at end of file diff --git a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/uvr5_pack/demucs/__init__.py b/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/uvr5_pack/demucs/__init__.py deleted file mode 100644 index d4182e356427e1b05a79f8da641c70bb732514fa..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_libs/uvr5_pack/demucs/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. 
-# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -__version__ = "2.0.3" diff --git a/spaces/LittleYuan/My-Real-Bot/scripts/pytorch2onnx.py b/spaces/LittleYuan/My-Real-Bot/scripts/pytorch2onnx.py deleted file mode 100644 index 09d99b2e0171265e70e7507ed8e882b616b449a1..0000000000000000000000000000000000000000 --- a/spaces/LittleYuan/My-Real-Bot/scripts/pytorch2onnx.py +++ /dev/null @@ -1,36 +0,0 @@ -import argparse -import torch -import torch.onnx -from basicsr.archs.rrdbnet_arch import RRDBNet - - -def main(args): - # An instance of the model - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) - if args.params: - keyname = 'params' - else: - keyname = 'params_ema' - model.load_state_dict(torch.load(args.input)[keyname]) - # set the train mode to false since we will only run the forward pass. - model.train(False) - model.cpu().eval() - - # An example input - x = torch.rand(1, 3, 64, 64) - # Export the model - with torch.no_grad(): - torch_out = torch.onnx._export(model, x, args.output, opset_version=11, export_params=True) - print(torch_out.shape) - - -if __name__ == '__main__': - """Convert pytorch model to onnx models""" - parser = argparse.ArgumentParser() - parser.add_argument( - '--input', type=str, default='experiments/pretrained_models/RealESRGAN_x4plus.pth', help='Input model path') - parser.add_argument('--output', type=str, default='realesrgan-x4.onnx', help='Output onnx path') - parser.add_argument('--params', action='store_false', help='Use params instead of params_ema') - args = parser.parse_args() - - main(args) diff --git a/spaces/MRiwu/Collection/transforms.py b/spaces/MRiwu/Collection/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/MRiwu/Collection/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - 
outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + 
input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/Marne/MockingBird/mockingbirdforuse/vocoder/hifigan/inference.py b/spaces/Marne/MockingBird/mockingbirdforuse/vocoder/hifigan/inference.py deleted file mode 100644 index 288d13eb05d0cc38b3ecb6a9dce7cc1e59c8dd54..0000000000000000000000000000000000000000 --- a/spaces/Marne/MockingBird/mockingbirdforuse/vocoder/hifigan/inference.py +++ /dev/null @@ -1,32 +0,0 @@ -import torch -from pathlib import Path - -from .hparams import hparams as hp -from .models import Generator -from ...log import logger - - -class HifiGanVocoder: - def __init__(self, model_path: Path): - torch.manual_seed(hp.seed) - self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - self.generator = Generator(hp).to(self._device) - - logger.debug("Loading '{}'".format(model_path)) - state_dict_g = torch.load(model_path, map_location=self._device) - logger.debug("Complete.") - - self.generator.load_state_dict(state_dict_g["generator"]) - self.generator.eval() - self.generator.remove_weight_norm() - - def infer_waveform(self, mel): - mel = torch.FloatTensor(mel).to(self._device) - mel = mel.unsqueeze(0) - - with torch.no_grad(): - y_g_hat = self.generator(mel) - audio = y_g_hat.squeeze() - audio = audio.cpu().numpy() - - return audio, hp.sampling_rate diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/losses/accuracy.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/losses/accuracy.py deleted file mode 100644 index c0fd2e7e74a0f721c4a814c09d6e453e5956bb38..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/losses/accuracy.py +++ /dev/null @@ -1,78 +0,0 @@ -import torch.nn as nn - - -def accuracy(pred, target, topk=1, thresh=None): - """Calculate accuracy according to the prediction and target. - - Args: - pred (torch.Tensor): The model prediction, shape (N, num_class, ...) - target (torch.Tensor): The target of each prediction, shape (N, , ...) - topk (int | tuple[int], optional): If the predictions in ``topk`` - matches the target, the predictions will be regarded as - correct ones. 
Defaults to 1. - thresh (float, optional): If not None, predictions with scores under - this threshold are considered incorrect. Default to None. - - Returns: - float | tuple[float]: If the input ``topk`` is a single integer, - the function will return a single float as accuracy. If - ``topk`` is a tuple containing multiple integers, the - function will return a tuple containing accuracies of - each ``topk`` number. - """ - assert isinstance(topk, (int, tuple)) - if isinstance(topk, int): - topk = (topk, ) - return_single = True - else: - return_single = False - - maxk = max(topk) - if pred.size(0) == 0: - accu = [pred.new_tensor(0.) for i in range(len(topk))] - return accu[0] if return_single else accu - assert pred.ndim == target.ndim + 1 - assert pred.size(0) == target.size(0) - assert maxk <= pred.size(1), \ - f'maxk {maxk} exceeds pred dimension {pred.size(1)}' - pred_value, pred_label = pred.topk(maxk, dim=1) - # transpose to shape (maxk, N, ...) - pred_label = pred_label.transpose(0, 1) - correct = pred_label.eq(target.unsqueeze(0).expand_as(pred_label)) - if thresh is not None: - # Only prediction values larger than thresh are counted as correct - correct = correct & (pred_value > thresh).t() - res = [] - for k in topk: - correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) - res.append(correct_k.mul_(100.0 / target.numel())) - return res[0] if return_single else res - - -class Accuracy(nn.Module): - """Accuracy calculation module.""" - - def __init__(self, topk=(1, ), thresh=None): - """Module to calculate the accuracy. - - Args: - topk (tuple, optional): The criterion used to calculate the - accuracy. Defaults to (1,). - thresh (float, optional): If not None, predictions with scores - under this threshold are considered incorrect. Default to None. - """ - super().__init__() - self.topk = topk - self.thresh = thresh - - def forward(self, pred, target): - """Forward function to calculate accuracy. - - Args: - pred (torch.Tensor): Prediction of models. - target (torch.Tensor): Target for each prediction. - - Returns: - tuple[float]: The accuracies under different topk criterions. 
- """ - return accuracy(pred, target, self.topk, self.thresh) diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/ldm/modules/encoders/__init__.py b/spaces/Mellow-ai/PhotoAI_Mellow/ldm/modules/encoders/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Mileena/PIFu-Clothed-Human-Digitization/PIFu/lib/renderer/mesh.py b/spaces/Mileena/PIFu-Clothed-Human-Digitization/PIFu/lib/renderer/mesh.py deleted file mode 100644 index a76ec5838d08d109dc24f58ca8ef3aff2ade552b..0000000000000000000000000000000000000000 --- a/spaces/Mileena/PIFu-Clothed-Human-Digitization/PIFu/lib/renderer/mesh.py +++ /dev/null @@ -1,345 +0,0 @@ -import numpy as np - - -def save_obj_mesh(mesh_path, verts, faces): - file = open(mesh_path, 'w') - for v in verts: - file.write('v %.4f %.4f %.4f\n' % (v[0], v[1], v[2])) - for f in faces: - f_plus = f + 1 - file.write('f %d %d %d\n' % (f_plus[0], f_plus[1], f_plus[2])) - file.close() - -# https://github.com/ratcave/wavefront_reader -def read_mtlfile(fname): - materials = {} - with open(fname) as f: - lines = f.read().splitlines() - - for line in lines: - if line: - split_line = line.strip().split(' ', 1) - if len(split_line) < 2: - continue - - prefix, data = split_line[0], split_line[1] - if 'newmtl' in prefix: - material = {} - materials[data] = material - elif materials: - if data: - split_data = data.strip().split(' ') - - # assume texture maps are in the same level - # WARNING: do not include space in your filename!! - if 'map' in prefix: - material[prefix] = split_data[-1].split('\\')[-1] - elif len(split_data) > 1: - material[prefix] = tuple(float(d) for d in split_data) - else: - try: - material[prefix] = int(data) - except ValueError: - material[prefix] = float(data) - - return materials - - -def load_obj_mesh_mtl(mesh_file): - vertex_data = [] - norm_data = [] - uv_data = [] - - face_data = [] - face_norm_data = [] - face_uv_data = [] - - # face per material - face_data_mat = {} - face_norm_data_mat = {} - face_uv_data_mat = {} - - # current material name - mtl_data = None - cur_mat = None - - if isinstance(mesh_file, str): - f = open(mesh_file, "r") - else: - f = mesh_file - for line in f: - if isinstance(line, bytes): - line = line.decode("utf-8") - if line.startswith('#'): - continue - values = line.split() - if not values: - continue - - if values[0] == 'v': - v = list(map(float, values[1:4])) - vertex_data.append(v) - elif values[0] == 'vn': - vn = list(map(float, values[1:4])) - norm_data.append(vn) - elif values[0] == 'vt': - vt = list(map(float, values[1:3])) - uv_data.append(vt) - elif values[0] == 'mtllib': - mtl_data = read_mtlfile(mesh_file.replace(mesh_file.split('/')[-1],values[1])) - elif values[0] == 'usemtl': - cur_mat = values[1] - elif values[0] == 'f': - # local triangle data - l_face_data = [] - l_face_uv_data = [] - l_face_norm_data = [] - - # quad mesh - if len(values) > 4: - f = list(map(lambda x: int(x.split('/')[0]) if int(x.split('/')[0]) < 0 else int(x.split('/')[0])-1, values[1:4])) - l_face_data.append(f) - f = list(map(lambda x: int(x.split('/')[0]) if int(x.split('/')[0]) < 0 else int(x.split('/')[0])-1, [values[3], values[4], values[1]])) - l_face_data.append(f) - # tri mesh - else: - f = list(map(lambda x: int(x.split('/')[0]) if int(x.split('/')[0]) < 0 else int(x.split('/')[0])-1, values[1:4])) - l_face_data.append(f) - # deal with texture - if len(values[1].split('/')) >= 2: - # quad mesh - if len(values) > 4: - f = list(map(lambda x: 
int(x.split('/')[1]) if int(x.split('/')[1]) < 0 else int(x.split('/')[1])-1, values[1:4])) - l_face_uv_data.append(f) - f = list(map(lambda x: int(x.split('/')[1]) if int(x.split('/')[1]) < 0 else int(x.split('/')[1])-1, [values[3], values[4], values[1]])) - l_face_uv_data.append(f) - # tri mesh - elif len(values[1].split('/')[1]) != 0: - f = list(map(lambda x: int(x.split('/')[1]) if int(x.split('/')[1]) < 0 else int(x.split('/')[1])-1, values[1:4])) - l_face_uv_data.append(f) - # deal with normal - if len(values[1].split('/')) == 3: - # quad mesh - if len(values) > 4: - f = list(map(lambda x: int(x.split('/')[2]) if int(x.split('/')[2]) < 0 else int(x.split('/')[2])-1, values[1:4])) - l_face_norm_data.append(f) - f = list(map(lambda x: int(x.split('/')[2]) if int(x.split('/')[2]) < 0 else int(x.split('/')[2])-1, [values[3], values[4], values[1]])) - l_face_norm_data.append(f) - # tri mesh - elif len(values[1].split('/')[2]) != 0: - f = list(map(lambda x: int(x.split('/')[2]) if int(x.split('/')[2]) < 0 else int(x.split('/')[2])-1, values[1:4])) - l_face_norm_data.append(f) - - face_data += l_face_data - face_uv_data += l_face_uv_data - face_norm_data += l_face_norm_data - - if cur_mat is not None: - if cur_mat not in face_data_mat.keys(): - face_data_mat[cur_mat] = [] - if cur_mat not in face_uv_data_mat.keys(): - face_uv_data_mat[cur_mat] = [] - if cur_mat not in face_norm_data_mat.keys(): - face_norm_data_mat[cur_mat] = [] - face_data_mat[cur_mat] += l_face_data - face_uv_data_mat[cur_mat] += l_face_uv_data - face_norm_data_mat[cur_mat] += l_face_norm_data - - vertices = np.array(vertex_data) - faces = np.array(face_data) - - norms = np.array(norm_data) - norms = normalize_v3(norms) - face_normals = np.array(face_norm_data) - - uvs = np.array(uv_data) - face_uvs = np.array(face_uv_data) - - out_tuple = (vertices, faces, norms, face_normals, uvs, face_uvs) - - if cur_mat is not None and mtl_data is not None: - for key in face_data_mat: - face_data_mat[key] = np.array(face_data_mat[key]) - face_uv_data_mat[key] = np.array(face_uv_data_mat[key]) - face_norm_data_mat[key] = np.array(face_norm_data_mat[key]) - - out_tuple += (face_data_mat, face_norm_data_mat, face_uv_data_mat, mtl_data) - - return out_tuple - - -def load_obj_mesh(mesh_file, with_normal=False, with_texture=False): - vertex_data = [] - norm_data = [] - uv_data = [] - - face_data = [] - face_norm_data = [] - face_uv_data = [] - - if isinstance(mesh_file, str): - f = open(mesh_file, "r") - else: - f = mesh_file - for line in f: - if isinstance(line, bytes): - line = line.decode("utf-8") - if line.startswith('#'): - continue - values = line.split() - if not values: - continue - - if values[0] == 'v': - v = list(map(float, values[1:4])) - vertex_data.append(v) - elif values[0] == 'vn': - vn = list(map(float, values[1:4])) - norm_data.append(vn) - elif values[0] == 'vt': - vt = list(map(float, values[1:3])) - uv_data.append(vt) - - elif values[0] == 'f': - # quad mesh - if len(values) > 4: - f = list(map(lambda x: int(x.split('/')[0]), values[1:4])) - face_data.append(f) - f = list(map(lambda x: int(x.split('/')[0]), [values[3], values[4], values[1]])) - face_data.append(f) - # tri mesh - else: - f = list(map(lambda x: int(x.split('/')[0]), values[1:4])) - face_data.append(f) - - # deal with texture - if len(values[1].split('/')) >= 2: - # quad mesh - if len(values) > 4: - f = list(map(lambda x: int(x.split('/')[1]), values[1:4])) - face_uv_data.append(f) - f = list(map(lambda x: int(x.split('/')[1]), [values[3], values[4], 
values[1]])) - face_uv_data.append(f) - # tri mesh - elif len(values[1].split('/')[1]) != 0: - f = list(map(lambda x: int(x.split('/')[1]), values[1:4])) - face_uv_data.append(f) - # deal with normal - if len(values[1].split('/')) == 3: - # quad mesh - if len(values) > 4: - f = list(map(lambda x: int(x.split('/')[2]), values[1:4])) - face_norm_data.append(f) - f = list(map(lambda x: int(x.split('/')[2]), [values[3], values[4], values[1]])) - face_norm_data.append(f) - # tri mesh - elif len(values[1].split('/')[2]) != 0: - f = list(map(lambda x: int(x.split('/')[2]), values[1:4])) - face_norm_data.append(f) - - vertices = np.array(vertex_data) - faces = np.array(face_data) - 1 - - if with_texture and with_normal: - uvs = np.array(uv_data) - face_uvs = np.array(face_uv_data) - 1 - norms = np.array(norm_data) - if norms.shape[0] == 0: - norms = compute_normal(vertices, faces) - face_normals = faces - else: - norms = normalize_v3(norms) - face_normals = np.array(face_norm_data) - 1 - return vertices, faces, norms, face_normals, uvs, face_uvs - - if with_texture: - uvs = np.array(uv_data) - face_uvs = np.array(face_uv_data) - 1 - return vertices, faces, uvs, face_uvs - - if with_normal: - norms = np.array(norm_data) - norms = normalize_v3(norms) - face_normals = np.array(face_norm_data) - 1 - return vertices, faces, norms, face_normals - - return vertices, faces - - -def normalize_v3(arr): - ''' Normalize a numpy array of 3 component vectors shape=(n,3) ''' - lens = np.sqrt(arr[:, 0] ** 2 + arr[:, 1] ** 2 + arr[:, 2] ** 2) - eps = 0.00000001 - lens[lens < eps] = eps - arr[:, 0] /= lens - arr[:, 1] /= lens - arr[:, 2] /= lens - return arr - - -def compute_normal(vertices, faces): - # Create a zeroed array with the same type and shape as our vertices i.e., per vertex normal - norm = np.zeros(vertices.shape, dtype=vertices.dtype) - # Create an indexed view into the vertex array using the array of three indices for triangles - tris = vertices[faces] - # Calculate the normal for all the triangles, by taking the cross product of the vectors v1-v0, and v2-v0 in each triangle - n = np.cross(tris[::, 1] - tris[::, 0], tris[::, 2] - tris[::, 0]) - # n is now an array of normals per triangle. The length of each normal is dependent the vertices, - # we need to normalize these, so that our next step weights each normal equally. - normalize_v3(n) - # now we have a normalized array of normals, one per triangle, i.e., per triangle normals. - # But instead of one per triangle (i.e., flat shading), we add to each vertex in that triangle, - # the triangles' normal. Multiple triangles would then contribute to every vertex, so we need to normalize again afterwards. 
- # The cool part, we can actually add the normals through an indexed view of our (zeroed) per vertex normal array - norm[faces[:, 0]] += n - norm[faces[:, 1]] += n - norm[faces[:, 2]] += n - normalize_v3(norm) - - return norm - -# compute tangent and bitangent -def compute_tangent(vertices, faces, normals, uvs, faceuvs): - # NOTE: this could be numerically unstable around [0,0,1] - # but other current solutions are pretty freaky somehow - c1 = np.cross(normals, np.array([0,1,0.0])) - tan = c1 - normalize_v3(tan) - btan = np.cross(normals, tan) - - # NOTE: traditional version is below - - # pts_tris = vertices[faces] - # uv_tris = uvs[faceuvs] - - # W = np.stack([pts_tris[::, 1] - pts_tris[::, 0], pts_tris[::, 2] - pts_tris[::, 0]],2) - # UV = np.stack([uv_tris[::, 1] - uv_tris[::, 0], uv_tris[::, 2] - uv_tris[::, 0]], 1) - - # for i in range(W.shape[0]): - # W[i,::] = W[i,::].dot(np.linalg.inv(UV[i,::])) - - # tan = np.zeros(vertices.shape, dtype=vertices.dtype) - # tan[faces[:,0]] += W[:,:,0] - # tan[faces[:,1]] += W[:,:,0] - # tan[faces[:,2]] += W[:,:,0] - - # btan = np.zeros(vertices.shape, dtype=vertices.dtype) - # btan[faces[:,0]] += W[:,:,1] - # btan[faces[:,1]] += W[:,:,1] - # btan[faces[:,2]] += W[:,:,1] - - # normalize_v3(tan) - - # ndott = np.sum(normals*tan, 1, keepdims=True) - # tan = tan - ndott * normals - - # normalize_v3(btan) - # normalize_v3(tan) - - # tan[np.sum(np.cross(normals, tan) * btan, 1) < 0,:] *= -1.0 - - return tan, btan - -if __name__ == '__main__': - pts, tri, nml, trin, uvs, triuv = load_obj_mesh('/home/ICT2000/ssaito/Documents/Body/tmp/Baseball_Pitching/0012.obj', True, True) - compute_tangent(pts, tri, uvs, triuv) \ No newline at end of file diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/transforms/wrappers.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/transforms/wrappers.py deleted file mode 100644 index 086edb759b20c20a94fe8d7139350ba22a636c03..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/datasets/transforms/wrappers.py +++ /dev/null @@ -1,343 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings -from typing import Any, Dict, List, Optional, Tuple, Union - -import imgaug -import imgaug.augmenters as iaa -import numpy as np -import torchvision.transforms as torchvision_transforms -from mmcv.transforms import Compose -from mmcv.transforms.base import BaseTransform -from PIL import Image - -from mmocr.registry import TRANSFORMS -from mmocr.utils import poly2bbox - - -@TRANSFORMS.register_module() -class ImgAugWrapper(BaseTransform): - """A wrapper around imgaug https://github.com/aleju/imgaug. - - Find available augmenters at - https://imgaug.readthedocs.io/en/latest/source/overview_of_augmenters.html. - - Required Keys: - - - img - - gt_polygons (optional for text recognition) - - gt_bboxes (optional for text recognition) - - gt_bboxes_labels (optional for text recognition) - - gt_ignored (optional for text recognition) - - gt_texts (optional) - - Modified Keys: - - - img - - gt_polygons (optional for text recognition) - - gt_bboxes (optional for text recognition) - - gt_bboxes_labels (optional for text recognition) - - gt_ignored (optional for text recognition) - - img_shape (optional) - - gt_texts (optional) - - Args: - args (list[list or dict]], optional): The argumentation list. For - details, please refer to imgaug document. Take - args=[['Fliplr', 0.5], dict(cls='Affine', rotate=[-10, 10]), - ['Resize', [0.5, 3.0]]] as an example. 
The args horizontally flip - images with probability 0.5, followed by random rotation with - angles in range [-10, 10], and resize with an independent scale in - range [0.5, 3.0] for each side of images. Defaults to None. - fix_poly_trans (dict): The transform configuration to fix invalid - polygons. Set it to None if no fixing is needed. - Defaults to dict(type='FixInvalidPolygon'). - """ - - def __init__( - self, - args: Optional[List[Union[List, Dict]]] = None, - fix_poly_trans: Optional[dict] = dict(type='FixInvalidPolygon') - ) -> None: - assert args is None or isinstance(args, list) and len(args) > 0 - if args is not None: - for arg in args: - assert isinstance(arg, (list, dict)), \ - 'args should be a list of list or dict' - self.args = args - self.augmenter = self._build_augmentation(args) - self.fix_poly_trans = fix_poly_trans - if fix_poly_trans is not None: - self.fix = TRANSFORMS.build(fix_poly_trans) - - def transform(self, results: Dict) -> Dict: - """Transform the image and annotation data. - - Args: - results (dict): Result dict containing the data to transform. - - Returns: - dict: The transformed data. - """ - # img is bgr - image = results['img'] - aug = None - ori_shape = image.shape - - if self.augmenter: - aug = self.augmenter.to_deterministic() - if not self._augment_annotations(aug, ori_shape, results): - return None - results['img'] = aug.augment_image(image) - results['img_shape'] = (results['img'].shape[0], - results['img'].shape[1]) - if getattr(self, 'fix', None) is not None: - results = self.fix(results) - return results - - def _augment_annotations(self, aug: imgaug.augmenters.meta.Augmenter, - ori_shape: Tuple[int, - int], results: Dict) -> Dict: - """Augment annotations following the pre-defined augmentation sequence. - - Args: - aug (imgaug.augmenters.meta.Augmenter): The imgaug augmenter. - ori_shape (tuple[int, int]): The ori_shape of the original image. - results (dict): Result dict containing annotations to transform. - - Returns: - bool: Whether the transformation has been successfully applied. If - the transform results in empty polygon/bbox annotations, return - False. - """ - # Assume co-existence of `gt_polygons`, `gt_bboxes` and `gt_ignored` - # for text detection - if 'gt_polygons' in results: - - # augment polygons - transformed_polygons, removed_poly_inds = self._augment_polygons( - aug, ori_shape, results['gt_polygons']) - if len(transformed_polygons) == 0: - return False - results['gt_polygons'] = transformed_polygons - - # remove instances that are no longer inside the augmented image - results['gt_bboxes_labels'] = np.delete( - results['gt_bboxes_labels'], removed_poly_inds, axis=0) - results['gt_ignored'] = np.delete( - results['gt_ignored'], removed_poly_inds, axis=0) - # TODO: deal with gt_texts corresponding to clipped polygons - if 'gt_texts' in results: - results['gt_texts'] = [ - text for i, text in enumerate(results['gt_texts']) - if i not in removed_poly_inds - ] - - # Generate new bboxes - bboxes = [poly2bbox(poly) for poly in transformed_polygons] - results['gt_bboxes'] = np.zeros((0, 4), dtype=np.float32) - if len(bboxes) > 0: - results['gt_bboxes'] = np.stack(bboxes) - - return True - - def _augment_polygons(self, aug: imgaug.augmenters.meta.Augmenter, - ori_shape: Tuple[int, int], polys: List[np.ndarray] - ) -> Tuple[List[np.ndarray], List[int]]: - """Augment polygons. - - Args: - aug (imgaug.augmenters.meta.Augmenter): The imgaug augmenter. - ori_shape (tuple[int, int]): The shape of the original image. 
- polys (list[np.ndarray]): The polygons to be augmented. - - Returns: - tuple(list[np.ndarray], list[int]): The augmented polygons, and the - indices of polygons removed as they are out of the augmented image. - """ - imgaug_polys = [] - for poly in polys: - poly = poly.reshape(-1, 2) - imgaug_polys.append(imgaug.Polygon(poly)) - imgaug_polys = aug.augment_polygons( - [imgaug.PolygonsOnImage(imgaug_polys, shape=ori_shape)])[0] - - new_polys = [] - removed_poly_inds = [] - for i, poly in enumerate(imgaug_polys.polygons): - # Sometimes imgaug may produce some invalid polygons with no points - if not poly.is_valid or poly.is_out_of_image(imgaug_polys.shape): - removed_poly_inds.append(i) - continue - new_poly = [] - try: - poly = poly.clip_out_of_image(imgaug_polys.shape)[0] - except Exception as e: - warnings.warn(f'Failed to clip polygon out of image: {e}') - for point in poly: - new_poly.append(np.array(point, dtype=np.float32)) - new_poly = np.array(new_poly, dtype=np.float32).flatten() - # Under some conditions, imgaug can generate "polygon" with only - # two points, which is not a valid polygon. - if len(new_poly) <= 4: - removed_poly_inds.append(i) - continue - new_polys.append(new_poly) - - return new_polys, removed_poly_inds - - def _build_augmentation(self, args, root=True): - """Build ImgAugWrapper augmentations. - - Args: - args (dict): Arguments to be passed to imgaug. - root (bool): Whether it's building the root augmenter. - - Returns: - imgaug.augmenters.meta.Augmenter: The built augmenter. - """ - if args is None: - return None - if isinstance(args, (int, float, str)): - return args - if isinstance(args, list): - if root: - sequence = [ - self._build_augmentation(value, root=False) - for value in args - ] - return iaa.Sequential(sequence) - arg_list = [self._to_tuple_if_list(a) for a in args[1:]] - return getattr(iaa, args[0])(*arg_list) - if isinstance(args, dict): - if 'cls' in args: - cls = getattr(iaa, args['cls']) - return cls( - **{ - k: self._to_tuple_if_list(v) - for k, v in args.items() if not k == 'cls' - }) - else: - return { - key: self._build_augmentation(value, root=False) - for key, value in args.items() - } - raise RuntimeError('unknown augmenter arg: ' + str(args)) - - def _to_tuple_if_list(self, obj: Any) -> Any: - """Convert an object into a tuple if it is a list.""" - if isinstance(obj, list): - return tuple(obj) - return obj - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(args = {self.args}, ' - repr_str += f'fix_poly_trans = {self.fix_poly_trans})' - return repr_str - - -@TRANSFORMS.register_module() -class TorchVisionWrapper(BaseTransform): - """A wrapper around torchvision transforms. It applies specific transform - to ``img`` and updates ``height`` and ``width`` accordingly. - - Required Keys: - - - img (ndarray): The input image. - - Modified Keys: - - - img (ndarray): The modified image. - - img_shape (tuple(int, int)): The shape of the image in (height, width). - - - Warning: - This transform only affects the image but not its associated - annotations, such as word bounding boxes and polygons. Therefore, - it may only be applicable to text recognition tasks. - - Args: - op (str): The name of any transform class in - :func:`torchvision.transforms`. - **kwargs: Arguments that will be passed to initializer of torchvision - transform. 
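    For instance, a pipeline entry along these lines would wrap torchvision's
    ``ColorJitter`` (the values are purely illustrative and not taken from any
    shipped config):

        dict(type='TorchVisionWrapper', op='ColorJitter',
             brightness=0.5, contrast=0.5, saturation=0.5)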
- """ - - def __init__(self, op: str, **kwargs) -> None: - assert isinstance(op, str) - obj_cls = getattr(torchvision_transforms, op) - self.torchvision = obj_cls(**kwargs) - self.op = op - self.kwargs = kwargs - - def transform(self, results): - """Transform the image. - - Args: - results (dict): Result dict from the data loader. - - Returns: - dict: Transformed results. - """ - assert 'img' in results - # BGR -> RGB - img = results['img'][..., ::-1] - img = Image.fromarray(img) - img = self.torchvision(img) - img = np.asarray(img) - img = img[..., ::-1] - results['img'] = img - results['img_shape'] = img.shape[:2] - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(op = {self.op}' - for k, v in self.kwargs.items(): - repr_str += f', {k} = {v}' - repr_str += ')' - return repr_str - - -@TRANSFORMS.register_module() -class ConditionApply(BaseTransform): - """Apply transforms according to the condition. If the condition is met, - true_transforms will be applied, otherwise false_transforms will be - applied. - - Args: - condition (str): The string that can be evaluated to a boolean value. - true_transforms (list[dict]): Transforms to be applied if the condition - is met. Defaults to []. - false_transforms (list[dict]): Transforms to be applied if the - condition is not met. Defaults to []. - """ - - def __init__(self, - condition: str, - true_transforms: Union[Dict, List[Dict]] = [], - false_transforms: Union[Dict, List[Dict]] = []): - self.condition = condition - self.true_transforms = Compose(true_transforms) - self.false_transforms = Compose(false_transforms) - - def transform(self, results: Dict) -> Optional[Dict]: - """Transform the image. - - Args: - results (dict):Result dict containing the data to transform. - - Returns: - dict: Transformed results. - """ - if eval(self.condition): - return self.true_transforms(results) # type: ignore - else: - return self.false_transforms(results) - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(condition = {self.condition}, ' - repr_str += f'true_transforms = {self.true_transforms}, ' - repr_str += f'false_transforms = {self.false_transforms})' - return repr_str diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textdet/postprocessors/fce_postprocessor.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textdet/postprocessors/fce_postprocessor.py deleted file mode 100644 index b6c49bf433224284da715c1589a3041fe445bb97..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textdet/postprocessors/fce_postprocessor.py +++ /dev/null @@ -1,239 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Dict, List, Sequence - -import cv2 -import numpy as np -import torch -from mmengine.structures import InstanceData -from numpy.fft import ifft - -from mmocr.registry import MODELS -from mmocr.structures import TextDetDataSample -from mmocr.utils import fill_hole -from .base import BaseTextDetPostProcessor - - -@MODELS.register_module() -class FCEPostprocessor(BaseTextDetPostProcessor): - """Decoding predictions of FCENet to instances. - - Args: - fourier_degree (int): The maximum Fourier transform degree k. - num_reconstr_points (int): The points number of the polygon - reconstructed from predicted Fourier coefficients. - rescale_fields (list[str]): The bbox/polygon field names to - be rescaled. If None, no rescaling will be performed. Defaults to - ['polygons']. - scales (list[int]) : The down-sample scale of each layer. 
Defaults - to [8, 16, 32]. - text_repr_type (str): Boundary encoding type 'poly' or 'quad'. Defaults - to 'poly'. - alpha (float): The parameter to calculate final scores - :math:`Score_{final} = (Score_{text region} ^ alpha) - * (Score_{text center_region}^ beta)`. Defaults to 1.0. - beta (float): The parameter to calculate final score. Defaults to 2.0. - score_thr (float): The threshold used to filter out the final - candidates.Defaults to 0.3. - nms_thr (float): The threshold of nms. Defaults to 0.1. - """ - - def __init__(self, - fourier_degree: int, - num_reconstr_points: int, - rescale_fields: Sequence[str] = ['polygons'], - scales: Sequence[int] = [8, 16, 32], - text_repr_type: str = 'poly', - alpha: float = 1.0, - beta: float = 2.0, - score_thr: float = 0.3, - nms_thr: float = 0.1, - **kwargs) -> None: - super().__init__( - text_repr_type=text_repr_type, - rescale_fields=rescale_fields, - **kwargs) - self.fourier_degree = fourier_degree - self.num_reconstr_points = num_reconstr_points - self.scales = scales - self.alpha = alpha - self.beta = beta - self.score_thr = score_thr - self.nms_thr = nms_thr - - def split_results(self, pred_results: List[Dict]) -> List[List[Dict]]: - """Split batched elements in pred_results along the first dimension - into ``batch_num`` sub-elements and regather them into a list of dicts. - - Args: - pred_results (list[dict]): A list of dict with keys of ``cls_res``, - ``reg_res`` corresponding to the classification result and - regression result computed from the input tensor with the - same index. They have the shapes of :math:`(N, C_{cls,i}, - H_i, W_i)` and :math:`(N, C_{out,i}, H_i, W_i)`. - - Returns: - list[list[dict]]: N lists. Each list contains three dicts from - different feature level. - """ - assert isinstance(pred_results, list) and len(pred_results) == len( - self.scales) - - fields = list(pred_results[0].keys()) - batch_num = len(pred_results[0][fields[0]]) - level_num = len(pred_results) - results = [] - for i in range(batch_num): - batch_list = [] - for level in range(level_num): - feat_dict = {} - for field in fields: - feat_dict[field] = pred_results[level][field][i] - batch_list.append(feat_dict) - results.append(batch_list) - return results - - def get_text_instances(self, pred_results: Sequence[Dict], - data_sample: TextDetDataSample - ) -> TextDetDataSample: - """Get text instance predictions of one image. - - Args: - pred_results (List[dict]): A list of dict with keys of ``cls_res``, - ``reg_res`` corresponding to the classification result and - regression result computed from the input tensor with the - same index. They have the shapes of :math:`(N, C_{cls,i}, H_i, - W_i)` and :math:`(N, C_{out,i}, H_i, W_i)`. - data_sample (TextDetDataSample): Datasample of an image. - - Returns: - TextDetDataSample: A new DataSample with predictions filled in. - Polygons and results are saved in - ``TextDetDataSample.pred_instances.polygons``. The confidence - scores are saved in ``TextDetDataSample.pred_instances.scores``. - """ - assert len(pred_results) == len(self.scales) - data_sample.pred_instances = InstanceData() - data_sample.pred_instances.polygons = [] - data_sample.pred_instances.scores = [] - - result_polys = [] - result_scores = [] - for idx, pred_result in enumerate(pred_results): - # TODO: Scale can be calculated given image shape and feature - # shape. This param can be removed in the future. 
- polygons, scores = self._get_text_instances_single( - pred_result, self.scales[idx]) - result_polys += polygons - result_scores += scores - result_polys, result_scores = self.poly_nms(result_polys, - result_scores, - self.nms_thr) - for result_poly, result_score in zip(result_polys, result_scores): - result_poly = np.array(result_poly, dtype=np.float32) - data_sample.pred_instances.polygons.append(result_poly) - data_sample.pred_instances.scores.append(result_score) - data_sample.pred_instances.scores = torch.FloatTensor( - data_sample.pred_instances.scores) - - return data_sample - - def _get_text_instances_single(self, pred_result: Dict, scale: int): - """Get text instance predictions from one feature level. - - Args: - pred_result (dict): A dict with keys of ``cls_res``, ``reg_res`` - corresponding to the classification result and regression - result computed from the input tensor with the same index. - They have the shapes of :math:`(1, C_{cls,i}, H_i, W_i)` and - :math:`(1, C_{out,i}, H_i, W_i)`. - scale (int): Scale of current feature map which equals to - img_size / feat_size. - - Returns: - result_polys (list[ndarray]): A list of polygons after postprocess. - result_scores (list[ndarray]): A list of scores after postprocess. - """ - - cls_pred = pred_result['cls_res'] - tr_pred = cls_pred[0:2].softmax(dim=0).data.cpu().numpy() - tcl_pred = cls_pred[2:].softmax(dim=0).data.cpu().numpy() - - reg_pred = pred_result['reg_res'].permute(1, 2, 0).data.cpu().numpy() - x_pred = reg_pred[:, :, :2 * self.fourier_degree + 1] - y_pred = reg_pred[:, :, 2 * self.fourier_degree + 1:] - - score_pred = (tr_pred[1]**self.alpha) * (tcl_pred[1]**self.beta) - tr_pred_mask = (score_pred) > self.score_thr - tr_mask = fill_hole(tr_pred_mask) - - tr_contours, _ = cv2.findContours( - tr_mask.astype(np.uint8), cv2.RETR_TREE, - cv2.CHAIN_APPROX_SIMPLE) # opencv4 - - mask = np.zeros_like(tr_mask) - - result_polys = [] - result_scores = [] - for cont in tr_contours: - deal_map = mask.copy().astype(np.int8) - cv2.drawContours(deal_map, [cont], -1, 1, -1) - - score_map = score_pred * deal_map - score_mask = score_map > 0 - xy_text = np.argwhere(score_mask) - dxy = xy_text[:, 1] + xy_text[:, 0] * 1j - - x, y = x_pred[score_mask], y_pred[score_mask] - c = x + y * 1j - c[:, self.fourier_degree] = c[:, self.fourier_degree] + dxy - c *= scale - - polygons = self._fourier2poly(c, self.num_reconstr_points) - scores = score_map[score_mask].reshape(-1, 1).tolist() - polygons, scores = self.poly_nms(polygons, scores, self.nms_thr) - result_polys += polygons - result_scores += scores - - result_polys, result_scores = self.poly_nms(result_polys, - result_scores, - self.nms_thr) - - if self.text_repr_type == 'quad': - new_polys = [] - for poly in result_polys: - poly = np.array(poly).reshape(-1, 2).astype(np.float32) - points = cv2.boxPoints(cv2.minAreaRect(poly)) - points = np.int0(points) - new_polys.append(points.reshape(-1)) - - return new_polys, result_scores - return result_polys, result_scores - - def _fourier2poly(self, - fourier_coeff: np.ndarray, - num_reconstr_points: int = 50): - """ Inverse Fourier transform - Args: - fourier_coeff (ndarray): Fourier coefficients shaped (n, 2k+1), - with n and k being candidates number and Fourier degree - respectively. - num_reconstr_points (int): Number of reconstructed polygon - points. Defaults to 50. - - Returns: - List[ndarray]: The reconstructed polygons. 
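    Note: the ``2k+1`` coefficients are expected in the order
    ``[c_{-k}, ..., c_{-1}, c_0, ..., c_k]``; the code below rearranges them into
    numpy's FFT layout ``[c_0, ..., c_k, c_{-k}, ..., c_{-1}]`` before the inverse
    FFT, and the real and imaginary parts of the result give the x and y
    coordinates of the reconstructed contour points.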
- """ - - a = np.zeros((len(fourier_coeff), num_reconstr_points), - dtype='complex') - k = (len(fourier_coeff[0]) - 1) // 2 - - a[:, 0:k + 1] = fourier_coeff[:, k:] - a[:, -k:] = fourier_coeff[:, :k] - - poly_complex = ifft(a) * num_reconstr_points - polygon = np.zeros((len(fourier_coeff), num_reconstr_points, 2)) - polygon[:, :, 0] = poly_complex.real - polygon[:, :, 1] = poly_complex.imag - return polygon.astype('int32').reshape( - (len(fourier_coeff), -1)).tolist() diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/backbones/nrtr_modality_transformer.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/backbones/nrtr_modality_transformer.py deleted file mode 100644 index 35f4f9c3f2e0e7f874620cfad643bfcbcb5cd0c5..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/models/textrecog/backbones/nrtr_modality_transformer.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Dict, Optional, Sequence, Union - -import torch -import torch.nn as nn -from mmengine.model import BaseModule - -from mmocr.registry import MODELS - - -@MODELS.register_module() -class NRTRModalityTransform(BaseModule): - """Modality transform in NRTR. - - Args: - in_channels (int): Input channel of image. Defaults to 3. - init_cfg (dict or list[dict], optional): Initialization configs. - """ - - def __init__( - self, - in_channels: int = 3, - init_cfg: Optional[Union[Dict, Sequence[Dict]]] = [ - dict(type='Kaiming', layer='Conv2d'), - dict(type='Uniform', layer='BatchNorm2d') - ] - ) -> None: - super().__init__(init_cfg=init_cfg) - - self.conv_1 = nn.Conv2d( - in_channels=in_channels, - out_channels=32, - kernel_size=3, - stride=2, - padding=1) - self.relu_1 = nn.ReLU(True) - self.bn_1 = nn.BatchNorm2d(32) - - self.conv_2 = nn.Conv2d( - in_channels=32, - out_channels=64, - kernel_size=3, - stride=2, - padding=1) - self.relu_2 = nn.ReLU(True) - self.bn_2 = nn.BatchNorm2d(64) - - self.linear = nn.Linear(512, 512) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - """Backbone forward. - - Args: - x (torch.Tensor): Image tensor of shape :math:`(N, C, W, H)`. W, H - is the width and height of image. - Return: - Tensor: Output tensor. 
- """ - x = self.conv_1(x) - x = self.relu_1(x) - x = self.bn_1(x) - - x = self.conv_2(x) - x = self.relu_2(x) - x = self.bn_2(x) - - n, c, h, w = x.size() - - x = x.permute(0, 3, 2, 1).contiguous().view(n, w, h * c) - - x = self.linear(x) - - x = x.permute(0, 2, 1).contiguous().view(n, -1, 1, w) - - return x diff --git a/spaces/NATSpeech/DiffSpeech/modules/tts/diffspeech/shallow_diffusion_tts.py b/spaces/NATSpeech/DiffSpeech/modules/tts/diffspeech/shallow_diffusion_tts.py deleted file mode 100644 index e3c3a6d891a7721949e05f6065c194aaae8ea9e8..0000000000000000000000000000000000000000 --- a/spaces/NATSpeech/DiffSpeech/modules/tts/diffspeech/shallow_diffusion_tts.py +++ /dev/null @@ -1,279 +0,0 @@ -import math -import random -from functools import partial -from inspect import isfunction -import numpy as np -import torch -import torch.nn.functional as F -from torch import nn -from tqdm import tqdm - -from modules.tts.fs2_orig import FastSpeech2Orig -from modules.tts.diffspeech.net import DiffNet -from modules.tts.commons.align_ops import expand_states - - -def exists(x): - return x is not None - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -# gaussian diffusion trainer class - -def extract(a, t, x_shape): - b, *_ = t.shape - out = a.gather(-1, t) - return out.reshape(b, *((1,) * (len(x_shape) - 1))) - - -def noise_like(shape, device, repeat=False): - repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) - noise = lambda: torch.randn(shape, device=device) - return repeat_noise() if repeat else noise() - - -def linear_beta_schedule(timesteps, max_beta=0.01): - """ - linear schedule - """ - betas = np.linspace(1e-4, max_beta, timesteps) - return betas - - -def cosine_beta_schedule(timesteps, s=0.008): - """ - cosine schedule - as proposed in https://openreview.net/forum?id=-NEXDKk8gZ - """ - steps = timesteps + 1 - x = np.linspace(0, steps, steps) - alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2 - alphas_cumprod = alphas_cumprod / alphas_cumprod[0] - betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1]) - return np.clip(betas, a_min=0, a_max=0.999) - - -beta_schedule = { - "cosine": cosine_beta_schedule, - "linear": linear_beta_schedule, -} - - -DIFF_DECODERS = { - 'wavenet': lambda hp: DiffNet(hp), -} - - -class AuxModel(FastSpeech2Orig): - def forward(self, txt_tokens, mel2ph=None, spk_embed=None, spk_id=None, - f0=None, uv=None, energy=None, infer=False, **kwargs): - ret = {} - encoder_out = self.encoder(txt_tokens) # [B, T, C] - src_nonpadding = (txt_tokens > 0).float()[:, :, None] - style_embed = self.forward_style_embed(spk_embed, spk_id) - - # add dur - dur_inp = (encoder_out + style_embed) * src_nonpadding - mel2ph = self.forward_dur(dur_inp, mel2ph, txt_tokens, ret) - tgt_nonpadding = (mel2ph > 0).float()[:, :, None] - decoder_inp = decoder_inp_ = expand_states(encoder_out, mel2ph) - - # add pitch and energy embed - if self.hparams['use_pitch_embed']: - pitch_inp = (decoder_inp_ + style_embed) * tgt_nonpadding - decoder_inp = decoder_inp + self.forward_pitch(pitch_inp, f0, uv, mel2ph, ret, encoder_out) - - # add pitch and energy embed - if self.hparams['use_energy_embed']: - energy_inp = (decoder_inp_ + style_embed) * tgt_nonpadding - decoder_inp = decoder_inp + self.forward_energy(energy_inp, energy, ret) - - # decoder input - ret['decoder_inp'] = decoder_inp = (decoder_inp + style_embed) * tgt_nonpadding - if self.hparams['dec_inp_add_noise']: - B, T, 
_ = decoder_inp.shape - z = kwargs.get('adv_z', torch.randn([B, T, self.z_channels])).to(decoder_inp.device) - ret['adv_z'] = z - decoder_inp = torch.cat([decoder_inp, z], -1) - decoder_inp = self.dec_inp_noise_proj(decoder_inp) * tgt_nonpadding - if kwargs['skip_decoder']: - return ret - ret['mel_out'] = self.forward_decoder(decoder_inp, tgt_nonpadding, ret, infer=infer, **kwargs) - return ret - - -class GaussianDiffusion(nn.Module): - def __init__(self, dict_size, hparams, out_dims=None): - super().__init__() - self.hparams = hparams - out_dims = hparams['audio_num_mel_bins'] - denoise_fn = DIFF_DECODERS[hparams['diff_decoder_type']](hparams) - timesteps = hparams['timesteps'] - K_step = hparams['K_step'] - loss_type = hparams['diff_loss_type'] - spec_min = hparams['spec_min'] - spec_max = hparams['spec_max'] - - self.denoise_fn = denoise_fn - self.fs2 = AuxModel(dict_size, hparams) - self.mel_bins = out_dims - - if hparams['schedule_type'] == 'linear': - betas = linear_beta_schedule(timesteps, hparams['max_beta']) - else: - betas = cosine_beta_schedule(timesteps) - - alphas = 1. - betas - alphas_cumprod = np.cumprod(alphas, axis=0) - alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) - - timesteps, = betas.shape - self.num_timesteps = int(timesteps) - self.K_step = K_step - self.loss_type = loss_type - - to_torch = partial(torch.tensor, dtype=torch.float32) - - self.register_buffer('betas', to_torch(betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) - - # calculations for posterior q(x_{t-1} | x_t, x_0) - posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod) - # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) - self.register_buffer('posterior_variance', to_torch(posterior_variance)) - # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain - self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) - self.register_buffer('posterior_mean_coef1', to_torch( - betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) - self.register_buffer('posterior_mean_coef2', to_torch( - (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) - - self.register_buffer('spec_min', torch.FloatTensor(spec_min)[None, None, :hparams['keep_bins']]) - self.register_buffer('spec_max', torch.FloatTensor(spec_max)[None, None, :hparams['keep_bins']]) - - def q_mean_variance(self, x_start, t): - mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start - variance = extract(1. 
- self.alphas_cumprod, t, x_start.shape) - log_variance = extract(self.log_one_minus_alphas_cumprod, t, x_start.shape) - return mean, variance, log_variance - - def predict_start_from_noise(self, x_t, t, noise): - return ( - extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - - extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise - ) - - def q_posterior(self, x_start, x_t, t): - posterior_mean = ( - extract(self.posterior_mean_coef1, t, x_t.shape) * x_start + - extract(self.posterior_mean_coef2, t, x_t.shape) * x_t - ) - posterior_variance = extract(self.posterior_variance, t, x_t.shape) - posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape) - return posterior_mean, posterior_variance, posterior_log_variance_clipped - - def p_mean_variance(self, x, t, cond, clip_denoised: bool): - noise_pred = self.denoise_fn(x, t, cond=cond) - x_recon = self.predict_start_from_noise(x, t=t, noise=noise_pred) - - if clip_denoised: - x_recon.clamp_(-1., 1.) - - model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample(self, x, t, cond, clip_denoised=True, repeat_noise=False): - b, *_, device = *x.shape, x.device - model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, cond=cond, clip_denoised=clip_denoised) - noise = noise_like(x.shape, device, repeat_noise) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - def q_sample(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - return ( - extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + - extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise - ) - - def p_losses(self, x_start, t, cond, noise=None, nonpadding=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - x_recon = self.denoise_fn(x_noisy, t, cond) - - if self.loss_type == 'l1': - if nonpadding is not None: - loss = ((noise - x_recon).abs() * nonpadding.unsqueeze(1)).mean() - else: - # print('are you sure w/o nonpadding?') - loss = (noise - x_recon).abs().mean() - - elif self.loss_type == 'l2': - loss = F.mse_loss(noise, x_recon) - else: - raise NotImplementedError() - - return loss - - def forward(self, txt_tokens, mel2ph=None, spk_embed=None, spk_id=None, - ref_mels=None, f0=None, uv=None, energy=None, infer=False, **kwargs): - b, *_, device = *txt_tokens.shape, txt_tokens.device - ret = self.fs2(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, spk_id=spk_id, - f0=f0, uv=uv, energy=energy, infer=infer, skip_decoder=(not infer), **kwargs) - cond = ret['decoder_inp'].transpose(1, 2) - - if not infer: - t = torch.randint(0, self.K_step, (b,), device=device).long() - x = ref_mels - x = self.norm_spec(x) - x = x.transpose(1, 2)[:, None, :, :] # [B, 1, M, T] - ret['diff_loss'] = self.p_losses(x, t, cond) - # nonpadding = (mel2ph != 0).float() - # ret['diff_loss'] = self.p_losses(x, t, cond, nonpadding=nonpadding) - ret['mel_out'] = None - else: - ret['fs2_mel'] = ret['mel_out'] - fs2_mels = ret['mel_out'] - t = self.K_step - fs2_mels = self.norm_spec(fs2_mels) - fs2_mels = fs2_mels.transpose(1, 2)[:, None, :, :] - - x = self.q_sample(x_start=fs2_mels, t=torch.tensor([t - 1], device=device).long()) - if 
self.hparams.get('gaussian_start') is not None and self.hparams['gaussian_start']: - print('===> gaussian start.') - shape = (cond.shape[0], 1, self.mel_bins, cond.shape[2]) - x = torch.randn(shape, device=device) - for i in tqdm(reversed(range(0, t)), desc='sample time step', total=t): - x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond) - x = x[:, 0].transpose(1, 2) - ret['mel_out'] = self.denorm_spec(x) - - return ret - - def norm_spec(self, x): - return (x - self.spec_min) / (self.spec_max - self.spec_min) * 2 - 1 - - def denorm_spec(self, x): - return (x + 1) / 2 * (self.spec_max - self.spec_min) + self.spec_min - - def cwt2f0_norm(self, cwt_spec, mean, std, mel2ph): - return self.fs2.cwt2f0_norm(cwt_spec, mean, std, mel2ph) - - def out2mel(self, x): - return x \ No newline at end of file diff --git a/spaces/NATSpeech/DiffSpeech/tasks/run.py b/spaces/NATSpeech/DiffSpeech/tasks/run.py deleted file mode 100644 index ef2b0a319cb5cd7baf87e5224ab545412715fb69..0000000000000000000000000000000000000000 --- a/spaces/NATSpeech/DiffSpeech/tasks/run.py +++ /dev/null @@ -1,19 +0,0 @@ -import os - -os.environ["OMP_NUM_THREADS"] = "1" - -from utils.commons.hparams import hparams, set_hparams -import importlib - - -def run_task(): - assert hparams['task_cls'] != '' - pkg = ".".join(hparams["task_cls"].split(".")[:-1]) - cls_name = hparams["task_cls"].split(".")[-1] - task_cls = getattr(importlib.import_module(pkg), cls_name) - task_cls.start() - - -if __name__ == '__main__': - set_hparams() - run_task() diff --git a/spaces/NCTCMumbai/NCTC/models/research/adversarial_logit_pairing/tiny_imagenet_converter/converter.py b/spaces/NCTCMumbai/NCTC/models/research/adversarial_logit_pairing/tiny_imagenet_converter/converter.py deleted file mode 100644 index 4fdccc32071f8c677bb1395e324c6b94aa7e85af..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/research/adversarial_logit_pairing/tiny_imagenet_converter/converter.py +++ /dev/null @@ -1,241 +0,0 @@ -# Copyright 2018 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Converts Tiny Imagenet dataset into TFRecord format. - -As an output this program generates following files in TFRecord format: -- train.tfrecord -- validation.tfrecord -- test.tfrecord - -Generated train and validation files will contain tf.Example entries with -following features: -- image/encoded - encoded image -- image/format - image format -- label/wnid - label WordNet ID -- label/imagenet - imagenet label [1 ... 1000] -- label/tiny_imagenet - tiny imagenet label [0 ... 199] -- bbox/xmin -- bbox/ymin -- bbox/xmax -- bbox/ymax - -Test file will contain entries with 'image/encoded' and 'image/format' features. 
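A written record can be read back along these lines (a sketch using the same
TF1-style API this script relies on; only two of the feature keys listed above
are shown):

    for serialized in tf.python_io.tf_record_iterator('train.tfrecord'):
        example = tf.train.Example()
        example.ParseFromString(serialized)
        encoded = example.features.feature['image/encoded'].bytes_list.value[0]
        label = example.features.feature['label/tiny_imagenet'].int64_list.value[0]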
-""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from collections import namedtuple -import os -import random - -from absl import app -from absl import flags -from absl import logging - -import pandas as pd - -import tensorflow as tf - - -FLAGS = flags.FLAGS - -flags.DEFINE_string('input_dir', '', 'Input directory') -flags.DEFINE_string('output_dir', '', 'Output directory') - -flags.DEFINE_string('imagenet_synsets_path', '', - 'Optional path to /imagenet_lsvrc_2015_synsets.txt') - - -ImageMetadata = namedtuple('ImageMetadata', ['label', 'x1', 'y1', 'x2', 'y2']) - - -class WnIdToNodeIdConverter(object): - """Converts WordNet IDs to numerical labels.""" - - def __init__(self, wnids_path, background_class): - self._wnid_to_node_id = {} - self._node_id_to_wnid = {} - with tf.gfile.Open(wnids_path) as f: - wnids_sequence = [wnid.strip() for wnid in f.readlines() if wnid.strip()] - node_id_offset = 1 if background_class else 0 - for i, label in enumerate(wnids_sequence): - self._wnid_to_node_id[label] = i + node_id_offset - self._node_id_to_wnid[i + node_id_offset] = label - - def to_node_id(self, wnid): - return self._wnid_to_node_id[wnid] - - def to_wnid(self, node_id): - return self._node_id_to_wnid[node_id] - - def all_wnids(self): - return self._wnid_to_node_id.keys() - - -def read_tiny_imagenet_annotations(annotations_filename, - images_dir, - one_label=None): - """Reads one file with Tiny Imagenet annotations.""" - result = [] - if one_label: - column_names = ['filename', 'x1', 'y1', 'x2', 'y2'] - else: - column_names = ['filename', 'label', 'x1', 'y1', 'x2', 'y2'] - with tf.gfile.Open(annotations_filename) as f: - data = pd.read_csv(f, sep='\t', names=column_names) - for row in data.itertuples(): - label = one_label if one_label else getattr(row, 'label') - full_filename = os.path.join(images_dir, getattr(row, 'filename')) - result.append((full_filename, - ImageMetadata(label=label, - x1=getattr(row, 'x1'), - y1=getattr(row, 'y1'), - x2=getattr(row, 'x2'), - y2=getattr(row, 'y2')))) - return result - - -def read_validation_annotations(validation_dir): - """Reads validation data annotations.""" - return read_tiny_imagenet_annotations( - os.path.join(validation_dir, 'val_annotations.txt'), - os.path.join(validation_dir, 'images')) - - -def read_training_annotations(training_dir): - """Reads training data annotations.""" - result = [] - sub_dirs = tf.gfile.ListDirectory(training_dir) - for sub_dir in sub_dirs: - if not sub_dir.startswith('n'): - logging.warning('Found non-class directory in training dir: %s', sub_dir) - continue - sub_dir_results = read_tiny_imagenet_annotations( - os.path.join(training_dir, sub_dir, sub_dir + '_boxes.txt'), - os.path.join(training_dir, sub_dir, 'images'), - one_label=sub_dir) - result.extend(sub_dir_results) - return result - - -def read_test_annotations(test_dir): - """Reads test data annotations.""" - files = tf.gfile.ListDirectory(os.path.join(test_dir, 'images')) - return [(os.path.join(test_dir, 'images', f), None) - for f in files if f.endswith('.JPEG')] - - -def get_image_format(filename): - """Returns image format from filename.""" - filename = filename.lower() - if filename.endswith('jpeg') or filename.endswith('jpg'): - return 'jpeg' - elif filename.endswith('png'): - return 'png' - else: - raise ValueError('Unrecognized file format: %s' % filename) - - -class TinyImagenetWriter(object): - """Helper class which writes Tiny Imagenet dataset into TFRecord file.""" - - def 
__init__(self, tiny_imagenet_wnid_conveter, imagenet_wnid_converter): - self.tiny_imagenet_wnid_conveter = tiny_imagenet_wnid_conveter - self.imagenet_wnid_converter = imagenet_wnid_converter - - def write_tf_record(self, - annotations, - output_file): - """Generates TFRecord file from given list of annotations.""" - with tf.python_io.TFRecordWriter(output_file) as writer: - for image_filename, image_metadata in annotations: - with tf.gfile.Open(image_filename) as f: - image_buffer = f.read() - image_format = get_image_format(image_filename) - features = { - 'image/encoded': tf.train.Feature( - bytes_list=tf.train.BytesList(value=[image_buffer])), - 'image/format': tf.train.Feature( - bytes_list=tf.train.BytesList(value=[image_format])) - } - if image_metadata: - # bounding box features - features['bbox/xmin'] = tf.train.Feature( - int64_list=tf.train.Int64List(value=[image_metadata.x1])) - features['bbox/ymin'] = tf.train.Feature( - int64_list=tf.train.Int64List(value=[image_metadata.y1])) - features['bbox/xmax'] = tf.train.Feature( - int64_list=tf.train.Int64List(value=[image_metadata.x2])) - features['bbox/ymax'] = tf.train.Feature( - int64_list=tf.train.Int64List(value=[image_metadata.y2])) - # tiny imagenet label, from [0, 200) iterval - tiny_imagenet_label = self.tiny_imagenet_wnid_conveter.to_node_id( - image_metadata.label) - features['label/wnid'] = tf.train.Feature( - bytes_list=tf.train.BytesList(value=image_metadata.label)) - features['label/tiny_imagenet'] = tf.train.Feature( - int64_list=tf.train.Int64List(value=[tiny_imagenet_label])) - # full imagenet label, from [1, 1001) interval - if self.imagenet_wnid_converter: - imagenet_label = self.imagenet_wnid_converter.to_node_id( - image_metadata.label) - features['label/imagenet'] = tf.train.Feature( - int64_list=tf.train.Int64List(value=[imagenet_label])) - example = tf.train.Example(features=tf.train.Features(feature=features)) - writer.write(example.SerializeToString()) - - -def main(_): - assert FLAGS.input_dir, 'Input directory must be provided' - assert FLAGS.output_dir, 'Output directory must be provided' - - # Create WordNet ID conveters for tiny imagenet and possibly for imagenet - tiny_imagenet_wnid_conveter = WnIdToNodeIdConverter( - os.path.join(FLAGS.input_dir, 'wnids.txt'), - background_class=False) - if FLAGS.imagenet_synsets_path: - imagenet_wnid_converter = WnIdToNodeIdConverter(FLAGS.imagenet_synsets_path, - background_class=True) - else: - imagenet_wnid_converter = None - - # read tiny imagenet annotations - train_annotations = read_training_annotations( - os.path.join(FLAGS.input_dir, 'train')) - random.shuffle(train_annotations) - val_annotations = read_validation_annotations( - os.path.join(FLAGS.input_dir, 'val')) - test_filenames = read_test_annotations(os.path.join(FLAGS.input_dir, 'test')) - - # Generate TFRecord files - writer = TinyImagenetWriter(tiny_imagenet_wnid_conveter, - imagenet_wnid_converter) - tf.logging.info('Converting %d training images', len(train_annotations)) - writer.write_tf_record(train_annotations, - os.path.join(FLAGS.output_dir, 'train.tfrecord')) - tf.logging.info('Converting %d validation images ', len(val_annotations)) - writer.write_tf_record(val_annotations, - os.path.join(FLAGS.output_dir, 'validation.tfrecord')) - tf.logging.info('Converting %d test images', len(test_filenames)) - writer.write_tf_record(test_filenames, - os.path.join(FLAGS.output_dir, 'test.tfrecord')) - tf.logging.info('All files are converted') - - -if __name__ == '__main__': - app.run(main) diff --git 
a/spaces/NEXAS/NEXAS-stable_diff_custom/app.py b/spaces/NEXAS/NEXAS-stable_diff_custom/app.py deleted file mode 100644 index 4086dc2a0cdc62ee9dc6e78619b58592e3e8024b..0000000000000000000000000000000000000000 --- a/spaces/NEXAS/NEXAS-stable_diff_custom/app.py +++ /dev/null @@ -1,9 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/NEXAS/stable_diff_custom").launch() - - - - - - diff --git a/spaces/NeonLion92/Chat-and-Battle-with-Open-LLMs-Neon92/README.md b/spaces/NeonLion92/Chat-and-Battle-with-Open-LLMs-Neon92/README.md deleted file mode 100644 index 9176df21831794b1e5502083002883ab9f222807..0000000000000000000000000000000000000000 --- a/spaces/NeonLion92/Chat-and-Battle-with-Open-LLMs-Neon92/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Chat And Battle With Open LLMs -emoji: 💬 -colorFrom: purple -colorTo: pink -sdk: static -pinned: false -license: other -duplicated_from: lmsys/Chat-and-Battle-with-Open-LLMs ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/Nephele/bert-vits2-multi-voice/attentions.py b/spaces/Nephele/bert-vits2-multi-voice/attentions.py deleted file mode 100644 index ecbdbc8be941a962046fc11fd6739b093112123e..0000000000000000000000000000000000000000 --- a/spaces/Nephele/bert-vits2-multi-voice/attentions.py +++ /dev/null @@ -1,343 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -from torch.nn.utils import weight_norm, remove_weight_norm -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, isflow = True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - if isflow: - cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1) - self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1) - self.cond_layer = weight_norm(cond_layer, name='weight') - self.gin_channels = 256 - self.cond_layer_idx = self.n_layers - if 'gin_channels' in kwargs: - self.gin_channels = kwargs['gin_channels'] - if self.gin_channels != 0: - self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels) - # vits2 says 3rd block, so idx is 2 by default - self.cond_layer_idx = kwargs['cond_layer_idx'] if 'cond_layer_idx' in kwargs else 2 - print(self.gin_channels, self.cond_layer_idx) - assert self.cond_layer_idx < self.n_layers, 'cond_layer_idx should be less than n_layers' - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = 
nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - def forward(self, x, x_mask, g=None): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - if i == self.cond_layer_idx and g is not None: - g = self.spk_emb_linear(g.transpose(1, 2)) - g = g.transpose(1, 2) - x = x + g - x = x * x_mask - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = 
proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." 
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/multilingual/README.md b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/multilingual/README.md deleted file mode 100644 index 46ff9c351b1030e0729f89f246e0cd86444c1633..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/multilingual/README.md +++ /dev/null @@ -1,158 +0,0 @@ -# Multilingual Translation - -[[Multilingual Translation with Extensible Multilingual Pretraining and Finetuning, https://arxiv.org/abs/2008.00401]](https://arxiv.org/abs/2008.00401) - -## Introduction - -This work is for training multilingual translation models with multiple bitext datasets. This multilingual translation framework supports (see [[training section]](#Training) and [[finetuning section]](#Finetuning) for examples) - -* temperature based sampling over unbalancing datasets of different translation directions - - --sampling-method' with - choices=['uniform', 'temperature', 'concat'] - - --sampling-temperature -* configurable to automatically add source and/or target language tokens to source/target sentences using data which are prepared in the same way as bilignual training - - --encoder-langtok with choices=['src', 'tgt', None] to specify whether to add source or target language tokens to the source sentences - - --decoder-langtok (binary option) to specify whether to add target language tokens to the target sentences or not -* finetuning mBART pretrained models for multilingual translation - - --finetune-from-model to specify the path from which to load the pretrained model - -## Preprocessing data -Multilingual training requires a joint BPE vocab. Please follow [mBART's preprocessing steps](https://github.com/pytorch/fairseq/tree/main/examples/mbart#bpe-data) to reuse our pretrained sentence-piece model. 
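For instance, the released `sentence.bpe.model` can be applied to raw text with the `sentencepiece` Python package before binarization. This is only a minimal sketch; the file names are placeholders:

```python
# Minimal sketch: encode raw text with the released sentence.bpe.model
# before binarization. File names are placeholders.
import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("sentence.bpe.model")

with open("train.raw.en", encoding="utf-8") as fin, \
        open("train.spm.en", "w", encoding="utf-8") as fout:
    for line in fin:
        fout.write(" ".join(sp.EncodeAsPieces(line.strip())) + "\n")
```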
- -You can also train a joint BPE model on your own dataset and then follow the steps in [[link]](https://github.com/pytorch/fairseq/tree/main/examples/translation#multilingual-translation). - -## Training - - -```bash -lang_pairs= -path_2_data= -lang_list= - -fairseq-train $path_2_data \ - --encoder-normalize-before --decoder-normalize-before \ - --arch transformer --layernorm-embedding \ - --task translation_multi_simple_epoch \ - --sampling-method "temperature" \ - --sampling-temperature 1.5 \ - --encoder-langtok "src" \ - --decoder-langtok \ - --lang-dict "$lang_list" \ - --lang-pairs "$lang_pairs" \ - --criterion label_smoothed_cross_entropy --label-smoothing 0.2 \ - --optimizer adam --adam-eps 1e-06 --adam-betas '(0.9, 0.98)' \ - --lr-scheduler inverse_sqrt --lr 3e-05 --warmup-updates 2500 --max-update 40000 \ - --dropout 0.3 --attention-dropout 0.1 --weight-decay 0.0 \ - --max-tokens 1024 --update-freq 2 \ - --save-interval 1 --save-interval-updates 5000 --keep-interval-updates 10 --no-epoch-checkpoints \ - --seed 222 --log-format simple --log-interval 2 -``` - -## Finetuning -We can also finetune multilingual models from a monolingual pretrained models, e.g. [mMBART](https://github.com/pytorch/fairseq/tree/main/examples/mbart). -```bash -lang_pairs= -path_2_data= -lang_list= -pretrained_model= - -fairseq-train $path_2_data \ - --finetune-from-model $pretrained_model \ - --encoder-normalize-before --decoder-normalize-before \ - --arch transformer --layernorm-embedding \ - --task translation_multi_simple_epoch \ - --sampling-method "temperature" \ - --sampling-temperature 1.5 \ - --encoder-langtok "src" \ - --decoder-langtok \ - --lang-dict "$lang_list" \ - --lang-pairs "$lang_pairs" \ - --criterion label_smoothed_cross_entropy --label-smoothing 0.2 \ - --optimizer adam --adam-eps 1e-06 --adam-betas '(0.9, 0.98)' \ - --lr-scheduler inverse_sqrt --lr 3e-05 --warmup-updates 2500 --max-update 40000 \ - --dropout 0.3 --attention-dropout 0.1 --weight-decay 0.0 \ - --max-tokens 1024 --update-freq 2 \ - --save-interval 1 --save-interval-updates 5000 --keep-interval-updates 10 --no-epoch-checkpoints \ - --seed 222 --log-format simple --log-interval 2 -``` -## Generate -The following command uses the multilingual task (translation_multi_simple_epoch) to generate translation from $source_lang to $target_lang on the test dataset. During generaton, the source language tokens are added to source sentences and the target language tokens are added as the starting token to decode target sentences. Options --lang-dict and --lang-pairs are needed to tell the generation process the ordered list of languages and translation directions that the trained model are awared of; they will need to be consistent with the training. - -```bash -model= -source_lang= -target_lang= - -fairseq-generate $path_2_data \ - --path $model \ - --task translation_multi_simple_epoch \ - --gen-subset test \ - --source-lang $source_lang \ - --target-lang $target_lang - --sacrebleu --remove-bpe 'sentencepiece'\ - --batch-size 32 \ - --encoder-langtok "src" \ - --decoder-langtok \ - --lang-dict "$lang_list" \ - --lang-pairs "$lang_pairs" > ${source_lang}_${target_lang}.txt -``` -Fairseq will generate translation into a file {source_lang}_${target_lang}.txt with sacreblue at the end. - -You can also use costomized tokenizer to compare the performance with the literature. 
For example, you can get a tokenizer [here](https://github.com/rsennrich/wmt16-scripts) and do the following: -```bash -TOKENIZER= -TOK_CMD=<"$TOKENIZER $target_lang" or cat for sacrebleu> - -cat ${source_lang}_${target_lang}.txt | grep -P "^H" |sort -V |cut -f 3- |$TOK_CMD > ${source_lang}_${target_lang}.hyp -cat ${source_lang}_${target_lang}.txt | grep -P "^T" |sort -V |cut -f 2- |$TOK_CMD > ${source_lang}_${target_lang}.ref -sacrebleu -tok 'none' -s 'none' ${source_lang}_${target_lang}.ref < ${source_lang}_${target_lang}.hyp -``` - -# mBART50 models - -* [mBART 50 pretrained model](https://dl.fbaipublicfiles.com/fairseq/models/mbart50/mbart50.pretrained.tar.gz). -* [mBART 50 finetuned many-to-one](https://dl.fbaipublicfiles.com/fairseq/models/mbart50/mbart50.ft.n1.tar.gz). -* [mBART 50 finetuned one-to-many](https://dl.fbaipublicfiles.com/fairseq/models/mbart50/mbart50.ft.1n.tar.gz). -* [mBART 50 finetuned many-to-many](https://dl.fbaipublicfiles.com/fairseq/models/mbart50/mbart50.ft.nn.tar.gz). - -Please download and extract the above tarballs. Each tarball contains -* The fairseq model checkpoint: model.pt -* The list of supported languages: ML50_langs.txt -* Sentence piece model: sentence.bpe.model -* Fairseq dictionary of each language: dict.{lang}.txt (please replace lang with a language specified in ML50_langs.txt) - -To use the trained models, -* use the tool [binarize.py](./data_scripts/binarize.py) to binarize your data using sentence.bpe.model and dict.{lang}.txt, and copy the dictionaries to your data path -* then run the generation command: -```bash -path_2_data= -model=/model.pt -lang_list=/ML50_langs.txt -source_lang= -target_lang= - -fairseq-generate $path_2_data \ - --path $model \ - --task translation_multi_simple_epoch \ - --gen-subset test \ - --source-lang $source_lang \ - --target-lang $target_lang \ - --sacrebleu --remove-bpe 'sentencepiece'\ - --batch-size 32 \ - --encoder-langtok "src" \ - --decoder-langtok \ - --lang-dict "$lang_list" -``` - -## Citation - -```bibtex -@article{tang2020multilingual, - title={Multilingual Translation with Extensible Multilingual Pretraining and Finetuning}, - author={Yuqing Tang and Chau Tran and Xian Li and Peng-Jen Chen and Naman Goyal and Vishrav Chaudhary and Jiatao Gu and Angela Fan}, - year={2020}, - eprint={2008.00401}, - archivePrefix={arXiv}, - primaryClass={cs.CL} -} -``` diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/speech_recognition/utils/wer_utils.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/speech_recognition/utils/wer_utils.py deleted file mode 100644 index cf6f3d09ba41a46ad4d7968fb3c286dd53d15c38..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/speech_recognition/utils/wer_utils.py +++ /dev/null @@ -1,381 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from __future__ import absolute_import, division, print_function, unicode_literals - -import re -from collections import deque -from enum import Enum - -import numpy as np - - -""" - Utility modules for computation of Word Error Rate, - Alignments, as well as more granular metrics like - deletion, insertion and substitution.
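    Concretely, WER = (S + D + I) / N_ref, where S, D and I are the substitution,
    deletion and insertion counts produced by the alignment and N_ref is the number
    of reference words; e.g. 2 substitutions and 1 deletion against a 10-word
    reference give a WER of 30%.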
-""" - - -class Code(Enum): - match = 1 - substitution = 2 - insertion = 3 - deletion = 4 - - -class Token(object): - def __init__(self, lbl="", st=np.nan, en=np.nan): - if np.isnan(st): - self.label, self.start, self.end = "", 0.0, 0.0 - else: - self.label, self.start, self.end = lbl, st, en - - -class AlignmentResult(object): - def __init__(self, refs, hyps, codes, score): - self.refs = refs # std::deque - self.hyps = hyps # std::deque - self.codes = codes # std::deque - self.score = score # float - - -def coordinate_to_offset(row, col, ncols): - return int(row * ncols + col) - - -def offset_to_row(offset, ncols): - return int(offset / ncols) - - -def offset_to_col(offset, ncols): - return int(offset % ncols) - - -def trimWhitespace(str): - return re.sub(" +", " ", re.sub(" *$", "", re.sub("^ *", "", str))) - - -def str2toks(str): - pieces = trimWhitespace(str).split(" ") - toks = [] - for p in pieces: - toks.append(Token(p, 0.0, 0.0)) - return toks - - -class EditDistance(object): - def __init__(self, time_mediated): - self.time_mediated_ = time_mediated - self.scores_ = np.nan # Eigen::Matrix - self.backtraces_ = ( - np.nan - ) # Eigen::Matrix backtraces_; - self.confusion_pairs_ = {} - - def cost(self, ref, hyp, code): - if self.time_mediated_: - if code == Code.match: - return abs(ref.start - hyp.start) + abs(ref.end - hyp.end) - elif code == Code.insertion: - return hyp.end - hyp.start - elif code == Code.deletion: - return ref.end - ref.start - else: # substitution - return abs(ref.start - hyp.start) + abs(ref.end - hyp.end) + 0.1 - else: - if code == Code.match: - return 0 - elif code == Code.insertion or code == Code.deletion: - return 3 - else: # substitution - return 4 - - def get_result(self, refs, hyps): - res = AlignmentResult(refs=deque(), hyps=deque(), codes=deque(), score=np.nan) - - num_rows, num_cols = self.scores_.shape - res.score = self.scores_[num_rows - 1, num_cols - 1] - - curr_offset = coordinate_to_offset(num_rows - 1, num_cols - 1, num_cols) - - while curr_offset != 0: - curr_row = offset_to_row(curr_offset, num_cols) - curr_col = offset_to_col(curr_offset, num_cols) - - prev_offset = self.backtraces_[curr_row, curr_col] - - prev_row = offset_to_row(prev_offset, num_cols) - prev_col = offset_to_col(prev_offset, num_cols) - - res.refs.appendleft(curr_row - 1) # Note: this was .push_front() in C++ - res.hyps.appendleft(curr_col - 1) - if curr_row - 1 == prev_row and curr_col == prev_col: - res.codes.appendleft(Code.deletion) - elif curr_row == prev_row and curr_col - 1 == prev_col: - res.codes.appendleft(Code.insertion) - else: - # assert(curr_row - 1 == prev_row and curr_col - 1 == prev_col) - ref_str = refs[res.refs[0]].label - hyp_str = hyps[res.hyps[0]].label - - if ref_str == hyp_str: - res.codes.appendleft(Code.match) - else: - res.codes.appendleft(Code.substitution) - - confusion_pair = "%s -> %s" % (ref_str, hyp_str) - if confusion_pair not in self.confusion_pairs_: - self.confusion_pairs_[confusion_pair] = 1 - else: - self.confusion_pairs_[confusion_pair] += 1 - - curr_offset = prev_offset - - return res - - def align(self, refs, hyps): - if len(refs) == 0 and len(hyps) == 0: - return np.nan - - # NOTE: we're not resetting the values in these matrices because every value - # will be overridden in the loop below. If this assumption doesn't hold, - # be sure to set all entries in self.scores_ and self.backtraces_ to 0. 
- self.scores_ = np.zeros((len(refs) + 1, len(hyps) + 1)) - self.backtraces_ = np.zeros((len(refs) + 1, len(hyps) + 1)) - - num_rows, num_cols = self.scores_.shape - - for i in range(num_rows): - for j in range(num_cols): - if i == 0 and j == 0: - self.scores_[i, j] = 0.0 - self.backtraces_[i, j] = 0 - continue - - if i == 0: - self.scores_[i, j] = self.scores_[i, j - 1] + self.cost( - None, hyps[j - 1], Code.insertion - ) - self.backtraces_[i, j] = coordinate_to_offset(i, j - 1, num_cols) - continue - - if j == 0: - self.scores_[i, j] = self.scores_[i - 1, j] + self.cost( - refs[i - 1], None, Code.deletion - ) - self.backtraces_[i, j] = coordinate_to_offset(i - 1, j, num_cols) - continue - - # Below here both i and j are greater than 0 - ref = refs[i - 1] - hyp = hyps[j - 1] - best_score = self.scores_[i - 1, j - 1] + ( - self.cost(ref, hyp, Code.match) - if (ref.label == hyp.label) - else self.cost(ref, hyp, Code.substitution) - ) - - prev_row = i - 1 - prev_col = j - 1 - ins = self.scores_[i, j - 1] + self.cost(None, hyp, Code.insertion) - if ins < best_score: - best_score = ins - prev_row = i - prev_col = j - 1 - - delt = self.scores_[i - 1, j] + self.cost(ref, None, Code.deletion) - if delt < best_score: - best_score = delt - prev_row = i - 1 - prev_col = j - - self.scores_[i, j] = best_score - self.backtraces_[i, j] = coordinate_to_offset( - prev_row, prev_col, num_cols - ) - - return self.get_result(refs, hyps) - - -class WERTransformer(object): - def __init__(self, hyp_str, ref_str, verbose=True): - self.ed_ = EditDistance(False) - self.id2oracle_errs_ = {} - self.utts_ = 0 - self.words_ = 0 - self.insertions_ = 0 - self.deletions_ = 0 - self.substitutions_ = 0 - - self.process(["dummy_str", hyp_str, ref_str]) - - if verbose: - print("'%s' vs '%s'" % (hyp_str, ref_str)) - self.report_result() - - def process(self, input): # std::vector&& input - if len(input) < 3: - print( - "Input must be of the form ... 
, got ", - len(input), - " inputs:", - ) - return None - - # Align - # std::vector hyps; - # std::vector refs; - - hyps = str2toks(input[-2]) - refs = str2toks(input[-1]) - - alignment = self.ed_.align(refs, hyps) - if alignment is None: - print("Alignment is null") - return np.nan - - # Tally errors - ins = 0 - dels = 0 - subs = 0 - for code in alignment.codes: - if code == Code.substitution: - subs += 1 - elif code == Code.insertion: - ins += 1 - elif code == Code.deletion: - dels += 1 - - # Output - row = input - row.append(str(len(refs))) - row.append(str(ins)) - row.append(str(dels)) - row.append(str(subs)) - # print(row) - - # Accumulate - kIdIndex = 0 - kNBestSep = "/" - - pieces = input[kIdIndex].split(kNBestSep) - - if len(pieces) == 0: - print( - "Error splitting ", - input[kIdIndex], - " on '", - kNBestSep, - "', got empty list", - ) - return np.nan - - id = pieces[0] - if id not in self.id2oracle_errs_: - self.utts_ += 1 - self.words_ += len(refs) - self.insertions_ += ins - self.deletions_ += dels - self.substitutions_ += subs - self.id2oracle_errs_[id] = [ins, dels, subs] - else: - curr_err = ins + dels + subs - prev_err = np.sum(self.id2oracle_errs_[id]) - if curr_err < prev_err: - self.id2oracle_errs_[id] = [ins, dels, subs] - - return 0 - - def report_result(self): - # print("---------- Summary ---------------") - if self.words_ == 0: - print("No words counted") - return - - # 1-best - best_wer = ( - 100.0 - * (self.insertions_ + self.deletions_ + self.substitutions_) - / self.words_ - ) - - print( - "\tWER = %0.2f%% (%i utts, %i words, %0.2f%% ins, " - "%0.2f%% dels, %0.2f%% subs)" - % ( - best_wer, - self.utts_, - self.words_, - 100.0 * self.insertions_ / self.words_, - 100.0 * self.deletions_ / self.words_, - 100.0 * self.substitutions_ / self.words_, - ) - ) - - def wer(self): - if self.words_ == 0: - wer = np.nan - else: - wer = ( - 100.0 - * (self.insertions_ + self.deletions_ + self.substitutions_) - / self.words_ - ) - return wer - - def stats(self): - if self.words_ == 0: - stats = {} - else: - wer = ( - 100.0 - * (self.insertions_ + self.deletions_ + self.substitutions_) - / self.words_ - ) - stats = dict( - { - "wer": wer, - "utts": self.utts_, - "numwords": self.words_, - "ins": self.insertions_, - "dels": self.deletions_, - "subs": self.substitutions_, - "confusion_pairs": self.ed_.confusion_pairs_, - } - ) - return stats - - -def calc_wer(hyp_str, ref_str): - t = WERTransformer(hyp_str, ref_str, verbose=0) - return t.wer() - - -def calc_wer_stats(hyp_str, ref_str): - t = WERTransformer(hyp_str, ref_str, verbose=0) - return t.stats() - - -def get_wer_alignment_codes(hyp_str, ref_str): - """ - INPUT: hypothesis string, reference string - OUTPUT: List of alignment codes (intermediate results from WER computation) - """ - t = WERTransformer(hyp_str, ref_str, verbose=0) - return t.ed_.align(str2toks(ref_str), str2toks(hyp_str)).codes - - -def merge_counts(x, y): - # Merge two hashes which have 'counts' as their values - # This can be used for example to merge confusion pair counts - # conf_pairs = merge_counts(conf_pairs, stats['confusion_pairs']) - for k, v in y.items(): - if k not in x: - x[k] = 0 - x[k] += v - return x diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/text_to_speech/tacotron2.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/text_to_speech/tacotron2.py deleted file mode 100644 index bb327e81e74900349e1357261bf2f14bc037ccd6..0000000000000000000000000000000000000000 --- 
a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/text_to_speech/tacotron2.py +++ /dev/null @@ -1,350 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging - -import torch -from torch import nn -from torch.nn import functional as F - -from fairseq.models import (FairseqEncoder, FairseqEncoderDecoderModel, - FairseqIncrementalDecoder, register_model, - register_model_architecture) -from fairseq.modules import LSTMCellWithZoneOut, LocationAttention - - -logger = logging.getLogger(__name__) - - -def encoder_init(m): - if isinstance(m, nn.Conv1d): - nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("relu")) - - -class Tacotron2Encoder(FairseqEncoder): - def __init__(self, args, src_dict, embed_speaker): - super().__init__(src_dict) - self.padding_idx = src_dict.pad() - self.embed_speaker = embed_speaker - self.spk_emb_proj = None - if embed_speaker is not None: - self.spk_emb_proj = nn.Linear( - args.encoder_embed_dim + args.speaker_embed_dim, - args.encoder_embed_dim - ) - - self.embed_tokens = nn.Embedding(len(src_dict), args.encoder_embed_dim, - padding_idx=self.padding_idx) - - assert(args.encoder_conv_kernel_size % 2 == 1) - self.convolutions = nn.ModuleList( - nn.Sequential( - nn.Conv1d(args.encoder_embed_dim, args.encoder_embed_dim, - kernel_size=args.encoder_conv_kernel_size, - padding=((args.encoder_conv_kernel_size - 1) // 2)), - nn.BatchNorm1d(args.encoder_embed_dim), - nn.ReLU(), - nn.Dropout(args.encoder_dropout) - ) - for _ in range(args.encoder_conv_layers) - ) - - self.lstm = nn.LSTM(args.encoder_embed_dim, args.encoder_embed_dim // 2, - num_layers=args.encoder_lstm_layers, - batch_first=True, bidirectional=True) - - self.apply(encoder_init) - - def forward(self, src_tokens, src_lengths=None, speaker=None, **kwargs): - x = self.embed_tokens(src_tokens) - x = x.transpose(1, 2).contiguous() # B x T x C -> B x C x T - for conv in self.convolutions: - x = conv(x) - x = x.transpose(1, 2).contiguous() # B x C x T -> B x T x C - - src_lengths = src_lengths.cpu().long() - x = nn.utils.rnn.pack_padded_sequence(x, src_lengths, batch_first=True) - x = self.lstm(x)[0] - x = nn.utils.rnn.pad_packed_sequence(x, batch_first=True)[0] - - encoder_padding_mask = src_tokens.eq(self.padding_idx) - - if self.embed_speaker is not None: - seq_len, bsz, _ = x.size() - emb = self.embed_speaker(speaker).expand(seq_len, bsz, -1) - x = self.spk_emb_proj(torch.cat([x, emb], dim=2)) - - return { - "encoder_out": [x], # B x T x C - "encoder_padding_mask": encoder_padding_mask, # B x T - } - - -class Prenet(nn.Module): - def __init__(self, in_dim, n_layers, n_units, dropout): - super().__init__() - self.layers = nn.ModuleList( - nn.Sequential(nn.Linear(in_dim if i == 0 else n_units, n_units), - nn.ReLU()) - for i in range(n_layers) - ) - self.dropout = dropout - - def forward(self, x): - for layer in self.layers: - x = F.dropout(layer(x), p=self.dropout) # always applies dropout - return x - - -class Postnet(nn.Module): - def __init__(self, in_dim, n_channels, kernel_size, n_layers, dropout): - super(Postnet, self).__init__() - self.convolutions = nn.ModuleList() - assert(kernel_size % 2 == 1) - for i in range(n_layers): - cur_layers = [ - nn.Conv1d(in_dim if i == 0 else n_channels, - n_channels if i < n_layers - 1 else in_dim, - kernel_size=kernel_size, - padding=((kernel_size - 1) // 2)), - nn.BatchNorm1d(n_channels if i < n_layers 
- 1 else in_dim) - ] + ([nn.Tanh()] if i < n_layers - 1 else []) + [nn.Dropout(dropout)] - nn.init.xavier_uniform_( - cur_layers[0].weight, - torch.nn.init.calculate_gain( - "tanh" if i < n_layers - 1 else "linear" - ) - ) - self.convolutions.append(nn.Sequential(*cur_layers)) - - def forward(self, x): - x = x.transpose(1, 2) # B x T x C -> B x C x T - for conv in self.convolutions: - x = conv(x) - return x.transpose(1, 2) - - -def decoder_init(m): - if isinstance(m, torch.nn.Conv1d): - nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("tanh")) - - -class Tacotron2Decoder(FairseqIncrementalDecoder): - def __init__(self, args, src_dict): - super().__init__(None) - self.args = args - self.n_frames_per_step = args.n_frames_per_step - self.out_dim = args.output_frame_dim * args.n_frames_per_step - - self.prenet = Prenet(self.out_dim, args.prenet_layers, args.prenet_dim, - args.prenet_dropout) - - # take prev_context, prev_frame, (speaker embedding) as input - self.attention_lstm = LSTMCellWithZoneOut( - args.zoneout, - args.prenet_dim + args.encoder_embed_dim, - args.decoder_lstm_dim - ) - - # take attention_lstm output, attention_state, encoder_out as input - self.attention = LocationAttention( - args.attention_dim, args.encoder_embed_dim, args.decoder_lstm_dim, - (1 + int(args.attention_use_cumprob)), - args.attention_conv_dim, args.attention_conv_kernel_size - ) - - # take attention_lstm output, context, (gated_latent) as input - self.lstm = nn.ModuleList( - LSTMCellWithZoneOut( - args.zoneout, - args.encoder_embed_dim + args.decoder_lstm_dim, - args.decoder_lstm_dim - ) - for i in range(args.decoder_lstm_layers) - ) - - proj_in_dim = args.encoder_embed_dim + args.decoder_lstm_dim - self.feat_proj = nn.Linear(proj_in_dim, self.out_dim) - self.eos_proj = nn.Linear(proj_in_dim, 1) - - self.postnet = Postnet(self.out_dim, args.postnet_conv_dim, - args.postnet_conv_kernel_size, - args.postnet_layers, args.postnet_dropout) - - self.ctc_proj = None - if getattr(args, "ctc_weight", 0.) 
> 0.: - self.ctc_proj = nn.Linear(self.out_dim, len(src_dict)) - - self.apply(decoder_init) - - def _get_states(self, incremental_state, enc_out): - bsz, in_len, _ = enc_out.size() - alstm_h = self.get_incremental_state(incremental_state, "alstm_h") - if alstm_h is None: - alstm_h = enc_out.new_zeros(bsz, self.args.decoder_lstm_dim) - alstm_c = self.get_incremental_state(incremental_state, "alstm_c") - if alstm_c is None: - alstm_c = enc_out.new_zeros(bsz, self.args.decoder_lstm_dim) - - lstm_h = self.get_incremental_state(incremental_state, "lstm_h") - if lstm_h is None: - lstm_h = [enc_out.new_zeros(bsz, self.args.decoder_lstm_dim) - for _ in range(self.args.decoder_lstm_layers)] - lstm_c = self.get_incremental_state(incremental_state, "lstm_c") - if lstm_c is None: - lstm_c = [enc_out.new_zeros(bsz, self.args.decoder_lstm_dim) - for _ in range(self.args.decoder_lstm_layers)] - - attn_w = self.get_incremental_state(incremental_state, "attn_w") - if attn_w is None: - attn_w = enc_out.new_zeros(bsz, in_len) - attn_w_cum = self.get_incremental_state(incremental_state, "attn_w_cum") - if attn_w_cum is None: - attn_w_cum = enc_out.new_zeros(bsz, in_len) - return alstm_h, alstm_c, lstm_h, lstm_c, attn_w, attn_w_cum - - def _get_init_attn_c(self, enc_out, enc_mask): - bsz = enc_out.size(0) - if self.args.init_attn_c == "zero": - return enc_out.new_zeros(bsz, self.args.encoder_embed_dim) - elif self.args.init_attn_c == "avg": - enc_w = (~enc_mask).type(enc_out.type()) - enc_w = enc_w / enc_w.sum(dim=1, keepdim=True) - return torch.sum(enc_out * enc_w.unsqueeze(2), dim=1) - else: - raise ValueError(f"{self.args.init_attn_c} not supported") - - def forward(self, prev_output_tokens, encoder_out=None, - incremental_state=None, target_lengths=None, **kwargs): - enc_mask = encoder_out["encoder_padding_mask"] - enc_out = encoder_out["encoder_out"][0] - in_len = enc_out.size(1) - - if incremental_state is not None: - prev_output_tokens = prev_output_tokens[:, -1:, :] - bsz, out_len, _ = prev_output_tokens.size() - - prenet_out = self.prenet(prev_output_tokens) - (alstm_h, alstm_c, lstm_h, lstm_c, - attn_w, attn_w_cum) = self._get_states(incremental_state, enc_out) - attn_ctx = self._get_init_attn_c(enc_out, enc_mask) - - attn_out = enc_out.new_zeros(bsz, in_len, out_len) - feat_out = enc_out.new_zeros(bsz, out_len, self.out_dim) - eos_out = enc_out.new_zeros(bsz, out_len) - for t in range(out_len): - alstm_in = torch.cat((attn_ctx, prenet_out[:, t, :]), dim=1) - alstm_h, alstm_c = self.attention_lstm(alstm_in, (alstm_h, alstm_c)) - - attn_state = attn_w.unsqueeze(1) - if self.args.attention_use_cumprob: - attn_state = torch.stack((attn_w, attn_w_cum), dim=1) - attn_ctx, attn_w = self.attention( - enc_out, enc_mask, alstm_h, attn_state - ) - attn_w_cum = attn_w_cum + attn_w - attn_out[:, :, t] = attn_w - - for i, cur_lstm in enumerate(self.lstm): - if i == 0: - lstm_in = torch.cat((attn_ctx, alstm_h), dim=1) - else: - lstm_in = torch.cat((attn_ctx, lstm_h[i - 1]), dim=1) - lstm_h[i], lstm_c[i] = cur_lstm(lstm_in, (lstm_h[i], lstm_c[i])) - - proj_in = torch.cat((attn_ctx, lstm_h[-1]), dim=1) - feat_out[:, t, :] = self.feat_proj(proj_in) - eos_out[:, t] = self.eos_proj(proj_in).squeeze(1) - self.attention.clear_cache() - - self.set_incremental_state(incremental_state, "alstm_h", alstm_h) - self.set_incremental_state(incremental_state, "alstm_c", alstm_c) - self.set_incremental_state(incremental_state, "lstm_h", lstm_h) - self.set_incremental_state(incremental_state, "lstm_c", lstm_c) - 
self.set_incremental_state(incremental_state, "attn_w", attn_w) - self.set_incremental_state(incremental_state, "attn_w_cum", attn_w_cum) - - post_feat_out = feat_out + self.postnet(feat_out) - eos_out = eos_out.view(bsz, out_len, 1) - return post_feat_out, eos_out, {"attn": attn_out, "feature_out": feat_out} - - -@register_model("tacotron_2") -class Tacotron2Model(FairseqEncoderDecoderModel): - """ - Implementation for https://arxiv.org/pdf/1712.05884.pdf - """ - - @staticmethod - def add_args(parser): - # encoder - parser.add_argument("--encoder-dropout", type=float) - parser.add_argument("--encoder-embed-dim", type=int) - parser.add_argument("--encoder-conv-layers", type=int) - parser.add_argument("--encoder-conv-kernel-size", type=int) - parser.add_argument("--encoder-lstm-layers", type=int) - # decoder - parser.add_argument("--attention-dim", type=int) - parser.add_argument("--attention-conv-dim", type=int) - parser.add_argument("--attention-conv-kernel-size", type=int) - parser.add_argument("--prenet-dropout", type=float) - parser.add_argument("--prenet-layers", type=int) - parser.add_argument("--prenet-dim", type=int) - parser.add_argument("--postnet-dropout", type=float) - parser.add_argument("--postnet-layers", type=int) - parser.add_argument("--postnet-conv-dim", type=int) - parser.add_argument("--postnet-conv-kernel-size", type=int) - parser.add_argument("--init-attn-c", type=str) - parser.add_argument("--attention-use-cumprob", action='store_true') - parser.add_argument("--zoneout", type=float) - parser.add_argument("--decoder-lstm-layers", type=int) - parser.add_argument("--decoder-lstm-dim", type=int) - parser.add_argument("--output-frame-dim", type=int) - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self._num_updates = 0 - - @classmethod - def build_model(cls, args, task): - embed_speaker = task.get_speaker_embeddings(args) - encoder = Tacotron2Encoder(args, task.src_dict, embed_speaker) - decoder = Tacotron2Decoder(args, task.src_dict) - return cls(encoder, decoder) - - def forward_encoder(self, src_tokens, src_lengths, **kwargs): - return self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) - - def set_num_updates(self, num_updates): - super().set_num_updates(num_updates) - self._num_updates = num_updates - - -@register_model_architecture("tacotron_2", "tacotron_2") -def base_architecture(args): - # encoder - args.encoder_dropout = getattr(args, "encoder_dropout", 0.5) - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_conv_layers = getattr(args, "encoder_conv_layers", 3) - args.encoder_conv_kernel_size = getattr(args, "encoder_conv_kernel_size", 5) - args.encoder_lstm_layers = getattr(args, "encoder_lstm_layers", 1) - # decoder - args.attention_dim = getattr(args, "attention_dim", 128) - args.attention_conv_dim = getattr(args, "attention_conv_dim", 32) - args.attention_conv_kernel_size = getattr(args, - "attention_conv_kernel_size", 15) - args.prenet_dropout = getattr(args, "prenet_dropout", 0.5) - args.prenet_layers = getattr(args, "prenet_layers", 2) - args.prenet_dim = getattr(args, "prenet_dim", 256) - args.postnet_dropout = getattr(args, "postnet_dropout", 0.5) - args.postnet_layers = getattr(args, "postnet_layers", 5) - args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512) - args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5) - args.init_attn_c = getattr(args, "init_attn_c", "zero") - args.attention_use_cumprob = getattr(args, "attention_use_cumprob", True) - 
args.zoneout = getattr(args, "zoneout", 0.1) - args.decoder_lstm_layers = getattr(args, "decoder_lstm_layers", 2) - args.decoder_lstm_dim = getattr(args, "decoder_lstm_dim", 1024) - args.output_frame_dim = getattr(args, "output_frame_dim", 80) diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/waveglow_denoiser.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/waveglow_denoiser.py deleted file mode 100644 index 6a6585e8b6901a059445ff54ca20ea87751bbb11..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/textless_nlp/gslm/unit2speech/tacotron2/waveglow_denoiser.py +++ /dev/null @@ -1,40 +0,0 @@ -# import sys -# sys.path.append('tacotron2') -import torch -from .layers import STFT - - -class Denoiser(torch.nn.Module): - """ Removes model bias from audio produced with waveglow """ - - def __init__(self, waveglow, filter_length=1024, n_overlap=4, - win_length=1024, mode='zeros'): - super(Denoiser, self).__init__() - self.stft = STFT(filter_length=filter_length, - hop_length=int(filter_length/n_overlap), - win_length=win_length).cuda() - if mode == 'zeros': - mel_input = torch.zeros( - (1, 80, 88), - dtype=waveglow.upsample.weight.dtype, - device=waveglow.upsample.weight.device) - elif mode == 'normal': - mel_input = torch.randn( - (1, 80, 88), - dtype=waveglow.upsample.weight.dtype, - device=waveglow.upsample.weight.device) - else: - raise Exception("Mode {} if not supported".format(mode)) - - with torch.no_grad(): - bias_audio = waveglow.infer(mel_input, sigma=0.0).float() - bias_spec, _ = self.stft.transform(bias_audio) - - self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None]) - - def forward(self, audio, strength=0.1): - audio_spec, audio_angles = self.stft.transform(audio.cuda().float()) - audio_spec_denoised = audio_spec - self.bias_spec * strength - audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0) - audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles) - return audio_denoised diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/.github/ISSUE_TEMPLATE/feature_request.md b/spaces/OFA-Sys/OFA-vqa/fairseq/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 93c8668041f8a7af29e4c11e905d8b56b946dd51..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -name: 🚀 Feature Request -about: Submit a proposal/request for a new feature -labels: 'enhancement, help wanted, needs triage' ---- - -## 🚀 Feature Request - - -### Motivation - - - -### Pitch - - - -### Alternatives - - - -### Additional context - - diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/scripts/spm_decode.py b/spaces/OFA-Sys/OFA-vqa/fairseq/scripts/spm_decode.py deleted file mode 100644 index 1c18b1d2a7d7628b7aeb6fdb6c4ab5a096e9edf8..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/scripts/spm_decode.py +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
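# Reads one tokenized sentence per line from --input and prints the detokenized text
# using the SentencePiece model given by --model; --input_format selects whether the
# input contains subword pieces or integer ids.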
- -from __future__ import absolute_import, division, print_function, unicode_literals - -import argparse - -import sentencepiece as spm - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--model", required=True, help="sentencepiece model to use for decoding" - ) - parser.add_argument("--input", required=True, help="input file to decode") - parser.add_argument("--input_format", choices=["piece", "id"], default="piece") - args = parser.parse_args() - - sp = spm.SentencePieceProcessor() - sp.Load(args.model) - - if args.input_format == "piece": - - def decode(l): - return "".join(sp.DecodePieces(l)) - - elif args.input_format == "id": - - def decode(l): - return "".join(sp.DecodeIds(l)) - - else: - raise NotImplementedError - - def tok2int(tok): - # remap reference-side (represented as <>) to 0 - return int(tok) if tok != "<>" else 0 - - with open(args.input, "r", encoding="utf-8") as h: - for line in h: - if args.input_format == "id": - print(decode(list(map(tok2int, line.rstrip().split())))) - elif args.input_format == "piece": - print(decode(line.rstrip().split())) - - -if __name__ == "__main__": - main() diff --git a/spaces/OFA-Sys/OFA-vqa/utils/checkpoint_utils.py b/spaces/OFA-Sys/OFA-vqa/utils/checkpoint_utils.py deleted file mode 100644 index 8fed4bc2a214833ab1153d5bc3ff6756db25048b..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/utils/checkpoint_utils.py +++ /dev/null @@ -1,875 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import ast -import collections -import contextlib -import logging -import numpy as np -import os -import re -import time -import traceback -import math -from collections import OrderedDict -from typing import Any, Dict, Optional, Union - -import torch -from fairseq.dataclass.configs import CheckpointConfig -from fairseq.dataclass.utils import ( - convert_namespace_to_omegaconf, - overwrite_args_by_name, -) -from fairseq.distributed.fully_sharded_data_parallel import FSDP, has_FSDP -from fairseq.file_io import PathManager -from fairseq.models import FairseqDecoder, FairseqEncoder -from omegaconf import DictConfig, open_dict, OmegaConf - -from data import data_utils - -logger = logging.getLogger(__name__) - - -def save_checkpoint(cfg: CheckpointConfig, trainer, epoch_itr, val_loss): - from fairseq import meters - - # only one worker should attempt to create the required dir - if trainer.data_parallel_rank == 0: - os.makedirs(cfg.save_dir, exist_ok=True) - - prev_best = getattr(save_checkpoint, "best", val_loss) - if val_loss is not None: - best_function = max if cfg.maximize_best_checkpoint_metric else min - save_checkpoint.best = best_function(val_loss, prev_best) - - if cfg.no_save: - return - - trainer.consolidate_optimizer() # TODO(SS): do we need this if no_save_optimizer_state - - if not trainer.should_save_checkpoint_on_current_rank: - if trainer.always_call_state_dict_during_save_checkpoint: - trainer.state_dict() - return - - write_timer = meters.StopwatchMeter() - write_timer.start() - - epoch = epoch_itr.epoch - end_of_epoch = epoch_itr.end_of_epoch() - updates = trainer.get_num_updates() - - logger.info(f"Preparing to save checkpoint for epoch {epoch} @ {updates} updates") - - def is_better(a, b): - return a >= b if cfg.maximize_best_checkpoint_metric else a <= b - - suffix = trainer.checkpoint_suffix - checkpoint_conds = collections.OrderedDict() - 
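    # checkpoint_conds maps each candidate checkpoint filename to a boolean that says
    # whether it should be written at this step: end-of-epoch checkpoints, periodic
    # update checkpoints, the best / best-N checkpoints by validation metric, and the
    # rolling checkpoint_last file.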
checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = ( - end_of_epoch and not cfg.no_epoch_checkpoints and epoch % cfg.save_interval == 0 - ) - checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = ( - not end_of_epoch - and cfg.save_interval_updates > 0 - and updates % cfg.save_interval_updates == 0 - ) - checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and ( - not hasattr(save_checkpoint, "best") - or is_better(val_loss, save_checkpoint.best) - ) - if val_loss is not None and cfg.keep_best_checkpoints > 0: - worst_best = getattr(save_checkpoint, "best", None) - chkpts = checkpoint_paths( - cfg.save_dir, - pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format( - cfg.best_checkpoint_metric, suffix - ), - ) - if len(chkpts) > 0: - p = chkpts[-1] if cfg.maximize_best_checkpoint_metric else chkpts[0] - worst_best = float(p.rsplit("_")[-1].replace("{}.pt".format(suffix), "")) - # add random digits to resolve ties - with data_utils.numpy_seed(epoch, updates, val_loss): - rand_sfx = np.random.randint(0, cfg.keep_best_checkpoints) - - checkpoint_conds[ - "checkpoint.best_{}_{:.3f}{}{}.pt".format( - cfg.best_checkpoint_metric, - val_loss, - rand_sfx, - suffix - ) - ] = worst_best is None or is_better(val_loss, worst_best) - checkpoint_conds[ - "checkpoint_last{}.pt".format(suffix) - ] = not cfg.no_last_checkpoints - - extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss} - if hasattr(save_checkpoint, "best"): - extra_state.update({"best": save_checkpoint.best}) - - checkpoints = [ - os.path.join(cfg.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond - ] - if len(checkpoints) > 0: - trainer.save_checkpoint(checkpoints[0], extra_state) - for cp in checkpoints[1:]: - if cfg.write_checkpoints_asynchronously: - # TODO[ioPath]: Need to implement a delayed asynchronous - # file copying/moving feature. - logger.warning( - f"ioPath is not copying {checkpoints[0]} to {cp} " - "since async write mode is on." 
- ) - else: - assert PathManager.copy( - checkpoints[0], cp, overwrite=True - ), f"Failed to copy {checkpoints[0]} to {cp}" - - write_timer.stop() - logger.info( - "Saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format( - checkpoints[0], epoch, updates, val_loss, write_timer.sum - ) - ) - - if not end_of_epoch and cfg.keep_interval_updates > 0: - # remove old checkpoints; checkpoints are sorted in descending order - if cfg.keep_interval_updates_pattern == -1: - checkpoints = checkpoint_paths( - cfg.save_dir, pattern=r"checkpoint_\d+_(\d+){}\.pt".format(suffix) - ) - else: - checkpoints = checkpoint_paths( - cfg.save_dir, - pattern=r"checkpoint_\d+_(\d+){}\.pt".format(suffix), - keep_match=True, - ) - checkpoints = [ - x[0] - for x in checkpoints - if x[1] % cfg.keep_interval_updates_pattern != 0 - ] - - for old_chk in checkpoints[cfg.keep_interval_updates :]: - if os.path.lexists(old_chk): - os.remove(old_chk) - elif PathManager.exists(old_chk): - PathManager.rm(old_chk) - - if cfg.keep_last_epochs > 0: - # remove old epoch checkpoints; checkpoints are sorted in descending order - checkpoints = checkpoint_paths( - cfg.save_dir, pattern=r"checkpoint(\d+){}\.pt".format(suffix) - ) - for old_chk in checkpoints[cfg.keep_last_epochs :]: - if os.path.lexists(old_chk): - os.remove(old_chk) - elif PathManager.exists(old_chk): - PathManager.rm(old_chk) - - if cfg.keep_best_checkpoints > 0: - # only keep the best N checkpoints according to validation metric - checkpoints = checkpoint_paths( - cfg.save_dir, - pattern=r"checkpoint\.best_{}_(\d+\.?\d*){}\.pt".format( - cfg.best_checkpoint_metric, suffix - ), - ) - if not cfg.maximize_best_checkpoint_metric: - checkpoints = checkpoints[::-1] - for old_chk in checkpoints[cfg.keep_best_checkpoints :]: - if os.path.lexists(old_chk): - os.remove(old_chk) - elif PathManager.exists(old_chk): - PathManager.rm(old_chk) - - -def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args): - """ - Load a checkpoint and restore the training iterator. - - *passthrough_args* will be passed through to - ``trainer.get_train_iterator``. - """ - - reset_optimizer = cfg.reset_optimizer - reset_lr_scheduler = cfg.reset_lr_scheduler - optimizer_overrides = ast.literal_eval(cfg.optimizer_overrides) - reset_meters = cfg.reset_meters - reset_dataloader = cfg.reset_dataloader - - if cfg.finetune_from_model is not None and ( - reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader - ): - raise ValueError( - "--finetune-from-model can not be set together with either --reset-optimizer" - " or reset_lr_scheduler or reset_meters or reset_dataloader" - ) - - suffix = trainer.checkpoint_suffix - if ( - cfg.restore_file == "checkpoint_last.pt" - ): # default value of restore_file is 'checkpoint_last.pt' - checkpoint_path = os.path.join( - cfg.save_dir, "checkpoint_last{}.pt".format(suffix) - ) - first_launch = not PathManager.exists(checkpoint_path) - if cfg.finetune_from_model is not None and first_launch: - # if there is no last checkpoint to restore, start the finetune from pretrained model - # else just use usual logic to load checkpoint, e.g. restart from last checkpoint and etc. 
- if PathManager.exists(cfg.finetune_from_model): - checkpoint_path = cfg.finetune_from_model - reset_optimizer = True - reset_lr_scheduler = True - reset_meters = True - reset_dataloader = True - logger.info( - f"loading pretrained model from {checkpoint_path}: " - "optimizer, lr scheduler, meters, dataloader will be reset" - ) - else: - raise ValueError( - f"--funetune-from-model {cfg.finetune_from_model} does not exist" - ) - elif suffix is not None: - checkpoint_path = cfg.restore_file.replace(".pt", suffix + ".pt") - else: - checkpoint_path = cfg.restore_file - - if cfg.restore_file != "checkpoint_last.pt" and cfg.finetune_from_model: - raise ValueError( - "--finetune-from-model and --restore-file (non-default value) " - "can not be specified together: " + str(cfg) - ) - - extra_state = trainer.load_checkpoint( - checkpoint_path, - reset_optimizer, - reset_lr_scheduler, - optimizer_overrides, - reset_meters=reset_meters, - ) - - if ( - extra_state is not None - and "best" in extra_state - and not reset_optimizer - and not reset_meters - ): - save_checkpoint.best = extra_state["best"] - - if extra_state is not None and not reset_dataloader: - # restore iterator from checkpoint - itr_state = extra_state["train_iterator"] - epoch_itr = trainer.get_train_iterator( - epoch=itr_state["epoch"], load_dataset=True, **passthrough_args - ) - epoch_itr.load_state_dict(itr_state) - _n = itr_state['iterations_in_epoch'] - offset = sum(len(_) for _ in epoch_itr.batch_sampler[:_n]) - epoch_itr.dataset.dataset._seek(offset=offset) - true_num = int(math.ceil(len(epoch_itr.dataset) / 8)) * 8 - another_offset = ((epoch_itr.epoch - 1) * true_num + offset) // 8 - if hasattr(epoch_itr.dataset, 'pure_text_dataset'): - text_offset = (2 * another_offset) % len(epoch_itr.dataset.pure_text_dataset) - epoch_itr.dataset.pure_text_dataset._seek(offset=text_offset) - if hasattr(epoch_itr.dataset, 'pure_image_dataset'): - image_offset = another_offset % len(epoch_itr.dataset.pure_image_dataset) - epoch_itr.dataset.pure_image_dataset._seek(offset=image_offset) - if hasattr(epoch_itr.dataset, 'detection_dataset'): - detection_offset = another_offset % len(epoch_itr.dataset.detection_dataset) - epoch_itr.dataset.detection_dataset._seek(offset=detection_offset) - else: - epoch_itr = trainer.get_train_iterator( - epoch=1, load_dataset=True, **passthrough_args - ) - - trainer.lr_step(epoch_itr.epoch) - - return extra_state, epoch_itr - - -def load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False): - """Loads a checkpoint to CPU (with upgrading for backward compatibility). - - If doing single-GPU training or if the checkpoint is only being loaded by at - most one process on each node (current default behavior is for only rank 0 - to read the checkpoint from disk), load_on_all_ranks should be False to - avoid errors from torch.distributed not having been initialized or - torch.distributed.barrier() hanging. - - If all processes on each node may be loading the checkpoint - simultaneously, load_on_all_ranks should be set to True to avoid I/O - conflicts. - - There's currently no support for > 1 but < all processes loading the - checkpoint on each node. - """ - local_path = PathManager.get_local_path(path) - # The locally cached file returned by get_local_path() may be stale for - # remote files that are periodically updated/overwritten (ex: - # checkpoint_last.pt) - so we remove the local copy, sync across processes - # (if needed), and then download a fresh copy. 
- if local_path != path and PathManager.path_requires_pathmanager(path): - try: - os.remove(local_path) - except FileNotFoundError: - # With potentially multiple processes removing the same file, the - # file being missing is benign (missing_ok isn't available until - # Python 3.8). - pass - if load_on_all_ranks: - torch.distributed.barrier() - local_path = PathManager.get_local_path(path) - - with open(local_path, "rb") as f: - state = torch.load(f, map_location=torch.device("cpu")) - - if "args" in state and state["args"] is not None and arg_overrides is not None: - args = state["args"] - for arg_name, arg_val in arg_overrides.items(): - setattr(args, arg_name, arg_val) - - if "cfg" in state and state["cfg"] is not None: - - # hack to be able to set Namespace in dict config. this should be removed when we update to newer - # omegaconf version that supports object flags, or when we migrate all existing models - from omegaconf import _utils - - old_primitive = _utils.is_primitive_type - _utils.is_primitive_type = lambda _: True - - state["cfg"] = OmegaConf.create(state["cfg"]) - - _utils.is_primitive_type = old_primitive - OmegaConf.set_struct(state["cfg"], True) - - if arg_overrides is not None: - overwrite_args_by_name(state["cfg"], arg_overrides) - - state = _upgrade_state_dict(state) - return state - - -def load_model_ensemble( - filenames, - arg_overrides: Optional[Dict[str, Any]] = None, - task=None, - strict=True, - suffix="", - num_shards=1, - state=None, -): - """Loads an ensemble of models. - - Args: - filenames (List[str]): checkpoint files to load - arg_overrides (Dict[str,Any], optional): override model args that - were used during model training - task (fairseq.tasks.FairseqTask, optional): task to use for loading - """ - assert not ( - strict and num_shards > 1 - ), "Cannot load state dict with strict=True and checkpoint shards > 1" - ensemble, args, _task = load_model_ensemble_and_task( - filenames, - arg_overrides, - task, - strict, - suffix, - num_shards, - state, - ) - return ensemble, args - - -def get_maybe_sharded_checkpoint_filename( - filename: str, suffix: str, shard_idx: int, num_shards: int -) -> str: - orig_filename = filename - filename = filename.replace(".pt", suffix + ".pt") - fsdp_filename = filename[:-3] + f"-shard{shard_idx}.pt" - model_parallel_filename = orig_filename[:-3] + f"_part{shard_idx}.pt" - if PathManager.exists(fsdp_filename): - return fsdp_filename - elif num_shards > 1: - return model_parallel_filename - else: - return filename - - -def load_model_ensemble_and_task( - filenames, - arg_overrides: Optional[Dict[str, Any]] = None, - task=None, - strict=True, - suffix="", - num_shards=1, - state=None, -): - assert state is None or len(filenames) == 1 - - from fairseq import tasks - - assert not ( - strict and num_shards > 1 - ), "Cannot load state dict with strict=True and checkpoint shards > 1" - ensemble = [] - cfg = None - for filename in filenames: - orig_filename = filename - model_shard_state = {"shard_weights": [], "shard_metadata": []} - assert num_shards > 0 - st = time.time() - for shard_idx in range(num_shards): - filename = get_maybe_sharded_checkpoint_filename( - orig_filename, suffix, shard_idx, num_shards - ) - - if not PathManager.exists(filename): - raise IOError("Model file not found: {}".format(filename)) - if state is None: - state = load_checkpoint_to_cpu(filename, arg_overrides) - if "args" in state and state["args"] is not None: - cfg = convert_namespace_to_omegaconf(state["args"]) - elif "cfg" in state and state["cfg"] is 
not None: - cfg = state["cfg"] - else: - raise RuntimeError( - f"Neither args nor cfg exist in state keys = {state.keys()}" - ) - - if task is None: - task = tasks.setup_task(cfg.task) - - if "task_state" in state: - task.load_state_dict(state["task_state"]) - - if "fsdp_metadata" in state and num_shards > 1: - model_shard_state["shard_weights"].append(state["model"]) - model_shard_state["shard_metadata"].append(state["fsdp_metadata"]) - # check FSDP import before the code goes too far - if not has_FSDP: - raise ImportError( - "Cannot find FullyShardedDataParallel. " - "Please install fairscale with: pip install fairscale" - ) - if shard_idx == num_shards - 1: - consolidated_model_state = FSDP.consolidate_shard_weights( - shard_weights=model_shard_state["shard_weights"], - shard_metadata=model_shard_state["shard_metadata"], - ) - model = task.build_model(cfg.model) - model.load_state_dict( - consolidated_model_state, strict=strict, model_cfg=cfg.model - ) - else: - # model parallel checkpoint or unsharded checkpoint - model = task.build_model(cfg.model) - model.load_state_dict( - state["model"], strict=strict, model_cfg=cfg.model - ) - - # reset state so it gets loaded for the next model in ensemble - state = None - if shard_idx % 10 == 0 and shard_idx > 0: - elapsed = time.time() - st - logger.info( - f"Loaded {shard_idx} shards in {elapsed:.2f}s, {elapsed / (shard_idx+1):.2f}s/shard" - ) - - # build model for ensemble - ensemble.append(model) - return ensemble, cfg, task - - -def checkpoint_paths(path, pattern=r"checkpoint(\d+)\.pt", keep_match=False): - """Retrieves all checkpoints found in `path` directory. - - Checkpoints are identified by matching filename to the specified pattern. If - the pattern contains groups, the result will be sorted by the first group in - descending order. 
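    For example, with the default pattern, checkpoint20.pt is returned before
    checkpoint3.pt.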
- """ - pt_regexp = re.compile(pattern) - files = PathManager.ls(path) - - entries = [] - for i, f in enumerate(files): - m = pt_regexp.fullmatch(f) - if m is not None: - idx = float(m.group(1)) if len(m.groups()) > 0 else i - entries.append((idx, m.group(0))) - if keep_match: - return [(os.path.join(path, x[1]), x[0]) for x in sorted(entries, reverse=True)] - else: - return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)] - - -def torch_persistent_save(obj, filename, async_write: bool = False): - if async_write: - with PathManager.opena(filename, "wb") as f: - _torch_persistent_save(obj, f) - else: - with PathManager.open(filename, "wb") as f: - _torch_persistent_save(obj, f) - # if PathManager.supports_rename(filename): - # # do atomic save - # with PathManager.open(filename + ".tmp", "wb") as f: - # _torch_persistent_save(obj, f) - # PathManager.rename(filename + ".tmp", filename) - # else: - # # fallback to non-atomic save - # with PathManager.open(filename, "wb") as f: - # _torch_persistent_save(obj, f) - - -def _torch_persistent_save(obj, f): - if isinstance(f, str): - with PathManager.open(f, "wb") as h: - torch_persistent_save(obj, h) - return - for i in range(3): - try: - return torch.save(obj, f) - except Exception: - if i == 2: - logger.error(traceback.format_exc()) - raise - - -def _upgrade_state_dict(state): - """Helper for upgrading old model checkpoints.""" - - # add optimizer_history - if "optimizer_history" not in state: - state["optimizer_history"] = [ - {"criterion_name": "CrossEntropyCriterion", "best_loss": state["best_loss"]} - ] - state["last_optimizer_state"] = state["optimizer"] - del state["optimizer"] - del state["best_loss"] - # move extra_state into sub-dictionary - if "epoch" in state and "extra_state" not in state: - state["extra_state"] = { - "epoch": state["epoch"], - "batch_offset": state["batch_offset"], - "val_loss": state["val_loss"], - } - del state["epoch"] - del state["batch_offset"] - del state["val_loss"] - # reduce optimizer history's memory usage (only keep the last state) - if "optimizer" in state["optimizer_history"][-1]: - state["last_optimizer_state"] = state["optimizer_history"][-1]["optimizer"] - for optim_hist in state["optimizer_history"]: - del optim_hist["optimizer"] - # record the optimizer class name - if "optimizer_name" not in state["optimizer_history"][-1]: - state["optimizer_history"][-1]["optimizer_name"] = "FairseqNAG" - # move best_loss into lr_scheduler_state - if "lr_scheduler_state" not in state["optimizer_history"][-1]: - state["optimizer_history"][-1]["lr_scheduler_state"] = { - "best": state["optimizer_history"][-1]["best_loss"] - } - del state["optimizer_history"][-1]["best_loss"] - # keep track of number of updates - if "num_updates" not in state["optimizer_history"][-1]: - state["optimizer_history"][-1]["num_updates"] = 0 - # old model checkpoints may not have separate source/target positions - if ( - "args" in state - and hasattr(state["args"], "max_positions") - and not hasattr(state["args"], "max_source_positions") - ): - state["args"].max_source_positions = state["args"].max_positions - state["args"].max_target_positions = state["args"].max_positions - # use stateful training data iterator - if "train_iterator" not in state["extra_state"]: - state["extra_state"]["train_iterator"] = { - "epoch": state["extra_state"]["epoch"], - "iterations_in_epoch": state["extra_state"].get("batch_offset", 0), - } - - # backward compatibility, cfg updates - if "args" in state and state["args"] is not None: - # 
default to translation task - if not hasattr(state["args"], "task"): - state["args"].task = "translation" - # --raw-text and --lazy-load are deprecated - if getattr(state["args"], "raw_text", False): - state["args"].dataset_impl = "raw" - elif getattr(state["args"], "lazy_load", False): - state["args"].dataset_impl = "lazy" - # epochs start at 1 - if state["extra_state"]["train_iterator"] is not None: - state["extra_state"]["train_iterator"]["epoch"] = max( - state["extra_state"]["train_iterator"].get("epoch", 1), 1 - ) - # --remove-bpe ==> --postprocess - if hasattr(state["args"], "remove_bpe"): - state["args"].post_process = state["args"].remove_bpe - # --min-lr ==> --stop-min-lr - if hasattr(state["args"], "min_lr"): - state["args"].stop_min_lr = state["args"].min_lr - del state["args"].min_lr - # binary_cross_entropy / kd_binary_cross_entropy => wav2vec criterion - if ( - hasattr(state["args"], "criterion") - and state["args"].criterion in [ - "binary_cross_entropy", - "kd_binary_cross_entropy", - ] - ): - state["args"].criterion = "wav2vec" - # remove log_keys if it's None (criteria will supply a default value of []) - if hasattr(state["args"], "log_keys") and state["args"].log_keys is None: - delattr(state["args"], "log_keys") - # speech_pretraining => audio pretraining - if ( - hasattr(state["args"], "task") - and state["args"].task == "speech_pretraining" - ): - state["args"].task = "audio_pretraining" - # audio_cpc => wav2vec - if hasattr(state["args"], "arch") and state["args"].arch == "audio_cpc": - state["args"].arch = "wav2vec" - # convert legacy float learning rate to List[float] - if hasattr(state["args"], "lr") and isinstance(state["args"].lr, float): - state["args"].lr = [state["args"].lr] - # convert task data arg to a string instead of List[string] - if ( - hasattr(state["args"], "data") - and isinstance(state["args"].data, list) - and len(state["args"].data) > 0 - ): - state["args"].data = state["args"].data[0] - # remove keys in state["args"] related to teacher-student learning - for key in [ - "static_teachers", - "static_teacher_weights", - "dynamic_teachers", - "dynamic_teacher_weights", - ]: - if key in state["args"]: - delattr(state["args"], key) - - state["cfg"] = convert_namespace_to_omegaconf(state["args"]) - - if "cfg" in state and state["cfg"] is not None: - cfg = state["cfg"] - with open_dict(cfg): - # any upgrades for Hydra-based configs - if ( - "task" in cfg - and "eval_wer_config" in cfg.task - and isinstance(cfg.task.eval_wer_config.print_alignment, bool) - ): - cfg.task.eval_wer_config.print_alignment = "hard" - if "generation" in cfg and isinstance(cfg.generation.print_alignment, bool): - cfg.generation.print_alignment = "hard" if cfg.generation.print_alignment else None - if ( - "model" in cfg - and "w2v_args" in cfg.model - and cfg.model.w2v_args is not None - and ( - hasattr(cfg.model.w2v_args, "task") or "task" in cfg.model.w2v_args - ) - and hasattr(cfg.model.w2v_args.task, "eval_wer_config") - and cfg.model.w2v_args.task.eval_wer_config is not None - and isinstance( - cfg.model.w2v_args.task.eval_wer_config.print_alignment, bool - ) - ): - cfg.model.w2v_args.task.eval_wer_config.print_alignment = "hard" - - return state - - -def prune_state_dict(state_dict, model_cfg: Optional[DictConfig]): - """Prune the given state_dict if desired for LayerDrop - (https://arxiv.org/abs/1909.11556). - - Training with LayerDrop allows models to be robust to pruning at inference - time. 
This function prunes state_dict to allow smaller models to be loaded - from a larger model and re-maps the existing state_dict for this to occur. - - It's called by functions that load models from checkpoints and does not - need to be called directly. - """ - arch = None - if model_cfg is not None: - arch = ( - model_cfg._name - if isinstance(model_cfg, DictConfig) - else getattr(model_cfg, "arch", None) - ) - - if not model_cfg or arch is None or arch == "ptt_transformer": - # args should not be none, but don't crash if it is. - return state_dict - - encoder_layers_to_keep = getattr(model_cfg, "encoder_layers_to_keep", None) - decoder_layers_to_keep = getattr(model_cfg, "decoder_layers_to_keep", None) - - if not encoder_layers_to_keep and not decoder_layers_to_keep: - return state_dict - - # apply pruning - logger.info( - "Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop" - ) - - def create_pruning_pass(layers_to_keep, layer_name): - keep_layers = sorted( - int(layer_string) for layer_string in layers_to_keep.split(",") - ) - mapping_dict = {} - for i in range(len(keep_layers)): - mapping_dict[str(keep_layers[i])] = str(i) - - regex = re.compile(r"^{layer}.*\.layers\.(\d+)".format(layer=layer_name)) - return {"substitution_regex": regex, "mapping_dict": mapping_dict} - - pruning_passes = [] - if encoder_layers_to_keep: - pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, "encoder")) - if decoder_layers_to_keep: - pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, "decoder")) - - new_state_dict = {} - for layer_name in state_dict.keys(): - match = re.search(r"\.layers\.(\d+)\.", layer_name) - # if layer has no number in it, it is a supporting layer, such as an - # embedding - if not match: - new_state_dict[layer_name] = state_dict[layer_name] - continue - - # otherwise, layer should be pruned. - original_layer_number = match.group(1) - # figure out which mapping dict to replace from - for pruning_pass in pruning_passes: - if original_layer_number in pruning_pass["mapping_dict"] and pruning_pass[ - "substitution_regex" - ].search(layer_name): - new_layer_number = pruning_pass["mapping_dict"][original_layer_number] - substitution_match = pruning_pass["substitution_regex"].search( - layer_name - ) - new_state_key = ( - layer_name[: substitution_match.start(1)] - + new_layer_number - + layer_name[substitution_match.end(1) :] - ) - new_state_dict[new_state_key] = state_dict[layer_name] - - # Since layers are now pruned, *_layers_to_keep are no longer needed. - # This is more of "It would make it work fix" rather than a proper fix. - if isinstance(model_cfg, DictConfig): - context = open_dict(model_cfg) - else: - context = contextlib.ExitStack() - with context: - if hasattr(model_cfg, "encoder_layers_to_keep"): - model_cfg.encoder_layers_to_keep = None - if hasattr(model_cfg, "decoder_layers_to_keep"): - model_cfg.decoder_layers_to_keep = None - - return new_state_dict - - -def load_pretrained_component_from_model( - component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str -): - """ - Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the - provided `component` object. If state_dict fails to load, there may be a - mismatch in the architecture of the corresponding `component` found in the - `checkpoint` file. 
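    Keys are matched by their leading "encoder." or "decoder." prefix, which is
    stripped before the remaining weights are loaded into the component with
    strict=True.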
- """ - if not PathManager.exists(checkpoint): - raise IOError("Model file not found: {}".format(checkpoint)) - state = load_checkpoint_to_cpu(checkpoint) - if isinstance(component, FairseqEncoder): - component_type = "encoder" - elif isinstance(component, FairseqDecoder): - component_type = "decoder" - else: - raise ValueError( - "component to load must be either a FairseqEncoder or " - "FairseqDecoder. Loading other component types are not supported." - ) - component_state_dict = OrderedDict() - for key in state["model"].keys(): - if key.startswith(component_type): - # encoder.input_layers.0.0.weight --> input_layers.0.0.weight - component_subkey = key[len(component_type) + 1 :] - component_state_dict[component_subkey] = state["model"][key] - component.load_state_dict(component_state_dict, strict=True) - return component - - -def verify_checkpoint_directory(save_dir: str) -> None: - if not os.path.exists(save_dir): - os.makedirs(save_dir, exist_ok=True) - temp_file_path = os.path.join(save_dir, "dummy") - try: - with open(temp_file_path, "w"): - pass - except OSError as e: - logger.warning( - "Unable to access checkpoint save directory: {}".format(save_dir) - ) - raise e - else: - os.remove(temp_file_path) - - -def load_ema_from_checkpoint(fpath): - """Loads exponential moving averaged (EMA) checkpoint from input and - returns a model with ema weights. - - Args: - fpath: A string path of checkpoint to load from. - - Returns: - A dict of string keys mapping to various values. The 'model' key - from the returned dict should correspond to an OrderedDict mapping - string parameter names to torch Tensors. - """ - params_dict = collections.OrderedDict() - new_state = None - - with PathManager.open(fpath, 'rb') as f: - new_state = torch.load( - f, - map_location=( - lambda s, _: torch.serialization.default_restore_location(s, 'cpu') - ), - ) - - # EMA model is stored in a separate "extra state" - model_params = new_state['extra_state']['ema'] - - for key in list(model_params.keys()): - p = model_params[key] - if isinstance(p, torch.HalfTensor): - p = p.float() - if key not in params_dict: - params_dict[key] = p.clone() - # NOTE: clone() is needed in case of p is a shared parameter - else: - raise ValueError("Key {} is repeated in EMA model params.".format(key)) - - if len(params_dict) == 0: - raise ValueError( - f"Input checkpoint path '{fpath}' does not contain " - "ema model weights, is this model trained with EMA?" 
- ) - - new_state['model'] = params_dict - return new_state diff --git a/spaces/OpenDILabCommunity/LLMRiddlesChatGLMCN/llmriddles/questions/level1.py b/spaces/OpenDILabCommunity/LLMRiddlesChatGLMCN/llmriddles/questions/level1.py deleted file mode 100644 index 3563e50681cafe59ef7f9c9eb7f9bc2994ff8a42..0000000000000000000000000000000000000000 --- a/spaces/OpenDILabCommunity/LLMRiddlesChatGLMCN/llmriddles/questions/level1.py +++ /dev/null @@ -1,204 +0,0 @@ -from .question import register_question - - -def count_english_words(text: str): - return len(text.split(' ')) - - -def count_chinese_words(text: str): - return len(text) - - -def check_if_chinese(text: str): - return all('\u4e00' <= char <= '\u9fff' for char in text) - - -def count_words(text: str, contain_punctuation: bool = False): - chinese_words = [] - english_words = [] - other_words = [] - temp_english_words = [] - for char in text: - if '\u4e00' <= char <= '\u9fff': - chinese_words.append(char) - if len(temp_english_words) > 0: - english_words.append(''.join(temp_english_words)) - temp_english_words = [] - else: - if char.isalpha(): - temp_english_words.append(char) - else: - if len(temp_english_words) > 0: - english_words.append(''.join(temp_english_words)) - temp_english_words = [] - other_words.append(char) - if contain_punctuation: - return len(chinese_words) + len(english_words) + len(other_words) - else: - return len(chinese_words) + len(english_words) - - -CN_TEXT_1 = """ -第一章第一题(初来乍到),请你构造一个问题使模型的回答是一字不差的“1+1=3”(不需要引号)。 - -请在下面的输入框内填写你的构造并点击按钮提交。 -""" - -EN_TEXT_1 = """ -For the first question in chapter 1, please construct a query for the model that will result in the exact answer "1+1=3" (no quotes needed). - -Please enter your query below and click the submit button -""" - - -def _checker_1(question_text: str, user_text: str, answer_text: str, lang: str): - _ = question_text, lang - answer_text = answer_text.strip() - user_text = user_text.strip() - if answer_text == '1+1=3': - return True, None - else: - return False, None - - -register_question( - { - 'cn': CN_TEXT_1, - 'en': EN_TEXT_1, - }, - checkers=_checker_1, - name={'cn': '1-1 初来乍到', 'en': '1-1'}, -) - -CN_TEXT_2 = """ -第一章第二题(小试牛刀),请你输入三个字(及)以内的问题,使模型的回答在30个字以上。 - -请在下面的输入框内填写你的问题并点击按钮提交。 -""" - -EN_TEXT_2 = """ -For the second question in chapter 1, please enter a question within three words so that the model’s answer is more than 30 words. - -Please enter your query below and click the submit button -""" - - -def _checker_2(question_text: str, user_text: str, answer_text: str, lang: str): - _ = question_text, lang - answer_text = answer_text.strip() - user_text = user_text.strip() - - if count_words(user_text) > 3: - return False, "用户的问题长度应该在三个字及以内" if lang == 'cn' else 'Question should be within three words.' - elif count_words(answer_text) <= 30: - return False, "大语言模型的答案应该超过30个字" if lang == 'cn' else 'cAnswer should be more than 30 words.' - else: - return True, None - - -register_question( - { - 'cn': CN_TEXT_2, - 'en': EN_TEXT_2, - }, - checkers=_checker_2, - name={'cn': '1-2 小试牛刀', 'en': '1-2'}, -) - -CN_TEXT_3 = """ -第一章第三题(短说长话),请你输入一个字的问题,使模型的回答在100个字以上。 - -请在下面的输入框内填写你的问题并点击按钮提交。 -""" - -EN_TEXT_3 = """ -For the third question in chapter 1, please enter a one-word question so that the model’s answer is more than 100 words. 
- -Please enter your query below and click the submit button -""" - - -def _checker_3(question_text: str, user_text: str, answer_text: str, lang: str): - _ = question_text, lang - answer_text = answer_text.strip() - user_text = user_text.strip() - - if count_words(user_text) > 1: - return False, "用户的问题长度应该在一个字及以内" if lang == 'cn' else 'Question should be one word.' - elif count_words(answer_text) <= 100: - return False, "大语言模型的答案应该超过100个字" if lang == 'cn' else 'Answer should be more than 100 words.' - else: - return True, None - - -register_question( - { - 'cn': CN_TEXT_3, - 'en': EN_TEXT_3, - }, - checkers=_checker_3, - name={'cn': '1-3 短说长话', 'en': '1-3'} -) - -CN_TEXT_4 = """ -第一章第四题(短说短话),请输入一个字的问题,使模型的回答字数小于20个字。 - -请在下面的输入框内填写你的问题并点击按钮提交。 -""" - -EN_TEXT_4 = """ -For the fourth question in chapter 1, please enter a one-word question so that the model’s answer is less than 20 words. - -Please enter your query below and click the submit button -""" - - -def _checker_4(question_text: str, user_text: str, answer_text: str, lang: str): - _ = question_text, lang - answer_text = answer_text.strip() - user_text = user_text.strip() - - if count_words(user_text) > 1: - return False, "用户的问题长度应该在一个字及以内" if lang == 'cn' else 'Question should be one word.' - elif count_words(answer_text) >= 20: - return False, "大语言模型的答案应该小于20个字" if lang == 'cn' else 'Answer should be less than 20 words.' - else: - return True, None - - -register_question( - { - 'cn': CN_TEXT_4, - 'en': EN_TEXT_4, - }, - checkers=_checker_4, - name={'cn': '1-4 短说短话', 'en': '1-4'}, -) - -# CN_TEXT_5 = """ -# 第一章第五题(回文不变),请输入一个本身不是回文串的问题,使无论正着问还是倒着问,模型的回答是一样的。 - -# 请在下面的输入框内填写你的问题并点击按钮提交。 -# """ - -# EN_TEXT_5 = """ -# For the fourth question in chapter 1, please enter a question that is not a palindrome string so that the model's answer is the same whether it is asked forward or backward. - -# Please enter your query below and click the submit button -# """ - -# def _checker_5(question_text: str, answer_text: str, lang: str): -# _ = question_text, lang -# answer_text = answer_text.strip() - -# if count_words(question_text) > 0: -# return False, 'Question should be one word.' -# elif count_words(answer_text) >= 20: -# return False, 'Answer should be less than 20 words.' 
-# else: -# return True, None - -# register_question({ -# 'cn': CN_TEXT_5, -# 'en': EN_TEXT_5, -# }, _checker_5) diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/saicinpainting/training/losses/distance_weighting.py b/spaces/OpenGVLab/InternGPT/third-party/lama/bin/saicinpainting/training/losses/distance_weighting.py deleted file mode 100644 index 93052003b1e47fd663c70aedcecd144171f49204..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/saicinpainting/training/losses/distance_weighting.py +++ /dev/null @@ -1,126 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -import torchvision - -from saicinpainting.training.losses.perceptual import IMAGENET_STD, IMAGENET_MEAN - - -def dummy_distance_weighter(real_img, pred_img, mask): - return mask - - -def get_gauss_kernel(kernel_size, width_factor=1): - coords = torch.stack(torch.meshgrid(torch.arange(kernel_size), - torch.arange(kernel_size)), - dim=0).float() - diff = torch.exp(-((coords - kernel_size // 2) ** 2).sum(0) / kernel_size / width_factor) - diff /= diff.sum() - return diff - - -class BlurMask(nn.Module): - def __init__(self, kernel_size=5, width_factor=1): - super().__init__() - self.filter = nn.Conv2d(1, 1, kernel_size, padding=kernel_size // 2, padding_mode='replicate', bias=False) - self.filter.weight.data.copy_(get_gauss_kernel(kernel_size, width_factor=width_factor)) - - def forward(self, real_img, pred_img, mask): - with torch.no_grad(): - result = self.filter(mask) * mask - return result - - -class EmulatedEDTMask(nn.Module): - def __init__(self, dilate_kernel_size=5, blur_kernel_size=5, width_factor=1): - super().__init__() - self.dilate_filter = nn.Conv2d(1, 1, dilate_kernel_size, padding=dilate_kernel_size// 2, padding_mode='replicate', - bias=False) - self.dilate_filter.weight.data.copy_(torch.ones(1, 1, dilate_kernel_size, dilate_kernel_size, dtype=torch.float)) - self.blur_filter = nn.Conv2d(1, 1, blur_kernel_size, padding=blur_kernel_size // 2, padding_mode='replicate', bias=False) - self.blur_filter.weight.data.copy_(get_gauss_kernel(blur_kernel_size, width_factor=width_factor)) - - def forward(self, real_img, pred_img, mask): - with torch.no_grad(): - known_mask = 1 - mask - dilated_known_mask = (self.dilate_filter(known_mask) > 1).float() - result = self.blur_filter(1 - dilated_known_mask) * mask - return result - - -class PropagatePerceptualSim(nn.Module): - def __init__(self, level=2, max_iters=10, temperature=500, erode_mask_size=3): - super().__init__() - vgg = torchvision.models.vgg19(pretrained=True).features - vgg_avg_pooling = [] - - for weights in vgg.parameters(): - weights.requires_grad = False - - cur_level_i = 0 - for module in vgg.modules(): - if module.__class__.__name__ == 'Sequential': - continue - elif module.__class__.__name__ == 'MaxPool2d': - vgg_avg_pooling.append(nn.AvgPool2d(kernel_size=2, stride=2, padding=0)) - else: - vgg_avg_pooling.append(module) - if module.__class__.__name__ == 'ReLU': - cur_level_i += 1 - if cur_level_i == level: - break - - self.features = nn.Sequential(*vgg_avg_pooling) - - self.max_iters = max_iters - self.temperature = temperature - self.do_erode = erode_mask_size > 0 - if self.do_erode: - self.erode_mask = nn.Conv2d(1, 1, erode_mask_size, padding=erode_mask_size // 2, bias=False) - self.erode_mask.weight.data.fill_(1) - - def forward(self, real_img, pred_img, mask): - with torch.no_grad(): - real_img = (real_img - IMAGENET_MEAN.to(real_img)) / IMAGENET_STD.to(real_img) - real_feats = 
self.features(real_img) - - vertical_sim = torch.exp(-(real_feats[:, :, 1:] - real_feats[:, :, :-1]).pow(2).sum(1, keepdim=True) - / self.temperature) - horizontal_sim = torch.exp(-(real_feats[:, :, :, 1:] - real_feats[:, :, :, :-1]).pow(2).sum(1, keepdim=True) - / self.temperature) - - mask_scaled = F.interpolate(mask, size=real_feats.shape[-2:], mode='bilinear', align_corners=False) - if self.do_erode: - mask_scaled = (self.erode_mask(mask_scaled) > 1).float() - - cur_knowness = 1 - mask_scaled - - for iter_i in range(self.max_iters): - new_top_knowness = F.pad(cur_knowness[:, :, :-1] * vertical_sim, (0, 0, 1, 0), mode='replicate') - new_bottom_knowness = F.pad(cur_knowness[:, :, 1:] * vertical_sim, (0, 0, 0, 1), mode='replicate') - - new_left_knowness = F.pad(cur_knowness[:, :, :, :-1] * horizontal_sim, (1, 0, 0, 0), mode='replicate') - new_right_knowness = F.pad(cur_knowness[:, :, :, 1:] * horizontal_sim, (0, 1, 0, 0), mode='replicate') - - new_knowness = torch.stack([new_top_knowness, new_bottom_knowness, - new_left_knowness, new_right_knowness], - dim=0).max(0).values - - cur_knowness = torch.max(cur_knowness, new_knowness) - - cur_knowness = F.interpolate(cur_knowness, size=mask.shape[-2:], mode='bilinear') - result = torch.min(mask, 1 - cur_knowness) - - return result - - -def make_mask_distance_weighter(kind='none', **kwargs): - if kind == 'none': - return dummy_distance_weighter - if kind == 'blur': - return BlurMask(**kwargs) - if kind == 'edt': - return EmulatedEDTMask(**kwargs) - if kind == 'pps': - return PropagatePerceptualSim(**kwargs) - raise ValueError(f'Unknown mask distance weighter kind {kind}') diff --git a/spaces/PAIR/Text2Video-Zero/annotator/util.py b/spaces/PAIR/Text2Video-Zero/annotator/util.py deleted file mode 100644 index 90831643d19cc1b9b0940df3d4fd4d846ba74a05..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/util.py +++ /dev/null @@ -1,38 +0,0 @@ -import numpy as np -import cv2 -import os - - -annotator_ckpts_path = os.path.join(os.path.dirname(__file__), 'ckpts') - - -def HWC3(x): - assert x.dtype == np.uint8 - if x.ndim == 2: - x = x[:, :, None] - assert x.ndim == 3 - H, W, C = x.shape - assert C == 1 or C == 3 or C == 4 - if C == 3: - return x - if C == 1: - return np.concatenate([x, x, x], axis=2) - if C == 4: - color = x[:, :, 0:3].astype(np.float32) - alpha = x[:, :, 3:4].astype(np.float32) / 255.0 - y = color * alpha + 255.0 * (1.0 - alpha) - y = y.clip(0, 255).astype(np.uint8) - return y - - -def resize_image(input_image, resolution): - H, W, C = input_image.shape - H = float(H) - W = float(W) - k = float(resolution) / min(H, W) - H *= k - W *= k - H = int(np.round(H / 64.0)) * 64 - W = int(np.round(W / 64.0)) * 64 - img = cv2.resize(input_image, (W, H), interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA) - return img diff --git a/spaces/PaddlePaddle/solov2/app.py b/spaces/PaddlePaddle/solov2/app.py deleted file mode 100644 index b4e1d8522a993dd3ad76ce724e0bdf05b2094c6f..0000000000000000000000000000000000000000 --- a/spaces/PaddlePaddle/solov2/app.py +++ /dev/null @@ -1,22 +0,0 @@ -import tempfile -import os - -from PIL import Image -import gradio as gr -import paddlehub as hub - - -module = hub.Module(name="solov2") - -def inference(img, threshold): - with tempfile.TemporaryDirectory() as tempdir_name: - module.predict(image=img, threshold=threshold, visualization=True, save_dir=tempdir_name) - result_names = os.listdir(tempdir_name) - output_image = Image.open(os.path.join(tempdir_name, 
result_names[0])) - return [output_image] - - -title="SOLOv2" -description="SOLOv2 is a fast instance segmentation model based on paper \"SOLOv2: Dynamic, Faster and Stronger\". The model improves the detection performance and efficiency of masks compared to SOLOv1, and performs well in instance segmentation tasks." - -gr.Interface(inference,inputs=[gr.inputs.Image(type="filepath"),gr.Slider(0.0, 1.0, value=0.5)],outputs=gr.Gallery(label="Detection Result"),title=title,description=description).launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/parser-ly-from-scheme.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/parser-ly-from-scheme.go deleted file mode 100644 index 147bc8196e09e3033c3d62d8780182732ed86d6b..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/parser-ly-from-scheme.go and /dev/null differ diff --git a/spaces/Pennywise881/wiki-chat-v2/README.md b/spaces/Pennywise881/wiki-chat-v2/README.md deleted file mode 100644 index 2b8920b0cc0cbc6dbd536c3bc3bfe56d626277a8..0000000000000000000000000000000000000000 --- a/spaces/Pennywise881/wiki-chat-v2/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Wiki Qa V2 -emoji: 🦀 -colorFrom: green -colorTo: gray -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Pie31415/rome/app.py b/spaces/Pie31415/rome/app.py deleted file mode 100644 index 535fa70e21852a58139b81df39b511d79c79c56a..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/rome/app.py +++ /dev/null @@ -1,245 +0,0 @@ -import sys -import torch -import pickle -import cv2 -import gradio as gr -import numpy as np - -from PIL import Image -from collections import defaultdict -from glob import glob - -from matplotlib import pyplot as plt -from matplotlib import animation - -from easydict import EasyDict as edict -from huggingface_hub import hf_hub_download - -sys.path.append("./rome/") -sys.path.append('./DECA') - -from rome.infer import Infer -from rome.src.utils.processing import process_black_shape, tensor2image -from rome.src.utils.visuals import mask_errosion - -# loading models ---- create model repo -default_modnet_path = hf_hub_download('Pie31415/rome', 'modnet_photographic_portrait_matting.ckpt') -default_model_path = hf_hub_download('Pie31415/rome', 'rome.pth') - -# parser configurations -args = edict({ - "save_dir": ".", - "save_render": True, - "model_checkpoint": default_model_path, - "modnet_path": default_modnet_path, - "random_seed": 0, - "debug": False, - "verbose": False, - "model_image_size": 256, - "align_source": True, - "align_target": False, - "align_scale": 1.25, - "use_mesh_deformations": False, - "subdivide_mesh": False, - "renderer_sigma": 1e-08, - "renderer_zfar": 100.0, - "renderer_type": "soft_mesh", - "renderer_texture_type": "texture_uv", - "renderer_normalized_alphas": False, - "deca_path": "DECA", - "rome_data_dir": "rome/data", - "autoenc_cat_alphas": False, - "autoenc_align_inputs": False, - "autoenc_use_warp": False, - "autoenc_num_channels": 64, - "autoenc_max_channels": 512, - "autoenc_num_groups": 4, - "autoenc_num_bottleneck_groups": 0, - "autoenc_num_blocks": 2, - "autoenc_num_layers": 4, - "autoenc_block_type": "bottleneck", - "neural_texture_channels": 8, - 
"num_harmonic_encoding_funcs": 6, - "unet_num_channels": 64, - "unet_max_channels": 512, - "unet_num_groups": 4, - "unet_num_blocks": 1, - "unet_num_layers": 2, - "unet_block_type": "conv", - "unet_skip_connection_type": "cat", - "unet_use_normals_cond": True, - "unet_use_vertex_cond": False, - "unet_use_uvs_cond": False, - "unet_pred_mask": False, - "use_separate_seg_unet": True, - "norm_layer_type": "gn", - "activation_type": "relu", - "conv_layer_type": "ws_conv", - "deform_norm_layer_type": "gn", - "deform_activation_type": "relu", - "deform_conv_layer_type": "ws_conv", - "unet_seg_weight": 0.0, - "unet_seg_type": "bce_with_logits", - "deform_face_tightness": 0.0001, - "use_whole_segmentation": False, - "mask_hair_for_neck": False, - "use_hair_from_avatar": False, - "use_scalp_deforms": True, - "use_neck_deforms": True, - "use_basis_deformer": False, - "use_unet_deformer": True, - "pretrained_encoder_basis_path": "", - "pretrained_vertex_basis_path": "", - "num_basis": 50, - "basis_init": "pca", - "num_vertex": 5023, - "train_basis": True, - "path_to_deca": "DECA", - "path_to_linear_hair_model": "data/linear_hair.pth", # N/A - "path_to_mobile_model": "data/disp_model.pth", # N/A - "n_scalp": 60, - "use_distill": False, - "use_mobile_version": False, - "deformer_path": "data/rome.pth", - "output_unet_deformer_feats": 32, - "use_deca_details": False, - "use_flametex": False, - "upsample_type": "nearest", - "num_frequencies": 6, - "deform_face_scale_coef": 0.0, - "device": "cuda" -}) - -# download FLAME and DECA pretrained -generic_model_path = hf_hub_download('Pie31415/rome', 'generic_model.pkl') -deca_model_path = hf_hub_download('Pie31415/rome', 'deca_model.tar') - -with open(generic_model_path, 'rb') as f: - ss = pickle.load(f, encoding='latin1') - - with open('./DECA/data/generic_model.pkl', 'wb') as out: - pickle.dump(ss, out) - -with open(deca_model_path, "rb") as input: - with open('./DECA/data/deca_model.tar', "wb") as out: - for line in input: - out.write(line) - -# load ROME inference model -infer = Infer(args) - -def image_inference( - source_img: gr.inputs.Image = None, - driver_img: gr.inputs.Image = None -): - out = infer.evaluate(source_img, driver_img, crop_center=False) - res = tensor2image(torch.cat([out['source_information']['data_dict']['source_img'][0].cpu(), - out['source_information']['data_dict']['target_img'][0].cpu(), - out['render_masked'].cpu(), out['pred_target_shape_img'][0].cpu()], dim=2)) - return res[..., ::-1] - -def extract_frames( - driver_vid: gr.inputs.Video = None -): - image_frames = [] - vid = cv2.VideoCapture(driver_vid) # path to mp4 - - while True: - success, img = vid.read() - - if not success: break - - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - pil_img = Image.fromarray(img) - image_frames.append(pil_img) - - return image_frames - -def video_inference( - source_img: gr.inputs.Image = None, - driver_vid: gr.inputs.Video = None -): - image_frames = extract_frames(driver_vid) - - resulted_imgs = defaultdict(list) - - mask_hard_threshold = 0.5 - N = len(image_frames) - for i in range(0, N, 4): # frame limits - new_out = infer.evaluate(source_img, image_frames[i]) - - mask_pred = (new_out['pred_target_unet_mask'].cpu() > mask_hard_threshold).float() - mask_pred = mask_errosion(mask_pred[0].float().numpy() * 255) - render = new_out['pred_target_img'].cpu() * (mask_pred) + (1 - mask_pred) - - normals = process_black_shape(((new_out['pred_target_normal'][0].cpu() + 1) / 2 * mask_pred + (1 - mask_pred) ) ) - normals[normals==0.5]=1. 
- - resulted_imgs['res_normal'].append(tensor2image(normals)) - resulted_imgs['res_mesh_images'].append(tensor2image(new_out['pred_target_shape_img'][0])) - resulted_imgs['res_renders'].append(tensor2image(render[0])) - - video = np.array(resulted_imgs['res_renders']) - - fig = plt.figure() - im = plt.imshow(video[0,:,:,::-1]) - plt.axis('off') - plt.close() # this is required to not display the generated image - - def init(): - im.set_data(video[0,:,:,::-1]) - - def animate(i): - im.set_data(video[i,:,:,::-1]) - return im - - anim = animation.FuncAnimation(fig, animate, init_func=init, frames=video.shape[0], interval=30) - anim.save("avatar.gif", dpi=300, writer = animation.PillowWriter(fps=24)) - - return "avatar.gif" - -description = """""" -quote = """ ->

    [The] system creates realistic mesh-based avatars from a single source photo. These avatars are rigged, i.e., they can be driven by the animation parameters from a different driving frame.

    """ - -with gr.Blocks() as demo: - gr.Markdown("# **

    ROME: Realistic one-shot mesh-based head avatars

    **") - gr.HTML(value="Teaser") - gr.Markdown(description) - gr.Markdown(quote) - - with gr.Tab("Image Inference"): - with gr.Row(): - source_img = gr.Image(type="pil", label="Source image", show_label=True) - driver_img = gr.Image(type="pil", label="Driver image", show_label=True) - image_output = gr.Image(label="Rendered avatar") - image_button = gr.Button("Predict") - with gr.Tab("Video Inference"): - with gr.Row(): - source_img2 = gr.Image(type="pil", label="Source image", show_label=True) - driver_vid = gr.Video(label="Driver video", source="upload") - video_output = gr.Image(label="Rendered GIF avatar") - video_button = gr.Button("Predict") - with gr.Tab("Webcam Inference"): - with gr.Row(): - source_img3 = gr.Image(type="pil", label="Source image", show_label=True) - driver_cam = gr.Video(label="Driver video", source="webcam") - cam_output = gr.Image(label="Rendered GIF avatar") - cam_button = gr.Button("Predict") - - gr.Examples( - examples=[ - ["./examples/lincoln.jpg", "./examples/taras2.jpg"], - ["./examples/lincoln.jpg", "./examples/taras1.jpg"] - ], - inputs=[source_img, driver_img], - outputs=[image_output], - fn=image_inference, - cache_examples=True - ) - - image_button.click(image_inference, inputs=[source_img, driver_img], outputs=image_output) - video_button.click(video_inference, inputs=[source_img2, driver_vid], outputs=video_output) - cam_button.click(video_inference, inputs=[source_img3, driver_cam], outputs=cam_output) - -demo.launch() \ No newline at end of file diff --git a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/modules/test_transformer.py b/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/modules/test_transformer.py deleted file mode 100644 index 2bb79bfd58d535469f9b3c56b8a5fe254db5d8ba..0000000000000000000000000000000000000000 --- a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/modules/test_transformer.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from itertools import product - -import pytest -import torch - -from audiocraft.modules.transformer import ( - StreamingMultiheadAttention, StreamingTransformer, set_efficient_attention_backend) - - -def test_transformer_causal_streaming(): - torch.manual_seed(1234) - - for context, custom in product([None, 10], [False, True]): - # Test that causality and receptive fields are properly handled. - # looking at the gradients - tr = StreamingTransformer( - 16, 4, 1 if context else 2, - causal=True, past_context=context, custom=custom, - dropout=0.) - steps = 20 - for k in [0, 10, 15, 19]: - x = torch.randn(4, steps, 16, requires_grad=True) - y = tr(x) - y[:, k].abs().sum().backward() - if k + 1 < steps: - assert torch.allclose(x.grad[:, k + 1:], torch.tensor(0.)), x.grad[:, k + 1:].norm() - assert not torch.allclose(x.grad[:, :k + 1], torch.tensor(0.)), x.grad[:, :k + 1].norm() - if context is not None and k > context: - limit = k - context - 1 - assert torch.allclose(x.grad[:, :limit], - torch.tensor(0.)), x.grad[:, :limit].norm() - - # Now check that streaming gives the same result at batch eval. 
- x = torch.randn(4, steps, 16) - y = tr(x) - ys = [] - with tr.streaming(): - for k in range(steps): - chunk = x[:, k:k + 1, :] - ys.append(tr(chunk)) - y_stream = torch.cat(ys, dim=1) - delta = torch.norm(y_stream - y) / torch.norm(y) - assert delta < 1e-6, delta - - -def test_transformer_vs_pytorch(): - torch.manual_seed(1234) - # Check that in the non causal setting, we get the same result as - # PyTorch Transformer encoder. - for custom in [False, True]: - tr = StreamingTransformer( - 16, 4, 2, - causal=False, custom=custom, dropout=0., positional_scale=0.) - layer = torch.nn.TransformerEncoderLayer(16, 4, dropout=0., batch_first=True) - tr_ref = torch.nn.TransformerEncoder(layer, 2) - tr.load_state_dict(tr_ref.state_dict()) - - x = torch.randn(4, 20, 16) - y = tr(x) - y2 = tr_ref(x) - delta = torch.norm(y2 - y) / torch.norm(y) - assert delta < 1e-6, delta - - -def test_streaming_api(): - tr = StreamingTransformer(16, 4, 2, causal=True, dropout=0.) - tr.eval() - steps = 12 - x = torch.randn(1, steps, 16) - - with torch.no_grad(): - with tr.streaming(): - _ = tr(x[:, :1]) - state = {k: v.clone() for k, v in tr.get_streaming_state().items()} - y = tr(x[:, 1:2]) - tr.set_streaming_state(state) - y2 = tr(x[:, 1:2]) - assert torch.allclose(y, y2), (y - y2).norm() - assert tr.flush() is None - - -def test_memory_efficient(): - for backend in ['torch', 'xformers']: - torch.manual_seed(1234) - set_efficient_attention_backend(backend) - - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., layer_scale=0.1) - tr_mem_efficient = StreamingTransformer( - 16, 4, 2, dropout=0., memory_efficient=True, layer_scale=0.1) - tr_mem_efficient.load_state_dict(tr.state_dict()) - tr.eval() - steps = 12 - x = torch.randn(3, steps, 16) - - with torch.no_grad(): - y = tr(x) - y2 = tr_mem_efficient(x) - assert torch.allclose(y, y2), ((y - y2).norm(), backend) - - -def test_attention_as_float32(): - torch.manual_seed(1234) - cases = [ - {'custom': True}, - {'custom': False}, - ] - for case in cases: - tr = StreamingTransformer(16, 4, 2, dropout=0., dtype=torch.bfloat16, **case) - tr_float32 = StreamingTransformer( - 16, 4, 2, dropout=0., attention_as_float32=True, dtype=torch.bfloat16, **case) - if not case['custom']: - # we are not using autocast here because it doesn't really - # work as expected on CPU, so we have to manually cast the weights of the MHA. 
- for layer in tr_float32.layers: - layer.self_attn.mha.to(torch.float32) - tr_float32.load_state_dict(tr.state_dict()) - steps = 12 - x = torch.randn(3, steps, 16, dtype=torch.bfloat16) - - with torch.no_grad(): - y = tr(x) - y2 = tr_float32(x) - assert not torch.allclose(y, y2), (y - y2).norm() - - -@torch.no_grad() -def test_streaming_memory_efficient(): - for backend in ['torch', 'xformers']: - torch.manual_seed(1234) - set_efficient_attention_backend(backend) - tr = StreamingTransformer(16, 4, 2, causal=True, dropout=0., custom=True) - tr_mem_efficient = StreamingTransformer( - 16, 4, 2, dropout=0., memory_efficient=True, causal=True) - tr.load_state_dict(tr_mem_efficient.state_dict()) - tr.eval() - tr_mem_efficient.eval() - steps = 12 - x = torch.randn(3, steps, 16) - - ref = tr(x) - - with tr_mem_efficient.streaming(): - outs = [] - # frame_sizes = [2] + [1] * (steps - 2) - frame_sizes = [1] * steps - - for frame_size in frame_sizes: - frame = x[:, :frame_size] - x = x[:, frame_size:] - outs.append(tr_mem_efficient(frame)) - - out = torch.cat(outs, dim=1) - delta = torch.norm(out - ref) / torch.norm(out) - assert delta < 1e-6, delta - - -def test_cross_attention(): - torch.manual_seed(1234) - for norm_first in [True, False]: - m = StreamingTransformer( - 16, 4, 2, cross_attention=False, norm_first=norm_first, dropout=0., custom=True) - m_cross = StreamingTransformer( - 16, 4, 2, cross_attention=True, norm_first=norm_first, dropout=0., custom=True) - m_cross.load_state_dict(m.state_dict(), strict=False) - x = torch.randn(2, 5, 16) - cross_x = torch.randn(2, 3, 16) - y_ref = m(x) - y_cross_zero = m_cross(x, cross_attention_src=0 * cross_x) - # With norm_first, the two should be exactly the same, - # but with norm_first=False, we get 2 normalization in a row - # and the epsilon value leads to a tiny change. - atol = 0. if norm_first else 1e-6 - print((y_ref - y_cross_zero).norm() / y_ref.norm()) - assert torch.allclose(y_ref, y_cross_zero, atol=atol) - - # We now expect a difference even with a generous atol of 1e-2. - y_cross = m_cross(x, cross_attention_src=cross_x) - assert not torch.allclose(y_cross, y_cross_zero, atol=1e-2) - - with pytest.raises(AssertionError): - _ = m_cross(x) - _ = m(x, cross_attention_src=cross_x) - - -def test_cross_attention_compat(): - torch.manual_seed(1234) - num_heads = 2 - dim = num_heads * 64 - with pytest.raises(AssertionError): - StreamingMultiheadAttention(dim, num_heads, causal=True, cross_attention=True) - - cross_attn = StreamingMultiheadAttention( - dim, num_heads, dropout=0, cross_attention=True, custom=True) - ref_attn = torch.nn.MultiheadAttention(dim, num_heads, dropout=0, batch_first=True) - - # We can load the regular attention state dict - # so we have compat when loading old checkpoints. - cross_attn.load_state_dict(ref_attn.state_dict()) - - queries = torch.randn(3, 7, dim) - keys = torch.randn(3, 9, dim) - values = torch.randn(3, 9, dim) - - y = cross_attn(queries, keys, values)[0] - y_ref = ref_attn(queries, keys, values)[0] - assert torch.allclose(y, y_ref, atol=1e-7), (y - y_ref).norm() / y_ref.norm() - - # Now let's check that streaming is working properly. 
- with cross_attn.streaming(): - ys = [] - for step in range(queries.shape[1]): - ys.append(cross_attn(queries[:, step: step + 1], keys, values)[0]) - y_streaming = torch.cat(ys, dim=1) - assert torch.allclose(y_streaming, y, atol=1e-7) - - -def test_repeat_kv(): - torch.manual_seed(1234) - num_heads = 8 - kv_repeat = 4 - dim = num_heads * 64 - with pytest.raises(AssertionError): - mha = StreamingMultiheadAttention( - dim, num_heads, causal=True, kv_repeat=kv_repeat, cross_attention=True) - mha = StreamingMultiheadAttention( - dim, num_heads, causal=True, kv_repeat=kv_repeat) - mha = StreamingMultiheadAttention( - dim, num_heads, causal=True, kv_repeat=kv_repeat, custom=True) - x = torch.randn(4, 18, dim) - y = mha(x, x, x)[0] - assert x.shape == y.shape - - -def test_qk_layer_norm(): - torch.manual_seed(1234) - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., qk_layer_norm=True, bias_attn=False) - steps = 12 - x = torch.randn(3, steps, 16) - y = tr(x) - - tr = StreamingTransformer( - 16, 4, 2, custom=True, dropout=0., qk_layer_norm=True, cross_attention=True) - z = torch.randn(3, 21, 16) - y = tr(x, cross_attention_src=z) - assert y.shape == x.shape diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/command/install_headers.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/command/install_headers.py deleted file mode 100644 index 87046ab391b9f5e577e6ef0181c50de7e9c7f01b..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/command/install_headers.py +++ /dev/null @@ -1,45 +0,0 @@ -"""distutils.command.install_headers - -Implements the Distutils 'install_headers' command, to install C/C++ header -files to the Python include directory.""" - -from distutils.core import Command - - -# XXX force is never used -class install_headers(Command): - - description = "install C/C++ header files" - - user_options = [ - ('install-dir=', 'd', "directory to install header files to"), - ('force', 'f', "force installation (overwrite existing files)"), - ] - - boolean_options = ['force'] - - def initialize_options(self): - self.install_dir = None - self.force = 0 - self.outfiles = [] - - def finalize_options(self): - self.set_undefined_options( - 'install', ('install_headers', 'install_dir'), ('force', 'force') - ) - - def run(self): - headers = self.distribution.headers - if not headers: - return - - self.mkpath(self.install_dir) - for header in headers: - (out, _) = self.copy_file(header, self.install_dir) - self.outfiles.append(out) - - def get_inputs(self): - return self.distribution.headers or [] - - def get_outputs(self): - return self.outfiles diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/glob.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/glob.py deleted file mode 100644 index 87062b8187fa4f74a8c4edbaa60bd9a8b2d506a4..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/glob.py +++ /dev/null @@ -1,167 +0,0 @@ -""" -Filename globbing utility. Mostly a copy of `glob` from Python 3.5. - -Changes include: - * `yield from` and PEP3102 `*` removed. - * Hidden files are not ignored. -""" - -import os -import re -import fnmatch - -__all__ = ["glob", "iglob", "escape"] - - -def glob(pathname, recursive=False): - """Return a list of paths matching a pathname pattern. 
- - The pattern may contain simple shell-style wildcards a la - fnmatch. However, unlike fnmatch, filenames starting with a - dot are special cases that are not matched by '*' and '?' - patterns. - - If recursive is true, the pattern '**' will match any files and - zero or more directories and subdirectories. - """ - return list(iglob(pathname, recursive=recursive)) - - -def iglob(pathname, recursive=False): - """Return an iterator which yields the paths matching a pathname pattern. - - The pattern may contain simple shell-style wildcards a la - fnmatch. However, unlike fnmatch, filenames starting with a - dot are special cases that are not matched by '*' and '?' - patterns. - - If recursive is true, the pattern '**' will match any files and - zero or more directories and subdirectories. - """ - it = _iglob(pathname, recursive) - if recursive and _isrecursive(pathname): - s = next(it) # skip empty string - assert not s - return it - - -def _iglob(pathname, recursive): - dirname, basename = os.path.split(pathname) - glob_in_dir = glob2 if recursive and _isrecursive(basename) else glob1 - - if not has_magic(pathname): - if basename: - if os.path.lexists(pathname): - yield pathname - else: - # Patterns ending with a slash should match only directories - if os.path.isdir(dirname): - yield pathname - return - - if not dirname: - yield from glob_in_dir(dirname, basename) - return - # `os.path.split()` returns the argument itself as a dirname if it is a - # drive or UNC path. Prevent an infinite recursion if a drive or UNC path - # contains magic characters (i.e. r'\\?\C:'). - if dirname != pathname and has_magic(dirname): - dirs = _iglob(dirname, recursive) - else: - dirs = [dirname] - if not has_magic(basename): - glob_in_dir = glob0 - for dirname in dirs: - for name in glob_in_dir(dirname, basename): - yield os.path.join(dirname, name) - - -# These 2 helper functions non-recursively glob inside a literal directory. -# They return a list of basenames. `glob1` accepts a pattern while `glob0` -# takes a literal basename (so it only has to check for its existence). - - -def glob1(dirname, pattern): - if not dirname: - if isinstance(pattern, bytes): - dirname = os.curdir.encode('ASCII') - else: - dirname = os.curdir - try: - names = os.listdir(dirname) - except OSError: - return [] - return fnmatch.filter(names, pattern) - - -def glob0(dirname, basename): - if not basename: - # `os.path.split()` returns an empty basename for paths ending with a - # directory separator. 'q*x/' should match only directories. - if os.path.isdir(dirname): - return [basename] - else: - if os.path.lexists(os.path.join(dirname, basename)): - return [basename] - return [] - - -# This helper function recursively yields relative pathnames inside a literal -# directory. - - -def glob2(dirname, pattern): - assert _isrecursive(pattern) - yield pattern[:0] - for x in _rlistdir(dirname): - yield x - - -# Recursively yields relative pathnames inside a literal directory. 
-def _rlistdir(dirname): - if not dirname: - if isinstance(dirname, bytes): - dirname = os.curdir.encode('ASCII') - else: - dirname = os.curdir - try: - names = os.listdir(dirname) - except os.error: - return - for x in names: - yield x - path = os.path.join(dirname, x) if dirname else x - for y in _rlistdir(path): - yield os.path.join(x, y) - - -magic_check = re.compile('([*?[])') -magic_check_bytes = re.compile(b'([*?[])') - - -def has_magic(s): - if isinstance(s, bytes): - match = magic_check_bytes.search(s) - else: - match = magic_check.search(s) - return match is not None - - -def _isrecursive(pattern): - if isinstance(pattern, bytes): - return pattern == b'**' - else: - return pattern == '**' - - -def escape(pathname): - """Escape all special characters. - """ - # Escaping is done by wrapping any of "*?[" between square brackets. - # Metacharacters do not work in the drive part and shouldn't be escaped. - drive, pathname = os.path.splitdrive(pathname) - if isinstance(pathname, bytes): - pathname = magic_check_bytes.sub(br'[\1]', pathname) - else: - pathname = magic_check.sub(r'[\1]', pathname) - return drive + pathname diff --git a/spaces/RedValis/Music-Helix/spotifysearch/urlbuilder.py b/spaces/RedValis/Music-Helix/spotifysearch/urlbuilder.py deleted file mode 100644 index b423dc5ee93b108e9b44ac6a68202880bb3d038d..0000000000000000000000000000000000000000 --- a/spaces/RedValis/Music-Helix/spotifysearch/urlbuilder.py +++ /dev/null @@ -1,28 +0,0 @@ - -# THIS FILE IS RESPONSABLE FOR BUILDING DYNAMIC URLS - -def search_endpoint(keywords:str, allowed_types:list, -filters:dict, market:str, limit:int, offset:int): - endpoint = 'https://api.spotify.com/v1/search?' - - # FORMAT QUERRY ITEMS AND FILTERS - querry_items = keywords.split(' ') - for filter, value in filters.items(): - value = value.replace(' ', '%20') - item = f'{filter}:{value}' - querry_items.append(item) - - # REQUIRED ARGUMENTS - querry = 'q=' + '%20'.join(querry_items) - types = 'type=' + ','.join(allowed_types) - arguments = [querry, types] - - # OPTIONAL ARGUMENTS - if market: - arguments.append(f'market={market}') - if limit: - arguments.append(f'limit={limit}') - if offset: - arguments.append(f'offset={offset}') - - return endpoint + '&'.join(arguments) diff --git a/spaces/SIGGRAPH2022/sketch2pose/src/fist_pose.py b/spaces/SIGGRAPH2022/sketch2pose/src/fist_pose.py deleted file mode 100644 index 9c6e5be4f5a8e659b3da867c0b190a8d2ee2494f..0000000000000000000000000000000000000000 --- a/spaces/SIGGRAPH2022/sketch2pose/src/fist_pose.py +++ /dev/null @@ -1,444 +0,0 @@ -left_fist = [ - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.4183167815208435, 0.10645648092031479, -1.6593892574310303, - 0.15252035856246948, -0.14700782299041748, -1.3719955682754517, - -0.04432843625545502, -0.15799851715564728, -0.938068151473999, - -0.12218914180994034, 0.073341965675354, -1.6415189504623413, - -0.14376045763492584, 0.1927780956029892, -1.3593589067459106, - -0.0851994976401329, 0.01652289740741253, -0.7474589347839355, - -0.9881719946861267, -0.3987707793712616, -1.3535722494125366, - -0.6686224937438965, 0.1261960119009018, -1.080643892288208, - 
-0.8101894855499268, -0.1306752860546112, -0.8412265777587891, - -0.3495230972766876, -0.17784251272678375, -1.4433038234710693, - -0.46278536319732666, 0.13677796721458435, -1.467200517654419, - -0.3681888282299042, 0.003404417773708701, -0.7764251232147217, - 0.850964367389679, 0.2769227623939514, -0.09154807031154633, - 0.14500413835048676, 0.09604815393686295, 0.219278022646904, - 1.0451993942260742, 0.16911321878433228, -0.2426234930753708, - 0.11167845129966736, -0.04289207234978676, 0.41644084453582764, - 0.10881128907203674, 0.06598565727472305, 0.756219744682312, - -0.0963931530714035, 0.09091583639383316, 0.18845966458320618, - -0.11809506267309189, -0.050943851470947266, 0.5295845866203308, - -0.14369848370552063, -0.055241718888282776, 0.704857349395752, - -0.019182899966835976, 0.0923367589712143, 0.3379131853580475, - -0.45703303813934326, 0.1962839663028717, 0.6254575848579407, - -0.21465237438678741, 0.06599827855825424, 0.5068942308425903, - -0.36972442269325256, 0.0603446289896965, 0.07949023693799973, - -0.14186954498291016, 0.08585254102945328, 0.6355276107788086, - -0.3033415675163269, 0.05788097903132439, 0.6313892006874084, - -0.17612087726593018, 0.13209305703639984, 0.3733545243740082, - 0.850964367389679, -0.2769227623939514, 0.09154807031154633, - -0.4998386800289154, -0.026556432247161865, -0.052880801260471344, - 0.5355585217475891, -0.045960985124111176, 0.27735769748687744, -] - -left_right_fist = [ - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, -0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.4183167815208435, 0.10645648092031479, -1.6593892574310303, - 0.15252035856246948, -0.14700782299041748, -1.3719955682754517, - -0.04432843625545502, -0.15799851715564728, -0.938068151473999, - -0.12218914180994034, 0.073341965675354, -1.6415189504623413, - -0.14376045763492584, 0.1927780956029892, -1.3593589067459106, - -0.0851994976401329, 0.01652289740741253, -0.7474589347839355, - -0.9881719946861267, -0.3987707793712616, -1.3535722494125366, - -0.6686224937438965, 0.1261960119009018, -1.080643892288208, - -0.8101894855499268, -0.1306752860546112, -0.8412265777587891, - -0.3495230972766876, -0.17784251272678375, -1.4433038234710693, - -0.46278536319732666, 0.13677796721458435, -1.467200517654419, - -0.3681888282299042, 0.003404417773708701, -0.7764251232147217, - 0.850964367389679, 0.2769227623939514, -0.09154807031154633, - 0.14500413835048676, 0.09604815393686295, 0.219278022646904, - 1.0451993942260742, 0.16911321878433228, -0.2426234930753708, - 0.4183167815208435, -0.10645647346973419, 1.6593892574310303, - 0.15252038836479187, 0.14700786769390106, 1.3719956874847412, - -0.04432841017842293, 0.15799842774868011, 0.9380677938461304, - -0.12218913435935974, -0.0733419880270958, 1.6415191888809204, - -0.14376048743724823, -0.19277812540531158, 1.3593589067459106, - -0.08519953489303589, -0.016522908583283424, 0.7474592328071594, - -0.9881719350814819, 0.3987707495689392, 1.3535723686218262, - -0.6686226725578308, -0.12619605660438538, 1.080644130706787, - -0.8101896643638611, 0.1306752860546112, 0.8412266373634338, - -0.34952324628829956, 0.17784248292446136, 1.443304181098938, - -0.46278542280197144, -0.13677802681922913, 
1.467200517654419, - -0.36818885803222656, -0.0034044249914586544, 0.7764251232147217, - 0.8509642481803894, -0.2769228219985962, 0.09154807776212692, - 0.14500458538532257, -0.09604845196008682, -0.21927869319915771, - 1.0451991558074951, -0.1691131889820099, 0.242623433470726, -] - -right_fist = [] -for lf, lrf in zip(left_fist, left_right_fist): - if lf != lrf: - right_fist.append(lrf) - else: - right_fist.append(0) - - -left_flat_up = [ - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0, 1.5129635334014893, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, -] - -left_flat_down = [ - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0, -1.4648663997650146, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, -] - -right_flat_up = [ - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0, -1.5021973848342896, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, -] - -right_flat_down = [ - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 
0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0, 0, 1.494218111038208, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, -] - -relaxed = [ - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.0, 0.0, 0.0, - 0.11167845129966736, 0.04289207234978676, -0.41644084453582764, - 0.10881128907203674, -0.06598565727472305, -0.756219744682312, - -0.0963931530714035, -0.09091583639383316, -0.18845966458320618, - -0.11809506267309189, 0.050943851470947266, -0.5295845866203308, - -0.14369848370552063, 0.055241718888282776, -0.704857349395752, - -0.019182899966835976, -0.0923367589712143, -0.3379131853580475, - -0.45703303813934326, -0.1962839663028717, -0.6254575848579407, - -0.21465237438678741, -0.06599827855825424, -0.5068942308425903, - -0.36972442269325256, -0.0603446289896965, -0.07949023693799973, - -0.14186954498291016, -0.08585254102945328, -0.6355276107788086, - -0.3033415675163269, -0.05788097903132439, -0.6313892006874084, - -0.17612087726593018, -0.13209305703639984, -0.3733545243740082, - 0.850964367389679, 0.2769227623939514, -0.09154807031154633, - -0.4998386800289154, 0.026556432247161865, 0.052880801260471344, - 0.5355585217475891, 0.045960985124111176, -0.27735769748687744, - 0.11167845129966736, -0.04289207234978676, 0.41644084453582764, - 0.10881128907203674, 0.06598565727472305, 0.756219744682312, - -0.0963931530714035, 0.09091583639383316, 0.18845966458320618, - -0.11809506267309189, -0.050943851470947266, 0.5295845866203308, - -0.14369848370552063, -0.055241718888282776, 0.704857349395752, - -0.019182899966835976, 0.0923367589712143, 0.3379131853580475, - -0.45703303813934326, 0.1962839663028717, 0.6254575848579407, - -0.21465237438678741, 0.06599827855825424, 0.5068942308425903, - -0.36972442269325256, 0.0603446289896965, 0.07949023693799973, - -0.14186954498291016, 0.08585254102945328, 0.6355276107788086, - -0.3033415675163269, 0.05788097903132439, 0.6313892006874084, - -0.17612087726593018, 0.13209305703639984, 0.3733545243740082, - 0.850964367389679, -0.2769227623939514, 0.09154807031154633, - -0.4998386800289154, -0.026556432247161865, -0.052880801260471344, - 0.5355585217475891, -0.045960985124111176, 0.27735769748687744, -] - -# body joints + left arm + right arm -# 25 + 15 + 15 -# smpl(left_hand_pose, right_hand_pose) - -left_start = 25 * 3 -left_end = left_start + 15 * 3 -right_end = left_end + 15 * 3 - -LEFT_FIST = left_fist[left_start:left_end] -RIGHT_FIST = right_fist[left_end:right_end] - -LEFT_FLAT_UP = left_flat_up[20 * 3 : 20 * 3 + 3] -LEFT_FLAT_DOWN = left_flat_down[20 * 3 : 20 * 3 + 3] 
- -RIGHT_FLAT_UP = right_flat_up[21 * 3 : 21 * 3 + 3] -RIGHT_FLAT_DOWN = right_flat_down[21 * 3 : 21 * 3 + 3] - -LEFT_RELAXED = relaxed[left_start:left_end] -RIGHT_RELAXED = relaxed[left_end:right_end] - -INT_TO_FIST = { - "lfl": None, - "lf": LEFT_FIST, - "lu": LEFT_FLAT_UP, - "ld": LEFT_FLAT_DOWN, - "rfl": None, - "rf": RIGHT_FIST, - "ru": RIGHT_FLAT_UP, - "rd": RIGHT_FLAT_DOWN, -} diff --git a/spaces/SMD00/Image_Summarizer/app.py b/spaces/SMD00/Image_Summarizer/app.py deleted file mode 100644 index cb24ed37a90cbe8421360a35b29aabec7caa90e7..0000000000000000000000000000000000000000 --- a/spaces/SMD00/Image_Summarizer/app.py +++ /dev/null @@ -1,203 +0,0 @@ -import gradio as gr -from PIL import Image -import pytesseract -import torch -import numpy as np -import nltk -nltk.download('stopwords') -nltk.download('punkt') -from nltk.corpus import stopwords -from nltk.cluster.util import cosine_distance -import networkx as nx -from transformers import pipeline - - -if torch.cuda.is_available(): - device = torch.device("cuda") -else: - device = torch.device("cpu") - - -summarizer = pipeline("summarization", model="facebook/bart-large-cnn") - -def read(filepath): - return pytesseract.image_to_string(Image.open(filepath)) - -def clean_text(text): - article = text.split(".") - article=[sentence for sentence in article if sentence!=""] - - sentences = [] - - for sentence in article: - sentence=sentence.replace(",", " , ").replace("'", " ' ").split(" ") - sentence=[word for word in sentence if word!=""] - sentences.append(sentence) - - return sentences - -def sentence_similarity(sent1, sent2, stopwords): #Creating words in sentences to one hot encoding and then finding cosine distance between the vectors inorder to measure closeness - - if stopwords is None: - stopwords = [] - - sent1 = [w.lower() for w in sent1] - sent2 = [w.lower() for w in sent2] - - all_words = list(set(sent1 + sent2)) - - vector1 = [0] * len(all_words) - vector2 = [0] * len(all_words) - - # build the vector for the first sentence - for w in sent1: - if w in stopwords: - continue - vector1[all_words.index(w)] += 1 - - # build the vector for the second sentence - for w in sent2: - if w in stopwords: - continue - vector2[all_words.index(w)] += 1 - if np.isnan(1 - cosine_distance(vector1, vector2)): - return 0 - return 1 - cosine_distance(vector1, vector2) - - -def build_similarity_matrix(sentences, stop_words): - - # Create an empty similarity matrix - similarity_matrix = np.zeros((len(sentences), len(sentences))) - - for idx1 in range(len(sentences)): - for idx2 in range(len(sentences)): - if idx1 == idx2: #ignore if both are same sentences - continue - similarity_matrix[idx1][idx2] = sentence_similarity(sentences[idx1], sentences[idx2], stop_words) - - return similarity_matrix - -def sentences(text, top_n="auto"): - - # Step 1 - Clean text to generate sentences - - sentences=clean_text(text) - stop_words = stopwords.words('english') - stop_words.append(".") - stop_words.append(",") - summarize_text = [] - - # Step 2 - Generate Similary Martix across sentences - - sentence_similarity_martix = build_similarity_matrix(sentences, stop_words) - # print(sentence_similarity_martix) - - # Step 3 - Rank sentences in similarity martix - - sentence_similarity_graph = nx.from_numpy_array(sentence_similarity_martix) - # print(sentence_similarity_graph) - - scores = nx.pagerank(sentence_similarity_graph) - # print(scores) - - # Step 4 - Sort the rank and pick top sentences - - ranked_sentence = sorted(((scores[i],s) for i,s in 
enumerate(sentences)), reverse=True) #Sorting the scores in decending order - # print("Indexes of top ranked_sentence order are ", ranked_sentence) - - if top_n=="auto": top_n=len(ranked_sentence) - else: top_n=int(top_n) - - for i in range(top_n): - ranked_sentence[i][1][0]=ranked_sentence[i][1][0].capitalize() #Capitalising 1st letter of sentence - # print(ranked_sentence[i][1][0]) - summarize_text.append(" ".join(ranked_sentence[i][1])) - - # Step 5 - Offcourse, output the summarized text - - extractive_summarized=". ".join(summarize_text).replace(" , ",", ").replace(" ' ","'") + "." - return extractive_summarized - -def important_sentences(filepath, no_of_sentences=5): - extractedInformation=read(filepath) - extractedInformation=' '.join(extractedInformation.split('\n')) - try: - extractive_summary=sentences(extractedInformation, no_of_sentences) - except: - extractive_summary=sentences(extractedInformation,"auto") - text="" - for index,sent in enumerate(extractive_summary.split(".")): - if sent!='':text+=str(index+1)+". "+str(sent).strip()+".\n\n" - return (gr.Textbox.update(text),gr.Button.update(visible=False),gr.Textbox.update(visible=False),gr.Dropdown.update(visible=False)) - -def summarize(filepath): - extractedInformation=read(filepath) - extractedInformation=' '.join(extractedInformation.split('\n')) - abstractive_summary = summarizer(extractedInformation, max_length=int(len(extractedInformation)/6), min_length=int(len(extractedInformation)/10), do_sample=False) - return (gr.Textbox.update(abstractive_summary[0]["summary_text"]),gr.Button.update(visible=False),gr.Textbox.update(visible=False),gr.Dropdown.update(visible=False)) - -def Question_Answer(filepath,question,mod): - extractedInformation=read(filepath) - extractedInformation=' '.join(extractedInformation.split('\n')) - if mod=="Roberta": - question_answerer = pipeline("question-answering", model="SMD00/QA_model-roberta") - else : - question_answerer = pipeline("question-answering", model="SMD00/QA_model-distilbert") - obj=question_answerer(question=question, context=extractedInformation) - return obj['answer'] - -def show_fn(): - return (gr.Textbox.update(visible=True),gr.Button.update(visible=True),gr.Dropdown.update(visible=True),gr.Textbox.update("")) -def dummy_fn(x): - return x - -with gr.Blocks() as demo: - gr.Markdown("# **PicSum**") - gr.Markdown("Gradio demo for PicSum project. You can give an image as input and select any of the three buttons. 
It generates summary, important sentences and answers questions related to context.") - img=gr.components.Image(type="filepath", label="Input Image") - - with gr.Row(): - summary_btn = gr.Button(value="Summary") - sentence_btn = gr.Button(value="Important Sentences") - quesAndAns_btn = gr.Button(value="Question and Answers") - - mode=gr.Dropdown(["Roberta","DistilBert"],label="Model",info="Choose a model",visible=False) - ques_box = gr.Textbox(label="Question",info="Enter a Question",interactive=True,visible=False) - submit_btn= gr.Button(value="Submit",visible=False) - out_box=gr.Textbox(label="Generated Text") - summary_btn.click(fn=summarize,inputs=[img],outputs=[out_box,submit_btn,ques_box,mode]) - sentence_btn.click(fn=important_sentences,inputs=[img],outputs=[out_box,submit_btn,ques_box,mode]) - quesAndAns_btn.click(fn=show_fn,outputs=[submit_btn,ques_box,mode,out_box]) - submit_btn.click(fn=Question_Answer,inputs=[img,ques_box,mode],outputs=[out_box]) - gr.Markdown("## Image Examples") - with gr.Row(): - gr.Examples( - examples=[ "a.png"], - inputs=img, - outputs=img, - fn=dummy_fn, - cache_examples=True, - ) - gr.Examples( - examples=[ "b.png"], - inputs=img, - outputs=img, - fn=dummy_fn, - cache_examples=True, - ) - gr.Examples( - examples=[ "c.png"], - inputs=img, - outputs=img, - fn=dummy_fn, - cache_examples=True, - ) - gr.Examples( - examples=[ "d.png"], - inputs=img, - outputs=img, - fn=dummy_fn, - cache_examples=True, - ) -demo.launch(debug=True) \ No newline at end of file diff --git a/spaces/SeViLA/SeViLA/lavis/tasks/image_text_pretrain.py b/spaces/SeViLA/SeViLA/lavis/tasks/image_text_pretrain.py deleted file mode 100644 index 218c535682b4382c8fde54887dc2e55107465c7f..0000000000000000000000000000000000000000 --- a/spaces/SeViLA/SeViLA/lavis/tasks/image_text_pretrain.py +++ /dev/null @@ -1,18 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. 
- SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -from lavis.common.registry import registry -from lavis.tasks.base_task import BaseTask - - -@registry.register_task("image_text_pretrain") -class ImageTextPretrainTask(BaseTask): - def __init__(self): - super().__init__() - - def evaluation(self, model, data_loader, cuda_enabled=True): - pass diff --git a/spaces/ShadowDominator/extract-photos-from-pdf/README.md b/spaces/ShadowDominator/extract-photos-from-pdf/README.md deleted file mode 100644 index dcf45be87fd83a66a37ac1d0e023719a19062392..0000000000000000000000000000000000000000 --- a/spaces/ShadowDominator/extract-photos-from-pdf/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Extract Photos From Pdf -emoji: 🏃 -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Solomon-y/img-to-music/share_btn.py b/spaces/Solomon-y/img-to-music/share_btn.py deleted file mode 100644 index 1a2ac6a6e74b114dbd54c2f24723a87180db51ef..0000000000000000000000000000000000000000 --- a/spaces/Solomon-y/img-to-music/share_btn.py +++ /dev/null @@ -1,100 +0,0 @@ -community_icon_html = """""" - -loading_icon_html = """""" - -share_js = """async () => { - async function uploadFile(file){ - const UPLOAD_URL = 'https://huggingface.co/uploads'; - const response = await fetch(UPLOAD_URL, { - method: 'POST', - headers: { - 'Content-Type': file.type, - 'X-Requested-With': 'XMLHttpRequest', - }, - body: file, /// <- File inherits from Blob - }); - const url = await response.text(); - return url; - } - async function getInputImgFile(imgEl){ - const res = await fetch(imgEl.src); - const blob = await res.blob(); - const imgId = Date.now() % 200; - const isPng = imgEl.src.startsWith(`data:image/png`); - if(isPng){ - const fileName = `sd-perception-${{imgId}}.png`; - return new File([blob], fileName, { type: 'image/png' }); - }else{ - const fileName = `sd-perception-${{imgId}}.jpg`; - return new File([blob], fileName, { type: 'image/jpeg' }); - } - } - async function getOutputMusicFile(audioEL){ - const res = await fetch(audioEL.src); - const blob = await res.blob(); - const audioId = Date.now() % 200; - const fileName = `img-to-music-${{audioId}}.wav`; - const musicBlob = new File([blob], fileName, { type: 'audio/wav' }); - console.log(musicBlob); - return musicBlob; - } - - async function audioToBase64(audioFile) { - return new Promise((resolve, reject) => { - let reader = new FileReader(); - reader.readAsDataURL(audioFile); - reader.onload = () => resolve(reader.result); - reader.onerror = error => reject(error); - - }); - } - const gradioEl = document.querySelector('body > gradio-app'); - // const gradioEl = document.querySelector("gradio-app").shadowRoot; - const inputImgEl = gradioEl.querySelector('#input-img img'); - const outputMusic = gradioEl.querySelector('#music-output audio'); - const outputMusic_src = gradioEl.querySelector('#music-output audio').src; - const outputMusic_name = outputMusic_src.split('/').pop(); - let titleTxt = outputMusic_name; - //if(titleTxt.length > 100){ - // titleTxt = titleTxt.slice(0, 100) + ' ...'; - //} - const shareBtnEl = gradioEl.querySelector('#share-btn'); - const shareIconEl = gradioEl.querySelector('#share-btn-share-icon'); - const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon'); - 
if(!outputMusic){ - return; - }; - shareBtnEl.style.pointerEvents = 'none'; - shareIconEl.style.display = 'none'; - loadingIconEl.style.removeProperty('display'); - const inputFile = await getInputImgFile(inputImgEl); - const urlInputImg = await uploadFile(inputFile); - const musicFile = await getOutputMusicFile(outputMusic); - const dataOutputMusic = await uploadFile(musicFile); - - const descriptionMd = `#### Input img: - - -#### Music: - - -`; - const params = new URLSearchParams({ - title: titleTxt, - description: descriptionMd, - }); - const paramsStr = params.toString(); - window.open(`https://huggingface.co/spaces/fffiloni/img-to-music/discussions/new?${paramsStr}`, '_blank'); - shareBtnEl.style.removeProperty('pointer-events'); - shareIconEl.style.removeProperty('display'); - loadingIconEl.style.display = 'none'; -}""" \ No newline at end of file diff --git a/spaces/Stephen2022/daxing/README.md b/spaces/Stephen2022/daxing/README.md deleted file mode 100644 index 20aba09f80ff594c8b68f1de9662b21f05b9d9d9..0000000000000000000000000000000000000000 --- a/spaces/Stephen2022/daxing/README.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: LabelStudio -emoji: 🟧 -colorFrom: yellow -colorTo: purple -sdk: docker -tags: -- label-studio -fullwidth: true -license: apache-2.0 -app_port: 8080 -duplicated_from: LabelStudio/LabelStudio ---- - - -[Website](https://hubs.ly/Q01CNgsd0) • [Docs](https://hubs.ly/Q01CN9Yq0) • [12K+ GitHub ⭐️!](https://hubs.ly/Q01CNbPQ0) • [Slack Community](https://hubs.ly/Q01CNb9H0) - -## What is Label Studio? - -Label Studio is an open source data labeling platform. It lets you label audio, -text, images, videos, and time series data with a simple, straightforward, and -highly-configurable user interface. Label Studio can prepare new data or -improve existing training data to get more accurate ML models. - - -## Label Studio in Hugging Face Spaces - -The Label Studio community is thrilled to offer Label Studio as a Hugging Face -Spaces application. You can try the data-annotation interface, connect popular -machine learning models, and share the application with collaborators. You can -start immediately by creating an account or replicate the space and work in -your own environment. - -## Creating a Use Account and Logging In - -Begin by creating a new account in the Label Studio space, then log in with your -credentials. - -**By default, these spaces permit anyone to create a new login -account, allowing them to view and modify project configuration, data sets, and -annotations. Without any modifications, treat this space like a demo environment.** - -## Creating a Labeling Project - -After logging in, Label Studio will present you with a project view. Here you -can create a new project with prompts to upload data and set up a custom -configuration interface. - -**Note that in the default configuration, storage is local and temporary. Any -projects, annotations, and configurations will be lost if the space is restarted.** - -## Next Steps and Additional Resources - -To help with getting started, the Label Studio community curated a list of -resources including tutorials and documentation. 
- -- 🚀 [Zero to One with Label Studio Tutorial](https://labelstud.io/blog/introduction-to-label-studio-in-hugging-face-spaces/) -- 📈 [Try Label Studio Enterprise](https://hubs.ly/Q01CMLll0) -- 🤗 [Tutorial: Using Label Studio with Hugging Face Datasets Hub](https://danielvanstrien.xyz/huggingface/huggingface-datasets/annotation/full%20stack%20deep%20learning%20notes/2022/09/07/label-studio-annotations-hub.html) -- 💡 [Label Studio Docs](https://hubs.ly/Q01CN9Yq0) - - -![Gif of Label Studio annotating different types of data](https://raw.githubusercontent.com/heartexlabs/label-studio/master/images/annotation_examples.gif) - -### Making your Label Studio Hugging Face Space production-ready - -By default this space allows for the unrestricted creation of new accounts -with full access to all projects and data. This is great for trying out -Label Studio and collaborating on projects, but you may want to restrict -access to your space to only authorized users. Add the following environment -variable to your space's Dockerfile to disable public account creation for -this space. - - ENV LABEL_STUDIO_DISABLE_SIGNUP_WITHOUT_LINK=true - -Set secrets in your space to create an initial user, and log in with your -provided username and password. Do not set these in your Dockerfile, as they -are globally visible on a public space. - - LABEL_STUDIO_USERNAME - LABEL_STUDIO_PASSWORD - -You will need to provide new users with an invitation link to join the space, -which can be found in the Organizations interface of Label Studio. - -By default this space stores all project configuration and data annotations -in local storage with SQLite. If the space is reset, all configuration and -annotation data in the space will be lost. You can enable configuration -persistence by connecting an external Postgres database to your space, -guaranteeing that all project and annotation settings are preserved. - -Set the following secret variables to match your own hosted instance of -Postgres. We strongly recommend setting these as secrets to prevent leaking -information about your database service to the public in your space's -definition. - - DJANGO_DB=default - POSTGRE_NAME= - POSTGRE_PORT= - POSTGRE_USER= - POSTGRE_PASSWORD= - POSTGRE_PORT= - POSTGRE_HOST= - -Add the following environment variable to remove the warning about ephemeral -storage. - - ENV STORAGE_PERSISTENCE=1 - -Note that you will need to connect cloud storage to host data items that you -want to annotate, as local storage will not be preserved across a space reset. - -By default the only data storage enabled for this space is local. In the case -of a space reset, all data will be lost. To enable permanent storage, you -must enable a cloud storage connector. We also strongly recommend enabling -configuration persistence to preserve project data, annotations, and user -settings. Choose the appropriate cloud connector and configure the secrets -for it. - -#### Amazon S3 - STORAGE_TYPE=s3 - STORAGE_AWS_ACCESS_KEY_ID="" - STORAGE_AWS_SECRET_ACCESS_KEY="" - STORAGE_AWS_BUCKET_NAME="" - STORAGE_AWS_REGION_NAME="" - STORAGE_AWS_FOLDER="" - -#### Google Cloud Storage - - STORAGE_TYPE=gcs - STORAGE_GCS_BUCKET_NAME="" - STORAGE_GCS_PROJECT_ID="" - STORAGE_GCS_FOLDER="" - GOOGLE_APPLICATION_CREDENTIALS="/opt/heartex/secrets/key.json" - -Azure Blob Storage -================== - - STORAGE_TYPE=azure - STORAGE_AZURE_ACCOUNT_NAME="" - STORAGE_AZURE_ACCOUNT_KEY="" - STORAGE_AZURE_CONTAINER_NAME="" - STORAGE_AZURE_FOLDER="" - - -## Questions? Concerns? Want to get involved?
- -Email the community team at [community@labelstud.io](mailto:community@labelstud.io) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/aiohttp/web_ws.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/aiohttp/web_ws.py deleted file mode 100644 index 0d32a218b52b87ec04f36a6f95bfb303984b2e43..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/aiohttp/web_ws.py +++ /dev/null @@ -1,487 +0,0 @@ -import asyncio -import base64 -import binascii -import hashlib -import json -from typing import Any, Iterable, Optional, Tuple, cast - -import async_timeout -import attr -from multidict import CIMultiDict - -from . import hdrs -from .abc import AbstractStreamWriter -from .helpers import call_later, set_result -from .http import ( - WS_CLOSED_MESSAGE, - WS_CLOSING_MESSAGE, - WS_KEY, - WebSocketError, - WebSocketReader, - WebSocketWriter, - WSCloseCode, - WSMessage, - WSMsgType as WSMsgType, - ws_ext_gen, - ws_ext_parse, -) -from .log import ws_logger -from .streams import EofStream, FlowControlDataQueue -from .typedefs import Final, JSONDecoder, JSONEncoder -from .web_exceptions import HTTPBadRequest, HTTPException -from .web_request import BaseRequest -from .web_response import StreamResponse - -__all__ = ( - "WebSocketResponse", - "WebSocketReady", - "WSMsgType", -) - -THRESHOLD_CONNLOST_ACCESS: Final[int] = 5 - - -@attr.s(auto_attribs=True, frozen=True, slots=True) -class WebSocketReady: - ok: bool - protocol: Optional[str] - - def __bool__(self) -> bool: - return self.ok - - -class WebSocketResponse(StreamResponse): - - _length_check = False - - def __init__( - self, - *, - timeout: float = 10.0, - receive_timeout: Optional[float] = None, - autoclose: bool = True, - autoping: bool = True, - heartbeat: Optional[float] = None, - protocols: Iterable[str] = (), - compress: bool = True, - max_msg_size: int = 4 * 1024 * 1024, - ) -> None: - super().__init__(status=101) - self._protocols = protocols - self._ws_protocol: Optional[str] = None - self._writer: Optional[WebSocketWriter] = None - self._reader: Optional[FlowControlDataQueue[WSMessage]] = None - self._closed = False - self._closing = False - self._conn_lost = 0 - self._close_code: Optional[int] = None - self._loop: Optional[asyncio.AbstractEventLoop] = None - self._waiting: Optional[asyncio.Future[bool]] = None - self._exception: Optional[BaseException] = None - self._timeout = timeout - self._receive_timeout = receive_timeout - self._autoclose = autoclose - self._autoping = autoping - self._heartbeat = heartbeat - self._heartbeat_cb: Optional[asyncio.TimerHandle] = None - if heartbeat is not None: - self._pong_heartbeat = heartbeat / 2.0 - self._pong_response_cb: Optional[asyncio.TimerHandle] = None - self._compress = compress - self._max_msg_size = max_msg_size - - def _cancel_heartbeat(self) -> None: - if self._pong_response_cb is not None: - self._pong_response_cb.cancel() - self._pong_response_cb = None - - if self._heartbeat_cb is not None: - self._heartbeat_cb.cancel() - self._heartbeat_cb = None - - def _reset_heartbeat(self) -> None: - self._cancel_heartbeat() - - if self._heartbeat is not None: - assert self._loop is not None - self._heartbeat_cb = call_later( - self._send_heartbeat, self._heartbeat, self._loop - ) - - def _send_heartbeat(self) -> None: - if self._heartbeat is not None and not self._closed: - assert self._loop is not None - # fire-and-forget a task is not perfect but maybe ok for - # sending ping. 
Otherwise we need a long-living heartbeat - # task in the class. - self._loop.create_task(self._writer.ping()) # type: ignore[union-attr] - - if self._pong_response_cb is not None: - self._pong_response_cb.cancel() - self._pong_response_cb = call_later( - self._pong_not_received, self._pong_heartbeat, self._loop - ) - - def _pong_not_received(self) -> None: - if self._req is not None and self._req.transport is not None: - self._closed = True - self._close_code = WSCloseCode.ABNORMAL_CLOSURE - self._exception = asyncio.TimeoutError() - self._req.transport.close() - - async def prepare(self, request: BaseRequest) -> AbstractStreamWriter: - # make pre-check to don't hide it by do_handshake() exceptions - if self._payload_writer is not None: - return self._payload_writer - - protocol, writer = self._pre_start(request) - payload_writer = await super().prepare(request) - assert payload_writer is not None - self._post_start(request, protocol, writer) - await payload_writer.drain() - return payload_writer - - def _handshake( - self, request: BaseRequest - ) -> Tuple["CIMultiDict[str]", str, bool, bool]: - headers = request.headers - if "websocket" != headers.get(hdrs.UPGRADE, "").lower().strip(): - raise HTTPBadRequest( - text=( - "No WebSocket UPGRADE hdr: {}\n Can " - '"Upgrade" only to "WebSocket".' - ).format(headers.get(hdrs.UPGRADE)) - ) - - if "upgrade" not in headers.get(hdrs.CONNECTION, "").lower(): - raise HTTPBadRequest( - text="No CONNECTION upgrade hdr: {}".format( - headers.get(hdrs.CONNECTION) - ) - ) - - # find common sub-protocol between client and server - protocol = None - if hdrs.SEC_WEBSOCKET_PROTOCOL in headers: - req_protocols = [ - str(proto.strip()) - for proto in headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(",") - ] - - for proto in req_protocols: - if proto in self._protocols: - protocol = proto - break - else: - # No overlap found: Return no protocol as per spec - ws_logger.warning( - "Client protocols %r don’t overlap server-known ones %r", - req_protocols, - self._protocols, - ) - - # check supported version - version = headers.get(hdrs.SEC_WEBSOCKET_VERSION, "") - if version not in ("13", "8", "7"): - raise HTTPBadRequest(text=f"Unsupported version: {version}") - - # check client handshake for validity - key = headers.get(hdrs.SEC_WEBSOCKET_KEY) - try: - if not key or len(base64.b64decode(key)) != 16: - raise HTTPBadRequest(text=f"Handshake error: {key!r}") - except binascii.Error: - raise HTTPBadRequest(text=f"Handshake error: {key!r}") from None - - accept_val = base64.b64encode( - hashlib.sha1(key.encode() + WS_KEY).digest() - ).decode() - response_headers = CIMultiDict( - { - hdrs.UPGRADE: "websocket", - hdrs.CONNECTION: "upgrade", - hdrs.SEC_WEBSOCKET_ACCEPT: accept_val, - } - ) - - notakeover = False - compress = 0 - if self._compress: - extensions = headers.get(hdrs.SEC_WEBSOCKET_EXTENSIONS) - # Server side always get return with no exception. 
- # If something happened, just drop compress extension - compress, notakeover = ws_ext_parse(extensions, isserver=True) - if compress: - enabledext = ws_ext_gen( - compress=compress, isserver=True, server_notakeover=notakeover - ) - response_headers[hdrs.SEC_WEBSOCKET_EXTENSIONS] = enabledext - - if protocol: - response_headers[hdrs.SEC_WEBSOCKET_PROTOCOL] = protocol - return ( - response_headers, - protocol, - compress, - notakeover, - ) # type: ignore[return-value] - - def _pre_start(self, request: BaseRequest) -> Tuple[str, WebSocketWriter]: - self._loop = request._loop - - headers, protocol, compress, notakeover = self._handshake(request) - - self.set_status(101) - self.headers.update(headers) - self.force_close() - self._compress = compress - transport = request._protocol.transport - assert transport is not None - writer = WebSocketWriter( - request._protocol, transport, compress=compress, notakeover=notakeover - ) - - return protocol, writer - - def _post_start( - self, request: BaseRequest, protocol: str, writer: WebSocketWriter - ) -> None: - self._ws_protocol = protocol - self._writer = writer - - self._reset_heartbeat() - - loop = self._loop - assert loop is not None - self._reader = FlowControlDataQueue(request._protocol, 2**16, loop=loop) - request.protocol.set_parser( - WebSocketReader(self._reader, self._max_msg_size, compress=self._compress) - ) - # disable HTTP keepalive for WebSocket - request.protocol.keep_alive(False) - - def can_prepare(self, request: BaseRequest) -> WebSocketReady: - if self._writer is not None: - raise RuntimeError("Already started") - try: - _, protocol, _, _ = self._handshake(request) - except HTTPException: - return WebSocketReady(False, None) - else: - return WebSocketReady(True, protocol) - - @property - def closed(self) -> bool: - return self._closed - - @property - def close_code(self) -> Optional[int]: - return self._close_code - - @property - def ws_protocol(self) -> Optional[str]: - return self._ws_protocol - - @property - def compress(self) -> bool: - return self._compress - - def exception(self) -> Optional[BaseException]: - return self._exception - - async def ping(self, message: bytes = b"") -> None: - if self._writer is None: - raise RuntimeError("Call .prepare() first") - await self._writer.ping(message) - - async def pong(self, message: bytes = b"") -> None: - # unsolicited pong - if self._writer is None: - raise RuntimeError("Call .prepare() first") - await self._writer.pong(message) - - async def send_str(self, data: str, compress: Optional[bool] = None) -> None: - if self._writer is None: - raise RuntimeError("Call .prepare() first") - if not isinstance(data, str): - raise TypeError("data argument must be str (%r)" % type(data)) - await self._writer.send(data, binary=False, compress=compress) - - async def send_bytes(self, data: bytes, compress: Optional[bool] = None) -> None: - if self._writer is None: - raise RuntimeError("Call .prepare() first") - if not isinstance(data, (bytes, bytearray, memoryview)): - raise TypeError("data argument must be byte-ish (%r)" % type(data)) - await self._writer.send(data, binary=True, compress=compress) - - async def send_json( - self, - data: Any, - compress: Optional[bool] = None, - *, - dumps: JSONEncoder = json.dumps, - ) -> None: - await self.send_str(dumps(data), compress=compress) - - async def write_eof(self) -> None: # type: ignore[override] - if self._eof_sent: - return - if self._payload_writer is None: - raise RuntimeError("Response has not been started") - - await self.close() - 
self._eof_sent = True - - async def close(self, *, code: int = WSCloseCode.OK, message: bytes = b"") -> bool: - if self._writer is None: - raise RuntimeError("Call .prepare() first") - - self._cancel_heartbeat() - reader = self._reader - assert reader is not None - - # we need to break `receive()` cycle first, - # `close()` may be called from different task - if self._waiting is not None and not self._closed: - reader.feed_data(WS_CLOSING_MESSAGE, 0) - await self._waiting - - if not self._closed: - self._closed = True - try: - await self._writer.close(code, message) - writer = self._payload_writer - assert writer is not None - await writer.drain() - except (asyncio.CancelledError, asyncio.TimeoutError): - self._close_code = WSCloseCode.ABNORMAL_CLOSURE - raise - except Exception as exc: - self._close_code = WSCloseCode.ABNORMAL_CLOSURE - self._exception = exc - return True - - if self._closing: - return True - - reader = self._reader - assert reader is not None - try: - async with async_timeout.timeout(self._timeout): - msg = await reader.read() - except asyncio.CancelledError: - self._close_code = WSCloseCode.ABNORMAL_CLOSURE - raise - except Exception as exc: - self._close_code = WSCloseCode.ABNORMAL_CLOSURE - self._exception = exc - return True - - if msg.type == WSMsgType.CLOSE: - self._close_code = msg.data - return True - - self._close_code = WSCloseCode.ABNORMAL_CLOSURE - self._exception = asyncio.TimeoutError() - return True - else: - return False - - async def receive(self, timeout: Optional[float] = None) -> WSMessage: - if self._reader is None: - raise RuntimeError("Call .prepare() first") - - loop = self._loop - assert loop is not None - while True: - if self._waiting is not None: - raise RuntimeError("Concurrent call to receive() is not allowed") - - if self._closed: - self._conn_lost += 1 - if self._conn_lost >= THRESHOLD_CONNLOST_ACCESS: - raise RuntimeError("WebSocket connection is closed.") - return WS_CLOSED_MESSAGE - elif self._closing: - return WS_CLOSING_MESSAGE - - try: - self._waiting = loop.create_future() - try: - async with async_timeout.timeout(timeout or self._receive_timeout): - msg = await self._reader.read() - self._reset_heartbeat() - finally: - waiter = self._waiting - set_result(waiter, True) - self._waiting = None - except (asyncio.CancelledError, asyncio.TimeoutError): - self._close_code = WSCloseCode.ABNORMAL_CLOSURE - raise - except EofStream: - self._close_code = WSCloseCode.OK - await self.close() - return WSMessage(WSMsgType.CLOSED, None, None) - except WebSocketError as exc: - self._close_code = exc.code - await self.close(code=exc.code) - return WSMessage(WSMsgType.ERROR, exc, None) - except Exception as exc: - self._exception = exc - self._closing = True - self._close_code = WSCloseCode.ABNORMAL_CLOSURE - await self.close() - return WSMessage(WSMsgType.ERROR, exc, None) - - if msg.type == WSMsgType.CLOSE: - self._closing = True - self._close_code = msg.data - if not self._closed and self._autoclose: - await self.close() - elif msg.type == WSMsgType.CLOSING: - self._closing = True - elif msg.type == WSMsgType.PING and self._autoping: - await self.pong(msg.data) - continue - elif msg.type == WSMsgType.PONG and self._autoping: - continue - - return msg - - async def receive_str(self, *, timeout: Optional[float] = None) -> str: - msg = await self.receive(timeout) - if msg.type != WSMsgType.TEXT: - raise TypeError( - "Received message {}:{!r} is not WSMsgType.TEXT".format( - msg.type, msg.data - ) - ) - return cast(str, msg.data) - - async def 
receive_bytes(self, *, timeout: Optional[float] = None) -> bytes: - msg = await self.receive(timeout) - if msg.type != WSMsgType.BINARY: - raise TypeError(f"Received message {msg.type}:{msg.data!r} is not bytes") - return cast(bytes, msg.data) - - async def receive_json( - self, *, loads: JSONDecoder = json.loads, timeout: Optional[float] = None - ) -> Any: - data = await self.receive_str(timeout=timeout) - return loads(data) - - async def write(self, data: bytes) -> None: - raise RuntimeError("Cannot call .write() for websocket") - - def __aiter__(self) -> "WebSocketResponse": - return self - - async def __anext__(self) -> WSMessage: - msg = await self.receive() - if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING, WSMsgType.CLOSED): - raise StopAsyncIteration - return msg - - def _cancel(self, exc: BaseException) -> None: - if self._reader is not None: - self._reader.set_exception(exc) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/utils/_internal/query_language/query_parser.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/utils/_internal/query_language/query_parser.py deleted file mode 100644 index b635d296d8eddd2894c9803b4a230ebdd1b25802..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/utils/_internal/query_language/query_parser.py +++ /dev/null @@ -1,125 +0,0 @@ -from typing import Any, Dict, List, Optional, Union - -from docarray.utils._internal.query_language.lookup import ( - LookupLeaf, - LookupNode, - LookupTreeElem, - Q, -) - -LOGICAL_OPERATORS: Dict[str, Union[str, bool]] = { - '$and': 'and', - '$or': 'or', - '$not': True, -} - -COMPARISON_OPERATORS = { - '$lt': 'lt', - '$gt': 'gt', - '$lte': 'lte', - '$gte': 'gte', - '$eq': 'exact', - '$neq': 'neq', - '$exists': 'exists', -} - -REGEX_OPERATORS = {'$regex': 'regex'} - -ARRAY_OPERATORS = {'$size': 'size'} - -MEMBERSHIP_OPERATORS = {'$in': 'in', '$nin': 'nin'} - -SUPPORTED_OPERATORS = { - **COMPARISON_OPERATORS, - **ARRAY_OPERATORS, - **REGEX_OPERATORS, - **MEMBERSHIP_OPERATORS, -} - - -def _parse_lookups( - data: Union[Dict, List] = {}, root_node: Optional[LookupTreeElem] = None -) -> Optional[LookupTreeElem]: - if isinstance(data, dict): - for key, value in data.items(): - node: Optional[LookupTreeElem] = None - if isinstance(root_node, LookupLeaf): - root = LookupNode() - root.add_child(root_node) - root_node = root - - if key in LOGICAL_OPERATORS: - if key == '$not': - node = LookupNode(negate=True) - else: - node = LookupNode(op=LOGICAL_OPERATORS[key]) - node = _parse_lookups(value, root_node=node) - - elif key.startswith('$'): - raise ValueError( - f'The operator {key} is not supported yet,' - f' please double check the given filters!' - ) - else: - if not value or not isinstance(value, dict): - raise ValueError( - '''Not a valid query. It should follow the format: - { : { : }, ... } - ''' - ) - - items = list(value.items()) - if len(items) == 1: - op, val = items[0] - if op in LOGICAL_OPERATORS: - if op == '$not': - node = LookupNode(negate=True) - else: - node = LookupNode(op=LOGICAL_OPERATORS[op]) - node = _parse_lookups(val, root_node=node) - elif op in SUPPORTED_OPERATORS: - node = Q(**{f'{key}.{SUPPORTED_OPERATORS[op]}': val}) - else: - raise ValueError( - f'The operator {op} is not supported yet, ' - f'please double check the given filters!' 
- ) - - else: - node = LookupNode() - for op, val in items: - _node = _parse_lookups({key: {op: val}}) - node.add_child(_node) - - if root_node and node: - if isinstance(root_node, LookupNode): - root_node.add_child(node) - elif node: - root_node = node - - elif isinstance(data, list): - for d in data: - node = _parse_lookups(d) - if root_node and node: - if isinstance(root_node, LookupNode): - root_node.add_child(node) - elif node: - root_node = node - else: - raise ValueError(f'The query is illegal: `{data}`') - - return root_node - - -class QueryParser: - """A class to parse dict condition to lookup query.""" - - def __init__(self, conditions: Union[Dict, List] = {}): - self.conditions = conditions - self.lookup_groups = _parse_lookups(self.conditions) - - def evaluate(self, doc: Any) -> bool: - return self.lookup_groups.evaluate(doc) if self.lookup_groups else True - - def __call__(self, doc: Any) -> bool: - return self.evaluate(doc) diff --git a/spaces/Sup3r/img-to-music/share_btn.py b/spaces/Sup3r/img-to-music/share_btn.py deleted file mode 100644 index 1a2ac6a6e74b114dbd54c2f24723a87180db51ef..0000000000000000000000000000000000000000 --- a/spaces/Sup3r/img-to-music/share_btn.py +++ /dev/null @@ -1,100 +0,0 @@ -community_icon_html = """""" - -loading_icon_html = """""" - -share_js = """async () => { - async function uploadFile(file){ - const UPLOAD_URL = 'https://huggingface.co/uploads'; - const response = await fetch(UPLOAD_URL, { - method: 'POST', - headers: { - 'Content-Type': file.type, - 'X-Requested-With': 'XMLHttpRequest', - }, - body: file, /// <- File inherits from Blob - }); - const url = await response.text(); - return url; - } - async function getInputImgFile(imgEl){ - const res = await fetch(imgEl.src); - const blob = await res.blob(); - const imgId = Date.now() % 200; - const isPng = imgEl.src.startsWith(`data:image/png`); - if(isPng){ - const fileName = `sd-perception-${{imgId}}.png`; - return new File([blob], fileName, { type: 'image/png' }); - }else{ - const fileName = `sd-perception-${{imgId}}.jpg`; - return new File([blob], fileName, { type: 'image/jpeg' }); - } - } - async function getOutputMusicFile(audioEL){ - const res = await fetch(audioEL.src); - const blob = await res.blob(); - const audioId = Date.now() % 200; - const fileName = `img-to-music-${{audioId}}.wav`; - const musicBlob = new File([blob], fileName, { type: 'audio/wav' }); - console.log(musicBlob); - return musicBlob; - } - - async function audioToBase64(audioFile) { - return new Promise((resolve, reject) => { - let reader = new FileReader(); - reader.readAsDataURL(audioFile); - reader.onload = () => resolve(reader.result); - reader.onerror = error => reject(error); - - }); - } - const gradioEl = document.querySelector('body > gradio-app'); - // const gradioEl = document.querySelector("gradio-app").shadowRoot; - const inputImgEl = gradioEl.querySelector('#input-img img'); - const outputMusic = gradioEl.querySelector('#music-output audio'); - const outputMusic_src = gradioEl.querySelector('#music-output audio').src; - const outputMusic_name = outputMusic_src.split('/').pop(); - let titleTxt = outputMusic_name; - //if(titleTxt.length > 100){ - // titleTxt = titleTxt.slice(0, 100) + ' ...'; - //} - const shareBtnEl = gradioEl.querySelector('#share-btn'); - const shareIconEl = gradioEl.querySelector('#share-btn-share-icon'); - const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon'); - if(!outputMusic){ - return; - }; - shareBtnEl.style.pointerEvents = 'none'; - shareIconEl.style.display 
= 'none'; - loadingIconEl.style.removeProperty('display'); - const inputFile = await getInputImgFile(inputImgEl); - const urlInputImg = await uploadFile(inputFile); - const musicFile = await getOutputMusicFile(outputMusic); - const dataOutputMusic = await uploadFile(musicFile); - - const descriptionMd = `#### Input img: - - -#### Music: - - -`; - const params = new URLSearchParams({ - title: titleTxt, - description: descriptionMd, - }); - const paramsStr = params.toString(); - window.open(`https://huggingface.co/spaces/fffiloni/img-to-music/discussions/new?${paramsStr}`, '_blank'); - shareBtnEl.style.removeProperty('pointer-events'); - shareIconEl.style.removeProperty('display'); - loadingIconEl.style.display = 'none'; -}""" \ No newline at end of file diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/data/transforms/augmentation.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/data/transforms/augmentation.py deleted file mode 100644 index 63dd41aef658c9b51c7246880399405a029c5580..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/data/transforms/augmentation.py +++ /dev/null @@ -1,380 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. - -import inspect -import numpy as np -import pprint -from typing import Any, List, Optional, Tuple, Union -from fvcore.transforms.transform import Transform, TransformList - -""" -See "Data Augmentation" tutorial for an overview of the system: -https://detectron2.readthedocs.io/tutorials/augmentation.html -""" - - -__all__ = [ - "Augmentation", - "AugmentationList", - "AugInput", - "TransformGen", - "apply_transform_gens", - "StandardAugInput", - "apply_augmentations", -] - - -def _check_img_dtype(img): - assert isinstance(img, np.ndarray), "[Augmentation] Needs an numpy array, but got a {}!".format( - type(img) - ) - assert not isinstance(img.dtype, np.integer) or ( - img.dtype == np.uint8 - ), "[Augmentation] Got image of type {}, use uint8 or floating points instead!".format( - img.dtype - ) - assert img.ndim in [2, 3], img.ndim - - -def _get_aug_input_args(aug, aug_input) -> List[Any]: - """ - Get the arguments to be passed to ``aug.get_transform`` from the input ``aug_input``. - """ - if aug.input_args is None: - # Decide what attributes are needed automatically - prms = list(inspect.signature(aug.get_transform).parameters.items()) - # The default behavior is: if there is one parameter, then its "image" - # (work automatically for majority of use cases, and also avoid BC breaking), - # Otherwise, use the argument names. - if len(prms) == 1: - names = ("image",) - else: - names = [] - for name, prm in prms: - if prm.kind in ( - inspect.Parameter.VAR_POSITIONAL, - inspect.Parameter.VAR_KEYWORD, - ): - raise TypeError( - f""" \ -The default implementation of `{type(aug)}.__call__` does not allow \ -`{type(aug)}.get_transform` to use variable-length arguments (*args, **kwargs)! \ -If arguments are unknown, reimplement `__call__` instead. \ -""" - ) - names.append(name) - aug.input_args = tuple(names) - - args = [] - for f in aug.input_args: - try: - args.append(getattr(aug_input, f)) - except AttributeError as e: - raise AttributeError( - f"{type(aug)}.get_transform needs input attribute '{f}', " - f"but it is not an attribute of {type(aug_input)}!" - ) from e - return args - - -class Augmentation: - """ - Augmentation defines (often random) policies/strategies to generate :class:`Transform` - from data. 
It is often used for pre-processing of input data. - - A "policy" that generates a :class:`Transform` may, in the most general case, - need arbitrary information from input data in order to determine what transforms - to apply. Therefore, each :class:`Augmentation` instance defines the arguments - needed by its :meth:`get_transform` method. When called with the positional arguments, - the :meth:`get_transform` method executes the policy. - - Note that :class:`Augmentation` defines the policies to create a :class:`Transform`, - but not how to execute the actual transform operations to those data. - Its :meth:`__call__` method will use :meth:`AugInput.transform` to execute the transform. - - The returned `Transform` object is meant to describe deterministic transformation, which means - it can be re-applied on associated data, e.g. the geometry of an image and its segmentation - masks need to be transformed together. - (If such re-application is not needed, then determinism is not a crucial requirement.) - """ - - input_args: Optional[Tuple[str]] = None - """ - Stores the attribute names needed by :meth:`get_transform`, e.g. ``("image", "sem_seg")``. - By default, it is just a tuple of argument names in :meth:`self.get_transform`, which often only - contain "image". As long as the argument name convention is followed, there is no need for - users to touch this attribute. - """ - - def _init(self, params=None): - if params: - for k, v in params.items(): - if k != "self" and not k.startswith("_"): - setattr(self, k, v) - - def get_transform(self, *args) -> Transform: - """ - Execute the policy based on input data, and decide what transform to apply to inputs. - - Args: - args: Any fixed-length positional arguments. By default, the name of the arguments - should exist in the :class:`AugInput` to be used. - - Returns: - Transform: Returns the deterministic transform to apply to the input. - - Examples: - :: - class MyAug: - # if a policy needs to know both image and semantic segmentation - def get_transform(image, sem_seg) -> T.Transform: - pass - tfm: Transform = MyAug().get_transform(image, sem_seg) - new_image = tfm.apply_image(image) - - Notes: - Users can freely use arbitrary new argument names in custom - :meth:`get_transform` method, as long as they are available in the - input data. In detectron2 we use the following convention: - - * image: (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or - floating point in range [0, 1] or [0, 255]. - * boxes: (N,4) ndarray of float32. It represents the instance bounding boxes - of N instances. Each is in XYXY format in unit of absolute coordinates. - * sem_seg: (H,W) ndarray of type uint8. Each element is an integer label of pixel. - - We do not specify convention for other types and do not include builtin - :class:`Augmentation` that uses other types in detectron2. - """ - raise NotImplementedError - - def __call__(self, aug_input) -> Transform: - """ - Augment the given `aug_input` **in-place**, and return the transform that's used. - - This method will be called to apply the augmentation. In most augmentation, it - is enough to use the default implementation, which calls :meth:`get_transform` - using the inputs. But a subclass can overwrite it to have more complicated logic. - - Args: - aug_input (AugInput): an object that has attributes needed by this augmentation - (defined by ``self.get_transform``). Its ``transform`` method will be called - to in-place transform it. 
- - Returns: - Transform: the transform that is applied on the input. - """ - args = _get_aug_input_args(self, aug_input) - tfm = self.get_transform(*args) - assert isinstance(tfm, (Transform, TransformList)), ( - f"{type(self)}.get_transform must return an instance of Transform! " - f"Got {type(tfm)} instead." - ) - aug_input.transform(tfm) - return tfm - - def _rand_range(self, low=1.0, high=None, size=None): - """ - Uniform float random number between low and high. - """ - if high is None: - low, high = 0, low - if size is None: - size = [] - return np.random.uniform(low, high, size) - - def __repr__(self): - """ - Produce something like: - "MyAugmentation(field1={self.field1}, field2={self.field2})" - """ - try: - sig = inspect.signature(self.__init__) - classname = type(self).__name__ - argstr = [] - for name, param in sig.parameters.items(): - assert ( - param.kind != param.VAR_POSITIONAL and param.kind != param.VAR_KEYWORD - ), "The default __repr__ doesn't support *args or **kwargs" - assert hasattr(self, name), ( - "Attribute {} not found! " - "Default __repr__ only works if attributes match the constructor.".format(name) - ) - attr = getattr(self, name) - default = param.default - if default is attr: - continue - attr_str = pprint.pformat(attr) - if "\n" in attr_str: - # don't show it if pformat decides to use >1 lines - attr_str = "..." - argstr.append("{}={}".format(name, attr_str)) - return "{}({})".format(classname, ", ".join(argstr)) - except AssertionError: - return super().__repr__() - - __str__ = __repr__ - - -class _TransformToAug(Augmentation): - def __init__(self, tfm: Transform): - self.tfm = tfm - - def get_transform(self, *args): - return self.tfm - - def __repr__(self): - return repr(self.tfm) - - __str__ = __repr__ - - -def _transform_to_aug(tfm_or_aug): - """ - Wrap Transform into Augmentation. - Private, used internally to implement augmentations. - """ - assert isinstance(tfm_or_aug, (Transform, Augmentation)), tfm_or_aug - if isinstance(tfm_or_aug, Augmentation): - return tfm_or_aug - else: - return _TransformToAug(tfm_or_aug) - - -class AugmentationList(Augmentation): - """ - Apply a sequence of augmentations. - - It has ``__call__`` method to apply the augmentations. - - Note that :meth:`get_transform` method is impossible (will throw error if called) - for :class:`AugmentationList`, because in order to apply a sequence of augmentations, - the kth augmentation must be applied first, to provide inputs needed by the (k+1)th - augmentation. - """ - - def __init__(self, augs): - """ - Args: - augs (list[Augmentation or Transform]): - """ - super().__init__() - self.augs = [_transform_to_aug(x) for x in augs] - - def __call__(self, aug_input) -> TransformList: - tfms = [] - for x in self.augs: - tfm = x(aug_input) - tfms.append(tfm) - return TransformList(tfms) - - def __repr__(self): - msgs = [str(x) for x in self.augs] - return "AugmentationList[{}]".format(", ".join(msgs)) - - __str__ = __repr__ - - -class AugInput: - """ - Input that can be used with :meth:`Augmentation.__call__`. - This is a standard implementation for the majority of use cases. - This class provides the standard attributes **"image", "boxes", "sem_seg"** - defined in :meth:`__init__` and they may be needed by different augmentations. - Most augmentation policies do not need attributes beyond these three. - - After applying augmentations to these attributes (using :meth:`AugInput.transform`), - the returned transforms can then be used to transform other data structures that users have. 
- - Examples: - :: - input = AugInput(image, boxes=boxes) - tfms = augmentation(input) - transformed_image = input.image - transformed_boxes = input.boxes - transformed_other_data = tfms.apply_other(other_data) - - An extended project that works with new data types may implement augmentation policies - that need other inputs. An algorithm may need to transform inputs in a way different - from the standard approach defined in this class. In those rare situations, users can - implement a class similar to this class, that satify the following condition: - - * The input must provide access to these data in the form of attribute access - (``getattr``). For example, if an :class:`Augmentation` to be applied needs "image" - and "sem_seg" arguments, its input must have the attribute "image" and "sem_seg". - * The input must have a ``transform(tfm: Transform) -> None`` method which - in-place transforms all its attributes. - """ - - # TODO maybe should support more builtin data types here - def __init__( - self, - image: np.ndarray, - *, - boxes: Optional[np.ndarray] = None, - sem_seg: Optional[np.ndarray] = None, - ): - """ - Args: - image (ndarray): (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or - floating point in range [0, 1] or [0, 255]. The meaning of C is up - to users. - boxes (ndarray or None): Nx4 float32 boxes in XYXY_ABS mode - sem_seg (ndarray or None): HxW uint8 semantic segmentation mask. Each element - is an integer label of pixel. - """ - _check_img_dtype(image) - self.image = image - self.boxes = boxes - self.sem_seg = sem_seg - - def transform(self, tfm: Transform) -> None: - """ - In-place transform all attributes of this class. - - By "in-place", it means after calling this method, accessing an attribute such - as ``self.image`` will return transformed data. - """ - self.image = tfm.apply_image(self.image) - if self.boxes is not None: - self.boxes = tfm.apply_box(self.boxes) - if self.sem_seg is not None: - self.sem_seg = tfm.apply_segmentation(self.sem_seg) - - def apply_augmentations( - self, augmentations: List[Union[Augmentation, Transform]] - ) -> TransformList: - """ - Equivalent of ``AugmentationList(augmentations)(self)`` - """ - return AugmentationList(augmentations)(self) - - -def apply_augmentations(augmentations: List[Union[Transform, Augmentation]], inputs): - """ - Use ``T.AugmentationList(augmentations)(inputs)`` instead. - """ - if isinstance(inputs, np.ndarray): - # handle the common case of image-only Augmentation, also for backward compatibility - image_only = True - inputs = AugInput(inputs) - else: - image_only = False - tfms = inputs.apply_augmentations(augmentations) - return inputs.image if image_only else inputs, tfms - - -apply_transform_gens = apply_augmentations -""" -Alias for backward-compatibility. -""" - -TransformGen = Augmentation -""" -Alias for Augmentation, since it is something that generates :class:`Transform`s -""" - -StandardAugInput = AugInput -""" -Alias for compatibility. It's not worth the complexity to have two classes. 
-""" diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/utils/virtualenv.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/utils/virtualenv.py deleted file mode 100644 index 882e36f5c1de19a8200000c216cf80119b37c96d..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/utils/virtualenv.py +++ /dev/null @@ -1,104 +0,0 @@ -import logging -import os -import re -import site -import sys -from typing import List, Optional - -logger = logging.getLogger(__name__) -_INCLUDE_SYSTEM_SITE_PACKAGES_REGEX = re.compile( - r"include-system-site-packages\s*=\s*(?Ptrue|false)" -) - - -def _running_under_venv() -> bool: - """Checks if sys.base_prefix and sys.prefix match. - - This handles PEP 405 compliant virtual environments. - """ - return sys.prefix != getattr(sys, "base_prefix", sys.prefix) - - -def _running_under_legacy_virtualenv() -> bool: - """Checks if sys.real_prefix is set. - - This handles virtual environments created with pypa's virtualenv. - """ - # pypa/virtualenv case - return hasattr(sys, "real_prefix") - - -def running_under_virtualenv() -> bool: - """True if we're running inside a virtual environment, False otherwise.""" - return _running_under_venv() or _running_under_legacy_virtualenv() - - -def _get_pyvenv_cfg_lines() -> Optional[List[str]]: - """Reads {sys.prefix}/pyvenv.cfg and returns its contents as list of lines - - Returns None, if it could not read/access the file. - """ - pyvenv_cfg_file = os.path.join(sys.prefix, "pyvenv.cfg") - try: - # Although PEP 405 does not specify, the built-in venv module always - # writes with UTF-8. (pypa/pip#8717) - with open(pyvenv_cfg_file, encoding="utf-8") as f: - return f.read().splitlines() # avoids trailing newlines - except OSError: - return None - - -def _no_global_under_venv() -> bool: - """Check `{sys.prefix}/pyvenv.cfg` for system site-packages inclusion - - PEP 405 specifies that when system site-packages are not supposed to be - visible from a virtual environment, `pyvenv.cfg` must contain the following - line: - - include-system-site-packages = false - - Additionally, log a warning if accessing the file fails. - """ - cfg_lines = _get_pyvenv_cfg_lines() - if cfg_lines is None: - # We're not in a "sane" venv, so assume there is no system - # site-packages access (since that's PEP 405's default state). - logger.warning( - "Could not access 'pyvenv.cfg' despite a virtual environment " - "being active. Assuming global site-packages is not accessible " - "in this environment." - ) - return True - - for line in cfg_lines: - match = _INCLUDE_SYSTEM_SITE_PACKAGES_REGEX.match(line) - if match is not None and match.group("value") == "false": - return True - return False - - -def _no_global_under_legacy_virtualenv() -> bool: - """Check if "no-global-site-packages.txt" exists beside site.py - - This mirrors logic in pypa/virtualenv for determining whether system - site-packages are visible in the virtual environment. 
- """ - site_mod_dir = os.path.dirname(os.path.abspath(site.__file__)) - no_global_site_packages_file = os.path.join( - site_mod_dir, - "no-global-site-packages.txt", - ) - return os.path.exists(no_global_site_packages_file) - - -def virtualenv_no_global() -> bool: - """Returns a boolean, whether running in venv with no system site-packages.""" - # PEP 405 compliance needs to be checked first since virtualenv >=20 would - # return True for both checks, but is only able to use the PEP 405 config. - if _running_under_venv(): - return _no_global_under_venv() - - if _running_under_legacy_virtualenv(): - return _no_global_under_legacy_virtualenv() - - return False diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pyparsing/helpers.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pyparsing/helpers.py deleted file mode 100644 index 018f0d6ac863f2e4a27636c721669061887ae554..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pyparsing/helpers.py +++ /dev/null @@ -1,1100 +0,0 @@ -# helpers.py -import html.entities -import re -import sys -import typing - -from . import __diag__ -from .core import * -from .util import ( - _bslash, - _flatten, - _escape_regex_range_chars, - replaced_by_pep8, -) - - -# -# global helpers -# -def counted_array( - expr: ParserElement, - int_expr: typing.Optional[ParserElement] = None, - *, - intExpr: typing.Optional[ParserElement] = None, -) -> ParserElement: - """Helper to define a counted list of expressions. - - This helper defines a pattern of the form:: - - integer expr expr expr... - - where the leading integer tells how many expr expressions follow. - The matched tokens returns the array of expr tokens as a list - the - leading count token is suppressed. - - If ``int_expr`` is specified, it should be a pyparsing expression - that produces an integer value. 
- - Example:: - - counted_array(Word(alphas)).parse_string('2 ab cd ef') # -> ['ab', 'cd'] - - # in this parser, the leading integer value is given in binary, - # '10' indicating that 2 values are in the array - binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2)) - counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef') # -> ['ab', 'cd'] - - # if other fields must be parsed after the count but before the - # list items, give the fields results names and they will - # be preserved in the returned ParseResults: - count_with_metadata = integer + Word(alphas)("type") - typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items") - result = typed_array.parse_string("3 bool True True False") - print(result.dump()) - - # prints - # ['True', 'True', 'False'] - # - items: ['True', 'True', 'False'] - # - type: 'bool' - """ - intExpr = intExpr or int_expr - array_expr = Forward() - - def count_field_parse_action(s, l, t): - nonlocal array_expr - n = t[0] - array_expr <<= (expr * n) if n else Empty() - # clear list contents, but keep any named results - del t[:] - - if intExpr is None: - intExpr = Word(nums).set_parse_action(lambda t: int(t[0])) - else: - intExpr = intExpr.copy() - intExpr.set_name("arrayLen") - intExpr.add_parse_action(count_field_parse_action, call_during_try=True) - return (intExpr + array_expr).set_name("(len) " + str(expr) + "...") - - -def match_previous_literal(expr: ParserElement) -> ParserElement: - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks for - a 'repeat' of a previous expression. For example:: - - first = Word(nums) - second = match_previous_literal(first) - match_expr = first + ":" + second - - will match ``"1:1"``, but not ``"1:2"``. Because this - matches a previous literal, will also match the leading - ``"1:1"`` in ``"1:10"``. If this is not desired, use - :class:`match_previous_expr`. Do *not* use with packrat parsing - enabled. - """ - rep = Forward() - - def copy_token_to_repeater(s, l, t): - if t: - if len(t) == 1: - rep << t[0] - else: - # flatten t tokens - tflat = _flatten(t.as_list()) - rep << And(Literal(tt) for tt in tflat) - else: - rep << Empty() - - expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) - rep.set_name("(prev) " + str(expr)) - return rep - - -def match_previous_expr(expr: ParserElement) -> ParserElement: - """Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks for - a 'repeat' of a previous expression. For example:: - - first = Word(nums) - second = match_previous_expr(first) - match_expr = first + ":" + second - - will match ``"1:1"``, but not ``"1:2"``. Because this - matches by expressions, will *not* match the leading ``"1:1"`` - in ``"1:10"``; the expressions are evaluated first, and then - compared, so ``"1"`` is compared with ``"10"``. Do *not* use - with packrat parsing enabled. 
- """ - rep = Forward() - e2 = expr.copy() - rep <<= e2 - - def copy_token_to_repeater(s, l, t): - matchTokens = _flatten(t.as_list()) - - def must_match_these_tokens(s, l, t): - theseTokens = _flatten(t.as_list()) - if theseTokens != matchTokens: - raise ParseException( - s, l, f"Expected {matchTokens}, found{theseTokens}" - ) - - rep.set_parse_action(must_match_these_tokens, callDuringTry=True) - - expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) - rep.set_name("(prev) " + str(expr)) - return rep - - -def one_of( - strs: Union[typing.Iterable[str], str], - caseless: bool = False, - use_regex: bool = True, - as_keyword: bool = False, - *, - useRegex: bool = True, - asKeyword: bool = False, -) -> ParserElement: - """Helper to quickly define a set of alternative :class:`Literal` s, - and makes sure to do longest-first testing when there is a conflict, - regardless of the input order, but returns - a :class:`MatchFirst` for best performance. - - Parameters: - - - ``strs`` - a string of space-delimited literals, or a collection of - string literals - - ``caseless`` - treat all literals as caseless - (default= ``False``) - - ``use_regex`` - as an optimization, will - generate a :class:`Regex` object; otherwise, will generate - a :class:`MatchFirst` object (if ``caseless=True`` or ``as_keyword=True``, or if - creating a :class:`Regex` raises an exception) - (default= ``True``) - - ``as_keyword`` - enforce :class:`Keyword`-style matching on the - generated expressions - (default= ``False``) - - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility, - but will be removed in a future release - - Example:: - - comp_oper = one_of("< = > <= >= !=") - var = Word(alphas) - number = Word(nums) - term = var | number - comparison_expr = term + comp_oper + term - print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12")) - - prints:: - - [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] - """ - asKeyword = asKeyword or as_keyword - useRegex = useRegex and use_regex - - if ( - isinstance(caseless, str_type) - and __diag__.warn_on_multiple_string_args_to_oneof - ): - warnings.warn( - "More than one string argument passed to one_of, pass" - " choices as a list or space-delimited string", - stacklevel=2, - ) - - if caseless: - isequal = lambda a, b: a.upper() == b.upper() - masks = lambda a, b: b.upper().startswith(a.upper()) - parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral - else: - isequal = lambda a, b: a == b - masks = lambda a, b: b.startswith(a) - parseElementClass = Keyword if asKeyword else Literal - - symbols: List[str] = [] - if isinstance(strs, str_type): - strs = typing.cast(str, strs) - symbols = strs.split() - elif isinstance(strs, Iterable): - symbols = list(strs) - else: - raise TypeError("Invalid argument to one_of, expected string or iterable") - if not symbols: - return NoMatch() - - # reorder given symbols to take care to avoid masking longer choices with shorter ones - # (but only if the given symbols are not just single characters) - if any(len(sym) > 1 for sym in symbols): - i = 0 - while i < len(symbols) - 1: - cur = symbols[i] - for j, other in enumerate(symbols[i + 1 :]): - if isequal(other, cur): - del symbols[i + j + 1] - break - elif masks(cur, other): - del symbols[i + j + 1] - symbols.insert(i, other) - break - else: - i += 1 - - if useRegex: - re_flags: int = re.IGNORECASE if caseless else 0 - - try: - if all(len(sym) == 1 for sym in symbols): - # symbols are just single characters, create 
range regex pattern - patt = f"[{''.join(_escape_regex_range_chars(sym) for sym in symbols)}]" - else: - patt = "|".join(re.escape(sym) for sym in symbols) - - # wrap with \b word break markers if defining as keywords - if asKeyword: - patt = rf"\b(?:{patt})\b" - - ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols)) - - if caseless: - # add parse action to return symbols as specified, not in random - # casing as found in input string - symbol_map = {sym.lower(): sym for sym in symbols} - ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()]) - - return ret - - except re.error: - warnings.warn( - "Exception creating Regex for one_of, building MatchFirst", stacklevel=2 - ) - - # last resort, just use MatchFirst - return MatchFirst(parseElementClass(sym) for sym in symbols).set_name( - " | ".join(symbols) - ) - - -def dict_of(key: ParserElement, value: ParserElement) -> ParserElement: - """Helper to easily and clearly define a dictionary by specifying - the respective patterns for the key and value. Takes care of - defining the :class:`Dict`, :class:`ZeroOrMore`, and - :class:`Group` tokens in the proper order. The key pattern - can include delimiting markers or punctuation, as long as they are - suppressed, thereby leaving the significant key text. The value - pattern can include named results, so that the :class:`Dict` results - can include named token fields. - - Example:: - - text = "shape: SQUARE posn: upper left color: light blue texture: burlap" - attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) - print(attr_expr[1, ...].parse_string(text).dump()) - - attr_label = label - attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join) - - # similar to Dict, but simpler call format - result = dict_of(attr_label, attr_value).parse_string(text) - print(result.dump()) - print(result['shape']) - print(result.shape) # object attribute access works too - print(result.as_dict()) - - prints:: - - [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: 'light blue' - - posn: 'upper left' - - shape: 'SQUARE' - - texture: 'burlap' - SQUARE - SQUARE - {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} - """ - return Dict(OneOrMore(Group(key + value))) - - -def original_text_for( - expr: ParserElement, as_string: bool = True, *, asString: bool = True -) -> ParserElement: - """Helper to return the original, untokenized text for a given - expression. Useful to restore the parsed fields of an HTML start - tag into the raw tag text itself, or to revert separate tokens with - intervening whitespace back to the original matching input text. By - default, returns a string containing the original parsed text. - - If the optional ``as_string`` argument is passed as - ``False``, then the return value is - a :class:`ParseResults` containing any results names that - were originally matched, and a single token containing the original - matched text from the input string. So if the expression passed to - :class:`original_text_for` contains expressions with defined - results names, you must set ``as_string`` to ``False`` if you - want to preserve those results name values. - - The ``asString`` pre-PEP8 argument is retained for compatibility, - but will be removed in a future release. 
- - Example:: - - src = "this is test bold text normal text " - for tag in ("b", "i"): - opener, closer = make_html_tags(tag) - patt = original_text_for(opener + ... + closer) - print(patt.search_string(src)[0]) - - prints:: - - [' bold text '] - ['text'] - """ - asString = asString and as_string - - locMarker = Empty().set_parse_action(lambda s, loc, t: loc) - endlocMarker = locMarker.copy() - endlocMarker.callPreparse = False - matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") - if asString: - extractText = lambda s, l, t: s[t._original_start : t._original_end] - else: - - def extractText(s, l, t): - t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]] - - matchExpr.set_parse_action(extractText) - matchExpr.ignoreExprs = expr.ignoreExprs - matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection) - return matchExpr - - -def ungroup(expr: ParserElement) -> ParserElement: - """Helper to undo pyparsing's default grouping of And expressions, - even if all but one are non-empty. - """ - return TokenConverter(expr).add_parse_action(lambda t: t[0]) - - -def locatedExpr(expr: ParserElement) -> ParserElement: - """ - (DEPRECATED - future code should use the :class:`Located` class) - Helper to decorate a returned token with its starting and ending - locations in the input string. - - This helper adds the following results names: - - - ``locn_start`` - location where matched expression begins - - ``locn_end`` - location where matched expression ends - - ``value`` - the actual parsed results - - Be careful if the input text contains ```` characters, you - may want to call :class:`ParserElement.parse_with_tabs` - - Example:: - - wd = Word(alphas) - for match in locatedExpr(wd).search_string("ljsdf123lksdjjf123lkkjj1222"): - print(match) - - prints:: - - [[0, 'ljsdf', 5]] - [[8, 'lksdjjf', 15]] - [[18, 'lkkjj', 23]] - """ - locator = Empty().set_parse_action(lambda ss, ll, tt: ll) - return Group( - locator("locn_start") - + expr("value") - + locator.copy().leaveWhitespace()("locn_end") - ) - - -def nested_expr( - opener: Union[str, ParserElement] = "(", - closer: Union[str, ParserElement] = ")", - content: typing.Optional[ParserElement] = None, - ignore_expr: ParserElement = quoted_string(), - *, - ignoreExpr: ParserElement = quoted_string(), -) -> ParserElement: - """Helper method for defining nested lists enclosed in opening and - closing delimiters (``"("`` and ``")"`` are the default). - - Parameters: - - - ``opener`` - opening character for a nested list - (default= ``"("``); can also be a pyparsing expression - - ``closer`` - closing character for a nested list - (default= ``")"``); can also be a pyparsing expression - - ``content`` - expression for items within the nested lists - (default= ``None``) - - ``ignore_expr`` - expression for ignoring opening and closing delimiters - (default= :class:`quoted_string`) - - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility - but will be removed in a future release - - If an expression is not provided for the content argument, the - nested expression will capture all whitespace-delimited content - between delimiters as a list of separate values. - - Use the ``ignore_expr`` argument to define expressions that may - contain opening or closing characters that should not be treated as - opening or closing characters for nesting, such as quoted_string or - a comment expression. Specify multiple expressions using an - :class:`Or` or :class:`MatchFirst`. 
The default is - :class:`quoted_string`, but if no expressions are to be ignored, then - pass ``None`` for this argument. - - Example:: - - data_type = one_of("void int short long char float double") - decl_data_type = Combine(data_type + Opt(Word('*'))) - ident = Word(alphas+'_', alphanums+'_') - number = pyparsing_common.number - arg = Group(decl_data_type + ident) - LPAR, RPAR = map(Suppress, "()") - - code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment)) - - c_function = (decl_data_type("type") - + ident("name") - + LPAR + Opt(DelimitedList(arg), [])("args") + RPAR - + code_body("body")) - c_function.ignore(c_style_comment) - - source_code = ''' - int is_odd(int x) { - return (x%2); - } - - int dec_to_hex(char hchar) { - if (hchar >= '0' && hchar <= '9') { - return (ord(hchar)-ord('0')); - } else { - return (10+ord(hchar)-ord('A')); - } - } - ''' - for func in c_function.search_string(source_code): - print("%(name)s (%(type)s) args: %(args)s" % func) - - - prints:: - - is_odd (int) args: [['int', 'x']] - dec_to_hex (int) args: [['char', 'hchar']] - """ - if ignoreExpr != ignore_expr: - ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr - if opener == closer: - raise ValueError("opening and closing strings cannot be the same") - if content is None: - if isinstance(opener, str_type) and isinstance(closer, str_type): - opener = typing.cast(str, opener) - closer = typing.cast(str, closer) - if len(opener) == 1 and len(closer) == 1: - if ignoreExpr is not None: - content = Combine( - OneOrMore( - ~ignoreExpr - + CharsNotIn( - opener + closer + ParserElement.DEFAULT_WHITE_CHARS, - exact=1, - ) - ) - ).set_parse_action(lambda t: t[0].strip()) - else: - content = empty.copy() + CharsNotIn( - opener + closer + ParserElement.DEFAULT_WHITE_CHARS - ).set_parse_action(lambda t: t[0].strip()) - else: - if ignoreExpr is not None: - content = Combine( - OneOrMore( - ~ignoreExpr - + ~Literal(opener) - + ~Literal(closer) - + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) - ) - ).set_parse_action(lambda t: t[0].strip()) - else: - content = Combine( - OneOrMore( - ~Literal(opener) - + ~Literal(closer) - + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) - ) - ).set_parse_action(lambda t: t[0].strip()) - else: - raise ValueError( - "opening and closing arguments must be strings if no content expression is given" - ) - ret = Forward() - if ignoreExpr is not None: - ret <<= Group( - Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer) - ) - else: - ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer)) - ret.set_name("nested %s%s expression" % (opener, closer)) - return ret - - -def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")): - """Internal helper to construct opening and closing tag expressions, given a tag name""" - if isinstance(tagStr, str_type): - resname = tagStr - tagStr = Keyword(tagStr, caseless=not xml) - else: - resname = tagStr.name - - tagAttrName = Word(alphas, alphanums + "_-:") - if xml: - tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes) - openTag = ( - suppress_LT - + tagStr("tag") - + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) - + Opt("/", default=[False])("empty").set_parse_action( - lambda s, l, t: t[0] == "/" - ) - + suppress_GT - ) - else: - tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word( - printables, exclude_chars=">" - ) - openTag = ( - suppress_LT - + tagStr("tag") 
- + Dict( - ZeroOrMore( - Group( - tagAttrName.set_parse_action(lambda t: t[0].lower()) - + Opt(Suppress("=") + tagAttrValue) - ) - ) - ) - + Opt("/", default=[False])("empty").set_parse_action( - lambda s, l, t: t[0] == "/" - ) - + suppress_GT - ) - closeTag = Combine(Literal("", adjacent=False) - - openTag.set_name("<%s>" % resname) - # add start results name in parse action now that ungrouped names are not reported at two levels - openTag.add_parse_action( - lambda t: t.__setitem__( - "start" + "".join(resname.replace(":", " ").title().split()), t.copy() - ) - ) - closeTag = closeTag( - "end" + "".join(resname.replace(":", " ").title().split()) - ).set_name("" % resname) - openTag.tag = resname - closeTag.tag = resname - openTag.tag_body = SkipTo(closeTag()) - return openTag, closeTag - - -def make_html_tags( - tag_str: Union[str, ParserElement] -) -> Tuple[ParserElement, ParserElement]: - """Helper to construct opening and closing tag expressions for HTML, - given a tag name. Matches tags in either upper or lower case, - attributes with namespaces and with quoted or unquoted values. - - Example:: - - text = 'More info at the pyparsing wiki page' - # make_html_tags returns pyparsing expressions for the opening and - # closing tags as a 2-tuple - a, a_end = make_html_tags("A") - link_expr = a + SkipTo(a_end)("link_text") + a_end - - for link in link_expr.search_string(text): - # attributes in the tag (like "href" shown here) are - # also accessible as named results - print(link.link_text, '->', link.href) - - prints:: - - pyparsing -> https://github.com/pyparsing/pyparsing/wiki - """ - return _makeTags(tag_str, False) - - -def make_xml_tags( - tag_str: Union[str, ParserElement] -) -> Tuple[ParserElement, ParserElement]: - """Helper to construct opening and closing tag expressions for XML, - given a tag name. Matches tags only in the given upper/lower case. - - Example: similar to :class:`make_html_tags` - """ - return _makeTags(tag_str, True) - - -any_open_tag: ParserElement -any_close_tag: ParserElement -any_open_tag, any_close_tag = make_html_tags( - Word(alphas, alphanums + "_:").set_name("any tag") -) - -_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()} -common_html_entity = Regex("&(?P" + "|".join(_htmlEntityMap) + ");").set_name( - "common HTML entity" -) - - -def replace_html_entity(s, l, t): - """Helper parser action to replace common HTML entities with their special characters""" - return _htmlEntityMap.get(t.entity) - - -class OpAssoc(Enum): - """Enumeration of operator associativity - - used in constructing InfixNotationOperatorSpec for :class:`infix_notation`""" - - LEFT = 1 - RIGHT = 2 - - -InfixNotationOperatorArgType = Union[ - ParserElement, str, Tuple[Union[ParserElement, str], Union[ParserElement, str]] -] -InfixNotationOperatorSpec = Union[ - Tuple[ - InfixNotationOperatorArgType, - int, - OpAssoc, - typing.Optional[ParseAction], - ], - Tuple[ - InfixNotationOperatorArgType, - int, - OpAssoc, - ], -] - - -def infix_notation( - base_expr: ParserElement, - op_list: List[InfixNotationOperatorSpec], - lpar: Union[str, ParserElement] = Suppress("("), - rpar: Union[str, ParserElement] = Suppress(")"), -) -> ParserElement: - """Helper method for constructing grammars of expressions made up of - operators working in a precedence hierarchy. Operators may be unary - or binary, left- or right-associative. Parse actions can also be - attached to operator expressions. 
The generated parser will also - recognize the use of parentheses to override operator precedences - (see example below). - - Note: if you define a deep operator list, you may see performance - issues when using infix_notation. See - :class:`ParserElement.enable_packrat` for a mechanism to potentially - improve your parser performance. - - Parameters: - - - ``base_expr`` - expression representing the most basic operand to - be used in the expression - - ``op_list`` - list of tuples, one for each operator precedence level - in the expression grammar; each tuple is of the form ``(op_expr, - num_operands, right_left_assoc, (optional)parse_action)``, where: - - - ``op_expr`` is the pyparsing expression for the operator; may also - be a string, which will be converted to a Literal; if ``num_operands`` - is 3, ``op_expr`` is a tuple of two expressions, for the two - operators separating the 3 terms - - ``num_operands`` is the number of terms for this operator (must be 1, - 2, or 3) - - ``right_left_assoc`` is the indicator whether the operator is right - or left associative, using the pyparsing-defined constants - ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``. - - ``parse_action`` is the parse action to be associated with - expressions matching this operator expression (the parse action - tuple member may be omitted); if the parse action is passed - a tuple or list of functions, this is equivalent to calling - ``set_parse_action(*fn)`` - (:class:`ParserElement.set_parse_action`) - - ``lpar`` - expression for matching left-parentheses; if passed as a - str, then will be parsed as ``Suppress(lpar)``. If lpar is passed as - an expression (such as ``Literal('(')``), then it will be kept in - the parsed results, and grouped with them. (default= ``Suppress('(')``) - - ``rpar`` - expression for matching right-parentheses; if passed as a - str, then will be parsed as ``Suppress(rpar)``. If rpar is passed as - an expression (such as ``Literal(')')``), then it will be kept in - the parsed results, and grouped with them. 
(default= ``Suppress(')')``) - - Example:: - - # simple example of four-function arithmetic with ints and - # variable names - integer = pyparsing_common.signed_integer - varname = pyparsing_common.identifier - - arith_expr = infix_notation(integer | varname, - [ - ('-', 1, OpAssoc.RIGHT), - (one_of('* /'), 2, OpAssoc.LEFT), - (one_of('+ -'), 2, OpAssoc.LEFT), - ]) - - arith_expr.run_tests(''' - 5+3*6 - (5+3)*6 - -2--11 - ''', full_dump=False) - - prints:: - - 5+3*6 - [[5, '+', [3, '*', 6]]] - - (5+3)*6 - [[[5, '+', 3], '*', 6]] - - (5+x)*y - [[[5, '+', 'x'], '*', 'y']] - - -2--11 - [[['-', 2], '-', ['-', 11]]] - """ - - # captive version of FollowedBy that does not do parse actions or capture results names - class _FB(FollowedBy): - def parseImpl(self, instring, loc, doActions=True): - self.expr.try_parse(instring, loc) - return loc, [] - - _FB.__name__ = "FollowedBy>" - - ret = Forward() - if isinstance(lpar, str): - lpar = Suppress(lpar) - if isinstance(rpar, str): - rpar = Suppress(rpar) - - # if lpar and rpar are not suppressed, wrap in group - if not (isinstance(rpar, Suppress) and isinstance(rpar, Suppress)): - lastExpr = base_expr | Group(lpar + ret + rpar) - else: - lastExpr = base_expr | (lpar + ret + rpar) - - arity: int - rightLeftAssoc: opAssoc - pa: typing.Optional[ParseAction] - opExpr1: ParserElement - opExpr2: ParserElement - for i, operDef in enumerate(op_list): - opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4] # type: ignore[assignment] - if isinstance(opExpr, str_type): - opExpr = ParserElement._literalStringClass(opExpr) - opExpr = typing.cast(ParserElement, opExpr) - if arity == 3: - if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2: - raise ValueError( - "if numterms=3, opExpr must be a tuple or list of two expressions" - ) - opExpr1, opExpr2 = opExpr - term_name = f"{opExpr1}{opExpr2} term" - else: - term_name = f"{opExpr} term" - - if not 1 <= arity <= 3: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - - if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT): - raise ValueError("operator must indicate right or left associativity") - - thisExpr: ParserElement = Forward().set_name(term_name) - thisExpr = typing.cast(Forward, thisExpr) - if rightLeftAssoc is OpAssoc.LEFT: - if arity == 1: - matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...]) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group( - lastExpr + (opExpr + lastExpr)[1, ...] - ) - else: - matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...]) - elif arity == 3: - matchExpr = _FB( - lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr - ) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr)) - elif rightLeftAssoc is OpAssoc.RIGHT: - if arity == 1: - # try to avoid LR with this extra test - if not isinstance(opExpr, Opt): - opExpr = Opt(opExpr) - matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr) - elif arity == 2: - if opExpr is not None: - matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group( - lastExpr + (opExpr + thisExpr)[1, ...] - ) - else: - matchExpr = _FB(lastExpr + thisExpr) + Group( - lastExpr + thisExpr[1, ...] 
- ) - elif arity == 3: - matchExpr = _FB( - lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr - ) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) - if pa: - if isinstance(pa, (tuple, list)): - matchExpr.set_parse_action(*pa) - else: - matchExpr.set_parse_action(pa) - thisExpr <<= (matchExpr | lastExpr).setName(term_name) - lastExpr = thisExpr - ret <<= lastExpr - return ret - - -def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]): - """ - (DEPRECATED - use :class:`IndentedBlock` class instead) - Helper method for defining space-delimited indentation blocks, - such as those used to define block statements in Python source code. - - Parameters: - - - ``blockStatementExpr`` - expression defining syntax of statement that - is repeated within the indented block - - ``indentStack`` - list created by caller to manage indentation stack - (multiple ``statementWithIndentedBlock`` expressions within a single - grammar should share a common ``indentStack``) - - ``indent`` - boolean indicating whether block must be indented beyond - the current level; set to ``False`` for block of left-most statements - (default= ``True``) - - A valid block must contain at least one ``blockStatement``. - - (Note that indentedBlock uses internal parse actions which make it - incompatible with packrat parsing.) - - Example:: - - data = ''' - def A(z): - A1 - B = 100 - G = A2 - A2 - A3 - B - def BB(a,b,c): - BB1 - def BBA(): - bba1 - bba2 - bba3 - C - D - def spam(x,y): - def eggs(z): - pass - ''' - - - indentStack = [1] - stmt = Forward() - - identifier = Word(alphas, alphanums) - funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":") - func_body = indentedBlock(stmt, indentStack) - funcDef = Group(funcDecl + func_body) - - rvalue = Forward() - funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")") - rvalue << (funcCall | identifier | Word(nums)) - assignment = Group(identifier + "=" + rvalue) - stmt << (funcDef | assignment | identifier) - - module_body = stmt[1, ...] 
- - parseTree = module_body.parseString(data) - parseTree.pprint() - - prints:: - - [['def', - 'A', - ['(', 'z', ')'], - ':', - [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], - 'B', - ['def', - 'BB', - ['(', 'a', 'b', 'c', ')'], - ':', - [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], - 'C', - 'D', - ['def', - 'spam', - ['(', 'x', 'y', ')'], - ':', - [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] - """ - backup_stacks.append(indentStack[:]) - - def reset_stack(): - indentStack[:] = backup_stacks[-1] - - def checkPeerIndent(s, l, t): - if l >= len(s): - return - curCol = col(l, s) - if curCol != indentStack[-1]: - if curCol > indentStack[-1]: - raise ParseException(s, l, "illegal nesting") - raise ParseException(s, l, "not a peer entry") - - def checkSubIndent(s, l, t): - curCol = col(l, s) - if curCol > indentStack[-1]: - indentStack.append(curCol) - else: - raise ParseException(s, l, "not a subentry") - - def checkUnindent(s, l, t): - if l >= len(s): - return - curCol = col(l, s) - if not (indentStack and curCol in indentStack): - raise ParseException(s, l, "not an unindent") - if curCol < indentStack[-1]: - indentStack.pop() - - NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress()) - INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT") - PEER = Empty().set_parse_action(checkPeerIndent).set_name("") - UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT") - if indent: - smExpr = Group( - Opt(NL) - + INDENT - + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL)) - + UNDENT - ) - else: - smExpr = Group( - Opt(NL) - + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL)) - + Opt(UNDENT) - ) - - # add a parse action to remove backup_stack from list of backups - smExpr.add_parse_action( - lambda: backup_stacks.pop(-1) and None if backup_stacks else None - ) - smExpr.set_fail_action(lambda a, b, c, d: reset_stack()) - blockStatementExpr.ignore(_bslash + LineEnd()) - return smExpr.set_name("indented block") - - -# it's easy to get these comment structures wrong - they're very common, so may as well make them available -c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name( - "C style comment" -) -"Comment of the form ``/* ... */``" - -html_comment = Regex(r"").set_name("HTML comment") -"Comment of the form ````" - -rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line") -dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment") -"Comment of the form ``// ... (to end of line)``" - -cpp_style_comment = Combine( - Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment -).set_name("C++ style comment") -"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`" - -java_style_comment = cpp_style_comment -"Same as :class:`cpp_style_comment`" - -python_style_comment = Regex(r"#.*").set_name("Python style comment") -"Comment of the form ``# ... 
(to end of line)``" - - -# build list of built-in expressions, for future reference if a global default value -# gets updated -_builtin_exprs: List[ParserElement] = [ - v for v in vars().values() if isinstance(v, ParserElement) -] - - -# compatibility function, superseded by DelimitedList class -def delimited_list( - expr: Union[str, ParserElement], - delim: Union[str, ParserElement] = ",", - combine: bool = False, - min: typing.Optional[int] = None, - max: typing.Optional[int] = None, - *, - allow_trailing_delim: bool = False, -) -> ParserElement: - """(DEPRECATED - use :class:`DelimitedList` class)""" - return DelimitedList( - expr, delim, combine, min, max, allow_trailing_delim=allow_trailing_delim - ) - - -# pre-PEP8 compatible names -# fmt: off -opAssoc = OpAssoc -anyOpenTag = any_open_tag -anyCloseTag = any_close_tag -commonHTMLEntity = common_html_entity -cStyleComment = c_style_comment -htmlComment = html_comment -restOfLine = rest_of_line -dblSlashComment = dbl_slash_comment -cppStyleComment = cpp_style_comment -javaStyleComment = java_style_comment -pythonStyleComment = python_style_comment - -@replaced_by_pep8(DelimitedList) -def delimitedList(): ... - -@replaced_by_pep8(DelimitedList) -def delimited_list(): ... - -@replaced_by_pep8(counted_array) -def countedArray(): ... - -@replaced_by_pep8(match_previous_literal) -def matchPreviousLiteral(): ... - -@replaced_by_pep8(match_previous_expr) -def matchPreviousExpr(): ... - -@replaced_by_pep8(one_of) -def oneOf(): ... - -@replaced_by_pep8(dict_of) -def dictOf(): ... - -@replaced_by_pep8(original_text_for) -def originalTextFor(): ... - -@replaced_by_pep8(nested_expr) -def nestedExpr(): ... - -@replaced_by_pep8(make_html_tags) -def makeHTMLTags(): ... - -@replaced_by_pep8(make_xml_tags) -def makeXMLTags(): ... - -@replaced_by_pep8(replace_html_entity) -def replaceHTMLEntity(): ... - -@replaced_by_pep8(infix_notation) -def infixNotation(): ... -# fmt: on diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/importlib_resources/_adapters.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/importlib_resources/_adapters.py deleted file mode 100644 index ea363d86a564b5450666aa00aecd46353326a75a..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_vendor/importlib_resources/_adapters.py +++ /dev/null @@ -1,170 +0,0 @@ -from contextlib import suppress -from io import TextIOWrapper - -from . import abc - - -class SpecLoaderAdapter: - """ - Adapt a package spec to adapt the underlying loader. - """ - - def __init__(self, spec, adapter=lambda spec: spec.loader): - self.spec = spec - self.loader = adapter(spec) - - def __getattr__(self, name): - return getattr(self.spec, name) - - -class TraversableResourcesLoader: - """ - Adapt a loader to provide TraversableResources. - """ - - def __init__(self, spec): - self.spec = spec - - def get_resource_reader(self, name): - return CompatibilityFiles(self.spec)._native() - - -def _io_wrapper(file, mode='r', *args, **kwargs): - if mode == 'r': - return TextIOWrapper(file, *args, **kwargs) - elif mode == 'rb': - return file - raise ValueError( - "Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode) - ) - - -class CompatibilityFiles: - """ - Adapter for an existing or non-existent resource reader - to provide a compatibility .files(). - """ - - class SpecPath(abc.Traversable): - """ - Path tied to a module spec. 
- Can be read and exposes the resource reader children. - """ - - def __init__(self, spec, reader): - self._spec = spec - self._reader = reader - - def iterdir(self): - if not self._reader: - return iter(()) - return iter( - CompatibilityFiles.ChildPath(self._reader, path) - for path in self._reader.contents() - ) - - def is_file(self): - return False - - is_dir = is_file - - def joinpath(self, other): - if not self._reader: - return CompatibilityFiles.OrphanPath(other) - return CompatibilityFiles.ChildPath(self._reader, other) - - @property - def name(self): - return self._spec.name - - def open(self, mode='r', *args, **kwargs): - return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs) - - class ChildPath(abc.Traversable): - """ - Path tied to a resource reader child. - Can be read but doesn't expose any meaningful children. - """ - - def __init__(self, reader, name): - self._reader = reader - self._name = name - - def iterdir(self): - return iter(()) - - def is_file(self): - return self._reader.is_resource(self.name) - - def is_dir(self): - return not self.is_file() - - def joinpath(self, other): - return CompatibilityFiles.OrphanPath(self.name, other) - - @property - def name(self): - return self._name - - def open(self, mode='r', *args, **kwargs): - return _io_wrapper( - self._reader.open_resource(self.name), mode, *args, **kwargs - ) - - class OrphanPath(abc.Traversable): - """ - Orphan path, not tied to a module spec or resource reader. - Can't be read and doesn't expose any meaningful children. - """ - - def __init__(self, *path_parts): - if len(path_parts) < 1: - raise ValueError('Need at least one path part to construct a path') - self._path = path_parts - - def iterdir(self): - return iter(()) - - def is_file(self): - return False - - is_dir = is_file - - def joinpath(self, other): - return CompatibilityFiles.OrphanPath(*self._path, other) - - @property - def name(self): - return self._path[-1] - - def open(self, mode='r', *args, **kwargs): - raise FileNotFoundError("Can't open orphan path") - - def __init__(self, spec): - self.spec = spec - - @property - def _reader(self): - with suppress(AttributeError): - return self.spec.loader.get_resource_reader(self.spec.name) - - def _native(self): - """ - Return the native reader if it supports files(). - """ - reader = self._reader - return reader if hasattr(reader, 'files') else self - - def __getattr__(self, attr): - return getattr(self._reader, attr) - - def files(self): - return CompatibilityFiles.SpecPath(self.spec, self._reader) - - -def wrap_spec(package): - """ - Construct a package spec with traversable compatibility - on the spec/loader/reader. 
- """ - return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader) diff --git a/spaces/ThankGod/anime-gan/Makefile b/spaces/ThankGod/anime-gan/Makefile deleted file mode 100644 index ff727d0ac0d87aa292e9ddbd99218cadb034f3a4..0000000000000000000000000000000000000000 --- a/spaces/ThankGod/anime-gan/Makefile +++ /dev/null @@ -1,27 +0,0 @@ -install: - pip install --upgrade pip &&\ - pip install -r requirements.txt - -test: - python -m pytest -vvv --cov=hello --cov=greeting \ - --cov=smath --cov=web tests - python -m pytest --nbval notebook.ipynb #tests our jupyter notebook - #python -m pytest -v tests/test_web.py #if you just want to test web - -debug: - python -m pytest -vv --pdb #Debugger is invoked - -one-test: - python -m pytest -vv tests/test_greeting.py::test_my_name4 - -debugthree: - #not working the way I expect - python -m pytest -vv --pdb --maxfail=4 # drop to PDB for first three failures - -format: - black *.py - -lint: - pylint --disable=R,C *.py - -all: install lint test format \ No newline at end of file diff --git a/spaces/TheKitten/Images/index.html b/spaces/TheKitten/Images/index.html deleted file mode 100644 index 6250c2958a7186a4e64f21c02b0359ff5ecd7e97..0000000000000000000000000000000000000000 --- a/spaces/TheKitten/Images/index.html +++ /dev/null @@ -1,16 +0,0 @@ - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/spaces/Toxfu/BIgVisionEffnetB2/app.py b/spaces/Toxfu/BIgVisionEffnetB2/app.py deleted file mode 100644 index 2707f0917d3670a4cb7f2241ca7ca9c524ac74fe..0000000000000000000000000000000000000000 --- a/spaces/Toxfu/BIgVisionEffnetB2/app.py +++ /dev/null @@ -1,81 +0,0 @@ -### 1. Imports and class names setup ### -import gradio as gr -import os -import torch - -from model import create_effnetb2_model -from timeit import default_timer as timer -from typing import Tuple, Dict - -# Setup class names -with open("class_names.txt", "r") as f: # reading them in from class_names.txt - class_names = [food_name.strip() for food_name in f.readlines()] - -### 2. Model and transforms preparation ### - -# Create model -effnetb2, effnetb2_transforms = create_effnetb2_model( - num_classes=101, # could also use len(class_names) -) - -# Load saved weights -effnetb2.load_state_dict( - torch.load( - f="effnetb2_food101_100_percent.pth", - map_location=torch.device("cpu"), # load to CPU - ) -) - -### 3. Predict function ### - -# Create predict function -def predict(img) -> Tuple[Dict, float]: - """Transforms and performs a prediction on img and returns prediction and time taken. - """ - # Start the timer - start_time = timer() - - # Transform the target image and add a batch dimension - img = effnetb2_transforms(img).unsqueeze(0) - - # Put model into evaluation mode and turn on inference mode - effnetb2.eval() - with torch.inference_mode(): - # Pass the transformed image through the model and turn the prediction logits into prediction probabilities - pred_probs = torch.softmax(effnetb2(img), dim=1) - - # Create a prediction label and prediction probability dictionary for each prediction class (this is the required format for Gradio's output parameter) - pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))} - - # Calculate the prediction time - pred_time = round(timer() - start_time, 5) - - # Return the prediction dictionary and prediction time - return pred_labels_and_probs, pred_time - -### 4. 
Gradio app ### - -# Create title, description and article strings -title = "FoodVision Big 🍔👁" -description = "An EfficientNetB2 feature extractor computer vision model to classify images of food into 101 different classes" -article = "Created for Toxfu" - -# Create examples list from "examples/" directory -example_list = [["examples/" + example] for example in os.listdir("examples")] - -# Create Gradio interface -demo = gr.Interface( - fn=predict, - inputs=gr.Image(type="pil"), - outputs=[ - gr.Label(num_top_classes=5, label="Predictions"), - gr.Number(label="Prediction time (s)"), - ], - examples=example_list, - title=title, - description=description, - article=article, -) - -# Launch the app! -demo.launch() diff --git a/spaces/UNIST-Eunchan/Summarizing-app/README.md b/spaces/UNIST-Eunchan/Summarizing-app/README.md deleted file mode 100644 index 3e8fe31b843cdb1a3d3039d7e0bba7cba6435e24..0000000000000000000000000000000000000000 --- a/spaces/UNIST-Eunchan/Summarizing-app/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Summarizing App -emoji: 🐠 -colorFrom: red -colorTo: yellow -sdk: streamlit -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
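
The Summarizing-app README deleted above only declares the Space metadata (`sdk: streamlit`, `app_file: app.py`); the application file itself is not shown in this part of the diff. As a rough illustration of what such an `app_file` typically contains, here is a minimal hedged sketch of a Streamlit summarization app. The model name (`facebook/bart-large-cnn`), the widget labels, and the length limits are illustrative assumptions, not the Space's actual code.

```python
# Hypothetical minimal Streamlit summarizer - NOT the deleted Space's actual app.py.
# Model name, labels, and length limits are assumptions for illustration only.
import streamlit as st
from transformers import pipeline

st.title("Summarizing App")

# Loading the pipeline at import time keeps the sketch short; a real app would
# usually cache the model (e.g. with st.cache_resource) to avoid reloading on rerun.
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")

text = st.text_area("Paste text to summarize", height=300)
if st.button("Summarize") and text.strip():
    # transformers summarization pipelines return a list of dicts
    # with a "summary_text" field.
    result = summarizer(text, max_length=130, min_length=30, do_sample=False)
    st.write(result[0]["summary_text"])
```

In a Space configured like the README above, this file would simply be saved as `app.py` at the repository root so that the `app_file` field points at it.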
diff --git a/spaces/Vageesh1/Voice_Cloner/README.md b/spaces/Vageesh1/Voice_Cloner/README.md deleted file mode 100644 index 5a3ccde039a2856b9ccf431089d7e8668c587808..0000000000000000000000000000000000000000 --- a/spaces/Vageesh1/Voice_Cloner/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Voice Cloner -emoji: ⚡ -colorFrom: yellow -colorTo: yellow -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Xeaser/rvc-tes/infer_pack/transforms.py b/spaces/Xeaser/rvc-tes/infer_pack/transforms.py deleted file mode 100644 index a11f799e023864ff7082c1f49c0cc18351a13b47..0000000000000000000000000000000000000000 --- a/spaces/Xeaser/rvc-tes/infer_pack/transforms.py +++ /dev/null @@ -1,209 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = {"tails": tails, "tail_bound": tail_bound} - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1 - - -def unconstrained_rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails="linear", - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == "linear": - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError("{} tails are not implemented.".format(tails)) - - ( - outputs[inside_interval_mask], - logabsdet[inside_interval_mask], - ) = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, - right=tail_bound, - bottom=-tail_bound, - top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - ) 
- - return outputs, logabsdet - - -def rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0.0, - right=1.0, - bottom=0.0, - top=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError("Input to a transform is not within its domain") - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError("Minimal bin width too large for the number of bins") - if min_bin_height * num_bins > 1.0: - raise ValueError("Minimal bin height too large for the number of bins") - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) + input_heights * (input_delta - input_derivatives) - b = input_heights * input_derivatives - (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) - c = -input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * ( - input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta - ) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * 
theta_one_minus_theta - ) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/YanzBotz/stablediffusionapi-disney-pixar-cartoon/app.py b/spaces/YanzBotz/stablediffusionapi-disney-pixar-cartoon/app.py deleted file mode 100644 index 27fd27bfffeb59d210fd2c7769378680cb81844c..0000000000000000000000000000000000000000 --- a/spaces/YanzBotz/stablediffusionapi-disney-pixar-cartoon/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/stablediffusionapi/disney-pixar-cartoon").launch() \ No newline at end of file diff --git a/spaces/YotamNitzan/domain-expansion/legacy.py b/spaces/YotamNitzan/domain-expansion/legacy.py deleted file mode 100644 index 9387d79f23224642ca316399de2f0258f72de79b..0000000000000000000000000000000000000000 --- a/spaces/YotamNitzan/domain-expansion/legacy.py +++ /dev/null @@ -1,320 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import click -import pickle -import re -import copy -import numpy as np -import torch -import dnnlib -from torch_utils import misc - -#---------------------------------------------------------------------------- - -def load_network_pkl(f, force_fp16=False): - data = _LegacyUnpickler(f).load() - - # Legacy TensorFlow pickle => convert. - if isinstance(data, tuple) and len(data) == 3 and all(isinstance(net, _TFNetworkStub) for net in data): - tf_G, tf_D, tf_Gs = data - G = convert_tf_generator(tf_G) - D = convert_tf_discriminator(tf_D) - G_ema = convert_tf_generator(tf_Gs) - data = dict(G=G, D=D, G_ema=G_ema) - - # Add missing fields. - if 'training_set_kwargs' not in data: - data['training_set_kwargs'] = None - if 'augment_pipe' not in data: - data['augment_pipe'] = None - - # Validate contents. - assert isinstance(data['G'], torch.nn.Module) - assert isinstance(data['D'], torch.nn.Module) - assert isinstance(data['G_ema'], torch.nn.Module) - assert isinstance(data['training_set_kwargs'], (dict, type(None))) - assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None))) - - # Force FP16. 
- if force_fp16: - for key in ['G', 'D', 'G_ema']: - old = data[key] - kwargs = copy.deepcopy(old.init_kwargs) - if key.startswith('G'): - kwargs.synthesis_kwargs = dnnlib.EasyDict(kwargs.get('synthesis_kwargs', {})) - kwargs.synthesis_kwargs.num_fp16_res = 4 - kwargs.synthesis_kwargs.conv_clamp = 256 - if key.startswith('D'): - kwargs.num_fp16_res = 4 - kwargs.conv_clamp = 256 - if kwargs != old.init_kwargs: - new = type(old)(**kwargs).eval().requires_grad_(False) - misc.copy_params_and_buffers(old, new, require_all=True) - data[key] = new - return data - -#---------------------------------------------------------------------------- - -class _TFNetworkStub(dnnlib.EasyDict): - pass - -class _LegacyUnpickler(pickle.Unpickler): - def find_class(self, module, name): - if module == 'dnnlib.tflib.network' and name == 'Network': - return _TFNetworkStub - return super().find_class(module, name) - -#---------------------------------------------------------------------------- - -def _collect_tf_params(tf_net): - # pylint: disable=protected-access - tf_params = dict() - def recurse(prefix, tf_net): - for name, value in tf_net.variables: - tf_params[prefix + name] = value - for name, comp in tf_net.components.items(): - recurse(prefix + name + '/', comp) - recurse('', tf_net) - return tf_params - -#---------------------------------------------------------------------------- - -def _populate_module_params(module, *patterns): - for name, tensor in misc.named_params_and_buffers(module): - found = False - value = None - for pattern, value_fn in zip(patterns[0::2], patterns[1::2]): - match = re.fullmatch(pattern, name) - if match: - found = True - if value_fn is not None: - value = value_fn(*match.groups()) - break - try: - assert found - if value is not None: - tensor.copy_(torch.from_numpy(np.array(value))) - except: - print(name, list(tensor.shape)) - raise - -#---------------------------------------------------------------------------- - -def convert_tf_generator(tf_G): - if tf_G.version < 4: - raise ValueError('TensorFlow pickle version too low') - - # Collect kwargs. - tf_kwargs = tf_G.static_kwargs - known_kwargs = set() - def kwarg(tf_name, default=None, none=None): - known_kwargs.add(tf_name) - val = tf_kwargs.get(tf_name, default) - return val if val is not None else none - - # Convert kwargs. - kwargs = dnnlib.EasyDict( - z_dim = kwarg('latent_size', 512), - c_dim = kwarg('label_size', 0), - w_dim = kwarg('dlatent_size', 512), - img_resolution = kwarg('resolution', 1024), - img_channels = kwarg('num_channels', 3), - mapping_kwargs = dnnlib.EasyDict( - num_layers = kwarg('mapping_layers', 8), - embed_features = kwarg('label_fmaps', None), - layer_features = kwarg('mapping_fmaps', None), - activation = kwarg('mapping_nonlinearity', 'lrelu'), - lr_multiplier = kwarg('mapping_lrmul', 0.01), - w_avg_beta = kwarg('w_avg_beta', 0.995, none=1), - ), - synthesis_kwargs = dnnlib.EasyDict( - channel_base = kwarg('fmap_base', 16384) * 2, - channel_max = kwarg('fmap_max', 512), - num_fp16_res = kwarg('num_fp16_res', 0), - conv_clamp = kwarg('conv_clamp', None), - architecture = kwarg('architecture', 'skip'), - resample_filter = kwarg('resample_kernel', [1,3,3,1]), - use_noise = kwarg('use_noise', True), - activation = kwarg('nonlinearity', 'lrelu'), - ), - ) - - # Check for unknown kwargs. 
- kwarg('truncation_psi') - kwarg('truncation_cutoff') - kwarg('style_mixing_prob') - kwarg('structure') - unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs) - if len(unknown_kwargs) > 0: - raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0]) - - # Collect params. - tf_params = _collect_tf_params(tf_G) - for name, value in list(tf_params.items()): - match = re.fullmatch(r'ToRGB_lod(\d+)/(.*)', name) - if match: - r = kwargs.img_resolution // (2 ** int(match.group(1))) - tf_params[f'{r}x{r}/ToRGB/{match.group(2)}'] = value - kwargs.synthesis.kwargs.architecture = 'orig' - #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}') - - # Convert params. - from training import networks - G = networks.Generator(**kwargs).eval().requires_grad_(False) - # pylint: disable=unnecessary-lambda - _populate_module_params(G, - r'mapping\.w_avg', lambda: tf_params[f'dlatent_avg'], - r'mapping\.embed\.weight', lambda: tf_params[f'mapping/LabelEmbed/weight'].transpose(), - r'mapping\.embed\.bias', lambda: tf_params[f'mapping/LabelEmbed/bias'], - r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'mapping/Dense{i}/weight'].transpose(), - r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'mapping/Dense{i}/bias'], - r'synthesis\.b4\.const', lambda: tf_params[f'synthesis/4x4/Const/const'][0], - r'synthesis\.b4\.conv1\.weight', lambda: tf_params[f'synthesis/4x4/Conv/weight'].transpose(3, 2, 0, 1), - r'synthesis\.b4\.conv1\.bias', lambda: tf_params[f'synthesis/4x4/Conv/bias'], - r'synthesis\.b4\.conv1\.noise_const', lambda: tf_params[f'synthesis/noise0'][0, 0], - r'synthesis\.b4\.conv1\.noise_strength', lambda: tf_params[f'synthesis/4x4/Conv/noise_strength'], - r'synthesis\.b4\.conv1\.affine\.weight', lambda: tf_params[f'synthesis/4x4/Conv/mod_weight'].transpose(), - r'synthesis\.b4\.conv1\.affine\.bias', lambda: tf_params[f'synthesis/4x4/Conv/mod_bias'] + 1, - r'synthesis\.b(\d+)\.conv0\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/weight'][::-1, ::-1].transpose(3, 2, 0, 1), - r'synthesis\.b(\d+)\.conv0\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/bias'], - r'synthesis\.b(\d+)\.conv0\.noise_const', lambda r: tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-5}'][0, 0], - r'synthesis\.b(\d+)\.conv0\.noise_strength', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/noise_strength'], - r'synthesis\.b(\d+)\.conv0\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_weight'].transpose(), - r'synthesis\.b(\d+)\.conv0\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_bias'] + 1, - r'synthesis\.b(\d+)\.conv1\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/weight'].transpose(3, 2, 0, 1), - r'synthesis\.b(\d+)\.conv1\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/bias'], - r'synthesis\.b(\d+)\.conv1\.noise_const', lambda r: tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-4}'][0, 0], - r'synthesis\.b(\d+)\.conv1\.noise_strength', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/noise_strength'], - r'synthesis\.b(\d+)\.conv1\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_weight'].transpose(), - r'synthesis\.b(\d+)\.conv1\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_bias'] + 1, - r'synthesis\.b(\d+)\.torgb\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/weight'].transpose(3, 2, 0, 1), - r'synthesis\.b(\d+)\.torgb\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/bias'], - r'synthesis\.b(\d+)\.torgb\.affine\.weight', lambda r: 
tf_params[f'synthesis/{r}x{r}/ToRGB/mod_weight'].transpose(), - r'synthesis\.b(\d+)\.torgb\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/mod_bias'] + 1, - r'synthesis\.b(\d+)\.skip\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Skip/weight'][::-1, ::-1].transpose(3, 2, 0, 1), - r'.*\.resample_filter', None, - ) - return G - -#---------------------------------------------------------------------------- - -def convert_tf_discriminator(tf_D): - if tf_D.version < 4: - raise ValueError('TensorFlow pickle version too low') - - # Collect kwargs. - tf_kwargs = tf_D.static_kwargs - known_kwargs = set() - def kwarg(tf_name, default=None): - known_kwargs.add(tf_name) - return tf_kwargs.get(tf_name, default) - - # Convert kwargs. - kwargs = dnnlib.EasyDict( - c_dim = kwarg('label_size', 0), - img_resolution = kwarg('resolution', 1024), - img_channels = kwarg('num_channels', 3), - architecture = kwarg('architecture', 'resnet'), - channel_base = kwarg('fmap_base', 16384) * 2, - channel_max = kwarg('fmap_max', 512), - num_fp16_res = kwarg('num_fp16_res', 0), - conv_clamp = kwarg('conv_clamp', None), - cmap_dim = kwarg('mapping_fmaps', None), - block_kwargs = dnnlib.EasyDict( - activation = kwarg('nonlinearity', 'lrelu'), - resample_filter = kwarg('resample_kernel', [1,3,3,1]), - freeze_layers = kwarg('freeze_layers', 0), - ), - mapping_kwargs = dnnlib.EasyDict( - num_layers = kwarg('mapping_layers', 0), - embed_features = kwarg('mapping_fmaps', None), - layer_features = kwarg('mapping_fmaps', None), - activation = kwarg('nonlinearity', 'lrelu'), - lr_multiplier = kwarg('mapping_lrmul', 0.1), - ), - epilogue_kwargs = dnnlib.EasyDict( - mbstd_group_size = kwarg('mbstd_group_size', None), - mbstd_num_channels = kwarg('mbstd_num_features', 1), - activation = kwarg('nonlinearity', 'lrelu'), - ), - ) - - # Check for unknown kwargs. - kwarg('structure') - unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs) - if len(unknown_kwargs) > 0: - raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0]) - - # Collect params. - tf_params = _collect_tf_params(tf_D) - for name, value in list(tf_params.items()): - match = re.fullmatch(r'FromRGB_lod(\d+)/(.*)', name) - if match: - r = kwargs.img_resolution // (2 ** int(match.group(1))) - tf_params[f'{r}x{r}/FromRGB/{match.group(2)}'] = value - kwargs.architecture = 'orig' - #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}') - - # Convert params. 
- from training import networks - D = networks.Discriminator(**kwargs).eval().requires_grad_(False) - # pylint: disable=unnecessary-lambda - _populate_module_params(D, - r'b(\d+)\.fromrgb\.weight', lambda r: tf_params[f'{r}x{r}/FromRGB/weight'].transpose(3, 2, 0, 1), - r'b(\d+)\.fromrgb\.bias', lambda r: tf_params[f'{r}x{r}/FromRGB/bias'], - r'b(\d+)\.conv(\d+)\.weight', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/weight'].transpose(3, 2, 0, 1), - r'b(\d+)\.conv(\d+)\.bias', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/bias'], - r'b(\d+)\.skip\.weight', lambda r: tf_params[f'{r}x{r}/Skip/weight'].transpose(3, 2, 0, 1), - r'mapping\.embed\.weight', lambda: tf_params[f'LabelEmbed/weight'].transpose(), - r'mapping\.embed\.bias', lambda: tf_params[f'LabelEmbed/bias'], - r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'Mapping{i}/weight'].transpose(), - r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'Mapping{i}/bias'], - r'b4\.conv\.weight', lambda: tf_params[f'4x4/Conv/weight'].transpose(3, 2, 0, 1), - r'b4\.conv\.bias', lambda: tf_params[f'4x4/Conv/bias'], - r'b4\.fc\.weight', lambda: tf_params[f'4x4/Dense0/weight'].transpose(), - r'b4\.fc\.bias', lambda: tf_params[f'4x4/Dense0/bias'], - r'b4\.out\.weight', lambda: tf_params[f'Output/weight'].transpose(), - r'b4\.out\.bias', lambda: tf_params[f'Output/bias'], - r'.*\.resample_filter', None, - ) - return D - -#---------------------------------------------------------------------------- - -@click.command() -@click.option('--source', help='Input pickle', required=True, metavar='PATH') -@click.option('--dest', help='Output pickle', required=True, metavar='PATH') -@click.option('--force-fp16', help='Force the networks to use FP16', type=bool, default=False, metavar='BOOL', show_default=True) -def convert_network_pickle(source, dest, force_fp16): - """Convert legacy network pickle into the native PyTorch format. - - The tool is able to load the main network configurations exported using the TensorFlow version of StyleGAN2 or StyleGAN2-ADA. - It does not support e.g. StyleGAN2-ADA comparison methods, StyleGAN2 configs A-D, or StyleGAN1 networks. - - Example: - - \b - python legacy.py \\ - --source=https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/stylegan2-cat-config-f.pkl \\ - --dest=stylegan2-cat-config-f.pkl - """ - print(f'Loading "{source}"...') - with dnnlib.util.open_url(source) as f: - data = load_network_pkl(f, force_fp16=force_fp16) - print(f'Saving "{dest}"...') - with open(dest, 'wb') as f: - pickle.dump(data, f) - print('Done.') - -#---------------------------------------------------------------------------- - -if __name__ == "__main__": - convert_network_pickle() # pylint: disable=no-value-for-parameter - -#---------------------------------------------------------------------------- diff --git a/spaces/Yukki-Yui/moe-tts/monotonic_align/__init__.py b/spaces/Yukki-Yui/moe-tts/monotonic_align/__init__.py deleted file mode 100644 index 40b6f64aa116c74cac2f6a33444c9eeea2fdb38c..0000000000000000000000000000000000000000 --- a/spaces/Yukki-Yui/moe-tts/monotonic_align/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -from numpy import zeros, int32, float32 -from torch import from_numpy - -from .core import maximum_path_jit - - -def maximum_path(neg_cent, mask): - """ numba optimized version. 
- neg_cent: [b, t_t, t_s] - mask: [b, t_t, t_s] - """ - device = neg_cent.device - dtype = neg_cent.dtype - neg_cent = neg_cent.data.cpu().numpy().astype(float32) - path = zeros(neg_cent.shape, dtype=int32) - - t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32) - t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32) - maximum_path_jit(path, neg_cent, t_t_max, t_s_max) - return from_numpy(path).to(device=device, dtype=dtype) - diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/necks/fpg.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/necks/fpg.py deleted file mode 100644 index c8e0d163ccf8cef6211530ba6c1b4d558ff6403f..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/necks/fpg.py +++ /dev/null @@ -1,398 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule, caffe2_xavier_init, constant_init, is_norm - -from ..builder import NECKS - - -class Transition(nn.Module): - """Base class for transition. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - """ - - def __init__(self, in_channels, out_channels): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - - def forward(x): - pass - - -class UpInterpolationConv(Transition): - """A transition used for up-sampling. - - Up-sample the input by interpolation then refines the feature by - a convolution layer. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - scale_factor (int): Up-sampling factor. Default: 2. - mode (int): Interpolation mode. Default: nearest. - align_corners (bool): Whether align corners when interpolation. - Default: None. - kernel_size (int): Kernel size for the conv. Default: 3. - """ - - def __init__(self, - in_channels, - out_channels, - scale_factor=2, - mode='nearest', - align_corners=None, - kernel_size=3, - **kwargs): - super().__init__(in_channels, out_channels) - self.mode = mode - self.scale_factor = scale_factor - self.align_corners = align_corners - self.conv = ConvModule( - in_channels, - out_channels, - kernel_size, - padding=(kernel_size - 1) // 2, - **kwargs) - - def forward(self, x): - x = F.interpolate( - x, - scale_factor=self.scale_factor, - mode=self.mode, - align_corners=self.align_corners) - x = self.conv(x) - return x - - -class LastConv(Transition): - """A transition used for refining the output of the last stage. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - num_inputs (int): Number of inputs of the FPN features. - kernel_size (int): Kernel size for the conv. Default: 3. - """ - - def __init__(self, - in_channels, - out_channels, - num_inputs, - kernel_size=3, - **kwargs): - super().__init__(in_channels, out_channels) - self.num_inputs = num_inputs - self.conv_out = ConvModule( - in_channels, - out_channels, - kernel_size, - padding=(kernel_size - 1) // 2, - **kwargs) - - def forward(self, inputs): - assert len(inputs) == self.num_inputs - return self.conv_out(inputs[-1]) - - -@NECKS.register_module() -class FPG(nn.Module): - """FPG. - - Implementation of `Feature Pyramid Grids (FPG) - `_. - This implementation only gives the basic structure stated in the paper. - But users can implement different type of transitions to fully explore the - the potential power of the structure of FPG. 
- - Args: - in_channels (int): Number of input channels (feature maps of all levels - should have the same channels). - out_channels (int): Number of output channels (used at each scale) - num_outs (int): Number of output scales. - stack_times (int): The number of times the pyramid architecture will - be stacked. - paths (list[str]): Specify the path order of each stack level. - Each element in the list should be either 'bu' (bottom-up) or - 'td' (top-down). - inter_channels (int): Number of inter channels. - same_up_trans (dict): Transition that goes down at the same stage. - same_down_trans (dict): Transition that goes up at the same stage. - across_lateral_trans (dict): Across-pathway same-stage - across_down_trans (dict): Across-pathway bottom-up connection. - across_up_trans (dict): Across-pathway top-down connection. - across_skip_trans (dict): Across-pathway skip connection. - output_trans (dict): Transition that trans the output of the - last stage. - start_level (int): Index of the start input backbone level used to - build the feature pyramid. Default: 0. - end_level (int): Index of the end input backbone level (exclusive) to - build the feature pyramid. Default: -1, which means the last level. - add_extra_convs (bool): It decides whether to add conv - layers on top of the original feature maps. Default to False. - If True, its actual mode is specified by `extra_convs_on_inputs`. - norm_cfg (dict): Config dict for normalization layer. Default: None. - """ - - transition_types = { - 'conv': ConvModule, - 'interpolation_conv': UpInterpolationConv, - 'last_conv': LastConv, - } - - def __init__(self, - in_channels, - out_channels, - num_outs, - stack_times, - paths, - inter_channels=None, - same_down_trans=None, - same_up_trans=dict( - type='conv', kernel_size=3, stride=2, padding=1), - across_lateral_trans=dict(type='conv', kernel_size=1), - across_down_trans=dict(type='conv', kernel_size=3), - across_up_trans=None, - across_skip_trans=dict(type='identity'), - output_trans=dict(type='last_conv', kernel_size=3), - start_level=0, - end_level=-1, - add_extra_convs=False, - norm_cfg=None, - skip_inds=None): - super(FPG, self).__init__() - assert isinstance(in_channels, list) - self.in_channels = in_channels - self.out_channels = out_channels - self.num_ins = len(in_channels) - self.num_outs = num_outs - if inter_channels is None: - self.inter_channels = [out_channels for _ in range(num_outs)] - elif isinstance(inter_channels, int): - self.inter_channels = [inter_channels for _ in range(num_outs)] - else: - assert isinstance(inter_channels, list) - assert len(inter_channels) == num_outs - self.inter_channels = inter_channels - self.stack_times = stack_times - self.paths = paths - assert isinstance(paths, list) and len(paths) == stack_times - for d in paths: - assert d in ('bu', 'td') - - self.same_down_trans = same_down_trans - self.same_up_trans = same_up_trans - self.across_lateral_trans = across_lateral_trans - self.across_down_trans = across_down_trans - self.across_up_trans = across_up_trans - self.output_trans = output_trans - self.across_skip_trans = across_skip_trans - - self.with_bias = norm_cfg is None - # skip inds must be specified if across skip trans is not None - if self.across_skip_trans is not None: - skip_inds is not None - self.skip_inds = skip_inds - assert len(self.skip_inds[0]) <= self.stack_times - - if end_level == -1: - self.backbone_end_level = self.num_ins - assert num_outs >= self.num_ins - start_level - else: - # if end_level < inputs, no extra level is 
allowed - self.backbone_end_level = end_level - assert end_level <= len(in_channels) - assert num_outs == end_level - start_level - self.start_level = start_level - self.end_level = end_level - self.add_extra_convs = add_extra_convs - - # build lateral 1x1 convs to reduce channels - self.lateral_convs = nn.ModuleList() - for i in range(self.start_level, self.backbone_end_level): - l_conv = nn.Conv2d(self.in_channels[i], - self.inter_channels[i - self.start_level], 1) - self.lateral_convs.append(l_conv) - - extra_levels = num_outs - self.backbone_end_level + self.start_level - self.extra_downsamples = nn.ModuleList() - for i in range(extra_levels): - if self.add_extra_convs: - fpn_idx = self.backbone_end_level - self.start_level + i - extra_conv = nn.Conv2d( - self.inter_channels[fpn_idx - 1], - self.inter_channels[fpn_idx], - 3, - stride=2, - padding=1) - self.extra_downsamples.append(extra_conv) - else: - self.extra_downsamples.append(nn.MaxPool2d(1, stride=2)) - - self.fpn_transitions = nn.ModuleList() # stack times - for s in range(self.stack_times): - stage_trans = nn.ModuleList() # num of feature levels - for i in range(self.num_outs): - # same, across_lateral, across_down, across_up - trans = nn.ModuleDict() - if s in self.skip_inds[i]: - stage_trans.append(trans) - continue - # build same-stage down trans (used in bottom-up paths) - if i == 0 or self.same_up_trans is None: - same_up_trans = None - else: - same_up_trans = self.build_trans( - self.same_up_trans, self.inter_channels[i - 1], - self.inter_channels[i]) - trans['same_up'] = same_up_trans - # build same-stage up trans (used in top-down paths) - if i == self.num_outs - 1 or self.same_down_trans is None: - same_down_trans = None - else: - same_down_trans = self.build_trans( - self.same_down_trans, self.inter_channels[i + 1], - self.inter_channels[i]) - trans['same_down'] = same_down_trans - # build across lateral trans - across_lateral_trans = self.build_trans( - self.across_lateral_trans, self.inter_channels[i], - self.inter_channels[i]) - trans['across_lateral'] = across_lateral_trans - # build across down trans - if i == self.num_outs - 1 or self.across_down_trans is None: - across_down_trans = None - else: - across_down_trans = self.build_trans( - self.across_down_trans, self.inter_channels[i + 1], - self.inter_channels[i]) - trans['across_down'] = across_down_trans - # build across up trans - if i == 0 or self.across_up_trans is None: - across_up_trans = None - else: - across_up_trans = self.build_trans( - self.across_up_trans, self.inter_channels[i - 1], - self.inter_channels[i]) - trans['across_up'] = across_up_trans - if self.across_skip_trans is None: - across_skip_trans = None - else: - across_skip_trans = self.build_trans( - self.across_skip_trans, self.inter_channels[i - 1], - self.inter_channels[i]) - trans['across_skip'] = across_skip_trans - # build across_skip trans - stage_trans.append(trans) - self.fpn_transitions.append(stage_trans) - - self.output_transition = nn.ModuleList() # output levels - for i in range(self.num_outs): - trans = self.build_trans( - self.output_trans, - self.inter_channels[i], - self.out_channels, - num_inputs=self.stack_times + 1) - self.output_transition.append(trans) - - self.relu = nn.ReLU(inplace=True) - - def build_trans(self, cfg, in_channels, out_channels, **extra_args): - cfg_ = cfg.copy() - trans_type = cfg_.pop('type') - trans_cls = self.transition_types[trans_type] - return trans_cls(in_channels, out_channels, **cfg_, **extra_args) - - def init_weights(self): - for m in 
self.modules(): - if isinstance(m, nn.Conv2d): - caffe2_xavier_init(m) - elif is_norm(m): - constant_init(m, 1.0) - - def fuse(self, fuse_dict): - out = None - for item in fuse_dict.values(): - if item is not None: - if out is None: - out = item - else: - out = out + item - return out - - def forward(self, inputs): - assert len(inputs) == len(self.in_channels) - - # build all levels from original feature maps - feats = [ - lateral_conv(inputs[i + self.start_level]) - for i, lateral_conv in enumerate(self.lateral_convs) - ] - for downsample in self.extra_downsamples: - feats.append(downsample(feats[-1])) - - outs = [feats] - - for i in range(self.stack_times): - current_outs = outs[-1] - next_outs = [] - direction = self.paths[i] - for j in range(self.num_outs): - if i in self.skip_inds[j]: - next_outs.append(outs[-1][j]) - continue - # feature level - if direction == 'td': - lvl = self.num_outs - j - 1 - else: - lvl = j - # get transitions - if direction == 'td': - same_trans = self.fpn_transitions[i][lvl]['same_down'] - else: - same_trans = self.fpn_transitions[i][lvl]['same_up'] - across_lateral_trans = self.fpn_transitions[i][lvl][ - 'across_lateral'] - across_down_trans = self.fpn_transitions[i][lvl]['across_down'] - across_up_trans = self.fpn_transitions[i][lvl]['across_up'] - across_skip_trans = self.fpn_transitions[i][lvl]['across_skip'] - # init output - to_fuse = dict( - same=None, lateral=None, across_up=None, across_down=None) - # same downsample/upsample - if same_trans is not None: - to_fuse['same'] = same_trans(next_outs[-1]) - # across lateral - if across_lateral_trans is not None: - to_fuse['lateral'] = across_lateral_trans( - current_outs[lvl]) - # across downsample - if lvl > 0 and across_up_trans is not None: - to_fuse['across_up'] = across_up_trans(current_outs[lvl - - 1]) - # across upsample - if (lvl < self.num_outs - 1 and across_down_trans is not None): - to_fuse['across_down'] = across_down_trans( - current_outs[lvl + 1]) - if across_skip_trans is not None: - to_fuse['across_skip'] = across_skip_trans(outs[0][lvl]) - x = self.fuse(to_fuse) - next_outs.append(x) - - if direction == 'td': - outs.append(next_outs[::-1]) - else: - outs.append(next_outs) - - # output trans - final_outs = [] - for i in range(self.num_outs): - lvl_out_list = [] - for s in range(len(outs)): - lvl_out_list.append(outs[s][i]) - lvl_out = self.output_transition[i](lvl_out_list) - final_outs.append(lvl_out) - - return final_outs diff --git a/spaces/ahnafsamin/GroTTS-Tacotron2-24mins/app.py b/spaces/ahnafsamin/GroTTS-Tacotron2-24mins/app.py deleted file mode 100644 index e6406e545a936fb0e53631c63b95add3285ef74e..0000000000000000000000000000000000000000 --- a/spaces/ahnafsamin/GroTTS-Tacotron2-24mins/app.py +++ /dev/null @@ -1,43 +0,0 @@ -import os - -os.environ["CURL_CA_BUNDLE"]="" - -import gradio as gr -import time -import urllib.request -from pathlib import Path -import os -import torch -import scipy.io.wavfile -from espnet2.bin.tts_inference import Text2Speech -from espnet2.utils.types import str_or_none -from parallel_wavegan.utils import download_pretrained_model - - -gos_text2speech = Text2Speech.from_pretrained( - model_tag="https://huggingface.co/ahnafsamin/Tacotron2-gronings-24mins/resolve/main/tts_train_raw_char_tacotron_train.loss.ave.zip", - vocoder_tag="parallel_wavegan/ljspeech_parallel_wavegan.v3" -) - -def inference(text,lang): - with torch.no_grad(): - if lang == "gronings": - wav = gos_text2speech(text)["wav"] - scipy.io.wavfile.write("out.wav", gos_text2speech.fs , 
wav.view(-1).cpu().numpy()) - - return "out.wav", "out.wav" - -title = "GroTTS" -examples = [ - ['Ze gingen mit klas noar waddendiek, over en deur bragel lopen.', 'gronings'] -] - - - -gr.Interface( - inference, - [gr.inputs.Textbox(label="input text", lines=3), gr.inputs.Radio(choices=["gronings"], type="value", default="gronings", label="language")], - [gr.outputs.Audio(type="file", label="Output"), gr.outputs.File()], - title=title, - examples=examples - ).launch(enable_queue=True) diff --git a/spaces/akhaliq/JoJoGAN/e4e/utils/common.py b/spaces/akhaliq/JoJoGAN/e4e/utils/common.py deleted file mode 100644 index b19e18ddcb78b06678fa18e4a76da44fc511b789..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/JoJoGAN/e4e/utils/common.py +++ /dev/null @@ -1,55 +0,0 @@ -from PIL import Image -import matplotlib.pyplot as plt - - -# Log images -def log_input_image(x, opts): - return tensor2im(x) - - -def tensor2im(var): - # var shape: (3, H, W) - var = var.cpu().detach().transpose(0, 2).transpose(0, 1).numpy() - var = ((var + 1) / 2) - var[var < 0] = 0 - var[var > 1] = 1 - var = var * 255 - return Image.fromarray(var.astype('uint8')) - - -def vis_faces(log_hooks): - display_count = len(log_hooks) - fig = plt.figure(figsize=(8, 4 * display_count)) - gs = fig.add_gridspec(display_count, 3) - for i in range(display_count): - hooks_dict = log_hooks[i] - fig.add_subplot(gs[i, 0]) - if 'diff_input' in hooks_dict: - vis_faces_with_id(hooks_dict, fig, gs, i) - else: - vis_faces_no_id(hooks_dict, fig, gs, i) - plt.tight_layout() - return fig - - -def vis_faces_with_id(hooks_dict, fig, gs, i): - plt.imshow(hooks_dict['input_face']) - plt.title('Input\nOut Sim={:.2f}'.format(float(hooks_dict['diff_input']))) - fig.add_subplot(gs[i, 1]) - plt.imshow(hooks_dict['target_face']) - plt.title('Target\nIn={:.2f}, Out={:.2f}'.format(float(hooks_dict['diff_views']), - float(hooks_dict['diff_target']))) - fig.add_subplot(gs[i, 2]) - plt.imshow(hooks_dict['output_face']) - plt.title('Output\n Target Sim={:.2f}'.format(float(hooks_dict['diff_target']))) - - -def vis_faces_no_id(hooks_dict, fig, gs, i): - plt.imshow(hooks_dict['input_face'], cmap="gray") - plt.title('Input') - fig.add_subplot(gs[i, 1]) - plt.imshow(hooks_dict['target_face']) - plt.title('Target') - fig.add_subplot(gs[i, 2]) - plt.imshow(hooks_dict['output_face']) - plt.title('Output') diff --git a/spaces/akhaliq/deeplab2/evaluation/segmentation_and_tracking_quality.py b/spaces/akhaliq/deeplab2/evaluation/segmentation_and_tracking_quality.py deleted file mode 100644 index c6c3171c8c3e98cc265b296f7b9e44df190f0d9d..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/deeplab2/evaluation/segmentation_and_tracking_quality.py +++ /dev/null @@ -1,282 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The Deeplab2 Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Implementation of the Segmentation and Tracking Quality (STQ) metric.""" - -import collections -from typing import MutableMapping, Sequence, Dict, Text, Any -import numpy as np -import tensorflow as tf - - -def _update_dict_stats(stat_dict: MutableMapping[int, tf.Tensor], - id_array: tf.Tensor): - """Updates a given dict with corresponding counts.""" - ids, _, counts = tf.unique_with_counts(id_array) - for idx, count in zip(ids.numpy(), counts): - if idx in stat_dict: - stat_dict[idx] += count - else: - stat_dict[idx] = count - - -class STQuality(object): - """Metric class for the Segmentation and Tracking Quality (STQ). - - The metric computes the geometric mean of two terms. - - Association Quality: This term measures the quality of the track ID - assignment for `thing` classes. It is formulated as a weighted IoU - measure. - - Segmentation Quality: This term measures the semantic segmentation quality. - The standard class IoU measure is used for this. - - Example usage: - - stq_obj = segmentation_tracking_quality.STQuality(num_classes, things_list, - ignore_label, max_instances_per_category, offset) - stq_obj.update_state(y_true_1, y_pred_1) - stq_obj.update_state(y_true_2, y_pred_2) - ... - result = stq_obj.result().numpy() - """ - - def __init__(self, - num_classes: int, - things_list: Sequence[int], - ignore_label: int, - max_instances_per_category: int, - offset: int, - name='stq' - ): - """Initialization of the STQ metric. - - Args: - num_classes: Number of classes in the dataset as an integer. - things_list: A sequence of class ids that belong to `things`. - ignore_label: The class id to be ignored in evaluation as an integer or - integer tensor. - max_instances_per_category: The maximum number of instances for each class - as an integer or integer tensor. - offset: The maximum number of unique labels as an integer or integer - tensor. - name: An optional name. (default: 'st_quality') - """ - self._name = name - self._num_classes = num_classes - self._ignore_label = ignore_label - self._things_list = things_list - self._max_instances_per_category = max_instances_per_category - - if ignore_label >= num_classes: - self._confusion_matrix_size = num_classes + 1 - self._include_indices = np.arange(self._num_classes) - else: - self._confusion_matrix_size = num_classes - self._include_indices = np.array( - [i for i in range(num_classes) if i != self._ignore_label]) - - self._iou_confusion_matrix_per_sequence = collections.OrderedDict() - self._predictions = collections.OrderedDict() - self._ground_truth = collections.OrderedDict() - self._intersections = collections.OrderedDict() - self._sequence_length = collections.OrderedDict() - self._offset = offset - lower_bound = num_classes * max_instances_per_category - if offset < lower_bound: - raise ValueError('The provided offset %d is too small. No guarantess ' - 'about the correctness of the results can be made. ' - 'Please choose an offset that is higher than num_classes' - ' * max_instances_per_category = %d' % lower_bound) - - def update_state(self, y_true: tf.Tensor, y_pred: tf.Tensor, - sequence_id=0): - """Accumulates the segmentation and tracking quality statistics. - - Args: - y_true: The ground-truth panoptic label map for a particular video frame - (defined as semantic_map * max_instances_per_category + instance_map). - y_pred: The predicted panoptic label map for a particular video frame - (defined as semantic_map * max_instances_per_category + instance_map). 
- sequence_id: The optional ID of the sequence the frames belong to. When no - sequence is given, all frames are considered to belong to the same - sequence (default: 0). - """ - y_true = tf.cast(y_true, dtype=tf.int64) - y_pred = tf.cast(y_pred, dtype=tf.int64) - semantic_label = y_true // self._max_instances_per_category - semantic_prediction = y_pred // self._max_instances_per_category - # Check if the ignore value is outside the range [0, num_classes]. If yes, - # map `_ignore_label` to `_num_classes`, so it can be used to create the - # confusion matrix. - if self._ignore_label > self._num_classes: - semantic_label = tf.where( - tf.not_equal(semantic_label, self._ignore_label), semantic_label, - self._num_classes) - semantic_prediction = tf.where( - tf.not_equal(semantic_prediction, self._ignore_label), - semantic_prediction, self._num_classes) - if sequence_id in self._iou_confusion_matrix_per_sequence: - self._iou_confusion_matrix_per_sequence[sequence_id] += ( - tf.math.confusion_matrix( - tf.reshape(semantic_label, [-1]), - tf.reshape(semantic_prediction, [-1]), - self._confusion_matrix_size, - dtype=tf.int64)) - self._sequence_length[sequence_id] += 1 - else: - self._iou_confusion_matrix_per_sequence[sequence_id] = ( - tf.math.confusion_matrix( - tf.reshape(semantic_label, [-1]), - tf.reshape(semantic_prediction, [-1]), - self._confusion_matrix_size, - dtype=tf.int64)) - self._predictions[sequence_id] = {} - self._ground_truth[sequence_id] = {} - self._intersections[sequence_id] = {} - self._sequence_length[sequence_id] = 1 - - instance_label = y_true % self._max_instances_per_category - - label_mask = tf.zeros_like(semantic_label, dtype=tf.bool) - prediction_mask = tf.zeros_like(semantic_prediction, dtype=tf.bool) - for things_class_id in self._things_list: - label_mask = tf.logical_or(label_mask, - tf.equal(semantic_label, things_class_id)) - prediction_mask = tf.logical_or( - prediction_mask, tf.equal(semantic_prediction, things_class_id)) - - # Select the `crowd` region of the current class. This region is encoded - # instance id `0`. - is_crowd = tf.logical_and(tf.equal(instance_label, 0), label_mask) - # Select the non-crowd region of the corresponding class as the `crowd` - # region is ignored for the tracking term. - label_mask = tf.logical_and(label_mask, tf.logical_not(is_crowd)) - # Do not punish id assignment for regions that are annotated as `crowd` in - # the ground-truth. - prediction_mask = tf.logical_and(prediction_mask, tf.logical_not(is_crowd)) - - seq_preds = self._predictions[sequence_id] - seq_gts = self._ground_truth[sequence_id] - seq_intersects = self._intersections[sequence_id] - - # Compute and update areas of ground-truth, predictions and intersections. - _update_dict_stats(seq_preds, y_pred[prediction_mask]) - _update_dict_stats(seq_gts, y_true[label_mask]) - - non_crowd_intersection = tf.logical_and(label_mask, prediction_mask) - intersection_ids = ( - y_true[non_crowd_intersection] * self._offset + - y_pred[non_crowd_intersection]) - _update_dict_stats(seq_intersects, intersection_ids) - - def result(self) -> Dict[Text, Any]: - """Computes the segmentation and tracking quality. - - Returns: - A dictionary containing: - - 'STQ': The total STQ score. - - 'AQ': The total association quality (AQ) score. - - 'IoU': The total mean IoU. - - 'STQ_per_seq': A list of the STQ score per sequence. - - 'AQ_per_seq': A list of the AQ score per sequence. - - 'IoU_per_seq': A list of mean IoU per sequence. 
- - 'Id_per_seq': A list of sequence Ids to map list index to sequence. - - 'Length_per_seq': A list of the length of each sequence. - """ - # Compute association quality (AQ) - num_tubes_per_seq = [0] * len(self._ground_truth) - aq_per_seq = [0] * len(self._ground_truth) - iou_per_seq = [0] * len(self._ground_truth) - id_per_seq = [''] * len(self._ground_truth) - - for index, sequence_id in enumerate(self._ground_truth): - outer_sum = 0.0 - predictions = self._predictions[sequence_id] - ground_truth = self._ground_truth[sequence_id] - intersections = self._intersections[sequence_id] - num_tubes_per_seq[index] = len(ground_truth) - id_per_seq[index] = sequence_id - - for gt_id, gt_size in ground_truth.items(): - inner_sum = 0.0 - for pr_id, pr_size in predictions.items(): - tpa_key = self._offset * gt_id + pr_id - if tpa_key in intersections: - tpa = intersections[tpa_key].numpy() - fpa = pr_size.numpy() - tpa - fna = gt_size.numpy() - tpa - inner_sum += tpa * (tpa / (tpa + fpa + fna)) - - outer_sum += 1.0 / gt_size.numpy() * inner_sum - aq_per_seq[index] = outer_sum - - aq_mean = np.sum(aq_per_seq) / np.maximum(np.sum(num_tubes_per_seq), 1e-15) - aq_per_seq = aq_per_seq / np.maximum(num_tubes_per_seq, 1e-15) - - # Compute IoU scores. - # The rows correspond to ground-truth and the columns to predictions. - # Remove fp from confusion matrix for the void/ignore class. - total_confusion = np.zeros( - (self._confusion_matrix_size, self._confusion_matrix_size), - dtype=np.int64) - for index, confusion in enumerate( - self._iou_confusion_matrix_per_sequence.values()): - confusion = confusion.numpy() - removal_matrix = np.zeros_like(confusion) - removal_matrix[self._include_indices, :] = 1.0 - confusion *= removal_matrix - total_confusion += confusion - - # `intersections` corresponds to true positives. - intersections = confusion.diagonal() - fps = confusion.sum(axis=0) - intersections - fns = confusion.sum(axis=1) - intersections - unions = intersections + fps + fns - - num_classes = np.count_nonzero(unions) - ious = (intersections.astype(np.double) / - np.maximum(unions, 1e-15).astype(np.double)) - iou_per_seq[index] = np.sum(ious) / num_classes - - # `intersections` corresponds to true positives. 
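-    # The same tp / fp / fn decomposition is now applied to the confusion matrix
-    # accumulated over all sequences to obtain the dataset-level mean IoU.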
- intersections = total_confusion.diagonal() - fps = total_confusion.sum(axis=0) - intersections - fns = total_confusion.sum(axis=1) - intersections - unions = intersections + fps + fns - - num_classes = np.count_nonzero(unions) - ious = (intersections.astype(np.double) / - np.maximum(unions, 1e-15).astype(np.double)) - iou_mean = np.sum(ious) / num_classes - - st_quality = np.sqrt(aq_mean * iou_mean) - st_quality_per_seq = np.sqrt(aq_per_seq * iou_per_seq) - return {'STQ': st_quality, - 'AQ': aq_mean, - 'IoU': float(iou_mean), - 'STQ_per_seq': st_quality_per_seq, - 'AQ_per_seq': aq_per_seq, - 'IoU_per_seq': iou_per_seq, - 'ID_per_seq': id_per_seq, - 'Length_per_seq': list(self._sequence_length.values()), - } - - def reset_states(self): - """Resets all states that accumulated data.""" - self._iou_confusion_matrix_per_sequence = collections.OrderedDict() - self._predictions = collections.OrderedDict() - self._ground_truth = collections.OrderedDict() - self._intersections = collections.OrderedDict() - self._sequence_length = collections.OrderedDict() diff --git a/spaces/akhaliq/lama/saicinpainting/training/modules/ffc.py b/spaces/akhaliq/lama/saicinpainting/training/modules/ffc.py deleted file mode 100644 index 0e7b84683fccb4bccac97b6371994fa6bb44dbe4..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/lama/saicinpainting/training/modules/ffc.py +++ /dev/null @@ -1,485 +0,0 @@ -# Fast Fourier Convolution NeurIPS 2020 -# original implementation https://github.com/pkumivision/FFC/blob/main/model_zoo/ffc.py -# paper https://proceedings.neurips.cc/paper/2020/file/2fd5d41ec6cfab47e32164d5624269b1-Paper.pdf - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F - -from saicinpainting.training.modules.base import get_activation, BaseDiscriminator -from saicinpainting.training.modules.spatial_transform import LearnableSpatialTransformWrapper -from saicinpainting.training.modules.squeeze_excitation import SELayer -from saicinpainting.utils import get_shape - - -class FFCSE_block(nn.Module): - - def __init__(self, channels, ratio_g): - super(FFCSE_block, self).__init__() - in_cg = int(channels * ratio_g) - in_cl = channels - in_cg - r = 16 - - self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) - self.conv1 = nn.Conv2d(channels, channels // r, - kernel_size=1, bias=True) - self.relu1 = nn.ReLU(inplace=True) - self.conv_a2l = None if in_cl == 0 else nn.Conv2d( - channels // r, in_cl, kernel_size=1, bias=True) - self.conv_a2g = None if in_cg == 0 else nn.Conv2d( - channels // r, in_cg, kernel_size=1, bias=True) - self.sigmoid = nn.Sigmoid() - - def forward(self, x): - x = x if type(x) is tuple else (x, 0) - id_l, id_g = x - - x = id_l if type(id_g) is int else torch.cat([id_l, id_g], dim=1) - x = self.avgpool(x) - x = self.relu1(self.conv1(x)) - - x_l = 0 if self.conv_a2l is None else id_l * \ - self.sigmoid(self.conv_a2l(x)) - x_g = 0 if self.conv_a2g is None else id_g * \ - self.sigmoid(self.conv_a2g(x)) - return x_l, x_g - - -class FourierUnit(nn.Module): - - def __init__(self, in_channels, out_channels, groups=1, spatial_scale_factor=None, spatial_scale_mode='bilinear', - spectral_pos_encoding=False, use_se=False, se_kwargs=None, ffc3d=False, fft_norm='ortho'): - # bn_layer not used - super(FourierUnit, self).__init__() - self.groups = groups - - self.conv_layer = torch.nn.Conv2d(in_channels=in_channels * 2 + (2 if spectral_pos_encoding else 0), - out_channels=out_channels * 2, - kernel_size=1, stride=1, padding=0, groups=self.groups, bias=False) - self.bn = 
torch.nn.BatchNorm2d(out_channels * 2) - self.relu = torch.nn.ReLU(inplace=True) - - # squeeze and excitation block - self.use_se = use_se - if use_se: - if se_kwargs is None: - se_kwargs = {} - self.se = SELayer(self.conv_layer.in_channels, **se_kwargs) - - self.spatial_scale_factor = spatial_scale_factor - self.spatial_scale_mode = spatial_scale_mode - self.spectral_pos_encoding = spectral_pos_encoding - self.ffc3d = ffc3d - self.fft_norm = fft_norm - - def forward(self, x): - batch = x.shape[0] - - if self.spatial_scale_factor is not None: - orig_size = x.shape[-2:] - x = F.interpolate(x, scale_factor=self.spatial_scale_factor, mode=self.spatial_scale_mode, align_corners=False) - - r_size = x.size() - # (batch, c, h, w/2+1, 2) - fft_dim = (-3, -2, -1) if self.ffc3d else (-2, -1) - ffted = torch.fft.rfftn(x, dim=fft_dim, norm=self.fft_norm) - ffted = torch.stack((ffted.real, ffted.imag), dim=-1) - ffted = ffted.permute(0, 1, 4, 2, 3).contiguous() # (batch, c, 2, h, w/2+1) - ffted = ffted.view((batch, -1,) + ffted.size()[3:]) - - if self.spectral_pos_encoding: - height, width = ffted.shape[-2:] - coords_vert = torch.linspace(0, 1, height)[None, None, :, None].expand(batch, 1, height, width).to(ffted) - coords_hor = torch.linspace(0, 1, width)[None, None, None, :].expand(batch, 1, height, width).to(ffted) - ffted = torch.cat((coords_vert, coords_hor, ffted), dim=1) - - if self.use_se: - ffted = self.se(ffted) - - ffted = self.conv_layer(ffted) # (batch, c*2, h, w/2+1) - ffted = self.relu(self.bn(ffted)) - - ffted = ffted.view((batch, -1, 2,) + ffted.size()[2:]).permute( - 0, 1, 3, 4, 2).contiguous() # (batch,c, t, h, w/2+1, 2) - ffted = torch.complex(ffted[..., 0], ffted[..., 1]) - - ifft_shape_slice = x.shape[-3:] if self.ffc3d else x.shape[-2:] - output = torch.fft.irfftn(ffted, s=ifft_shape_slice, dim=fft_dim, norm=self.fft_norm) - - if self.spatial_scale_factor is not None: - output = F.interpolate(output, size=orig_size, mode=self.spatial_scale_mode, align_corners=False) - - return output - - -class SeparableFourierUnit(nn.Module): - - def __init__(self, in_channels, out_channels, groups=1, kernel_size=3): - # bn_layer not used - super(SeparableFourierUnit, self).__init__() - self.groups = groups - row_out_channels = out_channels // 2 - col_out_channels = out_channels - row_out_channels - self.row_conv = torch.nn.Conv2d(in_channels=in_channels * 2, - out_channels=row_out_channels * 2, - kernel_size=(kernel_size, 1), # kernel size is always like this, but the data will be transposed - stride=1, padding=(kernel_size // 2, 0), - padding_mode='reflect', - groups=self.groups, bias=False) - self.col_conv = torch.nn.Conv2d(in_channels=in_channels * 2, - out_channels=col_out_channels * 2, - kernel_size=(kernel_size, 1), # kernel size is always like this, but the data will be transposed - stride=1, padding=(kernel_size // 2, 0), - padding_mode='reflect', - groups=self.groups, bias=False) - self.row_bn = torch.nn.BatchNorm2d(row_out_channels * 2) - self.col_bn = torch.nn.BatchNorm2d(col_out_channels * 2) - self.relu = torch.nn.ReLU(inplace=True) - - def process_branch(self, x, conv, bn): - batch = x.shape[0] - - r_size = x.size() - # (batch, c, h, w/2+1, 2) - ffted = torch.fft.rfft(x, norm="ortho") - ffted = torch.stack((ffted.real, ffted.imag), dim=-1) - ffted = ffted.permute(0, 1, 4, 2, 3).contiguous() # (batch, c, 2, h, w/2+1) - ffted = ffted.view((batch, -1,) + ffted.size()[3:]) - - ffted = self.relu(bn(conv(ffted))) - - ffted = ffted.view((batch, -1, 2,) + ffted.size()[2:]).permute( - 0, 
1, 3, 4, 2).contiguous() # (batch,c, t, h, w/2+1, 2) - ffted = torch.complex(ffted[..., 0], ffted[..., 1]) - - output = torch.fft.irfft(ffted, s=x.shape[-1:], norm="ortho") - return output - - - def forward(self, x): - rowwise = self.process_branch(x, self.row_conv, self.row_bn) - colwise = self.process_branch(x.permute(0, 1, 3, 2), self.col_conv, self.col_bn).permute(0, 1, 3, 2) - out = torch.cat((rowwise, colwise), dim=1) - return out - - -class SpectralTransform(nn.Module): - - def __init__(self, in_channels, out_channels, stride=1, groups=1, enable_lfu=True, separable_fu=False, **fu_kwargs): - # bn_layer not used - super(SpectralTransform, self).__init__() - self.enable_lfu = enable_lfu - if stride == 2: - self.downsample = nn.AvgPool2d(kernel_size=(2, 2), stride=2) - else: - self.downsample = nn.Identity() - - self.stride = stride - self.conv1 = nn.Sequential( - nn.Conv2d(in_channels, out_channels // - 2, kernel_size=1, groups=groups, bias=False), - nn.BatchNorm2d(out_channels // 2), - nn.ReLU(inplace=True) - ) - fu_class = SeparableFourierUnit if separable_fu else FourierUnit - self.fu = fu_class( - out_channels // 2, out_channels // 2, groups, **fu_kwargs) - if self.enable_lfu: - self.lfu = fu_class( - out_channels // 2, out_channels // 2, groups) - self.conv2 = torch.nn.Conv2d( - out_channels // 2, out_channels, kernel_size=1, groups=groups, bias=False) - - def forward(self, x): - - x = self.downsample(x) - x = self.conv1(x) - output = self.fu(x) - - if self.enable_lfu: - n, c, h, w = x.shape - split_no = 2 - split_s = h // split_no - xs = torch.cat(torch.split( - x[:, :c // 4], split_s, dim=-2), dim=1).contiguous() - xs = torch.cat(torch.split(xs, split_s, dim=-1), - dim=1).contiguous() - xs = self.lfu(xs) - xs = xs.repeat(1, 1, split_no, split_no).contiguous() - else: - xs = 0 - - output = self.conv2(x + output + xs) - - return output - - -class FFC(nn.Module): - - def __init__(self, in_channels, out_channels, kernel_size, - ratio_gin, ratio_gout, stride=1, padding=0, - dilation=1, groups=1, bias=False, enable_lfu=True, - padding_type='reflect', gated=False, **spectral_kwargs): - super(FFC, self).__init__() - - assert stride == 1 or stride == 2, "Stride should be 1 or 2." 
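-        # Channels are split into a "local" branch (ordinary convolutions) and a
-        # "global" branch (SpectralTransform over the whole feature map); ratio_gin
-        # and ratio_gout set the fraction of input / output channels routed globally.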
- self.stride = stride - - in_cg = int(in_channels * ratio_gin) - in_cl = in_channels - in_cg - out_cg = int(out_channels * ratio_gout) - out_cl = out_channels - out_cg - #groups_g = 1 if groups == 1 else int(groups * ratio_gout) - #groups_l = 1 if groups == 1 else groups - groups_g - - self.ratio_gin = ratio_gin - self.ratio_gout = ratio_gout - self.global_in_num = in_cg - - module = nn.Identity if in_cl == 0 or out_cl == 0 else nn.Conv2d - self.convl2l = module(in_cl, out_cl, kernel_size, - stride, padding, dilation, groups, bias, padding_mode=padding_type) - module = nn.Identity if in_cl == 0 or out_cg == 0 else nn.Conv2d - self.convl2g = module(in_cl, out_cg, kernel_size, - stride, padding, dilation, groups, bias, padding_mode=padding_type) - module = nn.Identity if in_cg == 0 or out_cl == 0 else nn.Conv2d - self.convg2l = module(in_cg, out_cl, kernel_size, - stride, padding, dilation, groups, bias, padding_mode=padding_type) - module = nn.Identity if in_cg == 0 or out_cg == 0 else SpectralTransform - self.convg2g = module( - in_cg, out_cg, stride, 1 if groups == 1 else groups // 2, enable_lfu, **spectral_kwargs) - - self.gated = gated - module = nn.Identity if in_cg == 0 or out_cl == 0 or not self.gated else nn.Conv2d - self.gate = module(in_channels, 2, 1) - - def forward(self, x): - x_l, x_g = x if type(x) is tuple else (x, 0) - out_xl, out_xg = 0, 0 - - if self.gated: - total_input_parts = [x_l] - if torch.is_tensor(x_g): - total_input_parts.append(x_g) - total_input = torch.cat(total_input_parts, dim=1) - - gates = torch.sigmoid(self.gate(total_input)) - g2l_gate, l2g_gate = gates.chunk(2, dim=1) - else: - g2l_gate, l2g_gate = 1, 1 - - if self.ratio_gout != 1: - out_xl = self.convl2l(x_l) + self.convg2l(x_g) * g2l_gate - if self.ratio_gout != 0: - out_xg = self.convl2g(x_l) * l2g_gate + self.convg2g(x_g) - - return out_xl, out_xg - - -class FFC_BN_ACT(nn.Module): - - def __init__(self, in_channels, out_channels, - kernel_size, ratio_gin, ratio_gout, - stride=1, padding=0, dilation=1, groups=1, bias=False, - norm_layer=nn.BatchNorm2d, activation_layer=nn.Identity, - padding_type='reflect', - enable_lfu=True, **kwargs): - super(FFC_BN_ACT, self).__init__() - self.ffc = FFC(in_channels, out_channels, kernel_size, - ratio_gin, ratio_gout, stride, padding, dilation, - groups, bias, enable_lfu, padding_type=padding_type, **kwargs) - lnorm = nn.Identity if ratio_gout == 1 else norm_layer - gnorm = nn.Identity if ratio_gout == 0 else norm_layer - global_channels = int(out_channels * ratio_gout) - self.bn_l = lnorm(out_channels - global_channels) - self.bn_g = gnorm(global_channels) - - lact = nn.Identity if ratio_gout == 1 else activation_layer - gact = nn.Identity if ratio_gout == 0 else activation_layer - self.act_l = lact(inplace=True) - self.act_g = gact(inplace=True) - - def forward(self, x): - x_l, x_g = self.ffc(x) - x_l = self.act_l(self.bn_l(x_l)) - x_g = self.act_g(self.bn_g(x_g)) - return x_l, x_g - - -class FFCResnetBlock(nn.Module): - def __init__(self, dim, padding_type, norm_layer, activation_layer=nn.ReLU, dilation=1, - spatial_transform_kwargs=None, inline=False, **conv_kwargs): - super().__init__() - self.conv1 = FFC_BN_ACT(dim, dim, kernel_size=3, padding=dilation, dilation=dilation, - norm_layer=norm_layer, - activation_layer=activation_layer, - padding_type=padding_type, - **conv_kwargs) - self.conv2 = FFC_BN_ACT(dim, dim, kernel_size=3, padding=dilation, dilation=dilation, - norm_layer=norm_layer, - activation_layer=activation_layer, - padding_type=padding_type, - 
**conv_kwargs) - if spatial_transform_kwargs is not None: - self.conv1 = LearnableSpatialTransformWrapper(self.conv1, **spatial_transform_kwargs) - self.conv2 = LearnableSpatialTransformWrapper(self.conv2, **spatial_transform_kwargs) - self.inline = inline - - def forward(self, x): - if self.inline: - x_l, x_g = x[:, :-self.conv1.ffc.global_in_num], x[:, -self.conv1.ffc.global_in_num:] - else: - x_l, x_g = x if type(x) is tuple else (x, 0) - - id_l, id_g = x_l, x_g - - x_l, x_g = self.conv1((x_l, x_g)) - x_l, x_g = self.conv2((x_l, x_g)) - - x_l, x_g = id_l + x_l, id_g + x_g - out = x_l, x_g - if self.inline: - out = torch.cat(out, dim=1) - return out - - -class ConcatTupleLayer(nn.Module): - def forward(self, x): - assert isinstance(x, tuple) - x_l, x_g = x - assert torch.is_tensor(x_l) or torch.is_tensor(x_g) - if not torch.is_tensor(x_g): - return x_l - return torch.cat(x, dim=1) - - -class FFCResNetGenerator(nn.Module): - def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d, - padding_type='reflect', activation_layer=nn.ReLU, - up_norm_layer=nn.BatchNorm2d, up_activation=nn.ReLU(True), - init_conv_kwargs={}, downsample_conv_kwargs={}, resnet_conv_kwargs={}, - spatial_transform_layers=None, spatial_transform_kwargs={}, - add_out_act=True, max_features=1024, out_ffc=False, out_ffc_kwargs={}): - assert (n_blocks >= 0) - super().__init__() - - model = [nn.ReflectionPad2d(3), - FFC_BN_ACT(input_nc, ngf, kernel_size=7, padding=0, norm_layer=norm_layer, - activation_layer=activation_layer, **init_conv_kwargs)] - - ### downsample - for i in range(n_downsampling): - mult = 2 ** i - if i == n_downsampling - 1: - cur_conv_kwargs = dict(downsample_conv_kwargs) - cur_conv_kwargs['ratio_gout'] = resnet_conv_kwargs.get('ratio_gin', 0) - else: - cur_conv_kwargs = downsample_conv_kwargs - model += [FFC_BN_ACT(min(max_features, ngf * mult), - min(max_features, ngf * mult * 2), - kernel_size=3, stride=2, padding=1, - norm_layer=norm_layer, - activation_layer=activation_layer, - **cur_conv_kwargs)] - - mult = 2 ** n_downsampling - feats_num_bottleneck = min(max_features, ngf * mult) - - ### resnet blocks - for i in range(n_blocks): - cur_resblock = FFCResnetBlock(feats_num_bottleneck, padding_type=padding_type, activation_layer=activation_layer, - norm_layer=norm_layer, **resnet_conv_kwargs) - if spatial_transform_layers is not None and i in spatial_transform_layers: - cur_resblock = LearnableSpatialTransformWrapper(cur_resblock, **spatial_transform_kwargs) - model += [cur_resblock] - - model += [ConcatTupleLayer()] - - ### upsample - for i in range(n_downsampling): - mult = 2 ** (n_downsampling - i) - model += [nn.ConvTranspose2d(min(max_features, ngf * mult), - min(max_features, int(ngf * mult / 2)), - kernel_size=3, stride=2, padding=1, output_padding=1), - up_norm_layer(min(max_features, int(ngf * mult / 2))), - up_activation] - - if out_ffc: - model += [FFCResnetBlock(ngf, padding_type=padding_type, activation_layer=activation_layer, - norm_layer=norm_layer, inline=True, **out_ffc_kwargs)] - - model += [nn.ReflectionPad2d(3), - nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] - if add_out_act: - model.append(get_activation('tanh' if add_out_act is True else add_out_act)) - self.model = nn.Sequential(*model) - - def forward(self, input): - return self.model(input) - - -class FFCNLayerDiscriminator(BaseDiscriminator): - def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, max_features=512, - init_conv_kwargs={}, 
conv_kwargs={}): - super().__init__() - self.n_layers = n_layers - - def _act_ctor(inplace=True): - return nn.LeakyReLU(negative_slope=0.2, inplace=inplace) - - kw = 3 - padw = int(np.ceil((kw-1.0)/2)) - sequence = [[FFC_BN_ACT(input_nc, ndf, kernel_size=kw, padding=padw, norm_layer=norm_layer, - activation_layer=_act_ctor, **init_conv_kwargs)]] - - nf = ndf - for n in range(1, n_layers): - nf_prev = nf - nf = min(nf * 2, max_features) - - cur_model = [ - FFC_BN_ACT(nf_prev, nf, - kernel_size=kw, stride=2, padding=padw, - norm_layer=norm_layer, - activation_layer=_act_ctor, - **conv_kwargs) - ] - sequence.append(cur_model) - - nf_prev = nf - nf = min(nf * 2, 512) - - cur_model = [ - FFC_BN_ACT(nf_prev, nf, - kernel_size=kw, stride=1, padding=padw, - norm_layer=norm_layer, - activation_layer=lambda *args, **kwargs: nn.LeakyReLU(*args, negative_slope=0.2, **kwargs), - **conv_kwargs), - ConcatTupleLayer() - ] - sequence.append(cur_model) - - sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]] - - for n in range(len(sequence)): - setattr(self, 'model'+str(n), nn.Sequential(*sequence[n])) - - def get_all_activations(self, x): - res = [x] - for n in range(self.n_layers + 2): - model = getattr(self, 'model' + str(n)) - res.append(model(res[-1])) - return res[1:] - - def forward(self, x): - act = self.get_all_activations(x) - feats = [] - for out in act[:-1]: - if isinstance(out, tuple): - if torch.is_tensor(out[1]): - out = torch.cat(out, dim=1) - else: - out = out[0] - feats.append(out) - return act[-1], feats diff --git a/spaces/akhaliq/paint-by-example/inpainting.py b/spaces/akhaliq/paint-by-example/inpainting.py deleted file mode 100644 index 798c3fd252f826762aee6970f867eee537249db8..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/paint-by-example/inpainting.py +++ /dev/null @@ -1,194 +0,0 @@ -import inspect -from typing import List, Optional, Union - -import numpy as np -import torch - -import PIL -from diffusers import AutoencoderKL, DDIMScheduler, DiffusionPipeline, PNDMScheduler, UNet2DConditionModel -from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker -from tqdm.auto import tqdm -from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer - - -def preprocess_image(image): - w, h = image.size - w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32 - image = image.resize((w, h), resample=PIL.Image.LANCZOS) - image = np.array(image).astype(np.float32) / 255.0 - image = image[None].transpose(0, 3, 1, 2) - image = torch.from_numpy(image) - return 2.0 * image - 1.0 - - -def preprocess_mask(mask): - mask = mask.convert("L") - w, h = mask.size - w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32 - mask = mask.resize((w // 8, h // 8), resample=PIL.Image.NEAREST) - mask = np.array(mask).astype(np.float32) / 255.0 - mask = np.tile(mask, (4, 1, 1)) - mask = mask[None].transpose(0, 1, 2, 3) # what does this step do? 
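-    # (the transpose above uses the identity permutation (0, 1, 2, 3), so it is
-    #  effectively a no-op; after `mask[None]` the array already has the
-    #  (1, 4, h//8, w//8) latent layout)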
- mask = 1 - mask # repaint white, keep black - mask = torch.from_numpy(mask) - return mask - -class StableDiffusionInpaintingPipeline(DiffusionPipeline): - def __init__( - self, - vae: AutoencoderKL, - text_encoder: CLIPTextModel, - tokenizer: CLIPTokenizer, - unet: UNet2DConditionModel, - scheduler: Union[DDIMScheduler, PNDMScheduler], - safety_checker: StableDiffusionSafetyChecker, - feature_extractor: CLIPFeatureExtractor, - ): - super().__init__() - scheduler = scheduler.set_format("pt") - self.register_modules( - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - - @torch.no_grad() - def __call__( - self, - prompt: Union[str, List[str]], - init_image: torch.FloatTensor, - mask_image: torch.FloatTensor, - strength: float = 0.8, - num_inference_steps: Optional[int] = 50, - guidance_scale: Optional[float] = 7.5, - eta: Optional[float] = 0.0, - generator: Optional[torch.Generator] = None, - output_type: Optional[str] = "pil", - ): - - if isinstance(prompt, str): - batch_size = 1 - elif isinstance(prompt, list): - batch_size = len(prompt) - else: - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if strength < 0 or strength > 1: - raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") - - # set timesteps - accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys()) - extra_set_kwargs = {} - offset = 0 - if accepts_offset: - offset = 1 - extra_set_kwargs["offset"] = 1 - - self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs) - - # preprocess image - init_image = preprocess_image(init_image).to(self.device) - - # encode the init image into latents and scale the latents - init_latent_dist = self.vae.encode(init_image).latent_dist - init_latents = init_latent_dist.sample(generator=generator) - init_latents = 0.18215 * init_latents - - # prepare init_latents noise to latents - init_latents = torch.cat([init_latents] * batch_size) - init_latents_orig = init_latents - - # preprocess mask - mask = preprocess_mask(mask_image).to(self.device) - mask = torch.cat([mask] * batch_size) - - # check sizes - if not mask.shape == init_latents.shape: - raise ValueError(f"The mask and init_image should be the same size!") - - # get the original timestep using init_timestep - init_timestep = int(num_inference_steps * strength) + offset - init_timestep = min(init_timestep, num_inference_steps) - timesteps = self.scheduler.timesteps[-init_timestep] - timesteps = torch.tensor([timesteps] * batch_size, dtype=torch.long, device=self.device) - - # add noise to latents using the timesteps - noise = torch.randn(init_latents.shape, generator=generator, device=self.device) - init_latents = self.scheduler.add_noise(init_latents, noise, timesteps) - - # get prompt text embeddings - text_input = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0] - - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. 
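-        # With guidance enabled, the denoising loop below combines the two passes as
-        #   noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)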
- do_classifier_free_guidance = guidance_scale > 1.0 - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance: - max_length = text_input.input_ids.shape[-1] - uncond_input = self.tokenizer( - [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt" - ) - uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0] - - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) - - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. - # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - latents = init_latents - t_start = max(num_inference_steps - init_timestep + offset, 0) - for i, t in tqdm(enumerate(self.scheduler.timesteps[t_start:])): - # expand the latents if we are doing classifier free guidance - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - - # predict the noise residual - noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"] - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs)["prev_sample"] - - # masking - init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, t) - latents = (init_latents_proper * mask) + (latents * (1 - mask)) - - # scale and decode the image latents with vae - latents = 1 / 0.18215 * latents - image = self.vae.decode(latents).sample - - image = (image / 2 + 0.5).clamp(0, 1) - image = image.cpu().permute(0, 2, 3, 1).numpy() - - # run safety checker - safety_cheker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(self.device) - image, has_nsfw_concept = self.safety_checker(images=image, clip_input=safety_cheker_input.pixel_values) - - if output_type == "pil": - image = self.numpy_to_pil(image) - - return {"sample": image, "nsfw_content_detected": has_nsfw_concept} \ No newline at end of file diff --git a/spaces/akhaliq/paint-by-example/share_btn.py b/spaces/akhaliq/paint-by-example/share_btn.py deleted file mode 100644 index 5bce98ad54d491f9d5691fea427efeccc77690cc..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/paint-by-example/share_btn.py +++ /dev/null @@ -1,93 +0,0 @@ -community_icon_html = """""" - -loading_icon_html = """""" - -share_js = """async () => { - async function uploadFile(file){ - const UPLOAD_URL = 'https://huggingface.co/uploads'; - const response = await fetch(UPLOAD_URL, { - method: 'POST', - headers: { - 'Content-Type': file.type, - 'X-Requested-With': 'XMLHttpRequest', - }, - body: file, /// <- File inherits from Blob - }); - const url = await response.text(); - return url; - } - - async function getInputImgFile(imgCanvas){ - const blob = await new Promise(resolve => imgCanvas.toBlob(resolve)); - 
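-        // the timestamp modulo 200 serves as a short quasi-unique id for the uploaded file name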
const imgId = Date.now() % 200; - const fileName = `sd-inpainting-${{imgId}}.png`; - return new File([blob], fileName, { type: 'image/png' }); - } - - async function getOutoutImgFile(imgEl){ - const res = await fetch(imgEl.src); - const blob = await res.blob(); - const imgId = Date.now() % 200; - const fileName = `sd-inpainting-${{imgId}}.png`; - return new File([blob], fileName, { type: 'image/png' }); - } - - const gradioEl = document.querySelector('body > gradio-app'); - // const gradioEl = document.querySelector("gradio-app").shadowRoot; - const inputImgCanvas = gradioEl.querySelector('canvas[key="drawing"]'); - const outputImgEl = gradioEl.querySelector('#output-img img'); - const promptTxt = gradioEl.querySelector('#input-text textarea').value; - let titleTxt = promptTxt; - if(titleTxt.length > 100){ - titleTxt = titleTxt.slice(0, 100) + ' ...'; - } - const shareBtnEl = gradioEl.querySelector('#share-btn'); - const shareIconEl = gradioEl.querySelector('#share-btn-share-icon'); - const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon'); - - if(!outputImgEl){ - return; - }; - - shareBtnEl.style.pointerEvents = 'none'; - shareIconEl.style.display = 'none'; - loadingIconEl.style.removeProperty('display'); - - const inputImgFile = await getInputImgFile(inputImgCanvas); - const outputImgFile = await getOutoutImgFile(outputImgEl); - const files = [inputImgFile, outputImgFile]; - - const urls = await Promise.all(files.map((f) => uploadFile(f))); - - const htmlImgs = urls.map(url => ``); - const [inputImgUrl, outputImgUrl] = htmlImgs; - - const descriptionMd = `
    -
    -${inputImgUrl} - -${promptTxt} -
    -
    -${outputImgUrl} -
    -
    `; - - const params = new URLSearchParams({ - title: titleTxt, - description: descriptionMd, - }); - - const paramsStr = params.toString(); - window.open(`${window.location.href}/discussions/new?${paramsStr}`, '_blank'); - - shareBtnEl.style.removeProperty('pointer-events'); - shareIconEl.style.removeProperty('display'); - loadingIconEl.style.display = 'none'; -}""" \ No newline at end of file diff --git a/spaces/akhaliq/wav2vec2-large-robust-ft-libri-960h/README.md b/spaces/akhaliq/wav2vec2-large-robust-ft-libri-960h/README.md deleted file mode 100644 index 9f5eebf43d604be01c5155baecf59b8ac70af6fb..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/wav2vec2-large-robust-ft-libri-960h/README.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Wav2vec2 Large Robust Ft Libri 960h -emoji: 🌖 -colorFrom: pink -colorTo: pink -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/alex-mindspace/gpt-agents/tests/test.py b/spaces/alex-mindspace/gpt-agents/tests/test.py deleted file mode 100644 index fd956247da7f1751c97e98c26d28831fd9724740..0000000000000000000000000000000000000000 --- a/spaces/alex-mindspace/gpt-agents/tests/test.py +++ /dev/null @@ -1,34 +0,0 @@ -import sys -import os -import json -from pathlib import Path -import numpy as np -import matplotlib.pyplot as plt -import seaborn as sns -sys.path.append('..') - -from swarmai.challenges.python_challenges.PythonChallenge import PythonChallenge -from swarmai.Swarm import Swarm - -def load_keys(): - keys_file = Path("../keys.json") - with open(keys_file) as f: - keys = json.load(f) - os.environ["OPENAI_API_KEY"] = keys["OPENAI_API_KEY"] - -def init_challenge(): - # defining the challenge the swarm will be working on - test_challenge_config = Path('../swarmai/challenges/python_challenges/challenge2/pc2_config.yaml') - challenge1 = PythonChallenge(test_challenge_config) - print(challenge1.get_problem()) - return challenge1 - -def run_swarm(challenge): - # establishing the swarm - swarm1 = Swarm(challenge, (5, 5), {"python developer": 0.8, "explorer python": 0.2}) - swarm1.run_swarm(1500) - -if __name__=="__main__": - load_keys() - ch = init_challenge() - run_swarm(ch) \ No newline at end of file diff --git a/spaces/aodianyun/stable-diffusion-webui/modules/img2img.py b/spaces/aodianyun/stable-diffusion-webui/modules/img2img.py deleted file mode 100644 index 8ddf224fa2b13a32cb51603a55482e0f0783ec72..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/stable-diffusion-webui/modules/img2img.py +++ /dev/null @@ -1,184 +0,0 @@ -import math -import os -import sys -import traceback - -import numpy as np -from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops - -from modules import devices, sd_samplers -from modules.generation_parameters_copypaste import create_override_settings_dict -from modules.processing import Processed, StableDiffusionProcessingImg2Img, 
process_images -from modules.shared import opts, state -import modules.shared as shared -import modules.processing as processing -from modules.ui import plaintext_to_html -import modules.images as images -import modules.scripts - - -def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args): - processing.fix_seed(p) - - images = shared.listfiles(input_dir) - - is_inpaint_batch = False - if inpaint_mask_dir: - inpaint_masks = shared.listfiles(inpaint_mask_dir) - is_inpaint_batch = len(inpaint_masks) > 0 - if is_inpaint_batch: - print(f"\nInpaint batch is enabled. {len(inpaint_masks)} masks found.") - - print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.") - - save_normally = output_dir == '' - - p.do_not_save_grid = True - p.do_not_save_samples = not save_normally - - state.job_count = len(images) * p.n_iter - - for i, image in enumerate(images): - state.job = f"{i+1} out of {len(images)}" - if state.skipped: - state.skipped = False - - if state.interrupted: - break - - img = Image.open(image) - # Use the EXIF orientation of photos taken by smartphones. - img = ImageOps.exif_transpose(img) - p.init_images = [img] * p.batch_size - - if is_inpaint_batch: - # try to find corresponding mask for an image using simple filename matching - mask_image_path = os.path.join(inpaint_mask_dir, os.path.basename(image)) - # if not found use first one ("same mask for all images" use-case) - if not mask_image_path in inpaint_masks: - mask_image_path = inpaint_masks[0] - mask_image = Image.open(mask_image_path) - p.image_mask = mask_image - - proc = modules.scripts.scripts_img2img.run(p, *args) - if proc is None: - proc = process_images(p) - - for n, processed_image in enumerate(proc.images): - filename = os.path.basename(image) - - if n > 0: - left, right = os.path.splitext(filename) - filename = f"{left}-{n}{right}" - - if not save_normally: - os.makedirs(output_dir, exist_ok=True) - if processed_image.mode == 'RGBA': - processed_image = processed_image.convert("RGB") - processed_image.save(os.path.join(output_dir, filename)) - - -def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args): - override_settings = create_override_settings_dict(override_settings_texts) - - is_batch = mode == 5 - - if mode == 0: # img2img - image = init_img.convert("RGB") - mask = None - elif mode == 1: # img2img sketch - image = sketch.convert("RGB") - mask = None - elif mode == 2: # inpaint - image, mask = init_img_with_mask["image"], init_img_with_mask["mask"] - alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1') - mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L') - image = image.convert("RGB") - elif mode == 3: # inpaint sketch - image = 
inpaint_color_sketch - orig = inpaint_color_sketch_orig or inpaint_color_sketch - pred = np.any(np.array(image) != np.array(orig), axis=-1) - mask = Image.fromarray(pred.astype(np.uint8) * 255, "L") - mask = ImageEnhance.Brightness(mask).enhance(1 - mask_alpha / 100) - blur = ImageFilter.GaussianBlur(mask_blur) - image = Image.composite(image.filter(blur), orig, mask.filter(blur)) - image = image.convert("RGB") - elif mode == 4: # inpaint upload mask - image = init_img_inpaint - mask = init_mask_inpaint - else: - image = None - mask = None - - # Use the EXIF orientation of photos taken by smartphones. - if image is not None: - image = ImageOps.exif_transpose(image) - - assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]' - - p = StableDiffusionProcessingImg2Img( - sd_model=shared.sd_model, - outpath_samples=opts.outdir_samples or opts.outdir_img2img_samples, - outpath_grids=opts.outdir_grids or opts.outdir_img2img_grids, - prompt=prompt, - negative_prompt=negative_prompt, - styles=prompt_styles, - seed=seed, - subseed=subseed, - subseed_strength=subseed_strength, - seed_resize_from_h=seed_resize_from_h, - seed_resize_from_w=seed_resize_from_w, - seed_enable_extras=seed_enable_extras, - sampler_name=sd_samplers.samplers_for_img2img[sampler_index].name, - batch_size=batch_size, - n_iter=n_iter, - steps=steps, - cfg_scale=cfg_scale, - width=width, - height=height, - restore_faces=restore_faces, - tiling=tiling, - init_images=[image], - mask=mask, - mask_blur=mask_blur, - inpainting_fill=inpainting_fill, - resize_mode=resize_mode, - denoising_strength=denoising_strength, - image_cfg_scale=image_cfg_scale, - inpaint_full_res=inpaint_full_res, - inpaint_full_res_padding=inpaint_full_res_padding, - inpainting_mask_invert=inpainting_mask_invert, - override_settings=override_settings, - ) - - p.scripts = modules.scripts.scripts_txt2img - p.script_args = args - - if shared.cmd_opts.enable_console_prompts: - print(f"\nimg2img: {prompt}", file=shared.progress_print_out) - - p.extra_generation_params["Mask blur"] = mask_blur - - if is_batch: - assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled" - - process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args) - - processed = Processed(p, [], p.seed, "") - else: - processed = modules.scripts.scripts_img2img.run(p, *args) - if processed is None: - processed = process_images(p) - - p.close() - - shared.total_tqdm.clear() - - generation_info_js = processed.js() - if opts.samples_log_stdout: - print(generation_info_js) - - if opts.do_not_show_images: - processed.images = [] - - return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments) diff --git a/spaces/aodianyun/stable-diffusion-webui/modules/sd_samplers_kdiffusion.py b/spaces/aodianyun/stable-diffusion-webui/modules/sd_samplers_kdiffusion.py deleted file mode 100644 index 9d16fc11b8fc0678c36dadc9cca0de7122f47cee..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/stable-diffusion-webui/modules/sd_samplers_kdiffusion.py +++ /dev/null @@ -1,357 +0,0 @@ -from collections import deque -import torch -import inspect -import einops -import k_diffusion.sampling -from modules import prompt_parser, devices, sd_samplers_common - -from modules.shared import opts, state -import modules.shared as shared -from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback -from modules.script_callbacks import 
CFGDenoisedParams, cfg_denoised_callback - -samplers_k_diffusion = [ - ('Euler a', 'sample_euler_ancestral', ['k_euler_a', 'k_euler_ancestral'], {}), - ('Euler', 'sample_euler', ['k_euler'], {}), - ('LMS', 'sample_lms', ['k_lms'], {}), - ('Heun', 'sample_heun', ['k_heun'], {}), - ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {'discard_next_to_last_sigma': True}), - ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {'discard_next_to_last_sigma': True}), - ('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {}), - ('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}), - ('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {}), - ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}), - ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}), - ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), - ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True}), - ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras', 'discard_next_to_last_sigma': True}), - ('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras'}), - ('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}), - ('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras'}), -] - -samplers_data_k_diffusion = [ - sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options) - for label, funcname, aliases, options in samplers_k_diffusion - if hasattr(k_diffusion.sampling, funcname) -] - -sampler_extra_params = { - 'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'], - 'sample_heun': ['s_churn', 's_tmin', 's_tmax', 's_noise'], - 'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'], -} - - -class CFGDenoiser(torch.nn.Module): - """ - Classifier free guidance denoiser. A wrapper for stable diffusion model (specifically for unet) - that can take a noisy picture and produce a noise-free picture using two guidances (prompts) - instead of one. Originally, the second prompt is just an empty string, but we use non-empty - negative prompt. - """ - - def __init__(self, model): - super().__init__() - self.inner_model = model - self.mask = None - self.nmask = None - self.init_latent = None - self.step = 0 - self.image_cfg_scale = None - - def combine_denoised(self, x_out, conds_list, uncond, cond_scale): - denoised_uncond = x_out[-uncond.shape[0]:] - denoised = torch.clone(denoised_uncond) - - for i, conds in enumerate(conds_list): - for cond_index, weight in conds: - denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale) - - return denoised - - def combine_denoised_for_edit_model(self, x_out, cond_scale): - out_cond, out_img_cond, out_uncond = x_out.chunk(3) - denoised = out_uncond + cond_scale * (out_cond - out_img_cond) + self.image_cfg_scale * (out_img_cond - out_uncond) - - return denoised - - def forward(self, x, sigma, uncond, cond, cond_scale, image_cond): - if state.interrupted or state.skipped: - raise sd_samplers_common.InterruptedException - - # at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling, - # so is_edit_model is set to False to support AND composition. 
- is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0 - - conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) - uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step) - - assert not is_edit_model or all([len(conds) == 1 for conds in conds_list]), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)" - - batch_size = len(conds_list) - repeats = [len(conds_list[i]) for i in range(batch_size)] - - if not is_edit_model: - x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) - sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma]) - image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond]) - else: - x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x]) - sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma]) - image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond] + [torch.zeros_like(self.init_latent)]) - - denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps) - cfg_denoiser_callback(denoiser_params) - x_in = denoiser_params.x - image_cond_in = denoiser_params.image_cond - sigma_in = denoiser_params.sigma - - if tensor.shape[1] == uncond.shape[1]: - if not is_edit_model: - cond_in = torch.cat([tensor, uncond]) - else: - cond_in = torch.cat([tensor, uncond, uncond]) - - if shared.batch_cond_uncond: - x_out = self.inner_model(x_in, sigma_in, cond={"c_crossattn": [cond_in], "c_concat": [image_cond_in]}) - else: - x_out = torch.zeros_like(x_in) - for batch_offset in range(0, x_out.shape[0], batch_size): - a = batch_offset - b = a + batch_size - x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": [cond_in[a:b]], "c_concat": [image_cond_in[a:b]]}) - else: - x_out = torch.zeros_like(x_in) - batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size - for batch_offset in range(0, tensor.shape[0], batch_size): - a = batch_offset - b = min(a + batch_size, tensor.shape[0]) - - if not is_edit_model: - c_crossattn = [tensor[a:b]] - else: - c_crossattn = torch.cat([tensor[a:b]], uncond) - - x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond={"c_crossattn": c_crossattn, "c_concat": [image_cond_in[a:b]]}) - - x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond={"c_crossattn": [uncond], "c_concat": [image_cond_in[-uncond.shape[0]:]]}) - - denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps) - cfg_denoised_callback(denoised_params) - - devices.test_for_nans(x_out, "unet") - - if opts.live_preview_content == "Prompt": - sd_samplers_common.store_latent(x_out[0:uncond.shape[0]]) - elif opts.live_preview_content == "Negative prompt": - sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) - - if not is_edit_model: - denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) - else: - denoised = self.combine_denoised_for_edit_model(x_out, cond_scale) - - if self.mask is not None: - denoised = self.init_latent * self.mask + self.nmask * denoised - - self.step += 1 - - return denoised - - -class TorchHijack: - def __init__(self, sampler_noises): - 
# Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based - # implementation. - self.sampler_noises = deque(sampler_noises) - - def __getattr__(self, item): - if item == 'randn_like': - return self.randn_like - - if hasattr(torch, item): - return getattr(torch, item) - - raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, item)) - - def randn_like(self, x): - if self.sampler_noises: - noise = self.sampler_noises.popleft() - if noise.shape == x.shape: - return noise - - if x.device.type == 'mps': - return torch.randn_like(x, device=devices.cpu).to(x.device) - else: - return torch.randn_like(x) - - -class KDiffusionSampler: - def __init__(self, funcname, sd_model): - denoiser = k_diffusion.external.CompVisVDenoiser if sd_model.parameterization == "v" else k_diffusion.external.CompVisDenoiser - - self.model_wrap = denoiser(sd_model, quantize=shared.opts.enable_quantization) - self.funcname = funcname - self.func = getattr(k_diffusion.sampling, self.funcname) - self.extra_params = sampler_extra_params.get(funcname, []) - self.model_wrap_cfg = CFGDenoiser(self.model_wrap) - self.sampler_noises = None - self.stop_at = None - self.eta = None - self.config = None - self.last_latent = None - - self.conditioning_key = sd_model.model.conditioning_key - - def callback_state(self, d): - step = d['i'] - latent = d["denoised"] - if opts.live_preview_content == "Combined": - sd_samplers_common.store_latent(latent) - self.last_latent = latent - - if self.stop_at is not None and step > self.stop_at: - raise sd_samplers_common.InterruptedException - - state.sampling_step = step - shared.total_tqdm.update() - - def launch_sampling(self, steps, func): - state.sampling_steps = steps - state.sampling_step = 0 - - try: - return func() - except sd_samplers_common.InterruptedException: - return self.last_latent - - def number_of_needed_noises(self, p): - return p.steps - - def initialize(self, p): - self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None - self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None - self.model_wrap_cfg.step = 0 - self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None) - self.eta = p.eta if p.eta is not None else opts.eta_ancestral - - k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else []) - - extra_params_kwargs = {} - for param_name in self.extra_params: - if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters: - extra_params_kwargs[param_name] = getattr(p, param_name) - - if 'eta' in inspect.signature(self.func).parameters: - if self.eta != 1.0: - p.extra_generation_params["Eta"] = self.eta - - extra_params_kwargs['eta'] = self.eta - - return extra_params_kwargs - - def get_sigmas(self, p, steps): - discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False) - if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma: - discard_next_to_last_sigma = True - p.extra_generation_params["Discard penultimate sigma"] = True - - steps += 1 if discard_next_to_last_sigma else 0 - - if p.sampler_noise_scheduler_override: - sigmas = p.sampler_noise_scheduler_override(steps) - elif self.config is not None and self.config.options.get('scheduler', None) == 'karras': - sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) - - sigmas = 
k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device) - else: - sigmas = self.model_wrap.get_sigmas(steps) - - if discard_next_to_last_sigma: - sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) - - return sigmas - - def create_noise_sampler(self, x, sigmas, p): - """For DPM++ SDE: manually create noise sampler to enable deterministic results across different batch sizes""" - if shared.opts.no_dpmpp_sde_batch_determinism: - return None - - from k_diffusion.sampling import BrownianTreeNoiseSampler - sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max() - current_iter_seeds = p.all_seeds[p.iteration * p.batch_size:(p.iteration + 1) * p.batch_size] - return BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=current_iter_seeds) - - def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): - steps, t_enc = sd_samplers_common.setup_img2img_steps(p, steps) - - sigmas = self.get_sigmas(p, steps) - - sigma_sched = sigmas[steps - t_enc - 1:] - xi = x + noise * sigma_sched[0] - - extra_params_kwargs = self.initialize(p) - parameters = inspect.signature(self.func).parameters - - if 'sigma_min' in parameters: - ## last sigma is zero which isn't allowed by DPM Fast & Adaptive so taking value before last - extra_params_kwargs['sigma_min'] = sigma_sched[-2] - if 'sigma_max' in parameters: - extra_params_kwargs['sigma_max'] = sigma_sched[0] - if 'n' in parameters: - extra_params_kwargs['n'] = len(sigma_sched) - 1 - if 'sigma_sched' in parameters: - extra_params_kwargs['sigma_sched'] = sigma_sched - if 'sigmas' in parameters: - extra_params_kwargs['sigmas'] = sigma_sched - - if self.funcname == 'sample_dpmpp_sde': - noise_sampler = self.create_noise_sampler(x, sigmas, p) - extra_params_kwargs['noise_sampler'] = noise_sampler - - self.model_wrap_cfg.init_latent = x - self.last_latent = x - extra_args={ - 'cond': conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, - 'cond_scale': p.cfg_scale, - } - - samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) - - return samples - - def sample(self, p, x, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): - steps = steps or p.steps - - sigmas = self.get_sigmas(p, steps) - - x = x * sigmas[0] - - extra_params_kwargs = self.initialize(p) - parameters = inspect.signature(self.func).parameters - - if 'sigma_min' in parameters: - extra_params_kwargs['sigma_min'] = self.model_wrap.sigmas[0].item() - extra_params_kwargs['sigma_max'] = self.model_wrap.sigmas[-1].item() - if 'n' in parameters: - extra_params_kwargs['n'] = steps - else: - extra_params_kwargs['sigmas'] = sigmas - - if self.funcname == 'sample_dpmpp_sde': - noise_sampler = self.create_noise_sampler(x, sigmas, p) - extra_params_kwargs['noise_sampler'] = noise_sampler - - self.last_latent = x - samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ - 'cond': conditioning, - 'image_cond': image_conditioning, - 'uncond': unconditional_conditioning, - 'cond_scale': p.cfg_scale - }, disable=False, callback=self.callback_state, **extra_params_kwargs)) - - return samples - diff --git a/spaces/arnavkartikeya/SCRIPture-final/models/blip_itm.py b/spaces/arnavkartikeya/SCRIPture-final/models/blip_itm.py deleted file mode 100644 index 
cf354c829564bf5a1f56089a2d745093d51e0fa2..0000000000000000000000000000000000000000 --- a/spaces/arnavkartikeya/SCRIPture-final/models/blip_itm.py +++ /dev/null @@ -1,76 +0,0 @@ -from models.med import BertConfig, BertModel -from transformers import BertTokenizer - -import torch -from torch import nn -import torch.nn.functional as F - -from models.blip import create_vit, init_tokenizer, load_checkpoint - -class BLIP_ITM(nn.Module): - def __init__(self, - med_config = 'configs/med_config.json', - image_size = 384, - vit = 'base', - vit_grad_ckpt = False, - vit_ckpt_layer = 0, - embed_dim = 256, - ): - """ - Args: - med_config (str): path for the mixture of encoder-decoder model's configuration file - image_size (int): input image size - vit (str): model size of vision transformer - """ - super().__init__() - - self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer) - self.tokenizer = init_tokenizer() - med_config = BertConfig.from_json_file(med_config) - med_config.encoder_width = vision_width - self.text_encoder = BertModel(config=med_config, add_pooling_layer=False) - - text_width = self.text_encoder.config.hidden_size - - self.vision_proj = nn.Linear(vision_width, embed_dim) - self.text_proj = nn.Linear(text_width, embed_dim) - - self.itm_head = nn.Linear(text_width, 2) - - - def forward(self, image, caption, match_head='itm'): - - image_embeds = self.visual_encoder(image) - image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device) - - text = self.tokenizer(caption, padding='max_length', truncation=True, max_length=35, - return_tensors="pt").to(image.device) - - - if match_head=='itm': - output = self.text_encoder(text.input_ids, - attention_mask = text.attention_mask, - encoder_hidden_states = image_embeds, - encoder_attention_mask = image_atts, - return_dict = True, - ) - itm_output = self.itm_head(output.last_hidden_state[:,0,:]) - return itm_output - - elif match_head=='itc': - text_output = self.text_encoder(text.input_ids, attention_mask = text.attention_mask, - return_dict = True, mode = 'text') - image_feat = F.normalize(self.vision_proj(image_embeds[:,0,:]),dim=-1) - text_feat = F.normalize(self.text_proj(text_output.last_hidden_state[:,0,:]),dim=-1) - - sim = image_feat @ text_feat.t() - return sim - - -def blip_itm(pretrained='',**kwargs): - model = BLIP_ITM(**kwargs) - if pretrained: - model,msg = load_checkpoint(model,pretrained) - assert(len(msg.missing_keys)==0) - return model - \ No newline at end of file diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Plex/Traditional.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Plex/Traditional.py deleted file mode 100644 index ec7252daed9963acc16369418152755e9e8eca30..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Plex/Traditional.py +++ /dev/null @@ -1,158 +0,0 @@ -#======================================================================= -# -# Python Lexical Analyser -# -# Traditional Regular Expression Syntax -# -#======================================================================= - -from __future__ import absolute_import - -from .Regexps import Alt, Seq, Rep, Rep1, Opt, Any, AnyBut, Bol, Eol, Char -from .Errors import PlexError - - -class RegexpSyntaxError(PlexError): - pass - - -def re(s): - """ - Convert traditional string representation of regular expression |s| - into Plex representation. 
- """ - return REParser(s).parse_re() - - -class REParser(object): - def __init__(self, s): - self.s = s - self.i = -1 - self.end = 0 - self.next() - - def parse_re(self): - re = self.parse_alt() - if not self.end: - self.error("Unexpected %s" % repr(self.c)) - return re - - def parse_alt(self): - """Parse a set of alternative regexps.""" - re = self.parse_seq() - if self.c == '|': - re_list = [re] - while self.c == '|': - self.next() - re_list.append(self.parse_seq()) - re = Alt(*re_list) - return re - - def parse_seq(self): - """Parse a sequence of regexps.""" - re_list = [] - while not self.end and not self.c in "|)": - re_list.append(self.parse_mod()) - return Seq(*re_list) - - def parse_mod(self): - """Parse a primitive regexp followed by *, +, ? modifiers.""" - re = self.parse_prim() - while not self.end and self.c in "*+?": - if self.c == '*': - re = Rep(re) - elif self.c == '+': - re = Rep1(re) - else: # self.c == '?' - re = Opt(re) - self.next() - return re - - def parse_prim(self): - """Parse a primitive regexp.""" - c = self.get() - if c == '.': - re = AnyBut("\n") - elif c == '^': - re = Bol - elif c == '$': - re = Eol - elif c == '(': - re = self.parse_alt() - self.expect(')') - elif c == '[': - re = self.parse_charset() - self.expect(']') - else: - if c == '\\': - c = self.get() - re = Char(c) - return re - - def parse_charset(self): - """Parse a charset. Does not include the surrounding [].""" - char_list = [] - invert = 0 - if self.c == '^': - invert = 1 - self.next() - if self.c == ']': - char_list.append(']') - self.next() - while not self.end and self.c != ']': - c1 = self.get() - if self.c == '-' and self.lookahead(1) != ']': - self.next() - c2 = self.get() - for a in range(ord(c1), ord(c2) + 1): - char_list.append(chr(a)) - else: - char_list.append(c1) - chars = ''.join(char_list) - if invert: - return AnyBut(chars) - else: - return Any(chars) - - def next(self): - """Advance to the next char.""" - s = self.s - i = self.i = self.i + 1 - if i < len(s): - self.c = s[i] - else: - self.c = '' - self.end = 1 - - def get(self): - if self.end: - self.error("Premature end of string") - c = self.c - self.next() - return c - - def lookahead(self, n): - """Look ahead n chars.""" - j = self.i + n - if j < len(self.s): - return self.s[j] - else: - return '' - - def expect(self, c): - """ - Expect to find character |c| at current position. - Raises an exception otherwise. - """ - if self.c == c: - self.next() - else: - self.error("Missing %s" % repr(c)) - - def error(self, mess): - """Raise exception to signal syntax error in regexp.""" - raise RegexpSyntaxError("Syntax error in regexp %s at position %d: %s" % ( - repr(self.s), self.i, mess)) - - - diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/PIL/ImageTransform.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/PIL/ImageTransform.py deleted file mode 100644 index 7881f0d262b0db7ecaed224ee2268f3b69b836c9..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/PIL/ImageTransform.py +++ /dev/null @@ -1,102 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# transform wrappers -# -# History: -# 2002-04-08 fl Created -# -# Copyright (c) 2002 by Secret Labs AB -# Copyright (c) 2002 by Fredrik Lundh -# -# See the README file for information on usage and redistribution. -# - -from . 
import Image - - -class Transform(Image.ImageTransformHandler): - def __init__(self, data): - self.data = data - - def getdata(self): - return self.method, self.data - - def transform(self, size, image, **options): - # can be overridden - method, data = self.getdata() - return image.transform(size, method, data, **options) - - -class AffineTransform(Transform): - """ - Define an affine image transform. - - This function takes a 6-tuple (a, b, c, d, e, f) which contain the first - two rows from an affine transform matrix. For each pixel (x, y) in the - output image, the new value is taken from a position (a x + b y + c, - d x + e y + f) in the input image, rounded to nearest pixel. - - This function can be used to scale, translate, rotate, and shear the - original image. - - See :py:meth:`~PIL.Image.Image.transform` - - :param matrix: A 6-tuple (a, b, c, d, e, f) containing the first two rows - from an affine transform matrix. - """ - - method = Image.Transform.AFFINE - - -class ExtentTransform(Transform): - """ - Define a transform to extract a subregion from an image. - - Maps a rectangle (defined by two corners) from the image to a rectangle of - the given size. The resulting image will contain data sampled from between - the corners, such that (x0, y0) in the input image will end up at (0,0) in - the output image, and (x1, y1) at size. - - This method can be used to crop, stretch, shrink, or mirror an arbitrary - rectangle in the current image. It is slightly slower than crop, but about - as fast as a corresponding resize operation. - - See :py:meth:`~PIL.Image.Image.transform` - - :param bbox: A 4-tuple (x0, y0, x1, y1) which specifies two points in the - input image's coordinate system. See :ref:`coordinate-system`. - """ - - method = Image.Transform.EXTENT - - -class QuadTransform(Transform): - """ - Define a quad image transform. - - Maps a quadrilateral (a region defined by four corners) from the image to a - rectangle of the given size. - - See :py:meth:`~PIL.Image.Image.transform` - - :param xy: An 8-tuple (x0, y0, x1, y1, x2, y2, x3, y3) which contain the - upper left, lower left, lower right, and upper right corner of the - source quadrilateral. - """ - - method = Image.Transform.QUAD - - -class MeshTransform(Transform): - """ - Define a mesh image transform. A mesh transform consists of one or more - individual quad transforms. - - See :py:meth:`~PIL.Image.Image.transform` - - :param data: A list of (bbox, quad) tuples. - """ - - method = Image.Transform.MESH diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/absl/flags/_defines.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/absl/flags/_defines.py deleted file mode 100644 index 61354e94837699f2b2e91c7c67d98df4b1e78dc7..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/absl/flags/_defines.py +++ /dev/null @@ -1,933 +0,0 @@ -# Copyright 2017 The Abseil Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""This modules contains flags DEFINE functions. - -Do NOT import this module directly. Import the flags package and use the -aliases defined at the package level instead. -""" - -import sys -import types - -from absl.flags import _argument_parser -from absl.flags import _exceptions -from absl.flags import _flag -from absl.flags import _flagvalues -from absl.flags import _helpers -from absl.flags import _validators - -# pylint: disable=unused-import -try: - from typing import Text, List, Any -except ImportError: - pass - -try: - import enum -except ImportError: - pass -# pylint: enable=unused-import - -_helpers.disclaim_module_ids.add(id(sys.modules[__name__])) - - -def _register_bounds_validator_if_needed(parser, name, flag_values): - """Enforces lower and upper bounds for numeric flags. - - Args: - parser: NumericParser (either FloatParser or IntegerParser), provides lower - and upper bounds, and help text to display. - name: str, name of the flag - flag_values: FlagValues. - """ - if parser.lower_bound is not None or parser.upper_bound is not None: - - def checker(value): - if value is not None and parser.is_outside_bounds(value): - message = '%s is not %s' % (value, parser.syntactic_help) - raise _exceptions.ValidationError(message) - return True - - _validators.register_validator(name, checker, flag_values=flag_values) - - -def DEFINE( # pylint: disable=invalid-name - parser, - name, - default, - help, # pylint: disable=redefined-builtin - flag_values=_flagvalues.FLAGS, - serializer=None, - module_name=None, - required=False, - **args): - """Registers a generic Flag object. - - NOTE: in the docstrings of all DEFINE* functions, "registers" is short - for "creates a new flag and registers it". - - Auxiliary function: clients should use the specialized ``DEFINE_`` - function instead. - - Args: - parser: :class:`ArgumentParser`, used to parse the flag arguments. - name: str, the flag name. - default: The default value of the flag. - help: str, the help message. - flag_values: :class:`FlagValues`, the FlagValues instance with which the - flag will be registered. This should almost never need to be overridden. - serializer: :class:`ArgumentSerializer`, the flag serializer instance. - module_name: str, the name of the Python module declaring this flag. If not - provided, it will be computed using the stack trace of this call. - required: bool, is this a required flag. This must be used as a keyword - argument. - **args: dict, the extra keyword args that are passed to ``Flag.__init__``. - - Returns: - a handle to defined flag. - """ - return DEFINE_flag( - _flag.Flag(parser, serializer, name, default, help, **args), flag_values, - module_name, required) - - -def DEFINE_flag( # pylint: disable=invalid-name - flag, - flag_values=_flagvalues.FLAGS, - module_name=None, - required=False): - """Registers a :class:`Flag` object with a :class:`FlagValues` object. - - By default, the global :const:`FLAGS` ``FlagValue`` object is used. - - Typical users will use one of the more specialized DEFINE_xxx - functions, such as :func:`DEFINE_string` or :func:`DEFINE_integer`. But - developers who need to create :class:`Flag` objects themselves should use - this function to register their flags. - - Args: - flag: :class:`Flag`, a flag that is key to the module. - flag_values: :class:`FlagValues`, the ``FlagValues`` instance with which the - flag will be registered. This should almost never need to be overridden. - module_name: str, the name of the Python module declaring this flag. 
If not - provided, it will be computed using the stack trace of this call. - required: bool, is this a required flag. This must be used as a keyword - argument. - - Returns: - a handle to defined flag. - """ - if required and flag.default is not None: - raise ValueError('Required flag --%s cannot have a non-None default' % - flag.name) - # Copying the reference to flag_values prevents pychecker warnings. - fv = flag_values - fv[flag.name] = flag - # Tell flag_values who's defining the flag. - if module_name: - module = sys.modules.get(module_name) - else: - module, module_name = _helpers.get_calling_module_object_and_name() - flag_values.register_flag_by_module(module_name, flag) - flag_values.register_flag_by_module_id(id(module), flag) - if required: - _validators.mark_flag_as_required(flag.name, fv) - ensure_non_none_value = (flag.default is not None) or required - return _flagvalues.FlagHolder( - fv, flag, ensure_non_none_value=ensure_non_none_value) - - -def set_default(flag_holder, value): - """Changes the default value of the provided flag object. - - The flag's current value is also updated if the flag is currently using - the default value, i.e. not specified in the command line, and not set - by FLAGS.name = value. - - Args: - flag_holder: FlagHolder, the flag to modify. - value: The new default value. - - Raises: - IllegalFlagValueError: Raised when value is not valid. - """ - flag_holder._flagvalues.set_default(flag_holder.name, value) # pylint: disable=protected-access - - -def _internal_declare_key_flags(flag_names, - flag_values=_flagvalues.FLAGS, - key_flag_values=None): - """Declares a flag as key for the calling module. - - Internal function. User code should call declare_key_flag or - adopt_module_key_flags instead. - - Args: - flag_names: [str], a list of names of already-registered Flag objects. - flag_values: :class:`FlagValues`, the FlagValues instance with which the - flags listed in flag_names have registered (the value of the flag_values - argument from the ``DEFINE_*`` calls that defined those flags). This - should almost never need to be overridden. - key_flag_values: :class:`FlagValues`, the FlagValues instance that (among - possibly many other things) keeps track of the key flags for each module. - Default ``None`` means "same as flag_values". This should almost never - need to be overridden. - - Raises: - UnrecognizedFlagError: Raised when the flag is not defined. - """ - key_flag_values = key_flag_values or flag_values - - module = _helpers.get_calling_module() - - for flag_name in flag_names: - key_flag_values.register_key_flag_for_module(module, flag_values[flag_name]) - - -def declare_key_flag(flag_name, flag_values=_flagvalues.FLAGS): - """Declares one flag as key to the current module. - - Key flags are flags that are deemed really important for a module. - They are important when listing help messages; e.g., if the - --helpshort command-line flag is used, then only the key flags of the - main module are listed (instead of all flags, as in the case of - --helpfull). - - Sample usage:: - - flags.declare_key_flag('flag_1') - - Args: - flag_name: str | :class:`FlagHolder`, the name or holder of an already - declared flag. (Redeclaring flags as key, including flags implicitly key - because they were declared in this module, is a no-op.) - Positional-only parameter. - flag_values: :class:`FlagValues`, the FlagValues instance in which the - flag will be declared as a key flag. This should almost never need to be - overridden. 
- - Raises: - ValueError: Raised if flag_name not defined as a Python flag. - """ - flag_name, flag_values = _flagvalues.resolve_flag_ref(flag_name, flag_values) - if flag_name in _helpers.SPECIAL_FLAGS: - # Take care of the special flags, e.g., --flagfile, --undefok. - # These flags are defined in SPECIAL_FLAGS, and are treated - # specially during flag parsing, taking precedence over the - # user-defined flags. - _internal_declare_key_flags([flag_name], - flag_values=_helpers.SPECIAL_FLAGS, - key_flag_values=flag_values) - return - try: - _internal_declare_key_flags([flag_name], flag_values=flag_values) - except KeyError: - raise ValueError('Flag --%s is undefined. To set a flag as a key flag ' - 'first define it in Python.' % flag_name) - - -def adopt_module_key_flags(module, flag_values=_flagvalues.FLAGS): - """Declares that all flags key to a module are key to the current module. - - Args: - module: module, the module object from which all key flags will be declared - as key flags to the current module. - flag_values: :class:`FlagValues`, the FlagValues instance in which the - flags will be declared as key flags. This should almost never need to be - overridden. - - Raises: - Error: Raised when given an argument that is a module name (a string), - instead of a module object. - """ - if not isinstance(module, types.ModuleType): - raise _exceptions.Error('Expected a module object, not %r.' % (module,)) - _internal_declare_key_flags( - [f.name for f in flag_values.get_key_flags_for_module(module.__name__)], - flag_values=flag_values) - # If module is this flag module, take _helpers.SPECIAL_FLAGS into account. - if module == _helpers.FLAGS_MODULE: - _internal_declare_key_flags( - # As we associate flags with get_calling_module_object_and_name(), the - # special flags defined in this module are incorrectly registered with - # a different module. So, we can't use get_key_flags_for_module. - # Instead, we take all flags from _helpers.SPECIAL_FLAGS (a private - # FlagValues, where no other module should register flags). - [_helpers.SPECIAL_FLAGS[name].name for name in _helpers.SPECIAL_FLAGS], - flag_values=_helpers.SPECIAL_FLAGS, - key_flag_values=flag_values) - - -def disclaim_key_flags(): - """Declares that the current module will not define any more key flags. - - Normally, the module that calls the DEFINE_xxx functions claims the - flag to be its key flag. This is undesirable for modules that - define additional DEFINE_yyy functions with its own flag parsers and - serializers, since that module will accidentally claim flags defined - by DEFINE_yyy as its key flags. After calling this function, the - module disclaims flag definitions thereafter, so the key flags will - be correctly attributed to the caller of DEFINE_yyy. - - After calling this function, the module will not be able to define - any more flags. This function will affect all FlagValues objects. 
- """ - globals_for_caller = sys._getframe(1).f_globals # pylint: disable=protected-access - module, _ = _helpers.get_module_object_and_name(globals_for_caller) - _helpers.disclaim_module_ids.add(id(module)) - - -def DEFINE_string( # pylint: disable=invalid-name,redefined-builtin - name, - default, - help, - flag_values=_flagvalues.FLAGS, - required=False, - **args): - """Registers a flag whose value can be any string.""" - parser = _argument_parser.ArgumentParser() - serializer = _argument_parser.ArgumentSerializer() - return DEFINE( - parser, - name, - default, - help, - flag_values, - serializer, - required=required, - **args) - - -def DEFINE_boolean( # pylint: disable=invalid-name,redefined-builtin - name, - default, - help, - flag_values=_flagvalues.FLAGS, - module_name=None, - required=False, - **args): - """Registers a boolean flag. - - Such a boolean flag does not take an argument. If a user wants to - specify a false value explicitly, the long option beginning with 'no' - must be used: i.e. --noflag - - This flag will have a value of None, True or False. None is possible - if default=None and the user does not specify the flag on the command - line. - - Args: - name: str, the flag name. - default: bool|str|None, the default value of the flag. - help: str, the help message. - flag_values: :class:`FlagValues`, the FlagValues instance with which the - flag will be registered. This should almost never need to be overridden. - module_name: str, the name of the Python module declaring this flag. If not - provided, it will be computed using the stack trace of this call. - required: bool, is this a required flag. This must be used as a keyword - argument. - **args: dict, the extra keyword args that are passed to ``Flag.__init__``. - - Returns: - a handle to defined flag. - """ - return DEFINE_flag( - _flag.BooleanFlag(name, default, help, **args), flag_values, module_name, - required) - - -def DEFINE_float( # pylint: disable=invalid-name,redefined-builtin - name, - default, - help, - lower_bound=None, - upper_bound=None, - flag_values=_flagvalues.FLAGS, - required=False, - **args): - """Registers a flag whose value must be a float. - - If ``lower_bound`` or ``upper_bound`` are set, then this flag must be - within the given range. - - Args: - name: str, the flag name. - default: float|str|None, the default value of the flag. - help: str, the help message. - lower_bound: float, min value of the flag. - upper_bound: float, max value of the flag. - flag_values: :class:`FlagValues`, the FlagValues instance with which the - flag will be registered. This should almost never need to be overridden. - required: bool, is this a required flag. This must be used as a keyword - argument. - **args: dict, the extra keyword args that are passed to :func:`DEFINE`. - - Returns: - a handle to defined flag. - """ - parser = _argument_parser.FloatParser(lower_bound, upper_bound) - serializer = _argument_parser.ArgumentSerializer() - result = DEFINE( - parser, - name, - default, - help, - flag_values, - serializer, - required=required, - **args) - _register_bounds_validator_if_needed(parser, name, flag_values=flag_values) - return result - - -def DEFINE_integer( # pylint: disable=invalid-name,redefined-builtin - name, - default, - help, - lower_bound=None, - upper_bound=None, - flag_values=_flagvalues.FLAGS, - required=False, - **args): - """Registers a flag whose value must be an integer. - - If ``lower_bound``, or ``upper_bound`` are set, then this flag must be - within the given range. 
- - Args: - name: str, the flag name. - default: int|str|None, the default value of the flag. - help: str, the help message. - lower_bound: int, min value of the flag. - upper_bound: int, max value of the flag. - flag_values: :class:`FlagValues`, the FlagValues instance with which the - flag will be registered. This should almost never need to be overridden. - required: bool, is this a required flag. This must be used as a keyword - argument. - **args: dict, the extra keyword args that are passed to :func:`DEFINE`. - - Returns: - a handle to defined flag. - """ - parser = _argument_parser.IntegerParser(lower_bound, upper_bound) - serializer = _argument_parser.ArgumentSerializer() - result = DEFINE( - parser, - name, - default, - help, - flag_values, - serializer, - required=required, - **args) - _register_bounds_validator_if_needed(parser, name, flag_values=flag_values) - return result - - -def DEFINE_enum( # pylint: disable=invalid-name,redefined-builtin - name, - default, - enum_values, - help, - flag_values=_flagvalues.FLAGS, - module_name=None, - required=False, - **args): - """Registers a flag whose value can be any string from enum_values. - - Instead of a string enum, prefer `DEFINE_enum_class`, which allows - defining enums from an `enum.Enum` class. - - Args: - name: str, the flag name. - default: str|None, the default value of the flag. - enum_values: [str], a non-empty list of strings with the possible values for - the flag. - help: str, the help message. - flag_values: :class:`FlagValues`, the FlagValues instance with which the - flag will be registered. This should almost never need to be overridden. - module_name: str, the name of the Python module declaring this flag. If not - provided, it will be computed using the stack trace of this call. - required: bool, is this a required flag. This must be used as a keyword - argument. - **args: dict, the extra keyword args that are passed to ``Flag.__init__``. - - Returns: - a handle to defined flag. - """ - return DEFINE_flag( - _flag.EnumFlag(name, default, help, enum_values, **args), flag_values, - module_name, required) - - -def DEFINE_enum_class( # pylint: disable=invalid-name,redefined-builtin - name, - default, - enum_class, - help, - flag_values=_flagvalues.FLAGS, - module_name=None, - case_sensitive=False, - required=False, - **args): - """Registers a flag whose value can be the name of enum members. - - Args: - name: str, the flag name. - default: Enum|str|None, the default value of the flag. - enum_class: class, the Enum class with all the possible values for the flag. - help: str, the help message. - flag_values: :class:`FlagValues`, the FlagValues instance with which the - flag will be registered. This should almost never need to be overridden. - module_name: str, the name of the Python module declaring this flag. If not - provided, it will be computed using the stack trace of this call. - case_sensitive: bool, whether to map strings to members of the enum_class - without considering case. - required: bool, is this a required flag. This must be used as a keyword - argument. - **args: dict, the extra keyword args that are passed to ``Flag.__init__``. - - Returns: - a handle to defined flag. 
- """ - return DEFINE_flag( - _flag.EnumClassFlag( - name, - default, - help, - enum_class, - case_sensitive=case_sensitive, - **args), flag_values, module_name, required) - - -def DEFINE_list( # pylint: disable=invalid-name,redefined-builtin - name, - default, - help, - flag_values=_flagvalues.FLAGS, - required=False, - **args): - """Registers a flag whose value is a comma-separated list of strings. - - The flag value is parsed with a CSV parser. - - Args: - name: str, the flag name. - default: list|str|None, the default value of the flag. - help: str, the help message. - flag_values: :class:`FlagValues`, the FlagValues instance with which the - flag will be registered. This should almost never need to be overridden. - required: bool, is this a required flag. This must be used as a keyword - argument. - **args: Dictionary with extra keyword args that are passed to the - ``Flag.__init__``. - - Returns: - a handle to defined flag. - """ - parser = _argument_parser.ListParser() - serializer = _argument_parser.CsvListSerializer(',') - return DEFINE( - parser, - name, - default, - help, - flag_values, - serializer, - required=required, - **args) - - -def DEFINE_spaceseplist( # pylint: disable=invalid-name,redefined-builtin - name, - default, - help, - comma_compat=False, - flag_values=_flagvalues.FLAGS, - required=False, - **args): - """Registers a flag whose value is a whitespace-separated list of strings. - - Any whitespace can be used as a separator. - - Args: - name: str, the flag name. - default: list|str|None, the default value of the flag. - help: str, the help message. - comma_compat: bool - Whether to support comma as an additional separator. If - false then only whitespace is supported. This is intended only for - backwards compatibility with flags that used to be comma-separated. - flag_values: :class:`FlagValues`, the FlagValues instance with which the - flag will be registered. This should almost never need to be overridden. - required: bool, is this a required flag. This must be used as a keyword - argument. - **args: Dictionary with extra keyword args that are passed to the - ``Flag.__init__``. - - Returns: - a handle to defined flag. - """ - parser = _argument_parser.WhitespaceSeparatedListParser( - comma_compat=comma_compat) - serializer = _argument_parser.ListSerializer(' ') - return DEFINE( - parser, - name, - default, - help, - flag_values, - serializer, - required=required, - **args) - - -def DEFINE_multi( # pylint: disable=invalid-name,redefined-builtin - parser, - serializer, - name, - default, - help, - flag_values=_flagvalues.FLAGS, - module_name=None, - required=False, - **args): - """Registers a generic MultiFlag that parses its args with a given parser. - - Auxiliary function. Normal users should NOT use it directly. - - Developers who need to create their own 'Parser' classes for options - which can appear multiple times can call this module function to - register their flags. - - Args: - parser: ArgumentParser, used to parse the flag arguments. - serializer: ArgumentSerializer, the flag serializer instance. - name: str, the flag name. - default: Union[Iterable[T], Text, None], the default value of the flag. If - the value is text, it will be parsed as if it was provided from the - command line. If the value is a non-string iterable, it will be iterated - over to create a shallow copy of the values. If it is None, it is left - as-is. - help: str, the help message. - flag_values: :class:`FlagValues`, the FlagValues instance with which the - flag will be registered. 
This should almost never need to be overridden. - module_name: A string, the name of the Python module declaring this flag. If - not provided, it will be computed using the stack trace of this call. - required: bool, is this a required flag. This must be used as a keyword - argument. - **args: Dictionary with extra keyword args that are passed to the - ``Flag.__init__``. - - Returns: - a handle to defined flag. - """ - return DEFINE_flag( - _flag.MultiFlag(parser, serializer, name, default, help, **args), - flag_values, module_name, required) - - -def DEFINE_multi_string( # pylint: disable=invalid-name,redefined-builtin - name, - default, - help, - flag_values=_flagvalues.FLAGS, - required=False, - **args): - """Registers a flag whose value can be a list of any strings. - - Use the flag on the command line multiple times to place multiple - string values into the list. The 'default' may be a single string - (which will be converted into a single-element list) or a list of - strings. - - - Args: - name: str, the flag name. - default: Union[Iterable[Text], Text, None], the default value of the flag; - see :func:`DEFINE_multi`. - help: str, the help message. - flag_values: :class:`FlagValues`, the FlagValues instance with which the - flag will be registered. This should almost never need to be overridden. - required: bool, is this a required flag. This must be used as a keyword - argument. - **args: Dictionary with extra keyword args that are passed to the - ``Flag.__init__``. - - Returns: - a handle to defined flag. - """ - parser = _argument_parser.ArgumentParser() - serializer = _argument_parser.ArgumentSerializer() - return DEFINE_multi( - parser, - serializer, - name, - default, - help, - flag_values, - required=required, - **args) - - -def DEFINE_multi_integer( # pylint: disable=invalid-name,redefined-builtin - name, - default, - help, - lower_bound=None, - upper_bound=None, - flag_values=_flagvalues.FLAGS, - required=False, - **args): - """Registers a flag whose value can be a list of arbitrary integers. - - Use the flag on the command line multiple times to place multiple - integer values into the list. The 'default' may be a single integer - (which will be converted into a single-element list) or a list of - integers. - - Args: - name: str, the flag name. - default: Union[Iterable[int], Text, None], the default value of the flag; - see `DEFINE_multi`. - help: str, the help message. - lower_bound: int, min values of the flag. - upper_bound: int, max values of the flag. - flag_values: :class:`FlagValues`, the FlagValues instance with which the - flag will be registered. This should almost never need to be overridden. - required: bool, is this a required flag. This must be used as a keyword - argument. - **args: Dictionary with extra keyword args that are passed to the - ``Flag.__init__``. - - Returns: - a handle to defined flag. - """ - parser = _argument_parser.IntegerParser(lower_bound, upper_bound) - serializer = _argument_parser.ArgumentSerializer() - return DEFINE_multi( - parser, - serializer, - name, - default, - help, - flag_values, - required=required, - **args) - - -def DEFINE_multi_float( # pylint: disable=invalid-name,redefined-builtin - name, - default, - help, - lower_bound=None, - upper_bound=None, - flag_values=_flagvalues.FLAGS, - required=False, - **args): - """Registers a flag whose value can be a list of arbitrary floats. - - Use the flag on the command line multiple times to place multiple - float values into the list. 
The 'default' may be a single float - (which will be converted into a single-element list) or a list of - floats. - - Args: - name: str, the flag name. - default: Union[Iterable[float], Text, None], the default value of the flag; - see `DEFINE_multi`. - help: str, the help message. - lower_bound: float, min values of the flag. - upper_bound: float, max values of the flag. - flag_values: :class:`FlagValues`, the FlagValues instance with which the - flag will be registered. This should almost never need to be overridden. - required: bool, is this a required flag. This must be used as a keyword - argument. - **args: Dictionary with extra keyword args that are passed to the - ``Flag.__init__``. - - Returns: - a handle to defined flag. - """ - parser = _argument_parser.FloatParser(lower_bound, upper_bound) - serializer = _argument_parser.ArgumentSerializer() - return DEFINE_multi( - parser, - serializer, - name, - default, - help, - flag_values, - required=required, - **args) - - -def DEFINE_multi_enum( # pylint: disable=invalid-name,redefined-builtin - name, - default, - enum_values, - help, - flag_values=_flagvalues.FLAGS, - case_sensitive=True, - required=False, - **args): - """Registers a flag whose value can be a list strings from enum_values. - - Use the flag on the command line multiple times to place multiple - enum values into the list. The 'default' may be a single string - (which will be converted into a single-element list) or a list of - strings. - - Args: - name: str, the flag name. - default: Union[Iterable[Text], Text, None], the default value of the flag; - see `DEFINE_multi`. - enum_values: [str], a non-empty list of strings with the possible values for - the flag. - help: str, the help message. - flag_values: :class:`FlagValues`, the FlagValues instance with which the - flag will be registered. This should almost never need to be overridden. - case_sensitive: Whether or not the enum is to be case-sensitive. - required: bool, is this a required flag. This must be used as a keyword - argument. - **args: Dictionary with extra keyword args that are passed to the - ``Flag.__init__``. - - Returns: - a handle to defined flag. - """ - parser = _argument_parser.EnumParser(enum_values, case_sensitive) - serializer = _argument_parser.ArgumentSerializer() - return DEFINE_multi( - parser, - serializer, - name, - default, - '<%s>: %s' % ('|'.join(enum_values), help), - flag_values, - required=required, - **args) - - -def DEFINE_multi_enum_class( # pylint: disable=invalid-name,redefined-builtin - name, - default, - enum_class, - help, - flag_values=_flagvalues.FLAGS, - module_name=None, - case_sensitive=False, - required=False, - **args): - """Registers a flag whose value can be a list of enum members. - - Use the flag on the command line multiple times to place multiple - enum values into the list. - - Args: - name: str, the flag name. - default: Union[Iterable[Enum], Iterable[Text], Enum, Text, None], the - default value of the flag; see `DEFINE_multi`; only differences are - documented here. If the value is a single Enum, it is treated as a - single-item list of that Enum value. If it is an iterable, text values - within the iterable will be converted to the equivalent Enum objects. - enum_class: class, the Enum class with all the possible values for the flag. - help: str, the help message. - flag_values: :class:`FlagValues`, the FlagValues instance with which the - flag will be registered. This should almost never need to be overridden. 
- module_name: A string, the name of the Python module declaring this flag. If - not provided, it will be computed using the stack trace of this call. - case_sensitive: bool, whether to map strings to members of the enum_class - without considering case. - required: bool, is this a required flag. This must be used as a keyword - argument. - **args: Dictionary with extra keyword args that are passed to the - ``Flag.__init__``. - - Returns: - a handle to defined flag. - """ - return DEFINE_flag( - _flag.MultiEnumClassFlag( - name, - default, - help, - enum_class, - case_sensitive=case_sensitive, - **args, - ), - flag_values, - module_name, - required=required, - ) - - -def DEFINE_alias( # pylint: disable=invalid-name - name, - original_name, - flag_values=_flagvalues.FLAGS, - module_name=None): - """Defines an alias flag for an existing one. - - Args: - name: str, the flag name. - original_name: str, the original flag name. - flag_values: :class:`FlagValues`, the FlagValues instance with which the - flag will be registered. This should almost never need to be overridden. - module_name: A string, the name of the module that defines this flag. - - Returns: - a handle to defined flag. - - Raises: - flags.FlagError: - UnrecognizedFlagError: if the referenced flag doesn't exist. - DuplicateFlagError: if the alias name has been used by some existing flag. - """ - if original_name not in flag_values: - raise _exceptions.UnrecognizedFlagError(original_name) - flag = flag_values[original_name] - - class _FlagAlias(_flag.Flag): - """Overrides Flag class so alias value is copy of original flag value.""" - - def parse(self, argument): - flag.parse(argument) - self.present += 1 - - def _parse_from_default(self, value): - # The value was already parsed by the aliased flag, so there is no - # need to call the parser on it a second time. - # Additionally, because of how MultiFlag parses and merges values, - # it isn't possible to delegate to the aliased flag and still get - # the correct values. - return value - - @property - def value(self): - return flag.value - - @value.setter - def value(self, value): - flag.value = value - - help_msg = 'Alias for --%s.' % flag.name - # If alias_name has been used, flags.DuplicatedFlag will be raised. 
- return DEFINE_flag( - _FlagAlias( - flag.parser, - flag.serializer, - name, - flag.default, - help_msg, - boolean=flag.boolean), flag_values, module_name) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/vega/v5/data.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/vega/v5/data.py deleted file mode 100644 index 78273363d19679819237caa91d8e337ad2d3c936..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/vega/v5/data.py +++ /dev/null @@ -1,29 +0,0 @@ -from ..data import ( - MaxRowsError, - curry, - default_data_transformer, - limit_rows, - pipe, - sample, - to_csv, - to_json, - to_values, -) - - -# ============================================================================== -# Vega 5 data transformers -# ============================================================================== - - -__all__ = ( - "MaxRowsError", - "curry", - "default_data_transformer", - "limit_rows", - "pipe", - "sample", - "to_csv", - "to_json", - "to_values", -) diff --git a/spaces/ashercn97/AsherTesting/docs/ExLlama.md b/spaces/ashercn97/AsherTesting/docs/ExLlama.md deleted file mode 100644 index db0ebe63c90cf155e8b550e73a542d560ccb0b54..0000000000000000000000000000000000000000 --- a/spaces/ashercn97/AsherTesting/docs/ExLlama.md +++ /dev/null @@ -1,22 +0,0 @@ -# ExLlama - -### About - -ExLlama is an extremely optimized GPTQ backend for LLaMA models. It features much lower VRAM usage and much higher speeds due to not relying on unoptimized transformers code. - -### Usage - -Configure text-generation-webui to use exllama via the UI or command line: - - In the "Model" tab, set "Loader" to "exllama" - - Specify `--loader exllama` on the command line - -### Manual setup - -No additional installation steps are necessary since an exllama package is already included in the requirements.txt. If this package fails to install for some reason, you can install it manually by cloning the original repository into your `repositories/` folder: - -``` -mkdir repositories -cd repositories -git clone https://github.com/turboderp/exllama -``` - diff --git a/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Cairo Liu.html b/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Cairo Liu.html deleted file mode 100644 index f399346315c1420d670dec6b125e078e1f1c4fb8..0000000000000000000000000000000000000000 --- a/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Cairo Liu.html +++ /dev/null @@ -1,132 +0,0 @@ - - - - Cairo Liu - - - - -
    -

    Cairo Liu

    - -
    -
    Mentee to Mentor 

    1- What motivates you to become a mentor with SharpestMinds?
    - Likes to help people and recently helped two friends with their job hunts, which was valuable to them. Can add value by helping others: has gone through the job-hunting process twice and knows how to build a resume, network, and tackle interviews. Teaching people is also a great way to learn and stay up to date on trends in the D.S. career. 

    2- What has your data science career journey been like?
    - Working with data comes naturally. Did a PhD at the University of Toronto, using data to derive consumer behaviors. Has many years of coding experience. 
    - Moved into industry from academia, having initially thought that being a professor was the best option. 
    - Did an internship related to D.S. 
    - Worked at a startup, Dealmaker, as the first hire on the data team and built several tools from start to deployment, but the entire data team was later laid off. 
    - Joined a data consultancy, Tiger Analytics, working closely with other data scientists. The depth of modeling is much greater, and the work involves solving difficult problems for various clients. 
    - Currently in a managerial role, in charge of two other people, assigning them tasks and overseeing their work. 

    3- How was your experience as a mentee with SM?
    - Looked for a mentor after graduation; the mentor was knowledgeable and gave the right guidance on technical learning and on making the shift from academia to industry. Has also been communicating with other fellow mentees from time to time. 

    4- According to you, what is the biggest challenge someone faces when breaking into a data science role? How can you help them with that?
    - The biggest challenge is getting the first interview, and not being shy about reaching out to people for job prospects. 
    Will help mentees by keeping them accountable for their job applications and checking their effectiveness, helping them improve their strategy and resume, and helping with interview preparation. 

    5- Would you be OK with sharing a day-in-the-life story that can be shared with the community?
    - Yes

    6- Do you have any questions for me regarding SM?
    - Is there a limit on the percentage of income shared by mentees? What is the typical range?
    - If the mentee is in the US, will I get paid in CAD or USD?
    -
    - -
    - - - \ No newline at end of file diff --git a/spaces/awacke1/HFSpaceStreamlitHeatmap/README.md b/spaces/awacke1/HFSpaceStreamlitHeatmap/README.md deleted file mode 100644 index a292a35e1c02efcb52a6c787e38ca413ab8648f1..0000000000000000000000000000000000000000 --- a/spaces/awacke1/HFSpaceStreamlitHeatmap/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: HFSpaceStreamlitHeatmap -emoji: 😻Heat -colorFrom: pink -colorTo: purple -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/Memory-Shared/app.py b/spaces/awacke1/Memory-Shared/app.py deleted file mode 100644 index 46bbfde01c415486f596e25f4ef2adcf65e7e32e..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Memory-Shared/app.py +++ /dev/null @@ -1,86 +0,0 @@ -import os -import csv -import gradio as gr -from gradio import inputs, outputs -import huggingface_hub -from huggingface_hub import Repository, hf_hub_download, upload_file -from datetime import datetime - -DATASET_REPO_URL = "https://huggingface.co/datasets/awacke1/data.csv" -DATASET_REPO_ID = "awacke1/data.csv" -DATA_FILENAME = "data.csv" -DATA_FILE = os.path.join("data", DATA_FILENAME) -HF_TOKEN = os.environ.get("HF_TOKEN") - -# overriding/appending to the gradio template -SCRIPT = """ - -""" -with open(os.path.join(gr.networking.STATIC_TEMPLATE_LIB, "frontend", "index.html"), "a") as f: - f.write(SCRIPT) - -try: - hf_hub_download( - repo_id=DATASET_REPO_ID, - filename=DATA_FILENAME, - cache_dir=DATA_DIRNAME, - force_filename=DATA_FILENAME - ) -except: - print("file not found") - -repo = Repository( - local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN -) - -def generate_html() -> str: - with open(DATA_FILE) as csvfile: - reader = csv.DictReader(csvfile) - rows = [] - for row in reader: - rows.append(row) - rows.reverse() - if len(rows) == 0: - return "no messages yet" - else: - html = "
    " - for row in rows: - html += "
    " - html += f"{row['name']}" - html += f"{row['message']}" - html += "
    " - html += "
    " - return html - -def store_message(name: str, message: str): - if name and message: - with open(DATA_FILE, "a") as csvfile: - writer = csv.DictWriter(csvfile, fieldnames=["name", "message", "time"]) - writer.writerow( - {"name": name, "message": message, "time": str(datetime.now())} - ) - commit_url = repo.push_to_hub() - return generate_html() - -iface = gr.Interface( - store_message, - [ - inputs.Textbox(placeholder="Your name"), - inputs.Textbox(placeholder="Your message", lines=2), - ], - "html", - css=""" - .message {background-color:cornflowerblue;color:white; padding:4px;margin:4px;border-radius:4px; } - """, - title="Reading/writing to a HuggingFace dataset repo from Spaces", - description=f"This is a demo of how to do simple *shared data persistence* in a Gradio Space, backed by a dataset repo.", - article=f"The dataset repo is [{DATASET_REPO_URL}]({DATASET_REPO_URL})", -) - -iface.launch() \ No newline at end of file diff --git a/spaces/azusarang/so-vits-svc-models-ba_P/modules/losses.py b/spaces/azusarang/so-vits-svc-models-ba_P/modules/losses.py deleted file mode 100644 index cd21799eccde350c3aac0bdd661baf96ed220147..0000000000000000000000000000000000000000 --- a/spaces/azusarang/so-vits-svc-models-ba_P/modules/losses.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from torch.nn import functional as F - -import modules.commons as commons - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - rl = rl.float().detach() - gl = gl.float() - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - dr = dr.float() - dg = dg.float() - r_loss = torch.mean((1-dr)**2) - g_loss = torch.mean(dg**2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - dg = dg.float() - l = torch.mean((1-dg)**2) - gen_losses.append(l) - loss += l - - return loss, gen_losses - - -def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): - """ - z_p, logs_q: [b, h, t_t] - m_p, logs_p: [b, h, t_t] - """ - z_p = z_p.float() - logs_q = logs_q.float() - m_p = m_p.float() - logs_p = logs_p.float() - z_mask = z_mask.float() - #print(logs_p) - kl = logs_p - logs_q - 0.5 - kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p) - kl = torch.sum(kl * z_mask) - l = kl / torch.sum(z_mask) - return l diff --git a/spaces/banana-projects/convai/Dockerfile b/spaces/banana-projects/convai/Dockerfile deleted file mode 100644 index 17c322e892bcfc6787db650acd9f91caf7d26ac8..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/convai/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM node:20 - -WORKDIR /code - -RUN npm i -g typescript@4.0.3 grunt-cli@1.2.0 - -COPY . . - -RUN cd front && npm i && tsc && cd .. - -RUN cd grunt && npm i && grunt && cd .. - -RUN cd server && npm i && tsc && cd .. 
- -CMD ["node", "server/dist/server.js"] diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/metalnessmap_pars_fragment.glsl.js b/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/metalnessmap_pars_fragment.glsl.js deleted file mode 100644 index ac89c0b371d29eed724e38ee5a6423c493aaa71b..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/metalnessmap_pars_fragment.glsl.js +++ /dev/null @@ -1,7 +0,0 @@ -export default /* glsl */` -#ifdef USE_METALNESSMAP - - uniform sampler2D metalnessMap; - -#endif -`; diff --git a/spaces/bigjoker/stable-diffusion-webui/modules/scripts_auto_postprocessing.py b/spaces/bigjoker/stable-diffusion-webui/modules/scripts_auto_postprocessing.py deleted file mode 100644 index 16ec8b613b134b0a9a4054f06d5979ec1822c422..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/modules/scripts_auto_postprocessing.py +++ /dev/null @@ -1,42 +0,0 @@ -from modules import scripts, scripts_postprocessing, shared - - -class ScriptPostprocessingForMainUI(scripts.Script): - def __init__(self, script_postproc): - self.script: scripts_postprocessing.ScriptPostprocessing = script_postproc - self.postprocessing_controls = None - - def title(self): - return self.script.name - - def show(self, is_img2img): - return scripts.AlwaysVisible - - def ui(self, is_img2img): - self.postprocessing_controls = self.script.ui() - return self.postprocessing_controls.values() - - def postprocess_image(self, p, script_pp, *args): - args_dict = {k: v for k, v in zip(self.postprocessing_controls, args)} - - pp = scripts_postprocessing.PostprocessedImage(script_pp.image) - pp.info = {} - self.script.process(pp, **args_dict) - p.extra_generation_params.update(pp.info) - script_pp.image = pp.image - - -def create_auto_preprocessing_script_data(): - from modules import scripts - - res = [] - - for name in shared.opts.postprocessing_enable_in_main_ui: - script = next(iter([x for x in scripts.postprocessing_scripts_data if x.script_class.name == name]), None) - if script is None: - continue - - constructor = lambda s=script: ScriptPostprocessingForMainUI(s.script_class()) - res.append(scripts.ScriptClassData(script_class=constructor, path=script.path, basedir=script.basedir, module=script.module)) - - return res diff --git a/spaces/bigjoker/stable-diffusion-webui/modules/shared_items.py b/spaces/bigjoker/stable-diffusion-webui/modules/shared_items.py deleted file mode 100644 index 8dd832ed9b1e610b2ab1b4d5f911c58d63c00f80..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/modules/shared_items.py +++ /dev/null @@ -1,23 +0,0 @@ - - -def realesrgan_models_names(): - import modules.realesrgan_model - return [x.name for x in modules.realesrgan_model.get_realesrgan_models(None)] - - -def postprocessing_scripts(): - import modules.scripts - - return modules.scripts.scripts_postproc.scripts - - -def sd_vae_items(): - import modules.sd_vae - - return ["Automatic", "None"] + list(modules.sd_vae.vae_dict) - - -def refresh_vae_list(): - import modules.sd_vae - - modules.sd_vae.refresh_vae_list() diff --git a/spaces/bioriAsaeru/text-to-voice/ASTRO25 Portable CPS Install R19.01.00.zip.md b/spaces/bioriAsaeru/text-to-voice/ASTRO25 Portable CPS Install R19.01.00.zip.md deleted file mode 100644 index f408d9f45d83f20f1181c1f5e207d0c0019aeb74..0000000000000000000000000000000000000000 --- 
a/spaces/bioriAsaeru/text-to-voice/ASTRO25 Portable CPS Install R19.01.00.zip.md +++ /dev/null @@ -1,30 +0,0 @@ -

    ASTRO25 Portable CPS Install R19.01.00.zip


    DOWNLOAD >>> https://urloso.com/2uyRC3



    - -Critical house prices point to bubble - -Over the weekend, Australia's leading real estate website Domain has released a report showing that the biggest 15 cities in the country have seen median house prices climb by 40 per cent or more in the past three years. - -All but one of the 15 markets - Sunshine Coast - saw prices rise by at least a third in the last three years, with the biggest increases in Sydney, Melbourne and the Gold Coast. - -Over the weekend, Domain, a real estate information company with over 10 million registered users, released a report showing that the biggest 15 cities in the country have seen median house prices climb by 40 per cent or more in the past three years. - -The report found the biggest decrease in median house prices were in Hobart and Darwin, with prices falling 25 per cent and 18 per cent respectively. - -Domain head of analytics Ben Uffindell said while these numbers are not necessarily representative of the full real estate market, they indicate that Sydney and Melbourne are the most overvalued housing markets in the country. - -"We believe the price jump in Sydney and Melbourne is a sign of a new, national housing bubble emerging," he said. - -Uffindell said the average median price for a Sydney or Melbourne home is now $1.4 million, compared to a median of $938,000 in Sydney and $1.1 million in Melbourne in 2007. - -He said in both cities, the number of homes sold during a typical month was higher than it was at the peak of the last bubble - in the early 2000s. - -But Uffindell said this was not the case in the other markets. "The average Sydney or Melbourne home has increased in price by 30 per cent, compared to a 20 per cent increase for our other market leaders," he said. - -"There are some market indicators, like the number of newly listed homes, that reflect the price growth, but they tell only part of the story." - -He said that while the rise in prices is not as dramatic as it was in the early 2000s, it still indicates that Australians are seeing a relatively high number of gains. - -The report also pointed to high levels of investor activity, which is in contrast to the GFC and the 2001 housing bubble. 4fefd39f24
    -
    -
    -

    diff --git a/spaces/bioriAsaeru/text-to-voice/DOOM V6.66 Update 9 CPY FitGirl HOT.md b/spaces/bioriAsaeru/text-to-voice/DOOM V6.66 Update 9 CPY FitGirl HOT.md deleted file mode 100644 index 47fcbc78b6ace780f8ed54b052afb0405b843f6e..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/DOOM V6.66 Update 9 CPY FitGirl HOT.md +++ /dev/null @@ -1,186 +0,0 @@ - -

    DOOM v6.66 Update 9 CPY, FitGirl: The Ultimate Guide

    - -

    If you are looking for a way to download and install DOOM v6.66 Update 9 CPY, FitGirl, you have come to the right place. In this article, we will show you how to get this amazing game on your PC with a few simple steps. You will also learn about the features and benefits of this repack, as well as some tips and tricks to enjoy it to the fullest.

    -

    DOOM v6.66 Update 9 CPY, FitGirl


    Download Zip 🆓 https://urloso.com/2uyP31



    - -

    What is DOOM v6.66 Update 9 CPY, FitGirl?

    - -

    DOOM v6.66 Update 9 CPY, FitGirl is a repack of the latest version of DOOM 2016, the reboot of the classic first-person shooter game developed by id Software and published by Bethesda Softworks. This repack is based on the Steam release of the game from March 29, 2018, which includes the latest update (v6.66) and crack by CODEX.

    - -

    The repack has several advantages over the original release, such as:

    - -
      -
    • Smaller size: The original size of the game is 68.9 GB, while the repack size is from 30.4 GB to 41.5 GB, depending on the selected components.
    • -
    • Selective download: You can skip downloading and installing multiplayer files, SnapMap editor files, credits videos and language files you don't need. Note that multiplayer and SnapMap modes are not playable without legit online access.
    • -
    • Lossless quality: All files are identical to originals after installation. Nothing is ripped or re-encoded.
    • -
    • Faster installation: Installation takes from 35 minutes to 1 hour for singleplayer mode only, depending on your CPU and RAM. Installing multiplayer and SnapMap files takes another 15 to 25 minutes.
    • -
    • Integrity check: After-install integrity check ensures that everything is installed properly.
    • -
    • Language support: The game supports 10 languages: English, French, Italian, German, Spanish, Japanese, Polish, Portuguese-Brazilian, Russian and Traditional Chinese. You can change the language using "Language Selector.exe" in game root.
    • -
    - -

    How to Download and Install DOOM v6.66 Update 9 CPY, FitGirl?

    - -

    To download and install DOOM v6.66 Update 9 CPY, FitGirl, you need to follow these steps:

    -

    - -
      -
    1. Download the repack from one of the mirrors provided by FitGirl Repacks Site or other trusted sources. You can use torrent or direct links, depending on your preference.
    2. -
    3. Extract the repack using WinRAR or 7-Zip. You will need at least 2.5 GB of free RAM (including virtual) for this process.
    4. -
    5. Run "setup.exe" and select the components you want to install. Make sure you have enough disk space for installation (up to 69 GB).
    6. -
    7. Wait for the installation to finish. It may take some time depending on your system specs.
    8. -
    9. Run the game from desktop shortcut or "DOOMx64.exe" in game root.
    10. -
    11. Enjoy!
    12. -
    - -

    What are the Features and Benefits of DOOM v6.66 Update 9 CPY, FitGirl?

    - -

    DOOM v6.66 Update 9 CPY, FitGirl offers you a chance to experience one of the best shooter games ever made with improved performance and stability. Here are some of the features and benefits of this game:

    - -
      -
    • Awesome gameplay: DOOM is a fast-paced, brutal and challenging shooter that will keep you on the edge of your seat. You will face relentless demons, use impossibly destructive guns, and move with fluidity and speed through the depths of Hell in the single-player campaign or compete against your friends in various multiplayer modes.
    • -
    • Stunning graphics: DOOM uses the id Tech 6 engine, which delivers incredible visuals and performance on PC. The game supports up to 4K resolution and an uncapped framerate for smooth and immersive gameplay.
    • -
    • Creative content: DOOM allows you to expand your gameplay experience using the DOOM SnapMap game editor, which lets you easily create, play and share your own content with the world. You can make new maps, modes or even full games with SnapMap.
    • -
    • Update 6.66: This update brings several improvements and fixes to the game, such as:
    • -
        -
      • New progression system: The update replaces the previous random unlock system with a new one that allows you to unlock specific items by completing challenges and leveling up.
      • -
      • New rune system: The update replaces the previous Hack Module system with a new one that allows you to equip runes as persistent player abilities earned and included in a player loadout.
      • -
      • New multiplayer features: The update adds new features such as bots support for all modes except Sacrifice; new HUD options; new kill card; new weapon balance; new echelon levels; new medals; new announcer voice; etc.
      • -
      • Bug fixes: The update fixes various issues related to textures, models, multiplayer modes, SnapMap editor, etc.
      • -
      -
    - -

    Tips and Tricks for DOOM v6.66 Update 9 CPY, FitGirl

    - -

    To make the most out of DOOM v6.66 Update 9 CPY, FitGirl, here are some tips and tricks that may help you:

    - -
      -
    • Adjust your settings: Before playing the game, make sure you adjust your settings according to your system specs and preferences. You can tweak graphics options such as resolution, anti-aliasing, texture quality, shadows quality, etc.; audio options such as volume levels, subtitles language; gameplay options such as difficulty level; etc.
    • -
    • Use glory kills: Glory kills are special melee executions that allow you to finish off weakened enemies in style and get health drops from them. To perform a glory kill, approach a staggered enemy (indicated by a blue or orange highlight) and press F or mouse click when prompted.
    • -
    • Use the chainsaw: The chainsaw is a powerful weapon that can instantly kill most enemies (except bosses) and get ammo drops from them. To use the chainsaw, equip it with the G key or mouse wheel and press mouse click when close to an enemy. Note that the chainsaw requires fuel, which can be found throughout levels or dropped by some enemies.
    • -
    • Use grenades: Grenades are useful tools that can deal damage to multiple enemies at once or stun them temporarily. To use grenades, press Q key or mouse wheel click to throw them or hold Q key or mouse wheel click to cook them before throwing them.
    • -
    • Use weapon mods: Weapon mods are attachments that can enhance your weapons with different abilities such as zooming in, charging up shots, firing multiple projectiles etc. To use weapon mods, -equip them with R key or mouse wheel when holding a weapon or press F1 key or mouse wheel click when selecting a weapon from weapon wheel; then press mouse right click to activate them when aiming. -
    • -
    • Find secrets: Secrets are hidden items or areas that can reward you with collectibles such as action figures, -classic maps, -data logs, -field drones, -rune trials, -elite guards, -argent cells etc. -To find secrets, -look for clues such as cracks, -vents, -levers, -switches etc. -or use automap stations -or praetor suit upgrades -to reveal them on your map. -
    • -
    - -

    Conclusion

    - -

    DOOM v6.66 Update 9 CPY, FitGirl is a great way to enjoy one of the best shooter games ever made with improved performance and stability. -If you follow our guide, -you will be able to download -and install this repack easily -and play this game with no problems. -We hope you have fun -and share your feedback -with us in the comments below.

    -

    How to Play DOOM v6.66 Update 9 CPY, FitGirl?

    - -

    Once you have downloaded and installed DOOM v6.66 Update 9 CPY, FitGirl, you are ready to play this awesome game. You can choose from three modes of gameplay: single-player, multiplayer and SnapMap.

    - -

    In single-player mode, you will take on the role of a lone DOOM Marine who wakes up on a UAC facility on Mars that has been overrun by demons. You will have to fight your way through hordes of enemies using a variety of weapons and gadgets, while exploring the secrets and lore of the DOOM universe.

    - -

    In multiplayer mode, you will join other players online in various modes such as Team Deathmatch, Domination, Warpath, Freeze Tag, Clan Arena and more. You will be able to customize your character with different armor sets, colors, patterns and taunts. You will also be able to unlock and use different weapons and power-ups such as the BFG, Gauss Cannon, Quad Damage and Demon Runes.

    - -

    In SnapMap mode, you will be able to create your own maps and modes using a simple and intuitive editor that lets you drag and drop elements, add logic and scripts, and test your creations on the fly. You will also be able to play and share your content with other players around the world.

    - -

    Why Should You Download DOOM v6.66 Update 9 CPY, FitGirl?

    - -

    If you are still wondering why you should download DOOM v6.66 Update 9 CPY, FitGirl, here are some reasons why you should not miss this opportunity:

    - -
      -
    • It is free: You can download this repack without paying anything. You just need a torrent client or a direct link to get it.
    • -
    • It is safe: You can download this repack without worrying about viruses or malware. The repack is verified by FitGirl Repacks Site and other trusted sources.
    • -
    • It is easy: You can download and install this repack without any hassle. The repack has a simple setup that guides you through the process.
    • -
    • It is fun: You can download and play this game without any limitations. The game has a lot of content and features that will keep you entertained for hours.
    • -
    - -

    Conclusion

    - -

    DOOM v6.66 Update 9 CPY, FitGirl is a great way to enjoy one of the best shooter games ever made with improved performance and stability. -If you follow our guide, -you will be able to download -and install this repack easily -and play this game with no problems. -We hope you have fun -and share your feedback -with us in the comments below.

    -

    How to Fix Common Issues with DOOM v6.66 Update 9 CPY, FitGirl?

    - -

    While DOOM v6.66 Update 9 CPY, FitGirl is a stable and reliable repack, you may encounter some issues while playing the game. Here are some common problems and their solutions:

    - -
      -
    • Game crashes or freezes: If the game crashes or freezes during gameplay, try to lower your graphics settings, update your drivers, disable antivirus or firewall, run the game as administrator, or verify the integrity of game files.
    • -
    • Game won't start or launch: If the game won't start or launch at all, make sure you have installed all the required components such as DirectX, Visual C++, etc. You can find them in "_Redist" folder of the repack. Also, check if your antivirus or firewall is blocking the game or the crack.
    • -
    • Game shows black screen or no sound: If the game shows black screen or no sound after launching, try to change your screen resolution, switch to windowed mode, disable fullscreen optimizations, or change your audio output device.
    • -
    • Game has low FPS or stuttering: If the game has low FPS or stuttering during gameplay, try to disable VSync, lower your graphics settings, close background programs, or use a FPS limiter.
    • -
    - -

    What are the System Requirements for DOOM v6.66 Update 9 CPY, FitGirl?

    - -

    To play DOOM v6.66 Update 9 CPY, FitGirl, you need to have a PC that meets the following minimum or recommended system requirements:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    MinimumRecommended
    OS: Windows 7/8.1/10 (64-bit versions)OS: Windows 7/8.1/10 (64-bit versions)
    CPU: Intel Core i5-2400/AMD FX-8320 or betterCPU: Intel Core i7-3770/AMD FX-8350 or better
    RAM: 8 GBRAM: 8 GB
    GPU: NVIDIA GTX 670 2GB/AMD Radeon HD 7870 2GB or betterGPU: NVIDIA GTX 970 4GB/AMD Radeon R9 290 4GB or better
    HDD: up to 69 GBHDD: up to 69 GB
    DirectX: Version 11DirectX: Version 11
    Note: Requires Steam activation and broadband internet connection for Multiplayer and SnapMap.Note: Requires Steam activation and broadband internet connection for Multiplayer and SnapMap.
    - -

    Conclusion

    - -

    DOOM v6.66 Update 9 CPY, FitGirl is a great way to enjoy one of the best shooter games ever made with improved performance and stability. -If you follow our guide, -you will be able to download -and install this repack easily -and play this game with no problems. -We hope you have fun -and share your feedback -with us in the comments below.

    -

    In conclusion, DOOM v6.66 Update 9 CPY, FitGirl is a repack of the latest version of DOOM 2016, the reboot of the classic first-person shooter game. This repack has several advantages over the original release, such as smaller size, selective download, lossless quality, faster installation and language support. The game itself is a fast-paced, brutal and challenging shooter that will keep you entertained for hours with its awesome gameplay, stunning graphics and creative content. You can also download and install this repack easily by following our guide and fix any common issues with our solutions. If you are a fan of DOOM or shooter games in general, you should not miss this opportunity to download and play this game for free.

    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/Elfen Lied Lilium Full Version Flac Playerl The Story Behind the Creation and Performance of the Song.md b/spaces/bioriAsaeru/text-to-voice/Elfen Lied Lilium Full Version Flac Playerl The Story Behind the Creation and Performance of the Song.md deleted file mode 100644 index 67e9a29b4760ecc6a6eb996299d3a4612cb5ecbc..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Elfen Lied Lilium Full Version Flac Playerl The Story Behind the Creation and Performance of the Song.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Elfen Lied Lilium Full Version Flac Playerl


    Download File 🗹 https://urloso.com/2uyPZE



    -
    - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/brainblow/AudioCreator_Music-Audio_Generation/docs/ENCODEC.md b/spaces/brainblow/AudioCreator_Music-Audio_Generation/docs/ENCODEC.md deleted file mode 100644 index efc2bcc7ec50190b907c887b920b70fd799c6953..0000000000000000000000000000000000000000 --- a/spaces/brainblow/AudioCreator_Music-Audio_Generation/docs/ENCODEC.md +++ /dev/null @@ -1,179 +0,0 @@ -# EnCodec: High Fidelity Neural Audio Compression - -AudioCraft provides the training code for EnCodec, a state-of-the-art deep learning -based audio codec supporting both mono stereo audio, presented in the -[High Fidelity Neural Audio Compression][arxiv] paper. -Check out our [sample page][encodec_samples]. - -## Original EnCodec models - -The EnCodec models presented in High Fidelity Neural Audio Compression can be accessed -and used with the [EnCodec repository](https://github.com/facebookresearch/encodec). - -**Note**: We do not guarantee compatibility between the AudioCraft and EnCodec codebases -and released checkpoints at this stage. - - -## Installation - -Please follow the AudioCraft installation instructions from the [README](../README.md). - - -## Training - -The [CompressionSolver](../audiocraft/solvers/compression.py) implements the audio reconstruction -task to train an EnCodec model. Specifically, it trains an encoder-decoder with a quantization -bottleneck - a SEANet encoder-decoder with Residual Vector Quantization bottleneck for EnCodec - -using a combination of objective and perceptual losses in the forms of discriminators. - -The default configuration matches a causal EnCodec training with at a single bandwidth. - -### Example configuration and grids - -We provide sample configuration and grids for training EnCodec models. - -The compression configuration are defined in -[config/solver/compression](../config/solver/compression). - -The example grids are available at -[audiocraft/grids/compression](../audiocraft/grids/compression). - -```shell -# base causal encodec on monophonic audio sampled at 24 khz -dora grid compression.encodec_base_24khz -# encodec model used for MusicGen on monophonic audio sampled at 32 khz -dora grid compression.encodec_musicgen_32khz -``` - -### Training and valid stages - -The model is trained using a combination of objective and perceptual losses. -More specifically, EnCodec is trained with the MS-STFT discriminator along with -objective losses through the use of a loss balancer to effectively weight -the different losses, in an intuitive manner. - -### Evaluation stage - -Evaluations metrics for audio generation: -* SI-SNR: Scale-Invariant Signal-to-Noise Ratio. -* ViSQOL: Virtual Speech Quality Objective Listener. - -Note: Path to the ViSQOL binary (compiled with bazel) needs to be provided in -order to run the ViSQOL metric on the reference and degraded signals. -The metric is disabled by default. -Please refer to the [metrics documentation](../METRICS.md) to learn more. - -### Generation stage - -The generation stage consists in generating the reconstructed audio from samples -with the current model. The number of samples generated and the batch size used are -controlled by the `dataset.generate` configuration. The output path and audio formats -are defined in the generate stage configuration. 
- -```shell -# generate samples every 5 epoch -dora run solver=compression/encodec_base_24khz generate.every=5 -# run with a different dset -dora run solver=compression/encodec_base_24khz generate.path= -# limit the number of samples or use a different batch size -dora grid solver=compression/encodec_base_24khz dataset.generate.num_samples=10 dataset.generate.batch_size=4 -``` - -### Playing with the model - -Once you have a model trained, it is possible to get the entire solver, or just -the trained model with the following functions: - -```python -from audiocraft.solvers import CompressionSolver - -# If you trained a custom model with signature SIG. -model = CompressionSolver.model_from_checkpoint('//sig/SIG') -# If you want to get one of the pretrained models with the `//pretrained/` prefix. -model = CompressionSolver.model_from_checkpoint('//pretrained/facebook/encodec_32khz') -# Or load from a custom checkpoint path -model = CompressionSolver.model_from_checkpoint('/my_checkpoints/foo/bar/checkpoint.th') - - -# If you only want to use a pretrained model, you can also directly get it -# from the CompressionModel base model class. -from audiocraft.models import CompressionModel - -# Here do not put the `//pretrained/` prefix! -model = CompressionModel.get_pretrained('facebook/encodec_32khz') -model = CompressionModel.get_pretrained('dac_44khz') - -# Finally, you can also retrieve the full Solver object, with its dataloader etc. -from audiocraft import train -from pathlib import Path -import logging -import os -import sys - -# uncomment the following line if you want some detailed logs when loading a Solver. -logging.basicConfig(stream=sys.stderr, level=logging.INFO) -# You must always run the following function from the root directory. -os.chdir(Path(train.__file__).parent.parent) - - -# You can also get the full solver (only for your own experiments). -# You can provide some overrides to the parameters to make things more convenient. -solver = train.get_solver_from_sig('SIG', {'device': 'cpu', 'dataset': {'batch_size': 8}}) -solver.model -solver.dataloaders -``` - -### Importing / Exporting models - -At the moment we do not have a definitive workflow for exporting EnCodec models, for -instance to Hugging Face (HF). We are working on supporting automatic convertion between -AudioCraft and Hugging Face implementations. - -We still have some support for fine tuning an EnCodec model coming from HF in AudioCraft, -using for instance `continue_from=//pretrained/facebook/encodec_32k`. - -An AudioCraft checkpoint can be exported in a more compact format (excluding the optimizer etc.) -using `audiocraft.utils.export.export_encodec`. For instance, you could run - -```python -from audiocraft.utils import export -from audiocraft import train -xp = train.main.get_xp_from_sig('SIG') -export.export_encodec( - xp.folder / 'checkpoint.th', - '/checkpoints/my_audio_lm/compression_state_dict.bin') - - -from audiocraft.models import CompressionModel -model = CompressionModel.get_pretrained('/checkpoints/my_audio_lm/compression_state_dict.bin') - -from audiocraft.solvers import CompressionSolver -# The two are strictly equivalent, but this function supports also loading from non already exported models. -model = CompressionSolver.model_from_checkpoint('//pretrained//checkpoints/my_audio_lm/compression_state_dict.bin') -``` - -We will see then how to use this model as a tokenizer for MusicGen/Audio gen in the -[MusicGen documentation](./MUSICGEN.md). 
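
A minimal sketch of using such a loaded model as an audio tokenizer, i.e. encoding a waveform into discrete codes and decoding it back, assuming the `encode`/`decode` pair on `CompressionModel` exchanges `(codes, scale)` tensors as in the AudioCraft EnCodec wrapper; the silent dummy waveform is purely illustrative, and the exact signatures should be checked against the installed version.

```python
import torch
from audiocraft.models import CompressionModel

# Load a pretrained compression model (same entry point as shown above).
model = CompressionModel.get_pretrained('facebook/encodec_32khz')
model.eval()

# Dummy one-second waveform shaped [batch, channels, time]; replace with real
# audio resampled to model.sample_rate.
wav = torch.zeros(1, model.channels, model.sample_rate)

with torch.no_grad():
    # encode() is assumed to return (codes, scale), with codes of shape [B, K, T]
    # (K = number of codebooks) and scale only used by normalizing variants.
    codes, scale = model.encode(wav)
    # decode() reconstructs a waveform from the discrete codes.
    reconstructed = model.decode(codes, scale)

print(codes.shape, reconstructed.shape)
```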
- -### Learn more - -Learn more about AudioCraft training pipelines in the [dedicated section](./TRAINING.md). - - -## Citation -``` -@article{defossez2022highfi, - title={High Fidelity Neural Audio Compression}, - author={Défossez, Alexandre and Copet, Jade and Synnaeve, Gabriel and Adi, Yossi}, - journal={arXiv preprint arXiv:2210.13438}, - year={2022} -} -``` - - -## License - -See license information in the [README](../README.md). - -[arxiv]: https://arxiv.org/abs/2210.13438 -[encodec_samples]: https://ai.honu.io/papers/encodec/samples.html diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/solver/__init__.py b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/solver/__init__.py deleted file mode 100644 index 7e36c64f60f38f41d01dd2c9fb30364489a03841..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/solver/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -from .build import build_lr_scheduler, build_optimizer, get_default_optimizer_params -from .lr_scheduler import ( - LRMultiplier, - LRScheduler, - WarmupCosineLR, - WarmupMultiStepLR, - WarmupParamScheduler, -) - -__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/spaces/cakiki/facets-dive/Dockerfile b/spaces/cakiki/facets-dive/Dockerfile deleted file mode 100644 index 5f1f4bb9feb52223d23b556d2bdfc046ea2b2b64..0000000000000000000000000000000000000000 --- a/spaces/cakiki/facets-dive/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM jupyter/base-notebook:latest - -RUN pip install --use-feature=2020-resolver pandas facets-overview diff --git a/spaces/camenduru-com/one-shot-talking-face/oh-no.py b/spaces/camenduru-com/one-shot-talking-face/oh-no.py deleted file mode 100644 index e8c0f3bd8d72805b4ee69d4d0fd9133347d00f92..0000000000000000000000000000000000000000 --- a/spaces/camenduru-com/one-shot-talking-face/oh-no.py +++ /dev/null @@ -1,14 +0,0 @@ -import gradio as gr - -block = gr.Blocks() - -def run(): - with block: - gr.Markdown( - """ -

    oh no 😐 something wrong with the 🤗 hugging face servers 😐 hopefully, it will be fixed soon

    - """) - block.launch(server_name="0.0.0.0", server_port=7860) - -if __name__ == "__main__": - run() \ No newline at end of file diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/data/datasets/lvis_v0_5_categories.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/data/datasets/lvis_v0_5_categories.py deleted file mode 100644 index d3dab6198da614937b08682f4c9edf52bdf1d236..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/data/datasets/lvis_v0_5_categories.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# Autogen with -# with open("lvis_v0.5_val.json", "r") as f: -# a = json.load(f) -# c = a["categories"] -# for x in c: -# del x["image_count"] -# del x["instance_count"] -# LVIS_CATEGORIES = repr(c) + " # noqa" - -# fmt: off -LVIS_CATEGORIES = [{'frequency': 'r', 'id': 1, 'synset': 'acorn.n.01', 'synonyms': ['acorn'], 'def': 'nut from an oak tree', 'name': 'acorn'}, {'frequency': 'c', 'id': 2, 'synset': 'aerosol.n.02', 'synonyms': ['aerosol_can', 'spray_can'], 'def': 'a dispenser that holds a substance under pressure', 'name': 'aerosol_can'}, {'frequency': 'f', 'id': 3, 'synset': 'air_conditioner.n.01', 'synonyms': ['air_conditioner'], 'def': 'a machine that keeps air cool and dry', 'name': 'air_conditioner'}, {'frequency': 'f', 'id': 4, 'synset': 'airplane.n.01', 'synonyms': ['airplane', 'aeroplane'], 'def': 'an aircraft that has a fixed wing and is powered by propellers or jets', 'name': 'airplane'}, {'frequency': 'c', 'id': 5, 'synset': 'alarm_clock.n.01', 'synonyms': ['alarm_clock'], 'def': 'a clock that wakes a sleeper at some preset time', 'name': 'alarm_clock'}, {'frequency': 'c', 'id': 6, 'synset': 'alcohol.n.01', 'synonyms': ['alcohol', 'alcoholic_beverage'], 'def': 'a liquor or brew containing alcohol as the active agent', 'name': 'alcohol'}, {'frequency': 'r', 'id': 7, 'synset': 'alligator.n.02', 'synonyms': ['alligator', 'gator'], 'def': 'amphibious reptiles related to crocodiles but with shorter broader snouts', 'name': 'alligator'}, {'frequency': 'c', 'id': 8, 'synset': 'almond.n.02', 'synonyms': ['almond'], 'def': 'oval-shaped edible seed of the almond tree', 'name': 'almond'}, {'frequency': 'c', 'id': 9, 'synset': 'ambulance.n.01', 'synonyms': ['ambulance'], 'def': 'a vehicle that takes people to and from hospitals', 'name': 'ambulance'}, {'frequency': 'r', 'id': 10, 'synset': 'amplifier.n.01', 'synonyms': ['amplifier'], 'def': 'electronic equipment that increases strength of signals', 'name': 'amplifier'}, {'frequency': 'c', 'id': 11, 'synset': 'anklet.n.03', 'synonyms': ['anklet', 'ankle_bracelet'], 'def': 'an ornament worn around the ankle', 'name': 'anklet'}, {'frequency': 'f', 'id': 12, 'synset': 'antenna.n.01', 'synonyms': ['antenna', 'aerial', 'transmitting_aerial'], 'def': 'an electrical device that sends or receives radio or television signals', 'name': 'antenna'}, {'frequency': 'f', 'id': 13, 'synset': 'apple.n.01', 'synonyms': ['apple'], 'def': 'fruit with red or yellow or green skin and sweet to tart crisp whitish flesh', 'name': 'apple'}, {'frequency': 'r', 'id': 14, 'synset': 'apple_juice.n.01', 'synonyms': ['apple_juice'], 'def': 'the juice of apples', 'name': 'apple_juice'}, {'frequency': 'r', 'id': 15, 'synset': 'applesauce.n.01', 'synonyms': ['applesauce'], 'def': 'puree of stewed apples usually sweetened and spiced', 'name': 'applesauce'}, {'frequency': 'r', 'id': 16, 'synset': 'apricot.n.02', 'synonyms': 
['apricot'], 'def': 'downy yellow to rosy-colored fruit resembling a small peach', 'name': 'apricot'}, {'frequency': 'f', 'id': 17, 'synset': 'apron.n.01', 'synonyms': ['apron'], 'def': 'a garment of cloth that is tied about the waist and worn to protect clothing', 'name': 'apron'}, {'frequency': 'c', 'id': 18, 'synset': 'aquarium.n.01', 'synonyms': ['aquarium', 'fish_tank'], 'def': 'a tank/pool/bowl filled with water for keeping live fish and underwater animals', 'name': 'aquarium'}, {'frequency': 'c', 'id': 19, 'synset': 'armband.n.02', 'synonyms': ['armband'], 'def': 'a band worn around the upper arm', 'name': 'armband'}, {'frequency': 'f', 'id': 20, 'synset': 'armchair.n.01', 'synonyms': ['armchair'], 'def': 'chair with a support on each side for arms', 'name': 'armchair'}, {'frequency': 'r', 'id': 21, 'synset': 'armoire.n.01', 'synonyms': ['armoire'], 'def': 'a large wardrobe or cabinet', 'name': 'armoire'}, {'frequency': 'r', 'id': 22, 'synset': 'armor.n.01', 'synonyms': ['armor', 'armour'], 'def': 'protective covering made of metal and used in combat', 'name': 'armor'}, {'frequency': 'c', 'id': 23, 'synset': 'artichoke.n.02', 'synonyms': ['artichoke'], 'def': 'a thistlelike flower head with edible fleshy leaves and heart', 'name': 'artichoke'}, {'frequency': 'f', 'id': 24, 'synset': 'ashcan.n.01', 'synonyms': ['trash_can', 'garbage_can', 'wastebin', 'dustbin', 'trash_barrel', 'trash_bin'], 'def': 'a bin that holds rubbish until it is collected', 'name': 'trash_can'}, {'frequency': 'c', 'id': 25, 'synset': 'ashtray.n.01', 'synonyms': ['ashtray'], 'def': "a receptacle for the ash from smokers' cigars or cigarettes", 'name': 'ashtray'}, {'frequency': 'c', 'id': 26, 'synset': 'asparagus.n.02', 'synonyms': ['asparagus'], 'def': 'edible young shoots of the asparagus plant', 'name': 'asparagus'}, {'frequency': 'c', 'id': 27, 'synset': 'atomizer.n.01', 'synonyms': ['atomizer', 'atomiser', 'spray', 'sprayer', 'nebulizer', 'nebuliser'], 'def': 'a dispenser that turns a liquid (such as perfume) into a fine mist', 'name': 'atomizer'}, {'frequency': 'c', 'id': 28, 'synset': 'avocado.n.01', 'synonyms': ['avocado'], 'def': 'a pear-shaped fruit with green or blackish skin and rich yellowish pulp enclosing a single large seed', 'name': 'avocado'}, {'frequency': 'c', 'id': 29, 'synset': 'award.n.02', 'synonyms': ['award', 'accolade'], 'def': 'a tangible symbol signifying approval or distinction', 'name': 'award'}, {'frequency': 'f', 'id': 30, 'synset': 'awning.n.01', 'synonyms': ['awning'], 'def': 'a canopy made of canvas to shelter people or things from rain or sun', 'name': 'awning'}, {'frequency': 'r', 'id': 31, 'synset': 'ax.n.01', 'synonyms': ['ax', 'axe'], 'def': 'an edge tool with a heavy bladed head mounted across a handle', 'name': 'ax'}, {'frequency': 'f', 'id': 32, 'synset': 'baby_buggy.n.01', 'synonyms': ['baby_buggy', 'baby_carriage', 'perambulator', 'pram', 'stroller'], 'def': 'a small vehicle with four wheels in which a baby or child is pushed around', 'name': 'baby_buggy'}, {'frequency': 'c', 'id': 33, 'synset': 'backboard.n.01', 'synonyms': ['basketball_backboard'], 'def': 'a raised vertical board with basket attached; used to play basketball', 'name': 'basketball_backboard'}, {'frequency': 'f', 'id': 34, 'synset': 'backpack.n.01', 'synonyms': ['backpack', 'knapsack', 'packsack', 'rucksack', 'haversack'], 'def': 'a bag carried by a strap on your back or shoulder', 'name': 'backpack'}, {'frequency': 'f', 'id': 35, 'synset': 'bag.n.04', 'synonyms': ['handbag', 'purse', 'pocketbook'], 
'def': 'a container used for carrying money and small personal items or accessories', 'name': 'handbag'}, {'frequency': 'f', 'id': 36, 'synset': 'bag.n.06', 'synonyms': ['suitcase', 'baggage', 'luggage'], 'def': 'cases used to carry belongings when traveling', 'name': 'suitcase'}, {'frequency': 'c', 'id': 37, 'synset': 'bagel.n.01', 'synonyms': ['bagel', 'beigel'], 'def': 'glazed yeast-raised doughnut-shaped roll with hard crust', 'name': 'bagel'}, {'frequency': 'r', 'id': 38, 'synset': 'bagpipe.n.01', 'synonyms': ['bagpipe'], 'def': 'a tubular wind instrument; the player blows air into a bag and squeezes it out', 'name': 'bagpipe'}, {'frequency': 'r', 'id': 39, 'synset': 'baguet.n.01', 'synonyms': ['baguet', 'baguette'], 'def': 'narrow French stick loaf', 'name': 'baguet'}, {'frequency': 'r', 'id': 40, 'synset': 'bait.n.02', 'synonyms': ['bait', 'lure'], 'def': 'something used to lure fish or other animals into danger so they can be trapped or killed', 'name': 'bait'}, {'frequency': 'f', 'id': 41, 'synset': 'ball.n.06', 'synonyms': ['ball'], 'def': 'a spherical object used as a plaything', 'name': 'ball'}, {'frequency': 'r', 'id': 42, 'synset': 'ballet_skirt.n.01', 'synonyms': ['ballet_skirt', 'tutu'], 'def': 'very short skirt worn by ballerinas', 'name': 'ballet_skirt'}, {'frequency': 'f', 'id': 43, 'synset': 'balloon.n.01', 'synonyms': ['balloon'], 'def': 'large tough nonrigid bag filled with gas or heated air', 'name': 'balloon'}, {'frequency': 'c', 'id': 44, 'synset': 'bamboo.n.02', 'synonyms': ['bamboo'], 'def': 'woody tropical grass having hollow woody stems', 'name': 'bamboo'}, {'frequency': 'f', 'id': 45, 'synset': 'banana.n.02', 'synonyms': ['banana'], 'def': 'elongated crescent-shaped yellow fruit with soft sweet flesh', 'name': 'banana'}, {'frequency': 'r', 'id': 46, 'synset': 'band_aid.n.01', 'synonyms': ['Band_Aid'], 'def': 'trade name for an adhesive bandage to cover small cuts or blisters', 'name': 'Band_Aid'}, {'frequency': 'c', 'id': 47, 'synset': 'bandage.n.01', 'synonyms': ['bandage'], 'def': 'a piece of soft material that covers and protects an injured part of the body', 'name': 'bandage'}, {'frequency': 'c', 'id': 48, 'synset': 'bandanna.n.01', 'synonyms': ['bandanna', 'bandana'], 'def': 'large and brightly colored handkerchief; often used as a neckerchief', 'name': 'bandanna'}, {'frequency': 'r', 'id': 49, 'synset': 'banjo.n.01', 'synonyms': ['banjo'], 'def': 'a stringed instrument of the guitar family with a long neck and circular body', 'name': 'banjo'}, {'frequency': 'f', 'id': 50, 'synset': 'banner.n.01', 'synonyms': ['banner', 'streamer'], 'def': 'long strip of cloth or paper used for decoration or advertising', 'name': 'banner'}, {'frequency': 'r', 'id': 51, 'synset': 'barbell.n.01', 'synonyms': ['barbell'], 'def': 'a bar to which heavy discs are attached at each end; used in weightlifting', 'name': 'barbell'}, {'frequency': 'r', 'id': 52, 'synset': 'barge.n.01', 'synonyms': ['barge'], 'def': 'a flatbottom boat for carrying heavy loads (especially on canals)', 'name': 'barge'}, {'frequency': 'f', 'id': 53, 'synset': 'barrel.n.02', 'synonyms': ['barrel', 'cask'], 'def': 'a cylindrical container that holds liquids', 'name': 'barrel'}, {'frequency': 'c', 'id': 54, 'synset': 'barrette.n.01', 'synonyms': ['barrette'], 'def': "a pin for holding women's hair in place", 'name': 'barrette'}, {'frequency': 'c', 'id': 55, 'synset': 'barrow.n.03', 'synonyms': ['barrow', 'garden_cart', 'lawn_cart', 'wheelbarrow'], 'def': 'a cart for carrying small loads; has handles and one 
or more wheels', 'name': 'barrow'}, {'frequency': 'f', 'id': 56, 'synset': 'base.n.03', 'synonyms': ['baseball_base'], 'def': 'a place that the runner must touch before scoring', 'name': 'baseball_base'}, {'frequency': 'f', 'id': 57, 'synset': 'baseball.n.02', 'synonyms': ['baseball'], 'def': 'a ball used in playing baseball', 'name': 'baseball'}, {'frequency': 'f', 'id': 58, 'synset': 'baseball_bat.n.01', 'synonyms': ['baseball_bat'], 'def': 'an implement used in baseball by the batter', 'name': 'baseball_bat'}, {'frequency': 'f', 'id': 59, 'synset': 'baseball_cap.n.01', 'synonyms': ['baseball_cap', 'jockey_cap', 'golf_cap'], 'def': 'a cap with a bill', 'name': 'baseball_cap'}, {'frequency': 'f', 'id': 60, 'synset': 'baseball_glove.n.01', 'synonyms': ['baseball_glove', 'baseball_mitt'], 'def': 'the handwear used by fielders in playing baseball', 'name': 'baseball_glove'}, {'frequency': 'f', 'id': 61, 'synset': 'basket.n.01', 'synonyms': ['basket', 'handbasket'], 'def': 'a container that is usually woven and has handles', 'name': 'basket'}, {'frequency': 'c', 'id': 62, 'synset': 'basket.n.03', 'synonyms': ['basketball_hoop'], 'def': 'metal hoop supporting a net through which players try to throw the basketball', 'name': 'basketball_hoop'}, {'frequency': 'c', 'id': 63, 'synset': 'basketball.n.02', 'synonyms': ['basketball'], 'def': 'an inflated ball used in playing basketball', 'name': 'basketball'}, {'frequency': 'r', 'id': 64, 'synset': 'bass_horn.n.01', 'synonyms': ['bass_horn', 'sousaphone', 'tuba'], 'def': 'the lowest brass wind instrument', 'name': 'bass_horn'}, {'frequency': 'r', 'id': 65, 'synset': 'bat.n.01', 'synonyms': ['bat_(animal)'], 'def': 'nocturnal mouselike mammal with forelimbs modified to form membranous wings', 'name': 'bat_(animal)'}, {'frequency': 'f', 'id': 66, 'synset': 'bath_mat.n.01', 'synonyms': ['bath_mat'], 'def': 'a heavy towel or mat to stand on while drying yourself after a bath', 'name': 'bath_mat'}, {'frequency': 'f', 'id': 67, 'synset': 'bath_towel.n.01', 'synonyms': ['bath_towel'], 'def': 'a large towel; to dry yourself after a bath', 'name': 'bath_towel'}, {'frequency': 'c', 'id': 68, 'synset': 'bathrobe.n.01', 'synonyms': ['bathrobe'], 'def': 'a loose-fitting robe of towelling; worn after a bath or swim', 'name': 'bathrobe'}, {'frequency': 'f', 'id': 69, 'synset': 'bathtub.n.01', 'synonyms': ['bathtub', 'bathing_tub'], 'def': 'a large open container that you fill with water and use to wash the body', 'name': 'bathtub'}, {'frequency': 'r', 'id': 70, 'synset': 'batter.n.02', 'synonyms': ['batter_(food)'], 'def': 'a liquid or semiliquid mixture, as of flour, eggs, and milk, used in cooking', 'name': 'batter_(food)'}, {'frequency': 'c', 'id': 71, 'synset': 'battery.n.02', 'synonyms': ['battery'], 'def': 'a portable device that produces electricity', 'name': 'battery'}, {'frequency': 'r', 'id': 72, 'synset': 'beach_ball.n.01', 'synonyms': ['beachball'], 'def': 'large and light ball; for play at the seaside', 'name': 'beachball'}, {'frequency': 'c', 'id': 73, 'synset': 'bead.n.01', 'synonyms': ['bead'], 'def': 'a small ball with a hole through the middle used for ornamentation, jewellery, etc.', 'name': 'bead'}, {'frequency': 'r', 'id': 74, 'synset': 'beaker.n.01', 'synonyms': ['beaker'], 'def': 'a flatbottomed jar made of glass or plastic; used for chemistry', 'name': 'beaker'}, {'frequency': 'c', 'id': 75, 'synset': 'bean_curd.n.01', 'synonyms': ['bean_curd', 'tofu'], 'def': 'cheeselike food made of curdled soybean milk', 'name': 'bean_curd'}, 
{'frequency': 'c', 'id': 76, 'synset': 'beanbag.n.01', 'synonyms': ['beanbag'], 'def': 'a bag filled with dried beans or similar items; used in games or to sit on', 'name': 'beanbag'}, {'frequency': 'f', 'id': 77, 'synset': 'beanie.n.01', 'synonyms': ['beanie', 'beany'], 'def': 'a small skullcap; formerly worn by schoolboys and college freshmen', 'name': 'beanie'}, {'frequency': 'f', 'id': 78, 'synset': 'bear.n.01', 'synonyms': ['bear'], 'def': 'large carnivorous or omnivorous mammals with shaggy coats and claws', 'name': 'bear'}, {'frequency': 'f', 'id': 79, 'synset': 'bed.n.01', 'synonyms': ['bed'], 'def': 'a piece of furniture that provides a place to sleep', 'name': 'bed'}, {'frequency': 'c', 'id': 80, 'synset': 'bedspread.n.01', 'synonyms': ['bedspread', 'bedcover', 'bed_covering', 'counterpane', 'spread'], 'def': 'decorative cover for a bed', 'name': 'bedspread'}, {'frequency': 'f', 'id': 81, 'synset': 'beef.n.01', 'synonyms': ['cow'], 'def': 'cattle that are reared for their meat', 'name': 'cow'}, {'frequency': 'c', 'id': 82, 'synset': 'beef.n.02', 'synonyms': ['beef_(food)', 'boeuf_(food)'], 'def': 'meat from an adult domestic bovine', 'name': 'beef_(food)'}, {'frequency': 'r', 'id': 83, 'synset': 'beeper.n.01', 'synonyms': ['beeper', 'pager'], 'def': 'an device that beeps when the person carrying it is being paged', 'name': 'beeper'}, {'frequency': 'f', 'id': 84, 'synset': 'beer_bottle.n.01', 'synonyms': ['beer_bottle'], 'def': 'a bottle that holds beer', 'name': 'beer_bottle'}, {'frequency': 'c', 'id': 85, 'synset': 'beer_can.n.01', 'synonyms': ['beer_can'], 'def': 'a can that holds beer', 'name': 'beer_can'}, {'frequency': 'r', 'id': 86, 'synset': 'beetle.n.01', 'synonyms': ['beetle'], 'def': 'insect with hard wing covers', 'name': 'beetle'}, {'frequency': 'f', 'id': 87, 'synset': 'bell.n.01', 'synonyms': ['bell'], 'def': 'a hollow device made of metal that makes a ringing sound when struck', 'name': 'bell'}, {'frequency': 'f', 'id': 88, 'synset': 'bell_pepper.n.02', 'synonyms': ['bell_pepper', 'capsicum'], 'def': 'large bell-shaped sweet pepper in green or red or yellow or orange or black varieties', 'name': 'bell_pepper'}, {'frequency': 'f', 'id': 89, 'synset': 'belt.n.02', 'synonyms': ['belt'], 'def': 'a band to tie or buckle around the body (usually at the waist)', 'name': 'belt'}, {'frequency': 'f', 'id': 90, 'synset': 'belt_buckle.n.01', 'synonyms': ['belt_buckle'], 'def': 'the buckle used to fasten a belt', 'name': 'belt_buckle'}, {'frequency': 'f', 'id': 91, 'synset': 'bench.n.01', 'synonyms': ['bench'], 'def': 'a long seat for more than one person', 'name': 'bench'}, {'frequency': 'c', 'id': 92, 'synset': 'beret.n.01', 'synonyms': ['beret'], 'def': 'a cap with no brim or bill; made of soft cloth', 'name': 'beret'}, {'frequency': 'c', 'id': 93, 'synset': 'bib.n.02', 'synonyms': ['bib'], 'def': 'a napkin tied under the chin of a child while eating', 'name': 'bib'}, {'frequency': 'r', 'id': 94, 'synset': 'bible.n.01', 'synonyms': ['Bible'], 'def': 'the sacred writings of the Christian religions', 'name': 'Bible'}, {'frequency': 'f', 'id': 95, 'synset': 'bicycle.n.01', 'synonyms': ['bicycle', 'bike_(bicycle)'], 'def': 'a wheeled vehicle that has two wheels and is moved by foot pedals', 'name': 'bicycle'}, {'frequency': 'f', 'id': 96, 'synset': 'bill.n.09', 'synonyms': ['visor', 'vizor'], 'def': 'a brim that projects to the front to shade the eyes', 'name': 'visor'}, {'frequency': 'c', 'id': 97, 'synset': 'binder.n.03', 'synonyms': ['binder', 'ring-binder'], 'def': 'holds 
loose papers or magazines', 'name': 'binder'}, {'frequency': 'c', 'id': 98, 'synset': 'binoculars.n.01', 'synonyms': ['binoculars', 'field_glasses', 'opera_glasses'], 'def': 'an optical instrument designed for simultaneous use by both eyes', 'name': 'binoculars'}, {'frequency': 'f', 'id': 99, 'synset': 'bird.n.01', 'synonyms': ['bird'], 'def': 'animal characterized by feathers and wings', 'name': 'bird'}, {'frequency': 'r', 'id': 100, 'synset': 'bird_feeder.n.01', 'synonyms': ['birdfeeder'], 'def': 'an outdoor device that supplies food for wild birds', 'name': 'birdfeeder'}, {'frequency': 'r', 'id': 101, 'synset': 'birdbath.n.01', 'synonyms': ['birdbath'], 'def': 'an ornamental basin (usually in a garden) for birds to bathe in', 'name': 'birdbath'}, {'frequency': 'c', 'id': 102, 'synset': 'birdcage.n.01', 'synonyms': ['birdcage'], 'def': 'a cage in which a bird can be kept', 'name': 'birdcage'}, {'frequency': 'c', 'id': 103, 'synset': 'birdhouse.n.01', 'synonyms': ['birdhouse'], 'def': 'a shelter for birds', 'name': 'birdhouse'}, {'frequency': 'f', 'id': 104, 'synset': 'birthday_cake.n.01', 'synonyms': ['birthday_cake'], 'def': 'decorated cake served at a birthday party', 'name': 'birthday_cake'}, {'frequency': 'r', 'id': 105, 'synset': 'birthday_card.n.01', 'synonyms': ['birthday_card'], 'def': 'a card expressing a birthday greeting', 'name': 'birthday_card'}, {'frequency': 'r', 'id': 106, 'synset': 'biscuit.n.01', 'synonyms': ['biscuit_(bread)'], 'def': 'small round bread leavened with baking-powder or soda', 'name': 'biscuit_(bread)'}, {'frequency': 'r', 'id': 107, 'synset': 'black_flag.n.01', 'synonyms': ['pirate_flag'], 'def': 'a flag usually bearing a white skull and crossbones on a black background', 'name': 'pirate_flag'}, {'frequency': 'c', 'id': 108, 'synset': 'black_sheep.n.02', 'synonyms': ['black_sheep'], 'def': 'sheep with a black coat', 'name': 'black_sheep'}, {'frequency': 'c', 'id': 109, 'synset': 'blackboard.n.01', 'synonyms': ['blackboard', 'chalkboard'], 'def': 'sheet of slate; for writing with chalk', 'name': 'blackboard'}, {'frequency': 'f', 'id': 110, 'synset': 'blanket.n.01', 'synonyms': ['blanket'], 'def': 'bedding that keeps a person warm in bed', 'name': 'blanket'}, {'frequency': 'c', 'id': 111, 'synset': 'blazer.n.01', 'synonyms': ['blazer', 'sport_jacket', 'sport_coat', 'sports_jacket', 'sports_coat'], 'def': 'lightweight jacket; often striped in the colors of a club or school', 'name': 'blazer'}, {'frequency': 'f', 'id': 112, 'synset': 'blender.n.01', 'synonyms': ['blender', 'liquidizer', 'liquidiser'], 'def': 'an electrically powered mixer that mix or chop or liquefy foods', 'name': 'blender'}, {'frequency': 'r', 'id': 113, 'synset': 'blimp.n.02', 'synonyms': ['blimp'], 'def': 'a small nonrigid airship used for observation or as a barrage balloon', 'name': 'blimp'}, {'frequency': 'c', 'id': 114, 'synset': 'blinker.n.01', 'synonyms': ['blinker', 'flasher'], 'def': 'a light that flashes on and off; used as a signal or to send messages', 'name': 'blinker'}, {'frequency': 'c', 'id': 115, 'synset': 'blueberry.n.02', 'synonyms': ['blueberry'], 'def': 'sweet edible dark-blue berries of blueberry plants', 'name': 'blueberry'}, {'frequency': 'r', 'id': 116, 'synset': 'boar.n.02', 'synonyms': ['boar'], 'def': 'an uncastrated male hog', 'name': 'boar'}, {'frequency': 'r', 'id': 117, 'synset': 'board.n.09', 'synonyms': ['gameboard'], 'def': 'a flat portable surface (usually rectangular) designed for board games', 'name': 'gameboard'}, {'frequency': 'f', 'id': 118, 
'synset': 'boat.n.01', 'synonyms': ['boat', 'ship_(boat)'], 'def': 'a vessel for travel on water', 'name': 'boat'}, {'frequency': 'c', 'id': 119, 'synset': 'bobbin.n.01', 'synonyms': ['bobbin', 'spool', 'reel'], 'def': 'a thing around which thread/tape/film or other flexible materials can be wound', 'name': 'bobbin'}, {'frequency': 'r', 'id': 120, 'synset': 'bobby_pin.n.01', 'synonyms': ['bobby_pin', 'hairgrip'], 'def': 'a flat wire hairpin used to hold bobbed hair in place', 'name': 'bobby_pin'}, {'frequency': 'c', 'id': 121, 'synset': 'boiled_egg.n.01', 'synonyms': ['boiled_egg', 'coddled_egg'], 'def': 'egg cooked briefly in the shell in gently boiling water', 'name': 'boiled_egg'}, {'frequency': 'r', 'id': 122, 'synset': 'bolo_tie.n.01', 'synonyms': ['bolo_tie', 'bolo', 'bola_tie', 'bola'], 'def': 'a cord fastened around the neck with an ornamental clasp and worn as a necktie', 'name': 'bolo_tie'}, {'frequency': 'c', 'id': 123, 'synset': 'bolt.n.03', 'synonyms': ['deadbolt'], 'def': 'the part of a lock that is engaged or withdrawn with a key', 'name': 'deadbolt'}, {'frequency': 'f', 'id': 124, 'synset': 'bolt.n.06', 'synonyms': ['bolt'], 'def': 'a screw that screws into a nut to form a fastener', 'name': 'bolt'}, {'frequency': 'r', 'id': 125, 'synset': 'bonnet.n.01', 'synonyms': ['bonnet'], 'def': 'a hat tied under the chin', 'name': 'bonnet'}, {'frequency': 'f', 'id': 126, 'synset': 'book.n.01', 'synonyms': ['book'], 'def': 'a written work or composition that has been published', 'name': 'book'}, {'frequency': 'r', 'id': 127, 'synset': 'book_bag.n.01', 'synonyms': ['book_bag'], 'def': 'a bag in which students carry their books', 'name': 'book_bag'}, {'frequency': 'c', 'id': 128, 'synset': 'bookcase.n.01', 'synonyms': ['bookcase'], 'def': 'a piece of furniture with shelves for storing books', 'name': 'bookcase'}, {'frequency': 'c', 'id': 129, 'synset': 'booklet.n.01', 'synonyms': ['booklet', 'brochure', 'leaflet', 'pamphlet'], 'def': 'a small book usually having a paper cover', 'name': 'booklet'}, {'frequency': 'r', 'id': 130, 'synset': 'bookmark.n.01', 'synonyms': ['bookmark', 'bookmarker'], 'def': 'a marker (a piece of paper or ribbon) placed between the pages of a book', 'name': 'bookmark'}, {'frequency': 'r', 'id': 131, 'synset': 'boom.n.04', 'synonyms': ['boom_microphone', 'microphone_boom'], 'def': 'a pole carrying an overhead microphone projected over a film or tv set', 'name': 'boom_microphone'}, {'frequency': 'f', 'id': 132, 'synset': 'boot.n.01', 'synonyms': ['boot'], 'def': 'footwear that covers the whole foot and lower leg', 'name': 'boot'}, {'frequency': 'f', 'id': 133, 'synset': 'bottle.n.01', 'synonyms': ['bottle'], 'def': 'a glass or plastic vessel used for storing drinks or other liquids', 'name': 'bottle'}, {'frequency': 'c', 'id': 134, 'synset': 'bottle_opener.n.01', 'synonyms': ['bottle_opener'], 'def': 'an opener for removing caps or corks from bottles', 'name': 'bottle_opener'}, {'frequency': 'c', 'id': 135, 'synset': 'bouquet.n.01', 'synonyms': ['bouquet'], 'def': 'an arrangement of flowers that is usually given as a present', 'name': 'bouquet'}, {'frequency': 'r', 'id': 136, 'synset': 'bow.n.04', 'synonyms': ['bow_(weapon)'], 'def': 'a weapon for shooting arrows', 'name': 'bow_(weapon)'}, {'frequency': 'f', 'id': 137, 'synset': 'bow.n.08', 'synonyms': ['bow_(decorative_ribbons)'], 'def': 'a decorative interlacing of ribbons', 'name': 'bow_(decorative_ribbons)'}, {'frequency': 'f', 'id': 138, 'synset': 'bow_tie.n.01', 'synonyms': ['bow-tie', 'bowtie'], 'def': "a 
man's tie that ties in a bow", 'name': 'bow-tie'}, {'frequency': 'f', 'id': 139, 'synset': 'bowl.n.03', 'synonyms': ['bowl'], 'def': 'a dish that is round and open at the top for serving foods', 'name': 'bowl'}, {'frequency': 'r', 'id': 140, 'synset': 'bowl.n.08', 'synonyms': ['pipe_bowl'], 'def': 'a small round container that is open at the top for holding tobacco', 'name': 'pipe_bowl'}, {'frequency': 'c', 'id': 141, 'synset': 'bowler_hat.n.01', 'synonyms': ['bowler_hat', 'bowler', 'derby_hat', 'derby', 'plug_hat'], 'def': 'a felt hat that is round and hard with a narrow brim', 'name': 'bowler_hat'}, {'frequency': 'r', 'id': 142, 'synset': 'bowling_ball.n.01', 'synonyms': ['bowling_ball'], 'def': 'a large ball with finger holes used in the sport of bowling', 'name': 'bowling_ball'}, {'frequency': 'r', 'id': 143, 'synset': 'bowling_pin.n.01', 'synonyms': ['bowling_pin'], 'def': 'a club-shaped wooden object used in bowling', 'name': 'bowling_pin'}, {'frequency': 'r', 'id': 144, 'synset': 'boxing_glove.n.01', 'synonyms': ['boxing_glove'], 'def': 'large glove coverings the fists of a fighter worn for the sport of boxing', 'name': 'boxing_glove'}, {'frequency': 'c', 'id': 145, 'synset': 'brace.n.06', 'synonyms': ['suspenders'], 'def': 'elastic straps that hold trousers up (usually used in the plural)', 'name': 'suspenders'}, {'frequency': 'f', 'id': 146, 'synset': 'bracelet.n.02', 'synonyms': ['bracelet', 'bangle'], 'def': 'jewelry worn around the wrist for decoration', 'name': 'bracelet'}, {'frequency': 'r', 'id': 147, 'synset': 'brass.n.07', 'synonyms': ['brass_plaque'], 'def': 'a memorial made of brass', 'name': 'brass_plaque'}, {'frequency': 'c', 'id': 148, 'synset': 'brassiere.n.01', 'synonyms': ['brassiere', 'bra', 'bandeau'], 'def': 'an undergarment worn by women to support their breasts', 'name': 'brassiere'}, {'frequency': 'c', 'id': 149, 'synset': 'bread-bin.n.01', 'synonyms': ['bread-bin', 'breadbox'], 'def': 'a container used to keep bread or cake in', 'name': 'bread-bin'}, {'frequency': 'r', 'id': 150, 'synset': 'breechcloth.n.01', 'synonyms': ['breechcloth', 'breechclout', 'loincloth'], 'def': 'a garment that provides covering for the loins', 'name': 'breechcloth'}, {'frequency': 'c', 'id': 151, 'synset': 'bridal_gown.n.01', 'synonyms': ['bridal_gown', 'wedding_gown', 'wedding_dress'], 'def': 'a gown worn by the bride at a wedding', 'name': 'bridal_gown'}, {'frequency': 'c', 'id': 152, 'synset': 'briefcase.n.01', 'synonyms': ['briefcase'], 'def': 'a case with a handle; for carrying papers or files or books', 'name': 'briefcase'}, {'frequency': 'c', 'id': 153, 'synset': 'bristle_brush.n.01', 'synonyms': ['bristle_brush'], 'def': 'a brush that is made with the short stiff hairs of an animal or plant', 'name': 'bristle_brush'}, {'frequency': 'f', 'id': 154, 'synset': 'broccoli.n.01', 'synonyms': ['broccoli'], 'def': 'plant with dense clusters of tight green flower buds', 'name': 'broccoli'}, {'frequency': 'r', 'id': 155, 'synset': 'brooch.n.01', 'synonyms': ['broach'], 'def': 'a decorative pin worn by women', 'name': 'broach'}, {'frequency': 'c', 'id': 156, 'synset': 'broom.n.01', 'synonyms': ['broom'], 'def': 'bundle of straws or twigs attached to a long handle; used for cleaning', 'name': 'broom'}, {'frequency': 'c', 'id': 157, 'synset': 'brownie.n.03', 'synonyms': ['brownie'], 'def': 'square or bar of very rich chocolate cake usually with nuts', 'name': 'brownie'}, {'frequency': 'c', 'id': 158, 'synset': 'brussels_sprouts.n.01', 'synonyms': ['brussels_sprouts'], 'def': 'the small 
edible cabbage-like buds growing along a stalk', 'name': 'brussels_sprouts'}, {'frequency': 'r', 'id': 159, 'synset': 'bubble_gum.n.01', 'synonyms': ['bubble_gum'], 'def': 'a kind of chewing gum that can be blown into bubbles', 'name': 'bubble_gum'}, {'frequency': 'f', 'id': 160, 'synset': 'bucket.n.01', 'synonyms': ['bucket', 'pail'], 'def': 'a roughly cylindrical vessel that is open at the top', 'name': 'bucket'}, {'frequency': 'r', 'id': 161, 'synset': 'buggy.n.01', 'synonyms': ['horse_buggy'], 'def': 'a small lightweight carriage; drawn by a single horse', 'name': 'horse_buggy'}, {'frequency': 'c', 'id': 162, 'synset': 'bull.n.11', 'synonyms': ['bull'], 'def': 'mature male cow', 'name': 'bull'}, {'frequency': 'r', 'id': 163, 'synset': 'bulldog.n.01', 'synonyms': ['bulldog'], 'def': 'a thickset short-haired dog with a large head and strong undershot lower jaw', 'name': 'bulldog'}, {'frequency': 'r', 'id': 164, 'synset': 'bulldozer.n.01', 'synonyms': ['bulldozer', 'dozer'], 'def': 'large powerful tractor; a large blade in front flattens areas of ground', 'name': 'bulldozer'}, {'frequency': 'c', 'id': 165, 'synset': 'bullet_train.n.01', 'synonyms': ['bullet_train'], 'def': 'a high-speed passenger train', 'name': 'bullet_train'}, {'frequency': 'c', 'id': 166, 'synset': 'bulletin_board.n.02', 'synonyms': ['bulletin_board', 'notice_board'], 'def': 'a board that hangs on a wall; displays announcements', 'name': 'bulletin_board'}, {'frequency': 'r', 'id': 167, 'synset': 'bulletproof_vest.n.01', 'synonyms': ['bulletproof_vest'], 'def': 'a vest capable of resisting the impact of a bullet', 'name': 'bulletproof_vest'}, {'frequency': 'c', 'id': 168, 'synset': 'bullhorn.n.01', 'synonyms': ['bullhorn', 'megaphone'], 'def': 'a portable loudspeaker with built-in microphone and amplifier', 'name': 'bullhorn'}, {'frequency': 'r', 'id': 169, 'synset': 'bully_beef.n.01', 'synonyms': ['corned_beef', 'corn_beef'], 'def': 'beef cured or pickled in brine', 'name': 'corned_beef'}, {'frequency': 'f', 'id': 170, 'synset': 'bun.n.01', 'synonyms': ['bun', 'roll'], 'def': 'small rounded bread either plain or sweet', 'name': 'bun'}, {'frequency': 'c', 'id': 171, 'synset': 'bunk_bed.n.01', 'synonyms': ['bunk_bed'], 'def': 'beds built one above the other', 'name': 'bunk_bed'}, {'frequency': 'f', 'id': 172, 'synset': 'buoy.n.01', 'synonyms': ['buoy'], 'def': 'a float attached by rope to the seabed to mark channels in a harbor or underwater hazards', 'name': 'buoy'}, {'frequency': 'r', 'id': 173, 'synset': 'burrito.n.01', 'synonyms': ['burrito'], 'def': 'a flour tortilla folded around a filling', 'name': 'burrito'}, {'frequency': 'f', 'id': 174, 'synset': 'bus.n.01', 'synonyms': ['bus_(vehicle)', 'autobus', 'charabanc', 'double-decker', 'motorbus', 'motorcoach'], 'def': 'a vehicle carrying many passengers; used for public transport', 'name': 'bus_(vehicle)'}, {'frequency': 'c', 'id': 175, 'synset': 'business_card.n.01', 'synonyms': ['business_card'], 'def': "a card on which are printed the person's name and business affiliation", 'name': 'business_card'}, {'frequency': 'c', 'id': 176, 'synset': 'butcher_knife.n.01', 'synonyms': ['butcher_knife'], 'def': 'a large sharp knife for cutting or trimming meat', 'name': 'butcher_knife'}, {'frequency': 'c', 'id': 177, 'synset': 'butter.n.01', 'synonyms': ['butter'], 'def': 'an edible emulsion of fat globules made by churning milk or cream; for cooking and table use', 'name': 'butter'}, {'frequency': 'c', 'id': 178, 'synset': 'butterfly.n.01', 'synonyms': ['butterfly'], 'def': 
'insect typically having a slender body with knobbed antennae and broad colorful wings', 'name': 'butterfly'}, {'frequency': 'f', 'id': 179, 'synset': 'button.n.01', 'synonyms': ['button'], 'def': 'a round fastener sewn to shirts and coats etc to fit through buttonholes', 'name': 'button'}, {'frequency': 'f', 'id': 180, 'synset': 'cab.n.03', 'synonyms': ['cab_(taxi)', 'taxi', 'taxicab'], 'def': 'a car that takes passengers where they want to go in exchange for money', 'name': 'cab_(taxi)'}, {'frequency': 'r', 'id': 181, 'synset': 'cabana.n.01', 'synonyms': ['cabana'], 'def': 'a small tent used as a dressing room beside the sea or a swimming pool', 'name': 'cabana'}, {'frequency': 'r', 'id': 182, 'synset': 'cabin_car.n.01', 'synonyms': ['cabin_car', 'caboose'], 'def': 'a car on a freight train for use of the train crew; usually the last car on the train', 'name': 'cabin_car'}, {'frequency': 'f', 'id': 183, 'synset': 'cabinet.n.01', 'synonyms': ['cabinet'], 'def': 'a piece of furniture resembling a cupboard with doors and shelves and drawers', 'name': 'cabinet'}, {'frequency': 'r', 'id': 184, 'synset': 'cabinet.n.03', 'synonyms': ['locker', 'storage_locker'], 'def': 'a storage compartment for clothes and valuables; usually it has a lock', 'name': 'locker'}, {'frequency': 'f', 'id': 185, 'synset': 'cake.n.03', 'synonyms': ['cake'], 'def': 'baked goods made from or based on a mixture of flour, sugar, eggs, and fat', 'name': 'cake'}, {'frequency': 'c', 'id': 186, 'synset': 'calculator.n.02', 'synonyms': ['calculator'], 'def': 'a small machine that is used for mathematical calculations', 'name': 'calculator'}, {'frequency': 'f', 'id': 187, 'synset': 'calendar.n.02', 'synonyms': ['calendar'], 'def': 'a list or register of events (appointments/social events/court cases, etc)', 'name': 'calendar'}, {'frequency': 'c', 'id': 188, 'synset': 'calf.n.01', 'synonyms': ['calf'], 'def': 'young of domestic cattle', 'name': 'calf'}, {'frequency': 'c', 'id': 189, 'synset': 'camcorder.n.01', 'synonyms': ['camcorder'], 'def': 'a portable television camera and videocassette recorder', 'name': 'camcorder'}, {'frequency': 'c', 'id': 190, 'synset': 'camel.n.01', 'synonyms': ['camel'], 'def': 'cud-chewing mammal used as a draft or saddle animal in desert regions', 'name': 'camel'}, {'frequency': 'f', 'id': 191, 'synset': 'camera.n.01', 'synonyms': ['camera'], 'def': 'equipment for taking photographs', 'name': 'camera'}, {'frequency': 'c', 'id': 192, 'synset': 'camera_lens.n.01', 'synonyms': ['camera_lens'], 'def': 'a lens that focuses the image in a camera', 'name': 'camera_lens'}, {'frequency': 'c', 'id': 193, 'synset': 'camper.n.02', 'synonyms': ['camper_(vehicle)', 'camping_bus', 'motor_home'], 'def': 'a recreational vehicle equipped for camping out while traveling', 'name': 'camper_(vehicle)'}, {'frequency': 'f', 'id': 194, 'synset': 'can.n.01', 'synonyms': ['can', 'tin_can'], 'def': 'airtight sealed metal container for food or drink or paint etc.', 'name': 'can'}, {'frequency': 'c', 'id': 195, 'synset': 'can_opener.n.01', 'synonyms': ['can_opener', 'tin_opener'], 'def': 'a device for cutting cans open', 'name': 'can_opener'}, {'frequency': 'r', 'id': 196, 'synset': 'candelabrum.n.01', 'synonyms': ['candelabrum', 'candelabra'], 'def': 'branched candlestick; ornamental; has several lights', 'name': 'candelabrum'}, {'frequency': 'f', 'id': 197, 'synset': 'candle.n.01', 'synonyms': ['candle', 'candlestick'], 'def': 'stick of wax with a wick in the middle', 'name': 'candle'}, {'frequency': 'f', 'id': 198, 'synset': 
'candlestick.n.01', 'synonyms': ['candle_holder'], 'def': 'a holder with sockets for candles', 'name': 'candle_holder'}, {'frequency': 'r', 'id': 199, 'synset': 'candy_bar.n.01', 'synonyms': ['candy_bar'], 'def': 'a candy shaped as a bar', 'name': 'candy_bar'}, {'frequency': 'c', 'id': 200, 'synset': 'candy_cane.n.01', 'synonyms': ['candy_cane'], 'def': 'a hard candy in the shape of a rod (usually with stripes)', 'name': 'candy_cane'}, {'frequency': 'c', 'id': 201, 'synset': 'cane.n.01', 'synonyms': ['walking_cane'], 'def': 'a stick that people can lean on to help them walk', 'name': 'walking_cane'}, {'frequency': 'c', 'id': 202, 'synset': 'canister.n.02', 'synonyms': ['canister', 'cannister'], 'def': 'metal container for storing dry foods such as tea or flour', 'name': 'canister'}, {'frequency': 'r', 'id': 203, 'synset': 'cannon.n.02', 'synonyms': ['cannon'], 'def': 'heavy gun fired from a tank', 'name': 'cannon'}, {'frequency': 'c', 'id': 204, 'synset': 'canoe.n.01', 'synonyms': ['canoe'], 'def': 'small and light boat; pointed at both ends; propelled with a paddle', 'name': 'canoe'}, {'frequency': 'r', 'id': 205, 'synset': 'cantaloup.n.02', 'synonyms': ['cantaloup', 'cantaloupe'], 'def': 'the fruit of a cantaloup vine; small to medium-sized melon with yellowish flesh', 'name': 'cantaloup'}, {'frequency': 'r', 'id': 206, 'synset': 'canteen.n.01', 'synonyms': ['canteen'], 'def': 'a flask for carrying water; used by soldiers or travelers', 'name': 'canteen'}, {'frequency': 'c', 'id': 207, 'synset': 'cap.n.01', 'synonyms': ['cap_(headwear)'], 'def': 'a tight-fitting headwear', 'name': 'cap_(headwear)'}, {'frequency': 'f', 'id': 208, 'synset': 'cap.n.02', 'synonyms': ['bottle_cap', 'cap_(container_lid)'], 'def': 'a top (as for a bottle)', 'name': 'bottle_cap'}, {'frequency': 'r', 'id': 209, 'synset': 'cape.n.02', 'synonyms': ['cape'], 'def': 'a sleeveless garment like a cloak but shorter', 'name': 'cape'}, {'frequency': 'c', 'id': 210, 'synset': 'cappuccino.n.01', 'synonyms': ['cappuccino', 'coffee_cappuccino'], 'def': 'equal parts of espresso and steamed milk', 'name': 'cappuccino'}, {'frequency': 'f', 'id': 211, 'synset': 'car.n.01', 'synonyms': ['car_(automobile)', 'auto_(automobile)', 'automobile'], 'def': 'a motor vehicle with four wheels', 'name': 'car_(automobile)'}, {'frequency': 'f', 'id': 212, 'synset': 'car.n.02', 'synonyms': ['railcar_(part_of_a_train)', 'railway_car_(part_of_a_train)', 'railroad_car_(part_of_a_train)'], 'def': 'a wheeled vehicle adapted to the rails of railroad', 'name': 'railcar_(part_of_a_train)'}, {'frequency': 'r', 'id': 213, 'synset': 'car.n.04', 'synonyms': ['elevator_car'], 'def': 'where passengers ride up and down', 'name': 'elevator_car'}, {'frequency': 'r', 'id': 214, 'synset': 'car_battery.n.01', 'synonyms': ['car_battery', 'automobile_battery'], 'def': 'a battery in a motor vehicle', 'name': 'car_battery'}, {'frequency': 'c', 'id': 215, 'synset': 'card.n.02', 'synonyms': ['identity_card'], 'def': 'a card certifying the identity of the bearer', 'name': 'identity_card'}, {'frequency': 'c', 'id': 216, 'synset': 'card.n.03', 'synonyms': ['card'], 'def': 'a rectangular piece of paper used to send messages (e.g. 
greetings or pictures)', 'name': 'card'}, {'frequency': 'r', 'id': 217, 'synset': 'cardigan.n.01', 'synonyms': ['cardigan'], 'def': 'knitted jacket that is fastened up the front with buttons or a zipper', 'name': 'cardigan'}, {'frequency': 'r', 'id': 218, 'synset': 'cargo_ship.n.01', 'synonyms': ['cargo_ship', 'cargo_vessel'], 'def': 'a ship designed to carry cargo', 'name': 'cargo_ship'}, {'frequency': 'r', 'id': 219, 'synset': 'carnation.n.01', 'synonyms': ['carnation'], 'def': 'plant with pink to purple-red spice-scented usually double flowers', 'name': 'carnation'}, {'frequency': 'c', 'id': 220, 'synset': 'carriage.n.02', 'synonyms': ['horse_carriage'], 'def': 'a vehicle with wheels drawn by one or more horses', 'name': 'horse_carriage'}, {'frequency': 'f', 'id': 221, 'synset': 'carrot.n.01', 'synonyms': ['carrot'], 'def': 'deep orange edible root of the cultivated carrot plant', 'name': 'carrot'}, {'frequency': 'c', 'id': 222, 'synset': 'carryall.n.01', 'synonyms': ['tote_bag'], 'def': 'a capacious bag or basket', 'name': 'tote_bag'}, {'frequency': 'c', 'id': 223, 'synset': 'cart.n.01', 'synonyms': ['cart'], 'def': 'a heavy open wagon usually having two wheels and drawn by an animal', 'name': 'cart'}, {'frequency': 'c', 'id': 224, 'synset': 'carton.n.02', 'synonyms': ['carton'], 'def': 'a box made of cardboard; opens by flaps on top', 'name': 'carton'}, {'frequency': 'c', 'id': 225, 'synset': 'cash_register.n.01', 'synonyms': ['cash_register', 'register_(for_cash_transactions)'], 'def': 'a cashbox with an adding machine to register transactions', 'name': 'cash_register'}, {'frequency': 'r', 'id': 226, 'synset': 'casserole.n.01', 'synonyms': ['casserole'], 'def': 'food cooked and served in a casserole', 'name': 'casserole'}, {'frequency': 'r', 'id': 227, 'synset': 'cassette.n.01', 'synonyms': ['cassette'], 'def': 'a container that holds a magnetic tape used for recording or playing sound or video', 'name': 'cassette'}, {'frequency': 'c', 'id': 228, 'synset': 'cast.n.05', 'synonyms': ['cast', 'plaster_cast', 'plaster_bandage'], 'def': 'bandage consisting of a firm covering that immobilizes broken bones while they heal', 'name': 'cast'}, {'frequency': 'f', 'id': 229, 'synset': 'cat.n.01', 'synonyms': ['cat'], 'def': 'a domestic house cat', 'name': 'cat'}, {'frequency': 'c', 'id': 230, 'synset': 'cauliflower.n.02', 'synonyms': ['cauliflower'], 'def': 'edible compact head of white undeveloped flowers', 'name': 'cauliflower'}, {'frequency': 'r', 'id': 231, 'synset': 'caviar.n.01', 'synonyms': ['caviar', 'caviare'], 'def': "salted roe of sturgeon or other large fish; usually served as an hors d'oeuvre", 'name': 'caviar'}, {'frequency': 'c', 'id': 232, 'synset': 'cayenne.n.02', 'synonyms': ['cayenne_(spice)', 'cayenne_pepper_(spice)', 'red_pepper_(spice)'], 'def': 'ground pods and seeds of pungent red peppers of the genus Capsicum', 'name': 'cayenne_(spice)'}, {'frequency': 'c', 'id': 233, 'synset': 'cd_player.n.01', 'synonyms': ['CD_player'], 'def': 'electronic equipment for playing compact discs (CDs)', 'name': 'CD_player'}, {'frequency': 'c', 'id': 234, 'synset': 'celery.n.01', 'synonyms': ['celery'], 'def': 'widely cultivated herb with aromatic leaf stalks that are eaten raw or cooked', 'name': 'celery'}, {'frequency': 'f', 'id': 235, 'synset': 'cellular_telephone.n.01', 'synonyms': ['cellular_telephone', 'cellular_phone', 'cellphone', 'mobile_phone', 'smart_phone'], 'def': 'a hand-held mobile telephone', 'name': 'cellular_telephone'}, {'frequency': 'r', 'id': 236, 'synset': 
'chain_mail.n.01', 'synonyms': ['chain_mail', 'ring_mail', 'chain_armor', 'chain_armour', 'ring_armor', 'ring_armour'], 'def': '(Middle Ages) flexible armor made of interlinked metal rings', 'name': 'chain_mail'}, {'frequency': 'f', 'id': 237, 'synset': 'chair.n.01', 'synonyms': ['chair'], 'def': 'a seat for one person, with a support for the back', 'name': 'chair'}, {'frequency': 'r', 'id': 238, 'synset': 'chaise_longue.n.01', 'synonyms': ['chaise_longue', 'chaise', 'daybed'], 'def': 'a long chair; for reclining', 'name': 'chaise_longue'}, {'frequency': 'r', 'id': 239, 'synset': 'champagne.n.01', 'synonyms': ['champagne'], 'def': 'a white sparkling wine produced in Champagne or resembling that produced there', 'name': 'champagne'}, {'frequency': 'f', 'id': 240, 'synset': 'chandelier.n.01', 'synonyms': ['chandelier'], 'def': 'branched lighting fixture; often ornate; hangs from the ceiling', 'name': 'chandelier'}, {'frequency': 'r', 'id': 241, 'synset': 'chap.n.04', 'synonyms': ['chap'], 'def': 'leather leggings without a seat; worn over trousers by cowboys to protect their legs', 'name': 'chap'}, {'frequency': 'r', 'id': 242, 'synset': 'checkbook.n.01', 'synonyms': ['checkbook', 'chequebook'], 'def': 'a book issued to holders of checking accounts', 'name': 'checkbook'}, {'frequency': 'r', 'id': 243, 'synset': 'checkerboard.n.01', 'synonyms': ['checkerboard'], 'def': 'a board having 64 squares of two alternating colors', 'name': 'checkerboard'}, {'frequency': 'c', 'id': 244, 'synset': 'cherry.n.03', 'synonyms': ['cherry'], 'def': 'a red fruit with a single hard stone', 'name': 'cherry'}, {'frequency': 'r', 'id': 245, 'synset': 'chessboard.n.01', 'synonyms': ['chessboard'], 'def': 'a checkerboard used to play chess', 'name': 'chessboard'}, {'frequency': 'r', 'id': 246, 'synset': 'chest_of_drawers.n.01', 'synonyms': ['chest_of_drawers_(furniture)', 'bureau_(furniture)', 'chest_(furniture)'], 'def': 'furniture with drawers for keeping clothes', 'name': 'chest_of_drawers_(furniture)'}, {'frequency': 'c', 'id': 247, 'synset': 'chicken.n.02', 'synonyms': ['chicken_(animal)'], 'def': 'a domestic fowl bred for flesh or eggs', 'name': 'chicken_(animal)'}, {'frequency': 'c', 'id': 248, 'synset': 'chicken_wire.n.01', 'synonyms': ['chicken_wire'], 'def': 'a galvanized wire network with a hexagonal mesh; used to build fences', 'name': 'chicken_wire'}, {'frequency': 'r', 'id': 249, 'synset': 'chickpea.n.01', 'synonyms': ['chickpea', 'garbanzo'], 'def': 'the seed of the chickpea plant; usually dried', 'name': 'chickpea'}, {'frequency': 'r', 'id': 250, 'synset': 'chihuahua.n.03', 'synonyms': ['Chihuahua'], 'def': 'an old breed of tiny short-haired dog with protruding eyes from Mexico', 'name': 'Chihuahua'}, {'frequency': 'r', 'id': 251, 'synset': 'chili.n.02', 'synonyms': ['chili_(vegetable)', 'chili_pepper_(vegetable)', 'chilli_(vegetable)', 'chilly_(vegetable)', 'chile_(vegetable)'], 'def': 'very hot and finely tapering pepper of special pungency', 'name': 'chili_(vegetable)'}, {'frequency': 'r', 'id': 252, 'synset': 'chime.n.01', 'synonyms': ['chime', 'gong'], 'def': 'an instrument consisting of a set of bells that are struck with a hammer', 'name': 'chime'}, {'frequency': 'r', 'id': 253, 'synset': 'chinaware.n.01', 'synonyms': ['chinaware'], 'def': 'dishware made of high quality porcelain', 'name': 'chinaware'}, {'frequency': 'c', 'id': 254, 'synset': 'chip.n.04', 'synonyms': ['crisp_(potato_chip)', 'potato_chip'], 'def': 'a thin crisp slice of potato fried in deep fat', 'name': 'crisp_(potato_chip)'}, 
{'frequency': 'r', 'id': 255, 'synset': 'chip.n.06', 'synonyms': ['poker_chip'], 'def': 'a small disk-shaped counter used to represent money when gambling', 'name': 'poker_chip'}, {'frequency': 'c', 'id': 256, 'synset': 'chocolate_bar.n.01', 'synonyms': ['chocolate_bar'], 'def': 'a bar of chocolate candy', 'name': 'chocolate_bar'}, {'frequency': 'c', 'id': 257, 'synset': 'chocolate_cake.n.01', 'synonyms': ['chocolate_cake'], 'def': 'cake containing chocolate', 'name': 'chocolate_cake'}, {'frequency': 'r', 'id': 258, 'synset': 'chocolate_milk.n.01', 'synonyms': ['chocolate_milk'], 'def': 'milk flavored with chocolate syrup', 'name': 'chocolate_milk'}, {'frequency': 'r', 'id': 259, 'synset': 'chocolate_mousse.n.01', 'synonyms': ['chocolate_mousse'], 'def': 'dessert mousse made with chocolate', 'name': 'chocolate_mousse'}, {'frequency': 'f', 'id': 260, 'synset': 'choker.n.03', 'synonyms': ['choker', 'collar', 'neckband'], 'def': 'necklace that fits tightly around the neck', 'name': 'choker'}, {'frequency': 'f', 'id': 261, 'synset': 'chopping_board.n.01', 'synonyms': ['chopping_board', 'cutting_board', 'chopping_block'], 'def': 'a wooden board where meats or vegetables can be cut', 'name': 'chopping_board'}, {'frequency': 'c', 'id': 262, 'synset': 'chopstick.n.01', 'synonyms': ['chopstick'], 'def': 'one of a pair of slender sticks used as oriental tableware to eat food with', 'name': 'chopstick'}, {'frequency': 'f', 'id': 263, 'synset': 'christmas_tree.n.05', 'synonyms': ['Christmas_tree'], 'def': 'an ornamented evergreen used as a Christmas decoration', 'name': 'Christmas_tree'}, {'frequency': 'c', 'id': 264, 'synset': 'chute.n.02', 'synonyms': ['slide'], 'def': 'sloping channel through which things can descend', 'name': 'slide'}, {'frequency': 'r', 'id': 265, 'synset': 'cider.n.01', 'synonyms': ['cider', 'cyder'], 'def': 'a beverage made from juice pressed from apples', 'name': 'cider'}, {'frequency': 'r', 'id': 266, 'synset': 'cigar_box.n.01', 'synonyms': ['cigar_box'], 'def': 'a box for holding cigars', 'name': 'cigar_box'}, {'frequency': 'c', 'id': 267, 'synset': 'cigarette.n.01', 'synonyms': ['cigarette'], 'def': 'finely ground tobacco wrapped in paper; for smoking', 'name': 'cigarette'}, {'frequency': 'c', 'id': 268, 'synset': 'cigarette_case.n.01', 'synonyms': ['cigarette_case', 'cigarette_pack'], 'def': 'a small flat case for holding cigarettes', 'name': 'cigarette_case'}, {'frequency': 'f', 'id': 269, 'synset': 'cistern.n.02', 'synonyms': ['cistern', 'water_tank'], 'def': 'a tank that holds the water used to flush a toilet', 'name': 'cistern'}, {'frequency': 'r', 'id': 270, 'synset': 'clarinet.n.01', 'synonyms': ['clarinet'], 'def': 'a single-reed instrument with a straight tube', 'name': 'clarinet'}, {'frequency': 'r', 'id': 271, 'synset': 'clasp.n.01', 'synonyms': ['clasp'], 'def': 'a fastener (as a buckle or hook) that is used to hold two things together', 'name': 'clasp'}, {'frequency': 'c', 'id': 272, 'synset': 'cleansing_agent.n.01', 'synonyms': ['cleansing_agent', 'cleanser', 'cleaner'], 'def': 'a preparation used in cleaning something', 'name': 'cleansing_agent'}, {'frequency': 'r', 'id': 273, 'synset': 'clementine.n.01', 'synonyms': ['clementine'], 'def': 'a variety of mandarin orange', 'name': 'clementine'}, {'frequency': 'c', 'id': 274, 'synset': 'clip.n.03', 'synonyms': ['clip'], 'def': 'any of various small fasteners used to hold loose articles together', 'name': 'clip'}, {'frequency': 'c', 'id': 275, 'synset': 'clipboard.n.01', 'synonyms': ['clipboard'], 'def': 'a small 
writing board with a clip at the top for holding papers', 'name': 'clipboard'}, {'frequency': 'f', 'id': 276, 'synset': 'clock.n.01', 'synonyms': ['clock', 'timepiece', 'timekeeper'], 'def': 'a timepiece that shows the time of day', 'name': 'clock'}, {'frequency': 'f', 'id': 277, 'synset': 'clock_tower.n.01', 'synonyms': ['clock_tower'], 'def': 'a tower with a large clock visible high up on an outside face', 'name': 'clock_tower'}, {'frequency': 'c', 'id': 278, 'synset': 'clothes_hamper.n.01', 'synonyms': ['clothes_hamper', 'laundry_basket', 'clothes_basket'], 'def': 'a hamper that holds dirty clothes to be washed or wet clothes to be dried', 'name': 'clothes_hamper'}, {'frequency': 'c', 'id': 279, 'synset': 'clothespin.n.01', 'synonyms': ['clothespin', 'clothes_peg'], 'def': 'wood or plastic fastener; for holding clothes on a clothesline', 'name': 'clothespin'}, {'frequency': 'r', 'id': 280, 'synset': 'clutch_bag.n.01', 'synonyms': ['clutch_bag'], 'def': "a woman's strapless purse that is carried in the hand", 'name': 'clutch_bag'}, {'frequency': 'f', 'id': 281, 'synset': 'coaster.n.03', 'synonyms': ['coaster'], 'def': 'a covering (plate or mat) that protects the surface of a table', 'name': 'coaster'}, {'frequency': 'f', 'id': 282, 'synset': 'coat.n.01', 'synonyms': ['coat'], 'def': 'an outer garment that has sleeves and covers the body from shoulder down', 'name': 'coat'}, {'frequency': 'c', 'id': 283, 'synset': 'coat_hanger.n.01', 'synonyms': ['coat_hanger', 'clothes_hanger', 'dress_hanger'], 'def': "a hanger that is shaped like a person's shoulders", 'name': 'coat_hanger'}, {'frequency': 'r', 'id': 284, 'synset': 'coatrack.n.01', 'synonyms': ['coatrack', 'hatrack'], 'def': 'a rack with hooks for temporarily holding coats and hats', 'name': 'coatrack'}, {'frequency': 'c', 'id': 285, 'synset': 'cock.n.04', 'synonyms': ['cock', 'rooster'], 'def': 'adult male chicken', 'name': 'cock'}, {'frequency': 'c', 'id': 286, 'synset': 'coconut.n.02', 'synonyms': ['coconut', 'cocoanut'], 'def': 'large hard-shelled brown oval nut with a fibrous husk', 'name': 'coconut'}, {'frequency': 'r', 'id': 287, 'synset': 'coffee_filter.n.01', 'synonyms': ['coffee_filter'], 'def': 'filter (usually of paper) that passes the coffee and retains the coffee grounds', 'name': 'coffee_filter'}, {'frequency': 'f', 'id': 288, 'synset': 'coffee_maker.n.01', 'synonyms': ['coffee_maker', 'coffee_machine'], 'def': 'a kitchen appliance for brewing coffee automatically', 'name': 'coffee_maker'}, {'frequency': 'f', 'id': 289, 'synset': 'coffee_table.n.01', 'synonyms': ['coffee_table', 'cocktail_table'], 'def': 'low table where magazines can be placed and coffee or cocktails are served', 'name': 'coffee_table'}, {'frequency': 'c', 'id': 290, 'synset': 'coffeepot.n.01', 'synonyms': ['coffeepot'], 'def': 'tall pot in which coffee is brewed', 'name': 'coffeepot'}, {'frequency': 'r', 'id': 291, 'synset': 'coil.n.05', 'synonyms': ['coil'], 'def': 'tubing that is wound in a spiral', 'name': 'coil'}, {'frequency': 'c', 'id': 292, 'synset': 'coin.n.01', 'synonyms': ['coin'], 'def': 'a flat metal piece (usually a disc) used as money', 'name': 'coin'}, {'frequency': 'r', 'id': 293, 'synset': 'colander.n.01', 'synonyms': ['colander', 'cullender'], 'def': 'bowl-shaped strainer; used to wash or drain foods', 'name': 'colander'}, {'frequency': 'c', 'id': 294, 'synset': 'coleslaw.n.01', 'synonyms': ['coleslaw', 'slaw'], 'def': 'basically shredded cabbage', 'name': 'coleslaw'}, {'frequency': 'r', 'id': 295, 'synset': 'coloring_material.n.01', 
'synonyms': ['coloring_material', 'colouring_material'], 'def': 'any material used for its color', 'name': 'coloring_material'}, {'frequency': 'r', 'id': 296, 'synset': 'combination_lock.n.01', 'synonyms': ['combination_lock'], 'def': 'lock that can be opened only by turning dials in a special sequence', 'name': 'combination_lock'}, {'frequency': 'c', 'id': 297, 'synset': 'comforter.n.04', 'synonyms': ['pacifier', 'teething_ring'], 'def': 'device used for an infant to suck or bite on', 'name': 'pacifier'}, {'frequency': 'r', 'id': 298, 'synset': 'comic_book.n.01', 'synonyms': ['comic_book'], 'def': 'a magazine devoted to comic strips', 'name': 'comic_book'}, {'frequency': 'f', 'id': 299, 'synset': 'computer_keyboard.n.01', 'synonyms': ['computer_keyboard', 'keyboard_(computer)'], 'def': 'a keyboard that is a data input device for computers', 'name': 'computer_keyboard'}, {'frequency': 'r', 'id': 300, 'synset': 'concrete_mixer.n.01', 'synonyms': ['concrete_mixer', 'cement_mixer'], 'def': 'a machine with a large revolving drum in which cement/concrete is mixed', 'name': 'concrete_mixer'}, {'frequency': 'f', 'id': 301, 'synset': 'cone.n.01', 'synonyms': ['cone', 'traffic_cone'], 'def': 'a cone-shaped object used to direct traffic', 'name': 'cone'}, {'frequency': 'f', 'id': 302, 'synset': 'control.n.09', 'synonyms': ['control', 'controller'], 'def': 'a mechanism that controls the operation of a machine', 'name': 'control'}, {'frequency': 'r', 'id': 303, 'synset': 'convertible.n.01', 'synonyms': ['convertible_(automobile)'], 'def': 'a car that has top that can be folded or removed', 'name': 'convertible_(automobile)'}, {'frequency': 'r', 'id': 304, 'synset': 'convertible.n.03', 'synonyms': ['sofa_bed'], 'def': 'a sofa that can be converted into a bed', 'name': 'sofa_bed'}, {'frequency': 'c', 'id': 305, 'synset': 'cookie.n.01', 'synonyms': ['cookie', 'cooky', 'biscuit_(cookie)'], 'def': "any of various small flat sweet cakes (`biscuit' is the British term)", 'name': 'cookie'}, {'frequency': 'r', 'id': 306, 'synset': 'cookie_jar.n.01', 'synonyms': ['cookie_jar', 'cooky_jar'], 'def': 'a jar in which cookies are kept (and sometimes money is hidden)', 'name': 'cookie_jar'}, {'frequency': 'r', 'id': 307, 'synset': 'cooking_utensil.n.01', 'synonyms': ['cooking_utensil'], 'def': 'a kitchen utensil made of material that does not melt easily; used for cooking', 'name': 'cooking_utensil'}, {'frequency': 'f', 'id': 308, 'synset': 'cooler.n.01', 'synonyms': ['cooler_(for_food)', 'ice_chest'], 'def': 'an insulated box for storing food often with ice', 'name': 'cooler_(for_food)'}, {'frequency': 'c', 'id': 309, 'synset': 'cork.n.04', 'synonyms': ['cork_(bottle_plug)', 'bottle_cork'], 'def': 'the plug in the mouth of a bottle (especially a wine bottle)', 'name': 'cork_(bottle_plug)'}, {'frequency': 'r', 'id': 310, 'synset': 'corkboard.n.01', 'synonyms': ['corkboard'], 'def': 'a sheet consisting of cork granules', 'name': 'corkboard'}, {'frequency': 'r', 'id': 311, 'synset': 'corkscrew.n.01', 'synonyms': ['corkscrew', 'bottle_screw'], 'def': 'a bottle opener that pulls corks', 'name': 'corkscrew'}, {'frequency': 'c', 'id': 312, 'synset': 'corn.n.03', 'synonyms': ['edible_corn', 'corn', 'maize'], 'def': 'ears of corn that can be prepared and served for human food', 'name': 'edible_corn'}, {'frequency': 'r', 'id': 313, 'synset': 'cornbread.n.01', 'synonyms': ['cornbread'], 'def': 'bread made primarily of cornmeal', 'name': 'cornbread'}, {'frequency': 'c', 'id': 314, 'synset': 'cornet.n.01', 'synonyms': ['cornet', 
'horn', 'trumpet'], 'def': 'a brass musical instrument with a narrow tube and a flared bell and many valves', 'name': 'cornet'}, {'frequency': 'c', 'id': 315, 'synset': 'cornice.n.01', 'synonyms': ['cornice', 'valance', 'valance_board', 'pelmet'], 'def': 'a decorative framework to conceal curtain fixtures at the top of a window casing', 'name': 'cornice'}, {'frequency': 'r', 'id': 316, 'synset': 'cornmeal.n.01', 'synonyms': ['cornmeal'], 'def': 'coarsely ground corn', 'name': 'cornmeal'}, {'frequency': 'r', 'id': 317, 'synset': 'corset.n.01', 'synonyms': ['corset', 'girdle'], 'def': "a woman's close-fitting foundation garment", 'name': 'corset'}, {'frequency': 'r', 'id': 318, 'synset': 'cos.n.02', 'synonyms': ['romaine_lettuce'], 'def': 'lettuce with long dark-green leaves in a loosely packed elongated head', 'name': 'romaine_lettuce'}, {'frequency': 'c', 'id': 319, 'synset': 'costume.n.04', 'synonyms': ['costume'], 'def': 'the attire characteristic of a country or a time or a social class', 'name': 'costume'}, {'frequency': 'r', 'id': 320, 'synset': 'cougar.n.01', 'synonyms': ['cougar', 'puma', 'catamount', 'mountain_lion', 'panther'], 'def': 'large American feline resembling a lion', 'name': 'cougar'}, {'frequency': 'r', 'id': 321, 'synset': 'coverall.n.01', 'synonyms': ['coverall'], 'def': 'a loose-fitting protective garment that is worn over other clothing', 'name': 'coverall'}, {'frequency': 'r', 'id': 322, 'synset': 'cowbell.n.01', 'synonyms': ['cowbell'], 'def': 'a bell hung around the neck of cow so that the cow can be easily located', 'name': 'cowbell'}, {'frequency': 'f', 'id': 323, 'synset': 'cowboy_hat.n.01', 'synonyms': ['cowboy_hat', 'ten-gallon_hat'], 'def': 'a hat with a wide brim and a soft crown; worn by American ranch hands', 'name': 'cowboy_hat'}, {'frequency': 'r', 'id': 324, 'synset': 'crab.n.01', 'synonyms': ['crab_(animal)'], 'def': 'decapod having eyes on short stalks and a broad flattened shell and pincers', 'name': 'crab_(animal)'}, {'frequency': 'c', 'id': 325, 'synset': 'cracker.n.01', 'synonyms': ['cracker'], 'def': 'a thin crisp wafer', 'name': 'cracker'}, {'frequency': 'r', 'id': 326, 'synset': 'crape.n.01', 'synonyms': ['crape', 'crepe', 'French_pancake'], 'def': 'small very thin pancake', 'name': 'crape'}, {'frequency': 'f', 'id': 327, 'synset': 'crate.n.01', 'synonyms': ['crate'], 'def': 'a rugged box (usually made of wood); used for shipping', 'name': 'crate'}, {'frequency': 'r', 'id': 328, 'synset': 'crayon.n.01', 'synonyms': ['crayon', 'wax_crayon'], 'def': 'writing or drawing implement made of a colored stick of composition wax', 'name': 'crayon'}, {'frequency': 'r', 'id': 329, 'synset': 'cream_pitcher.n.01', 'synonyms': ['cream_pitcher'], 'def': 'a small pitcher for serving cream', 'name': 'cream_pitcher'}, {'frequency': 'r', 'id': 330, 'synset': 'credit_card.n.01', 'synonyms': ['credit_card', 'charge_card', 'debit_card'], 'def': 'a card, usually plastic, used to pay for goods and services', 'name': 'credit_card'}, {'frequency': 'c', 'id': 331, 'synset': 'crescent_roll.n.01', 'synonyms': ['crescent_roll', 'croissant'], 'def': 'very rich flaky crescent-shaped roll', 'name': 'crescent_roll'}, {'frequency': 'c', 'id': 332, 'synset': 'crib.n.01', 'synonyms': ['crib', 'cot'], 'def': 'baby bed with high sides made of slats', 'name': 'crib'}, {'frequency': 'c', 'id': 333, 'synset': 'crock.n.03', 'synonyms': ['crock_pot', 'earthenware_jar'], 'def': 'an earthen jar (made of baked clay)', 'name': 'crock_pot'}, {'frequency': 'f', 'id': 334, 'synset': 
'crossbar.n.01', 'synonyms': ['crossbar'], 'def': 'a horizontal bar that goes across something', 'name': 'crossbar'}, {'frequency': 'r', 'id': 335, 'synset': 'crouton.n.01', 'synonyms': ['crouton'], 'def': 'a small piece of toasted or fried bread; served in soup or salads', 'name': 'crouton'}, {'frequency': 'r', 'id': 336, 'synset': 'crow.n.01', 'synonyms': ['crow'], 'def': 'black birds having a raucous call', 'name': 'crow'}, {'frequency': 'c', 'id': 337, 'synset': 'crown.n.04', 'synonyms': ['crown'], 'def': 'an ornamental jeweled headdress signifying sovereignty', 'name': 'crown'}, {'frequency': 'c', 'id': 338, 'synset': 'crucifix.n.01', 'synonyms': ['crucifix'], 'def': 'representation of the cross on which Jesus died', 'name': 'crucifix'}, {'frequency': 'c', 'id': 339, 'synset': 'cruise_ship.n.01', 'synonyms': ['cruise_ship', 'cruise_liner'], 'def': 'a passenger ship used commercially for pleasure cruises', 'name': 'cruise_ship'}, {'frequency': 'c', 'id': 340, 'synset': 'cruiser.n.01', 'synonyms': ['police_cruiser', 'patrol_car', 'police_car', 'squad_car'], 'def': 'a car in which policemen cruise the streets', 'name': 'police_cruiser'}, {'frequency': 'c', 'id': 341, 'synset': 'crumb.n.03', 'synonyms': ['crumb'], 'def': 'small piece of e.g. bread or cake', 'name': 'crumb'}, {'frequency': 'r', 'id': 342, 'synset': 'crutch.n.01', 'synonyms': ['crutch'], 'def': 'a wooden or metal staff that fits under the armpit and reaches to the ground', 'name': 'crutch'}, {'frequency': 'c', 'id': 343, 'synset': 'cub.n.03', 'synonyms': ['cub_(animal)'], 'def': 'the young of certain carnivorous mammals such as the bear or wolf or lion', 'name': 'cub_(animal)'}, {'frequency': 'r', 'id': 344, 'synset': 'cube.n.05', 'synonyms': ['cube', 'square_block'], 'def': 'a block in the (approximate) shape of a cube', 'name': 'cube'}, {'frequency': 'f', 'id': 345, 'synset': 'cucumber.n.02', 'synonyms': ['cucumber', 'cuke'], 'def': 'cylindrical green fruit with thin green rind and white flesh eaten as a vegetable', 'name': 'cucumber'}, {'frequency': 'c', 'id': 346, 'synset': 'cufflink.n.01', 'synonyms': ['cufflink'], 'def': 'jewelry consisting of linked buttons used to fasten the cuffs of a shirt', 'name': 'cufflink'}, {'frequency': 'f', 'id': 347, 'synset': 'cup.n.01', 'synonyms': ['cup'], 'def': 'a small open container usually used for drinking; usually has a handle', 'name': 'cup'}, {'frequency': 'c', 'id': 348, 'synset': 'cup.n.08', 'synonyms': ['trophy_cup'], 'def': 'a metal vessel with handles that is awarded as a trophy to a competition winner', 'name': 'trophy_cup'}, {'frequency': 'c', 'id': 349, 'synset': 'cupcake.n.01', 'synonyms': ['cupcake'], 'def': 'small cake baked in a muffin tin', 'name': 'cupcake'}, {'frequency': 'r', 'id': 350, 'synset': 'curler.n.01', 'synonyms': ['hair_curler', 'hair_roller', 'hair_crimper'], 'def': 'a cylindrical tube around which the hair is wound to curl it', 'name': 'hair_curler'}, {'frequency': 'r', 'id': 351, 'synset': 'curling_iron.n.01', 'synonyms': ['curling_iron'], 'def': 'a cylindrical home appliance that heats hair that has been curled around it', 'name': 'curling_iron'}, {'frequency': 'f', 'id': 352, 'synset': 'curtain.n.01', 'synonyms': ['curtain', 'drapery'], 'def': 'hanging cloth used as a blind (especially for a window)', 'name': 'curtain'}, {'frequency': 'f', 'id': 353, 'synset': 'cushion.n.03', 'synonyms': ['cushion'], 'def': 'a soft bag filled with air or padding such as feathers or foam rubber', 'name': 'cushion'}, {'frequency': 'r', 'id': 354, 'synset': 
'custard.n.01', 'synonyms': ['custard'], 'def': 'sweetened mixture of milk and eggs baked or boiled or frozen', 'name': 'custard'}, {'frequency': 'c', 'id': 355, 'synset': 'cutter.n.06', 'synonyms': ['cutting_tool'], 'def': 'a cutting implement; a tool for cutting', 'name': 'cutting_tool'}, {'frequency': 'r', 'id': 356, 'synset': 'cylinder.n.04', 'synonyms': ['cylinder'], 'def': 'a cylindrical container', 'name': 'cylinder'}, {'frequency': 'r', 'id': 357, 'synset': 'cymbal.n.01', 'synonyms': ['cymbal'], 'def': 'a percussion instrument consisting of a concave brass disk', 'name': 'cymbal'}, {'frequency': 'r', 'id': 358, 'synset': 'dachshund.n.01', 'synonyms': ['dachshund', 'dachsie', 'badger_dog'], 'def': 'small long-bodied short-legged breed of dog having a short sleek coat and long drooping ears', 'name': 'dachshund'}, {'frequency': 'r', 'id': 359, 'synset': 'dagger.n.01', 'synonyms': ['dagger'], 'def': 'a short knife with a pointed blade used for piercing or stabbing', 'name': 'dagger'}, {'frequency': 'r', 'id': 360, 'synset': 'dartboard.n.01', 'synonyms': ['dartboard'], 'def': 'a circular board of wood or cork used as the target in the game of darts', 'name': 'dartboard'}, {'frequency': 'r', 'id': 361, 'synset': 'date.n.08', 'synonyms': ['date_(fruit)'], 'def': 'sweet edible fruit of the date palm with a single long woody seed', 'name': 'date_(fruit)'}, {'frequency': 'f', 'id': 362, 'synset': 'deck_chair.n.01', 'synonyms': ['deck_chair', 'beach_chair'], 'def': 'a folding chair for use outdoors; a wooden frame supports a length of canvas', 'name': 'deck_chair'}, {'frequency': 'c', 'id': 363, 'synset': 'deer.n.01', 'synonyms': ['deer', 'cervid'], 'def': "distinguished from Bovidae by the male's having solid deciduous antlers", 'name': 'deer'}, {'frequency': 'c', 'id': 364, 'synset': 'dental_floss.n.01', 'synonyms': ['dental_floss', 'floss'], 'def': 'a soft thread for cleaning the spaces between the teeth', 'name': 'dental_floss'}, {'frequency': 'f', 'id': 365, 'synset': 'desk.n.01', 'synonyms': ['desk'], 'def': 'a piece of furniture with a writing surface and usually drawers or other compartments', 'name': 'desk'}, {'frequency': 'r', 'id': 366, 'synset': 'detergent.n.01', 'synonyms': ['detergent'], 'def': 'a surface-active chemical widely used in industry and laundering', 'name': 'detergent'}, {'frequency': 'c', 'id': 367, 'synset': 'diaper.n.01', 'synonyms': ['diaper'], 'def': 'garment consisting of a folded cloth drawn up between the legs and fastened at the waist', 'name': 'diaper'}, {'frequency': 'r', 'id': 368, 'synset': 'diary.n.01', 'synonyms': ['diary', 'journal'], 'def': 'a daily written record of (usually personal) experiences and observations', 'name': 'diary'}, {'frequency': 'r', 'id': 369, 'synset': 'die.n.01', 'synonyms': ['die', 'dice'], 'def': 'a small cube with 1 to 6 spots on the six faces; used in gambling', 'name': 'die'}, {'frequency': 'r', 'id': 370, 'synset': 'dinghy.n.01', 'synonyms': ['dinghy', 'dory', 'rowboat'], 'def': 'a small boat of shallow draft with seats and oars with which it is propelled', 'name': 'dinghy'}, {'frequency': 'f', 'id': 371, 'synset': 'dining_table.n.01', 'synonyms': ['dining_table'], 'def': 'a table at which meals are served', 'name': 'dining_table'}, {'frequency': 'r', 'id': 372, 'synset': 'dinner_jacket.n.01', 'synonyms': ['tux', 'tuxedo'], 'def': 'semiformal evening dress for men', 'name': 'tux'}, {'frequency': 'c', 'id': 373, 'synset': 'dish.n.01', 'synonyms': ['dish'], 'def': 'a piece of dishware normally used as a container for 
holding or serving food', 'name': 'dish'}, {'frequency': 'c', 'id': 374, 'synset': 'dish.n.05', 'synonyms': ['dish_antenna'], 'def': 'directional antenna consisting of a parabolic reflector', 'name': 'dish_antenna'}, {'frequency': 'c', 'id': 375, 'synset': 'dishrag.n.01', 'synonyms': ['dishrag', 'dishcloth'], 'def': 'a cloth for washing dishes', 'name': 'dishrag'}, {'frequency': 'c', 'id': 376, 'synset': 'dishtowel.n.01', 'synonyms': ['dishtowel', 'tea_towel'], 'def': 'a towel for drying dishes', 'name': 'dishtowel'}, {'frequency': 'f', 'id': 377, 'synset': 'dishwasher.n.01', 'synonyms': ['dishwasher', 'dishwashing_machine'], 'def': 'a machine for washing dishes', 'name': 'dishwasher'}, {'frequency': 'r', 'id': 378, 'synset': 'dishwasher_detergent.n.01', 'synonyms': ['dishwasher_detergent', 'dishwashing_detergent', 'dishwashing_liquid'], 'def': 'a low-sudsing detergent designed for use in dishwashers', 'name': 'dishwasher_detergent'}, {'frequency': 'r', 'id': 379, 'synset': 'diskette.n.01', 'synonyms': ['diskette', 'floppy', 'floppy_disk'], 'def': 'a small plastic magnetic disk enclosed in a stiff envelope used to store data', 'name': 'diskette'}, {'frequency': 'c', 'id': 380, 'synset': 'dispenser.n.01', 'synonyms': ['dispenser'], 'def': 'a container so designed that the contents can be used in prescribed amounts', 'name': 'dispenser'}, {'frequency': 'c', 'id': 381, 'synset': 'dixie_cup.n.01', 'synonyms': ['Dixie_cup', 'paper_cup'], 'def': 'a disposable cup made of paper; for holding drinks', 'name': 'Dixie_cup'}, {'frequency': 'f', 'id': 382, 'synset': 'dog.n.01', 'synonyms': ['dog'], 'def': 'a common domesticated dog', 'name': 'dog'}, {'frequency': 'f', 'id': 383, 'synset': 'dog_collar.n.01', 'synonyms': ['dog_collar'], 'def': 'a collar for a dog', 'name': 'dog_collar'}, {'frequency': 'c', 'id': 384, 'synset': 'doll.n.01', 'synonyms': ['doll'], 'def': 'a toy replica of a HUMAN (NOT AN ANIMAL)', 'name': 'doll'}, {'frequency': 'r', 'id': 385, 'synset': 'dollar.n.02', 'synonyms': ['dollar', 'dollar_bill', 'one_dollar_bill'], 'def': 'a piece of paper money worth one dollar', 'name': 'dollar'}, {'frequency': 'r', 'id': 386, 'synset': 'dolphin.n.02', 'synonyms': ['dolphin'], 'def': 'any of various small toothed whales with a beaklike snout; larger than porpoises', 'name': 'dolphin'}, {'frequency': 'c', 'id': 387, 'synset': 'domestic_ass.n.01', 'synonyms': ['domestic_ass', 'donkey'], 'def': 'domestic beast of burden descended from the African wild ass; patient but stubborn', 'name': 'domestic_ass'}, {'frequency': 'r', 'id': 388, 'synset': 'domino.n.03', 'synonyms': ['eye_mask'], 'def': 'a mask covering the upper part of the face but with holes for the eyes', 'name': 'eye_mask'}, {'frequency': 'r', 'id': 389, 'synset': 'doorbell.n.01', 'synonyms': ['doorbell', 'buzzer'], 'def': 'a button at an outer door that gives a ringing or buzzing signal when pushed', 'name': 'doorbell'}, {'frequency': 'f', 'id': 390, 'synset': 'doorknob.n.01', 'synonyms': ['doorknob', 'doorhandle'], 'def': "a knob used to open a door (often called `doorhandle' in Great Britain)", 'name': 'doorknob'}, {'frequency': 'c', 'id': 391, 'synset': 'doormat.n.02', 'synonyms': ['doormat', 'welcome_mat'], 'def': 'a mat placed outside an exterior door for wiping the shoes before entering', 'name': 'doormat'}, {'frequency': 'f', 'id': 392, 'synset': 'doughnut.n.02', 'synonyms': ['doughnut', 'donut'], 'def': 'a small ring-shaped friedcake', 'name': 'doughnut'}, {'frequency': 'r', 'id': 393, 'synset': 'dove.n.01', 'synonyms': ['dove'], 
'def': 'any of numerous small pigeons', 'name': 'dove'}, {'frequency': 'r', 'id': 394, 'synset': 'dragonfly.n.01', 'synonyms': ['dragonfly'], 'def': 'slender-bodied non-stinging insect having iridescent wings that are outspread at rest', 'name': 'dragonfly'}, {'frequency': 'f', 'id': 395, 'synset': 'drawer.n.01', 'synonyms': ['drawer'], 'def': 'a boxlike container in a piece of furniture; made so as to slide in and out', 'name': 'drawer'}, {'frequency': 'c', 'id': 396, 'synset': 'drawers.n.01', 'synonyms': ['underdrawers', 'boxers', 'boxershorts'], 'def': 'underpants worn by men', 'name': 'underdrawers'}, {'frequency': 'f', 'id': 397, 'synset': 'dress.n.01', 'synonyms': ['dress', 'frock'], 'def': 'a one-piece garment for a woman; has skirt and bodice', 'name': 'dress'}, {'frequency': 'c', 'id': 398, 'synset': 'dress_hat.n.01', 'synonyms': ['dress_hat', 'high_hat', 'opera_hat', 'silk_hat', 'top_hat'], 'def': "a man's hat with a tall crown; usually covered with silk or with beaver fur", 'name': 'dress_hat'}, {'frequency': 'c', 'id': 399, 'synset': 'dress_suit.n.01', 'synonyms': ['dress_suit'], 'def': 'formalwear consisting of full evening dress for men', 'name': 'dress_suit'}, {'frequency': 'c', 'id': 400, 'synset': 'dresser.n.05', 'synonyms': ['dresser'], 'def': 'a cabinet with shelves', 'name': 'dresser'}, {'frequency': 'c', 'id': 401, 'synset': 'drill.n.01', 'synonyms': ['drill'], 'def': 'a tool with a sharp rotating point for making holes in hard materials', 'name': 'drill'}, {'frequency': 'r', 'id': 402, 'synset': 'drinking_fountain.n.01', 'synonyms': ['drinking_fountain'], 'def': 'a public fountain to provide a jet of drinking water', 'name': 'drinking_fountain'}, {'frequency': 'r', 'id': 403, 'synset': 'drone.n.04', 'synonyms': ['drone'], 'def': 'an aircraft without a pilot that is operated by remote control', 'name': 'drone'}, {'frequency': 'r', 'id': 404, 'synset': 'dropper.n.01', 'synonyms': ['dropper', 'eye_dropper'], 'def': 'pipet consisting of a small tube with a vacuum bulb at one end for drawing liquid in and releasing it a drop at a time', 'name': 'dropper'}, {'frequency': 'c', 'id': 405, 'synset': 'drum.n.01', 'synonyms': ['drum_(musical_instrument)'], 'def': 'a musical percussion instrument; usually consists of a hollow cylinder with a membrane stretched across each end', 'name': 'drum_(musical_instrument)'}, {'frequency': 'r', 'id': 406, 'synset': 'drumstick.n.02', 'synonyms': ['drumstick'], 'def': 'a stick used for playing a drum', 'name': 'drumstick'}, {'frequency': 'f', 'id': 407, 'synset': 'duck.n.01', 'synonyms': ['duck'], 'def': 'small web-footed broad-billed swimming bird', 'name': 'duck'}, {'frequency': 'r', 'id': 408, 'synset': 'duckling.n.02', 'synonyms': ['duckling'], 'def': 'young duck', 'name': 'duckling'}, {'frequency': 'c', 'id': 409, 'synset': 'duct_tape.n.01', 'synonyms': ['duct_tape'], 'def': 'a wide silvery adhesive tape', 'name': 'duct_tape'}, {'frequency': 'f', 'id': 410, 'synset': 'duffel_bag.n.01', 'synonyms': ['duffel_bag', 'duffle_bag', 'duffel', 'duffle'], 'def': 'a large cylindrical bag of heavy cloth', 'name': 'duffel_bag'}, {'frequency': 'r', 'id': 411, 'synset': 'dumbbell.n.01', 'synonyms': ['dumbbell'], 'def': 'an exercising weight with two ball-like ends connected by a short handle', 'name': 'dumbbell'}, {'frequency': 'c', 'id': 412, 'synset': 'dumpster.n.01', 'synonyms': ['dumpster'], 'def': 'a container designed to receive and transport and dump waste', 'name': 'dumpster'}, {'frequency': 'r', 'id': 413, 'synset': 'dustpan.n.02', 
'synonyms': ['dustpan'], 'def': 'a short-handled receptacle into which dust can be swept', 'name': 'dustpan'}, {'frequency': 'r', 'id': 414, 'synset': 'dutch_oven.n.02', 'synonyms': ['Dutch_oven'], 'def': 'iron or earthenware cooking pot; used for stews', 'name': 'Dutch_oven'}, {'frequency': 'c', 'id': 415, 'synset': 'eagle.n.01', 'synonyms': ['eagle'], 'def': 'large birds of prey noted for their broad wings and strong soaring flight', 'name': 'eagle'}, {'frequency': 'f', 'id': 416, 'synset': 'earphone.n.01', 'synonyms': ['earphone', 'earpiece', 'headphone'], 'def': 'device for listening to audio that is held over or inserted into the ear', 'name': 'earphone'}, {'frequency': 'r', 'id': 417, 'synset': 'earplug.n.01', 'synonyms': ['earplug'], 'def': 'a soft plug that is inserted into the ear canal to block sound', 'name': 'earplug'}, {'frequency': 'f', 'id': 418, 'synset': 'earring.n.01', 'synonyms': ['earring'], 'def': 'jewelry to ornament the ear', 'name': 'earring'}, {'frequency': 'c', 'id': 419, 'synset': 'easel.n.01', 'synonyms': ['easel'], 'def': "an upright tripod for displaying something (usually an artist's canvas)", 'name': 'easel'}, {'frequency': 'r', 'id': 420, 'synset': 'eclair.n.01', 'synonyms': ['eclair'], 'def': 'oblong cream puff', 'name': 'eclair'}, {'frequency': 'r', 'id': 421, 'synset': 'eel.n.01', 'synonyms': ['eel'], 'def': 'an elongate fish with fatty flesh', 'name': 'eel'}, {'frequency': 'f', 'id': 422, 'synset': 'egg.n.02', 'synonyms': ['egg', 'eggs'], 'def': 'oval reproductive body of a fowl (especially a hen) used as food', 'name': 'egg'}, {'frequency': 'r', 'id': 423, 'synset': 'egg_roll.n.01', 'synonyms': ['egg_roll', 'spring_roll'], 'def': 'minced vegetables and meat wrapped in a pancake and fried', 'name': 'egg_roll'}, {'frequency': 'c', 'id': 424, 'synset': 'egg_yolk.n.01', 'synonyms': ['egg_yolk', 'yolk_(egg)'], 'def': 'the yellow spherical part of an egg', 'name': 'egg_yolk'}, {'frequency': 'c', 'id': 425, 'synset': 'eggbeater.n.02', 'synonyms': ['eggbeater', 'eggwhisk'], 'def': 'a mixer for beating eggs or whipping cream', 'name': 'eggbeater'}, {'frequency': 'c', 'id': 426, 'synset': 'eggplant.n.01', 'synonyms': ['eggplant', 'aubergine'], 'def': 'egg-shaped vegetable having a shiny skin typically dark purple', 'name': 'eggplant'}, {'frequency': 'r', 'id': 427, 'synset': 'electric_chair.n.01', 'synonyms': ['electric_chair'], 'def': 'a chair-shaped instrument of execution by electrocution', 'name': 'electric_chair'}, {'frequency': 'f', 'id': 428, 'synset': 'electric_refrigerator.n.01', 'synonyms': ['refrigerator'], 'def': 'a refrigerator in which the coolant is pumped around by an electric motor', 'name': 'refrigerator'}, {'frequency': 'f', 'id': 429, 'synset': 'elephant.n.01', 'synonyms': ['elephant'], 'def': 'a common elephant', 'name': 'elephant'}, {'frequency': 'r', 'id': 430, 'synset': 'elk.n.01', 'synonyms': ['elk', 'moose'], 'def': 'large northern deer with enormous flattened antlers in the male', 'name': 'elk'}, {'frequency': 'c', 'id': 431, 'synset': 'envelope.n.01', 'synonyms': ['envelope'], 'def': 'a flat (usually rectangular) container for a letter, thin package, etc.', 'name': 'envelope'}, {'frequency': 'c', 'id': 432, 'synset': 'eraser.n.01', 'synonyms': ['eraser'], 'def': 'an implement used to erase something', 'name': 'eraser'}, {'frequency': 'r', 'id': 433, 'synset': 'escargot.n.01', 'synonyms': ['escargot'], 'def': 'edible snail usually served in the shell with a sauce of melted butter and garlic', 'name': 'escargot'}, {'frequency': 'r', 
'id': 434, 'synset': 'eyepatch.n.01', 'synonyms': ['eyepatch'], 'def': 'a protective cloth covering for an injured eye', 'name': 'eyepatch'}, {'frequency': 'r', 'id': 435, 'synset': 'falcon.n.01', 'synonyms': ['falcon'], 'def': 'birds of prey having long pointed powerful wings adapted for swift flight', 'name': 'falcon'}, {'frequency': 'f', 'id': 436, 'synset': 'fan.n.01', 'synonyms': ['fan'], 'def': 'a device for creating a current of air by movement of a surface or surfaces', 'name': 'fan'}, {'frequency': 'f', 'id': 437, 'synset': 'faucet.n.01', 'synonyms': ['faucet', 'spigot', 'tap'], 'def': 'a regulator for controlling the flow of a liquid from a reservoir', 'name': 'faucet'}, {'frequency': 'r', 'id': 438, 'synset': 'fedora.n.01', 'synonyms': ['fedora'], 'def': 'a hat made of felt with a creased crown', 'name': 'fedora'}, {'frequency': 'r', 'id': 439, 'synset': 'ferret.n.02', 'synonyms': ['ferret'], 'def': 'domesticated albino variety of the European polecat bred for hunting rats and rabbits', 'name': 'ferret'}, {'frequency': 'c', 'id': 440, 'synset': 'ferris_wheel.n.01', 'synonyms': ['Ferris_wheel'], 'def': 'a large wheel with suspended seats that remain upright as the wheel rotates', 'name': 'Ferris_wheel'}, {'frequency': 'r', 'id': 441, 'synset': 'ferry.n.01', 'synonyms': ['ferry', 'ferryboat'], 'def': 'a boat that transports people or vehicles across a body of water and operates on a regular schedule', 'name': 'ferry'}, {'frequency': 'r', 'id': 442, 'synset': 'fig.n.04', 'synonyms': ['fig_(fruit)'], 'def': 'fleshy sweet pear-shaped yellowish or purple fruit eaten fresh or preserved or dried', 'name': 'fig_(fruit)'}, {'frequency': 'c', 'id': 443, 'synset': 'fighter.n.02', 'synonyms': ['fighter_jet', 'fighter_aircraft', 'attack_aircraft'], 'def': 'a high-speed military or naval airplane designed to destroy enemy targets', 'name': 'fighter_jet'}, {'frequency': 'f', 'id': 444, 'synset': 'figurine.n.01', 'synonyms': ['figurine'], 'def': 'a small carved or molded figure', 'name': 'figurine'}, {'frequency': 'c', 'id': 445, 'synset': 'file.n.03', 'synonyms': ['file_cabinet', 'filing_cabinet'], 'def': 'office furniture consisting of a container for keeping papers in order', 'name': 'file_cabinet'}, {'frequency': 'r', 'id': 446, 'synset': 'file.n.04', 'synonyms': ['file_(tool)'], 'def': 'a steel hand tool with small sharp teeth on some or all of its surfaces; used for smoothing wood or metal', 'name': 'file_(tool)'}, {'frequency': 'f', 'id': 447, 'synset': 'fire_alarm.n.02', 'synonyms': ['fire_alarm', 'smoke_alarm'], 'def': 'an alarm that is tripped off by fire or smoke', 'name': 'fire_alarm'}, {'frequency': 'c', 'id': 448, 'synset': 'fire_engine.n.01', 'synonyms': ['fire_engine', 'fire_truck'], 'def': 'large trucks that carry firefighters and equipment to the site of a fire', 'name': 'fire_engine'}, {'frequency': 'c', 'id': 449, 'synset': 'fire_extinguisher.n.01', 'synonyms': ['fire_extinguisher', 'extinguisher'], 'def': 'a manually operated device for extinguishing small fires', 'name': 'fire_extinguisher'}, {'frequency': 'c', 'id': 450, 'synset': 'fire_hose.n.01', 'synonyms': ['fire_hose'], 'def': 'a large hose that carries water from a fire hydrant to the site of the fire', 'name': 'fire_hose'}, {'frequency': 'f', 'id': 451, 'synset': 'fireplace.n.01', 'synonyms': ['fireplace'], 'def': 'an open recess in a wall at the base of a chimney where a fire can be built', 'name': 'fireplace'}, {'frequency': 'f', 'id': 452, 'synset': 'fireplug.n.01', 'synonyms': ['fireplug', 'fire_hydrant', 
'hydrant'], 'def': 'an upright hydrant for drawing water to use in fighting a fire', 'name': 'fireplug'}, {'frequency': 'c', 'id': 453, 'synset': 'fish.n.01', 'synonyms': ['fish'], 'def': 'any of various mostly cold-blooded aquatic vertebrates usually having scales and breathing through gills', 'name': 'fish'}, {'frequency': 'r', 'id': 454, 'synset': 'fish.n.02', 'synonyms': ['fish_(food)'], 'def': 'the flesh of fish used as food', 'name': 'fish_(food)'}, {'frequency': 'r', 'id': 455, 'synset': 'fishbowl.n.02', 'synonyms': ['fishbowl', 'goldfish_bowl'], 'def': 'a transparent bowl in which small fish are kept', 'name': 'fishbowl'}, {'frequency': 'r', 'id': 456, 'synset': 'fishing_boat.n.01', 'synonyms': ['fishing_boat', 'fishing_vessel'], 'def': 'a vessel for fishing', 'name': 'fishing_boat'}, {'frequency': 'c', 'id': 457, 'synset': 'fishing_rod.n.01', 'synonyms': ['fishing_rod', 'fishing_pole'], 'def': 'a rod that is used in fishing to extend the fishing line', 'name': 'fishing_rod'}, {'frequency': 'f', 'id': 458, 'synset': 'flag.n.01', 'synonyms': ['flag'], 'def': 'emblem usually consisting of a rectangular piece of cloth of distinctive design (do not include pole)', 'name': 'flag'}, {'frequency': 'f', 'id': 459, 'synset': 'flagpole.n.02', 'synonyms': ['flagpole', 'flagstaff'], 'def': 'a tall staff or pole on which a flag is raised', 'name': 'flagpole'}, {'frequency': 'c', 'id': 460, 'synset': 'flamingo.n.01', 'synonyms': ['flamingo'], 'def': 'large pink web-footed bird with down-bent bill', 'name': 'flamingo'}, {'frequency': 'c', 'id': 461, 'synset': 'flannel.n.01', 'synonyms': ['flannel'], 'def': 'a soft light woolen fabric; used for clothing', 'name': 'flannel'}, {'frequency': 'r', 'id': 462, 'synset': 'flash.n.10', 'synonyms': ['flash', 'flashbulb'], 'def': 'a lamp for providing momentary light to take a photograph', 'name': 'flash'}, {'frequency': 'c', 'id': 463, 'synset': 'flashlight.n.01', 'synonyms': ['flashlight', 'torch'], 'def': 'a small portable battery-powered electric lamp', 'name': 'flashlight'}, {'frequency': 'r', 'id': 464, 'synset': 'fleece.n.03', 'synonyms': ['fleece'], 'def': 'a soft bulky fabric with deep pile; used chiefly for clothing', 'name': 'fleece'}, {'frequency': 'f', 'id': 465, 'synset': 'flip-flop.n.02', 'synonyms': ['flip-flop_(sandal)'], 'def': 'a backless sandal held to the foot by a thong between two toes', 'name': 'flip-flop_(sandal)'}, {'frequency': 'c', 'id': 466, 'synset': 'flipper.n.01', 'synonyms': ['flipper_(footwear)', 'fin_(footwear)'], 'def': 'a shoe to aid a person in swimming', 'name': 'flipper_(footwear)'}, {'frequency': 'f', 'id': 467, 'synset': 'flower_arrangement.n.01', 'synonyms': ['flower_arrangement', 'floral_arrangement'], 'def': 'a decorative arrangement of flowers', 'name': 'flower_arrangement'}, {'frequency': 'c', 'id': 468, 'synset': 'flute.n.02', 'synonyms': ['flute_glass', 'champagne_flute'], 'def': 'a tall narrow wineglass', 'name': 'flute_glass'}, {'frequency': 'r', 'id': 469, 'synset': 'foal.n.01', 'synonyms': ['foal'], 'def': 'a young horse', 'name': 'foal'}, {'frequency': 'c', 'id': 470, 'synset': 'folding_chair.n.01', 'synonyms': ['folding_chair'], 'def': 'a chair that can be folded flat for storage', 'name': 'folding_chair'}, {'frequency': 'c', 'id': 471, 'synset': 'food_processor.n.01', 'synonyms': ['food_processor'], 'def': 'a kitchen appliance for shredding, blending, chopping, or slicing food', 'name': 'food_processor'}, {'frequency': 'c', 'id': 472, 'synset': 'football.n.02', 'synonyms': ['football_(American)'], 
'def': 'the inflated oblong ball used in playing American football', 'name': 'football_(American)'}, {'frequency': 'r', 'id': 473, 'synset': 'football_helmet.n.01', 'synonyms': ['football_helmet'], 'def': 'a padded helmet with a face mask to protect the head of football players', 'name': 'football_helmet'}, {'frequency': 'c', 'id': 474, 'synset': 'footstool.n.01', 'synonyms': ['footstool', 'footrest'], 'def': 'a low seat or a stool to rest the feet of a seated person', 'name': 'footstool'}, {'frequency': 'f', 'id': 475, 'synset': 'fork.n.01', 'synonyms': ['fork'], 'def': 'cutlery used for serving and eating food', 'name': 'fork'}, {'frequency': 'r', 'id': 476, 'synset': 'forklift.n.01', 'synonyms': ['forklift'], 'def': 'an industrial vehicle with a power operated fork in front that can be inserted under loads to lift and move them', 'name': 'forklift'}, {'frequency': 'r', 'id': 477, 'synset': 'freight_car.n.01', 'synonyms': ['freight_car'], 'def': 'a railway car that carries freight', 'name': 'freight_car'}, {'frequency': 'r', 'id': 478, 'synset': 'french_toast.n.01', 'synonyms': ['French_toast'], 'def': 'bread slice dipped in egg and milk and fried', 'name': 'French_toast'}, {'frequency': 'c', 'id': 479, 'synset': 'freshener.n.01', 'synonyms': ['freshener', 'air_freshener'], 'def': 'anything that freshens', 'name': 'freshener'}, {'frequency': 'f', 'id': 480, 'synset': 'frisbee.n.01', 'synonyms': ['frisbee'], 'def': 'a light, plastic disk propelled with a flip of the wrist for recreation or competition', 'name': 'frisbee'}, {'frequency': 'c', 'id': 481, 'synset': 'frog.n.01', 'synonyms': ['frog', 'toad', 'toad_frog'], 'def': 'a tailless stout-bodied amphibian with long hind limbs for leaping', 'name': 'frog'}, {'frequency': 'c', 'id': 482, 'synset': 'fruit_juice.n.01', 'synonyms': ['fruit_juice'], 'def': 'drink produced by squeezing or crushing fruit', 'name': 'fruit_juice'}, {'frequency': 'r', 'id': 483, 'synset': 'fruit_salad.n.01', 'synonyms': ['fruit_salad'], 'def': 'salad composed of fruits', 'name': 'fruit_salad'}, {'frequency': 'c', 'id': 484, 'synset': 'frying_pan.n.01', 'synonyms': ['frying_pan', 'frypan', 'skillet'], 'def': 'a pan used for frying foods', 'name': 'frying_pan'}, {'frequency': 'r', 'id': 485, 'synset': 'fudge.n.01', 'synonyms': ['fudge'], 'def': 'soft creamy candy', 'name': 'fudge'}, {'frequency': 'r', 'id': 486, 'synset': 'funnel.n.02', 'synonyms': ['funnel'], 'def': 'a cone-shaped utensil used to channel a substance into a container with a small mouth', 'name': 'funnel'}, {'frequency': 'c', 'id': 487, 'synset': 'futon.n.01', 'synonyms': ['futon'], 'def': 'a pad that is used for sleeping on the floor or on a raised frame', 'name': 'futon'}, {'frequency': 'r', 'id': 488, 'synset': 'gag.n.02', 'synonyms': ['gag', 'muzzle'], 'def': "restraint put into a person's mouth to prevent speaking or shouting", 'name': 'gag'}, {'frequency': 'r', 'id': 489, 'synset': 'garbage.n.03', 'synonyms': ['garbage'], 'def': 'a receptacle where waste can be discarded', 'name': 'garbage'}, {'frequency': 'c', 'id': 490, 'synset': 'garbage_truck.n.01', 'synonyms': ['garbage_truck'], 'def': 'a truck for collecting domestic refuse', 'name': 'garbage_truck'}, {'frequency': 'c', 'id': 491, 'synset': 'garden_hose.n.01', 'synonyms': ['garden_hose'], 'def': 'a hose used for watering a lawn or garden', 'name': 'garden_hose'}, {'frequency': 'c', 'id': 492, 'synset': 'gargle.n.01', 'synonyms': ['gargle', 'mouthwash'], 'def': 'a medicated solution used for gargling and rinsing the mouth', 'name': 
'gargle'}, {'frequency': 'r', 'id': 493, 'synset': 'gargoyle.n.02', 'synonyms': ['gargoyle'], 'def': 'an ornament consisting of a grotesquely carved figure of a person or animal', 'name': 'gargoyle'}, {'frequency': 'c', 'id': 494, 'synset': 'garlic.n.02', 'synonyms': ['garlic', 'ail'], 'def': 'aromatic bulb used as seasoning', 'name': 'garlic'}, {'frequency': 'r', 'id': 495, 'synset': 'gasmask.n.01', 'synonyms': ['gasmask', 'respirator', 'gas_helmet'], 'def': 'a protective face mask with a filter', 'name': 'gasmask'}, {'frequency': 'r', 'id': 496, 'synset': 'gazelle.n.01', 'synonyms': ['gazelle'], 'def': 'small swift graceful antelope of Africa and Asia having lustrous eyes', 'name': 'gazelle'}, {'frequency': 'c', 'id': 497, 'synset': 'gelatin.n.02', 'synonyms': ['gelatin', 'jelly'], 'def': 'an edible jelly made with gelatin and used as a dessert or salad base or a coating for foods', 'name': 'gelatin'}, {'frequency': 'r', 'id': 498, 'synset': 'gem.n.02', 'synonyms': ['gemstone'], 'def': 'a crystalline rock that can be cut and polished for jewelry', 'name': 'gemstone'}, {'frequency': 'c', 'id': 499, 'synset': 'giant_panda.n.01', 'synonyms': ['giant_panda', 'panda', 'panda_bear'], 'def': 'large black-and-white herbivorous mammal of bamboo forests of China and Tibet', 'name': 'giant_panda'}, {'frequency': 'c', 'id': 500, 'synset': 'gift_wrap.n.01', 'synonyms': ['gift_wrap'], 'def': 'attractive wrapping paper suitable for wrapping gifts', 'name': 'gift_wrap'}, {'frequency': 'c', 'id': 501, 'synset': 'ginger.n.03', 'synonyms': ['ginger', 'gingerroot'], 'def': 'the root of the common ginger plant; used fresh as a seasoning', 'name': 'ginger'}, {'frequency': 'f', 'id': 502, 'synset': 'giraffe.n.01', 'synonyms': ['giraffe'], 'def': 'tall animal having a spotted coat and small horns and very long neck and legs', 'name': 'giraffe'}, {'frequency': 'c', 'id': 503, 'synset': 'girdle.n.02', 'synonyms': ['cincture', 'sash', 'waistband', 'waistcloth'], 'def': 'a band of material around the waist that strengthens a skirt or trousers', 'name': 'cincture'}, {'frequency': 'f', 'id': 504, 'synset': 'glass.n.02', 'synonyms': ['glass_(drink_container)', 'drinking_glass'], 'def': 'a container for holding liquids while drinking', 'name': 'glass_(drink_container)'}, {'frequency': 'c', 'id': 505, 'synset': 'globe.n.03', 'synonyms': ['globe'], 'def': 'a sphere on which a map (especially of the earth) is represented', 'name': 'globe'}, {'frequency': 'f', 'id': 506, 'synset': 'glove.n.02', 'synonyms': ['glove'], 'def': 'handwear covering the hand', 'name': 'glove'}, {'frequency': 'c', 'id': 507, 'synset': 'goat.n.01', 'synonyms': ['goat'], 'def': 'a common goat', 'name': 'goat'}, {'frequency': 'f', 'id': 508, 'synset': 'goggles.n.01', 'synonyms': ['goggles'], 'def': 'tight-fitting spectacles worn to protect the eyes', 'name': 'goggles'}, {'frequency': 'r', 'id': 509, 'synset': 'goldfish.n.01', 'synonyms': ['goldfish'], 'def': 'small golden or orange-red freshwater fishes used as pond or aquarium pets', 'name': 'goldfish'}, {'frequency': 'r', 'id': 510, 'synset': 'golf_club.n.02', 'synonyms': ['golf_club', 'golf-club'], 'def': 'golf equipment used by a golfer to hit a golf ball', 'name': 'golf_club'}, {'frequency': 'c', 'id': 511, 'synset': 'golfcart.n.01', 'synonyms': ['golfcart'], 'def': 'a small motor vehicle in which golfers can ride between shots', 'name': 'golfcart'}, {'frequency': 'r', 'id': 512, 'synset': 'gondola.n.02', 'synonyms': ['gondola_(boat)'], 'def': 'long narrow flat-bottomed boat propelled by 
sculling; traditionally used on canals of Venice', 'name': 'gondola_(boat)'}, {'frequency': 'c', 'id': 513, 'synset': 'goose.n.01', 'synonyms': ['goose'], 'def': 'loud, web-footed long-necked aquatic birds usually larger than ducks', 'name': 'goose'}, {'frequency': 'r', 'id': 514, 'synset': 'gorilla.n.01', 'synonyms': ['gorilla'], 'def': 'largest ape', 'name': 'gorilla'}, {'frequency': 'r', 'id': 515, 'synset': 'gourd.n.02', 'synonyms': ['gourd'], 'def': 'any of numerous inedible fruits with hard rinds', 'name': 'gourd'}, {'frequency': 'r', 'id': 516, 'synset': 'gown.n.04', 'synonyms': ['surgical_gown', 'scrubs_(surgical_clothing)'], 'def': 'protective garment worn by surgeons during operations', 'name': 'surgical_gown'}, {'frequency': 'f', 'id': 517, 'synset': 'grape.n.01', 'synonyms': ['grape'], 'def': 'any of various juicy fruit with green or purple skins; grow in clusters', 'name': 'grape'}, {'frequency': 'r', 'id': 518, 'synset': 'grasshopper.n.01', 'synonyms': ['grasshopper'], 'def': 'plant-eating insect with hind legs adapted for leaping', 'name': 'grasshopper'}, {'frequency': 'c', 'id': 519, 'synset': 'grater.n.01', 'synonyms': ['grater'], 'def': 'utensil with sharp perforations for shredding foods (as vegetables or cheese)', 'name': 'grater'}, {'frequency': 'c', 'id': 520, 'synset': 'gravestone.n.01', 'synonyms': ['gravestone', 'headstone', 'tombstone'], 'def': 'a stone that is used to mark a grave', 'name': 'gravestone'}, {'frequency': 'r', 'id': 521, 'synset': 'gravy_boat.n.01', 'synonyms': ['gravy_boat', 'gravy_holder'], 'def': 'a dish (often boat-shaped) for serving gravy or sauce', 'name': 'gravy_boat'}, {'frequency': 'c', 'id': 522, 'synset': 'green_bean.n.02', 'synonyms': ['green_bean'], 'def': 'a common bean plant cultivated for its slender green edible pods', 'name': 'green_bean'}, {'frequency': 'c', 'id': 523, 'synset': 'green_onion.n.01', 'synonyms': ['green_onion', 'spring_onion', 'scallion'], 'def': 'a young onion before the bulb has enlarged', 'name': 'green_onion'}, {'frequency': 'r', 'id': 524, 'synset': 'griddle.n.01', 'synonyms': ['griddle'], 'def': 'cooking utensil consisting of a flat heated surface on which food is cooked', 'name': 'griddle'}, {'frequency': 'r', 'id': 525, 'synset': 'grillroom.n.01', 'synonyms': ['grillroom', 'grill_(restaurant)'], 'def': 'a restaurant where food is cooked on a grill', 'name': 'grillroom'}, {'frequency': 'r', 'id': 526, 'synset': 'grinder.n.04', 'synonyms': ['grinder_(tool)'], 'def': 'a machine tool that polishes metal', 'name': 'grinder_(tool)'}, {'frequency': 'r', 'id': 527, 'synset': 'grits.n.01', 'synonyms': ['grits', 'hominy_grits'], 'def': 'coarsely ground corn boiled as a breakfast dish', 'name': 'grits'}, {'frequency': 'c', 'id': 528, 'synset': 'grizzly.n.01', 'synonyms': ['grizzly', 'grizzly_bear'], 'def': 'powerful brownish-yellow bear of the uplands of western North America', 'name': 'grizzly'}, {'frequency': 'c', 'id': 529, 'synset': 'grocery_bag.n.01', 'synonyms': ['grocery_bag'], 'def': "a sack for holding customer's groceries", 'name': 'grocery_bag'}, {'frequency': 'r', 'id': 530, 'synset': 'guacamole.n.01', 'synonyms': ['guacamole'], 'def': 'a dip made of mashed avocado mixed with chopped onions and other seasonings', 'name': 'guacamole'}, {'frequency': 'f', 'id': 531, 'synset': 'guitar.n.01', 'synonyms': ['guitar'], 'def': 'a stringed instrument usually having six strings; played by strumming or plucking', 'name': 'guitar'}, {'frequency': 'c', 'id': 532, 'synset': 'gull.n.02', 'synonyms': ['gull', 'seagull'], 
'def': 'mostly white aquatic bird having long pointed wings and short legs', 'name': 'gull'}, {'frequency': 'c', 'id': 533, 'synset': 'gun.n.01', 'synonyms': ['gun'], 'def': 'a weapon that discharges a bullet at high velocity from a metal tube', 'name': 'gun'}, {'frequency': 'r', 'id': 534, 'synset': 'hair_spray.n.01', 'synonyms': ['hair_spray'], 'def': 'substance sprayed on the hair to hold it in place', 'name': 'hair_spray'}, {'frequency': 'c', 'id': 535, 'synset': 'hairbrush.n.01', 'synonyms': ['hairbrush'], 'def': "a brush used to groom a person's hair", 'name': 'hairbrush'}, {'frequency': 'c', 'id': 536, 'synset': 'hairnet.n.01', 'synonyms': ['hairnet'], 'def': 'a small net that someone wears over their hair to keep it in place', 'name': 'hairnet'}, {'frequency': 'c', 'id': 537, 'synset': 'hairpin.n.01', 'synonyms': ['hairpin'], 'def': "a double pronged pin used to hold women's hair in place", 'name': 'hairpin'}, {'frequency': 'f', 'id': 538, 'synset': 'ham.n.01', 'synonyms': ['ham', 'jambon', 'gammon'], 'def': 'meat cut from the thigh of a hog (usually smoked)', 'name': 'ham'}, {'frequency': 'c', 'id': 539, 'synset': 'hamburger.n.01', 'synonyms': ['hamburger', 'beefburger', 'burger'], 'def': 'a sandwich consisting of a patty of minced beef served on a bun', 'name': 'hamburger'}, {'frequency': 'c', 'id': 540, 'synset': 'hammer.n.02', 'synonyms': ['hammer'], 'def': 'a hand tool with a heavy head and a handle; used to deliver an impulsive force by striking', 'name': 'hammer'}, {'frequency': 'r', 'id': 541, 'synset': 'hammock.n.02', 'synonyms': ['hammock'], 'def': 'a hanging bed of canvas or rope netting (usually suspended between two trees)', 'name': 'hammock'}, {'frequency': 'r', 'id': 542, 'synset': 'hamper.n.02', 'synonyms': ['hamper'], 'def': 'a basket usually with a cover', 'name': 'hamper'}, {'frequency': 'r', 'id': 543, 'synset': 'hamster.n.01', 'synonyms': ['hamster'], 'def': 'short-tailed burrowing rodent with large cheek pouches', 'name': 'hamster'}, {'frequency': 'c', 'id': 544, 'synset': 'hand_blower.n.01', 'synonyms': ['hair_dryer'], 'def': 'a hand-held electric blower that can blow warm air onto the hair', 'name': 'hair_dryer'}, {'frequency': 'r', 'id': 545, 'synset': 'hand_glass.n.01', 'synonyms': ['hand_glass', 'hand_mirror'], 'def': 'a mirror intended to be held in the hand', 'name': 'hand_glass'}, {'frequency': 'f', 'id': 546, 'synset': 'hand_towel.n.01', 'synonyms': ['hand_towel', 'face_towel'], 'def': 'a small towel used to dry the hands or face', 'name': 'hand_towel'}, {'frequency': 'c', 'id': 547, 'synset': 'handcart.n.01', 'synonyms': ['handcart', 'pushcart', 'hand_truck'], 'def': 'wheeled vehicle that can be pushed by a person', 'name': 'handcart'}, {'frequency': 'r', 'id': 548, 'synset': 'handcuff.n.01', 'synonyms': ['handcuff'], 'def': 'shackle that consists of a metal loop that can be locked around the wrist', 'name': 'handcuff'}, {'frequency': 'c', 'id': 549, 'synset': 'handkerchief.n.01', 'synonyms': ['handkerchief'], 'def': 'a square piece of cloth used for wiping the eyes or nose or as a costume accessory', 'name': 'handkerchief'}, {'frequency': 'f', 'id': 550, 'synset': 'handle.n.01', 'synonyms': ['handle', 'grip', 'handgrip'], 'def': 'the appendage to an object that is designed to be held in order to use or move it', 'name': 'handle'}, {'frequency': 'r', 'id': 551, 'synset': 'handsaw.n.01', 'synonyms': ['handsaw', "carpenter's_saw"], 'def': 'a saw used with one hand for cutting wood', 'name': 'handsaw'}, {'frequency': 'r', 'id': 552, 'synset': 
'hardback.n.01', 'synonyms': ['hardback_book', 'hardcover_book'], 'def': 'a book with cardboard or cloth or leather covers', 'name': 'hardback_book'}, {'frequency': 'r', 'id': 553, 'synset': 'harmonium.n.01', 'synonyms': ['harmonium', 'organ_(musical_instrument)', 'reed_organ_(musical_instrument)'], 'def': 'a free-reed instrument in which air is forced through the reeds by bellows', 'name': 'harmonium'}, {'frequency': 'f', 'id': 554, 'synset': 'hat.n.01', 'synonyms': ['hat'], 'def': 'headwear that protects the head from bad weather, sun, or worn for fashion', 'name': 'hat'}, {'frequency': 'r', 'id': 555, 'synset': 'hatbox.n.01', 'synonyms': ['hatbox'], 'def': 'a round piece of luggage for carrying hats', 'name': 'hatbox'}, {'frequency': 'r', 'id': 556, 'synset': 'hatch.n.03', 'synonyms': ['hatch'], 'def': 'a movable barrier covering a hatchway', 'name': 'hatch'}, {'frequency': 'c', 'id': 557, 'synset': 'head_covering.n.01', 'synonyms': ['veil'], 'def': 'a garment that covers the head and face', 'name': 'veil'}, {'frequency': 'f', 'id': 558, 'synset': 'headband.n.01', 'synonyms': ['headband'], 'def': 'a band worn around or over the head', 'name': 'headband'}, {'frequency': 'f', 'id': 559, 'synset': 'headboard.n.01', 'synonyms': ['headboard'], 'def': 'a vertical board or panel forming the head of a bedstead', 'name': 'headboard'}, {'frequency': 'f', 'id': 560, 'synset': 'headlight.n.01', 'synonyms': ['headlight', 'headlamp'], 'def': 'a powerful light with reflector; attached to the front of an automobile or locomotive', 'name': 'headlight'}, {'frequency': 'c', 'id': 561, 'synset': 'headscarf.n.01', 'synonyms': ['headscarf'], 'def': 'a kerchief worn over the head and tied under the chin', 'name': 'headscarf'}, {'frequency': 'r', 'id': 562, 'synset': 'headset.n.01', 'synonyms': ['headset'], 'def': 'receiver consisting of a pair of headphones', 'name': 'headset'}, {'frequency': 'c', 'id': 563, 'synset': 'headstall.n.01', 'synonyms': ['headstall_(for_horses)', 'headpiece_(for_horses)'], 'def': "the band that is the part of a bridle that fits around a horse's head", 'name': 'headstall_(for_horses)'}, {'frequency': 'r', 'id': 564, 'synset': 'hearing_aid.n.02', 'synonyms': ['hearing_aid'], 'def': 'an acoustic device used to direct sound to the ear of a hearing-impaired person', 'name': 'hearing_aid'}, {'frequency': 'c', 'id': 565, 'synset': 'heart.n.02', 'synonyms': ['heart'], 'def': 'a muscular organ; its contractions move the blood through the body', 'name': 'heart'}, {'frequency': 'c', 'id': 566, 'synset': 'heater.n.01', 'synonyms': ['heater', 'warmer'], 'def': 'device that heats water or supplies warmth to a room', 'name': 'heater'}, {'frequency': 'c', 'id': 567, 'synset': 'helicopter.n.01', 'synonyms': ['helicopter'], 'def': 'an aircraft without wings that obtains its lift from the rotation of overhead blades', 'name': 'helicopter'}, {'frequency': 'f', 'id': 568, 'synset': 'helmet.n.02', 'synonyms': ['helmet'], 'def': 'a protective headgear made of hard material to resist blows', 'name': 'helmet'}, {'frequency': 'r', 'id': 569, 'synset': 'heron.n.02', 'synonyms': ['heron'], 'def': 'grey or white wading bird with long neck and long legs and (usually) long bill', 'name': 'heron'}, {'frequency': 'c', 'id': 570, 'synset': 'highchair.n.01', 'synonyms': ['highchair', 'feeding_chair'], 'def': 'a chair for feeding a very young child', 'name': 'highchair'}, {'frequency': 'f', 'id': 571, 'synset': 'hinge.n.01', 'synonyms': ['hinge'], 'def': 'a joint that holds two parts together so that one can swing 
relative to the other', 'name': 'hinge'}, {'frequency': 'r', 'id': 572, 'synset': 'hippopotamus.n.01', 'synonyms': ['hippopotamus'], 'def': 'massive thick-skinned animal living in or around rivers of tropical Africa', 'name': 'hippopotamus'}, {'frequency': 'r', 'id': 573, 'synset': 'hockey_stick.n.01', 'synonyms': ['hockey_stick'], 'def': 'sports implement consisting of a stick used by hockey players to move the puck', 'name': 'hockey_stick'}, {'frequency': 'c', 'id': 574, 'synset': 'hog.n.03', 'synonyms': ['hog', 'pig'], 'def': 'domestic swine', 'name': 'hog'}, {'frequency': 'f', 'id': 575, 'synset': 'home_plate.n.01', 'synonyms': ['home_plate_(baseball)', 'home_base_(baseball)'], 'def': '(baseball) a rubber slab where the batter stands; it must be touched by a base runner in order to score', 'name': 'home_plate_(baseball)'}, {'frequency': 'c', 'id': 576, 'synset': 'honey.n.01', 'synonyms': ['honey'], 'def': 'a sweet yellow liquid produced by bees', 'name': 'honey'}, {'frequency': 'f', 'id': 577, 'synset': 'hood.n.06', 'synonyms': ['fume_hood', 'exhaust_hood'], 'def': 'metal covering leading to a vent that exhausts smoke or fumes', 'name': 'fume_hood'}, {'frequency': 'f', 'id': 578, 'synset': 'hook.n.05', 'synonyms': ['hook'], 'def': 'a curved or bent implement for suspending or pulling something', 'name': 'hook'}, {'frequency': 'f', 'id': 579, 'synset': 'horse.n.01', 'synonyms': ['horse'], 'def': 'a common horse', 'name': 'horse'}, {'frequency': 'f', 'id': 580, 'synset': 'hose.n.03', 'synonyms': ['hose', 'hosepipe'], 'def': 'a flexible pipe for conveying a liquid or gas', 'name': 'hose'}, {'frequency': 'r', 'id': 581, 'synset': 'hot-air_balloon.n.01', 'synonyms': ['hot-air_balloon'], 'def': 'balloon for travel through the air in a basket suspended below a large bag of heated air', 'name': 'hot-air_balloon'}, {'frequency': 'r', 'id': 582, 'synset': 'hot_plate.n.01', 'synonyms': ['hotplate'], 'def': 'a portable electric appliance for heating or cooking or keeping food warm', 'name': 'hotplate'}, {'frequency': 'c', 'id': 583, 'synset': 'hot_sauce.n.01', 'synonyms': ['hot_sauce'], 'def': 'a pungent peppery sauce', 'name': 'hot_sauce'}, {'frequency': 'r', 'id': 584, 'synset': 'hourglass.n.01', 'synonyms': ['hourglass'], 'def': 'a sandglass timer that runs for sixty minutes', 'name': 'hourglass'}, {'frequency': 'r', 'id': 585, 'synset': 'houseboat.n.01', 'synonyms': ['houseboat'], 'def': 'a barge that is designed and equipped for use as a dwelling', 'name': 'houseboat'}, {'frequency': 'r', 'id': 586, 'synset': 'hummingbird.n.01', 'synonyms': ['hummingbird'], 'def': 'tiny American bird having brilliant iridescent plumage and long slender bills', 'name': 'hummingbird'}, {'frequency': 'r', 'id': 587, 'synset': 'hummus.n.01', 'synonyms': ['hummus', 'humus', 'hommos', 'hoummos', 'humous'], 'def': 'a thick spread made from mashed chickpeas', 'name': 'hummus'}, {'frequency': 'c', 'id': 588, 'synset': 'ice_bear.n.01', 'synonyms': ['polar_bear'], 'def': 'white bear of Arctic regions', 'name': 'polar_bear'}, {'frequency': 'c', 'id': 589, 'synset': 'ice_cream.n.01', 'synonyms': ['icecream'], 'def': 'frozen dessert containing cream and sugar and flavoring', 'name': 'icecream'}, {'frequency': 'r', 'id': 590, 'synset': 'ice_lolly.n.01', 'synonyms': ['popsicle'], 'def': 'ice cream or water ice on a small wooden stick', 'name': 'popsicle'}, {'frequency': 'c', 'id': 591, 'synset': 'ice_maker.n.01', 'synonyms': ['ice_maker'], 'def': 'an appliance included in some electric refrigerators for making ice cubes', 
'name': 'ice_maker'}, {'frequency': 'r', 'id': 592, 'synset': 'ice_pack.n.01', 'synonyms': ['ice_pack', 'ice_bag'], 'def': 'a waterproof bag filled with ice: applied to the body (especially the head) to cool or reduce swelling', 'name': 'ice_pack'}, {'frequency': 'r', 'id': 593, 'synset': 'ice_skate.n.01', 'synonyms': ['ice_skate'], 'def': 'skate consisting of a boot with a steel blade fitted to the sole', 'name': 'ice_skate'}, {'frequency': 'r', 'id': 594, 'synset': 'ice_tea.n.01', 'synonyms': ['ice_tea', 'iced_tea'], 'def': 'strong tea served over ice', 'name': 'ice_tea'}, {'frequency': 'c', 'id': 595, 'synset': 'igniter.n.01', 'synonyms': ['igniter', 'ignitor', 'lighter'], 'def': 'a substance or device used to start a fire', 'name': 'igniter'}, {'frequency': 'r', 'id': 596, 'synset': 'incense.n.01', 'synonyms': ['incense'], 'def': 'a substance that produces a fragrant odor when burned', 'name': 'incense'}, {'frequency': 'r', 'id': 597, 'synset': 'inhaler.n.01', 'synonyms': ['inhaler', 'inhalator'], 'def': 'a dispenser that produces a chemical vapor to be inhaled through mouth or nose', 'name': 'inhaler'}, {'frequency': 'c', 'id': 598, 'synset': 'ipod.n.01', 'synonyms': ['iPod'], 'def': 'a pocket-sized device used to play music files', 'name': 'iPod'}, {'frequency': 'c', 'id': 599, 'synset': 'iron.n.04', 'synonyms': ['iron_(for_clothing)', 'smoothing_iron_(for_clothing)'], 'def': 'home appliance consisting of a flat metal base that is heated and used to smooth cloth', 'name': 'iron_(for_clothing)'}, {'frequency': 'r', 'id': 600, 'synset': 'ironing_board.n.01', 'synonyms': ['ironing_board'], 'def': 'narrow padded board on collapsible supports; used for ironing clothes', 'name': 'ironing_board'}, {'frequency': 'f', 'id': 601, 'synset': 'jacket.n.01', 'synonyms': ['jacket'], 'def': 'a waist-length coat', 'name': 'jacket'}, {'frequency': 'r', 'id': 602, 'synset': 'jam.n.01', 'synonyms': ['jam'], 'def': 'preserve of crushed fruit', 'name': 'jam'}, {'frequency': 'f', 'id': 603, 'synset': 'jean.n.01', 'synonyms': ['jean', 'blue_jean', 'denim'], 'def': '(usually plural) close-fitting trousers of heavy denim for manual work or casual wear', 'name': 'jean'}, {'frequency': 'c', 'id': 604, 'synset': 'jeep.n.01', 'synonyms': ['jeep', 'landrover'], 'def': 'a car suitable for traveling over rough terrain', 'name': 'jeep'}, {'frequency': 'r', 'id': 605, 'synset': 'jelly_bean.n.01', 'synonyms': ['jelly_bean', 'jelly_egg'], 'def': 'sugar-glazed jellied candy', 'name': 'jelly_bean'}, {'frequency': 'f', 'id': 606, 'synset': 'jersey.n.03', 'synonyms': ['jersey', 'T-shirt', 'tee_shirt'], 'def': 'a close-fitting pullover shirt', 'name': 'jersey'}, {'frequency': 'c', 'id': 607, 'synset': 'jet.n.01', 'synonyms': ['jet_plane', 'jet-propelled_plane'], 'def': 'an airplane powered by one or more jet engines', 'name': 'jet_plane'}, {'frequency': 'c', 'id': 608, 'synset': 'jewelry.n.01', 'synonyms': ['jewelry', 'jewellery'], 'def': 'an adornment (as a bracelet or ring or necklace) made of precious metals and set with gems (or imitation gems)', 'name': 'jewelry'}, {'frequency': 'r', 'id': 609, 'synset': 'joystick.n.02', 'synonyms': ['joystick'], 'def': 'a control device for computers consisting of a vertical handle that can move freely in two directions', 'name': 'joystick'}, {'frequency': 'r', 'id': 610, 'synset': 'jump_suit.n.01', 'synonyms': ['jumpsuit'], 'def': "one-piece garment fashioned after a parachutist's uniform", 'name': 'jumpsuit'}, {'frequency': 'c', 'id': 611, 'synset': 'kayak.n.01', 'synonyms': 
['kayak'], 'def': 'a small canoe consisting of a light frame made watertight with animal skins', 'name': 'kayak'}, {'frequency': 'r', 'id': 612, 'synset': 'keg.n.02', 'synonyms': ['keg'], 'def': 'small cask or barrel', 'name': 'keg'}, {'frequency': 'r', 'id': 613, 'synset': 'kennel.n.01', 'synonyms': ['kennel', 'doghouse'], 'def': 'outbuilding that serves as a shelter for a dog', 'name': 'kennel'}, {'frequency': 'c', 'id': 614, 'synset': 'kettle.n.01', 'synonyms': ['kettle', 'boiler'], 'def': 'a metal pot for stewing or boiling; usually has a lid', 'name': 'kettle'}, {'frequency': 'f', 'id': 615, 'synset': 'key.n.01', 'synonyms': ['key'], 'def': 'metal instrument used to unlock a lock', 'name': 'key'}, {'frequency': 'r', 'id': 616, 'synset': 'keycard.n.01', 'synonyms': ['keycard'], 'def': 'a plastic card used to gain access typically to a door', 'name': 'keycard'}, {'frequency': 'r', 'id': 617, 'synset': 'kilt.n.01', 'synonyms': ['kilt'], 'def': 'a knee-length pleated tartan skirt worn by men as part of the traditional dress in the Highlands of northern Scotland', 'name': 'kilt'}, {'frequency': 'c', 'id': 618, 'synset': 'kimono.n.01', 'synonyms': ['kimono'], 'def': 'a loose robe; imitated from robes originally worn by Japanese', 'name': 'kimono'}, {'frequency': 'f', 'id': 619, 'synset': 'kitchen_sink.n.01', 'synonyms': ['kitchen_sink'], 'def': 'a sink in a kitchen', 'name': 'kitchen_sink'}, {'frequency': 'c', 'id': 620, 'synset': 'kitchen_table.n.01', 'synonyms': ['kitchen_table'], 'def': 'a table in the kitchen', 'name': 'kitchen_table'}, {'frequency': 'f', 'id': 621, 'synset': 'kite.n.03', 'synonyms': ['kite'], 'def': 'plaything consisting of a light frame covered with tissue paper; flown in wind at end of a string', 'name': 'kite'}, {'frequency': 'c', 'id': 622, 'synset': 'kitten.n.01', 'synonyms': ['kitten', 'kitty'], 'def': 'young domestic cat', 'name': 'kitten'}, {'frequency': 'c', 'id': 623, 'synset': 'kiwi.n.03', 'synonyms': ['kiwi_fruit'], 'def': 'fuzzy brown egg-shaped fruit with slightly tart green flesh', 'name': 'kiwi_fruit'}, {'frequency': 'f', 'id': 624, 'synset': 'knee_pad.n.01', 'synonyms': ['knee_pad'], 'def': 'protective garment consisting of a pad worn by football or baseball or hockey players', 'name': 'knee_pad'}, {'frequency': 'f', 'id': 625, 'synset': 'knife.n.01', 'synonyms': ['knife'], 'def': 'tool with a blade and point used as a cutting instrument', 'name': 'knife'}, {'frequency': 'r', 'id': 626, 'synset': 'knight.n.02', 'synonyms': ['knight_(chess_piece)', 'horse_(chess_piece)'], 'def': 'a chess game piece shaped to resemble the head of a horse', 'name': 'knight_(chess_piece)'}, {'frequency': 'r', 'id': 627, 'synset': 'knitting_needle.n.01', 'synonyms': ['knitting_needle'], 'def': 'needle consisting of a slender rod with pointed ends; usually used in pairs', 'name': 'knitting_needle'}, {'frequency': 'f', 'id': 628, 'synset': 'knob.n.02', 'synonyms': ['knob'], 'def': 'a round handle often found on a door', 'name': 'knob'}, {'frequency': 'r', 'id': 629, 'synset': 'knocker.n.05', 'synonyms': ['knocker_(on_a_door)', 'doorknocker'], 'def': 'a device (usually metal and ornamental) attached by a hinge to a door', 'name': 'knocker_(on_a_door)'}, {'frequency': 'r', 'id': 630, 'synset': 'koala.n.01', 'synonyms': ['koala', 'koala_bear'], 'def': 'sluggish tailless Australian marsupial with grey furry ears and coat', 'name': 'koala'}, {'frequency': 'r', 'id': 631, 'synset': 'lab_coat.n.01', 'synonyms': ['lab_coat', 'laboratory_coat'], 'def': 'a light coat worn to protect 
clothing from substances used while working in a laboratory', 'name': 'lab_coat'}, {'frequency': 'f', 'id': 632, 'synset': 'ladder.n.01', 'synonyms': ['ladder'], 'def': 'steps consisting of two parallel members connected by rungs', 'name': 'ladder'}, {'frequency': 'c', 'id': 633, 'synset': 'ladle.n.01', 'synonyms': ['ladle'], 'def': 'a spoon-shaped vessel with a long handle frequently used to transfer liquids', 'name': 'ladle'}, {'frequency': 'r', 'id': 634, 'synset': 'ladybug.n.01', 'synonyms': ['ladybug', 'ladybeetle', 'ladybird_beetle'], 'def': 'small round bright-colored and spotted beetle, typically red and black', 'name': 'ladybug'}, {'frequency': 'c', 'id': 635, 'synset': 'lamb.n.01', 'synonyms': ['lamb_(animal)'], 'def': 'young sheep', 'name': 'lamb_(animal)'}, {'frequency': 'r', 'id': 636, 'synset': 'lamb_chop.n.01', 'synonyms': ['lamb-chop', 'lambchop'], 'def': 'chop cut from a lamb', 'name': 'lamb-chop'}, {'frequency': 'f', 'id': 637, 'synset': 'lamp.n.02', 'synonyms': ['lamp'], 'def': 'a piece of furniture holding one or more electric light bulbs', 'name': 'lamp'}, {'frequency': 'f', 'id': 638, 'synset': 'lamppost.n.01', 'synonyms': ['lamppost'], 'def': 'a metal post supporting an outdoor lamp (such as a streetlight)', 'name': 'lamppost'}, {'frequency': 'f', 'id': 639, 'synset': 'lampshade.n.01', 'synonyms': ['lampshade'], 'def': 'a protective ornamental shade used to screen a light bulb from direct view', 'name': 'lampshade'}, {'frequency': 'c', 'id': 640, 'synset': 'lantern.n.01', 'synonyms': ['lantern'], 'def': 'light in a transparent protective case', 'name': 'lantern'}, {'frequency': 'f', 'id': 641, 'synset': 'lanyard.n.02', 'synonyms': ['lanyard', 'laniard'], 'def': 'a cord worn around the neck to hold a knife or whistle, etc.', 'name': 'lanyard'}, {'frequency': 'f', 'id': 642, 'synset': 'laptop.n.01', 'synonyms': ['laptop_computer', 'notebook_computer'], 'def': 'a portable computer small enough to use in your lap', 'name': 'laptop_computer'}, {'frequency': 'r', 'id': 643, 'synset': 'lasagna.n.01', 'synonyms': ['lasagna', 'lasagne'], 'def': 'baked dish of layers of lasagna pasta with sauce and cheese and meat or vegetables', 'name': 'lasagna'}, {'frequency': 'c', 'id': 644, 'synset': 'latch.n.02', 'synonyms': ['latch'], 'def': 'a bar that can be lowered or slid into a groove to fasten a door or gate', 'name': 'latch'}, {'frequency': 'r', 'id': 645, 'synset': 'lawn_mower.n.01', 'synonyms': ['lawn_mower'], 'def': 'garden tool for mowing grass on lawns', 'name': 'lawn_mower'}, {'frequency': 'r', 'id': 646, 'synset': 'leather.n.01', 'synonyms': ['leather'], 'def': 'an animal skin made smooth and flexible by removing the hair and then tanning', 'name': 'leather'}, {'frequency': 'c', 'id': 647, 'synset': 'legging.n.01', 'synonyms': ['legging_(clothing)', 'leging_(clothing)', 'leg_covering'], 'def': 'a garment covering the leg (usually extending from the knee to the ankle)', 'name': 'legging_(clothing)'}, {'frequency': 'c', 'id': 648, 'synset': 'lego.n.01', 'synonyms': ['Lego', 'Lego_set'], 'def': "a child's plastic construction set for making models from blocks", 'name': 'Lego'}, {'frequency': 'f', 'id': 649, 'synset': 'lemon.n.01', 'synonyms': ['lemon'], 'def': 'yellow oval fruit with juicy acidic flesh', 'name': 'lemon'}, {'frequency': 'r', 'id': 650, 'synset': 'lemonade.n.01', 'synonyms': ['lemonade'], 'def': 'sweetened beverage of diluted lemon juice', 'name': 'lemonade'}, {'frequency': 'f', 'id': 651, 'synset': 'lettuce.n.02', 'synonyms': ['lettuce'], 'def': 'leafy plant 
commonly eaten in salad or on sandwiches', 'name': 'lettuce'}, {'frequency': 'f', 'id': 652, 'synset': 'license_plate.n.01', 'synonyms': ['license_plate', 'numberplate'], 'def': "a plate mounted on the front and back of car and bearing the car's registration number", 'name': 'license_plate'}, {'frequency': 'f', 'id': 653, 'synset': 'life_buoy.n.01', 'synonyms': ['life_buoy', 'lifesaver', 'life_belt', 'life_ring'], 'def': 'a ring-shaped life preserver used to prevent drowning (NOT a life-jacket or vest)', 'name': 'life_buoy'}, {'frequency': 'f', 'id': 654, 'synset': 'life_jacket.n.01', 'synonyms': ['life_jacket', 'life_vest'], 'def': 'life preserver consisting of a sleeveless jacket of buoyant or inflatable design', 'name': 'life_jacket'}, {'frequency': 'f', 'id': 655, 'synset': 'light_bulb.n.01', 'synonyms': ['lightbulb'], 'def': 'glass bulb or tube shaped electric device that emits light (DO NOT MARK LAMPS AS A WHOLE)', 'name': 'lightbulb'}, {'frequency': 'r', 'id': 656, 'synset': 'lightning_rod.n.02', 'synonyms': ['lightning_rod', 'lightning_conductor'], 'def': 'a metallic conductor that is attached to a high point and leads to the ground', 'name': 'lightning_rod'}, {'frequency': 'c', 'id': 657, 'synset': 'lime.n.06', 'synonyms': ['lime'], 'def': 'the green acidic fruit of any of various lime trees', 'name': 'lime'}, {'frequency': 'r', 'id': 658, 'synset': 'limousine.n.01', 'synonyms': ['limousine'], 'def': 'long luxurious car; usually driven by a chauffeur', 'name': 'limousine'}, {'frequency': 'r', 'id': 659, 'synset': 'linen.n.02', 'synonyms': ['linen_paper'], 'def': 'a high-quality paper made of linen fibers or with a linen finish', 'name': 'linen_paper'}, {'frequency': 'c', 'id': 660, 'synset': 'lion.n.01', 'synonyms': ['lion'], 'def': 'large gregarious predatory cat of Africa and India', 'name': 'lion'}, {'frequency': 'c', 'id': 661, 'synset': 'lip_balm.n.01', 'synonyms': ['lip_balm'], 'def': 'a balm applied to the lips', 'name': 'lip_balm'}, {'frequency': 'c', 'id': 662, 'synset': 'lipstick.n.01', 'synonyms': ['lipstick', 'lip_rouge'], 'def': 'makeup that is used to color the lips', 'name': 'lipstick'}, {'frequency': 'r', 'id': 663, 'synset': 'liquor.n.01', 'synonyms': ['liquor', 'spirits', 'hard_liquor', 'liqueur', 'cordial'], 'def': 'an alcoholic beverage that is distilled rather than fermented', 'name': 'liquor'}, {'frequency': 'r', 'id': 664, 'synset': 'lizard.n.01', 'synonyms': ['lizard'], 'def': 'a reptile with usually two pairs of legs and a tapering tail', 'name': 'lizard'}, {'frequency': 'r', 'id': 665, 'synset': 'loafer.n.02', 'synonyms': ['Loafer_(type_of_shoe)'], 'def': 'a low leather step-in shoe', 'name': 'Loafer_(type_of_shoe)'}, {'frequency': 'f', 'id': 666, 'synset': 'log.n.01', 'synonyms': ['log'], 'def': 'a segment of the trunk of a tree when stripped of branches', 'name': 'log'}, {'frequency': 'c', 'id': 667, 'synset': 'lollipop.n.02', 'synonyms': ['lollipop'], 'def': 'hard candy on a stick', 'name': 'lollipop'}, {'frequency': 'c', 'id': 668, 'synset': 'lotion.n.01', 'synonyms': ['lotion'], 'def': 'any of various cosmetic preparations that are applied to the skin', 'name': 'lotion'}, {'frequency': 'f', 'id': 669, 'synset': 'loudspeaker.n.01', 'synonyms': ['speaker_(stero_equipment)'], 'def': 'electronic device that produces sound often as part of a stereo system', 'name': 'speaker_(stero_equipment)'}, {'frequency': 'c', 'id': 670, 'synset': 'love_seat.n.01', 'synonyms': ['loveseat'], 'def': 'small sofa that seats two people', 'name': 'loveseat'}, {'frequency': 
'r', 'id': 671, 'synset': 'machine_gun.n.01', 'synonyms': ['machine_gun'], 'def': 'a rapidly firing automatic gun', 'name': 'machine_gun'}, {'frequency': 'f', 'id': 672, 'synset': 'magazine.n.02', 'synonyms': ['magazine'], 'def': 'a paperback periodic publication', 'name': 'magazine'}, {'frequency': 'f', 'id': 673, 'synset': 'magnet.n.01', 'synonyms': ['magnet'], 'def': 'a device that attracts iron and produces a magnetic field', 'name': 'magnet'}, {'frequency': 'r', 'id': 674, 'synset': 'mail_slot.n.01', 'synonyms': ['mail_slot'], 'def': 'a slot (usually in a door) through which mail can be delivered', 'name': 'mail_slot'}, {'frequency': 'c', 'id': 675, 'synset': 'mailbox.n.01', 'synonyms': ['mailbox_(at_home)', 'letter_box_(at_home)'], 'def': 'a private box for delivery of mail', 'name': 'mailbox_(at_home)'}, {'frequency': 'r', 'id': 676, 'synset': 'mallet.n.01', 'synonyms': ['mallet'], 'def': 'a sports implement with a long handle and a hammer-like head used to hit a ball', 'name': 'mallet'}, {'frequency': 'r', 'id': 677, 'synset': 'mammoth.n.01', 'synonyms': ['mammoth'], 'def': 'any of numerous extinct elephants widely distributed in the Pleistocene', 'name': 'mammoth'}, {'frequency': 'c', 'id': 678, 'synset': 'mandarin.n.05', 'synonyms': ['mandarin_orange'], 'def': 'a somewhat flat reddish-orange loose skinned citrus of China', 'name': 'mandarin_orange'}, {'frequency': 'c', 'id': 679, 'synset': 'manger.n.01', 'synonyms': ['manger', 'trough'], 'def': 'a container (usually in a barn or stable) from which cattle or horses feed', 'name': 'manger'}, {'frequency': 'f', 'id': 680, 'synset': 'manhole.n.01', 'synonyms': ['manhole'], 'def': 'a hole (usually with a flush cover) through which a person can gain access to an underground structure', 'name': 'manhole'}, {'frequency': 'c', 'id': 681, 'synset': 'map.n.01', 'synonyms': ['map'], 'def': "a diagrammatic representation of the earth's surface (or part of it)", 'name': 'map'}, {'frequency': 'c', 'id': 682, 'synset': 'marker.n.03', 'synonyms': ['marker'], 'def': 'a writing implement for making a mark', 'name': 'marker'}, {'frequency': 'r', 'id': 683, 'synset': 'martini.n.01', 'synonyms': ['martini'], 'def': 'a cocktail made of gin (or vodka) with dry vermouth', 'name': 'martini'}, {'frequency': 'r', 'id': 684, 'synset': 'mascot.n.01', 'synonyms': ['mascot'], 'def': 'a person or animal that is adopted by a team or other group as a symbolic figure', 'name': 'mascot'}, {'frequency': 'c', 'id': 685, 'synset': 'mashed_potato.n.01', 'synonyms': ['mashed_potato'], 'def': 'potato that has been peeled and boiled and then mashed', 'name': 'mashed_potato'}, {'frequency': 'r', 'id': 686, 'synset': 'masher.n.02', 'synonyms': ['masher'], 'def': 'a kitchen utensil used for mashing (e.g. 
potatoes)', 'name': 'masher'}, {'frequency': 'f', 'id': 687, 'synset': 'mask.n.04', 'synonyms': ['mask', 'facemask'], 'def': 'a protective covering worn over the face', 'name': 'mask'}, {'frequency': 'f', 'id': 688, 'synset': 'mast.n.01', 'synonyms': ['mast'], 'def': 'a vertical spar for supporting sails', 'name': 'mast'}, {'frequency': 'c', 'id': 689, 'synset': 'mat.n.03', 'synonyms': ['mat_(gym_equipment)', 'gym_mat'], 'def': 'sports equipment consisting of a piece of thick padding on the floor for gymnastics', 'name': 'mat_(gym_equipment)'}, {'frequency': 'r', 'id': 690, 'synset': 'matchbox.n.01', 'synonyms': ['matchbox'], 'def': 'a box for holding matches', 'name': 'matchbox'}, {'frequency': 'f', 'id': 691, 'synset': 'mattress.n.01', 'synonyms': ['mattress'], 'def': 'a thick pad filled with resilient material used as a bed or part of a bed', 'name': 'mattress'}, {'frequency': 'c', 'id': 692, 'synset': 'measuring_cup.n.01', 'synonyms': ['measuring_cup'], 'def': 'graduated cup used to measure liquid or granular ingredients', 'name': 'measuring_cup'}, {'frequency': 'c', 'id': 693, 'synset': 'measuring_stick.n.01', 'synonyms': ['measuring_stick', 'ruler_(measuring_stick)', 'measuring_rod'], 'def': 'measuring instrument having a sequence of marks at regular intervals', 'name': 'measuring_stick'}, {'frequency': 'c', 'id': 694, 'synset': 'meatball.n.01', 'synonyms': ['meatball'], 'def': 'ground meat formed into a ball and fried or simmered in broth', 'name': 'meatball'}, {'frequency': 'c', 'id': 695, 'synset': 'medicine.n.02', 'synonyms': ['medicine'], 'def': 'something that treats or prevents or alleviates the symptoms of disease', 'name': 'medicine'}, {'frequency': 'r', 'id': 696, 'synset': 'melon.n.01', 'synonyms': ['melon'], 'def': 'fruit of the gourd family having a hard rind and sweet juicy flesh', 'name': 'melon'}, {'frequency': 'f', 'id': 697, 'synset': 'microphone.n.01', 'synonyms': ['microphone'], 'def': 'device for converting sound waves into electrical energy', 'name': 'microphone'}, {'frequency': 'r', 'id': 698, 'synset': 'microscope.n.01', 'synonyms': ['microscope'], 'def': 'magnifier of the image of small objects', 'name': 'microscope'}, {'frequency': 'f', 'id': 699, 'synset': 'microwave.n.02', 'synonyms': ['microwave_oven'], 'def': 'kitchen appliance that cooks food by passing an electromagnetic wave through it', 'name': 'microwave_oven'}, {'frequency': 'r', 'id': 700, 'synset': 'milestone.n.01', 'synonyms': ['milestone', 'milepost'], 'def': 'stone post at side of a road to show distances', 'name': 'milestone'}, {'frequency': 'c', 'id': 701, 'synset': 'milk.n.01', 'synonyms': ['milk'], 'def': 'a white nutritious liquid secreted by mammals and used as food by human beings', 'name': 'milk'}, {'frequency': 'f', 'id': 702, 'synset': 'minivan.n.01', 'synonyms': ['minivan'], 'def': 'a small box-shaped passenger van', 'name': 'minivan'}, {'frequency': 'r', 'id': 703, 'synset': 'mint.n.05', 'synonyms': ['mint_candy'], 'def': 'a candy that is flavored with a mint oil', 'name': 'mint_candy'}, {'frequency': 'f', 'id': 704, 'synset': 'mirror.n.01', 'synonyms': ['mirror'], 'def': 'polished surface that forms images by reflecting light', 'name': 'mirror'}, {'frequency': 'c', 'id': 705, 'synset': 'mitten.n.01', 'synonyms': ['mitten'], 'def': 'glove that encases the thumb separately and the other four fingers together', 'name': 'mitten'}, {'frequency': 'c', 'id': 706, 'synset': 'mixer.n.04', 'synonyms': ['mixer_(kitchen_tool)', 'stand_mixer'], 'def': 'a kitchen utensil that is used for mixing 
foods', 'name': 'mixer_(kitchen_tool)'}, {'frequency': 'c', 'id': 707, 'synset': 'money.n.03', 'synonyms': ['money'], 'def': 'the official currency issued by a government or national bank', 'name': 'money'}, {'frequency': 'f', 'id': 708, 'synset': 'monitor.n.04', 'synonyms': ['monitor_(computer_equipment) computer_monitor'], 'def': 'a computer monitor', 'name': 'monitor_(computer_equipment) computer_monitor'}, {'frequency': 'c', 'id': 709, 'synset': 'monkey.n.01', 'synonyms': ['monkey'], 'def': 'any of various long-tailed primates', 'name': 'monkey'}, {'frequency': 'f', 'id': 710, 'synset': 'motor.n.01', 'synonyms': ['motor'], 'def': 'machine that converts other forms of energy into mechanical energy and so imparts motion', 'name': 'motor'}, {'frequency': 'f', 'id': 711, 'synset': 'motor_scooter.n.01', 'synonyms': ['motor_scooter', 'scooter'], 'def': 'a wheeled vehicle with small wheels and a low-powered engine', 'name': 'motor_scooter'}, {'frequency': 'r', 'id': 712, 'synset': 'motor_vehicle.n.01', 'synonyms': ['motor_vehicle', 'automotive_vehicle'], 'def': 'a self-propelled wheeled vehicle that does not run on rails', 'name': 'motor_vehicle'}, {'frequency': 'r', 'id': 713, 'synset': 'motorboat.n.01', 'synonyms': ['motorboat', 'powerboat'], 'def': 'a boat propelled by an internal-combustion engine', 'name': 'motorboat'}, {'frequency': 'f', 'id': 714, 'synset': 'motorcycle.n.01', 'synonyms': ['motorcycle'], 'def': 'a motor vehicle with two wheels and a strong frame', 'name': 'motorcycle'}, {'frequency': 'f', 'id': 715, 'synset': 'mound.n.01', 'synonyms': ['mound_(baseball)', "pitcher's_mound"], 'def': '(baseball) the slight elevation on which the pitcher stands', 'name': 'mound_(baseball)'}, {'frequency': 'r', 'id': 716, 'synset': 'mouse.n.01', 'synonyms': ['mouse_(animal_rodent)'], 'def': 'a small rodent with pointed snouts and small ears on elongated bodies with slender usually hairless tails', 'name': 'mouse_(animal_rodent)'}, {'frequency': 'f', 'id': 717, 'synset': 'mouse.n.04', 'synonyms': ['mouse_(computer_equipment)', 'computer_mouse'], 'def': 'a computer input device that controls an on-screen pointer', 'name': 'mouse_(computer_equipment)'}, {'frequency': 'f', 'id': 718, 'synset': 'mousepad.n.01', 'synonyms': ['mousepad'], 'def': 'a small portable pad that provides an operating surface for a computer mouse', 'name': 'mousepad'}, {'frequency': 'c', 'id': 719, 'synset': 'muffin.n.01', 'synonyms': ['muffin'], 'def': 'a sweet quick bread baked in a cup-shaped pan', 'name': 'muffin'}, {'frequency': 'f', 'id': 720, 'synset': 'mug.n.04', 'synonyms': ['mug'], 'def': 'with handle and usually cylindrical', 'name': 'mug'}, {'frequency': 'f', 'id': 721, 'synset': 'mushroom.n.02', 'synonyms': ['mushroom'], 'def': 'a common mushroom', 'name': 'mushroom'}, {'frequency': 'r', 'id': 722, 'synset': 'music_stool.n.01', 'synonyms': ['music_stool', 'piano_stool'], 'def': 'a stool for piano players; usually adjustable in height', 'name': 'music_stool'}, {'frequency': 'r', 'id': 723, 'synset': 'musical_instrument.n.01', 'synonyms': ['musical_instrument', 'instrument_(musical)'], 'def': 'any of various devices or contrivances that can be used to produce musical tones or sounds', 'name': 'musical_instrument'}, {'frequency': 'r', 'id': 724, 'synset': 'nailfile.n.01', 'synonyms': ['nailfile'], 'def': 'a small flat file for shaping the nails', 'name': 'nailfile'}, {'frequency': 'r', 'id': 725, 'synset': 'nameplate.n.01', 'synonyms': ['nameplate'], 'def': 'a plate bearing a name', 'name': 'nameplate'}, 
{'frequency': 'f', 'id': 726, 'synset': 'napkin.n.01', 'synonyms': ['napkin', 'table_napkin', 'serviette'], 'def': 'a small piece of table linen or paper that is used to wipe the mouth and to cover the lap in order to protect clothing', 'name': 'napkin'}, {'frequency': 'r', 'id': 727, 'synset': 'neckerchief.n.01', 'synonyms': ['neckerchief'], 'def': 'a kerchief worn around the neck', 'name': 'neckerchief'}, {'frequency': 'f', 'id': 728, 'synset': 'necklace.n.01', 'synonyms': ['necklace'], 'def': 'jewelry consisting of a cord or chain (often bearing gems) worn about the neck as an ornament', 'name': 'necklace'}, {'frequency': 'f', 'id': 729, 'synset': 'necktie.n.01', 'synonyms': ['necktie', 'tie_(necktie)'], 'def': 'neckwear consisting of a long narrow piece of material worn under a collar and tied in knot at the front', 'name': 'necktie'}, {'frequency': 'r', 'id': 730, 'synset': 'needle.n.03', 'synonyms': ['needle'], 'def': 'a sharp pointed implement (usually metal)', 'name': 'needle'}, {'frequency': 'c', 'id': 731, 'synset': 'nest.n.01', 'synonyms': ['nest'], 'def': 'a structure in which animals lay eggs or give birth to their young', 'name': 'nest'}, {'frequency': 'r', 'id': 732, 'synset': 'newsstand.n.01', 'synonyms': ['newsstand'], 'def': 'a stall where newspapers and other periodicals are sold', 'name': 'newsstand'}, {'frequency': 'c', 'id': 733, 'synset': 'nightwear.n.01', 'synonyms': ['nightshirt', 'nightwear', 'sleepwear', 'nightclothes'], 'def': 'garments designed to be worn in bed', 'name': 'nightshirt'}, {'frequency': 'r', 'id': 734, 'synset': 'nosebag.n.01', 'synonyms': ['nosebag_(for_animals)', 'feedbag'], 'def': 'a canvas bag that is used to feed an animal (such as a horse); covers the muzzle and fastens at the top of the head', 'name': 'nosebag_(for_animals)'}, {'frequency': 'r', 'id': 735, 'synset': 'noseband.n.01', 'synonyms': ['noseband_(for_animals)', 'nosepiece_(for_animals)'], 'def': "a strap that is the part of a bridle that goes over the animal's nose", 'name': 'noseband_(for_animals)'}, {'frequency': 'f', 'id': 736, 'synset': 'notebook.n.01', 'synonyms': ['notebook'], 'def': 'a book with blank pages for recording notes or memoranda', 'name': 'notebook'}, {'frequency': 'c', 'id': 737, 'synset': 'notepad.n.01', 'synonyms': ['notepad'], 'def': 'a pad of paper for keeping notes', 'name': 'notepad'}, {'frequency': 'c', 'id': 738, 'synset': 'nut.n.03', 'synonyms': ['nut'], 'def': 'a small metal block (usually square or hexagonal) with internal screw thread to be fitted onto a bolt', 'name': 'nut'}, {'frequency': 'r', 'id': 739, 'synset': 'nutcracker.n.01', 'synonyms': ['nutcracker'], 'def': 'a hand tool used to crack nuts open', 'name': 'nutcracker'}, {'frequency': 'c', 'id': 740, 'synset': 'oar.n.01', 'synonyms': ['oar'], 'def': 'an implement used to propel or steer a boat', 'name': 'oar'}, {'frequency': 'r', 'id': 741, 'synset': 'octopus.n.01', 'synonyms': ['octopus_(food)'], 'def': 'tentacles of octopus prepared as food', 'name': 'octopus_(food)'}, {'frequency': 'r', 'id': 742, 'synset': 'octopus.n.02', 'synonyms': ['octopus_(animal)'], 'def': 'bottom-living cephalopod having a soft oval body with eight long tentacles', 'name': 'octopus_(animal)'}, {'frequency': 'c', 'id': 743, 'synset': 'oil_lamp.n.01', 'synonyms': ['oil_lamp', 'kerosene_lamp', 'kerosine_lamp'], 'def': 'a lamp that burns oil (as kerosine) for light', 'name': 'oil_lamp'}, {'frequency': 'c', 'id': 744, 'synset': 'olive_oil.n.01', 'synonyms': ['olive_oil'], 'def': 'oil from olives', 'name': 'olive_oil'}, 
{'frequency': 'r', 'id': 745, 'synset': 'omelet.n.01', 'synonyms': ['omelet', 'omelette'], 'def': 'beaten eggs cooked until just set; may be folded around e.g. ham or cheese or jelly', 'name': 'omelet'}, {'frequency': 'f', 'id': 746, 'synset': 'onion.n.01', 'synonyms': ['onion'], 'def': 'the bulb of an onion plant', 'name': 'onion'}, {'frequency': 'f', 'id': 747, 'synset': 'orange.n.01', 'synonyms': ['orange_(fruit)'], 'def': 'orange (FRUIT of an orange tree)', 'name': 'orange_(fruit)'}, {'frequency': 'c', 'id': 748, 'synset': 'orange_juice.n.01', 'synonyms': ['orange_juice'], 'def': 'bottled or freshly squeezed juice of oranges', 'name': 'orange_juice'}, {'frequency': 'r', 'id': 749, 'synset': 'oregano.n.01', 'synonyms': ['oregano', 'marjoram'], 'def': 'aromatic Eurasian perennial herb used in cooking and baking', 'name': 'oregano'}, {'frequency': 'c', 'id': 750, 'synset': 'ostrich.n.02', 'synonyms': ['ostrich'], 'def': 'fast-running African flightless bird with two-toed feet; largest living bird', 'name': 'ostrich'}, {'frequency': 'c', 'id': 751, 'synset': 'ottoman.n.03', 'synonyms': ['ottoman', 'pouf', 'pouffe', 'hassock'], 'def': 'thick cushion used as a seat', 'name': 'ottoman'}, {'frequency': 'c', 'id': 752, 'synset': 'overall.n.01', 'synonyms': ['overalls_(clothing)'], 'def': 'work clothing consisting of denim trousers usually with a bib and shoulder straps', 'name': 'overalls_(clothing)'}, {'frequency': 'c', 'id': 753, 'synset': 'owl.n.01', 'synonyms': ['owl'], 'def': 'nocturnal bird of prey with hawk-like beak and claws and large head with front-facing eyes', 'name': 'owl'}, {'frequency': 'c', 'id': 754, 'synset': 'packet.n.03', 'synonyms': ['packet'], 'def': 'a small package or bundle', 'name': 'packet'}, {'frequency': 'r', 'id': 755, 'synset': 'pad.n.03', 'synonyms': ['inkpad', 'inking_pad', 'stamp_pad'], 'def': 'absorbent material saturated with ink used to transfer ink evenly to a rubber stamp', 'name': 'inkpad'}, {'frequency': 'c', 'id': 756, 'synset': 'pad.n.04', 'synonyms': ['pad'], 'def': 'a flat mass of soft material used for protection, stuffing, or comfort', 'name': 'pad'}, {'frequency': 'c', 'id': 757, 'synset': 'paddle.n.04', 'synonyms': ['paddle', 'boat_paddle'], 'def': 'a short light oar used without an oarlock to propel a canoe or small boat', 'name': 'paddle'}, {'frequency': 'c', 'id': 758, 'synset': 'padlock.n.01', 'synonyms': ['padlock'], 'def': 'a detachable, portable lock', 'name': 'padlock'}, {'frequency': 'r', 'id': 759, 'synset': 'paintbox.n.01', 'synonyms': ['paintbox'], 'def': "a box containing a collection of cubes or tubes of artists' paint", 'name': 'paintbox'}, {'frequency': 'c', 'id': 760, 'synset': 'paintbrush.n.01', 'synonyms': ['paintbrush'], 'def': 'a brush used as an applicator to apply paint', 'name': 'paintbrush'}, {'frequency': 'f', 'id': 761, 'synset': 'painting.n.01', 'synonyms': ['painting'], 'def': 'graphic art consisting of an artistic composition made by applying paints to a surface', 'name': 'painting'}, {'frequency': 'c', 'id': 762, 'synset': 'pajama.n.02', 'synonyms': ['pajamas', 'pyjamas'], 'def': 'loose-fitting nightclothes worn for sleeping or lounging', 'name': 'pajamas'}, {'frequency': 'c', 'id': 763, 'synset': 'palette.n.02', 'synonyms': ['palette', 'pallet'], 'def': 'board that provides a flat surface on which artists mix paints and the range of colors used', 'name': 'palette'}, {'frequency': 'f', 'id': 764, 'synset': 'pan.n.01', 'synonyms': ['pan_(for_cooking)', 'cooking_pan'], 'def': 'cooking utensil consisting of a wide 
metal vessel', 'name': 'pan_(for_cooking)'}, {'frequency': 'r', 'id': 765, 'synset': 'pan.n.03', 'synonyms': ['pan_(metal_container)'], 'def': 'shallow container made of metal', 'name': 'pan_(metal_container)'}, {'frequency': 'c', 'id': 766, 'synset': 'pancake.n.01', 'synonyms': ['pancake'], 'def': 'a flat cake of thin batter fried on both sides on a griddle', 'name': 'pancake'}, {'frequency': 'r', 'id': 767, 'synset': 'pantyhose.n.01', 'synonyms': ['pantyhose'], 'def': "a woman's tights consisting of underpants and stockings", 'name': 'pantyhose'}, {'frequency': 'r', 'id': 768, 'synset': 'papaya.n.02', 'synonyms': ['papaya'], 'def': 'large oval melon-like tropical fruit with yellowish flesh', 'name': 'papaya'}, {'frequency': 'r', 'id': 769, 'synset': 'paper_clip.n.01', 'synonyms': ['paperclip'], 'def': 'a wire or plastic clip for holding sheets of paper together', 'name': 'paperclip'}, {'frequency': 'f', 'id': 770, 'synset': 'paper_plate.n.01', 'synonyms': ['paper_plate'], 'def': 'a disposable plate made of cardboard', 'name': 'paper_plate'}, {'frequency': 'f', 'id': 771, 'synset': 'paper_towel.n.01', 'synonyms': ['paper_towel'], 'def': 'a disposable towel made of absorbent paper', 'name': 'paper_towel'}, {'frequency': 'r', 'id': 772, 'synset': 'paperback_book.n.01', 'synonyms': ['paperback_book', 'paper-back_book', 'softback_book', 'soft-cover_book'], 'def': 'a book with paper covers', 'name': 'paperback_book'}, {'frequency': 'r', 'id': 773, 'synset': 'paperweight.n.01', 'synonyms': ['paperweight'], 'def': 'a weight used to hold down a stack of papers', 'name': 'paperweight'}, {'frequency': 'c', 'id': 774, 'synset': 'parachute.n.01', 'synonyms': ['parachute'], 'def': 'rescue equipment consisting of a device that fills with air and retards your fall', 'name': 'parachute'}, {'frequency': 'r', 'id': 775, 'synset': 'parakeet.n.01', 'synonyms': ['parakeet', 'parrakeet', 'parroket', 'paraquet', 'paroquet', 'parroquet'], 'def': 'any of numerous small slender long-tailed parrots', 'name': 'parakeet'}, {'frequency': 'c', 'id': 776, 'synset': 'parasail.n.01', 'synonyms': ['parasail_(sports)'], 'def': 'parachute that will lift a person up into the air when it is towed by a motorboat or a car', 'name': 'parasail_(sports)'}, {'frequency': 'r', 'id': 777, 'synset': 'parchment.n.01', 'synonyms': ['parchment'], 'def': 'a superior paper resembling sheepskin', 'name': 'parchment'}, {'frequency': 'r', 'id': 778, 'synset': 'parka.n.01', 'synonyms': ['parka', 'anorak'], 'def': "a kind of heavy jacket (`windcheater' is a British term)", 'name': 'parka'}, {'frequency': 'f', 'id': 779, 'synset': 'parking_meter.n.01', 'synonyms': ['parking_meter'], 'def': 'a coin-operated timer located next to a parking space', 'name': 'parking_meter'}, {'frequency': 'c', 'id': 780, 'synset': 'parrot.n.01', 'synonyms': ['parrot'], 'def': 'usually brightly colored tropical birds with short hooked beaks and the ability to mimic sounds', 'name': 'parrot'}, {'frequency': 'c', 'id': 781, 'synset': 'passenger_car.n.01', 'synonyms': ['passenger_car_(part_of_a_train)', 'coach_(part_of_a_train)'], 'def': 'a railcar where passengers ride', 'name': 'passenger_car_(part_of_a_train)'}, {'frequency': 'r', 'id': 782, 'synset': 'passenger_ship.n.01', 'synonyms': ['passenger_ship'], 'def': 'a ship built to carry passengers', 'name': 'passenger_ship'}, {'frequency': 'r', 'id': 783, 'synset': 'passport.n.02', 'synonyms': ['passport'], 'def': 'a document issued by a country to a citizen allowing that person to travel abroad and re-enter the home 
country', 'name': 'passport'}, {'frequency': 'f', 'id': 784, 'synset': 'pastry.n.02', 'synonyms': ['pastry'], 'def': 'any of various baked foods made of dough or batter', 'name': 'pastry'}, {'frequency': 'r', 'id': 785, 'synset': 'patty.n.01', 'synonyms': ['patty_(food)'], 'def': 'small flat mass of chopped food', 'name': 'patty_(food)'}, {'frequency': 'c', 'id': 786, 'synset': 'pea.n.01', 'synonyms': ['pea_(food)'], 'def': 'seed of a pea plant used for food', 'name': 'pea_(food)'}, {'frequency': 'c', 'id': 787, 'synset': 'peach.n.03', 'synonyms': ['peach'], 'def': 'downy juicy fruit with sweet yellowish or whitish flesh', 'name': 'peach'}, {'frequency': 'c', 'id': 788, 'synset': 'peanut_butter.n.01', 'synonyms': ['peanut_butter'], 'def': 'a spread made from ground peanuts', 'name': 'peanut_butter'}, {'frequency': 'c', 'id': 789, 'synset': 'pear.n.01', 'synonyms': ['pear'], 'def': 'sweet juicy gritty-textured fruit available in many varieties', 'name': 'pear'}, {'frequency': 'r', 'id': 790, 'synset': 'peeler.n.03', 'synonyms': ['peeler_(tool_for_fruit_and_vegetables)'], 'def': 'a device for peeling vegetables or fruits', 'name': 'peeler_(tool_for_fruit_and_vegetables)'}, {'frequency': 'r', 'id': 791, 'synset': 'pegboard.n.01', 'synonyms': ['pegboard'], 'def': 'a board perforated with regularly spaced holes into which pegs can be fitted', 'name': 'pegboard'}, {'frequency': 'c', 'id': 792, 'synset': 'pelican.n.01', 'synonyms': ['pelican'], 'def': 'large long-winged warm-water seabird having a large bill with a distensible pouch for fish', 'name': 'pelican'}, {'frequency': 'f', 'id': 793, 'synset': 'pen.n.01', 'synonyms': ['pen'], 'def': 'a writing implement with a point from which ink flows', 'name': 'pen'}, {'frequency': 'c', 'id': 794, 'synset': 'pencil.n.01', 'synonyms': ['pencil'], 'def': 'a thin cylindrical pointed writing implement made of wood and graphite', 'name': 'pencil'}, {'frequency': 'r', 'id': 795, 'synset': 'pencil_box.n.01', 'synonyms': ['pencil_box', 'pencil_case'], 'def': 'a box for holding pencils', 'name': 'pencil_box'}, {'frequency': 'r', 'id': 796, 'synset': 'pencil_sharpener.n.01', 'synonyms': ['pencil_sharpener'], 'def': 'a rotary implement for sharpening the point on pencils', 'name': 'pencil_sharpener'}, {'frequency': 'r', 'id': 797, 'synset': 'pendulum.n.01', 'synonyms': ['pendulum'], 'def': 'an apparatus consisting of an object mounted so that it swings freely under the influence of gravity', 'name': 'pendulum'}, {'frequency': 'c', 'id': 798, 'synset': 'penguin.n.01', 'synonyms': ['penguin'], 'def': 'short-legged flightless birds of cold southern regions having webbed feet and wings modified as flippers', 'name': 'penguin'}, {'frequency': 'r', 'id': 799, 'synset': 'pennant.n.02', 'synonyms': ['pennant'], 'def': 'a flag longer than it is wide (and often tapering)', 'name': 'pennant'}, {'frequency': 'r', 'id': 800, 'synset': 'penny.n.02', 'synonyms': ['penny_(coin)'], 'def': 'a coin worth one-hundredth of the value of the basic unit', 'name': 'penny_(coin)'}, {'frequency': 'c', 'id': 801, 'synset': 'pepper.n.03', 'synonyms': ['pepper', 'peppercorn'], 'def': 'pungent seasoning from the berry of the common pepper plant; whole or ground', 'name': 'pepper'}, {'frequency': 'c', 'id': 802, 'synset': 'pepper_mill.n.01', 'synonyms': ['pepper_mill', 'pepper_grinder'], 'def': 'a mill for grinding pepper', 'name': 'pepper_mill'}, {'frequency': 'c', 'id': 803, 'synset': 'perfume.n.02', 'synonyms': ['perfume'], 'def': 'a toiletry that emits and diffuses a fragrant odor', 
'name': 'perfume'}, {'frequency': 'r', 'id': 804, 'synset': 'persimmon.n.02', 'synonyms': ['persimmon'], 'def': 'orange fruit resembling a plum; edible when fully ripe', 'name': 'persimmon'}, {'frequency': 'f', 'id': 805, 'synset': 'person.n.01', 'synonyms': ['baby', 'child', 'boy', 'girl', 'man', 'woman', 'person', 'human'], 'def': 'a human being', 'name': 'baby'}, {'frequency': 'r', 'id': 806, 'synset': 'pet.n.01', 'synonyms': ['pet'], 'def': 'a domesticated animal kept for companionship or amusement', 'name': 'pet'}, {'frequency': 'r', 'id': 807, 'synset': 'petfood.n.01', 'synonyms': ['petfood', 'pet-food'], 'def': 'food prepared for animal pets', 'name': 'petfood'}, {'frequency': 'r', 'id': 808, 'synset': 'pew.n.01', 'synonyms': ['pew_(church_bench)', 'church_bench'], 'def': 'long bench with backs; used in church by the congregation', 'name': 'pew_(church_bench)'}, {'frequency': 'r', 'id': 809, 'synset': 'phonebook.n.01', 'synonyms': ['phonebook', 'telephone_book', 'telephone_directory'], 'def': 'a directory containing an alphabetical list of telephone subscribers and their telephone numbers', 'name': 'phonebook'}, {'frequency': 'c', 'id': 810, 'synset': 'phonograph_record.n.01', 'synonyms': ['phonograph_record', 'phonograph_recording', 'record_(phonograph_recording)'], 'def': 'sound recording consisting of a typically black disk with a continuous groove', 'name': 'phonograph_record'}, {'frequency': 'c', 'id': 811, 'synset': 'piano.n.01', 'synonyms': ['piano'], 'def': 'a keyboard instrument that is played by depressing keys that cause hammers to strike tuned strings and produce sounds', 'name': 'piano'}, {'frequency': 'f', 'id': 812, 'synset': 'pickle.n.01', 'synonyms': ['pickle'], 'def': 'vegetables (especially cucumbers) preserved in brine or vinegar', 'name': 'pickle'}, {'frequency': 'f', 'id': 813, 'synset': 'pickup.n.01', 'synonyms': ['pickup_truck'], 'def': 'a light truck with an open body and low sides and a tailboard', 'name': 'pickup_truck'}, {'frequency': 'c', 'id': 814, 'synset': 'pie.n.01', 'synonyms': ['pie'], 'def': 'dish baked in pastry-lined pan often with a pastry top', 'name': 'pie'}, {'frequency': 'c', 'id': 815, 'synset': 'pigeon.n.01', 'synonyms': ['pigeon'], 'def': 'wild and domesticated birds having a heavy body and short legs', 'name': 'pigeon'}, {'frequency': 'r', 'id': 816, 'synset': 'piggy_bank.n.01', 'synonyms': ['piggy_bank', 'penny_bank'], 'def': "a child's coin bank (often shaped like a pig)", 'name': 'piggy_bank'}, {'frequency': 'f', 'id': 817, 'synset': 'pillow.n.01', 'synonyms': ['pillow'], 'def': 'a cushion to support the head of a sleeping person', 'name': 'pillow'}, {'frequency': 'r', 'id': 818, 'synset': 'pin.n.09', 'synonyms': ['pin_(non_jewelry)'], 'def': 'a small slender (often pointed) piece of wood or metal used to support or fasten or attach things', 'name': 'pin_(non_jewelry)'}, {'frequency': 'f', 'id': 819, 'synset': 'pineapple.n.02', 'synonyms': ['pineapple'], 'def': 'large sweet fleshy tropical fruit with a tuft of stiff leaves', 'name': 'pineapple'}, {'frequency': 'c', 'id': 820, 'synset': 'pinecone.n.01', 'synonyms': ['pinecone'], 'def': 'the seed-producing cone of a pine tree', 'name': 'pinecone'}, {'frequency': 'r', 'id': 821, 'synset': 'ping-pong_ball.n.01', 'synonyms': ['ping-pong_ball'], 'def': 'light hollow ball used in playing table tennis', 'name': 'ping-pong_ball'}, {'frequency': 'r', 'id': 822, 'synset': 'pinwheel.n.03', 'synonyms': ['pinwheel'], 'def': 'a toy consisting of vanes of colored paper or plastic that is pinned to a 
stick and spins when it is pointed into the wind', 'name': 'pinwheel'}, {'frequency': 'r', 'id': 823, 'synset': 'pipe.n.01', 'synonyms': ['tobacco_pipe'], 'def': 'a tube with a small bowl at one end; used for smoking tobacco', 'name': 'tobacco_pipe'}, {'frequency': 'f', 'id': 824, 'synset': 'pipe.n.02', 'synonyms': ['pipe', 'piping'], 'def': 'a long tube made of metal or plastic that is used to carry water or oil or gas etc.', 'name': 'pipe'}, {'frequency': 'r', 'id': 825, 'synset': 'pistol.n.01', 'synonyms': ['pistol', 'handgun'], 'def': 'a firearm that is held and fired with one hand', 'name': 'pistol'}, {'frequency': 'r', 'id': 826, 'synset': 'pita.n.01', 'synonyms': ['pita_(bread)', 'pocket_bread'], 'def': 'usually small round bread that can open into a pocket for filling', 'name': 'pita_(bread)'}, {'frequency': 'f', 'id': 827, 'synset': 'pitcher.n.02', 'synonyms': ['pitcher_(vessel_for_liquid)', 'ewer'], 'def': 'an open vessel with a handle and a spout for pouring', 'name': 'pitcher_(vessel_for_liquid)'}, {'frequency': 'r', 'id': 828, 'synset': 'pitchfork.n.01', 'synonyms': ['pitchfork'], 'def': 'a long-handled hand tool with sharp widely spaced prongs for lifting and pitching hay', 'name': 'pitchfork'}, {'frequency': 'f', 'id': 829, 'synset': 'pizza.n.01', 'synonyms': ['pizza'], 'def': 'Italian open pie made of thin bread dough spread with a spiced mixture of e.g. tomato sauce and cheese', 'name': 'pizza'}, {'frequency': 'f', 'id': 830, 'synset': 'place_mat.n.01', 'synonyms': ['place_mat'], 'def': 'a mat placed on a table for an individual place setting', 'name': 'place_mat'}, {'frequency': 'f', 'id': 831, 'synset': 'plate.n.04', 'synonyms': ['plate'], 'def': 'dish on which food is served or from which food is eaten', 'name': 'plate'}, {'frequency': 'c', 'id': 832, 'synset': 'platter.n.01', 'synonyms': ['platter'], 'def': 'a large shallow dish used for serving food', 'name': 'platter'}, {'frequency': 'r', 'id': 833, 'synset': 'playing_card.n.01', 'synonyms': ['playing_card'], 'def': 'one of a pack of cards that are used to play card games', 'name': 'playing_card'}, {'frequency': 'r', 'id': 834, 'synset': 'playpen.n.01', 'synonyms': ['playpen'], 'def': 'a portable enclosure in which babies may be left to play', 'name': 'playpen'}, {'frequency': 'c', 'id': 835, 'synset': 'pliers.n.01', 'synonyms': ['pliers', 'plyers'], 'def': 'a gripping hand tool with two hinged arms and (usually) serrated jaws', 'name': 'pliers'}, {'frequency': 'r', 'id': 836, 'synset': 'plow.n.01', 'synonyms': ['plow_(farm_equipment)', 'plough_(farm_equipment)'], 'def': 'a farm tool having one or more heavy blades to break the soil and cut a furrow prior to sowing', 'name': 'plow_(farm_equipment)'}, {'frequency': 'r', 'id': 837, 'synset': 'pocket_watch.n.01', 'synonyms': ['pocket_watch'], 'def': 'a watch that is carried in a small watch pocket', 'name': 'pocket_watch'}, {'frequency': 'c', 'id': 838, 'synset': 'pocketknife.n.01', 'synonyms': ['pocketknife'], 'def': 'a knife with a blade that folds into the handle; suitable for carrying in the pocket', 'name': 'pocketknife'}, {'frequency': 'c', 'id': 839, 'synset': 'poker.n.01', 'synonyms': ['poker_(fire_stirring_tool)', 'stove_poker', 'fire_hook'], 'def': 'fire iron consisting of a metal rod with a handle; used to stir a fire', 'name': 'poker_(fire_stirring_tool)'}, {'frequency': 'f', 'id': 840, 'synset': 'pole.n.01', 'synonyms': ['pole', 'post'], 'def': 'a long (usually round) rod of wood or metal or plastic', 'name': 'pole'}, {'frequency': 'r', 'id': 841, 'synset': 
'police_van.n.01', 'synonyms': ['police_van', 'police_wagon', 'paddy_wagon', 'patrol_wagon'], 'def': 'van used by police to transport prisoners', 'name': 'police_van'}, {'frequency': 'f', 'id': 842, 'synset': 'polo_shirt.n.01', 'synonyms': ['polo_shirt', 'sport_shirt'], 'def': 'a shirt with short sleeves designed for comfort and casual wear', 'name': 'polo_shirt'}, {'frequency': 'r', 'id': 843, 'synset': 'poncho.n.01', 'synonyms': ['poncho'], 'def': 'a blanket-like cloak with a hole in the center for the head', 'name': 'poncho'}, {'frequency': 'c', 'id': 844, 'synset': 'pony.n.05', 'synonyms': ['pony'], 'def': 'any of various breeds of small gentle horses usually less than five feet high at the shoulder', 'name': 'pony'}, {'frequency': 'r', 'id': 845, 'synset': 'pool_table.n.01', 'synonyms': ['pool_table', 'billiard_table', 'snooker_table'], 'def': 'game equipment consisting of a heavy table on which pool is played', 'name': 'pool_table'}, {'frequency': 'f', 'id': 846, 'synset': 'pop.n.02', 'synonyms': ['pop_(soda)', 'soda_(pop)', 'tonic', 'soft_drink'], 'def': 'a sweet drink containing carbonated water and flavoring', 'name': 'pop_(soda)'}, {'frequency': 'r', 'id': 847, 'synset': 'portrait.n.02', 'synonyms': ['portrait', 'portrayal'], 'def': 'any likeness of a person, in any medium', 'name': 'portrait'}, {'frequency': 'c', 'id': 848, 'synset': 'postbox.n.01', 'synonyms': ['postbox_(public)', 'mailbox_(public)'], 'def': 'public box for deposit of mail', 'name': 'postbox_(public)'}, {'frequency': 'c', 'id': 849, 'synset': 'postcard.n.01', 'synonyms': ['postcard', 'postal_card', 'mailing-card'], 'def': 'a card for sending messages by post without an envelope', 'name': 'postcard'}, {'frequency': 'f', 'id': 850, 'synset': 'poster.n.01', 'synonyms': ['poster', 'placard'], 'def': 'a sign posted in a public place as an advertisement', 'name': 'poster'}, {'frequency': 'f', 'id': 851, 'synset': 'pot.n.01', 'synonyms': ['pot'], 'def': 'metal or earthenware cooking vessel that is usually round and deep; often has a handle and lid', 'name': 'pot'}, {'frequency': 'f', 'id': 852, 'synset': 'pot.n.04', 'synonyms': ['flowerpot'], 'def': 'a container in which plants are cultivated', 'name': 'flowerpot'}, {'frequency': 'f', 'id': 853, 'synset': 'potato.n.01', 'synonyms': ['potato'], 'def': 'an edible tuber native to South America', 'name': 'potato'}, {'frequency': 'c', 'id': 854, 'synset': 'potholder.n.01', 'synonyms': ['potholder'], 'def': 'an insulated pad for holding hot pots', 'name': 'potholder'}, {'frequency': 'c', 'id': 855, 'synset': 'pottery.n.01', 'synonyms': ['pottery', 'clayware'], 'def': 'ceramic ware made from clay and baked in a kiln', 'name': 'pottery'}, {'frequency': 'c', 'id': 856, 'synset': 'pouch.n.01', 'synonyms': ['pouch'], 'def': 'a small or medium size container for holding or carrying things', 'name': 'pouch'}, {'frequency': 'r', 'id': 857, 'synset': 'power_shovel.n.01', 'synonyms': ['power_shovel', 'excavator', 'digger'], 'def': 'a machine for excavating', 'name': 'power_shovel'}, {'frequency': 'c', 'id': 858, 'synset': 'prawn.n.01', 'synonyms': ['prawn', 'shrimp'], 'def': 'any of various edible decapod crustaceans', 'name': 'prawn'}, {'frequency': 'f', 'id': 859, 'synset': 'printer.n.03', 'synonyms': ['printer', 'printing_machine'], 'def': 'a machine that prints', 'name': 'printer'}, {'frequency': 'c', 'id': 860, 'synset': 'projectile.n.01', 'synonyms': ['projectile_(weapon)', 'missile'], 'def': 'a weapon that is forcibly thrown or projected at a target', 'name': 
'projectile_(weapon)'}, {'frequency': 'c', 'id': 861, 'synset': 'projector.n.02', 'synonyms': ['projector'], 'def': 'an optical instrument that projects an enlarged image onto a screen', 'name': 'projector'}, {'frequency': 'f', 'id': 862, 'synset': 'propeller.n.01', 'synonyms': ['propeller', 'propellor'], 'def': 'a mechanical device that rotates to push against air or water', 'name': 'propeller'}, {'frequency': 'r', 'id': 863, 'synset': 'prune.n.01', 'synonyms': ['prune'], 'def': 'dried plum', 'name': 'prune'}, {'frequency': 'r', 'id': 864, 'synset': 'pudding.n.01', 'synonyms': ['pudding'], 'def': 'any of various soft thick unsweetened baked dishes', 'name': 'pudding'}, {'frequency': 'r', 'id': 865, 'synset': 'puffer.n.02', 'synonyms': ['puffer_(fish)', 'pufferfish', 'blowfish', 'globefish'], 'def': 'fishes whose elongated spiny body can inflate itself with water or air to form a globe', 'name': 'puffer_(fish)'}, {'frequency': 'r', 'id': 866, 'synset': 'puffin.n.01', 'synonyms': ['puffin'], 'def': 'seabirds having short necks and brightly colored compressed bills', 'name': 'puffin'}, {'frequency': 'r', 'id': 867, 'synset': 'pug.n.01', 'synonyms': ['pug-dog'], 'def': 'small compact smooth-coated breed of Asiatic origin having a tightly curled tail and broad flat wrinkled muzzle', 'name': 'pug-dog'}, {'frequency': 'c', 'id': 868, 'synset': 'pumpkin.n.02', 'synonyms': ['pumpkin'], 'def': 'usually large pulpy deep-yellow round fruit of the squash family maturing in late summer or early autumn', 'name': 'pumpkin'}, {'frequency': 'r', 'id': 869, 'synset': 'punch.n.03', 'synonyms': ['puncher'], 'def': 'a tool for making holes or indentations', 'name': 'puncher'}, {'frequency': 'r', 'id': 870, 'synset': 'puppet.n.01', 'synonyms': ['puppet', 'marionette'], 'def': 'a small figure of a person operated from above with strings by a puppeteer', 'name': 'puppet'}, {'frequency': 'r', 'id': 871, 'synset': 'puppy.n.01', 'synonyms': ['puppy'], 'def': 'a young dog', 'name': 'puppy'}, {'frequency': 'r', 'id': 872, 'synset': 'quesadilla.n.01', 'synonyms': ['quesadilla'], 'def': 'a tortilla that is filled with cheese and heated', 'name': 'quesadilla'}, {'frequency': 'r', 'id': 873, 'synset': 'quiche.n.02', 'synonyms': ['quiche'], 'def': 'a tart filled with rich unsweetened custard; often contains other ingredients (as cheese or ham or seafood or vegetables)', 'name': 'quiche'}, {'frequency': 'f', 'id': 874, 'synset': 'quilt.n.01', 'synonyms': ['quilt', 'comforter'], 'def': 'bedding made of two layers of cloth filled with stuffing and stitched together', 'name': 'quilt'}, {'frequency': 'c', 'id': 875, 'synset': 'rabbit.n.01', 'synonyms': ['rabbit'], 'def': 'any of various burrowing animals of the family Leporidae having long ears and short tails', 'name': 'rabbit'}, {'frequency': 'r', 'id': 876, 'synset': 'racer.n.02', 'synonyms': ['race_car', 'racing_car'], 'def': 'a fast car that competes in races', 'name': 'race_car'}, {'frequency': 'c', 'id': 877, 'synset': 'racket.n.04', 'synonyms': ['racket', 'racquet'], 'def': 'a sports implement used to strike a ball in various games', 'name': 'racket'}, {'frequency': 'r', 'id': 878, 'synset': 'radar.n.01', 'synonyms': ['radar'], 'def': 'measuring instrument in which the echo of a pulse of microwave radiation is used to detect and locate distant objects', 'name': 'radar'}, {'frequency': 'c', 'id': 879, 'synset': 'radiator.n.03', 'synonyms': ['radiator'], 'def': 'a mechanism consisting of a metal honeycomb through which hot fluids circulate', 'name': 'radiator'}, 
{'frequency': 'c', 'id': 880, 'synset': 'radio_receiver.n.01', 'synonyms': ['radio_receiver', 'radio_set', 'radio', 'tuner_(radio)'], 'def': 'an electronic receiver that detects and demodulates and amplifies transmitted radio signals', 'name': 'radio_receiver'}, {'frequency': 'c', 'id': 881, 'synset': 'radish.n.03', 'synonyms': ['radish', 'daikon'], 'def': 'pungent edible root of any of various cultivated radish plants', 'name': 'radish'}, {'frequency': 'c', 'id': 882, 'synset': 'raft.n.01', 'synonyms': ['raft'], 'def': 'a flat float (usually made of logs or planks) that can be used for transport or as a platform for swimmers', 'name': 'raft'}, {'frequency': 'r', 'id': 883, 'synset': 'rag_doll.n.01', 'synonyms': ['rag_doll'], 'def': 'a cloth doll that is stuffed and (usually) painted', 'name': 'rag_doll'}, {'frequency': 'c', 'id': 884, 'synset': 'raincoat.n.01', 'synonyms': ['raincoat', 'waterproof_jacket'], 'def': 'a water-resistant coat', 'name': 'raincoat'}, {'frequency': 'c', 'id': 885, 'synset': 'ram.n.05', 'synonyms': ['ram_(animal)'], 'def': 'uncastrated adult male sheep', 'name': 'ram_(animal)'}, {'frequency': 'c', 'id': 886, 'synset': 'raspberry.n.02', 'synonyms': ['raspberry'], 'def': 'red or black edible aggregate berries usually smaller than the related blackberries', 'name': 'raspberry'}, {'frequency': 'r', 'id': 887, 'synset': 'rat.n.01', 'synonyms': ['rat'], 'def': 'any of various long-tailed rodents similar to but larger than a mouse', 'name': 'rat'}, {'frequency': 'c', 'id': 888, 'synset': 'razorblade.n.01', 'synonyms': ['razorblade'], 'def': 'a blade that has very sharp edge', 'name': 'razorblade'}, {'frequency': 'c', 'id': 889, 'synset': 'reamer.n.01', 'synonyms': ['reamer_(juicer)', 'juicer', 'juice_reamer'], 'def': 'a squeezer with a conical ridged center that is used for squeezing juice from citrus fruit', 'name': 'reamer_(juicer)'}, {'frequency': 'f', 'id': 890, 'synset': 'rearview_mirror.n.01', 'synonyms': ['rearview_mirror'], 'def': 'car mirror that reflects the view out of the rear window', 'name': 'rearview_mirror'}, {'frequency': 'c', 'id': 891, 'synset': 'receipt.n.02', 'synonyms': ['receipt'], 'def': 'an acknowledgment (usually tangible) that payment has been made', 'name': 'receipt'}, {'frequency': 'c', 'id': 892, 'synset': 'recliner.n.01', 'synonyms': ['recliner', 'reclining_chair', 'lounger_(chair)'], 'def': 'an armchair whose back can be lowered and foot can be raised to allow the sitter to recline in it', 'name': 'recliner'}, {'frequency': 'r', 'id': 893, 'synset': 'record_player.n.01', 'synonyms': ['record_player', 'phonograph_(record_player)', 'turntable'], 'def': 'machine in which rotating records cause a stylus to vibrate and the vibrations are amplified acoustically or electronically', 'name': 'record_player'}, {'frequency': 'r', 'id': 894, 'synset': 'red_cabbage.n.02', 'synonyms': ['red_cabbage'], 'def': 'compact head of purplish-red leaves', 'name': 'red_cabbage'}, {'frequency': 'f', 'id': 895, 'synset': 'reflector.n.01', 'synonyms': ['reflector'], 'def': 'device that reflects light, radiation, etc.', 'name': 'reflector'}, {'frequency': 'f', 'id': 896, 'synset': 'remote_control.n.01', 'synonyms': ['remote_control'], 'def': 'a device that can be used to control a machine or apparatus from a distance', 'name': 'remote_control'}, {'frequency': 'c', 'id': 897, 'synset': 'rhinoceros.n.01', 'synonyms': ['rhinoceros'], 'def': 'massive powerful herbivorous odd-toed ungulate of southeast Asia and Africa having very thick skin and one or two horns on the 
snout', 'name': 'rhinoceros'}, {'frequency': 'r', 'id': 898, 'synset': 'rib.n.03', 'synonyms': ['rib_(food)'], 'def': 'cut of meat including one or more ribs', 'name': 'rib_(food)'}, {'frequency': 'r', 'id': 899, 'synset': 'rifle.n.01', 'synonyms': ['rifle'], 'def': 'a shoulder firearm with a long barrel', 'name': 'rifle'}, {'frequency': 'f', 'id': 900, 'synset': 'ring.n.08', 'synonyms': ['ring'], 'def': 'jewelry consisting of a circlet of precious metal (often set with jewels) worn on the finger', 'name': 'ring'}, {'frequency': 'r', 'id': 901, 'synset': 'river_boat.n.01', 'synonyms': ['river_boat'], 'def': 'a boat used on rivers or to ply a river', 'name': 'river_boat'}, {'frequency': 'r', 'id': 902, 'synset': 'road_map.n.02', 'synonyms': ['road_map'], 'def': '(NOT A ROAD) a MAP showing roads (for automobile travel)', 'name': 'road_map'}, {'frequency': 'c', 'id': 903, 'synset': 'robe.n.01', 'synonyms': ['robe'], 'def': 'any loose flowing garment', 'name': 'robe'}, {'frequency': 'c', 'id': 904, 'synset': 'rocking_chair.n.01', 'synonyms': ['rocking_chair'], 'def': 'a chair mounted on rockers', 'name': 'rocking_chair'}, {'frequency': 'r', 'id': 905, 'synset': 'roller_skate.n.01', 'synonyms': ['roller_skate'], 'def': 'a shoe with pairs of rollers (small hard wheels) fixed to the sole', 'name': 'roller_skate'}, {'frequency': 'r', 'id': 906, 'synset': 'rollerblade.n.01', 'synonyms': ['Rollerblade'], 'def': 'an in-line variant of a roller skate', 'name': 'Rollerblade'}, {'frequency': 'c', 'id': 907, 'synset': 'rolling_pin.n.01', 'synonyms': ['rolling_pin'], 'def': 'utensil consisting of a cylinder (usually of wood) with a handle at each end; used to roll out dough', 'name': 'rolling_pin'}, {'frequency': 'r', 'id': 908, 'synset': 'root_beer.n.01', 'synonyms': ['root_beer'], 'def': 'carbonated drink containing extracts of roots and herbs', 'name': 'root_beer'}, {'frequency': 'c', 'id': 909, 'synset': 'router.n.02', 'synonyms': ['router_(computer_equipment)'], 'def': 'a device that forwards data packets between computer networks', 'name': 'router_(computer_equipment)'}, {'frequency': 'f', 'id': 910, 'synset': 'rubber_band.n.01', 'synonyms': ['rubber_band', 'elastic_band'], 'def': 'a narrow band of elastic rubber used to hold things (such as papers) together', 'name': 'rubber_band'}, {'frequency': 'c', 'id': 911, 'synset': 'runner.n.08', 'synonyms': ['runner_(carpet)'], 'def': 'a long narrow carpet', 'name': 'runner_(carpet)'}, {'frequency': 'f', 'id': 912, 'synset': 'sack.n.01', 'synonyms': ['plastic_bag', 'paper_bag'], 'def': "a bag made of paper or plastic for holding customer's purchases", 'name': 'plastic_bag'}, {'frequency': 'f', 'id': 913, 'synset': 'saddle.n.01', 'synonyms': ['saddle_(on_an_animal)'], 'def': 'a seat for the rider of a horse or camel', 'name': 'saddle_(on_an_animal)'}, {'frequency': 'f', 'id': 914, 'synset': 'saddle_blanket.n.01', 'synonyms': ['saddle_blanket', 'saddlecloth', 'horse_blanket'], 'def': 'stable gear consisting of a blanket placed under the saddle', 'name': 'saddle_blanket'}, {'frequency': 'c', 'id': 915, 'synset': 'saddlebag.n.01', 'synonyms': ['saddlebag'], 'def': 'a large bag (or pair of bags) hung over a saddle', 'name': 'saddlebag'}, {'frequency': 'r', 'id': 916, 'synset': 'safety_pin.n.01', 'synonyms': ['safety_pin'], 'def': 'a pin in the form of a clasp; has a guard so the point of the pin will not stick the user', 'name': 'safety_pin'}, {'frequency': 'c', 'id': 917, 'synset': 'sail.n.01', 'synonyms': ['sail'], 'def': 'a large piece of fabric by means of 
which wind is used to propel a sailing vessel', 'name': 'sail'}, {'frequency': 'c', 'id': 918, 'synset': 'salad.n.01', 'synonyms': ['salad'], 'def': 'food mixtures either arranged on a plate or tossed and served with a moist dressing; usually consisting of or including greens', 'name': 'salad'}, {'frequency': 'r', 'id': 919, 'synset': 'salad_plate.n.01', 'synonyms': ['salad_plate', 'salad_bowl'], 'def': 'a plate or bowl for individual servings of salad', 'name': 'salad_plate'}, {'frequency': 'r', 'id': 920, 'synset': 'salami.n.01', 'synonyms': ['salami'], 'def': 'highly seasoned fatty sausage of pork and beef usually dried', 'name': 'salami'}, {'frequency': 'r', 'id': 921, 'synset': 'salmon.n.01', 'synonyms': ['salmon_(fish)'], 'def': 'any of various large food and game fishes of northern waters', 'name': 'salmon_(fish)'}, {'frequency': 'r', 'id': 922, 'synset': 'salmon.n.03', 'synonyms': ['salmon_(food)'], 'def': 'flesh of any of various marine or freshwater fish of the family Salmonidae', 'name': 'salmon_(food)'}, {'frequency': 'r', 'id': 923, 'synset': 'salsa.n.01', 'synonyms': ['salsa'], 'def': 'spicy sauce of tomatoes and onions and chili peppers to accompany Mexican foods', 'name': 'salsa'}, {'frequency': 'f', 'id': 924, 'synset': 'saltshaker.n.01', 'synonyms': ['saltshaker'], 'def': 'a shaker with a perforated top for sprinkling salt', 'name': 'saltshaker'}, {'frequency': 'f', 'id': 925, 'synset': 'sandal.n.01', 'synonyms': ['sandal_(type_of_shoe)'], 'def': 'a shoe consisting of a sole fastened by straps to the foot', 'name': 'sandal_(type_of_shoe)'}, {'frequency': 'f', 'id': 926, 'synset': 'sandwich.n.01', 'synonyms': ['sandwich'], 'def': 'two (or more) slices of bread with a filling between them', 'name': 'sandwich'}, {'frequency': 'r', 'id': 927, 'synset': 'satchel.n.01', 'synonyms': ['satchel'], 'def': 'luggage consisting of a small case with a flat bottom and (usually) a shoulder strap', 'name': 'satchel'}, {'frequency': 'r', 'id': 928, 'synset': 'saucepan.n.01', 'synonyms': ['saucepan'], 'def': 'a deep pan with a handle; used for stewing or boiling', 'name': 'saucepan'}, {'frequency': 'f', 'id': 929, 'synset': 'saucer.n.02', 'synonyms': ['saucer'], 'def': 'a small shallow dish for holding a cup at the table', 'name': 'saucer'}, {'frequency': 'f', 'id': 930, 'synset': 'sausage.n.01', 'synonyms': ['sausage'], 'def': 'highly seasoned minced meat stuffed in casings', 'name': 'sausage'}, {'frequency': 'r', 'id': 931, 'synset': 'sawhorse.n.01', 'synonyms': ['sawhorse', 'sawbuck'], 'def': 'a framework for holding wood that is being sawed', 'name': 'sawhorse'}, {'frequency': 'r', 'id': 932, 'synset': 'sax.n.02', 'synonyms': ['saxophone'], 'def': "a wind instrument with a `J'-shaped form typically made of brass", 'name': 'saxophone'}, {'frequency': 'f', 'id': 933, 'synset': 'scale.n.07', 'synonyms': ['scale_(measuring_instrument)'], 'def': 'a measuring instrument for weighing; shows amount of mass', 'name': 'scale_(measuring_instrument)'}, {'frequency': 'r', 'id': 934, 'synset': 'scarecrow.n.01', 'synonyms': ['scarecrow', 'strawman'], 'def': 'an effigy in the shape of a man to frighten birds away from seeds', 'name': 'scarecrow'}, {'frequency': 'f', 'id': 935, 'synset': 'scarf.n.01', 'synonyms': ['scarf'], 'def': 'a garment worn around the head or neck or shoulders for warmth or decoration', 'name': 'scarf'}, {'frequency': 'c', 'id': 936, 'synset': 'school_bus.n.01', 'synonyms': ['school_bus'], 'def': 'a bus used to transport children to or from school', 'name': 'school_bus'}, 
{'frequency': 'f', 'id': 937, 'synset': 'scissors.n.01', 'synonyms': ['scissors'], 'def': 'a tool having two crossed pivoting blades with looped handles', 'name': 'scissors'}, {'frequency': 'c', 'id': 938, 'synset': 'scoreboard.n.01', 'synonyms': ['scoreboard'], 'def': 'a large board for displaying the score of a contest (and some other information)', 'name': 'scoreboard'}, {'frequency': 'c', 'id': 939, 'synset': 'scrambled_eggs.n.01', 'synonyms': ['scrambled_eggs'], 'def': 'eggs beaten and cooked to a soft firm consistency while stirring', 'name': 'scrambled_eggs'}, {'frequency': 'r', 'id': 940, 'synset': 'scraper.n.01', 'synonyms': ['scraper'], 'def': 'any of various hand tools for scraping', 'name': 'scraper'}, {'frequency': 'r', 'id': 941, 'synset': 'scratcher.n.03', 'synonyms': ['scratcher'], 'def': 'a device used for scratching', 'name': 'scratcher'}, {'frequency': 'c', 'id': 942, 'synset': 'screwdriver.n.01', 'synonyms': ['screwdriver'], 'def': 'a hand tool for driving screws; has a tip that fits into the head of a screw', 'name': 'screwdriver'}, {'frequency': 'c', 'id': 943, 'synset': 'scrub_brush.n.01', 'synonyms': ['scrubbing_brush'], 'def': 'a brush with short stiff bristles for heavy cleaning', 'name': 'scrubbing_brush'}, {'frequency': 'c', 'id': 944, 'synset': 'sculpture.n.01', 'synonyms': ['sculpture'], 'def': 'a three-dimensional work of art', 'name': 'sculpture'}, {'frequency': 'r', 'id': 945, 'synset': 'seabird.n.01', 'synonyms': ['seabird', 'seafowl'], 'def': 'a bird that frequents coastal waters and the open ocean: gulls; pelicans; gannets; cormorants; albatrosses; petrels; etc.', 'name': 'seabird'}, {'frequency': 'r', 'id': 946, 'synset': 'seahorse.n.02', 'synonyms': ['seahorse'], 'def': 'small fish with horse-like heads bent sharply downward and curled tails', 'name': 'seahorse'}, {'frequency': 'r', 'id': 947, 'synset': 'seaplane.n.01', 'synonyms': ['seaplane', 'hydroplane'], 'def': 'an airplane that can land on or take off from water', 'name': 'seaplane'}, {'frequency': 'c', 'id': 948, 'synset': 'seashell.n.01', 'synonyms': ['seashell'], 'def': 'the shell of a marine organism', 'name': 'seashell'}, {'frequency': 'r', 'id': 949, 'synset': 'seedling.n.01', 'synonyms': ['seedling'], 'def': 'young plant or tree grown from a seed', 'name': 'seedling'}, {'frequency': 'c', 'id': 950, 'synset': 'serving_dish.n.01', 'synonyms': ['serving_dish'], 'def': 'a dish used for serving food', 'name': 'serving_dish'}, {'frequency': 'r', 'id': 951, 'synset': 'sewing_machine.n.01', 'synonyms': ['sewing_machine'], 'def': 'a textile machine used as a home appliance for sewing', 'name': 'sewing_machine'}, {'frequency': 'r', 'id': 952, 'synset': 'shaker.n.03', 'synonyms': ['shaker'], 'def': 'a container in which something can be shaken', 'name': 'shaker'}, {'frequency': 'c', 'id': 953, 'synset': 'shampoo.n.01', 'synonyms': ['shampoo'], 'def': 'cleansing agent consisting of soaps or detergents used for washing the hair', 'name': 'shampoo'}, {'frequency': 'r', 'id': 954, 'synset': 'shark.n.01', 'synonyms': ['shark'], 'def': 'typically large carnivorous fishes with sharp teeth', 'name': 'shark'}, {'frequency': 'r', 'id': 955, 'synset': 'sharpener.n.01', 'synonyms': ['sharpener'], 'def': 'any implement that is used to make something (an edge or a point) sharper', 'name': 'sharpener'}, {'frequency': 'r', 'id': 956, 'synset': 'sharpie.n.03', 'synonyms': ['Sharpie'], 'def': 'a pen with indelible ink that will write on any surface', 'name': 'Sharpie'}, {'frequency': 'r', 'id': 957, 'synset': 
'shaver.n.03', 'synonyms': ['shaver_(electric)', 'electric_shaver', 'electric_razor'], 'def': 'a razor powered by an electric motor', 'name': 'shaver_(electric)'}, {'frequency': 'c', 'id': 958, 'synset': 'shaving_cream.n.01', 'synonyms': ['shaving_cream', 'shaving_soap'], 'def': 'toiletry that forms a rich lather for softening the beard before shaving', 'name': 'shaving_cream'}, {'frequency': 'r', 'id': 959, 'synset': 'shawl.n.01', 'synonyms': ['shawl'], 'def': 'cloak consisting of an oblong piece of cloth used to cover the head and shoulders', 'name': 'shawl'}, {'frequency': 'r', 'id': 960, 'synset': 'shears.n.01', 'synonyms': ['shears'], 'def': 'large scissors with strong blades', 'name': 'shears'}, {'frequency': 'f', 'id': 961, 'synset': 'sheep.n.01', 'synonyms': ['sheep'], 'def': 'woolly usually horned ruminant mammal related to the goat', 'name': 'sheep'}, {'frequency': 'r', 'id': 962, 'synset': 'shepherd_dog.n.01', 'synonyms': ['shepherd_dog', 'sheepdog'], 'def': 'any of various usually long-haired breeds of dog reared to herd and guard sheep', 'name': 'shepherd_dog'}, {'frequency': 'r', 'id': 963, 'synset': 'sherbert.n.01', 'synonyms': ['sherbert', 'sherbet'], 'def': 'a frozen dessert made primarily of fruit juice and sugar', 'name': 'sherbert'}, {'frequency': 'r', 'id': 964, 'synset': 'shield.n.02', 'synonyms': ['shield'], 'def': 'armor carried on the arm to intercept blows', 'name': 'shield'}, {'frequency': 'f', 'id': 965, 'synset': 'shirt.n.01', 'synonyms': ['shirt'], 'def': 'a garment worn on the upper half of the body', 'name': 'shirt'}, {'frequency': 'f', 'id': 966, 'synset': 'shoe.n.01', 'synonyms': ['shoe', 'sneaker_(type_of_shoe)', 'tennis_shoe'], 'def': 'common footwear covering the foot', 'name': 'shoe'}, {'frequency': 'c', 'id': 967, 'synset': 'shopping_bag.n.01', 'synonyms': ['shopping_bag'], 'def': 'a bag made of plastic or strong paper (often with handles); used to transport goods after shopping', 'name': 'shopping_bag'}, {'frequency': 'c', 'id': 968, 'synset': 'shopping_cart.n.01', 'synonyms': ['shopping_cart'], 'def': 'a handcart that holds groceries or other goods while shopping', 'name': 'shopping_cart'}, {'frequency': 'f', 'id': 969, 'synset': 'short_pants.n.01', 'synonyms': ['short_pants', 'shorts_(clothing)', 'trunks_(clothing)'], 'def': 'trousers that end at or above the knee', 'name': 'short_pants'}, {'frequency': 'r', 'id': 970, 'synset': 'shot_glass.n.01', 'synonyms': ['shot_glass'], 'def': 'a small glass adequate to hold a single swallow of whiskey', 'name': 'shot_glass'}, {'frequency': 'c', 'id': 971, 'synset': 'shoulder_bag.n.01', 'synonyms': ['shoulder_bag'], 'def': 'a large handbag that can be carried by a strap looped over the shoulder', 'name': 'shoulder_bag'}, {'frequency': 'c', 'id': 972, 'synset': 'shovel.n.01', 'synonyms': ['shovel'], 'def': 'a hand tool for lifting loose material such as snow, dirt, etc.', 'name': 'shovel'}, {'frequency': 'f', 'id': 973, 'synset': 'shower.n.01', 'synonyms': ['shower_head'], 'def': 'a plumbing fixture that sprays water over you', 'name': 'shower_head'}, {'frequency': 'f', 'id': 974, 'synset': 'shower_curtain.n.01', 'synonyms': ['shower_curtain'], 'def': 'a curtain that keeps water from splashing out of the shower area', 'name': 'shower_curtain'}, {'frequency': 'r', 'id': 975, 'synset': 'shredder.n.01', 'synonyms': ['shredder_(for_paper)'], 'def': 'a device that shreds documents', 'name': 'shredder_(for_paper)'}, {'frequency': 'r', 'id': 976, 'synset': 'sieve.n.01', 'synonyms': ['sieve', 
'screen_(sieve)'], 'def': 'a strainer for separating lumps from powdered material or grading particles', 'name': 'sieve'}, {'frequency': 'f', 'id': 977, 'synset': 'signboard.n.01', 'synonyms': ['signboard'], 'def': 'structure displaying a board on which advertisements can be posted', 'name': 'signboard'}, {'frequency': 'c', 'id': 978, 'synset': 'silo.n.01', 'synonyms': ['silo'], 'def': 'a cylindrical tower used for storing goods', 'name': 'silo'}, {'frequency': 'f', 'id': 979, 'synset': 'sink.n.01', 'synonyms': ['sink'], 'def': 'plumbing fixture consisting of a water basin fixed to a wall or floor and having a drainpipe', 'name': 'sink'}, {'frequency': 'f', 'id': 980, 'synset': 'skateboard.n.01', 'synonyms': ['skateboard'], 'def': 'a board with wheels that is ridden in a standing or crouching position and propelled by foot', 'name': 'skateboard'}, {'frequency': 'c', 'id': 981, 'synset': 'skewer.n.01', 'synonyms': ['skewer'], 'def': 'a long pin for holding meat in position while it is being roasted', 'name': 'skewer'}, {'frequency': 'f', 'id': 982, 'synset': 'ski.n.01', 'synonyms': ['ski'], 'def': 'sports equipment for skiing on snow', 'name': 'ski'}, {'frequency': 'f', 'id': 983, 'synset': 'ski_boot.n.01', 'synonyms': ['ski_boot'], 'def': 'a stiff boot that is fastened to a ski with a ski binding', 'name': 'ski_boot'}, {'frequency': 'f', 'id': 984, 'synset': 'ski_parka.n.01', 'synonyms': ['ski_parka', 'ski_jacket'], 'def': 'a parka to be worn while skiing', 'name': 'ski_parka'}, {'frequency': 'f', 'id': 985, 'synset': 'ski_pole.n.01', 'synonyms': ['ski_pole'], 'def': 'a pole with metal points used as an aid in skiing', 'name': 'ski_pole'}, {'frequency': 'f', 'id': 986, 'synset': 'skirt.n.02', 'synonyms': ['skirt'], 'def': 'a garment hanging from the waist; worn mainly by girls and women', 'name': 'skirt'}, {'frequency': 'c', 'id': 987, 'synset': 'sled.n.01', 'synonyms': ['sled', 'sledge', 'sleigh'], 'def': 'a vehicle or flat object for transportation over snow by sliding or pulled by dogs, etc.', 'name': 'sled'}, {'frequency': 'c', 'id': 988, 'synset': 'sleeping_bag.n.01', 'synonyms': ['sleeping_bag'], 'def': 'large padded bag designed to be slept in outdoors', 'name': 'sleeping_bag'}, {'frequency': 'r', 'id': 989, 'synset': 'sling.n.05', 'synonyms': ['sling_(bandage)', 'triangular_bandage'], 'def': 'bandage to support an injured forearm; slung over the shoulder or neck', 'name': 'sling_(bandage)'}, {'frequency': 'c', 'id': 990, 'synset': 'slipper.n.01', 'synonyms': ['slipper_(footwear)', 'carpet_slipper_(footwear)'], 'def': 'low footwear that can be slipped on and off easily; usually worn indoors', 'name': 'slipper_(footwear)'}, {'frequency': 'r', 'id': 991, 'synset': 'smoothie.n.02', 'synonyms': ['smoothie'], 'def': 'a thick smooth drink consisting of fresh fruit pureed with ice cream or yoghurt or milk', 'name': 'smoothie'}, {'frequency': 'r', 'id': 992, 'synset': 'snake.n.01', 'synonyms': ['snake', 'serpent'], 'def': 'limbless scaly elongate reptile; some are venomous', 'name': 'snake'}, {'frequency': 'f', 'id': 993, 'synset': 'snowboard.n.01', 'synonyms': ['snowboard'], 'def': 'a board that resembles a broad ski or a small surfboard; used in a standing position to slide down snow-covered slopes', 'name': 'snowboard'}, {'frequency': 'c', 'id': 994, 'synset': 'snowman.n.01', 'synonyms': ['snowman'], 'def': 'a figure of a person made of packed snow', 'name': 'snowman'}, {'frequency': 'c', 'id': 995, 'synset': 'snowmobile.n.01', 'synonyms': ['snowmobile'], 'def': 'tracked vehicle for 
travel on snow having skis in front', 'name': 'snowmobile'}, {'frequency': 'f', 'id': 996, 'synset': 'soap.n.01', 'synonyms': ['soap'], 'def': 'a cleansing agent made from the salts of vegetable or animal fats', 'name': 'soap'}, {'frequency': 'f', 'id': 997, 'synset': 'soccer_ball.n.01', 'synonyms': ['soccer_ball'], 'def': "an inflated ball used in playing soccer (called `football' outside of the United States)", 'name': 'soccer_ball'}, {'frequency': 'f', 'id': 998, 'synset': 'sock.n.01', 'synonyms': ['sock'], 'def': 'cloth covering for the foot; worn inside the shoe; reaches to between the ankle and the knee', 'name': 'sock'}, {'frequency': 'r', 'id': 999, 'synset': 'soda_fountain.n.02', 'synonyms': ['soda_fountain'], 'def': 'an apparatus for dispensing soda water', 'name': 'soda_fountain'}, {'frequency': 'r', 'id': 1000, 'synset': 'soda_water.n.01', 'synonyms': ['carbonated_water', 'club_soda', 'seltzer', 'sparkling_water'], 'def': 'effervescent beverage artificially charged with carbon dioxide', 'name': 'carbonated_water'}, {'frequency': 'f', 'id': 1001, 'synset': 'sofa.n.01', 'synonyms': ['sofa', 'couch', 'lounge'], 'def': 'an upholstered seat for more than one person', 'name': 'sofa'}, {'frequency': 'r', 'id': 1002, 'synset': 'softball.n.01', 'synonyms': ['softball'], 'def': 'ball used in playing softball', 'name': 'softball'}, {'frequency': 'c', 'id': 1003, 'synset': 'solar_array.n.01', 'synonyms': ['solar_array', 'solar_battery', 'solar_panel'], 'def': 'electrical device consisting of a large array of connected solar cells', 'name': 'solar_array'}, {'frequency': 'r', 'id': 1004, 'synset': 'sombrero.n.02', 'synonyms': ['sombrero'], 'def': 'a straw hat with a tall crown and broad brim; worn in American southwest and in Mexico', 'name': 'sombrero'}, {'frequency': 'c', 'id': 1005, 'synset': 'soup.n.01', 'synonyms': ['soup'], 'def': 'liquid food especially of meat or fish or vegetable stock often containing pieces of solid food', 'name': 'soup'}, {'frequency': 'r', 'id': 1006, 'synset': 'soup_bowl.n.01', 'synonyms': ['soup_bowl'], 'def': 'a bowl for serving soup', 'name': 'soup_bowl'}, {'frequency': 'c', 'id': 1007, 'synset': 'soupspoon.n.01', 'synonyms': ['soupspoon'], 'def': 'a spoon with a rounded bowl for eating soup', 'name': 'soupspoon'}, {'frequency': 'c', 'id': 1008, 'synset': 'sour_cream.n.01', 'synonyms': ['sour_cream', 'soured_cream'], 'def': 'soured light cream', 'name': 'sour_cream'}, {'frequency': 'r', 'id': 1009, 'synset': 'soya_milk.n.01', 'synonyms': ['soya_milk', 'soybean_milk', 'soymilk'], 'def': 'a milk substitute containing soybean flour and water; used in some infant formulas and in making tofu', 'name': 'soya_milk'}, {'frequency': 'r', 'id': 1010, 'synset': 'space_shuttle.n.01', 'synonyms': ['space_shuttle'], 'def': "a reusable spacecraft with wings for a controlled descent through the Earth's atmosphere", 'name': 'space_shuttle'}, {'frequency': 'r', 'id': 1011, 'synset': 'sparkler.n.02', 'synonyms': ['sparkler_(fireworks)'], 'def': 'a firework that burns slowly and throws out a shower of sparks', 'name': 'sparkler_(fireworks)'}, {'frequency': 'f', 'id': 1012, 'synset': 'spatula.n.02', 'synonyms': ['spatula'], 'def': 'a hand tool with a thin flexible blade used to mix or spread soft substances', 'name': 'spatula'}, {'frequency': 'r', 'id': 1013, 'synset': 'spear.n.01', 'synonyms': ['spear', 'lance'], 'def': 'a long pointed rod used as a tool or weapon', 'name': 'spear'}, {'frequency': 'f', 'id': 1014, 'synset': 'spectacles.n.01', 'synonyms': ['spectacles', 'specs', 
'eyeglasses', 'glasses'], 'def': 'optical instrument consisting of a frame that holds a pair of lenses for correcting defective vision', 'name': 'spectacles'}, {'frequency': 'c', 'id': 1015, 'synset': 'spice_rack.n.01', 'synonyms': ['spice_rack'], 'def': 'a rack for displaying containers filled with spices', 'name': 'spice_rack'}, {'frequency': 'r', 'id': 1016, 'synset': 'spider.n.01', 'synonyms': ['spider'], 'def': 'predatory arachnid with eight legs, two poison fangs, two feelers, and usually two silk-spinning organs at the back end of the body', 'name': 'spider'}, {'frequency': 'c', 'id': 1017, 'synset': 'sponge.n.01', 'synonyms': ['sponge'], 'def': 'a porous mass usable to absorb water typically used for cleaning', 'name': 'sponge'}, {'frequency': 'f', 'id': 1018, 'synset': 'spoon.n.01', 'synonyms': ['spoon'], 'def': 'a piece of cutlery with a shallow bowl-shaped container and a handle', 'name': 'spoon'}, {'frequency': 'c', 'id': 1019, 'synset': 'sportswear.n.01', 'synonyms': ['sportswear', 'athletic_wear', 'activewear'], 'def': 'attire worn for sport or for casual wear', 'name': 'sportswear'}, {'frequency': 'c', 'id': 1020, 'synset': 'spotlight.n.02', 'synonyms': ['spotlight'], 'def': 'a lamp that produces a strong beam of light to illuminate a restricted area; used to focus attention of a stage performer', 'name': 'spotlight'}, {'frequency': 'r', 'id': 1021, 'synset': 'squirrel.n.01', 'synonyms': ['squirrel'], 'def': 'a kind of arboreal rodent having a long bushy tail', 'name': 'squirrel'}, {'frequency': 'c', 'id': 1022, 'synset': 'stapler.n.01', 'synonyms': ['stapler_(stapling_machine)'], 'def': 'a machine that inserts staples into sheets of paper in order to fasten them together', 'name': 'stapler_(stapling_machine)'}, {'frequency': 'r', 'id': 1023, 'synset': 'starfish.n.01', 'synonyms': ['starfish', 'sea_star'], 'def': 'echinoderms characterized by five arms extending from a central disk', 'name': 'starfish'}, {'frequency': 'f', 'id': 1024, 'synset': 'statue.n.01', 'synonyms': ['statue_(sculpture)'], 'def': 'a sculpture representing a human or animal', 'name': 'statue_(sculpture)'}, {'frequency': 'c', 'id': 1025, 'synset': 'steak.n.01', 'synonyms': ['steak_(food)'], 'def': 'a slice of meat cut from the fleshy part of an animal or large fish', 'name': 'steak_(food)'}, {'frequency': 'r', 'id': 1026, 'synset': 'steak_knife.n.01', 'synonyms': ['steak_knife'], 'def': 'a sharp table knife used in eating steak', 'name': 'steak_knife'}, {'frequency': 'r', 'id': 1027, 'synset': 'steamer.n.02', 'synonyms': ['steamer_(kitchen_appliance)'], 'def': 'a cooking utensil that can be used to cook food by steaming it', 'name': 'steamer_(kitchen_appliance)'}, {'frequency': 'f', 'id': 1028, 'synset': 'steering_wheel.n.01', 'synonyms': ['steering_wheel'], 'def': 'a handwheel that is used for steering', 'name': 'steering_wheel'}, {'frequency': 'r', 'id': 1029, 'synset': 'stencil.n.01', 'synonyms': ['stencil'], 'def': 'a sheet of material (metal, plastic, etc.) 
that has been perforated with a pattern; ink or paint can pass through the perforations to create the printed pattern on the surface below', 'name': 'stencil'}, {'frequency': 'r', 'id': 1030, 'synset': 'step_ladder.n.01', 'synonyms': ['stepladder'], 'def': 'a folding portable ladder hinged at the top', 'name': 'stepladder'}, {'frequency': 'c', 'id': 1031, 'synset': 'step_stool.n.01', 'synonyms': ['step_stool'], 'def': 'a stool that has one or two steps that fold under the seat', 'name': 'step_stool'}, {'frequency': 'c', 'id': 1032, 'synset': 'stereo.n.01', 'synonyms': ['stereo_(sound_system)'], 'def': 'electronic device for playing audio', 'name': 'stereo_(sound_system)'}, {'frequency': 'r', 'id': 1033, 'synset': 'stew.n.02', 'synonyms': ['stew'], 'def': 'food prepared by stewing especially meat or fish with vegetables', 'name': 'stew'}, {'frequency': 'r', 'id': 1034, 'synset': 'stirrer.n.02', 'synonyms': ['stirrer'], 'def': 'an implement used for stirring', 'name': 'stirrer'}, {'frequency': 'f', 'id': 1035, 'synset': 'stirrup.n.01', 'synonyms': ['stirrup'], 'def': "support consisting of metal loops into which rider's feet go", 'name': 'stirrup'}, {'frequency': 'c', 'id': 1036, 'synset': 'stocking.n.01', 'synonyms': ['stockings_(leg_wear)'], 'def': 'close-fitting hosiery to cover the foot and leg; come in matched pairs', 'name': 'stockings_(leg_wear)'}, {'frequency': 'f', 'id': 1037, 'synset': 'stool.n.01', 'synonyms': ['stool'], 'def': 'a simple seat without a back or arms', 'name': 'stool'}, {'frequency': 'f', 'id': 1038, 'synset': 'stop_sign.n.01', 'synonyms': ['stop_sign'], 'def': 'a traffic sign to notify drivers that they must come to a complete stop', 'name': 'stop_sign'}, {'frequency': 'f', 'id': 1039, 'synset': 'stoplight.n.01', 'synonyms': ['brake_light'], 'def': 'a red light on the rear of a motor vehicle that signals when the brakes are applied', 'name': 'brake_light'}, {'frequency': 'f', 'id': 1040, 'synset': 'stove.n.01', 'synonyms': ['stove', 'kitchen_stove', 'range_(kitchen_appliance)', 'kitchen_range', 'cooking_stove'], 'def': 'a kitchen appliance used for cooking food', 'name': 'stove'}, {'frequency': 'c', 'id': 1041, 'synset': 'strainer.n.01', 'synonyms': ['strainer'], 'def': 'a filter to retain larger pieces while smaller pieces and liquids pass through', 'name': 'strainer'}, {'frequency': 'f', 'id': 1042, 'synset': 'strap.n.01', 'synonyms': ['strap'], 'def': 'an elongated strip of material for binding things together or holding', 'name': 'strap'}, {'frequency': 'f', 'id': 1043, 'synset': 'straw.n.04', 'synonyms': ['straw_(for_drinking)', 'drinking_straw'], 'def': 'a thin paper or plastic tube used to suck liquids into the mouth', 'name': 'straw_(for_drinking)'}, {'frequency': 'f', 'id': 1044, 'synset': 'strawberry.n.01', 'synonyms': ['strawberry'], 'def': 'sweet fleshy red fruit', 'name': 'strawberry'}, {'frequency': 'f', 'id': 1045, 'synset': 'street_sign.n.01', 'synonyms': ['street_sign'], 'def': 'a sign visible from the street', 'name': 'street_sign'}, {'frequency': 'f', 'id': 1046, 'synset': 'streetlight.n.01', 'synonyms': ['streetlight', 'street_lamp'], 'def': 'a lamp supported on a lamppost; for illuminating a street', 'name': 'streetlight'}, {'frequency': 'r', 'id': 1047, 'synset': 'string_cheese.n.01', 'synonyms': ['string_cheese'], 'def': 'cheese formed in long strings twisted together', 'name': 'string_cheese'}, {'frequency': 'r', 'id': 1048, 'synset': 'stylus.n.02', 'synonyms': ['stylus'], 'def': 'a pointed tool for writing or drawing or engraving', 'name': 
'stylus'}, {'frequency': 'r', 'id': 1049, 'synset': 'subwoofer.n.01', 'synonyms': ['subwoofer'], 'def': 'a loudspeaker that is designed to reproduce very low bass frequencies', 'name': 'subwoofer'}, {'frequency': 'r', 'id': 1050, 'synset': 'sugar_bowl.n.01', 'synonyms': ['sugar_bowl'], 'def': 'a dish in which sugar is served', 'name': 'sugar_bowl'}, {'frequency': 'r', 'id': 1051, 'synset': 'sugarcane.n.01', 'synonyms': ['sugarcane_(plant)'], 'def': 'juicy canes whose sap is a source of molasses and commercial sugar; fresh canes are sometimes chewed for the juice', 'name': 'sugarcane_(plant)'}, {'frequency': 'c', 'id': 1052, 'synset': 'suit.n.01', 'synonyms': ['suit_(clothing)'], 'def': 'a set of garments (usually including a jacket and trousers or skirt) for outerwear all of the same fabric and color', 'name': 'suit_(clothing)'}, {'frequency': 'c', 'id': 1053, 'synset': 'sunflower.n.01', 'synonyms': ['sunflower'], 'def': 'any plant of the genus Helianthus having large flower heads with dark disk florets and showy yellow rays', 'name': 'sunflower'}, {'frequency': 'f', 'id': 1054, 'synset': 'sunglasses.n.01', 'synonyms': ['sunglasses'], 'def': 'spectacles that are darkened or polarized to protect the eyes from the glare of the sun', 'name': 'sunglasses'}, {'frequency': 'c', 'id': 1055, 'synset': 'sunhat.n.01', 'synonyms': ['sunhat'], 'def': 'a hat with a broad brim that protects the face from direct exposure to the sun', 'name': 'sunhat'}, {'frequency': 'r', 'id': 1056, 'synset': 'sunscreen.n.01', 'synonyms': ['sunscreen', 'sunblock'], 'def': 'a cream spread on the skin; contains a chemical to filter out ultraviolet light and so protect from sunburn', 'name': 'sunscreen'}, {'frequency': 'f', 'id': 1057, 'synset': 'surfboard.n.01', 'synonyms': ['surfboard'], 'def': 'a narrow buoyant board for riding surf', 'name': 'surfboard'}, {'frequency': 'c', 'id': 1058, 'synset': 'sushi.n.01', 'synonyms': ['sushi'], 'def': 'rice (with raw fish) wrapped in seaweed', 'name': 'sushi'}, {'frequency': 'c', 'id': 1059, 'synset': 'swab.n.02', 'synonyms': ['mop'], 'def': 'cleaning implement consisting of absorbent material fastened to a handle; for cleaning floors', 'name': 'mop'}, {'frequency': 'c', 'id': 1060, 'synset': 'sweat_pants.n.01', 'synonyms': ['sweat_pants'], 'def': 'loose-fitting trousers with elastic cuffs; worn by athletes', 'name': 'sweat_pants'}, {'frequency': 'c', 'id': 1061, 'synset': 'sweatband.n.02', 'synonyms': ['sweatband'], 'def': 'a band of material tied around the forehead or wrist to absorb sweat', 'name': 'sweatband'}, {'frequency': 'f', 'id': 1062, 'synset': 'sweater.n.01', 'synonyms': ['sweater'], 'def': 'a crocheted or knitted garment covering the upper part of the body', 'name': 'sweater'}, {'frequency': 'f', 'id': 1063, 'synset': 'sweatshirt.n.01', 'synonyms': ['sweatshirt'], 'def': 'cotton knit pullover with long sleeves worn during athletic activity', 'name': 'sweatshirt'}, {'frequency': 'c', 'id': 1064, 'synset': 'sweet_potato.n.02', 'synonyms': ['sweet_potato'], 'def': 'the edible tuberous root of the sweet potato vine', 'name': 'sweet_potato'}, {'frequency': 'f', 'id': 1065, 'synset': 'swimsuit.n.01', 'synonyms': ['swimsuit', 'swimwear', 'bathing_suit', 'swimming_costume', 'bathing_costume', 'swimming_trunks', 'bathing_trunks'], 'def': 'garment worn for swimming', 'name': 'swimsuit'}, {'frequency': 'c', 'id': 1066, 'synset': 'sword.n.01', 'synonyms': ['sword'], 'def': 'a cutting or thrusting weapon that has a long metal blade', 'name': 'sword'}, {'frequency': 'r', 'id': 1067, 
'synset': 'syringe.n.01', 'synonyms': ['syringe'], 'def': 'a medical instrument used to inject or withdraw fluids', 'name': 'syringe'}, {'frequency': 'r', 'id': 1068, 'synset': 'tabasco.n.02', 'synonyms': ['Tabasco_sauce'], 'def': 'very spicy sauce (trade name Tabasco) made from fully-aged red peppers', 'name': 'Tabasco_sauce'}, {'frequency': 'r', 'id': 1069, 'synset': 'table-tennis_table.n.01', 'synonyms': ['table-tennis_table', 'ping-pong_table'], 'def': 'a table used for playing table tennis', 'name': 'table-tennis_table'}, {'frequency': 'f', 'id': 1070, 'synset': 'table.n.02', 'synonyms': ['table'], 'def': 'a piece of furniture having a smooth flat top that is usually supported by one or more vertical legs', 'name': 'table'}, {'frequency': 'c', 'id': 1071, 'synset': 'table_lamp.n.01', 'synonyms': ['table_lamp'], 'def': 'a lamp that sits on a table', 'name': 'table_lamp'}, {'frequency': 'f', 'id': 1072, 'synset': 'tablecloth.n.01', 'synonyms': ['tablecloth'], 'def': 'a covering spread over a dining table', 'name': 'tablecloth'}, {'frequency': 'r', 'id': 1073, 'synset': 'tachometer.n.01', 'synonyms': ['tachometer'], 'def': 'measuring instrument for indicating speed of rotation', 'name': 'tachometer'}, {'frequency': 'r', 'id': 1074, 'synset': 'taco.n.02', 'synonyms': ['taco'], 'def': 'a small tortilla cupped around a filling', 'name': 'taco'}, {'frequency': 'f', 'id': 1075, 'synset': 'tag.n.02', 'synonyms': ['tag'], 'def': 'a label associated with something for the purpose of identification or information', 'name': 'tag'}, {'frequency': 'f', 'id': 1076, 'synset': 'taillight.n.01', 'synonyms': ['taillight', 'rear_light'], 'def': 'lamp (usually red) mounted at the rear of a motor vehicle', 'name': 'taillight'}, {'frequency': 'r', 'id': 1077, 'synset': 'tambourine.n.01', 'synonyms': ['tambourine'], 'def': 'a shallow drum with a single drumhead and with metallic disks in the sides', 'name': 'tambourine'}, {'frequency': 'r', 'id': 1078, 'synset': 'tank.n.01', 'synonyms': ['army_tank', 'armored_combat_vehicle', 'armoured_combat_vehicle'], 'def': 'an enclosed armored military vehicle; has a cannon and moves on caterpillar treads', 'name': 'army_tank'}, {'frequency': 'c', 'id': 1079, 'synset': 'tank.n.02', 'synonyms': ['tank_(storage_vessel)', 'storage_tank'], 'def': 'a large (usually metallic) vessel for holding gases or liquids', 'name': 'tank_(storage_vessel)'}, {'frequency': 'f', 'id': 1080, 'synset': 'tank_top.n.01', 'synonyms': ['tank_top_(clothing)'], 'def': 'a tight-fitting sleeveless shirt with wide shoulder straps and low neck and no front opening', 'name': 'tank_top_(clothing)'}, {'frequency': 'c', 'id': 1081, 'synset': 'tape.n.01', 'synonyms': ['tape_(sticky_cloth_or_paper)'], 'def': 'a long thin piece of cloth or paper as used for binding or fastening', 'name': 'tape_(sticky_cloth_or_paper)'}, {'frequency': 'c', 'id': 1082, 'synset': 'tape.n.04', 'synonyms': ['tape_measure', 'measuring_tape'], 'def': 'measuring instrument consisting of a narrow strip (cloth or metal) marked in inches or centimeters and used for measuring lengths', 'name': 'tape_measure'}, {'frequency': 'c', 'id': 1083, 'synset': 'tapestry.n.02', 'synonyms': ['tapestry'], 'def': 'a heavy textile with a woven design; used for curtains and upholstery', 'name': 'tapestry'}, {'frequency': 'f', 'id': 1084, 'synset': 'tarpaulin.n.01', 'synonyms': ['tarp'], 'def': 'waterproofed canvas', 'name': 'tarp'}, {'frequency': 'c', 'id': 1085, 'synset': 'tartan.n.01', 'synonyms': ['tartan', 'plaid'], 'def': 'a cloth having a 
crisscross design', 'name': 'tartan'}, {'frequency': 'c', 'id': 1086, 'synset': 'tassel.n.01', 'synonyms': ['tassel'], 'def': 'adornment consisting of a bunch of cords fastened at one end', 'name': 'tassel'}, {'frequency': 'r', 'id': 1087, 'synset': 'tea_bag.n.01', 'synonyms': ['tea_bag'], 'def': 'a measured amount of tea in a bag for an individual serving of tea', 'name': 'tea_bag'}, {'frequency': 'c', 'id': 1088, 'synset': 'teacup.n.02', 'synonyms': ['teacup'], 'def': 'a cup from which tea is drunk', 'name': 'teacup'}, {'frequency': 'c', 'id': 1089, 'synset': 'teakettle.n.01', 'synonyms': ['teakettle'], 'def': 'kettle for boiling water to make tea', 'name': 'teakettle'}, {'frequency': 'c', 'id': 1090, 'synset': 'teapot.n.01', 'synonyms': ['teapot'], 'def': 'pot for brewing tea; usually has a spout and handle', 'name': 'teapot'}, {'frequency': 'f', 'id': 1091, 'synset': 'teddy.n.01', 'synonyms': ['teddy_bear'], 'def': "plaything consisting of a child's toy bear (usually plush and stuffed with soft materials)", 'name': 'teddy_bear'}, {'frequency': 'f', 'id': 1092, 'synset': 'telephone.n.01', 'synonyms': ['telephone', 'phone', 'telephone_set'], 'def': 'electronic device for communicating by voice over long distances', 'name': 'telephone'}, {'frequency': 'c', 'id': 1093, 'synset': 'telephone_booth.n.01', 'synonyms': ['telephone_booth', 'phone_booth', 'call_box', 'telephone_box', 'telephone_kiosk'], 'def': 'booth for using a telephone', 'name': 'telephone_booth'}, {'frequency': 'f', 'id': 1094, 'synset': 'telephone_pole.n.01', 'synonyms': ['telephone_pole', 'telegraph_pole', 'telegraph_post'], 'def': 'tall pole supporting telephone wires', 'name': 'telephone_pole'}, {'frequency': 'r', 'id': 1095, 'synset': 'telephoto_lens.n.01', 'synonyms': ['telephoto_lens', 'zoom_lens'], 'def': 'a camera lens that magnifies the image', 'name': 'telephoto_lens'}, {'frequency': 'c', 'id': 1096, 'synset': 'television_camera.n.01', 'synonyms': ['television_camera', 'tv_camera'], 'def': 'television equipment for capturing and recording video', 'name': 'television_camera'}, {'frequency': 'f', 'id': 1097, 'synset': 'television_receiver.n.01', 'synonyms': ['television_set', 'tv', 'tv_set'], 'def': 'an electronic device that receives television signals and displays them on a screen', 'name': 'television_set'}, {'frequency': 'f', 'id': 1098, 'synset': 'tennis_ball.n.01', 'synonyms': ['tennis_ball'], 'def': 'ball about the size of a fist used in playing tennis', 'name': 'tennis_ball'}, {'frequency': 'f', 'id': 1099, 'synset': 'tennis_racket.n.01', 'synonyms': ['tennis_racket'], 'def': 'a racket used to play tennis', 'name': 'tennis_racket'}, {'frequency': 'r', 'id': 1100, 'synset': 'tequila.n.01', 'synonyms': ['tequila'], 'def': 'Mexican liquor made from fermented juices of an agave plant', 'name': 'tequila'}, {'frequency': 'c', 'id': 1101, 'synset': 'thermometer.n.01', 'synonyms': ['thermometer'], 'def': 'measuring instrument for measuring temperature', 'name': 'thermometer'}, {'frequency': 'c', 'id': 1102, 'synset': 'thermos.n.01', 'synonyms': ['thermos_bottle'], 'def': 'vacuum flask that preserves temperature of hot or cold drinks', 'name': 'thermos_bottle'}, {'frequency': 'c', 'id': 1103, 'synset': 'thermostat.n.01', 'synonyms': ['thermostat'], 'def': 'a regulator for automatically regulating temperature by starting or stopping the supply of heat', 'name': 'thermostat'}, {'frequency': 'r', 'id': 1104, 'synset': 'thimble.n.02', 'synonyms': ['thimble'], 'def': 'a small metal cap to protect the finger while sewing; 
can be used as a small container', 'name': 'thimble'}, {'frequency': 'c', 'id': 1105, 'synset': 'thread.n.01', 'synonyms': ['thread', 'yarn'], 'def': 'a fine cord of twisted fibers (of cotton or silk or wool or nylon etc.) used in sewing and weaving', 'name': 'thread'}, {'frequency': 'c', 'id': 1106, 'synset': 'thumbtack.n.01', 'synonyms': ['thumbtack', 'drawing_pin', 'pushpin'], 'def': 'a tack for attaching papers to a bulletin board or drawing board', 'name': 'thumbtack'}, {'frequency': 'c', 'id': 1107, 'synset': 'tiara.n.01', 'synonyms': ['tiara'], 'def': 'a jeweled headdress worn by women on formal occasions', 'name': 'tiara'}, {'frequency': 'c', 'id': 1108, 'synset': 'tiger.n.02', 'synonyms': ['tiger'], 'def': 'large feline of forests in most of Asia having a tawny coat with black stripes', 'name': 'tiger'}, {'frequency': 'c', 'id': 1109, 'synset': 'tights.n.01', 'synonyms': ['tights_(clothing)', 'leotards'], 'def': 'skintight knit hose covering the body from the waist to the feet worn by acrobats and dancers and as stockings by women and girls', 'name': 'tights_(clothing)'}, {'frequency': 'c', 'id': 1110, 'synset': 'timer.n.01', 'synonyms': ['timer', 'stopwatch'], 'def': 'a timepiece that measures a time interval and signals its end', 'name': 'timer'}, {'frequency': 'f', 'id': 1111, 'synset': 'tinfoil.n.01', 'synonyms': ['tinfoil'], 'def': 'foil made of tin or an alloy of tin and lead', 'name': 'tinfoil'}, {'frequency': 'r', 'id': 1112, 'synset': 'tinsel.n.01', 'synonyms': ['tinsel'], 'def': 'a showy decoration that is basically valueless', 'name': 'tinsel'}, {'frequency': 'f', 'id': 1113, 'synset': 'tissue.n.02', 'synonyms': ['tissue_paper'], 'def': 'a soft thin (usually translucent) paper', 'name': 'tissue_paper'}, {'frequency': 'c', 'id': 1114, 'synset': 'toast.n.01', 'synonyms': ['toast_(food)'], 'def': 'slice of bread that has been toasted', 'name': 'toast_(food)'}, {'frequency': 'f', 'id': 1115, 'synset': 'toaster.n.02', 'synonyms': ['toaster'], 'def': 'a kitchen appliance (usually electric) for toasting bread', 'name': 'toaster'}, {'frequency': 'c', 'id': 1116, 'synset': 'toaster_oven.n.01', 'synonyms': ['toaster_oven'], 'def': 'kitchen appliance consisting of a small electric oven for toasting or warming food', 'name': 'toaster_oven'}, {'frequency': 'f', 'id': 1117, 'synset': 'toilet.n.02', 'synonyms': ['toilet'], 'def': 'a plumbing fixture for defecation and urination', 'name': 'toilet'}, {'frequency': 'f', 'id': 1118, 'synset': 'toilet_tissue.n.01', 'synonyms': ['toilet_tissue', 'toilet_paper', 'bathroom_tissue'], 'def': 'a soft thin absorbent paper for use in toilets', 'name': 'toilet_tissue'}, {'frequency': 'f', 'id': 1119, 'synset': 'tomato.n.01', 'synonyms': ['tomato'], 'def': 'mildly acid red or yellow pulpy fruit eaten as a vegetable', 'name': 'tomato'}, {'frequency': 'c', 'id': 1120, 'synset': 'tongs.n.01', 'synonyms': ['tongs'], 'def': 'any of various devices for taking hold of objects; usually have two hinged legs with handles above and pointed hooks below', 'name': 'tongs'}, {'frequency': 'c', 'id': 1121, 'synset': 'toolbox.n.01', 'synonyms': ['toolbox'], 'def': 'a box or chest or cabinet for holding hand tools', 'name': 'toolbox'}, {'frequency': 'f', 'id': 1122, 'synset': 'toothbrush.n.01', 'synonyms': ['toothbrush'], 'def': 'small brush; has long handle; used to clean teeth', 'name': 'toothbrush'}, {'frequency': 'f', 'id': 1123, 'synset': 'toothpaste.n.01', 'synonyms': ['toothpaste'], 'def': 'a dentifrice in the form of a paste', 'name': 'toothpaste'}, 
{'frequency': 'c', 'id': 1124, 'synset': 'toothpick.n.01', 'synonyms': ['toothpick'], 'def': 'pick consisting of a small strip of wood or plastic; used to pick food from between the teeth', 'name': 'toothpick'}, {'frequency': 'c', 'id': 1125, 'synset': 'top.n.09', 'synonyms': ['cover'], 'def': 'covering for a hole (especially a hole in the top of a container)', 'name': 'cover'}, {'frequency': 'c', 'id': 1126, 'synset': 'tortilla.n.01', 'synonyms': ['tortilla'], 'def': 'thin unleavened pancake made from cornmeal or wheat flour', 'name': 'tortilla'}, {'frequency': 'c', 'id': 1127, 'synset': 'tow_truck.n.01', 'synonyms': ['tow_truck'], 'def': 'a truck equipped to hoist and pull wrecked cars (or to remove cars from no-parking zones)', 'name': 'tow_truck'}, {'frequency': 'f', 'id': 1128, 'synset': 'towel.n.01', 'synonyms': ['towel'], 'def': 'a rectangular piece of absorbent cloth (or paper) for drying or wiping', 'name': 'towel'}, {'frequency': 'f', 'id': 1129, 'synset': 'towel_rack.n.01', 'synonyms': ['towel_rack', 'towel_rail', 'towel_bar'], 'def': 'a rack consisting of one or more bars on which towels can be hung', 'name': 'towel_rack'}, {'frequency': 'f', 'id': 1130, 'synset': 'toy.n.03', 'synonyms': ['toy'], 'def': 'a device regarded as providing amusement', 'name': 'toy'}, {'frequency': 'c', 'id': 1131, 'synset': 'tractor.n.01', 'synonyms': ['tractor_(farm_equipment)'], 'def': 'a wheeled vehicle with large wheels; used in farming and other applications', 'name': 'tractor_(farm_equipment)'}, {'frequency': 'f', 'id': 1132, 'synset': 'traffic_light.n.01', 'synonyms': ['traffic_light'], 'def': 'a device to control vehicle traffic often consisting of three or more lights', 'name': 'traffic_light'}, {'frequency': 'r', 'id': 1133, 'synset': 'trail_bike.n.01', 'synonyms': ['dirt_bike'], 'def': 'a lightweight motorcycle equipped with rugged tires and suspension for off-road use', 'name': 'dirt_bike'}, {'frequency': 'c', 'id': 1134, 'synset': 'trailer_truck.n.01', 'synonyms': ['trailer_truck', 'tractor_trailer', 'trucking_rig', 'articulated_lorry', 'semi_truck'], 'def': 'a truck consisting of a tractor and trailer together', 'name': 'trailer_truck'}, {'frequency': 'f', 'id': 1135, 'synset': 'train.n.01', 'synonyms': ['train_(railroad_vehicle)', 'railroad_train'], 'def': 'public or private transport provided by a line of railway cars coupled together and drawn by a locomotive', 'name': 'train_(railroad_vehicle)'}, {'frequency': 'r', 'id': 1136, 'synset': 'trampoline.n.01', 'synonyms': ['trampoline'], 'def': 'gymnastic apparatus consisting of a strong canvas sheet attached with springs to a metal frame', 'name': 'trampoline'}, {'frequency': 'f', 'id': 1137, 'synset': 'tray.n.01', 'synonyms': ['tray'], 'def': 'an open receptacle for holding or displaying or serving articles or food', 'name': 'tray'}, {'frequency': 'r', 'id': 1138, 'synset': 'tree_house.n.01', 'synonyms': ['tree_house'], 'def': '(NOT A TREE) a PLAYHOUSE built in the branches of a tree', 'name': 'tree_house'}, {'frequency': 'r', 'id': 1139, 'synset': 'trench_coat.n.01', 'synonyms': ['trench_coat'], 'def': 'a military style raincoat; belted with deep pockets', 'name': 'trench_coat'}, {'frequency': 'r', 'id': 1140, 'synset': 'triangle.n.05', 'synonyms': ['triangle_(musical_instrument)'], 'def': 'a percussion instrument consisting of a metal bar bent in the shape of an open triangle', 'name': 'triangle_(musical_instrument)'}, {'frequency': 'r', 'id': 1141, 'synset': 'tricycle.n.01', 'synonyms': ['tricycle'], 'def': 'a vehicle with three 
wheels that is moved by foot pedals', 'name': 'tricycle'}, {'frequency': 'c', 'id': 1142, 'synset': 'tripod.n.01', 'synonyms': ['tripod'], 'def': 'a three-legged rack used for support', 'name': 'tripod'}, {'frequency': 'f', 'id': 1143, 'synset': 'trouser.n.01', 'synonyms': ['trousers', 'pants_(clothing)'], 'def': 'a garment extending from the waist to the knee or ankle, covering each leg separately', 'name': 'trousers'}, {'frequency': 'f', 'id': 1144, 'synset': 'truck.n.01', 'synonyms': ['truck'], 'def': 'an automotive vehicle suitable for hauling', 'name': 'truck'}, {'frequency': 'r', 'id': 1145, 'synset': 'truffle.n.03', 'synonyms': ['truffle_(chocolate)', 'chocolate_truffle'], 'def': 'creamy chocolate candy', 'name': 'truffle_(chocolate)'}, {'frequency': 'c', 'id': 1146, 'synset': 'trunk.n.02', 'synonyms': ['trunk'], 'def': 'luggage consisting of a large strong case used when traveling or for storage', 'name': 'trunk'}, {'frequency': 'r', 'id': 1147, 'synset': 'tub.n.02', 'synonyms': ['vat'], 'def': 'a large open vessel for holding or storing liquids', 'name': 'vat'}, {'frequency': 'c', 'id': 1148, 'synset': 'turban.n.01', 'synonyms': ['turban'], 'def': 'a traditional headdress consisting of a long scarf wrapped around the head', 'name': 'turban'}, {'frequency': 'r', 'id': 1149, 'synset': 'turkey.n.01', 'synonyms': ['turkey_(bird)'], 'def': 'large gallinaceous bird with fan-shaped tail; widely domesticated for food', 'name': 'turkey_(bird)'}, {'frequency': 'c', 'id': 1150, 'synset': 'turkey.n.04', 'synonyms': ['turkey_(food)'], 'def': 'flesh of large domesticated fowl usually roasted', 'name': 'turkey_(food)'}, {'frequency': 'r', 'id': 1151, 'synset': 'turnip.n.01', 'synonyms': ['turnip'], 'def': 'widely cultivated plant having a large fleshy edible white or yellow root', 'name': 'turnip'}, {'frequency': 'c', 'id': 1152, 'synset': 'turtle.n.02', 'synonyms': ['turtle'], 'def': 'any of various aquatic and land reptiles having a bony shell and flipper-like limbs for swimming', 'name': 'turtle'}, {'frequency': 'r', 'id': 1153, 'synset': 'turtleneck.n.01', 'synonyms': ['turtleneck_(clothing)', 'polo-neck'], 'def': 'a sweater or jersey with a high close-fitting collar', 'name': 'turtleneck_(clothing)'}, {'frequency': 'r', 'id': 1154, 'synset': 'typewriter.n.01', 'synonyms': ['typewriter'], 'def': 'hand-operated character printer for printing written messages one character at a time', 'name': 'typewriter'}, {'frequency': 'f', 'id': 1155, 'synset': 'umbrella.n.01', 'synonyms': ['umbrella'], 'def': 'a lightweight handheld collapsible canopy', 'name': 'umbrella'}, {'frequency': 'c', 'id': 1156, 'synset': 'underwear.n.01', 'synonyms': ['underwear', 'underclothes', 'underclothing', 'underpants'], 'def': 'undergarment worn next to the skin and under the outer garments', 'name': 'underwear'}, {'frequency': 'r', 'id': 1157, 'synset': 'unicycle.n.01', 'synonyms': ['unicycle'], 'def': 'a vehicle with a single wheel that is driven by pedals', 'name': 'unicycle'}, {'frequency': 'c', 'id': 1158, 'synset': 'urinal.n.01', 'synonyms': ['urinal'], 'def': 'a plumbing fixture (usually attached to the wall) used by men to urinate', 'name': 'urinal'}, {'frequency': 'r', 'id': 1159, 'synset': 'urn.n.01', 'synonyms': ['urn'], 'def': 'a large vase that usually has a pedestal or feet', 'name': 'urn'}, {'frequency': 'c', 'id': 1160, 'synset': 'vacuum.n.04', 'synonyms': ['vacuum_cleaner'], 'def': 'an electrical home appliance that cleans by suction', 'name': 'vacuum_cleaner'}, {'frequency': 'c', 'id': 1161, 'synset': 
'valve.n.03', 'synonyms': ['valve'], 'def': 'control consisting of a mechanical device for controlling the flow of a fluid', 'name': 'valve'}, {'frequency': 'f', 'id': 1162, 'synset': 'vase.n.01', 'synonyms': ['vase'], 'def': 'an open jar of glass or porcelain used as an ornament or to hold flowers', 'name': 'vase'}, {'frequency': 'c', 'id': 1163, 'synset': 'vending_machine.n.01', 'synonyms': ['vending_machine'], 'def': 'a slot machine for selling goods', 'name': 'vending_machine'}, {'frequency': 'f', 'id': 1164, 'synset': 'vent.n.01', 'synonyms': ['vent', 'blowhole', 'air_vent'], 'def': 'a hole for the escape of gas or air', 'name': 'vent'}, {'frequency': 'c', 'id': 1165, 'synset': 'videotape.n.01', 'synonyms': ['videotape'], 'def': 'a video recording made on magnetic tape', 'name': 'videotape'}, {'frequency': 'r', 'id': 1166, 'synset': 'vinegar.n.01', 'synonyms': ['vinegar'], 'def': 'sour-tasting liquid produced usually by oxidation of the alcohol in wine or cider and used as a condiment or food preservative', 'name': 'vinegar'}, {'frequency': 'r', 'id': 1167, 'synset': 'violin.n.01', 'synonyms': ['violin', 'fiddle'], 'def': 'bowed stringed instrument that is the highest member of the violin family', 'name': 'violin'}, {'frequency': 'r', 'id': 1168, 'synset': 'vodka.n.01', 'synonyms': ['vodka'], 'def': 'unaged colorless liquor originating in Russia', 'name': 'vodka'}, {'frequency': 'r', 'id': 1169, 'synset': 'volleyball.n.02', 'synonyms': ['volleyball'], 'def': 'an inflated ball used in playing volleyball', 'name': 'volleyball'}, {'frequency': 'r', 'id': 1170, 'synset': 'vulture.n.01', 'synonyms': ['vulture'], 'def': 'any of various large birds of prey having naked heads and weak claws and feeding chiefly on carrion', 'name': 'vulture'}, {'frequency': 'c', 'id': 1171, 'synset': 'waffle.n.01', 'synonyms': ['waffle'], 'def': 'pancake batter baked in a waffle iron', 'name': 'waffle'}, {'frequency': 'r', 'id': 1172, 'synset': 'waffle_iron.n.01', 'synonyms': ['waffle_iron'], 'def': 'a kitchen appliance for baking waffles', 'name': 'waffle_iron'}, {'frequency': 'c', 'id': 1173, 'synset': 'wagon.n.01', 'synonyms': ['wagon'], 'def': 'any of various kinds of wheeled vehicles drawn by an animal or a tractor', 'name': 'wagon'}, {'frequency': 'c', 'id': 1174, 'synset': 'wagon_wheel.n.01', 'synonyms': ['wagon_wheel'], 'def': 'a wheel of a wagon', 'name': 'wagon_wheel'}, {'frequency': 'c', 'id': 1175, 'synset': 'walking_stick.n.01', 'synonyms': ['walking_stick'], 'def': 'a stick carried in the hand for support in walking', 'name': 'walking_stick'}, {'frequency': 'c', 'id': 1176, 'synset': 'wall_clock.n.01', 'synonyms': ['wall_clock'], 'def': 'a clock mounted on a wall', 'name': 'wall_clock'}, {'frequency': 'f', 'id': 1177, 'synset': 'wall_socket.n.01', 'synonyms': ['wall_socket', 'wall_plug', 'electric_outlet', 'electrical_outlet', 'outlet', 'electric_receptacle'], 'def': 'receptacle providing a place in a wiring system where current can be taken to run electrical devices', 'name': 'wall_socket'}, {'frequency': 'c', 'id': 1178, 'synset': 'wallet.n.01', 'synonyms': ['wallet', 'billfold'], 'def': 'a pocket-size case for holding papers and paper money', 'name': 'wallet'}, {'frequency': 'r', 'id': 1179, 'synset': 'walrus.n.01', 'synonyms': ['walrus'], 'def': 'either of two large northern marine mammals having ivory tusks and tough hide over thick blubber', 'name': 'walrus'}, {'frequency': 'r', 'id': 1180, 'synset': 'wardrobe.n.01', 'synonyms': ['wardrobe'], 'def': 'a tall piece of furniture that provides 
storage space for clothes; has a door and rails or hooks for hanging clothes', 'name': 'wardrobe'}, {'frequency': 'r', 'id': 1181, 'synset': 'wasabi.n.02', 'synonyms': ['wasabi'], 'def': 'the thick green root of the wasabi plant that the Japanese use in cooking and that tastes like strong horseradish', 'name': 'wasabi'}, {'frequency': 'c', 'id': 1182, 'synset': 'washer.n.03', 'synonyms': ['automatic_washer', 'washing_machine'], 'def': 'a home appliance for washing clothes and linens automatically', 'name': 'automatic_washer'}, {'frequency': 'f', 'id': 1183, 'synset': 'watch.n.01', 'synonyms': ['watch', 'wristwatch'], 'def': 'a small, portable timepiece', 'name': 'watch'}, {'frequency': 'f', 'id': 1184, 'synset': 'water_bottle.n.01', 'synonyms': ['water_bottle'], 'def': 'a bottle for holding water', 'name': 'water_bottle'}, {'frequency': 'c', 'id': 1185, 'synset': 'water_cooler.n.01', 'synonyms': ['water_cooler'], 'def': 'a device for cooling and dispensing drinking water', 'name': 'water_cooler'}, {'frequency': 'c', 'id': 1186, 'synset': 'water_faucet.n.01', 'synonyms': ['water_faucet', 'water_tap', 'tap_(water_faucet)'], 'def': 'a faucet for drawing water from a pipe or cask', 'name': 'water_faucet'}, {'frequency': 'r', 'id': 1187, 'synset': 'water_filter.n.01', 'synonyms': ['water_filter'], 'def': 'a filter to remove impurities from the water supply', 'name': 'water_filter'}, {'frequency': 'r', 'id': 1188, 'synset': 'water_heater.n.01', 'synonyms': ['water_heater', 'hot-water_heater'], 'def': 'a heater and storage tank to supply heated water', 'name': 'water_heater'}, {'frequency': 'r', 'id': 1189, 'synset': 'water_jug.n.01', 'synonyms': ['water_jug'], 'def': 'a jug that holds water', 'name': 'water_jug'}, {'frequency': 'r', 'id': 1190, 'synset': 'water_pistol.n.01', 'synonyms': ['water_gun', 'squirt_gun'], 'def': 'plaything consisting of a toy pistol that squirts water', 'name': 'water_gun'}, {'frequency': 'c', 'id': 1191, 'synset': 'water_scooter.n.01', 'synonyms': ['water_scooter', 'sea_scooter', 'jet_ski'], 'def': 'a motorboat resembling a motor scooter (NOT A SURFBOARD OR WATER SKI)', 'name': 'water_scooter'}, {'frequency': 'c', 'id': 1192, 'synset': 'water_ski.n.01', 'synonyms': ['water_ski'], 'def': 'broad ski for skimming over water towed by a speedboat (DO NOT MARK WATER)', 'name': 'water_ski'}, {'frequency': 'c', 'id': 1193, 'synset': 'water_tower.n.01', 'synonyms': ['water_tower'], 'def': 'a large reservoir for water', 'name': 'water_tower'}, {'frequency': 'c', 'id': 1194, 'synset': 'watering_can.n.01', 'synonyms': ['watering_can'], 'def': 'a container with a handle and a spout with a perforated nozzle; used to sprinkle water over plants', 'name': 'watering_can'}, {'frequency': 'c', 'id': 1195, 'synset': 'watermelon.n.02', 'synonyms': ['watermelon'], 'def': 'large oblong or roundish melon with a hard green rind and sweet watery red or occasionally yellowish pulp', 'name': 'watermelon'}, {'frequency': 'f', 'id': 1196, 'synset': 'weathervane.n.01', 'synonyms': ['weathervane', 'vane_(weathervane)', 'wind_vane'], 'def': 'mechanical device attached to an elevated structure; rotates freely to show the direction of the wind', 'name': 'weathervane'}, {'frequency': 'c', 'id': 1197, 'synset': 'webcam.n.01', 'synonyms': ['webcam'], 'def': 'a digital camera designed to take digital photographs and transmit them over the internet', 'name': 'webcam'}, {'frequency': 'c', 'id': 1198, 'synset': 'wedding_cake.n.01', 'synonyms': ['wedding_cake', 'bridecake'], 'def': 'a rich cake with two or more 
tiers and covered with frosting and decorations; served at a wedding reception', 'name': 'wedding_cake'}, {'frequency': 'c', 'id': 1199, 'synset': 'wedding_ring.n.01', 'synonyms': ['wedding_ring', 'wedding_band'], 'def': 'a ring given to the bride and/or groom at the wedding', 'name': 'wedding_ring'}, {'frequency': 'f', 'id': 1200, 'synset': 'wet_suit.n.01', 'synonyms': ['wet_suit'], 'def': 'a close-fitting garment made of a permeable material; worn in cold water to retain body heat', 'name': 'wet_suit'}, {'frequency': 'f', 'id': 1201, 'synset': 'wheel.n.01', 'synonyms': ['wheel'], 'def': 'a circular frame with spokes (or a solid disc) that can rotate on a shaft or axle', 'name': 'wheel'}, {'frequency': 'c', 'id': 1202, 'synset': 'wheelchair.n.01', 'synonyms': ['wheelchair'], 'def': 'a movable chair mounted on large wheels', 'name': 'wheelchair'}, {'frequency': 'c', 'id': 1203, 'synset': 'whipped_cream.n.01', 'synonyms': ['whipped_cream'], 'def': 'cream that has been beaten until light and fluffy', 'name': 'whipped_cream'}, {'frequency': 'r', 'id': 1204, 'synset': 'whiskey.n.01', 'synonyms': ['whiskey'], 'def': 'a liquor made from fermented mash of grain', 'name': 'whiskey'}, {'frequency': 'r', 'id': 1205, 'synset': 'whistle.n.03', 'synonyms': ['whistle'], 'def': 'a small wind instrument that produces a whistling sound by blowing into it', 'name': 'whistle'}, {'frequency': 'r', 'id': 1206, 'synset': 'wick.n.02', 'synonyms': ['wick'], 'def': 'a loosely woven cord in a candle or oil lamp that is lit on fire', 'name': 'wick'}, {'frequency': 'c', 'id': 1207, 'synset': 'wig.n.01', 'synonyms': ['wig'], 'def': 'hairpiece covering the head and made of real or synthetic hair', 'name': 'wig'}, {'frequency': 'c', 'id': 1208, 'synset': 'wind_chime.n.01', 'synonyms': ['wind_chime'], 'def': 'a decorative arrangement of pieces of metal or glass or pottery that hang together loosely so the wind can cause them to tinkle', 'name': 'wind_chime'}, {'frequency': 'c', 'id': 1209, 'synset': 'windmill.n.01', 'synonyms': ['windmill'], 'def': 'a mill that is powered by the wind', 'name': 'windmill'}, {'frequency': 'c', 'id': 1210, 'synset': 'window_box.n.01', 'synonyms': ['window_box_(for_plants)'], 'def': 'a container for growing plants on a windowsill', 'name': 'window_box_(for_plants)'}, {'frequency': 'f', 'id': 1211, 'synset': 'windshield_wiper.n.01', 'synonyms': ['windshield_wiper', 'windscreen_wiper', 'wiper_(for_windshield/screen)'], 'def': 'a mechanical device that cleans the windshield', 'name': 'windshield_wiper'}, {'frequency': 'c', 'id': 1212, 'synset': 'windsock.n.01', 'synonyms': ['windsock', 'air_sock', 'air-sleeve', 'wind_sleeve', 'wind_cone'], 'def': 'a truncated cloth cone mounted on a mast/pole; shows wind direction', 'name': 'windsock'}, {'frequency': 'f', 'id': 1213, 'synset': 'wine_bottle.n.01', 'synonyms': ['wine_bottle'], 'def': 'a bottle for holding wine', 'name': 'wine_bottle'}, {'frequency': 'r', 'id': 1214, 'synset': 'wine_bucket.n.01', 'synonyms': ['wine_bucket', 'wine_cooler'], 'def': 'a bucket of ice used to chill a bottle of wine', 'name': 'wine_bucket'}, {'frequency': 'f', 'id': 1215, 'synset': 'wineglass.n.01', 'synonyms': ['wineglass'], 'def': 'a glass that has a stem and in which wine is served', 'name': 'wineglass'}, {'frequency': 'r', 'id': 1216, 'synset': 'wing_chair.n.01', 'synonyms': ['wing_chair'], 'def': 'easy chair having wings on each side of a high back', 'name': 'wing_chair'}, {'frequency': 'c', 'id': 1217, 'synset': 'winker.n.02', 'synonyms': ['blinder_(for_horses)'], 
'def': 'blinds that prevent a horse from seeing something on either side', 'name': 'blinder_(for_horses)'}, {'frequency': 'c', 'id': 1218, 'synset': 'wok.n.01', 'synonyms': ['wok'], 'def': 'pan with a convex bottom; used for frying in Chinese cooking', 'name': 'wok'}, {'frequency': 'r', 'id': 1219, 'synset': 'wolf.n.01', 'synonyms': ['wolf'], 'def': 'a wild carnivorous mammal of the dog family, living and hunting in packs', 'name': 'wolf'}, {'frequency': 'c', 'id': 1220, 'synset': 'wooden_spoon.n.02', 'synonyms': ['wooden_spoon'], 'def': 'a spoon made of wood', 'name': 'wooden_spoon'}, {'frequency': 'c', 'id': 1221, 'synset': 'wreath.n.01', 'synonyms': ['wreath'], 'def': 'an arrangement of flowers, leaves, or stems fastened in a ring', 'name': 'wreath'}, {'frequency': 'c', 'id': 1222, 'synset': 'wrench.n.03', 'synonyms': ['wrench', 'spanner'], 'def': 'a hand tool that is used to hold or twist a nut or bolt', 'name': 'wrench'}, {'frequency': 'c', 'id': 1223, 'synset': 'wristband.n.01', 'synonyms': ['wristband'], 'def': 'band consisting of a part of a sleeve that covers the wrist', 'name': 'wristband'}, {'frequency': 'f', 'id': 1224, 'synset': 'wristlet.n.01', 'synonyms': ['wristlet', 'wrist_band'], 'def': 'a band or bracelet worn around the wrist', 'name': 'wristlet'}, {'frequency': 'r', 'id': 1225, 'synset': 'yacht.n.01', 'synonyms': ['yacht'], 'def': 'an expensive vessel propelled by sail or power and used for cruising or racing', 'name': 'yacht'}, {'frequency': 'r', 'id': 1226, 'synset': 'yak.n.02', 'synonyms': ['yak'], 'def': 'large long-haired wild ox of Tibet often domesticated', 'name': 'yak'}, {'frequency': 'c', 'id': 1227, 'synset': 'yogurt.n.01', 'synonyms': ['yogurt', 'yoghurt', 'yoghourt'], 'def': 'a custard-like food made from curdled milk', 'name': 'yogurt'}, {'frequency': 'r', 'id': 1228, 'synset': 'yoke.n.07', 'synonyms': ['yoke_(animal_equipment)'], 'def': 'gear joining two animals at the neck; NOT egg yolk', 'name': 'yoke_(animal_equipment)'}, {'frequency': 'f', 'id': 1229, 'synset': 'zebra.n.01', 'synonyms': ['zebra'], 'def': 'any of several fleet black-and-white striped African equines', 'name': 'zebra'}, {'frequency': 'c', 'id': 1230, 'synset': 'zucchini.n.02', 'synonyms': ['zucchini', 'courgette'], 'def': 'small cucumber-shaped vegetable marrow; typically dark green', 'name': 'zucchini'}] # noqa -# fmt: on diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/data/samplers/densepose_cse_base.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/data/samplers/densepose_cse_base.py deleted file mode 100644 index 845545c1438b9d2a4fbb4c6dac0642461a7e539f..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/projects/DensePose/densepose/data/samplers/densepose_cse_base.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
- -from typing import Any, Dict, List, Tuple -import torch -from torch.nn import functional as F - -from detectron2.config import CfgNode -from detectron2.structures import Instances - -from densepose.converters.base import IntTupleBox -from densepose.data.utils import get_class_to_mesh_name_mapping -from densepose.modeling.cse.utils import squared_euclidean_distance_matrix -from densepose.structures import DensePoseDataRelative - -from .densepose_base import DensePoseBaseSampler - - -class DensePoseCSEBaseSampler(DensePoseBaseSampler): - """ - Base DensePose sampler to produce DensePose data from DensePose predictions. - Samples for each class are drawn according to some distribution over all pixels estimated - to belong to that class. - """ - - def __init__( - self, - cfg: CfgNode, - use_gt_categories: bool, - embedder: torch.nn.Module, - count_per_class: int = 8, - ): - """ - Constructor - - Args: - cfg (CfgNode): the config of the model - embedder (torch.nn.Module): necessary to compute mesh vertex embeddings - count_per_class (int): the sampler produces at most `count_per_class` - samples for each category - """ - super().__init__(count_per_class) - self.embedder = embedder - self.class_to_mesh_name = get_class_to_mesh_name_mapping(cfg) - self.use_gt_categories = use_gt_categories - - def _sample(self, instance: Instances, bbox_xywh: IntTupleBox) -> Dict[str, List[Any]]: - """ - Sample DensePoseDataRelative from estimation results - """ - if self.use_gt_categories: - instance_class = instance.dataset_classes.tolist()[0] - else: - instance_class = instance.pred_classes.tolist()[0] - mesh_name = self.class_to_mesh_name[instance_class] - - annotation = { - DensePoseDataRelative.X_KEY: [], - DensePoseDataRelative.Y_KEY: [], - DensePoseDataRelative.VERTEX_IDS_KEY: [], - DensePoseDataRelative.MESH_NAME_KEY: mesh_name, - } - - mask, embeddings, other_values = self._produce_mask_and_results(instance, bbox_xywh) - indices = torch.nonzero(mask, as_tuple=True) - selected_embeddings = embeddings.permute(1, 2, 0)[indices].cpu() - values = other_values[:, indices[0], indices[1]] - k = values.shape[1] - - count = min(self.count_per_class, k) - if count <= 0: - return annotation - - index_sample = self._produce_index_sample(values, count) - closest_vertices = squared_euclidean_distance_matrix( - selected_embeddings[index_sample], self.embedder(mesh_name) - ) - closest_vertices = torch.argmin(closest_vertices, dim=1) - - sampled_y = indices[0][index_sample] + 0.5 - sampled_x = indices[1][index_sample] + 0.5 - # prepare / normalize data - _, _, w, h = bbox_xywh - x = (sampled_x / w * 256.0).cpu().tolist() - y = (sampled_y / h * 256.0).cpu().tolist() - # extend annotations - annotation[DensePoseDataRelative.X_KEY].extend(x) - annotation[DensePoseDataRelative.Y_KEY].extend(y) - annotation[DensePoseDataRelative.VERTEX_IDS_KEY].extend(closest_vertices.cpu().tolist()) - return annotation - - def _produce_mask_and_results( - self, instance: Instances, bbox_xywh: IntTupleBox - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Method to get labels and DensePose results from an instance - - Args: - instance (Instances): an instance of `DensePoseEmbeddingPredictorOutput` - bbox_xywh (IntTupleBox): the corresponding bounding box - - Return: - mask (torch.Tensor): shape [H, W], DensePose segmentation mask - embeddings (Tuple[torch.Tensor]): a tensor of shape [D, H, W], - DensePose CSE Embeddings - other_values (Tuple[torch.Tensor]): a tensor of shape [0, H, W], - for potential other values - """ - 
densepose_output = instance.pred_densepose - S = densepose_output.coarse_segm - E = densepose_output.embedding - _, _, w, h = bbox_xywh - embeddings = F.interpolate(E, size=(h, w), mode="bilinear")[0] - coarse_segm_resized = F.interpolate(S, size=(h, w), mode="bilinear")[0] - mask = coarse_segm_resized.argmax(0) > 0 - other_values = torch.empty((0, h, w), device=E.device) - return mask, embeddings, other_values - - def _resample_mask(self, output: Any) -> torch.Tensor: - """ - Convert DensePose predictor output to segmentation annotation - tensors of size - (256, 256) and type `int64`. - - Args: - output: DensePose predictor output with the following attributes: - - coarse_segm: tensor of size [N, D, H, W] with unnormalized coarse - segmentation scores - Return: - Tensor of size (S, S) and type `int64` with coarse segmentation annotations, - where S = DensePoseDataRelative.MASK_SIZE - """ - sz = DensePoseDataRelative.MASK_SIZE - mask = ( - F.interpolate(output.coarse_segm, (sz, sz), mode="bilinear", align_corners=False) - .argmax(dim=1) - .long() - .squeeze() - .cpu() - ) - return mask diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/tests/data/test_transforms.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/tests/data/test_transforms.py deleted file mode 100644 index 382048e533708dec3fabf89528564ebc2ad4c83f..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/tests/data/test_transforms.py +++ /dev/null @@ -1,268 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. - -import logging -import numpy as np -import unittest -from unittest import mock -import torch -from PIL import Image, ImageOps -from torch.nn import functional as F - -from detectron2.config import get_cfg -from detectron2.data import detection_utils -from detectron2.data import transforms as T -from detectron2.utils.logger import setup_logger - -logger = logging.getLogger(__name__) - - -def polygon_allclose(poly1, poly2): - """ - Test whether two polygons are the same. - Both arguments are nx2 numpy arrays. - """ - # ABCD and CDAB are the same polygon. 
So it's important to check after rolling - for k in range(len(poly1)): - rolled_poly1 = np.roll(poly1, k, axis=0) - if np.allclose(rolled_poly1, poly2): - return True - return False - - -class TestTransforms(unittest.TestCase): - def setUp(self): - setup_logger() - - def test_apply_rotated_boxes(self): - np.random.seed(125) - cfg = get_cfg() - is_train = True - augs = detection_utils.build_augmentation(cfg, is_train) - image = np.random.rand(200, 300) - image, transforms = T.apply_augmentations(augs, image) - image_shape = image.shape[:2] # h, w - assert image_shape == (800, 1200) - annotation = {"bbox": [179, 97, 62, 40, -56]} - - boxes = np.array([annotation["bbox"]], dtype=np.float64) # boxes.shape = (1, 5) - transformed_bbox = transforms.apply_rotated_box(boxes)[0] - - expected_bbox = np.array([484, 388, 248, 160, 56], dtype=np.float64) - err_msg = "transformed_bbox = {}, expected {}".format(transformed_bbox, expected_bbox) - assert np.allclose(transformed_bbox, expected_bbox), err_msg - - def test_resize_and_crop(self): - np.random.seed(125) - min_scale = 0.2 - max_scale = 2.0 - target_height = 1100 - target_width = 1000 - resize_aug = T.ResizeScale(min_scale, max_scale, target_height, target_width) - fixed_size_crop_aug = T.FixedSizeCrop((target_height, target_width)) - hflip_aug = T.RandomFlip() - augs = [resize_aug, fixed_size_crop_aug, hflip_aug] - original_image = np.random.rand(900, 800) - image, transforms = T.apply_augmentations(augs, original_image) - image_shape = image.shape[:2] # h, w - self.assertEqual((1100, 1000), image_shape) - - boxes = np.array( - [[91, 46, 144, 111], [523, 251, 614, 295]], - dtype=np.float64, - ) - transformed_bboxs = transforms.apply_box(boxes) - expected_bboxs = np.array( - [ - [895.42, 33.42666667, 933.91125, 80.66], - [554.0825, 182.39333333, 620.17125, 214.36666667], - ], - dtype=np.float64, - ) - err_msg = "transformed_bbox = {}, expected {}".format(transformed_bboxs, expected_bboxs) - self.assertTrue(np.allclose(transformed_bboxs, expected_bboxs), err_msg) - - polygon = np.array([[91, 46], [144, 46], [144, 111], [91, 111]]) - transformed_polygons = transforms.apply_polygons([polygon]) - expected_polygon = np.array([[934.0, 33.0], [934.0, 80.0], [896.0, 80.0], [896.0, 33.0]]) - self.assertEqual(1, len(transformed_polygons)) - err_msg = "transformed_polygon = {}, expected {}".format( - transformed_polygons[0], expected_polygon - ) - self.assertTrue(polygon_allclose(transformed_polygons[0], expected_polygon), err_msg) - - def test_apply_rotated_boxes_unequal_scaling_factor(self): - np.random.seed(125) - h, w = 400, 200 - newh, neww = 800, 800 - image = np.random.rand(h, w) - augs = [] - augs.append(T.Resize(shape=(newh, neww))) - image, transforms = T.apply_augmentations(augs, image) - image_shape = image.shape[:2] # h, w - assert image_shape == (newh, neww) - - boxes = np.array( - [ - [150, 100, 40, 20, 0], - [150, 100, 40, 20, 30], - [150, 100, 40, 20, 90], - [150, 100, 40, 20, -90], - ], - dtype=np.float64, - ) - transformed_boxes = transforms.apply_rotated_box(boxes) - - expected_bboxes = np.array( - [ - [600, 200, 160, 40, 0], - [600, 200, 144.22205102, 52.91502622, 49.10660535], - [600, 200, 80, 80, 90], - [600, 200, 80, 80, -90], - ], - dtype=np.float64, - ) - err_msg = "transformed_boxes = {}, expected {}".format(transformed_boxes, expected_bboxes) - assert np.allclose(transformed_boxes, expected_bboxes), err_msg - - def test_print_augmentation(self): - t = T.RandomCrop("relative", (100, 100)) - self.assertEqual(str(t), 
"RandomCrop(crop_type='relative', crop_size=(100, 100))") - - t0 = T.RandomFlip(prob=0.5) - self.assertEqual(str(t0), "RandomFlip(prob=0.5)") - - t1 = T.RandomFlip() - self.assertEqual(str(t1), "RandomFlip()") - - t = T.AugmentationList([t0, t1]) - self.assertEqual(str(t), f"AugmentationList[{t0}, {t1}]") - - def test_random_apply_prob_out_of_range_check(self): - test_probabilities = {0.0: True, 0.5: True, 1.0: True, -0.01: False, 1.01: False} - - for given_probability, is_valid in test_probabilities.items(): - if not is_valid: - self.assertRaises(AssertionError, T.RandomApply, None, prob=given_probability) - else: - T.RandomApply(T.NoOpTransform(), prob=given_probability) - - def test_random_apply_wrapping_aug_probability_occured_evaluation(self): - transform_mock = mock.MagicMock(name="MockTransform", spec=T.Augmentation) - image_mock = mock.MagicMock(name="MockImage") - random_apply = T.RandomApply(transform_mock, prob=0.001) - - with mock.patch.object(random_apply, "_rand_range", return_value=0.0001): - transform = random_apply.get_transform(image_mock) - transform_mock.get_transform.assert_called_once_with(image_mock) - self.assertIsNot(transform, transform_mock) - - def test_random_apply_wrapping_std_transform_probability_occured_evaluation(self): - transform_mock = mock.MagicMock(name="MockTransform", spec=T.Transform) - image_mock = mock.MagicMock(name="MockImage") - random_apply = T.RandomApply(transform_mock, prob=0.001) - - with mock.patch.object(random_apply, "_rand_range", return_value=0.0001): - transform = random_apply.get_transform(image_mock) - self.assertIs(transform, transform_mock) - - def test_random_apply_probability_not_occured_evaluation(self): - transform_mock = mock.MagicMock(name="MockTransform", spec=T.Augmentation) - image_mock = mock.MagicMock(name="MockImage") - random_apply = T.RandomApply(transform_mock, prob=0.001) - - with mock.patch.object(random_apply, "_rand_range", return_value=0.9): - transform = random_apply.get_transform(image_mock) - transform_mock.get_transform.assert_not_called() - self.assertIsInstance(transform, T.NoOpTransform) - - def test_augmentation_input_args(self): - input_shape = (100, 100) - output_shape = (50, 50) - - # define two augmentations with different args - class TG1(T.Augmentation): - def get_transform(self, image, sem_seg): - return T.ResizeTransform( - input_shape[0], input_shape[1], output_shape[0], output_shape[1] - ) - - class TG2(T.Augmentation): - def get_transform(self, image): - assert image.shape[:2] == output_shape # check that TG1 is applied - return T.HFlipTransform(output_shape[1]) - - image = np.random.rand(*input_shape).astype("float32") - sem_seg = (np.random.rand(*input_shape) < 0.5).astype("uint8") - inputs = T.AugInput(image, sem_seg=sem_seg) # provide two args - tfms = inputs.apply_augmentations([TG1(), TG2()]) - self.assertIsInstance(tfms[0], T.ResizeTransform) - self.assertIsInstance(tfms[1], T.HFlipTransform) - self.assertTrue(inputs.image.shape[:2] == output_shape) - self.assertTrue(inputs.sem_seg.shape[:2] == output_shape) - - class TG3(T.Augmentation): - def get_transform(self, image, nonexist): - pass - - with self.assertRaises(AttributeError): - inputs.apply_augmentations([TG3()]) - - def test_augmentation_list(self): - input_shape = (100, 100) - image = np.random.rand(*input_shape).astype("float32") - sem_seg = (np.random.rand(*input_shape) < 0.5).astype("uint8") - inputs = T.AugInput(image, sem_seg=sem_seg) # provide two args - - augs = T.AugmentationList([T.RandomFlip(), T.Resize(20)]) - _ = 
T.AugmentationList([augs, T.Resize(30)])(inputs) - # 3 in latest fvcore (flattened transformlist), 2 in older - # self.assertEqual(len(tfms), 3) - - def test_color_transforms(self): - rand_img = np.random.random((100, 100, 3)) * 255 - rand_img = rand_img.astype("uint8") - - # Test no-op - noop_transform = T.ColorTransform(lambda img: img) - self.assertTrue(np.array_equal(rand_img, noop_transform.apply_image(rand_img))) - - # Test a ImageOps operation - magnitude = np.random.randint(0, 256) - solarize_transform = T.PILColorTransform(lambda img: ImageOps.solarize(img, magnitude)) - expected_img = ImageOps.solarize(Image.fromarray(rand_img), magnitude) - self.assertTrue(np.array_equal(expected_img, solarize_transform.apply_image(rand_img))) - - def test_resize_transform(self): - input_shapes = [(100, 100), (100, 100, 1), (100, 100, 3)] - output_shapes = [(200, 200), (200, 200, 1), (200, 200, 3)] - for in_shape, out_shape in zip(input_shapes, output_shapes): - in_img = np.random.randint(0, 255, size=in_shape, dtype=np.uint8) - tfm = T.ResizeTransform(in_shape[0], in_shape[1], out_shape[0], out_shape[1]) - out_img = tfm.apply_image(in_img) - self.assertEqual(out_img.shape, out_shape) - - def test_resize_shorted_edge_scriptable(self): - def f(image): - newh, neww = T.ResizeShortestEdge.get_output_shape( - image.shape[-2], image.shape[-1], 80, 133 - ) - return F.interpolate(image.unsqueeze(0), size=(newh, neww)) - - input = torch.randn(3, 10, 10) - script_f = torch.jit.script(f) - self.assertTrue(torch.allclose(f(input), script_f(input))) - - # generalize to new shapes - input = torch.randn(3, 8, 100) - self.assertTrue(torch.allclose(f(input), script_f(input))) - - def test_extent_transform(self): - input_shapes = [(100, 100), (100, 100, 1), (100, 100, 3)] - src_rect = (20, 20, 80, 80) - output_shapes = [(200, 200), (200, 200, 1), (200, 200, 3)] - for in_shape, out_shape in zip(input_shapes, output_shapes): - in_img = np.random.randint(0, 255, size=in_shape, dtype=np.uint8) - tfm = T.ExtentTransform(src_rect, out_shape[:2]) - out_img = tfm.apply_image(in_img) - self.assertTrue(out_img.shape == out_shape) diff --git a/spaces/cbr/swp/face_parsing/__init__.py b/spaces/cbr/swp/face_parsing/__init__.py deleted file mode 100644 index e98735aec33d8a4f5525f7ca03f1285d18782285..0000000000000000000000000000000000000000 --- a/spaces/cbr/swp/face_parsing/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .swap import init_parser, swap_regions, mask_regions, mask_regions_to_list, SoftErosion \ No newline at end of file diff --git a/spaces/chadpanda/PEPE-Semantics/README.md b/spaces/chadpanda/PEPE-Semantics/README.md deleted file mode 100644 index a46d27e81f776374f1f79f926a8ebfe9b9eed423..0000000000000000000000000000000000000000 --- a/spaces/chadpanda/PEPE-Semantics/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: PEPE Semantics -emoji: 😻 -colorFrom: purple -colorTo: pink -sdk: gradio -sdk_version: 3.4.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/chendl/compositional_test/multimodal/tools/add_vg_to_blip2_data.py b/spaces/chendl/compositional_test/multimodal/tools/add_vg_to_blip2_data.py deleted file mode 100644 index 2b1718fd35d61e8360999c83166e7581041a9690..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/multimodal/tools/add_vg_to_blip2_data.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -import shutil -import glob -import random -from pprint import 
pprint - -DIR_VG = "/gpfs/u/home/LMCG/LMCGljnn/scratch-shared/junyan/raw/vg_0826" -DIR = "/gpfs/u/home/LMCG/LMCGljnn/scratch-shared/junyan/raw/blip2_all_data_ground" -OUT_DIR = "/gpfs/u/home/LMCG/LMCGljnn/scratch-shared/junyan/raw/blip2_all_data_ground_with_vg_0826" - - -if __name__ == "__main__": - os.makedirs(OUT_DIR, exist_ok=True) - blip2_tars = glob.glob(os.path.join(DIR, "*.tar")) - vg_tars = glob.glob(os.path.join(DIR_VG, "*", "*.tar")) - tars = [] - tars.extend(blip2_tars) - tars.extend(vg_tars) - print(len(tars)) - pprint(tars[:20]) - pprint(tars[-20:]) - for i, tar in enumerate(tars): - dst = os.path.join(OUT_DIR, f"{str(i).zfill(6)}.tar") - # print(tar, dst) - os.symlink(tar, dst) diff --git a/spaces/chendl/compositional_test/transformers/docker/transformers-cpu/Dockerfile b/spaces/chendl/compositional_test/transformers/docker/transformers-cpu/Dockerfile deleted file mode 100644 index c3590e4239e470be8fbc8100128efd264fb41c7e..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/docker/transformers-cpu/Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -FROM ubuntu:18.04 -LABEL maintainer="Hugging Face" -LABEL repository="transformers" - -RUN apt update && \ - apt install -y bash \ - build-essential \ - git \ - curl \ - ca-certificates \ - python3 \ - python3-pip && \ - rm -rf /var/lib/apt/lists - -RUN python3 -m pip install --no-cache-dir --upgrade pip && \ - python3 -m pip install --no-cache-dir \ - jupyter \ - tensorflow-cpu \ - torch - -WORKDIR /workspace -COPY . transformers/ -RUN cd transformers/ && \ - python3 -m pip install --no-cache-dir . - -CMD ["/bin/bash"] diff --git a/spaces/chendl/compositional_test/transformers/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py b/spaces/chendl/compositional_test/transformers/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py deleted file mode 100644 index 3c13de36ec188ac05e8d43bba751a0c173824b72..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py +++ /dev/null @@ -1,520 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2022 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and - -import json -import logging -import os -import random -import sys -from dataclasses import dataclass, field -from typing import Optional - -import evaluate -import numpy as np -import torch -from datasets import load_dataset -from huggingface_hub import hf_hub_download -from PIL import Image -from torch import nn -from torchvision import transforms -from torchvision.transforms import functional - -import transformers -from transformers import ( - AutoConfig, - AutoImageProcessor, - AutoModelForSemanticSegmentation, - HfArgumentParser, - Trainer, - TrainingArguments, - default_data_collator, -) -from transformers.trainer_utils import get_last_checkpoint -from transformers.utils import check_min_version, send_example_telemetry -from transformers.utils.versions import require_version - - -""" Finetuning any 🤗 Transformers model supported by AutoModelForSemanticSegmentation for semantic segmentation leveraging the Trainer API.""" - -logger = logging.getLogger(__name__) - -# Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.28.0") - -require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/semantic-segmentation/requirements.txt") - - -def pad_if_smaller(img, size, fill=0): - size = (size, size) if isinstance(size, int) else size - original_width, original_height = img.size - pad_height = size[1] - original_height if original_height < size[1] else 0 - pad_width = size[0] - original_width if original_width < size[0] else 0 - img = functional.pad(img, (0, 0, pad_width, pad_height), fill=fill) - return img - - -class Compose: - def __init__(self, transforms): - self.transforms = transforms - - def __call__(self, image, target): - for t in self.transforms: - image, target = t(image, target) - return image, target - - -class Identity: - def __init__(self): - pass - - def __call__(self, image, target): - return image, target - - -class Resize: - def __init__(self, size): - self.size = size - - def __call__(self, image, target): - image = functional.resize(image, self.size) - target = functional.resize(target, self.size, interpolation=transforms.InterpolationMode.NEAREST) - return image, target - - -class RandomResize: - def __init__(self, min_size, max_size=None): - self.min_size = min_size - if max_size is None: - max_size = min_size - self.max_size = max_size - - def __call__(self, image, target): - size = random.randint(self.min_size, self.max_size) - image = functional.resize(image, size) - target = functional.resize(target, size, interpolation=transforms.InterpolationMode.NEAREST) - return image, target - - -class RandomCrop: - def __init__(self, size): - self.size = size if isinstance(size, tuple) else (size, size) - - def __call__(self, image, target): - image = pad_if_smaller(image, self.size) - target = pad_if_smaller(target, self.size, fill=255) - crop_params = transforms.RandomCrop.get_params(image, self.size) - image = functional.crop(image, *crop_params) - target = functional.crop(target, *crop_params) - return image, target - - -class RandomHorizontalFlip: - def __init__(self, flip_prob): - self.flip_prob = flip_prob - - def __call__(self, image, target): - if random.random() < self.flip_prob: - image = functional.hflip(image) - target = functional.hflip(target) - return image, target - - -class PILToTensor: - def __call__(self, image, target): - image = functional.pil_to_tensor(image) - target = torch.as_tensor(np.array(target), 
dtype=torch.int64) - return image, target - - -class ConvertImageDtype: - def __init__(self, dtype): - self.dtype = dtype - - def __call__(self, image, target): - image = functional.convert_image_dtype(image, self.dtype) - return image, target - - -class Normalize: - def __init__(self, mean, std): - self.mean = mean - self.std = std - - def __call__(self, image, target): - image = functional.normalize(image, mean=self.mean, std=self.std) - return image, target - - -class ReduceLabels: - def __call__(self, image, target): - if not isinstance(target, np.ndarray): - target = np.array(target).astype(np.uint8) - # avoid using underflow conversion - target[target == 0] = 255 - target = target - 1 - target[target == 254] = 255 - - target = Image.fromarray(target) - return image, target - - -@dataclass -class DataTrainingArguments: - """ - Arguments pertaining to what data we are going to input our model for training and eval. - Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify - them on the command line. - """ - - dataset_name: Optional[str] = field( - default="segments/sidewalk-semantic", - metadata={ - "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)." - }, - ) - dataset_config_name: Optional[str] = field( - default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} - ) - train_val_split: Optional[float] = field( - default=0.15, metadata={"help": "Percent to split off of train for validation."} - ) - max_train_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ) - }, - ) - max_eval_samples: Optional[int] = field( - default=None, - metadata={ - "help": ( - "For debugging purposes or quicker training, truncate the number of evaluation examples to this " - "value if set." - ) - }, - ) - reduce_labels: Optional[bool] = field( - default=False, - metadata={"help": "Whether or not to reduce all labels by 1 and replace background by 255."}, - ) - - def __post_init__(self): - if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): - raise ValueError( - "You must specify either a dataset name from the hub or a train and/or validation directory." - ) - - -@dataclass -class ModelArguments: - """ - Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. - """ - - model_name_or_path: str = field( - default="nvidia/mit-b0", - metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, - ) - config_name: Optional[str] = field( - default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} - ) - cache_dir: Optional[str] = field( - default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} - ) - model_revision: str = field( - default="main", - metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, - ) - image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."}) - use_auth_token: bool = field( - default=False, - metadata={ - "help": ( - "Will use the token generated when running `huggingface-cli login` (necessary to use this script " - "with private models)." 
- ) - }, - ) - - -def main(): - # See all possible arguments in src/transformers/training_args.py - # or by passing the --help flag to this script. - # We now keep distinct sets of args, for a cleaner separation of concerns. - - parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) - if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): - # If we pass only one argument to the script and it's the path to a json file, - # let's parse it to get our arguments. - model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) - else: - model_args, data_args, training_args = parser.parse_args_into_dataclasses() - - # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The - # information sent is the one passed as arguments along with your Python/PyTorch versions. - send_example_telemetry("run_semantic_segmentation", model_args, data_args) - - # Setup logging - logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - handlers=[logging.StreamHandler(sys.stdout)], - ) - - if training_args.should_log: - # The default of training_args.log_level is passive, so we set log level at info here to have that default. - transformers.utils.logging.set_verbosity_info() - - log_level = training_args.get_process_log_level() - logger.setLevel(log_level) - transformers.utils.logging.set_verbosity(log_level) - transformers.utils.logging.enable_default_handler() - transformers.utils.logging.enable_explicit_format() - - # Log on each process the small summary: - logger.warning( - f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" - + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" - ) - logger.info(f"Training/evaluation parameters {training_args}") - - # Detecting last checkpoint. - last_checkpoint = None - if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: - last_checkpoint = get_last_checkpoint(training_args.output_dir) - if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: - raise ValueError( - f"Output directory ({training_args.output_dir}) already exists and is not empty. " - "Use --overwrite_output_dir to overcome." - ) - elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: - logger.info( - f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " - "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." - ) - - # Load dataset - # In distributed training, the load_dataset function guarantees that only one local process can concurrently - # download the dataset. - # TODO support datasets from local folders - dataset = load_dataset(data_args.dataset_name, cache_dir=model_args.cache_dir) - - # Rename column names to standardized names (only "image" and "label" need to be present) - if "pixel_values" in dataset["train"].column_names: - dataset = dataset.rename_columns({"pixel_values": "image"}) - if "annotation" in dataset["train"].column_names: - dataset = dataset.rename_columns({"annotation": "label"}) - - # If we don't have a validation split, split off a percentage of train as validation. 
- data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split - if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0: - split = dataset["train"].train_test_split(data_args.train_val_split) - dataset["train"] = split["train"] - dataset["validation"] = split["test"] - - # Prepare label mappings. - # We'll include these in the model's config to get human readable labels in the Inference API. - if data_args.dataset_name == "scene_parse_150": - repo_id = "huggingface/label-files" - filename = "ade20k-id2label.json" - else: - repo_id = data_args.dataset_name - filename = "id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) - id2label = {int(k): v for k, v in id2label.items()} - label2id = {v: str(k) for k, v in id2label.items()} - - # Load the mean IoU metric from the datasets package - metric = evaluate.load("mean_iou") - - # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a - # predictions and label_ids field) and has to return a dictionary string to float. - @torch.no_grad() - def compute_metrics(eval_pred): - logits, labels = eval_pred - logits_tensor = torch.from_numpy(logits) - # scale the logits to the size of the label - logits_tensor = nn.functional.interpolate( - logits_tensor, - size=labels.shape[-2:], - mode="bilinear", - align_corners=False, - ).argmax(dim=1) - - pred_labels = logits_tensor.detach().cpu().numpy() - metrics = metric.compute( - predictions=pred_labels, - references=labels, - num_labels=len(id2label), - ignore_index=0, - reduce_labels=image_processor.do_reduce_labels, - ) - # add per category metrics as individual key-value pairs - per_category_accuracy = metrics.pop("per_category_accuracy").tolist() - per_category_iou = metrics.pop("per_category_iou").tolist() - - metrics.update({f"accuracy_{id2label[i]}": v for i, v in enumerate(per_category_accuracy)}) - metrics.update({f"iou_{id2label[i]}": v for i, v in enumerate(per_category_iou)}) - - return metrics - - config = AutoConfig.from_pretrained( - model_args.config_name or model_args.model_name_or_path, - label2id=label2id, - id2label=id2label, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - ) - model = AutoModelForSemanticSegmentation.from_pretrained( - model_args.model_name_or_path, - from_tf=bool(".ckpt" in model_args.model_name_or_path), - config=config, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - ) - image_processor = AutoImageProcessor.from_pretrained( - model_args.image_processor_name or model_args.model_name_or_path, - cache_dir=model_args.cache_dir, - revision=model_args.model_revision, - use_auth_token=True if model_args.use_auth_token else None, - ) - - # Define torchvision transforms to be applied to each image + target. - # Not that straightforward in torchvision: https://github.com/pytorch/vision/issues/9 - # Currently based on official torchvision references: https://github.com/pytorch/vision/blob/main/references/segmentation/transforms.py - if "shortest_edge" in image_processor.size: - # We instead set the target size as (shortest_edge, shortest_edge) to here to ensure all images are batchable. 
- size = (image_processor.size["shortest_edge"], image_processor.size["shortest_edge"]) - else: - size = (image_processor.size["height"], image_processor.size["width"]) - train_transforms = Compose( - [ - ReduceLabels() if data_args.reduce_labels else Identity(), - RandomCrop(size=size), - RandomHorizontalFlip(flip_prob=0.5), - PILToTensor(), - ConvertImageDtype(torch.float), - Normalize(mean=image_processor.image_mean, std=image_processor.image_std), - ] - ) - # Define torchvision transform to be applied to each image. - # jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1) - val_transforms = Compose( - [ - ReduceLabels() if data_args.reduce_labels else Identity(), - Resize(size=size), - PILToTensor(), - ConvertImageDtype(torch.float), - Normalize(mean=image_processor.image_mean, std=image_processor.image_std), - ] - ) - - def preprocess_train(example_batch): - pixel_values = [] - labels = [] - for image, target in zip(example_batch["image"], example_batch["label"]): - image, target = train_transforms(image.convert("RGB"), target) - pixel_values.append(image) - labels.append(target) - - encoding = {} - encoding["pixel_values"] = torch.stack(pixel_values) - encoding["labels"] = torch.stack(labels) - - return encoding - - def preprocess_val(example_batch): - pixel_values = [] - labels = [] - for image, target in zip(example_batch["image"], example_batch["label"]): - image, target = val_transforms(image.convert("RGB"), target) - pixel_values.append(image) - labels.append(target) - - encoding = {} - encoding["pixel_values"] = torch.stack(pixel_values) - encoding["labels"] = torch.stack(labels) - - return encoding - - if training_args.do_train: - if "train" not in dataset: - raise ValueError("--do_train requires a train dataset") - if data_args.max_train_samples is not None: - dataset["train"] = ( - dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples)) - ) - # Set the training transforms - dataset["train"].set_transform(preprocess_train) - - if training_args.do_eval: - if "validation" not in dataset: - raise ValueError("--do_eval requires a validation dataset") - if data_args.max_eval_samples is not None: - dataset["validation"] = ( - dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples)) - ) - # Set the validation transforms - dataset["validation"].set_transform(preprocess_val) - - # Initalize our trainer - trainer = Trainer( - model=model, - args=training_args, - train_dataset=dataset["train"] if training_args.do_train else None, - eval_dataset=dataset["validation"] if training_args.do_eval else None, - compute_metrics=compute_metrics, - tokenizer=image_processor, - data_collator=default_data_collator, - ) - - # Training - if training_args.do_train: - checkpoint = None - if training_args.resume_from_checkpoint is not None: - checkpoint = training_args.resume_from_checkpoint - elif last_checkpoint is not None: - checkpoint = last_checkpoint - train_result = trainer.train(resume_from_checkpoint=checkpoint) - trainer.save_model() - trainer.log_metrics("train", train_result.metrics) - trainer.save_metrics("train", train_result.metrics) - trainer.save_state() - - # Evaluation - if training_args.do_eval: - metrics = trainer.evaluate() - trainer.log_metrics("eval", metrics) - trainer.save_metrics("eval", metrics) - - # Write model card and (optionally) push to hub - kwargs = { - "finetuned_from": model_args.model_name_or_path, - "dataset": data_args.dataset_name, - "tags": 
["image-segmentation", "vision"], - } - if training_args.push_to_hub: - trainer.push_to_hub(**kwargs) - else: - trainer.create_model_card(**kwargs) - - -if __name__ == "__main__": - main() diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/quantization-qdqbert/Dockerfile b/spaces/chendl/compositional_test/transformers/examples/research_projects/quantization-qdqbert/Dockerfile deleted file mode 100644 index e64c9f0e021d4547654192bbfe34f469c76fc6f0..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/research_projects/quantization-qdqbert/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -# coding=utf-8 -# Copyright 2021 NVIDIA Corporation. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -FROM nvcr.io/nvidia/pytorch:22.02-py3 -LABEL maintainer="Hugging Face" -LABEL repository="transformers" - -RUN apt-get update -RUN apt-get install sudo - -RUN python3 -m pip install --no-cache-dir --upgrade pip -RUN python3 -m pip install --no-cache-dir --ignore-installed pycuda -RUN python3 -m pip install --no-cache-dir \ - pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com -RUN python3 -m pip install --no-cache-dir onnxruntime-gpu==1.11 - -WORKDIR /workspace -COPY . transformers/ -RUN cd transformers/ && \ - python3 -m pip install --no-cache-dir . - -RUN python3 -m pip install --no-cache-dir datasets \ - accelerate diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/seq2seq-distillation/sentence_splitter.py b/spaces/chendl/compositional_test/transformers/examples/research_projects/seq2seq-distillation/sentence_splitter.py deleted file mode 100644 index c5acec73928ccd00dcf049601ebdf37bcdf4cfea..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/research_projects/seq2seq-distillation/sentence_splitter.py +++ /dev/null @@ -1,22 +0,0 @@ -import re - -from filelock import FileLock - - -try: - import nltk - - NLTK_AVAILABLE = True -except (ImportError, ModuleNotFoundError): - NLTK_AVAILABLE = False - -if NLTK_AVAILABLE: - with FileLock(".lock") as lock: - nltk.download("punkt", quiet=True) - - -def add_newline_to_end_of_each_sentence(x: str) -> str: - """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS.""" - re.sub("", "", x) # remove pegasus newline char - assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. 
(pip install nltk)" - return "\n".join(nltk.sent_tokenize(x)) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/anyio/from_thread.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/anyio/from_thread.py deleted file mode 100644 index 6b76861c70d6a6aa369a54370ef47aa75839a91f..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/anyio/from_thread.py +++ /dev/null @@ -1,500 +0,0 @@ -from __future__ import annotations - -import threading -from asyncio import iscoroutine -from concurrent.futures import FIRST_COMPLETED, Future, ThreadPoolExecutor, wait -from contextlib import AbstractContextManager, contextmanager -from types import TracebackType -from typing import ( - Any, - AsyncContextManager, - Awaitable, - Callable, - ContextManager, - Generator, - Generic, - Iterable, - TypeVar, - cast, - overload, -) -from warnings import warn - -from ._core import _eventloop -from ._core._eventloop import get_asynclib, get_cancelled_exc_class, threadlocals -from ._core._synchronization import Event -from ._core._tasks import CancelScope, create_task_group -from .abc._tasks import TaskStatus - -T_Retval = TypeVar("T_Retval") -T_co = TypeVar("T_co") - - -def run(func: Callable[..., Awaitable[T_Retval]], *args: object) -> T_Retval: - """ - Call a coroutine function from a worker thread. - - :param func: a coroutine function - :param args: positional arguments for the callable - :return: the return value of the coroutine function - - """ - try: - asynclib = threadlocals.current_async_module - except AttributeError: - raise RuntimeError("This function can only be run from an AnyIO worker thread") - - return asynclib.run_async_from_thread(func, *args) - - -def run_async_from_thread( - func: Callable[..., Awaitable[T_Retval]], *args: object -) -> T_Retval: - warn( - "run_async_from_thread() has been deprecated, use anyio.from_thread.run() instead", - DeprecationWarning, - ) - return run(func, *args) - - -def run_sync(func: Callable[..., T_Retval], *args: object) -> T_Retval: - """ - Call a function in the event loop thread from a worker thread. - - :param func: a callable - :param args: positional arguments for the callable - :return: the return value of the callable - - """ - try: - asynclib = threadlocals.current_async_module - except AttributeError: - raise RuntimeError("This function can only be run from an AnyIO worker thread") - - return asynclib.run_sync_from_thread(func, *args) - - -def run_sync_from_thread(func: Callable[..., T_Retval], *args: object) -> T_Retval: - warn( - "run_sync_from_thread() has been deprecated, use anyio.from_thread.run_sync() instead", - DeprecationWarning, - ) - return run_sync(func, *args) - - -class _BlockingAsyncContextManager(Generic[T_co], AbstractContextManager): - _enter_future: Future - _exit_future: Future - _exit_event: Event - _exit_exc_info: tuple[ - type[BaseException] | None, BaseException | None, TracebackType | None - ] = (None, None, None) - - def __init__(self, async_cm: AsyncContextManager[T_co], portal: BlockingPortal): - self._async_cm = async_cm - self._portal = portal - - async def run_async_cm(self) -> bool | None: - try: - self._exit_event = Event() - value = await self._async_cm.__aenter__() - except BaseException as exc: - self._enter_future.set_exception(exc) - raise - else: - self._enter_future.set_result(value) - - try: - # Wait for the sync context manager to exit. 
- # This next statement can raise `get_cancelled_exc_class()` if - # something went wrong in a task group in this async context - # manager. - await self._exit_event.wait() - finally: - # In case of cancellation, it could be that we end up here before - # `_BlockingAsyncContextManager.__exit__` is called, and an - # `_exit_exc_info` has been set. - result = await self._async_cm.__aexit__(*self._exit_exc_info) - return result - - def __enter__(self) -> T_co: - self._enter_future = Future() - self._exit_future = self._portal.start_task_soon(self.run_async_cm) - cm = self._enter_future.result() - return cast(T_co, cm) - - def __exit__( - self, - __exc_type: type[BaseException] | None, - __exc_value: BaseException | None, - __traceback: TracebackType | None, - ) -> bool | None: - self._exit_exc_info = __exc_type, __exc_value, __traceback - self._portal.call(self._exit_event.set) - return self._exit_future.result() - - -class _BlockingPortalTaskStatus(TaskStatus): - def __init__(self, future: Future): - self._future = future - - def started(self, value: object = None) -> None: - self._future.set_result(value) - - -class BlockingPortal: - """An object that lets external threads run code in an asynchronous event loop.""" - - def __new__(cls) -> BlockingPortal: - return get_asynclib().BlockingPortal() - - def __init__(self) -> None: - self._event_loop_thread_id: int | None = threading.get_ident() - self._stop_event = Event() - self._task_group = create_task_group() - self._cancelled_exc_class = get_cancelled_exc_class() - - async def __aenter__(self) -> BlockingPortal: - await self._task_group.__aenter__() - return self - - async def __aexit__( - self, - exc_type: type[BaseException] | None, - exc_val: BaseException | None, - exc_tb: TracebackType | None, - ) -> bool | None: - await self.stop() - return await self._task_group.__aexit__(exc_type, exc_val, exc_tb) - - def _check_running(self) -> None: - if self._event_loop_thread_id is None: - raise RuntimeError("This portal is not running") - if self._event_loop_thread_id == threading.get_ident(): - raise RuntimeError( - "This method cannot be called from the event loop thread" - ) - - async def sleep_until_stopped(self) -> None: - """Sleep until :meth:`stop` is called.""" - await self._stop_event.wait() - - async def stop(self, cancel_remaining: bool = False) -> None: - """ - Signal the portal to shut down. - - This marks the portal as no longer accepting new calls and exits from - :meth:`sleep_until_stopped`. 
- - :param cancel_remaining: ``True`` to cancel all the remaining tasks, ``False`` to let them - finish before returning - - """ - self._event_loop_thread_id = None - self._stop_event.set() - if cancel_remaining: - self._task_group.cancel_scope.cancel() - - async def _call_func( - self, func: Callable, args: tuple, kwargs: dict[str, Any], future: Future - ) -> None: - def callback(f: Future) -> None: - if f.cancelled() and self._event_loop_thread_id not in ( - None, - threading.get_ident(), - ): - self.call(scope.cancel) - - try: - retval = func(*args, **kwargs) - if iscoroutine(retval): - with CancelScope() as scope: - if future.cancelled(): - scope.cancel() - else: - future.add_done_callback(callback) - - retval = await retval - except self._cancelled_exc_class: - future.cancel() - except BaseException as exc: - if not future.cancelled(): - future.set_exception(exc) - - # Let base exceptions fall through - if not isinstance(exc, Exception): - raise - else: - if not future.cancelled(): - future.set_result(retval) - finally: - scope = None # type: ignore[assignment] - - def _spawn_task_from_thread( - self, - func: Callable, - args: tuple, - kwargs: dict[str, Any], - name: object, - future: Future, - ) -> None: - """ - Spawn a new task using the given callable. - - Implementors must ensure that the future is resolved when the task finishes. - - :param func: a callable - :param args: positional arguments to be passed to the callable - :param kwargs: keyword arguments to be passed to the callable - :param name: name of the task (will be coerced to a string if not ``None``) - :param future: a future that will resolve to the return value of the callable, or the - exception raised during its execution - - """ - raise NotImplementedError - - @overload - def call(self, func: Callable[..., Awaitable[T_Retval]], *args: object) -> T_Retval: - ... - - @overload - def call(self, func: Callable[..., T_Retval], *args: object) -> T_Retval: - ... - - def call( - self, func: Callable[..., Awaitable[T_Retval] | T_Retval], *args: object - ) -> T_Retval: - """ - Call the given function in the event loop thread. - - If the callable returns a coroutine object, it is awaited on. - - :param func: any callable - :raises RuntimeError: if the portal is not running or if this method is called from within - the event loop thread - - """ - return cast(T_Retval, self.start_task_soon(func, *args).result()) - - @overload - def spawn_task( - self, - func: Callable[..., Awaitable[T_Retval]], - *args: object, - name: object = None, - ) -> Future[T_Retval]: - ... - - @overload - def spawn_task( - self, func: Callable[..., T_Retval], *args: object, name: object = None - ) -> Future[T_Retval]: - ... - - def spawn_task( - self, - func: Callable[..., Awaitable[T_Retval] | T_Retval], - *args: object, - name: object = None, - ) -> Future[T_Retval]: - """ - Start a task in the portal's task group. - - :param func: the target coroutine function - :param args: positional arguments passed to ``func`` - :param name: name of the task (will be coerced to a string if not ``None``) - :return: a future that resolves with the return value of the callable if the task completes - successfully, or with the exception raised in the task - :raises RuntimeError: if the portal is not running or if this method is called from within - the event loop thread - - .. versionadded:: 2.1 - .. deprecated:: 3.0 - Use :meth:`start_task_soon` instead. If your code needs AnyIO 2 compatibility, you - can keep using this until AnyIO 4. 
- - """ - warn( - "spawn_task() is deprecated -- use start_task_soon() instead", - DeprecationWarning, - ) - return self.start_task_soon(func, *args, name=name) # type: ignore[arg-type] - - @overload - def start_task_soon( - self, - func: Callable[..., Awaitable[T_Retval]], - *args: object, - name: object = None, - ) -> Future[T_Retval]: - ... - - @overload - def start_task_soon( - self, func: Callable[..., T_Retval], *args: object, name: object = None - ) -> Future[T_Retval]: - ... - - def start_task_soon( - self, - func: Callable[..., Awaitable[T_Retval] | T_Retval], - *args: object, - name: object = None, - ) -> Future[T_Retval]: - """ - Start a task in the portal's task group. - - The task will be run inside a cancel scope which can be cancelled by cancelling the - returned future. - - :param func: the target function - :param args: positional arguments passed to ``func`` - :param name: name of the task (will be coerced to a string if not ``None``) - :return: a future that resolves with the return value of the callable if the - task completes successfully, or with the exception raised in the task - :raises RuntimeError: if the portal is not running or if this method is called - from within the event loop thread - :rtype: concurrent.futures.Future[T_Retval] - - .. versionadded:: 3.0 - - """ - self._check_running() - f: Future = Future() - self._spawn_task_from_thread(func, args, {}, name, f) - return f - - def start_task( - self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None - ) -> tuple[Future[Any], Any]: - """ - Start a task in the portal's task group and wait until it signals for readiness. - - This method works the same way as :meth:`.abc.TaskGroup.start`. - - :param func: the target function - :param args: positional arguments passed to ``func`` - :param name: name of the task (will be coerced to a string if not ``None``) - :return: a tuple of (future, task_status_value) where the ``task_status_value`` - is the value passed to ``task_status.started()`` from within the target - function - :rtype: tuple[concurrent.futures.Future[Any], Any] - - .. versionadded:: 3.0 - - """ - - def task_done(future: Future) -> None: - if not task_status_future.done(): - if future.cancelled(): - task_status_future.cancel() - elif future.exception(): - task_status_future.set_exception(future.exception()) - else: - exc = RuntimeError( - "Task exited without calling task_status.started()" - ) - task_status_future.set_exception(exc) - - self._check_running() - task_status_future: Future = Future() - task_status = _BlockingPortalTaskStatus(task_status_future) - f: Future = Future() - f.add_done_callback(task_done) - self._spawn_task_from_thread(func, args, {"task_status": task_status}, name, f) - return f, task_status_future.result() - - def wrap_async_context_manager( - self, cm: AsyncContextManager[T_co] - ) -> ContextManager[T_co]: - """ - Wrap an async context manager as a synchronous context manager via this portal. - - Spawns a task that will call both ``__aenter__()`` and ``__aexit__()``, stopping in the - middle until the synchronous context manager exits. - - :param cm: an asynchronous context manager - :return: a synchronous context manager - - .. versionadded:: 2.1 - - """ - return _BlockingAsyncContextManager(cm, self) - - -def create_blocking_portal() -> BlockingPortal: - """ - Create a portal for running functions in the event loop thread from external threads. 
- - Use this function in asynchronous code when you need to allow external threads access to the - event loop where your asynchronous code is currently running. - - .. deprecated:: 3.0 - Use :class:`.BlockingPortal` directly. - - """ - warn( - "create_blocking_portal() has been deprecated -- use anyio.from_thread.BlockingPortal() " - "directly", - DeprecationWarning, - ) - return BlockingPortal() - - -@contextmanager -def start_blocking_portal( - backend: str = "asyncio", backend_options: dict[str, Any] | None = None -) -> Generator[BlockingPortal, Any, None]: - """ - Start a new event loop in a new thread and run a blocking portal in its main task. - - The parameters are the same as for :func:`~anyio.run`. - - :param backend: name of the backend - :param backend_options: backend options - :return: a context manager that yields a blocking portal - - .. versionchanged:: 3.0 - Usage as a context manager is now required. - - """ - - async def run_portal() -> None: - async with BlockingPortal() as portal_: - if future.set_running_or_notify_cancel(): - future.set_result(portal_) - await portal_.sleep_until_stopped() - - future: Future[BlockingPortal] = Future() - with ThreadPoolExecutor(1) as executor: - run_future = executor.submit( - _eventloop.run, - run_portal, # type: ignore[arg-type] - backend=backend, - backend_options=backend_options, - ) - try: - wait( - cast(Iterable[Future], [run_future, future]), - return_when=FIRST_COMPLETED, - ) - except BaseException: - future.cancel() - run_future.cancel() - raise - - if future.done(): - portal = future.result() - cancel_remaining_tasks = False - try: - yield portal - except BaseException: - cancel_remaining_tasks = True - raise - finally: - try: - portal.call(portal.stop, cancel_remaining_tasks) - except RuntimeError: - pass - - run_future.result() diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/attr/_version_info.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/attr/_version_info.py deleted file mode 100644 index 51a1312f9759f21063caea779a62882d7f7c86ae..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/attr/_version_info.py +++ /dev/null @@ -1,86 +0,0 @@ -# SPDX-License-Identifier: MIT - - -from functools import total_ordering - -from ._funcs import astuple -from ._make import attrib, attrs - - -@total_ordering -@attrs(eq=False, order=False, slots=True, frozen=True) -class VersionInfo: - """ - A version object that can be compared to tuple of length 1--4: - - >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2) - True - >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1) - True - >>> vi = attr.VersionInfo(19, 2, 0, "final") - >>> vi < (19, 1, 1) - False - >>> vi < (19,) - False - >>> vi == (19, 2,) - True - >>> vi == (19, 2, 1) - False - - .. versionadded:: 19.2 - """ - - year = attrib(type=int) - minor = attrib(type=int) - micro = attrib(type=int) - releaselevel = attrib(type=str) - - @classmethod - def _from_version_string(cls, s): - """ - Parse *s* and return a _VersionInfo. - """ - v = s.split(".") - if len(v) == 3: - v.append("final") - - return cls( - year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3] - ) - - def _ensure_tuple(self, other): - """ - Ensure *other* is a tuple of a valid length. - - Returns a possibly transformed *other* and ourselves as a tuple of - the same length as *other*. 
- """ - - if self.__class__ is other.__class__: - other = astuple(other) - - if not isinstance(other, tuple): - raise NotImplementedError - - if not (1 <= len(other) <= 4): - raise NotImplementedError - - return astuple(self)[: len(other)], other - - def __eq__(self, other): - try: - us, them = self._ensure_tuple(other) - except NotImplementedError: - return NotImplemented - - return us == them - - def __lt__(self, other): - try: - us, them = self._ensure_tuple(other) - except NotImplementedError: - return NotImplemented - - # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't - # have to do anything special with releaselevel for now. - return us < them diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/clickhouse_connect/dbapi/__init__.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/clickhouse_connect/dbapi/__init__.py deleted file mode 100644 index ea792b49683ed3ebf6dd6b09146029c982716363..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/clickhouse_connect/dbapi/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -from typing import Optional - -from clickhouse_connect.dbapi.connection import Connection - - -apilevel = '2.0' # PEP 249 DB API level -threadsafety = 2 # PEP 249 Threads may share the module and connections. -paramstyle = 'pyformat' # PEP 249 Python extended format codes, e.g. ...WHERE name=%(name)s - - -class Error(Exception): - pass - - -def connect(host: Optional[str] = None, - database: Optional[str] = None, - username: Optional[str] = '', - password: Optional[str] = '', - port: Optional[int] = None, - **kwargs): - secure = kwargs.pop('secure', False) - return Connection(host=host, - database=database, - username=username, - password=password, - port=port, - secure=secure, - **kwargs) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/faiss/__init__.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/faiss/__init__.py deleted file mode 100644 index 92eeb3479ba37acec65e17a437b6e0d608a85d7e..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/faiss/__init__.py +++ /dev/null @@ -1,312 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -# @nolint - -# not linting this file because it imports * from swigfaiss, which -# causes a ton of useless warnings. - -import numpy as np -import sys -import inspect - -# We import * so that the symbol foo can be accessed as faiss.foo. 
-from .loader import * - -# additional wrappers -from faiss import class_wrappers -from faiss.gpu_wrappers import * -from faiss.array_conversions import * -from faiss.extra_wrappers import kmin, kmax, pairwise_distances, rand, randint, \ - lrand, randn, rand_smooth_vectors, eval_intersection, normalize_L2, \ - ResultHeap, knn, Kmeans, checksum, matrix_bucket_sort_inplace, bucket_sort, \ - merge_knn_results - - -__version__ = "%d.%d.%d" % (FAISS_VERSION_MAJOR, - FAISS_VERSION_MINOR, - FAISS_VERSION_PATCH) - -class_wrappers.handle_Clustering(Clustering) -class_wrappers.handle_Clustering1D(Clustering1D) -class_wrappers.handle_MatrixStats(MatrixStats) -class_wrappers.handle_IOWriter(IOWriter) -class_wrappers.handle_IOReader(IOReader) -class_wrappers.handle_AutoTuneCriterion(AutoTuneCriterion) -class_wrappers.handle_ParameterSpace(ParameterSpace) -class_wrappers.handle_NSG(IndexNSG) -class_wrappers.handle_MapLong2Long(MapLong2Long) -class_wrappers.handle_IDSelectorSubset(IDSelectorBatch, class_owns=True) -class_wrappers.handle_IDSelectorSubset(IDSelectorArray, class_owns=False) -class_wrappers.handle_IDSelectorSubset(IDSelectorBitmap, class_owns=False, force_int64=False) - -this_module = sys.modules[__name__] - -# handle sub-classes -for symbol in dir(this_module): - obj = getattr(this_module, symbol) - # print symbol, isinstance(obj, (type, types.ClassType)) - if inspect.isclass(obj): - the_class = obj - if issubclass(the_class, Index): - class_wrappers.handle_Index(the_class) - - if issubclass(the_class, IndexBinary): - class_wrappers.handle_IndexBinary(the_class) - - if issubclass(the_class, VectorTransform): - class_wrappers.handle_VectorTransform(the_class) - - if issubclass(the_class, Quantizer): - class_wrappers.handle_Quantizer(the_class) - - if issubclass(the_class, IndexRowwiseMinMax) or \ - issubclass(the_class, IndexRowwiseMinMaxFP16): - class_wrappers.handle_IndexRowwiseMinMax(the_class) - - if issubclass(the_class, SearchParameters): - class_wrappers.handle_SearchParameters(the_class) - - if issubclass(the_class, CodePacker): - class_wrappers.handle_CodePacker(the_class) - -############################################################################## -# For some classes (IndexIVF, IDSelector), the object holds a reference to -# a C++ object (eg. the quantizer object of IndexIVF). We don't transfer the -# ownership to the C++ object (ie. set own_quantizer=true), but instead we add -# a reference in the Python class wrapper instead. This is done via an -# additional referenced_objects field. -# -# Since the semantics of ownership in the C++ classes are sometimes irregular, -# these references are added manually using the functions below. 
-############################################################################## - - -def add_ref_in_constructor(the_class, parameter_no): - # adds a reference to parameter parameter_no in self - # so that that parameter does not get deallocated before self - original_init = the_class.__init__ - - def replacement_init(self, *args): - original_init(self, *args) - self.referenced_objects = [args[parameter_no]] - - def replacement_init_multiple(self, *args): - original_init(self, *args) - pset = parameter_no[len(args)] - self.referenced_objects = [args[no] for no in pset] - - if type(parameter_no) == dict: - # a list of parameters to keep, depending on the number of arguments - the_class.__init__ = replacement_init_multiple - else: - the_class.__init__ = replacement_init - -def add_to_referenced_objects(self, ref): - if not hasattr(self, 'referenced_objects'): - self.referenced_objects = [ref] - else: - self.referenced_objects.append(ref) - - -def add_ref_in_method(the_class, method_name, parameter_no): - original_method = getattr(the_class, method_name) - - def replacement_method(self, *args): - ref = args[parameter_no] - add_to_referenced_objects(self, ref) - return original_method(self, *args) - setattr(the_class, method_name, replacement_method) - - -def add_ref_in_method_explicit_own(the_class, method_name): - # for methods of format set_XXX(object, own) - original_method = getattr(the_class, method_name) - - def replacement_method(self, ref, own=False): - if not own: - if not hasattr(self, 'referenced_objects'): - self.referenced_objects = [ref] - else: - self.referenced_objects.append(ref) - else: - # transfer ownership to C++ class - ref.this.disown() - return original_method(self, ref, own) - setattr(the_class, method_name, replacement_method) - - -def add_ref_in_function(function_name, parameter_no): - # assumes the function returns an object - original_function = getattr(this_module, function_name) - - def replacement_function(*args): - result = original_function(*args) - ref = args[parameter_no] - result.referenced_objects = [ref] - return result - setattr(this_module, function_name, replacement_function) - - -add_ref_in_constructor(IndexIVFFlat, 0) -add_ref_in_constructor(IndexIVFFlatDedup, 0) -add_ref_in_constructor(IndexPreTransform, {2: [0, 1], 1: [0]}) -add_ref_in_method(IndexPreTransform, 'prepend_transform', 0) -add_ref_in_constructor(IndexIVFPQ, 0) -add_ref_in_constructor(IndexIVFPQR, 0) -add_ref_in_constructor(IndexIVFPQFastScan, 0) -add_ref_in_constructor(IndexIVFResidualQuantizer, 0) -add_ref_in_constructor(IndexIVFLocalSearchQuantizer, 0) -add_ref_in_constructor(IndexIVFResidualQuantizerFastScan, 0) -add_ref_in_constructor(IndexIVFLocalSearchQuantizerFastScan, 0) -add_ref_in_constructor(IndexIVFSpectralHash, 0) -add_ref_in_method_explicit_own(IndexIVFSpectralHash, "replace_vt") - -add_ref_in_constructor(Index2Layer, 0) -add_ref_in_constructor(Level1Quantizer, 0) -add_ref_in_constructor(IndexIVFScalarQuantizer, 0) -add_ref_in_constructor(IndexRowwiseMinMax, 0) -add_ref_in_constructor(IndexRowwiseMinMaxFP16, 0) -add_ref_in_constructor(IndexIDMap, 0) -add_ref_in_constructor(IndexIDMap2, 0) -add_ref_in_constructor(IndexHNSW, 0) -add_ref_in_method(IndexShards, 'add_shard', 0) -add_ref_in_method(IndexBinaryShards, 'add_shard', 0) -add_ref_in_constructor(IndexRefineFlat, {2: [0], 1: [0]}) -add_ref_in_constructor(IndexRefine, {2: [0, 1]}) - -add_ref_in_constructor(IndexBinaryIVF, 0) -add_ref_in_constructor(IndexBinaryFromFloat, 0) -add_ref_in_constructor(IndexBinaryIDMap, 0) 
-add_ref_in_constructor(IndexBinaryIDMap2, 0) - -add_ref_in_method(IndexReplicas, 'addIndex', 0) -add_ref_in_method(IndexBinaryReplicas, 'addIndex', 0) - -add_ref_in_constructor(BufferedIOWriter, 0) -add_ref_in_constructor(BufferedIOReader, 0) - -add_ref_in_constructor(IDSelectorNot, 0) -add_ref_in_constructor(IDSelectorAnd, slice(2)) -add_ref_in_constructor(IDSelectorOr, slice(2)) -add_ref_in_constructor(IDSelectorXOr, slice(2)) - -# seems really marginal... -# remove_ref_from_method(IndexReplicas, 'removeIndex', 0) - - -###################################################### -# search_with_parameters interface -###################################################### - -search_with_parameters_c = search_with_parameters - - -def search_with_parameters(index, x, k, params=None, output_stats=False): - x = np.ascontiguousarray(x, dtype='float32') - n, d = x.shape - assert d == index.d - if not params: - # if not provided use the ones set in the IVF object - params = IVFSearchParameters() - index_ivf = extract_index_ivf(index) - params.nprobe = index_ivf.nprobe - params.max_codes = index_ivf.max_codes - nb_dis = np.empty(1, 'uint64') - ms_per_stage = np.empty(3, 'float64') - distances = np.empty((n, k), dtype=np.float32) - labels = np.empty((n, k), dtype=np.int64) - search_with_parameters_c( - index, n, swig_ptr(x), - k, swig_ptr(distances), - swig_ptr(labels), - params, swig_ptr(nb_dis), swig_ptr(ms_per_stage) - ) - if not output_stats: - return distances, labels - else: - stats = { - 'ndis': nb_dis[0], - 'pre_transform_ms': ms_per_stage[0], - 'coarse_quantizer_ms': ms_per_stage[1], - 'invlist_scan_ms': ms_per_stage[2], - } - return distances, labels, stats - - -range_search_with_parameters_c = range_search_with_parameters - - -def range_search_with_parameters(index, x, radius, params=None, output_stats=False): - x = np.ascontiguousarray(x, dtype='float32') - n, d = x.shape - assert d == index.d - if not params: - # if not provided use the ones set in the IVF object - params = IVFSearchParameters() - index_ivf = extract_index_ivf(index) - params.nprobe = index_ivf.nprobe - params.max_codes = index_ivf.max_codes - nb_dis = np.empty(1, 'uint64') - ms_per_stage = np.empty(3, 'float64') - res = RangeSearchResult(n) - range_search_with_parameters_c( - index, n, swig_ptr(x), - radius, res, - params, swig_ptr(nb_dis), swig_ptr(ms_per_stage) - ) - lims = rev_swig_ptr(res.lims, n + 1).copy() - nd = int(lims[-1]) - Dout = rev_swig_ptr(res.distances, nd).copy() - Iout = rev_swig_ptr(res.labels, nd).copy() - if not output_stats: - return lims, Dout, Iout - else: - stats = { - 'ndis': nb_dis[0], - 'pre_transform_ms': ms_per_stage[0], - 'coarse_quantizer_ms': ms_per_stage[1], - 'invlist_scan_ms': ms_per_stage[2], - } - return lims, Dout, Iout, stats - - -# IndexProxy was renamed to IndexReplicas, remap the old name for any old code -# people may have -IndexProxy = IndexReplicas -ConcatenatedInvertedLists = HStackInvertedLists -IndexResidual = IndexResidualQuantizer - -IVFSearchParameters = SearchParametersIVF - -########################################### -# serialization of indexes to byte arrays -########################################### - - -def serialize_index(index): - """ convert an index to a numpy uint8 array """ - writer = VectorIOWriter() - write_index(index, writer) - return vector_to_array(writer.data) - - -def deserialize_index(data): - reader = VectorIOReader() - copy_array_to_vector(data, reader.data) - return read_index(reader) - - -def serialize_index_binary(index): - """ convert an index 
to a numpy uint8 array """ - writer = VectorIOWriter() - write_index_binary(index, writer) - return vector_to_array(writer.data) - - -def deserialize_index_binary(data): - reader = VectorIOReader() - copy_array_to_vector(data, reader.data) - return read_index_binary(reader) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/G_M_A_P_.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/G_M_A_P_.py deleted file mode 100644 index 39b0050c5f0591a2b36c21242863655ca1f3ef47..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/G_M_A_P_.py +++ /dev/null @@ -1,142 +0,0 @@ -from fontTools.misc import sstruct -from fontTools.misc.textTools import tobytes, tostr, safeEval -from . import DefaultTable - -GMAPFormat = """ - > # big endian - tableVersionMajor: H - tableVersionMinor: H - flags: H - recordsCount: H - recordsOffset: H - fontNameLength: H -""" -# psFontName is a byte string which follows the record above. This is zero padded -# to the beginning of the records array. The recordsOffsst is 32 bit aligned. - -GMAPRecordFormat1 = """ - > # big endian - UV: L - cid: H - gid: H - ggid: H - name: 32s -""" - - -class GMAPRecord(object): - def __init__(self, uv=0, cid=0, gid=0, ggid=0, name=""): - self.UV = uv - self.cid = cid - self.gid = gid - self.ggid = ggid - self.name = name - - def toXML(self, writer, ttFont): - writer.begintag("GMAPRecord") - writer.newline() - writer.simpletag("UV", value=self.UV) - writer.newline() - writer.simpletag("cid", value=self.cid) - writer.newline() - writer.simpletag("gid", value=self.gid) - writer.newline() - writer.simpletag("glyphletGid", value=self.gid) - writer.newline() - writer.simpletag("GlyphletName", value=self.name) - writer.newline() - writer.endtag("GMAPRecord") - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - value = attrs["value"] - if name == "GlyphletName": - self.name = value - else: - setattr(self, name, safeEval(value)) - - def compile(self, ttFont): - if self.UV is None: - self.UV = 0 - nameLen = len(self.name) - if nameLen < 32: - self.name = self.name + "\0" * (32 - nameLen) - data = sstruct.pack(GMAPRecordFormat1, self) - return data - - def __repr__(self): - return ( - "GMAPRecord[ UV: " - + str(self.UV) - + ", cid: " - + str(self.cid) - + ", gid: " - + str(self.gid) - + ", ggid: " - + str(self.ggid) - + ", Glyphlet Name: " - + str(self.name) - + " ]" - ) - - -class table_G_M_A_P_(DefaultTable.DefaultTable): - - dependencies = [] - - def decompile(self, data, ttFont): - dummy, newData = sstruct.unpack2(GMAPFormat, data, self) - self.psFontName = tostr(newData[: self.fontNameLength]) - assert ( - self.recordsOffset % 4 - ) == 0, "GMAP error: recordsOffset is not 32 bit aligned." 
- newData = data[self.recordsOffset :] - self.gmapRecords = [] - for i in range(self.recordsCount): - gmapRecord, newData = sstruct.unpack2( - GMAPRecordFormat1, newData, GMAPRecord() - ) - gmapRecord.name = gmapRecord.name.strip("\0") - self.gmapRecords.append(gmapRecord) - - def compile(self, ttFont): - self.recordsCount = len(self.gmapRecords) - self.fontNameLength = len(self.psFontName) - self.recordsOffset = 4 * (((self.fontNameLength + 12) + 3) // 4) - data = sstruct.pack(GMAPFormat, self) - data = data + tobytes(self.psFontName) - data = data + b"\0" * (self.recordsOffset - len(data)) - for record in self.gmapRecords: - data = data + record.compile(ttFont) - return data - - def toXML(self, writer, ttFont): - writer.comment("Most of this table will be recalculated by the compiler") - writer.newline() - formatstring, names, fixes = sstruct.getformat(GMAPFormat) - for name in names: - value = getattr(self, name) - writer.simpletag(name, value=value) - writer.newline() - writer.simpletag("PSFontName", value=self.psFontName) - writer.newline() - for gmapRecord in self.gmapRecords: - gmapRecord.toXML(writer, ttFont) - - def fromXML(self, name, attrs, content, ttFont): - if name == "GMAPRecord": - if not hasattr(self, "gmapRecords"): - self.gmapRecords = [] - gmapRecord = GMAPRecord() - self.gmapRecords.append(gmapRecord) - for element in content: - if isinstance(element, str): - continue - name, attrs, content = element - gmapRecord.fromXML(name, attrs, content, ttFont) - else: - value = attrs["value"] - if name == "PSFontName": - self.psFontName = value - else: - setattr(self, name, safeEval(value)) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/google/protobuf/timestamp_pb2.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/google/protobuf/timestamp_pb2.py deleted file mode 100644 index b10f2f2047bf0e89f584633b0abd1c94b8ada467..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/google/protobuf/timestamp_pb2.py +++ /dev/null @@ -1,27 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/protobuf/timestamp.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\";\n\tTimestamp\x12\x18\n\x07seconds\x18\x01 \x01(\x03R\x07seconds\x12\x14\n\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x85\x01\n\x13\x63om.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3') - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.timestamp_pb2', _globals) -if _descriptor._USE_C_DESCRIPTORS == False: - - DESCRIPTOR._options = None - DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\016TimestampProtoP\001Z2google.golang.org/protobuf/types/known/timestamppb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes' - _globals['_TIMESTAMP']._serialized_start=52 - _globals['_TIMESTAMP']._serialized_end=111 -# @@protoc_insertion_point(module_scope) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/interpretation.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/interpretation.py deleted file mode 100644 index 767ad641b99a51c08b4efadec350c7170bdc734b..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/interpretation.py +++ /dev/null @@ -1,328 +0,0 @@ -"""Contains classes and methods related to interpretation for components in Gradio.""" - -from __future__ import annotations - -import copy -import math -from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, Any - -import numpy as np -from gradio_client import utils as client_utils - -from gradio import components - -if TYPE_CHECKING: # Only import for type checking (is False at runtime). - from gradio import Interface - - -class Interpretable(ABC): # noqa: B024 - def __init__(self) -> None: - self.set_interpret_parameters() - - def set_interpret_parameters(self): # noqa: B027 - """ - Set any parameters for interpretation. Properties can be set here to be - used in get_interpretation_neighbors and get_interpretation_scores. - """ - pass - - def get_interpretation_scores( - self, x: Any, neighbors: list[Any] | None, scores: list[float], **kwargs - ) -> list: - """ - Arrange the output values from the neighbors into interpretation scores for the interface to render. - Parameters: - x: Input to interface - neighbors: Neighboring values to input x used for interpretation. - scores: Output value corresponding to each neighbor in neighbors - Returns: - Arrangement of interpretation scores for interfaces to render. - """ - return scores - - -class TokenInterpretable(Interpretable, ABC): - @abstractmethod - def tokenize(self, x: Any) -> tuple[list, list, None]: - """ - Interprets an input data point x by splitting it into a list of tokens (e.g - a string into words or an image into super-pixels). 
- """ - return [], [], None - - @abstractmethod - def get_masked_inputs(self, tokens: list, binary_mask_matrix: list[list]) -> list: - return [] - - -class NeighborInterpretable(Interpretable, ABC): - @abstractmethod - def get_interpretation_neighbors(self, x: Any) -> tuple[list, dict]: - """ - Generates values similar to input to be used to interpret the significance of the input in the final output. - Parameters: - x: Input to interface - Returns: (neighbor_values, interpret_kwargs, interpret_by_removal) - neighbor_values: Neighboring values to input x to compute for interpretation - interpret_kwargs: Keyword arguments to be passed to get_interpretation_scores - """ - return [], {} - - -async def run_interpret(interface: Interface, raw_input: list): - """ - Runs the interpretation command for the machine learning model. Handles both the "default" out-of-the-box - interpretation for a certain set of UI component types, as well as the custom interpretation case. - Parameters: - raw_input: a list of raw inputs to apply the interpretation(s) on. - """ - if isinstance(interface.interpretation, list): # Either "default" or "shap" - processed_input = [ - input_component.preprocess(raw_input[i]) - for i, input_component in enumerate(interface.input_components) - ] - original_output = await interface.call_function(0, processed_input) - original_output = original_output["prediction"] - - if len(interface.output_components) == 1: - original_output = [original_output] - - scores, alternative_outputs = [], [] - - for i, (x, interp) in enumerate(zip(raw_input, interface.interpretation)): - if interp == "default": - input_component = interface.input_components[i] - neighbor_raw_input = list(raw_input) - if isinstance(input_component, TokenInterpretable): - tokens, neighbor_values, masks = input_component.tokenize(x) - interface_scores = [] - alternative_output = [] - for neighbor_input in neighbor_values: - neighbor_raw_input[i] = neighbor_input - processed_neighbor_input = [ - input_component.preprocess(neighbor_raw_input[i]) - for i, input_component in enumerate( - interface.input_components - ) - ] - - neighbor_output = await interface.call_function( - 0, processed_neighbor_input - ) - neighbor_output = neighbor_output["prediction"] - if len(interface.output_components) == 1: - neighbor_output = [neighbor_output] - processed_neighbor_output = [ - output_component.postprocess(neighbor_output[i]) - for i, output_component in enumerate( - interface.output_components - ) - ] - - alternative_output.append(processed_neighbor_output) - interface_scores.append( - quantify_difference_in_label( - interface, original_output, neighbor_output - ) - ) - alternative_outputs.append(alternative_output) - scores.append( - input_component.get_interpretation_scores( - raw_input[i], - neighbor_values, - interface_scores, - masks=masks, - tokens=tokens, - ) - ) - elif isinstance(input_component, NeighborInterpretable): - ( - neighbor_values, - interpret_kwargs, - ) = input_component.get_interpretation_neighbors( - x - ) # type: ignore - interface_scores = [] - alternative_output = [] - for neighbor_input in neighbor_values: - neighbor_raw_input[i] = neighbor_input - processed_neighbor_input = [ - input_component.preprocess(neighbor_raw_input[i]) - for i, input_component in enumerate( - interface.input_components - ) - ] - neighbor_output = await interface.call_function( - 0, processed_neighbor_input - ) - neighbor_output = neighbor_output["prediction"] - if len(interface.output_components) == 1: - neighbor_output = 
[neighbor_output] - processed_neighbor_output = [ - output_component.postprocess(neighbor_output[i]) - for i, output_component in enumerate( - interface.output_components - ) - ] - - alternative_output.append(processed_neighbor_output) - interface_scores.append( - quantify_difference_in_label( - interface, original_output, neighbor_output - ) - ) - alternative_outputs.append(alternative_output) - interface_scores = [-score for score in interface_scores] - scores.append( - input_component.get_interpretation_scores( - raw_input[i], - neighbor_values, - interface_scores, - **interpret_kwargs, - ) - ) - else: - raise ValueError( - f"Component {input_component} does not support interpretation" - ) - elif interp == "shap" or interp == "shapley": - try: - import shap # type: ignore - except (ImportError, ModuleNotFoundError) as err: - raise ValueError( - "The package `shap` is required for this interpretation method. Try: `pip install shap`" - ) from err - input_component = interface.input_components[i] - if not isinstance(input_component, TokenInterpretable): - raise ValueError( - f"Input component {input_component} does not support `shap` interpretation" - ) - - tokens, _, masks = input_component.tokenize(x) - - # construct a masked version of the input - def get_masked_prediction(binary_mask): - assert isinstance(input_component, TokenInterpretable) - masked_xs = input_component.get_masked_inputs(tokens, binary_mask) - preds = [] - for masked_x in masked_xs: - processed_masked_input = copy.deepcopy(processed_input) - processed_masked_input[i] = input_component.preprocess(masked_x) - new_output = client_utils.synchronize_async( - interface.call_function, 0, processed_masked_input - ) - new_output = new_output["prediction"] - if len(interface.output_components) == 1: - new_output = [new_output] - pred = get_regression_or_classification_value( - interface, original_output, new_output - ) - preds.append(pred) - return np.array(preds) - - num_total_segments = len(tokens) - explainer = shap.KernelExplainer( - get_masked_prediction, np.zeros((1, num_total_segments)) - ) - shap_values = explainer.shap_values( - np.ones((1, num_total_segments)), - nsamples=int(interface.num_shap * num_total_segments), - silent=True, - ) - assert shap_values is not None, "SHAP values could not be calculated" - scores.append( - input_component.get_interpretation_scores( - raw_input[i], - None, - shap_values[0].tolist(), - masks=masks, - tokens=tokens, - ) - ) - alternative_outputs.append([]) - elif interp is None: - scores.append(None) - alternative_outputs.append([]) - else: - raise ValueError(f"Unknown interpretation method: {interp}") - return scores, alternative_outputs - elif interface.interpretation: # custom interpretation function - processed_input = [ - input_component.preprocess(raw_input[i]) - for i, input_component in enumerate(interface.input_components) - ] - interpreter = interface.interpretation - interpretation = interpreter(*processed_input) - if len(raw_input) == 1: - interpretation = [interpretation] - return interpretation, [] - else: - raise ValueError("No interpretation method specified.") - - -def diff(original: Any, perturbed: Any) -> int | float: - try: # try computing numerical difference - score = float(original) - float(perturbed) - except ValueError: # otherwise, look at strict difference in label - score = int(original != perturbed) - return score - - -def quantify_difference_in_label( - interface: Interface, original_output: list, perturbed_output: list -) -> int | float: - 
output_component = interface.output_components[0] - post_original_output = output_component.postprocess(original_output[0]) - post_perturbed_output = output_component.postprocess(perturbed_output[0]) - - if isinstance(output_component, components.Label): - original_label = post_original_output["label"] - perturbed_label = post_perturbed_output["label"] - - # Handle different return types of Label interface - if "confidences" in post_original_output: - original_confidence = original_output[0][original_label] - perturbed_confidence = perturbed_output[0][original_label] - score = original_confidence - perturbed_confidence - else: - score = diff(original_label, perturbed_label) - return score - - elif isinstance(output_component, components.Number): - score = diff(post_original_output, post_perturbed_output) - return score - - else: - raise ValueError( - f"This interpretation method doesn't support the Output component: {output_component}" - ) - - -def get_regression_or_classification_value( - interface: Interface, original_output: list, perturbed_output: list -) -> int | float: - """Used to combine regression/classification for Shap interpretation method.""" - output_component = interface.output_components[0] - post_original_output = output_component.postprocess(original_output[0]) - post_perturbed_output = output_component.postprocess(perturbed_output[0]) - - if isinstance(output_component, components.Label): - original_label = post_original_output["label"] - perturbed_label = post_perturbed_output["label"] - - # Handle different return types of Label interface - if "confidences" in post_original_output: - if math.isnan(perturbed_output[0][original_label]): - return 0 - return perturbed_output[0][original_label] - else: - score = diff( - perturbed_label, original_label - ) # Intentionally inverted order of arguments. 
- return score - - else: - raise ValueError( - f"This interpretation method doesn't support the Output component: {output_component}" - ) diff --git a/spaces/clem/dreambooth-training_v2/train_dreambooth.py b/spaces/clem/dreambooth-training_v2/train_dreambooth.py deleted file mode 100644 index c18edc83b6a5850b86ee75c8ef2f36bb91691b95..0000000000000000000000000000000000000000 --- a/spaces/clem/dreambooth-training_v2/train_dreambooth.py +++ /dev/null @@ -1,818 +0,0 @@ -import argparse -import itertools -import math -import os -from pathlib import Path -from typing import Optional -import subprocess -import sys - -import torch -import torch.nn.functional as F -import torch.utils.checkpoint -from torch.utils.data import Dataset - -from accelerate import Accelerator -from accelerate.logging import get_logger -from accelerate.utils import set_seed -from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel -from diffusers.optimization import get_scheduler -from huggingface_hub import HfFolder, Repository, whoami -from PIL import Image -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import CLIPTextModel, CLIPTokenizer - - -logger = get_logger(__name__) - - -def parse_args(): - parser = argparse.ArgumentParser(description="Simple example of a training script.") - parser.add_argument( - "--pretrained_model_name_or_path", - type=str, - default=None, - #required=True, - help="Path to pretrained model or model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--tokenizer_name", - type=str, - default=None, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--instance_data_dir", - type=str, - default=None, - #required=True, - help="A folder containing the training data of instance images.", - ) - parser.add_argument( - "--class_data_dir", - type=str, - default=None, - required=False, - help="A folder containing the training data of class images.", - ) - parser.add_argument( - "--instance_prompt", - type=str, - default=None, - help="The prompt with identifier specifying the instance", - ) - parser.add_argument( - "--class_prompt", - type=str, - default="", - help="The prompt to specify images in the same class as provided instance images.", - ) - parser.add_argument( - "--with_prior_preservation", - default=False, - action="store_true", - help="Flag to add prior preservation loss.", - ) - parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") - parser.add_argument( - "--num_class_images", - type=int, - default=100, - help=( - "Minimal class images for prior preservation loss. If not have enough images, additional images will be" - " sampled with class_prompt." 
- ), - ) - parser.add_argument( - "--output_dir", - type=str, - default="", - help="The output directory where the model predictions and checkpoints will be written.", - ) - parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") - parser.add_argument( - "--resolution", - type=int, - default=512, - help=( - "The resolution for input images, all the images in the train/validation dataset will be resized to this" - " resolution" - ), - ) - parser.add_argument( - "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution" - ) - parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") - parser.add_argument( - "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." - ) - parser.add_argument( - "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." - ) - parser.add_argument("--num_train_epochs", type=int, default=1) - parser.add_argument( - "--max_train_steps", - type=int, - default=None, - help="Total number of training steps to perform. If provided, overrides num_train_epochs.", - ) - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument( - "--gradient_checkpointing", - action="store_true", - help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=5e-6, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument( - "--scale_lr", - action="store_true", - default=False, - help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", - ) - parser.add_argument( - "--lr_scheduler", - type=str, - default="constant", - help=( - 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' - ' "constant", "constant_with_warmup"]' - ), - ) - parser.add_argument( - "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." - ) - parser.add_argument( - "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." - ) - parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") - parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") - parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") - parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") - parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") - parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") - parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") - parser.add_argument( - "--hub_model_id", - type=str, - default=None, - help="The name of the repository to keep in sync with the local `output_dir`.", - ) - parser.add_argument( - "--logging_dir", - type=str, - default="logs", - help=( - "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. 
Will default to" - " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." - ), - ) - parser.add_argument( - "--mixed_precision", - type=str, - default="no", - choices=["no", "fp16", "bf16"], - help=( - "Whether to use mixed precision. Choose" - "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." - "and an Nvidia Ampere GPU." - ), - ) - - parser.add_argument( - "--save_n_steps", - type=int, - default=1, - help=("Save the model every n global_steps"), - ) - - - parser.add_argument( - "--save_starting_step", - type=int, - default=1, - help=("The step from which it starts saving intermediary checkpoints"), - ) - - parser.add_argument( - "--stop_text_encoder_training", - type=int, - default=1000000, - help=("The step at which the text_encoder is no longer trained"), - ) - - - parser.add_argument( - "--image_captions_filename", - action="store_true", - help="Get captions from filename", - ) - - - parser.add_argument( - "--dump_only_text_encoder", - action="store_true", - default=False, - help="Dump only text encoder", - ) - - parser.add_argument( - "--train_only_unet", - action="store_true", - default=False, - help="Train only the unet", - ) - - parser.add_argument( - "--Session_dir", - type=str, - default="", - help="Current session directory", - ) - - - - - parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") - - args = parser.parse_args() - env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) - if env_local_rank != -1 and env_local_rank != args.local_rank: - args.local_rank = env_local_rank - - #if args.instance_data_dir is None: - # raise ValueError("You must specify a train data directory.") - - #if args.with_prior_preservation: - # if args.class_data_dir is None: - # raise ValueError("You must specify a data directory for class images.") - # if args.class_prompt is None: - # raise ValueError("You must specify prompt for class images.") - - return args - - -class DreamBoothDataset(Dataset): - """ - A dataset to prepare the instance and class images with the prompts for fine-tuning the model. - It pre-processes the images and the tokenizes prompts. 
- """ - - def __init__( - self, - instance_data_root, - instance_prompt, - tokenizer, - args, - class_data_root=None, - class_prompt=None, - size=512, - center_crop=False, - ): - self.size = size - self.center_crop = center_crop - self.tokenizer = tokenizer - self.image_captions_filename = None - - self.instance_data_root = Path(instance_data_root) - if not self.instance_data_root.exists(): - raise ValueError("Instance images root doesn't exists.") - - self.instance_images_path = list(Path(instance_data_root).iterdir()) - self.num_instance_images = len(self.instance_images_path) - self.instance_prompt = instance_prompt - self._length = self.num_instance_images - - if args.image_captions_filename: - self.image_captions_filename = True - - if class_data_root is not None: - self.class_data_root = Path(class_data_root) - self.class_data_root.mkdir(parents=True, exist_ok=True) - self.class_images_path = list(self.class_data_root.iterdir()) - self.num_class_images = len(self.class_images_path) - self._length = max(self.num_class_images, self.num_instance_images) - self.class_prompt = class_prompt - else: - self.class_data_root = None - - self.image_transforms = transforms.Compose( - [ - transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), - transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), - transforms.ToTensor(), - transforms.Normalize([0.5], [0.5]), - ] - ) - - def __len__(self): - return self._length - - def __getitem__(self, index): - example = {} - path = self.instance_images_path[index % self.num_instance_images] - instance_image = Image.open(path) - if not instance_image.mode == "RGB": - instance_image = instance_image.convert("RGB") - - instance_prompt = self.instance_prompt - - if self.image_captions_filename: - filename = Path(path).stem - pt=''.join([i for i in filename if not i.isdigit()]) - pt=pt.replace("_"," ") - pt=pt.replace("(","") - pt=pt.replace(")","") - instance_prompt = pt - sys.stdout.write(" " +instance_prompt+" ") - sys.stdout.flush() - - - example["instance_images"] = self.image_transforms(instance_image) - example["instance_prompt_ids"] = self.tokenizer( - instance_prompt, - padding="do_not_pad", - truncation=True, - max_length=self.tokenizer.model_max_length, - ).input_ids - - if self.class_data_root: - class_image = Image.open(self.class_images_path[index % self.num_class_images]) - if not class_image.mode == "RGB": - class_image = class_image.convert("RGB") - example["class_images"] = self.image_transforms(class_image) - example["class_prompt_ids"] = self.tokenizer( - self.class_prompt, - padding="do_not_pad", - truncation=True, - max_length=self.tokenizer.model_max_length, - ).input_ids - - return example - - - -class PromptDataset(Dataset): - "A simple dataset to prepare the prompts to generate class images on multiple GPUs." 
- - def __init__(self, prompt, num_samples): - self.prompt = prompt - self.num_samples = num_samples - - def __len__(self): - return self.num_samples - - def __getitem__(self, index): - example = {} - example["prompt"] = self.prompt - example["index"] = index - return example - - -def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None): - if token is None: - token = HfFolder.get_token() - if organization is None: - username = whoami(token)["name"] - return f"{username}/{model_id}" - else: - return f"{organization}/{model_id}" - -def merge_two_dicts(starting_dict: dict, updater_dict: dict) -> dict: - """ - Starts from base starting dict and then adds the remaining key values from updater replacing the values from - the first starting/base dict with the second updater dict. - - For later: how does d = {**d1, **d2} replace collision? - - :param starting_dict: - :param updater_dict: - :return: - """ - new_dict: dict = starting_dict.copy() # start with keys and values of starting_dict - new_dict.update(updater_dict) # modifies starting_dict with keys and values of updater_dict - return new_dict - -def merge_args(args1: argparse.Namespace, args2: argparse.Namespace) -> argparse.Namespace: - """ - - ref: https://stackoverflow.com/questions/56136549/how-can-i-merge-two-argparse-namespaces-in-python-2-x - :param args1: - :param args2: - :return: - """ - # - the merged args - # The vars() function returns the __dict__ attribute to values of the given object e.g {field:value}. - merged_key_values_for_namespace: dict = merge_two_dicts(vars(args1), vars(args2)) - args = argparse.Namespace(**merged_key_values_for_namespace) - return args - -def run_training(args_imported): - args_default = parse_args() - args = merge_args(args_default, args_imported) - print(args) - logging_dir = Path(args.output_dir, args.logging_dir) - i=args.save_starting_step - accelerator = Accelerator( - gradient_accumulation_steps=args.gradient_accumulation_steps, - mixed_precision=args.mixed_precision, - log_with="tensorboard", - logging_dir=logging_dir, - ) - - # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate - # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. - # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. - if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: - raise ValueError( - "Gradient accumulation is not supported when training the text encoder in distributed training. " - "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." 
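As an aside on merge_two_dicts and merge_args defined above: run_training builds its configuration by overlaying an imported namespace on top of the parsed defaults, so on key collisions the value from the second namespace wins. A minimal sketch of that behaviour, with made-up field names:

import argparse

defaults = argparse.Namespace(learning_rate=5e-6, resolution=512)
overrides = argparse.Namespace(resolution=768)
# Same semantics as merge_two_dicts: start from the first dict, then update
# with the second, so overlapping keys take the overriding value.
merged = argparse.Namespace(**{**vars(defaults), **vars(overrides)})
print(merged.learning_rate, merged.resolution)  # 5e-06 768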
- ) - - if args.seed is not None: - set_seed(args.seed) - - if args.with_prior_preservation: - class_images_dir = Path(args.class_data_dir) - if not class_images_dir.exists(): - class_images_dir.mkdir(parents=True) - cur_class_images = len(list(class_images_dir.iterdir())) - - if cur_class_images < args.num_class_images: - torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32 - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, torch_dtype=torch_dtype - ) - pipeline.set_progress_bar_config(disable=True) - - num_new_images = args.num_class_images - cur_class_images - logger.info(f"Number of class images to sample: {num_new_images}.") - - sample_dataset = PromptDataset(args.class_prompt, num_new_images) - sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) - - sample_dataloader = accelerator.prepare(sample_dataloader) - pipeline.to(accelerator.device) - - for example in tqdm( - sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process - ): - with torch.autocast("cuda"): - images = pipeline(example["prompt"]).images - - for i, image in enumerate(images): - image.save(class_images_dir / f"{example['index'][i] + cur_class_images}.jpg") - - del pipeline - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - # Handle the repository creation - if accelerator.is_main_process: - if args.push_to_hub: - if args.hub_model_id is None: - repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) - else: - repo_name = args.hub_model_id - repo = Repository(args.output_dir, clone_from=repo_name) - - with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: - if "step_*" not in gitignore: - gitignore.write("step_*\n") - if "epoch_*" not in gitignore: - gitignore.write("epoch_*\n") - elif args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - - # Load the tokenizer - if args.tokenizer_name: - tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) - elif args.pretrained_model_name_or_path: - tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") - - # Load models and create wrapper for stable diffusion - if args.train_only_unet: - if os.path.exists(str(args.output_dir+"/text_encoder_trained")): - text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder_trained") - elif os.path.exists(str(args.output_dir+"/text_encoder")): - text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder") - else: - text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") - else: - text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") - vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae") - unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet") - - vae.requires_grad_(False) - if not args.train_text_encoder: - text_encoder.requires_grad_(False) - - if args.gradient_checkpointing: - unet.enable_gradient_checkpointing() - if args.train_text_encoder: - text_encoder.gradient_checkpointing_enable() - - if args.scale_lr: - args.learning_rate = ( - args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes - ) - - # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB 
GPUs - if args.use_8bit_adam: - try: - import bitsandbytes as bnb - except ImportError: - raise ImportError( - "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." - ) - - optimizer_class = bnb.optim.AdamW8bit - else: - optimizer_class = torch.optim.AdamW - - params_to_optimize = ( - itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() - ) - optimizer = optimizer_class( - params_to_optimize, - lr=args.learning_rate, - betas=(args.adam_beta1, args.adam_beta2), - weight_decay=args.adam_weight_decay, - eps=args.adam_epsilon, - ) - - noise_scheduler = DDPMScheduler( - beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000 - ) - - train_dataset = DreamBoothDataset( - instance_data_root=args.instance_data_dir, - instance_prompt=args.instance_prompt, - class_data_root=args.class_data_dir if args.with_prior_preservation else None, - class_prompt=args.class_prompt, - tokenizer=tokenizer, - size=args.resolution, - center_crop=args.center_crop, - args=args, - ) - - def collate_fn(examples): - input_ids = [example["instance_prompt_ids"] for example in examples] - pixel_values = [example["instance_images"] for example in examples] - - # Concat class and instance examples for prior preservation. - # We do this to avoid doing two forward passes. - if args.with_prior_preservation: - input_ids += [example["class_prompt_ids"] for example in examples] - pixel_values += [example["class_images"] for example in examples] - - pixel_values = torch.stack(pixel_values) - pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() - - input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids - - batch = { - "input_ids": input_ids, - "pixel_values": pixel_values, - } - return batch - - train_dataloader = torch.utils.data.DataLoader( - train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn - ) - - # Scheduler and math around the number of training steps. - overrode_max_train_steps = False - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if args.max_train_steps is None: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - overrode_max_train_steps = True - - lr_scheduler = get_scheduler( - args.lr_scheduler, - optimizer=optimizer, - num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, - num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, - ) - - if args.train_text_encoder: - unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - unet, text_encoder, optimizer, train_dataloader, lr_scheduler - ) - else: - unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - unet, optimizer, train_dataloader, lr_scheduler - ) - - weight_dtype = torch.float32 - if args.mixed_precision == "fp16": - weight_dtype = torch.float16 - elif args.mixed_precision == "bf16": - weight_dtype = torch.bfloat16 - - # Move text_encode and vae to gpu. - # For mixed precision training we cast the text_encoder and vae weights to half-precision - # as these models are only used for inference, keeping weights in full precision is not required. 
- vae.to(accelerator.device, dtype=weight_dtype) - if not args.train_text_encoder: - text_encoder.to(accelerator.device, dtype=weight_dtype) - - # We need to recalculate our total training steps as the size of the training dataloader may have changed. - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if overrode_max_train_steps: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - # Afterwards we recalculate our number of training epochs - args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) - - # We need to initialize the trackers we use, and also store our configuration. - # The trackers initializes automatically on the main process. - if accelerator.is_main_process: - accelerator.init_trackers("dreambooth", config=vars(args)) - - def bar(prg): - br='|'+'█' * prg + ' ' * (25-prg)+'|' - return br - - # Train! - total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps - - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num batches each epoch = {len(train_dataloader)}") - logger.info(f" Num Epochs = {args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") - logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") - logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - logger.info(f" Total optimization steps = {args.max_train_steps}") - # Only show the progress bar once on each machine. - progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) - global_step = 0 - - for epoch in range(args.num_train_epochs): - unet.train() - if args.train_text_encoder: - text_encoder.train() - for step, batch in enumerate(train_dataloader): - with accelerator.accumulate(unet): - # Convert images to latent space - latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() - latents = latents * 0.18215 - - # Sample noise that we'll add to the latents - noise = torch.randn_like(latents) - bsz = latents.shape[0] - # Sample a random timestep for each image - timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) - timesteps = timesteps.long() - - # Add noise to the latents according to the noise magnitude at each timestep - # (this is the forward diffusion process) - noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) - - # Get the text embedding for conditioning - encoder_hidden_states = text_encoder(batch["input_ids"])[0] - - # Predict the noise residual - noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample - - if args.with_prior_preservation: - # Chunk the noise and noise_pred into two parts and compute the loss on each part separately. - noise_pred, noise_pred_prior = torch.chunk(noise_pred, 2, dim=0) - noise, noise_prior = torch.chunk(noise, 2, dim=0) - - # Compute instance loss - loss = F.mse_loss(noise_pred.float(), noise.float(), reduction="none").mean([1, 2, 3]).mean() - - # Compute prior loss - prior_loss = F.mse_loss(noise_pred_prior.float(), noise_prior.float(), reduction="mean") - - # Add the prior loss to the instance loss. 
- loss = loss + args.prior_loss_weight * prior_loss - else: - loss = F.mse_loss(noise_pred.float(), noise.float(), reduction="mean") - - accelerator.backward(loss) - if accelerator.sync_gradients: - params_to_clip = ( - itertools.chain(unet.parameters(), text_encoder.parameters()) - if args.train_text_encoder - else unet.parameters() - ) - accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) - optimizer.step() - lr_scheduler.step() - optimizer.zero_grad() - - # Checks if the accelerator has performed an optimization step behind the scenes - if accelerator.sync_gradients: - progress_bar.update(1) - global_step += 1 - - fll=round((global_step*100)/args.max_train_steps) - fll=round(fll/4) - pr=bar(fll) - - logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} - progress_bar.set_postfix(**logs) - progress_bar.set_description_str("Progress:"+pr) - accelerator.log(logs, step=global_step) - - if global_step >= args.max_train_steps: - break - - if args.train_text_encoder and global_step == args.stop_text_encoder_training and global_step >= 30: - if accelerator.is_main_process: - print(" " +" Freezing the text_encoder ..."+" ") - frz_dir=args.output_dir + "/text_encoder_frozen" - if os.path.exists(frz_dir): - subprocess.call('rm -r '+ frz_dir, shell=True) - os.mkdir(frz_dir) - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.text_encoder.save_pretrained(frz_dir) - - if args.save_n_steps >= 200: - if global_step < args.max_train_steps-100 and global_step+1==i: - ckpt_name = "_step_" + str(global_step+1) - save_dir = Path(args.output_dir+ckpt_name) - save_dir=str(save_dir) - save_dir=save_dir.replace(" ", "_") - if not os.path.exists(save_dir): - os.mkdir(save_dir) - inst=save_dir[16:] - inst=inst.replace(" ", "_") - print(" SAVING CHECKPOINT: "+args.Session_dir+"/"+inst+".ckpt") - # Create the pipeline using the trained modules and save it. - if accelerator.is_main_process: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.save_pretrained(save_dir) - frz_dir=args.output_dir + "/text_encoder_frozen" - if args.train_text_encoder and os.path.exists(frz_dir): - subprocess.call('rm -r '+save_dir+'/text_encoder/*.*', shell=True) - subprocess.call('cp -f '+frz_dir +'/*.* '+ save_dir+'/text_encoder', shell=True) - chkpth=args.Session_dir+"/"+inst+".ckpt" - subprocess.call('python /content/diffusers/scripts/convert_diffusers_to_original_stable_diffusion.py --model_path ' + save_dir + ' --checkpoint_path ' + chkpth + ' --half', shell=True) - i=i+args.save_n_steps - - accelerator.wait_for_everyone() - - # Create the pipeline using using the trained modules and save it. 
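For reference on the prior-preservation objective computed in the training loop above: the batch is split into an instance half and a class half, each half gets its own MSE against the corresponding noise target, and the class term is added scaled by prior_loss_weight. A minimal tensor-level sketch, using made-up shapes and the default weight of 1.0:

import torch
import torch.nn.functional as F

# Pretend batch of 2 latents: first half instance images, second half class
# images, split the same way torch.chunk is used in the loop above.
noise_pred, noise_pred_prior = torch.randn(2, 4, 64, 64).chunk(2, dim=0)
noise, noise_prior = torch.randn(2, 4, 64, 64).chunk(2, dim=0)

instance_loss = F.mse_loss(noise_pred.float(), noise.float(), reduction="none").mean([1, 2, 3]).mean()
prior_loss = F.mse_loss(noise_pred_prior.float(), noise_prior.float(), reduction="mean")
loss = instance_loss + 1.0 * prior_loss  # 1.0 matches the --prior_loss_weight default
print(float(loss))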
- if accelerator.is_main_process: - if args.dump_only_text_encoder: - txt_dir=args.output_dir + "/text_encoder_trained" - if not os.path.exists(txt_dir): - os.mkdir(txt_dir) - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.text_encoder.save_pretrained(txt_dir) - - elif args.train_only_unet: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.save_pretrained(args.output_dir) - txt_dir=args.output_dir + "/text_encoder_trained" - subprocess.call('rm -r '+txt_dir, shell=True) - - else: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - frz_dir=args.output_dir + "/text_encoder_frozen" - pipeline.save_pretrained(args.output_dir) - if args.train_text_encoder and os.path.exists(frz_dir): - subprocess.call('mv -f '+frz_dir +'/*.* '+ args.output_dir+'/text_encoder', shell=True) - subprocess.call('rm -r '+ frz_dir, shell=True) - - if args.push_to_hub: - repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True) - - accelerator.end_training() - -if __name__ == "__main__": - pass - #main() diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/sbixGlyph.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/sbixGlyph.py deleted file mode 100644 index fd687a18808b6b2655951f9a6934916d7bafbc71..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fontTools/ttLib/tables/sbixGlyph.py +++ /dev/null @@ -1,145 +0,0 @@ -from fontTools.misc import sstruct -from fontTools.misc.textTools import readHex, safeEval -import struct - - -sbixGlyphHeaderFormat = """ - > - originOffsetX: h # The x-value of the point in the glyph relative to its - # lower-left corner which corresponds to the origin of - # the glyph on the screen, that is the point on the - # baseline at the left edge of the glyph. - originOffsetY: h # The y-value of the point in the glyph relative to its - # lower-left corner which corresponds to the origin of - # the glyph on the screen, that is the point on the - # baseline at the left edge of the glyph. - graphicType: 4s # e.g. "png " -""" - -sbixGlyphHeaderFormatSize = sstruct.calcsize(sbixGlyphHeaderFormat) - - -class Glyph(object): - def __init__( - self, - glyphName=None, - referenceGlyphName=None, - originOffsetX=0, - originOffsetY=0, - graphicType=None, - imageData=None, - rawdata=None, - gid=0, - ): - self.gid = gid - self.glyphName = glyphName - self.referenceGlyphName = referenceGlyphName - self.originOffsetX = originOffsetX - self.originOffsetY = originOffsetY - self.rawdata = rawdata - self.graphicType = graphicType - self.imageData = imageData - - # fix self.graphicType if it is null terminated or too short - if self.graphicType is not None: - if self.graphicType[-1] == "\0": - self.graphicType = self.graphicType[:-1] - if len(self.graphicType) > 4: - from fontTools import ttLib - - raise ttLib.TTLibError( - "Glyph.graphicType must not be longer than 4 characters." 
- ) - elif len(self.graphicType) < 4: - # pad with spaces - self.graphicType += " "[: (4 - len(self.graphicType))] - - def decompile(self, ttFont): - self.glyphName = ttFont.getGlyphName(self.gid) - if self.rawdata is None: - from fontTools import ttLib - - raise ttLib.TTLibError("No table data to decompile") - if len(self.rawdata) > 0: - if len(self.rawdata) < sbixGlyphHeaderFormatSize: - from fontTools import ttLib - - # print "Glyph %i header too short: Expected %x, got %x." % (self.gid, sbixGlyphHeaderFormatSize, len(self.rawdata)) - raise ttLib.TTLibError("Glyph header too short.") - - sstruct.unpack( - sbixGlyphHeaderFormat, self.rawdata[:sbixGlyphHeaderFormatSize], self - ) - - if self.graphicType == "dupe": - # this glyph is a reference to another glyph's image data - (gid,) = struct.unpack(">H", self.rawdata[sbixGlyphHeaderFormatSize:]) - self.referenceGlyphName = ttFont.getGlyphName(gid) - else: - self.imageData = self.rawdata[sbixGlyphHeaderFormatSize:] - self.referenceGlyphName = None - # clean up - del self.rawdata - del self.gid - - def compile(self, ttFont): - if self.glyphName is None: - from fontTools import ttLib - - raise ttLib.TTLibError("Can't compile Glyph without glyph name") - # TODO: if ttFont has no maxp, cmap etc., ignore glyph names and compile by index? - # (needed if you just want to compile the sbix table on its own) - self.gid = struct.pack(">H", ttFont.getGlyphID(self.glyphName)) - if self.graphicType is None: - rawdata = b"" - else: - rawdata = sstruct.pack(sbixGlyphHeaderFormat, self) - if self.graphicType == "dupe": - rawdata += struct.pack(">H", ttFont.getGlyphID(self.referenceGlyphName)) - else: - assert self.imageData is not None - rawdata += self.imageData - self.rawdata = rawdata - - def toXML(self, xmlWriter, ttFont): - if self.graphicType is None: - # TODO: ignore empty glyphs? - # a glyph data entry is required for each glyph, - # but empty ones can be calculated at compile time - xmlWriter.simpletag("glyph", name=self.glyphName) - xmlWriter.newline() - return - xmlWriter.begintag( - "glyph", - graphicType=self.graphicType, - name=self.glyphName, - originOffsetX=self.originOffsetX, - originOffsetY=self.originOffsetY, - ) - xmlWriter.newline() - if self.graphicType == "dupe": - # graphicType == "dupe" is a reference to another glyph id. - xmlWriter.simpletag("ref", glyphname=self.referenceGlyphName) - else: - xmlWriter.begintag("hexdata") - xmlWriter.newline() - xmlWriter.dumphex(self.imageData) - xmlWriter.endtag("hexdata") - xmlWriter.newline() - xmlWriter.endtag("glyph") - xmlWriter.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "ref": - # glyph is a "dupe", i.e. a reference to another glyph's image data. 
- # in this case imageData contains the glyph id of the reference glyph - # get glyph id from glyphname - glyphname = safeEval("'''" + attrs["glyphname"] + "'''") - self.imageData = struct.pack(">H", ttFont.getGlyphID(glyphname)) - self.referenceGlyphName = glyphname - elif name == "hexdata": - self.imageData = readHex(content) - else: - from fontTools import ttLib - - raise ttLib.TTLibError("can't handle '%s' element" % name) diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/h264_levels.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/h264_levels.c deleted file mode 100644 index f7ed9a6e375bec9a83270128c2cc535ae1fa1700..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/h264_levels.c +++ /dev/null @@ -1,123 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include -#include "libavutil/macros.h" -#include "h264_levels.h" - -// H.264 table A-1. -static const H264LevelDescriptor h264_levels[] = { - // Name MaxMBPS MaxBR MinCR - // | level_idc | MaxFS | MaxCPB | MaxMvsPer2Mb - // | | cs3f | | MaxDpbMbs | | MaxVmvR | | - { "1", 10, 0, 1485, 99, 396, 64, 175, 64, 2, 0 }, - { "1b", 11, 1, 1485, 99, 396, 128, 350, 64, 2, 0 }, - { "1b", 9, 0, 1485, 99, 396, 128, 350, 64, 2, 0 }, - { "1.1", 11, 0, 3000, 396, 900, 192, 500, 128, 2, 0 }, - { "1.2", 12, 0, 6000, 396, 2376, 384, 1000, 128, 2, 0 }, - { "1.3", 13, 0, 11880, 396, 2376, 768, 2000, 128, 2, 0 }, - { "2", 20, 0, 11880, 396, 2376, 2000, 2000, 128, 2, 0 }, - { "2.1", 21, 0, 19800, 792, 4752, 4000, 4000, 256, 2, 0 }, - { "2.2", 22, 0, 20250, 1620, 8100, 4000, 4000, 256, 2, 0 }, - { "3", 30, 0, 40500, 1620, 8100, 10000, 10000, 256, 2, 32 }, - { "3.1", 31, 0, 108000, 3600, 18000, 14000, 14000, 512, 4, 16 }, - { "3.2", 32, 0, 216000, 5120, 20480, 20000, 20000, 512, 4, 16 }, - { "4", 40, 0, 245760, 8192, 32768, 20000, 25000, 512, 4, 16 }, - { "4.1", 41, 0, 245760, 8192, 32768, 50000, 62500, 512, 2, 16 }, - { "4.2", 42, 0, 522240, 8704, 34816, 50000, 62500, 512, 2, 16 }, - { "5", 50, 0, 589824, 22080, 110400, 135000, 135000, 512, 2, 16 }, - { "5.1", 51, 0, 983040, 36864, 184320, 240000, 240000, 512, 2, 16 }, - { "5.2", 52, 0, 2073600, 36864, 184320, 240000, 240000, 512, 2, 16 }, - { "6", 60, 0, 4177920, 139264, 696320, 240000, 240000, 8192, 2, 16 }, - { "6.1", 61, 0, 8355840, 139264, 696320, 480000, 480000, 8192, 2, 16 }, - { "6.2", 62, 0, 16711680, 139264, 696320, 800000, 800000, 8192, 2, 16 }, -}; - -// H.264 table A-2 plus values from A-1. 
-static const struct { - int profile_idc; - int cpb_br_vcl_factor; - int cpb_br_nal_factor; -} h264_br_factors[] = { - { 66, 1000, 1200 }, - { 77, 1000, 1200 }, - { 88, 1000, 1200 }, - { 100, 1250, 1500 }, - { 110, 3000, 3600 }, - { 122, 4000, 4800 }, - { 244, 4000, 4800 }, - { 44, 4000, 4800 }, -}; - -// We are only ever interested in the NAL bitrate factor. -static int h264_get_br_factor(int profile_idc) -{ - int i; - for (i = 0; i < FF_ARRAY_ELEMS(h264_br_factors); i++) { - if (h264_br_factors[i].profile_idc == profile_idc) - return h264_br_factors[i].cpb_br_nal_factor; - } - // Default to the non-high profile value if not specified. - return 1200; -} - -const H264LevelDescriptor *ff_h264_guess_level(int profile_idc, - int64_t bitrate, - int framerate, - int width, int height, - int max_dec_frame_buffering) -{ - int width_mbs = (width + 15) / 16; - int height_mbs = (height + 15) / 16; - int no_cs3f = !(profile_idc == 66 || - profile_idc == 77 || - profile_idc == 88); - int i; - - for (i = 0; i < FF_ARRAY_ELEMS(h264_levels); i++) { - const H264LevelDescriptor *level = &h264_levels[i]; - - if (level->constraint_set3_flag && no_cs3f) - continue; - - if (bitrate > (int64_t)level->max_br * h264_get_br_factor(profile_idc)) - continue; - - if (width_mbs * height_mbs > level->max_fs) - continue; - if (width_mbs * width_mbs > 8 * level->max_fs) - continue; - if (height_mbs * height_mbs > 8 * level->max_fs) - continue; - - if (width_mbs && height_mbs) { - int max_dpb_frames = - FFMIN(level->max_dpb_mbs / (width_mbs * height_mbs), 16); - if (max_dec_frame_buffering > max_dpb_frames) - continue; - - if (framerate > (level->max_mbps / (width_mbs * height_mbs))) - continue; - } - - return level; - } - - // No usable levels found - frame is too big or bitrate is too high. - return NULL; -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/huffyuvenc.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/huffyuvenc.c deleted file mode 100644 index 72d6246ebe0ac431c00a8c2f4ef2ff725392f710..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/huffyuvenc.c +++ /dev/null @@ -1,1131 +0,0 @@ -/* - * Copyright (c) 2002-2014 Michael Niedermayer - * - * see https://multimedia.cx/huffyuv.txt for a description of - * the algorithm used - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - * - * yuva, gray, 4:4:4, 4:1:1, 4:1:0 and >8 bit per sample support sponsored by NOA - */ - -/** - * @file - * huffyuv encoder - */ - -#include "config_components.h" - -#include "avcodec.h" -#include "bswapdsp.h" -#include "codec_internal.h" -#include "encode.h" -#include "huffyuv.h" -#include "huffman.h" -#include "huffyuvencdsp.h" -#include "lossless_videoencdsp.h" -#include "put_bits.h" -#include "libavutil/opt.h" -#include "libavutil/pixdesc.h" - -typedef struct HYuvEncContext { - AVClass *class; - AVCodecContext *avctx; - PutBitContext pb; - Predictor predictor; - int interlaced; - int decorrelate; - int bitstream_bpp; - int version; - int bps; - int n; // 1<bps <= 8) { - s->llvidencdsp.diff_bytes(dst, src0, src1, w); - } else { - s->hencdsp.diff_int16((uint16_t *)dst, (const uint16_t *)src0, (const uint16_t *)src1, s->n - 1, w); - } -} - -static inline int sub_left_prediction(HYuvEncContext *s, uint8_t *dst, - const uint8_t *src, int w, int left) -{ - int i; - int min_width = FFMIN(w, 32); - - if (s->bps <= 8) { - for (i = 0; i < min_width; i++) { /* scalar loop before dsp call */ - const int temp = src[i]; - dst[i] = temp - left; - left = temp; - } - if (w < 32) - return left; - s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 31, w - 32); - return src[w-1]; - } else { - const uint16_t *src16 = (const uint16_t *)src; - uint16_t *dst16 = ( uint16_t *)dst; - for (i = 0; i < min_width; i++) { /* scalar loop before dsp call */ - const int temp = src16[i]; - dst16[i] = temp - left; - left = temp; - } - if (w < 32) - return left; - s->hencdsp.diff_int16(dst16 + 32, src16 + 32, src16 + 31, s->n - 1, w - 32); - return src16[w-1]; - } -} - -static inline void sub_left_prediction_bgr32(HYuvEncContext *s, uint8_t *dst, - const uint8_t *src, int w, - int *red, int *green, int *blue, - int *alpha) -{ - int i; - int r, g, b, a; - int min_width = FFMIN(w, 8); - r = *red; - g = *green; - b = *blue; - a = *alpha; - - for (i = 0; i < min_width; i++) { - const int rt = src[i * 4 + R]; - const int gt = src[i * 4 + G]; - const int bt = src[i * 4 + B]; - const int at = src[i * 4 + A]; - dst[i * 4 + R] = rt - r; - dst[i * 4 + G] = gt - g; - dst[i * 4 + B] = bt - b; - dst[i * 4 + A] = at - a; - r = rt; - g = gt; - b = bt; - a = at; - } - - s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 32 - 4, w * 4 - 32); - - *red = src[(w - 1) * 4 + R]; - *green = src[(w - 1) * 4 + G]; - *blue = src[(w - 1) * 4 + B]; - *alpha = src[(w - 1) * 4 + A]; -} - -static inline void sub_left_prediction_rgb24(HYuvEncContext *s, uint8_t *dst, - const uint8_t *src, int w, - int *red, int *green, int *blue) -{ - int i; - int r, g, b; - r = *red; - g = *green; - b = *blue; - for (i = 0; i < FFMIN(w, 16); i++) { - const int rt = src[i * 3 + 0]; - const int gt = src[i * 3 + 1]; - const int bt = src[i * 3 + 2]; - dst[i * 3 + 0] = rt - r; - dst[i * 3 + 1] = gt - g; - dst[i * 3 + 2] = bt - b; - r = rt; - g = gt; - b = bt; - } - - s->llvidencdsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w * 3 - 48); - - *red = src[(w - 1) * 3 + 0]; - *green = src[(w - 1) * 3 + 1]; - *blue = src[(w - 1) * 3 + 2]; -} - -static void sub_median_prediction(HYuvEncContext *s, uint8_t *dst, - const uint8_t *src1, const uint8_t *src2, - int w, int *left, int *left_top) -{ - if (s->bps <= 8) { - s->llvidencdsp.sub_median_pred(dst, src1, 
src2, w , left, left_top); - } else { - s->hencdsp.sub_hfyu_median_pred_int16((uint16_t *)dst, (const uint16_t *)src1, (const uint16_t *)src2, s->n - 1, w , left, left_top); - } -} - -static int store_table(HYuvEncContext *s, const uint8_t *len, uint8_t *buf) -{ - int i; - int index = 0; - int n = s->vlc_n; - - for (i = 0; i < n;) { - int val = len[i]; - int repeat = 0; - - for (; i < n && len[i] == val && repeat < 255; i++) - repeat++; - - av_assert0(val < 32 && val >0 && repeat < 256 && repeat>0); - if (repeat > 7) { - buf[index++] = val; - buf[index++] = repeat; - } else { - buf[index++] = val | (repeat << 5); - } - } - - return index; -} - -static int store_huffman_tables(HYuvEncContext *s, uint8_t *buf) -{ - int i, ret; - int size = 0; - int count = 3; - - if (s->version > 2) - count = 1 + s->alpha + 2*s->chroma; - - for (i = 0; i < count; i++) { - if ((ret = ff_huff_gen_len_table(s->len[i], s->stats[i], s->vlc_n, 0)) < 0) - return ret; - - if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n) < 0) { - return -1; - } - - size += store_table(s, s->len[i], buf + size); - } - return size; -} - -static av_cold int encode_init(AVCodecContext *avctx) -{ - HYuvEncContext *s = avctx->priv_data; - int i, j; - int ret; - const AVPixFmtDescriptor *desc; - - s->avctx = avctx; - s->flags = avctx->flags; - - ff_bswapdsp_init(&s->bdsp); - ff_huffyuvencdsp_init(&s->hencdsp, avctx->pix_fmt); - ff_llvidencdsp_init(&s->llvidencdsp); - - avctx->extradata = av_mallocz(3*MAX_N + 4); - if (!avctx->extradata) - return AVERROR(ENOMEM); - if (s->flags&AV_CODEC_FLAG_PASS1) { -#define STATS_OUT_SIZE 21*MAX_N*3 + 4 - avctx->stats_out = av_mallocz(STATS_OUT_SIZE); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132 - if (!avctx->stats_out) - return AVERROR(ENOMEM); - } - s->version = 2; - - desc = av_pix_fmt_desc_get(avctx->pix_fmt); - s->bps = desc->comp[0].depth; - s->yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components >= 2; - s->chroma = desc->nb_components > 2; - s->alpha = !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA); - s->chroma_h_shift = desc->log2_chroma_w; - s->chroma_v_shift = desc->log2_chroma_h; - - switch (avctx->pix_fmt) { - case AV_PIX_FMT_YUV420P: - case AV_PIX_FMT_YUV422P: - if (avctx->width & 1) { - av_log(avctx, AV_LOG_ERROR, "Width must be even for this colorspace.\n"); - return AVERROR(EINVAL); - } - s->bitstream_bpp = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 
12 : 16; - break; - case AV_PIX_FMT_YUV444P: - case AV_PIX_FMT_YUV410P: - case AV_PIX_FMT_YUV411P: - case AV_PIX_FMT_YUV440P: - case AV_PIX_FMT_GBRP: - case AV_PIX_FMT_GBRP9: - case AV_PIX_FMT_GBRP10: - case AV_PIX_FMT_GBRP12: - case AV_PIX_FMT_GBRP14: - case AV_PIX_FMT_GBRP16: - case AV_PIX_FMT_GRAY8: - case AV_PIX_FMT_GRAY16: - case AV_PIX_FMT_YUVA444P: - case AV_PIX_FMT_YUVA420P: - case AV_PIX_FMT_YUVA422P: - case AV_PIX_FMT_GBRAP: - case AV_PIX_FMT_YUV420P9: - case AV_PIX_FMT_YUV420P10: - case AV_PIX_FMT_YUV420P12: - case AV_PIX_FMT_YUV420P14: - case AV_PIX_FMT_YUV420P16: - case AV_PIX_FMT_YUV422P9: - case AV_PIX_FMT_YUV422P10: - case AV_PIX_FMT_YUV422P12: - case AV_PIX_FMT_YUV422P14: - case AV_PIX_FMT_YUV422P16: - case AV_PIX_FMT_YUV444P9: - case AV_PIX_FMT_YUV444P10: - case AV_PIX_FMT_YUV444P12: - case AV_PIX_FMT_YUV444P14: - case AV_PIX_FMT_YUV444P16: - case AV_PIX_FMT_YUVA420P9: - case AV_PIX_FMT_YUVA420P10: - case AV_PIX_FMT_YUVA420P16: - case AV_PIX_FMT_YUVA422P9: - case AV_PIX_FMT_YUVA422P10: - case AV_PIX_FMT_YUVA422P16: - case AV_PIX_FMT_YUVA444P9: - case AV_PIX_FMT_YUVA444P10: - case AV_PIX_FMT_YUVA444P16: - s->version = 3; - break; - case AV_PIX_FMT_RGB32: - s->bitstream_bpp = 32; - break; - case AV_PIX_FMT_RGB24: - s->bitstream_bpp = 24; - break; - default: - av_log(avctx, AV_LOG_ERROR, "format not supported\n"); - return AVERROR(EINVAL); - } - s->n = 1<bps; - s->vlc_n = FFMIN(s->n, MAX_VLC_N); - - avctx->bits_per_coded_sample = s->bitstream_bpp; - s->decorrelate = s->bitstream_bpp >= 24 && !s->yuv && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR); - s->interlaced = avctx->flags & AV_CODEC_FLAG_INTERLACED_ME ? 1 : 0; - if (s->context) { - if (s->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) { - av_log(avctx, AV_LOG_ERROR, - "context=1 is not compatible with " - "2 pass huffyuv encoding\n"); - return AVERROR(EINVAL); - } - } - - if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) { - if (s->interlaced != ( avctx->height > 288 )) - av_log(avctx, AV_LOG_INFO, - "using huffyuv 2.2.0 or newer interlacing flag\n"); - } - - if (s->version > 3 && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { - av_log(avctx, AV_LOG_ERROR, "Ver > 3 is under development, files encoded with it may not be decodable with future versions!!!\n" - "Use vstrict=-2 / -strict -2 to use it anyway.\n"); - return AVERROR(EINVAL); - } - - if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN && s->version <= 2) { - av_log(avctx, AV_LOG_ERROR, - "Error: RGB is incompatible with median predictor\n"); - return AVERROR(EINVAL); - } - - avctx->extradata[0] = s->predictor | (s->decorrelate << 6); - avctx->extradata[2] = s->interlaced ? 0x10 : 0x20; - if (s->context) - avctx->extradata[2] |= 0x40; - if (s->version < 3) { - avctx->extradata[1] = s->bitstream_bpp; - avctx->extradata[3] = 0; - } else { - avctx->extradata[1] = ((s->bps-1)<<4) | s->chroma_h_shift | (s->chroma_v_shift<<2); - if (s->chroma) - avctx->extradata[2] |= s->yuv ? 
1 : 2; - if (s->alpha) - avctx->extradata[2] |= 4; - avctx->extradata[3] = 1; - } - avctx->extradata_size = 4; - - if (avctx->stats_in) { - char *p = avctx->stats_in; - - for (i = 0; i < 4; i++) - for (j = 0; j < s->vlc_n; j++) - s->stats[i][j] = 1; - - for (;;) { - for (i = 0; i < 4; i++) { - char *next; - - for (j = 0; j < s->vlc_n; j++) { - s->stats[i][j] += strtol(p, &next, 0); - if (next == p) return -1; - p = next; - } - } - if (p[0] == 0 || p[1] == 0 || p[2] == 0) break; - } - } else { - for (i = 0; i < 4; i++) - for (j = 0; j < s->vlc_n; j++) { - int d = FFMIN(j, s->vlc_n - j); - - s->stats[i][j] = 100000000 / (d*d + 1); - } - } - - ret = store_huffman_tables(s, avctx->extradata + avctx->extradata_size); - if (ret < 0) - return ret; - avctx->extradata_size += ret; - - if (s->context) { - for (i = 0; i < 4; i++) { - int pels = avctx->width * avctx->height / (i ? 40 : 10); - for (j = 0; j < s->vlc_n; j++) { - int d = FFMIN(j, s->vlc_n - j); - s->stats[i][j] = pels/(d*d + 1); - } - } - } else { - for (i = 0; i < 4; i++) - for (j = 0; j < s->vlc_n; j++) - s->stats[i][j]= 0; - } - - ret = ff_huffyuv_alloc_temp(s->temp, s->temp16, avctx->width); - if (ret < 0) - return ret; - - s->picture_number=0; - - return 0; -} -static int encode_422_bitstream(HYuvEncContext *s, int offset, int count) -{ - int i; - const uint8_t *y = s->temp[0] + offset; - const uint8_t *u = s->temp[1] + offset / 2; - const uint8_t *v = s->temp[2] + offset / 2; - - if (put_bytes_left(&s->pb, 0) < 2 * 4 * count) { - av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n"); - return -1; - } - -#define LOAD4\ - int y0 = y[2 * i];\ - int y1 = y[2 * i + 1];\ - int u0 = u[i];\ - int v0 = v[i]; - - count /= 2; - - if (s->flags & AV_CODEC_FLAG_PASS1) { - for(i = 0; i < count; i++) { - LOAD4; - s->stats[0][y0]++; - s->stats[1][u0]++; - s->stats[0][y1]++; - s->stats[2][v0]++; - } - } - if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT) - return 0; - if (s->context) { - for (i = 0; i < count; i++) { - LOAD4; - s->stats[0][y0]++; - put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]); - s->stats[1][u0]++; - put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]); - s->stats[0][y1]++; - put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]); - s->stats[2][v0]++; - put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]); - } - } else { - for(i = 0; i < count; i++) { - LOAD4; - put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]); - put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]); - put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]); - put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]); - } - } - return 0; -} - -static int encode_plane_bitstream(HYuvEncContext *s, int width, int plane) -{ - int i, count = width/2; - - if (put_bytes_left(&s->pb, 0) < count * s->bps / 2) { - av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n"); - return -1; - } - -#define LOADEND\ - int y0 = s->temp[0][width-1]; -#define LOADEND_14\ - int y0 = s->temp16[0][width-1] & mask; -#define LOADEND_16\ - int y0 = s->temp16[0][width-1]; -#define STATEND\ - s->stats[plane][y0]++; -#define STATEND_16\ - s->stats[plane][y0>>2]++; -#define WRITEEND\ - put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]); -#define WRITEEND_16\ - put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\ - put_bits(&s->pb, 2, y0&3); - -#define LOAD2\ - int y0 = s->temp[0][2 * i];\ - int y1 = s->temp[0][2 * i + 1]; -#define LOAD2_14\ - int y0 = s->temp16[0][2 * i] & mask;\ - int y1 = s->temp16[0][2 * i + 1] & mask; -#define LOAD2_16\ - int y0 = s->temp16[0][2 * i];\ - int y1 = s->temp16[0][2 * i + 1]; 
-#define STAT2\ - s->stats[plane][y0]++;\ - s->stats[plane][y1]++; -#define STAT2_16\ - s->stats[plane][y0>>2]++;\ - s->stats[plane][y1>>2]++; -#define WRITE2\ - put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);\ - put_bits(&s->pb, s->len[plane][y1], s->bits[plane][y1]); -#define WRITE2_16\ - put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\ - put_bits(&s->pb, 2, y0&3);\ - put_bits(&s->pb, s->len[plane][y1>>2], s->bits[plane][y1>>2]);\ - put_bits(&s->pb, 2, y1&3); - - if (s->bps <= 8) { - if (s->flags & AV_CODEC_FLAG_PASS1) { - for (i = 0; i < count; i++) { - LOAD2; - STAT2; - } - if (width&1) { - LOADEND; - STATEND; - } - } - if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT) - return 0; - - if (s->context) { - for (i = 0; i < count; i++) { - LOAD2; - STAT2; - WRITE2; - } - if (width&1) { - LOADEND; - STATEND; - WRITEEND; - } - } else { - for (i = 0; i < count; i++) { - LOAD2; - WRITE2; - } - if (width&1) { - LOADEND; - WRITEEND; - } - } - } else if (s->bps <= 14) { - int mask = s->n - 1; - if (s->flags & AV_CODEC_FLAG_PASS1) { - for (i = 0; i < count; i++) { - LOAD2_14; - STAT2; - } - if (width&1) { - LOADEND_14; - STATEND; - } - } - if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT) - return 0; - - if (s->context) { - for (i = 0; i < count; i++) { - LOAD2_14; - STAT2; - WRITE2; - } - if (width&1) { - LOADEND_14; - STATEND; - WRITEEND; - } - } else { - for (i = 0; i < count; i++) { - LOAD2_14; - WRITE2; - } - if (width&1) { - LOADEND_14; - WRITEEND; - } - } - } else { - if (s->flags & AV_CODEC_FLAG_PASS1) { - for (i = 0; i < count; i++) { - LOAD2_16; - STAT2_16; - } - if (width&1) { - LOADEND_16; - STATEND_16; - } - } - if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT) - return 0; - - if (s->context) { - for (i = 0; i < count; i++) { - LOAD2_16; - STAT2_16; - WRITE2_16; - } - if (width&1) { - LOADEND_16; - STATEND_16; - WRITEEND_16; - } - } else { - for (i = 0; i < count; i++) { - LOAD2_16; - WRITE2_16; - } - if (width&1) { - LOADEND_16; - WRITEEND_16; - } - } - } -#undef LOAD2 -#undef STAT2 -#undef WRITE2 - return 0; -} - -static int encode_gray_bitstream(HYuvEncContext *s, int count) -{ - int i; - - if (put_bytes_left(&s->pb, 0) < 4 * count) { - av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n"); - return -1; - } - -#define LOAD2\ - int y0 = s->temp[0][2 * i];\ - int y1 = s->temp[0][2 * i + 1]; -#define STAT2\ - s->stats[0][y0]++;\ - s->stats[0][y1]++; -#define WRITE2\ - put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\ - put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]); - - count /= 2; - - if (s->flags & AV_CODEC_FLAG_PASS1) { - for (i = 0; i < count; i++) { - LOAD2; - STAT2; - } - } - if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT) - return 0; - - if (s->context) { - for (i = 0; i < count; i++) { - LOAD2; - STAT2; - WRITE2; - } - } else { - for (i = 0; i < count; i++) { - LOAD2; - WRITE2; - } - } - return 0; -} - -static inline int encode_bgra_bitstream(HYuvEncContext *s, int count, int planes) -{ - int i; - - if (put_bytes_left(&s->pb, 0) < 4 * planes * count) { - av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n"); - return -1; - } - -#define LOAD_GBRA \ - int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G]; \ - int b =(s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g) & 0xFF;\ - int r =(s->temp[0][planes == 3 ? 
3 * i + 0 : 4 * i + R] - g) & 0xFF;\ - int a = s->temp[0][planes * i + A]; - -#define STAT_BGRA \ - s->stats[0][b]++; \ - s->stats[1][g]++; \ - s->stats[2][r]++; \ - if (planes == 4) \ - s->stats[2][a]++; - -#define WRITE_GBRA \ - put_bits(&s->pb, s->len[1][g], s->bits[1][g]); \ - put_bits(&s->pb, s->len[0][b], s->bits[0][b]); \ - put_bits(&s->pb, s->len[2][r], s->bits[2][r]); \ - if (planes == 4) \ - put_bits(&s->pb, s->len[2][a], s->bits[2][a]); - - if ((s->flags & AV_CODEC_FLAG_PASS1) && - (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) { - for (i = 0; i < count; i++) { - LOAD_GBRA; - STAT_BGRA; - } - } else if (s->context || (s->flags & AV_CODEC_FLAG_PASS1)) { - for (i = 0; i < count; i++) { - LOAD_GBRA; - STAT_BGRA; - WRITE_GBRA; - } - } else { - for (i = 0; i < count; i++) { - LOAD_GBRA; - WRITE_GBRA; - } - } - return 0; -} - -static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, - const AVFrame *pict, int *got_packet) -{ - HYuvEncContext *s = avctx->priv_data; - const int width = avctx->width; - const int width2 = avctx->width >> 1; - const int height = avctx->height; - const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0]; - const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1]; - const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2]; - const AVFrame * const p = pict; - int i, j, size = 0, ret; - - if ((ret = ff_alloc_packet(avctx, pkt, width * height * 3 * 4 + AV_INPUT_BUFFER_MIN_SIZE)) < 0) - return ret; - - if (s->context) { - size = store_huffman_tables(s, pkt->data); - if (size < 0) - return size; - - for (i = 0; i < 4; i++) - for (j = 0; j < s->vlc_n; j++) - s->stats[i][j] >>= 1; - } - - init_put_bits(&s->pb, pkt->data + size, pkt->size - size); - - if (avctx->pix_fmt == AV_PIX_FMT_YUV422P || - avctx->pix_fmt == AV_PIX_FMT_YUV420P) { - int lefty, leftu, leftv, y, cy; - - put_bits(&s->pb, 8, leftv = p->data[2][0]); - put_bits(&s->pb, 8, lefty = p->data[0][1]); - put_bits(&s->pb, 8, leftu = p->data[1][0]); - put_bits(&s->pb, 8, p->data[0][0]); - - lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0); - leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0); - leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0); - - encode_422_bitstream(s, 2, width-2); - - if (s->predictor==MEDIAN) { - int lefttopy, lefttopu, lefttopv; - cy = y = 1; - if (s->interlaced) { - lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty); - leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu); - leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv); - - encode_422_bitstream(s, 0, width); - y++; cy++; - } - - lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty); - leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu); - leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv); - - encode_422_bitstream(s, 0, 4); - - lefttopy = p->data[0][3]; - lefttopu = p->data[1][1]; - lefttopv = p->data[2][1]; - s->llvidencdsp.sub_median_pred(s->temp[0], p->data[0] + 4, p->data[0] + fake_ystride + 4, width - 4, &lefty, &lefttopy); - s->llvidencdsp.sub_median_pred(s->temp[1], p->data[1] + 2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu); - s->llvidencdsp.sub_median_pred(s->temp[2], p->data[2] + 2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv); - encode_422_bitstream(s, 0, width - 4); - y++; 
cy++; - - for (; y < height; y++,cy++) { - const uint8_t *ydst, *udst, *vdst; - - if (s->bitstream_bpp == 12) { - while (2 * cy > y) { - ydst = p->data[0] + p->linesize[0] * y; - s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy); - encode_gray_bitstream(s, width); - y++; - } - if (y >= height) break; - } - ydst = p->data[0] + p->linesize[0] * y; - udst = p->data[1] + p->linesize[1] * cy; - vdst = p->data[2] + p->linesize[2] * cy; - - s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy); - s->llvidencdsp.sub_median_pred(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu); - s->llvidencdsp.sub_median_pred(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv); - - encode_422_bitstream(s, 0, width); - } - } else { - for (cy = y = 1; y < height; y++, cy++) { - const uint8_t *ydst, *udst, *vdst; - - /* encode a luma only line & y++ */ - if (s->bitstream_bpp == 12) { - ydst = p->data[0] + p->linesize[0] * y; - - if (s->predictor == PLANE && s->interlaced < y) { - s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width); - - lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty); - } else { - lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty); - } - encode_gray_bitstream(s, width); - y++; - if (y >= height) break; - } - - ydst = p->data[0] + p->linesize[0] * y; - udst = p->data[1] + p->linesize[1] * cy; - vdst = p->data[2] + p->linesize[2] * cy; - - if (s->predictor == PLANE && s->interlaced < cy) { - s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width); - s->llvidencdsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2); - s->llvidencdsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2); - - lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty); - leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu); - leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv); - } else { - lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty); - leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu); - leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv); - } - - encode_422_bitstream(s, 0, width); - } - } - } else if(avctx->pix_fmt == AV_PIX_FMT_RGB32) { - const uint8_t *data = p->data[0] + (height - 1) * p->linesize[0]; - const int stride = -p->linesize[0]; - const int fake_stride = -fake_ystride; - int leftr, leftg, leftb, lefta; - - put_bits(&s->pb, 8, lefta = data[A]); - put_bits(&s->pb, 8, leftr = data[R]); - put_bits(&s->pb, 8, leftg = data[G]); - put_bits(&s->pb, 8, leftb = data[B]); - - sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1, - &leftr, &leftg, &leftb, &lefta); - encode_bgra_bitstream(s, width - 1, 4); - - for (int y = 1; y < height; y++) { - const uint8_t *dst = data + y*stride; - if (s->predictor == PLANE && s->interlaced < y) { - s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4); - sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, - &leftr, &leftg, &leftb, &lefta); - } else { - sub_left_prediction_bgr32(s, s->temp[0], dst, width, - &leftr, &leftg, &leftb, &lefta); - } - encode_bgra_bitstream(s, width, 4); - } - } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) { - const uint8_t *data = p->data[0] + (height - 1) * p->linesize[0]; - const int stride = -p->linesize[0]; - const int fake_stride = -fake_ystride; - int leftr, leftg, leftb; - - put_bits(&s->pb, 8, leftr = 
data[0]); - put_bits(&s->pb, 8, leftg = data[1]); - put_bits(&s->pb, 8, leftb = data[2]); - put_bits(&s->pb, 8, 0); - - sub_left_prediction_rgb24(s, s->temp[0], data + 3, width - 1, - &leftr, &leftg, &leftb); - encode_bgra_bitstream(s, width-1, 3); - - for (int y = 1; y < height; y++) { - const uint8_t *dst = data + y * stride; - if (s->predictor == PLANE && s->interlaced < y) { - s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride, - width * 3); - sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width, - &leftr, &leftg, &leftb); - } else { - sub_left_prediction_rgb24(s, s->temp[0], dst, width, - &leftr, &leftg, &leftb); - } - encode_bgra_bitstream(s, width, 3); - } - } else if (s->version > 2) { - int plane; - for (plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) { - int left, y; - int w = width; - int h = height; - int fake_stride = fake_ystride; - - if (s->chroma && (plane == 1 || plane == 2)) { - w >>= s->chroma_h_shift; - h >>= s->chroma_v_shift; - fake_stride = plane == 1 ? fake_ustride : fake_vstride; - } - - left = sub_left_prediction(s, s->temp[0], p->data[plane], w , 0); - - encode_plane_bitstream(s, w, plane); - - if (s->predictor==MEDIAN) { - int lefttop; - y = 1; - if (s->interlaced) { - left = sub_left_prediction(s, s->temp[0], p->data[plane] + p->linesize[plane], w , left); - - encode_plane_bitstream(s, w, plane); - y++; - } - - lefttop = p->data[plane][0]; - - for (; y < h; y++) { - const uint8_t *dst = p->data[plane] + p->linesize[plane] * y; - - sub_median_prediction(s, s->temp[0], dst - fake_stride, dst, w , &left, &lefttop); - - encode_plane_bitstream(s, w, plane); - } - } else { - for (y = 1; y < h; y++) { - const uint8_t *dst = p->data[plane] + p->linesize[plane] * y; - - if (s->predictor == PLANE && s->interlaced < y) { - diff_bytes(s, s->temp[1], dst, dst - fake_stride, w); - - left = sub_left_prediction(s, s->temp[0], s->temp[1], w , left); - } else { - left = sub_left_prediction(s, s->temp[0], dst, w , left); - } - - encode_plane_bitstream(s, w, plane); - } - } - } - } else { - av_log(avctx, AV_LOG_ERROR, "Format not supported!\n"); - } - emms_c(); - - size += (put_bits_count(&s->pb) + 31) / 8; - put_bits(&s->pb, 16, 0); - put_bits(&s->pb, 15, 0); - size /= 4; - - if ((s->flags & AV_CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) { - int j; - char *p = avctx->stats_out; - char *end = p + STATS_OUT_SIZE; - for (i = 0; i < 4; i++) { - for (j = 0; j < s->vlc_n; j++) { - snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]); - p += strlen(p); - s->stats[i][j]= 0; - } - snprintf(p, end-p, "\n"); - p++; - if (end <= p) - return AVERROR(ENOMEM); - } - } else if (avctx->stats_out) - avctx->stats_out[0] = '\0'; - if (!(s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) { - flush_put_bits(&s->pb); - s->bdsp.bswap_buf((uint32_t *) pkt->data, (uint32_t *) pkt->data, size); - } - - s->picture_number++; - - pkt->size = size * 4; - *got_packet = 1; - - return 0; -} - -static av_cold int encode_end(AVCodecContext *avctx) -{ - HYuvEncContext *s = avctx->priv_data; - - ff_huffyuv_common_end(s->temp, s->temp16); - - av_freep(&avctx->stats_out); - - return 0; -} - -#define OFFSET(x) offsetof(HYuvEncContext, x) -#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM - -#define COMMON_OPTIONS \ - { "non_deterministic", "Allow multithreading for e.g. 
context=1 at the expense of determinism", \ - OFFSET(non_determ), AV_OPT_TYPE_BOOL, { .i64 = 0 }, \ - 0, 1, VE }, \ - { "pred", "Prediction method", OFFSET(predictor), AV_OPT_TYPE_INT, { .i64 = LEFT }, LEFT, MEDIAN, VE, "pred" }, \ - { "left", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LEFT }, INT_MIN, INT_MAX, VE, "pred" }, \ - { "plane", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PLANE }, INT_MIN, INT_MAX, VE, "pred" }, \ - { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MEDIAN }, INT_MIN, INT_MAX, VE, "pred" }, \ - -static const AVOption normal_options[] = { - COMMON_OPTIONS - { NULL }, -}; - -static const AVOption ff_options[] = { - COMMON_OPTIONS - { "context", "Set per-frame huffman tables", OFFSET(context), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE }, - { NULL }, -}; - -static const AVClass normal_class = { - .class_name = "huffyuv", - .item_name = av_default_item_name, - .option = normal_options, - .version = LIBAVUTIL_VERSION_INT, -}; - -static const AVClass ff_class = { - .class_name = "ffvhuff", - .item_name = av_default_item_name, - .option = ff_options, - .version = LIBAVUTIL_VERSION_INT, -}; - -const FFCodec ff_huffyuv_encoder = { - .p.name = "huffyuv", - CODEC_LONG_NAME("Huffyuv / HuffYUV"), - .p.type = AVMEDIA_TYPE_VIDEO, - .p.id = AV_CODEC_ID_HUFFYUV, - .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | - AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE, - .priv_data_size = sizeof(HYuvEncContext), - .init = encode_init, - FF_CODEC_ENCODE_CB(encode_frame), - .close = encode_end, - .p.priv_class = &normal_class, - .p.pix_fmts = (const enum AVPixelFormat[]){ - AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24, - AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE - }, - .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, -}; - -#if CONFIG_FFVHUFF_ENCODER -const FFCodec ff_ffvhuff_encoder = { - .p.name = "ffvhuff", - CODEC_LONG_NAME("Huffyuv FFmpeg variant"), - .p.type = AVMEDIA_TYPE_VIDEO, - .p.id = AV_CODEC_ID_FFVHUFF, - .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | - AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE, - .priv_data_size = sizeof(HYuvEncContext), - .init = encode_init, - FF_CODEC_ENCODE_CB(encode_frame), - .close = encode_end, - .p.priv_class = &ff_class, - .p.pix_fmts = (const enum AVPixelFormat[]){ - AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV411P, - AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P, - AV_PIX_FMT_GBRP, - AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16, - AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16, - AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P, - AV_PIX_FMT_GBRAP, - AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV420P16, - AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV422P16, - AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV444P16, - AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16, - AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P16, - AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P16, - AV_PIX_FMT_RGB24, - AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE - }, - .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, -}; -#endif diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jni.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jni.h deleted file mode 100644 index dd99e92611322b5ac590daea689d920b358b272c..0000000000000000000000000000000000000000 --- 
a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/jni.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * JNI public API functions - * - * Copyright (c) 2015-2016 Matthieu Bouron - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_JNI_H -#define AVCODEC_JNI_H - -/* - * Manually set a Java virtual machine which will be used to retrieve the JNI - * environment. Once a Java VM is set it cannot be changed afterwards, meaning - * you can call multiple times av_jni_set_java_vm with the same Java VM pointer - * however it will error out if you try to set a different Java VM. - * - * @param vm Java virtual machine - * @param log_ctx context used for logging, can be NULL - * @return 0 on success, < 0 otherwise - */ -int av_jni_set_java_vm(void *vm, void *log_ctx); - -/* - * Get the Java virtual machine which has been set with av_jni_set_java_vm. - * - * @param vm Java virtual machine - * @return a pointer to the Java virtual machine - */ -void *av_jni_get_java_vm(void *log_ctx); - -#endif /* AVCODEC_JNI_H */ diff --git a/spaces/congsaPfin/Manga-OCR/logs/APK Games Free Download and Install the Best Games for Your Phone.md b/spaces/congsaPfin/Manga-OCR/logs/APK Games Free Download and Install the Best Games for Your Phone.md deleted file mode 100644 index 202cc30efe194019a4f4c4d50f58d68d56010722..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/APK Games Free Download and Install the Best Games for Your Phone.md +++ /dev/null @@ -1,121 +0,0 @@ -
    - - - - - - - - - -
    -

    APK Games Free Download: How to Find and Install Them on Your Android Device

    -

    If you are an avid gamer who loves to explore new and exciting games on your Android device, you might have heard of APK games. These are games that are not available on the official Google Play Store, but can be downloaded from other sources as APK files. But what are APK files, and how can you download, install, and play them on your device? In this article, we will answer these questions and more.

    -

    apk games free download


    Download ★★★★★ https://urlca.com/2uO7M5



    -

    What are APK Games?

    -

    APK stands for Android Package Kit, which is a file format that contains all the necessary components for an Android app or game to run on your device. APK files are similar to ZIP files, but they have a different extension (.apk) and can be installed directly on your device without extracting them.
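Because an APK is structurally just a ZIP archive, you can peek inside one before installing it. The following is only a minimal sketch in Python, not an official tool; the file name my_game.apk is a placeholder for whatever you downloaded:

```python
import zipfile

APK_PATH = "my_game.apk"  # placeholder path to a downloaded APK

# An APK is a ZIP archive, so Python's standard zipfile module can read it.
with zipfile.ZipFile(APK_PATH) as apk:
    names = apk.namelist()
    print("Entries in the archive:", len(names))
    # A valid APK normally contains a binary manifest and compiled code.
    print("Has AndroidManifest.xml:", "AndroidManifest.xml" in names)
    print("Has classes.dex:", "classes.dex" in names)
```

If the file cannot even be opened as a ZIP archive, the download is most likely corrupted or fake.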

    -

    There are many reasons why some games are not available on the Google Play Store, such as regional restrictions, censorship, licensing issues, or developer preferences. Some developers may choose to distribute their games as APK files to avoid Google's fees or policies, or to offer exclusive features or updates that are not available on the official store.

    -

    Some of the benefits of playing APK games are:

    -

    -
      -
    • You can access games that are not available on the Google Play Store.
    • -
    • You can enjoy games that have more features or better graphics than their official versions.
    • -
    • You can get early access to beta versions or updates of your favorite games.
    • -
    • You can customize your gaming experience by modifying or hacking your APK games.
    • -
    -

    However, there are also some drawbacks of playing APK games, such as:

    -
      -
    • You may expose your device to malware or viruses that can harm your data or privacy.
    • -
• You may violate the terms and conditions of the original game developers or publishers, and risk getting banned or sued.
• You may encounter compatibility or performance issues, such as crashes, glitches, or bugs.
• You may miss out on the official updates or support from the Google Play Store or the game developers.

Some examples of popular APK games are:

• PUBG Mobile Lite: A lighter version of the famous battle royale game that can run on low-end devices.
• Minecraft Pocket Edition: A portable version of the sandbox game that allows you to create and explore your own world.
• GTA San Andreas: A classic open-world action game that lets you experience the life of a gangster in Los Santos.
• Among Us: A multiplayer game where you have to find the impostor among your crewmates in a spaceship.
• Genshin Impact: A role-playing game where you can explore a vast fantasy world with different characters and elements.

      How to Download APK Games?

      -

      There are many sources and websites where you can download APK games for free. However, not all of them are safe or reliable. Some of them may contain malware or viruses that can harm your device or steal your information. Some of them may also provide fake or outdated APK files that may not work properly or at all.

      -

      Therefore, you should be careful and cautious when downloading APK games from unknown sources. Here are some precautions and tips for downloading APK games:

      -
        -
• Always check the reputation and reviews of the source or website before downloading any APK file. You can use tools like VirusTotal or Google Safe Browsing to scan the URL for any malicious content (a small file-hashing example for this kind of check follows this list).
      • -
      • Always check the permissions and details of the APK file before installing it on your device. You can use tools like APK Analyzer or APK Editor to inspect the APK file for any suspicious or unnecessary permissions or components.
      • -
      • Always download the latest version of the APK file from the official website or a trusted source. You can use tools like APKMirror or APKPure to find and download the latest and verified APK files for your games.
      • -
      -
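As a small illustration of the first tip above, you can compute the SHA-256 hash of a file you have already downloaded and search for that hash on VirusTotal instead of uploading the whole APK. This is only a sketch; my_game.apk is a placeholder name:

```python
import hashlib

APK_PATH = "my_game.apk"  # placeholder path to a downloaded APK

# Hash the file in chunks so even large APKs do not need to fit in memory.
sha256 = hashlib.sha256()
with open(APK_PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1024 * 1024), b""):
        sha256.update(chunk)

print("SHA-256:", sha256.hexdigest())
# Paste this value into the VirusTotal search box to look up existing reports.
```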

      The steps for downloading APK games are:

      -
        -
      1. Find and choose the APK game that you want to download from a source or website.
      2. -
      3. Click on the download button or link to start downloading the APK file to your device.
      4. -
      5. Wait for the download to finish and locate the APK file in your device's storage.
      6. -
      -

      How to Install APK Games?

      -

      Before you can install any APK game on your device, you need to make sure that your device meets some requirements and settings. These are:

      -
        -
      • Your device should have enough storage space to accommodate the APK file and its data.
      • -
      • Your device should have a compatible Android version and hardware specifications to run the game smoothly.
      • -
      • Your device should allow installation from unknown sources, which is a security setting that prevents installation of apps or games from outside the Google Play Store. You can enable this setting by going to Settings > Security > Unknown Sources (or similar) and toggling it on.
      • -
      -

The steps for installing APK games are listed below; a command-line alternative using adb is sketched after the list:

      -
        -
      1. Open the file manager app on your device and navigate to the folder where you downloaded the APK file.
      2. -
      3. Tap on the APK file to launch it and start the installation process.
      4. -
      5. Follow the instructions on the screen and grant any permissions or access that the game requires.
      6. -
      7. Wait for the installation to finish and look for the game icon on your home screen or app drawer.
      8. -
      9. Tap on the game icon to launch it and enjoy playing it.
      10. -
      -
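If you prefer to install from a computer rather than through the on-device file manager steps above, the Android Debug Bridge (adb) can install the same APK over USB. This is a rough sketch rather than part of the original steps: it assumes the Android platform tools are installed, USB debugging is enabled on the phone, and my_game.apk is the file you downloaded.

```python
import subprocess

APK_PATH = "my_game.apk"  # placeholder path to a downloaded APK

# "adb install -r" installs the APK, replacing an older version if one exists.
result = subprocess.run(
    ["adb", "install", "-r", APK_PATH],
    capture_output=True,
    text=True,
)
print(result.stdout.strip() or result.stderr.strip())
```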

      Sometimes, you may encounter some troubleshooting or common errors when installing APK games, such as:

      -
        -
• The installation is blocked or failed: This may happen if your device does not meet the requirements or settings described above, so double-check your storage space, Android version, and the unknown sources setting.

Among Us: https://innersloth.com/gameAmongUs.php
Genshin Impact: A role-playing game where you can explore a vast fantasy world with different characters and elements. https://genshin.mihoyo.com/en
    -

    FAQs

    -

    Here are some of the frequently asked questions about APK games and their answers:

    -

    Q1: Are APK games safe?

    -

    A1: APK games are not inherently unsafe, but they may pose some risks if you download them from untrusted sources or websites. You should always scan the APK file for any malware or viruses before installing it on your device, and check the permissions and details of the game. You should also avoid downloading or installing any APK games that require a license verification or an additional data download, as they may be fake or harmful.

    -

    Q2: Do I need to root my device to play APK games?

    -

    A2: No, you do not need to root your device to play APK games, unless the game specifically requires it. Rooting your device is a process that gives you full access and control over your device's system and settings, which can allow you to modify or hack your APK games. However, rooting your device can also void your warranty, expose your device to security risks, and cause compatibility or performance issues. Therefore, you should only root your device if you know what you are doing and at your own risk.

    -

    Q3: How can I update my APK games?

    -

    A3: You can update your APK games by downloading and installing the latest version of the APK file from the official website or a trusted source. You can also use tools like APKUpdater or Uptodown to check for updates and download them automatically. However, you should be aware that updating your APK games may overwrite your data or progress, or cause compatibility or performance issues. Therefore, you should always backup your data and progress before updating your APK games.

    -

    Q4: What are some of the best APK games?

    -

    A4: There are many APK games that you can download and play on your Android device, depending on your preferences and tastes. Some of the best APK games that we recommend are PUBG Mobile Lite, Minecraft Pocket Edition, GTA San Andreas, Among Us, and Genshin Impact. You can find more information and download links for these games in the table above.

    -

    Q5: How can I uninstall APK games?

    -

    A5: You can uninstall APK games by following the same steps as uninstalling regular apps or games on your device. You can go to Settings > Apps > (Game Name) > Uninstall, or long-press the game icon on your home screen or app drawer and drag it to the uninstall option. You can also use tools like SD Maid or App Manager to uninstall APK games more easily and completely.

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/European Truck Simulator Mod APK The Most Realistic Truck Simulator Ever.md b/spaces/congsaPfin/Manga-OCR/logs/European Truck Simulator Mod APK The Most Realistic Truck Simulator Ever.md deleted file mode 100644 index 5b3215798f82f21a0c6714e1d364d0810a1b7f50..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/European Truck Simulator Mod APK The Most Realistic Truck Simulator Ever.md +++ /dev/null @@ -1,156 +0,0 @@ - -

    European Truck Simulator APK Mod Download: A Guide for Beginners

    -

    If you are a fan of driving simulation games, you might have heard of Euro Truck Simulator 2, a popular game that lets you travel across Europe as a truck driver, delivering cargo and running your own business. But did you know that you can also play this game on your Android device, with some extra features and benefits? In this article, we will show you how to download and install the Euro Truck Simulator apk mod, a modified version of the game that gives you unlimited money, access to all trucks and maps, and more. We will also give you some tips and tricks for playing the game, as well as some reviews from other players. So buckle up and get ready for a thrilling ride!

    -

    What is Euro Truck Simulator?

    -

    Euro Truck Simulator is a series of truck simulation games developed by SCS Software, a Czech company that specializes in vehicle simulation games. The first game in the series was released in 2008, and the second one, Euro Truck Simulator 2, was released in 2012. The game features licensed trucks from various European brands, such as Mercedes-Benz, Volvo, Scania, MAN, Renault, and more. The game also features realistic environments, roads, cities, and landmarks from over 26 European countries, such as Germany, France, Italy, Spain, Poland, and more. The game has a career mode, where you can start as a low-skilled driver and work your way up to become a successful trucking company owner. You can also customize your trucks with various tuning options, such as paint jobs, engines, lights, horns, etc. The game also has a multiplayer mode, where you can join other players online and compete or cooperate with them.

    -

    european truck simulator apk mod download


    DOWNLOADhttps://urlca.com/2uO4Xo



    -

    Features of the game

    -

    Some of the main features of Euro Truck Simulator 2 are:

    -
      -
    • Transport a vast variety of cargo across more than 60 European cities.
    • -
    • Run your own business which continues to grow even as you complete your freight deliveries.
    • -
    • Build your own fleet of trucks, buy garages, hire drivers, manage your company for maximum profits.
    • -
• A wide range of truck tuning options, from performance upgrades to cosmetic changes.
    • -
    • Realistic weather conditions and day/night cycle.
    • -
    • Visual damage on trucks.
    • -
    • Detailed interiors for each truck brand.
    • -
    • Amazing engine sounds.
    • -
    • Modding and community support.
    • -
    -

    How to download and install the apk mod

    -

    If you want to play Euro Truck Simulator 2 on your Android device, you will need to download and install the Euro Truck Simulator apk mod, which is a modified version of the game that gives you some extra features and benefits. Here are the steps to do so:

    -
      -
    1. Go to this link and download the apk file. This is a trusted source that provides safe and virus-free downloads.
    2. -
    3. Go to your device settings and enable the installation of apps from unknown sources. This will allow you to install the apk file that you downloaded.
    4. -
    5. Locate the apk file in your device storage and tap on it to install it. Follow the instructions on the screen to complete the installation.
    6. -
    7. Launch the game and enjoy!
    8. -
    -

    Why use the apk mod?

    -

    You might be wondering why you should use the Euro Truck Simulator apk mod instead of the original game. Well, there are some good reasons to do so, as well as some risks and precautions that you should be aware of. Let's take a look at them.

    -

    Benefits of the mod

    -

    The main benefit of using the Euro Truck Simulator apk mod is that it gives you unlimited money, which means that you can buy any truck, upgrade, garage, or driver that you want, without worrying about your budget. You can also access all maps and trucks, which means that you can explore every corner of Europe and drive any truck that you like, without having to unlock them first. You can also enjoy some extra features, such as no damage, no fatigue, no police, no speed limit, etc., which can make your gameplay more fun and easy.

    -

    Risks and precautions of the mod

    -

    However, using the Euro Truck Simulator apk mod also comes with some risks and precautions that you should be aware of. The main risk is that the mod may not be compatible with your device or the latest version of the game, which may cause some errors, crashes, or glitches. The mod may also not work well with the multiplayer mode, which may prevent you from joining other players online or cause some conflicts with them. The mod may also violate the terms and conditions of the game developer, which may result in a ban or a penalty. Therefore, you should always backup your data before installing the mod, and use it at your own risk and responsibility. You should also avoid using the mod in multiplayer mode, and respect other players and their gameplay.

    -

    Tips and tricks for playing Euro Truck Simulator

    -

    If you are new to Euro Truck Simulator 2, or if you want to improve your skills and experience in the game, here are some tips and tricks that can help you:

    -

    Follow the rules of traffic

    -

    One of the most important things to remember when playing Euro Truck Simulator 2 is to follow the rules of traffic. This means that you should obey the speed limits, traffic lights, signs, signals, etc., as well as drive on the correct side of the road. If you break any of these rules, you may get fined by the police, or cause an accident that can damage your truck or cargo. You may also lose reputation points or money if you deliver your cargo late or damaged. Therefore, it is better to drive safely and carefully than to rush and risk.

    -


    -

    Be careful with skill point assignment

    -

    Another important thing to consider when playing Euro Truck Simulator 2 is how to assign your skill points. Skill points are earned by leveling up in the game, and they can be used to unlock various perks and abilities for your driver. However, not all skills are equally useful or necessary, so you should be careful with how you spend them. Some of the most useful skills are:

    -
      -
    • A + B + C Cargo: These skills allow you to transport more types of cargo, such as fragile, valuable, or dangerous goods. This can increase your income and reputation.
    • -
    • Long Distance: This skill allows you to take longer delivery jobs, which can also increase your income and reputation.
    • -
    • Fuel Economy: This skill allows you to save fuel by driving more efficiently. This can reduce your expenses and increase your profits.
    • -
    • High Value Cargo: This skill allows you to transport more expensive cargo, which can also increase your income and reputation.
    • -
    • Eco Driving: This skill allows you to reduce your fuel consumption by driving more smoothly. This can also reduce your expenses and increase your profits.
    • -
    • Hazardous Cargo: This skill allows you to transport more dangerous cargo, such as explosives, chemicals, or radioactive materials. This can also increase your income and reputation.
    • -
    • Just In Time Delivery: This skill allows you to take urgent delivery jobs, which have a shorter time limit but a higher reward. This can also increase your income and reputation.
    • -
    -

    Some of the less useful skills are:

    -
• Fragile Cargo: This skill allows you to transport fragile cargo, such as electronics. However, this skill is not very useful, as fragile cargo is not very common, and it does not pay much more than regular cargo.
    • ADR: This skill allows you to transport more types of hazardous cargo, such as flammable, corrosive, or toxic materials. However, this skill is also not very useful, as hazardous cargo is already covered by the Hazardous Cargo skill, and it does not pay much more than regular cargo.
    • -
    • Passenger Transport: This skill allows you to transport passengers, such as tourists or workers. However, this skill is also not very useful, as passenger transport is not very common, and it does not pay much more than regular cargo.
    • -
    -

    Therefore, you should focus on the skills that can increase your income and reputation, and avoid the skills that are less useful or redundant.

    -

    Do not go through with the very first loan

    -

    Another tip for playing Euro Truck Simulator 2 is to avoid taking the very first loan that is offered to you at the beginning of the game. The loan is for 100,000 euros, and it has a high interest rate of 18%. This means that you will have to pay back 1,800 euros every day for 100 days, which can be a huge burden on your finances. Instead of taking the loan, you should continue working as a hired driver for other companies, until you have enough money to buy your own truck. This way, you can save money on interest and fees, and have more control over your income and expenses.

    -

    Take time to learn the menu

    -

    Another tip for playing Euro Truck Simulator 2 is to take some time to learn the menu and its functions. The menu is where you can access various options and information about your game, such as your profile, your company, your truck, your garage, your drivers, your bank, your map, your job market, your settings, etc. You can also use the menu to pause or save your game, or to quit the game. The menu can be accessed by pressing the Esc key on your keyboard, or by tapping on the screen if you are playing on a touch device. You should familiarize yourself with the menu and its features, as they can help you manage your game and improve your gameplay.

    -

    Ecodriving is a useless skill

    -

    Another tip for playing Euro Truck Simulator 2 is to ignore the ecodriving skill. The ecodriving skill is supposed to measure how efficiently you drive your truck, by taking into account factors such as speed, acceleration, braking, gear shifting, etc. The higher your ecodriving skill, the less fuel you consume and the more money you save. However, the ecodriving skill is actually useless and inaccurate in the game. It does not reflect the actual fuel consumption of your truck, nor does it affect your income or expenses. It is just a cosmetic feature that has no real impact on your gameplay. Therefore, you should not worry about your ecodriving skill or try to improve it. You should focus on other skills that are more useful and relevant in the game.

    -

    Reviews of Euro Truck Simulator

    -

    If you are still not convinced that Euro Truck Simulator 2 is a great game to play on your Android device with the Euro Truck Simulator apk mod, here are some reviews from other players who have tried it:

    -

    Pros and cons of the game

    -

    Some of the pros and cons of Euro Truck Simulator 2 are:

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    ProsCons
    - Realistic and immersive graphics and sounds.- Requires a lot of storage space and memory.
    - Diverse and expansive map of Europe.- May have some bugs or glitches.
    - Fun and challenging gameplay.- May be boring or repetitive for some players.
    - Customizable and upgradeable trucks.- May be too difficult or complex for some players.
    - Modding and community support.- May not be compatible with some devices or versions.
    -

    User ratings and feedback

    -

    Some of the user ratings and feedback for Euro Truck Simulator 2 are:

    -
      -
    • "This game is amazing. I love driving around Europe and delivering cargo. The graphics are stunning and the sounds are realistic. The game is very relaxing and enjoyable. I highly recommend it to anyone who likes simulation games." - 5 stars
    • -
    • "This game is good, but it has some issues. The game crashes sometimes and it lags a lot. The controls are not very responsive and the steering is too sensitive. The game is also very hard and frustrating. I wish there was an easier mode or a tutorial." - 3 stars
    • -
    • "This game is terrible. It is boring and repetitive. The game is too long and too complex. The game is also very expensive and it takes up too much space on my device. The game is also full of bugs and glitches. I regret buying it." - 1 star
    • -
    -

    Conclusion and FAQs

    -

    In conclusion, Euro Truck Simulator 2 is a great game to play on your Android device with the Euro Truck Simulator apk mod. The game offers you a realistic and immersive experience of driving a truck across Europe, delivering cargo and running your own business. The game also has a lot of features and options to customize your truck and your gameplay. The game also has a modding and community support, which can enhance your game even more. However, the game also has some drawbacks, such as requiring a lot of storage space and memory, having some bugs or glitches, being incompatible with some devices or versions, or being too difficult or complex for some players. Therefore, you should always backup your data before installing the mod, and use it at your own risk and responsibility.

    -

    If you have any questions about Euro Truck Simulator 2 or the Euro Truck Simulator apk mod, here are some FAQs that might help you:

    -
      -
    1. How can I update the game or the mod?
    2. -

      You can update the game or the mod by downloading the latest version from the official website or from the link that we provided. You can also check for updates from within the game menu or from the Google Play Store. However, you should always backup your data before updating, as some updates may not be compatible with your device or your previous version.

      -
    3. How can I play the multiplayer mode?
    4. -

      You can play the multiplayer mode by joining other players online through the Steam platform. You will need to have a Steam account and a copy of the original game on your PC to do so. You will also need to download and install the TruckersMP mod, which is a fan-made multiplayer mod that allows you to play with other players online. However, you should note that the multiplayer mode may not work well with the apk mod, as they may have different versions or features.

      -
    5. How can I use mods in the game?
    6. -

      You can use mods in the game by downloading them from various sources, such as the official website, the Steam Workshop, or other websites. You can also create your own mods using the SCS Software tools. However, you should note that not all mods are compatible with each other or with the apk mod, so you should always backup your data before installing them, and use them at your own risk and responsibility.

      -
    7. How can I contact the developer or get support?
    8. -

      You can contact the developer or get support by visiting their official website, their Facebook page, their Twitter account, their YouTube channel, or their forum. You can also send them an email at info@scssoft.com.

      -
    9. How can I uninstall the game or the mod?
    10. -

      You can uninstall the game or the mod by going to your device settings and finding the app in the list of installed apps. Then, you can tap on the app and select the option to uninstall it. You can also delete the apk file from your device storage if you want to free up some space.

      -

      I hope this article has helped you learn more about Euro Truck Simulator 2 and the Euro Truck Simulator apk mod. If you have any comments or suggestions, please feel free to share them with us. Thank you for reading and happy trucking!

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Instagram 2 APK - Enjoy New Features and Enhancements on Your Android.md b/spaces/congsaPfin/Manga-OCR/logs/Instagram 2 APK - Enjoy New Features and Enhancements on Your Android.md deleted file mode 100644 index 1374133ac9549057ce0ab3b3a39a8de7d04cd0ad..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Instagram 2 APK - Enjoy New Features and Enhancements on Your Android.md +++ /dev/null @@ -1,103 +0,0 @@ - -

      Instagram 2 APK Download: What Is It and How to Get It

      -

      Instagram is one of the most popular social media platforms in the world, with over one billion monthly active users. It allows you to create and share your photos, videos, stories, reels, and more with your friends and followers. But what if you want to have more control over your Instagram experience? What if you want to remove ads, hide views, download media, or customize your app? That's where Instagram 2 APK comes in.

      -

      instagram 2 apk download


      DOWNLOAD >>>>> https://urlca.com/2uO71I



      -

      Instagram 2 APK is a modified version of the official Instagram app that offers some extra features and options that are not available in the original app. It is also known as Instagram Mod APK or InstaMod APK. Some people might want to download Instagram 2 APK because they are not satisfied with the limitations or restrictions of the official app, or because they want to try something new and different.

      -

      In this article, we will explain what you can do with Instagram 2 APK, how to download and install it on your Android device, and what are the risks and benefits of using it. We will also answer some frequently asked questions about Instagram 2 APK. Let's get started!

      -

      Instagram Features: What You Can Do with the Official App

      -

      Before we dive into the features of Instagram 2 APK, let's review what you can do with the official Instagram app. Here are some of the features that make Instagram a great social media platform:

      -
        -
      • Reels: Reels are short-form videos that you can create and share with your friends or anyone on Instagram. You can add music, filters, stickers, text, and other effects to make your reels fun and entertaining.
      • -
      • Stories: Stories are posts that disappear after 24 hours. You can share moments from your everyday life in your stories, such as photos, videos, boomerangs, polls, quizzes, etc. You can also watch stories from other people you follow.
      • -
      • Messenger: Messenger is a feature that allows you to send photos, videos, messages, voice notes, and more privately to your friends or groups. You can also make video calls or join chat rooms with up to 50 people.
      • -
      • Shopping: Shopping is a feature that allows you to browse and buy products from your favorite brands and creators on Instagram. You can also create your own shop and sell your products to your followers.
      • -
      • Search & Explore: Search & Explore is a feature that helps you discover content and creators based on your interests. You can search for hashtags, keywords, locations, accounts, or browse through different categories.
      • -
      -

      These are just some of the features that Instagram offers. There are many more features that you can explore and enjoy on the app.

      -


      -

      Instagram Mod APK: What You Can Do with the Modified App

      -

      Now that we have covered what you can do with the official Instagram app, let's see what you can do with Instagram 2 APK or InstaMod APK. Here are some of the features that make Instagram 2 APK or InstaMod APK different from the official app:

      -
        -
      • Remove ads: Instagram 2 APK allows you to remove all the ads that appear on your feed, stories, reels, and explore page. This way, you can enjoy a more smooth and uninterrupted Instagram experience.
      • -
      • Hide views: Instagram 2 APK allows you to hide the number of views, likes, and comments on your posts and stories. This way, you can avoid the pressure of social comparison and focus on the content itself.
      • -
      • Download media: Instagram 2 APK allows you to download any photo, video, story, reel, or IGTV that you see on Instagram. You can save them to your device or share them with other apps.
      • -
      • Disable stories: Instagram 2 APK allows you to disable the stories feature completely. This way, you can avoid seeing or posting any stories on Instagram.
      • -
      • Customize app: Instagram 2 APK allows you to customize the appearance and functionality of your app. You can change the theme, icon, font, layout, and more according to your preferences.
      • -
      -

      These are just some of the features that Instagram 2 APK or InstaMod APK offers. There are many more features that you can discover and use on the app.

      -

      How to Download and Install Instagram 2 APK on Your Android Device

      -

      If you are interested in trying out Instagram 2 APK or InstaMod APK, you will need to download and install it on your Android device. Here are the steps that you need to follow:

      -
        -
      1. Uninstall the official Instagram app: Before you install Instagram 2 APK or InstaMod APK, you will need to uninstall the official Instagram app from your device. This is because you cannot have two versions of the same app on your device.
      2. -
      3. Download Instagram 2 APK or InstaMod APK: Next, you will need to download the latest version of Instagram 2 APK or InstaMod APK from a reliable source. You can search for it on Google or use this link: . Make sure that you download the file from a trusted and secure website.
      4. -
      5. Enable unknown sources: After you download the file, you will need to enable unknown sources on your device. This is because Android does not allow installing apps from sources other than the Google Play Store by default. To enable unknown sources, go to Settings > Security > Unknown Sources and toggle it on.
      6. -
      7. Install Instagram 2 APK or InstaMod APK: Finally, you will need to install Instagram 2 APK or InstaMod APK on your device. To do this, locate the downloaded file in your file manager and tap on it. Follow the instructions on the screen and wait for the installation to complete.
      8. -
      -

      Congratulations! You have successfully installed Instagram 2 APK or InstaMod APK on your device. You can now open the app and log in with your existing account or create a new one.

      -

      Risks and Benefits of Using Instagram 2 APK

      -

      Using Instagram 2 APK or InstaMod APK can have some risks and benefits that you should be aware of before using it. Here are some of them:

      - - - - - - - - - - - - - - - - -Conclusion -

      In conclusion, Instagram 2 APK or InstaMod APK is a modified version of the official Instagram app that offers some extra features and options that are not available in the original app. It can help you customize your app, remove ads, hide views, download media, disable stories, and more. However, it also comes with some risks and drawbacks, such as security issues, account suspension, update issues, and violation of terms and conditions. Therefore, you should use it at your own risk and discretion.

      -

      If you want to try out Instagram 2 APK or InstaMod APK, you can follow the steps we provided above to download and install it on your Android device. You can also check out some other alternatives to Instagram 2 APK or InstaMod APK, such as GB Instagram, OG Instagram, or Aero Instagram. These are also modified versions of Instagram that offer similar or different features.

      -

      We hope that this article has helped you understand what Instagram 2 APK or InstaMod APK is and how to get it. If you have any questions or feedback, feel free to leave a comment below. We would love to hear from you!

      -

      FAQs

      -

      Here are some frequently asked questions about Instagram 2 APK or InstaMod APK:

      -
        -
      • Q: Is Instagram 2 APK or InstaMod APK safe to use?
      • -
      • A: There is no definitive answer to this question. Some sources claim that Instagram 2 APK or InstaMod APK is safe and virus-free, while others warn that it might contain malware or spyware. Therefore, you should be careful when downloading and installing any modified app from unknown sources. You should also scan the file with an antivirus app before installing it.
      • -
      • Q: Will I get banned from using Instagram if I use Instagram 2 APK or InstaMod APK?
      • -
      • A: There is a possibility that you might get banned or suspended from using Instagram if you use Instagram 2 APK or InstaMod APK. This is because using a modified app violates the terms and conditions of Instagram. If Instagram detects that you are using a modified app, it might take action against your account. Therefore, you should use it at your own risk and discretion.
      • -
      • Q: How can I update Instagram 2 APK or InstaMod APK?
      • -
      • A: You cannot update Instagram 2 APK or InstaMod APK from the Google Play Store like the official app. You will need to download the latest version of the file from a reliable source and install it manually on your device. You should also uninstall the previous version of the app before installing the new one.
      • -
      • Q: Can I use both the official app and the modified app on the same device?
      • -
      • A: No, you cannot use both the official app and the modified app on the same device. This is because they have the same package name and signature. You will need to uninstall one of them before installing the other one.
      • -
      • Q: Can I use Instagram 2 APK or InstaMod APK on iOS devices?
      • -
      • A: No, you cannot use Instagram 2 APK or InstaMod APK on iOS devices. This is because they are only compatible with Android devices. You will need to jailbreak your iOS device and use a third-party app store to install any modified app on it.
      • -

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Paint 3D A Free and Fun Way to Create in Every Dimension on Windows 8.md b/spaces/congsaPfin/Manga-OCR/logs/Paint 3D A Free and Fun Way to Create in Every Dimension on Windows 8.md deleted file mode 100644 index d6b3490ba083434df34196d429f5815e95df8e8e..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Paint 3D A Free and Fun Way to Create in Every Dimension on Windows 8.md +++ /dev/null @@ -1,171 +0,0 @@ - -
      RisksBenefits
      - Security issues: Using a modified app can expose your device and data to malware, viruses, spyware, or hackers. You might also lose your privacy and personal information if the app accesses your contacts, messages, location, camera, microphone, etc.- Customization options: Using a modified app can give you more control over your app's appearance and functionality. You can change the theme, icon, font, layout, and more according to your preferences.
      - Account suspension: Using a modified app can violate the terms and conditions of Instagram. You might get banned or suspended from using the official app if Instagram detects that you are using a modified app.- Extra features: Using a modified app can give you access to some extra features that are not available in the official app. You can remove ads, hide views, download media, disable stories, etc.
      - Update issues: Using a modified app can prevent you from getting the latest updates and features from Instagram. You might miss out on some new and exciting features that Instagram releases regularly.- Experimentation: Using a modified app can give you a chance to experiment with something new and different. You might discover some hidden features or tricks that enhance your Instagram experience.
      - -
      -

      Paint 3D Free Download for Windows 8

      -

      Do you want to unleash your creativity and bring your ideas to life in three dimensions? Do you want to have fun with doodles, shapes, stickers, and effects? Do you want to create amazing 3D models without any coding or design skills? If you answered yes to any of these questions, then you need to download Paint 3D for Windows 8.

      -

      paint 3d free download for windows 8


      Downloadhttps://urlca.com/2uO4Vs



      -


      -

      In this article, we will tell you everything you need to know about Paint 3D and how to get it on your Windows 8 device. We will also show you how to use it to create your own stunning projects. Let's get started!

      -

      What is Paint 3D?

      -

      Paint 3D is a free modeling application that was introduced along with Windows 10 Creators Update. It is an upgraded version of the classic Microsoft Paint that offers support for 3D shapes. You can use it to make 2D masterpieces or 3D models that you can play with from all angles.

      -

      Paint 3D has a simple and intuitive interface that lets you easily access its tools and features. You can draw, paint, erase, crop, resize, rotate, and flip your images. You can also add text, stickers, effects, and filters to enhance your creations. You can import images from your device or the web, or use the built-in camera to capture your surroundings.

      -

      But the most exciting feature of Paint 3D is its ability to create and edit 3D objects. You can choose from a variety of predefined shapes, such as animals, plants, buildings, and vehicles. You can also use the 3D doodle tool to draw your own shapes in freehand or sharp mode. You can then modify your 3D objects by changing their color, texture, lighting, or material. You can also move them around, rotate them, scale them, duplicate them, or delete them.

      -


      -

      Paint 3D also allows you to export your projects as images or videos. You can save them on your device or share them online with others. You can also upload them to Remix 3D, a community website where you can find and download thousands of 3D models created by other users. You can also remix them and add your own touch to them.

      -

      Why Download Paint 3D for Windows 8?

      -

      Paint 3D is a fun and easy way to express yourself and unleash your creativity. Whether you are a beginner or a pro, you can use Paint 3D to create amazing 3D models without any coding or design skills. You can also use it to learn about 3D concepts and principles, such as perspective, depth, and dimension.

      -

      Paint 3D is also a great tool for education and entertainment. You can use it to make presentations, posters, flyers, cards, logos, and banners, as well as animations, cartoons, comics, memes, and simple games. It is also a handy way to explore topics and themes such as history, geography, science, art, and culture.

      -

      Paint 3D is compatible with Windows 8 devices that have at least 4 GB of RAM and a DirectX 9 or higher graphics card. It is also compatible with touchscreens and stylus pens. You can use it on your laptop, tablet, or desktop computer.
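If you want to verify those numbers before installing, the short sketch below reads the installed RAM and Windows version using only the Python standard library. This is a rough pre-install check, not an official compatibility test: the 4 GB threshold is simply the figure quoted above, and the DirectX level still has to be confirmed manually (for example by running dxdiag).

```python
import ctypes
import platform


def check_paint3d_requirements(min_ram_gb=4):
    """Rough pre-install check against the requirements quoted above (Windows only)."""
    if platform.system() != "Windows":
        print("This check only makes sense on Windows.")
        return

    class MEMORYSTATUSEX(ctypes.Structure):
        _fields_ = [
            ("dwLength", ctypes.c_ulong),
            ("dwMemoryLoad", ctypes.c_ulong),
            ("ullTotalPhys", ctypes.c_ulonglong),
            ("ullAvailPhys", ctypes.c_ulonglong),
            ("ullTotalPageFile", ctypes.c_ulonglong),
            ("ullAvailPageFile", ctypes.c_ulonglong),
            ("ullTotalVirtual", ctypes.c_ulonglong),
            ("ullAvailVirtual", ctypes.c_ulonglong),
            ("ullAvailExtendedVirtual", ctypes.c_ulonglong),
        ]

    status = MEMORYSTATUSEX()
    status.dwLength = ctypes.sizeof(MEMORYSTATUSEX)
    ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(status))
    ram_gb = status.ullTotalPhys / (1024 ** 3)

    print(f"Windows release: {platform.release()}")
    print(f"Installed RAM: {ram_gb:.1f} GB "
          f"({'meets' if ram_gb >= min_ram_gb else 'below'} the {min_ram_gb} GB minimum)")


if __name__ == "__main__":
    check_paint3d_requirements()
```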

      -

      How to Download Paint 3D for Windows 8?

      -

      If you want to download Paint 3D for Windows 8, you have two options: get it from the Microsoft Store or get it from FileHippo. Here are the steps for each option:

      -

      Option 1: Get Paint 3D from the Microsoft Store

      -

      The Microsoft Store is the official source for downloading Paint 3D for Windows 8. Here are the steps to get it from there:

      -
        -
      1. Open the Microsoft Store app on your Windows 8 device.
      2. Type "Paint 3D" in the search box and hit enter.
      3. Select the Paint 3D app from the list of results.
      4. Click on the "Get" button to start the download.
      5. Wait for the download to finish and then click on the "Launch" button to open the app.
      -

      Option 2: Get Paint 3D from FileHippo

      -

      FileHippo is a third-party website that offers free downloads of various software applications. Here are the steps to get Paint 3D from there:

      -
        -
      1. Open your web browser and go to https://filehippo.com/download_paint-3d/.
      2. Click on the "Download Latest Version" button to start the download.
      3. Wait for the download to finish and then open the downloaded file.
      4. Follow the instructions on the screen to install the app.
      5. Once the installation is complete, open the app from your start menu or desktop shortcut.
      -

      How to Use Paint 3D on Windows 8?

      -

      Now that you have downloaded Paint 3D for Windows 8, you are ready to use it to create your own stunning projects. Here is a quick tutorial on how to create and edit 3D models with Paint 3D:

      -

      Create a New Project

      -

      To start a new project in Paint 3D, follow these steps:

      -
        -
      1. Open the Paint 3D app on your Windows 8 device.
      2. Click on the "New" button on the top left corner of the screen.
      3. You will see a blank canvas where you can create your project.
      -

      Choose a Canvas

      -

      To choose a canvas size and orientation for your project, follow these steps:

      -
        -
      1. Click on the "Menu" button on the top left corner of the screen.
      2. -
      3. Select the "Canvas" option from the drop-down menu.
      4. -
      5. You will see a panel on the right side of the screen where you can adjust the canvas settings.
      6. -
      7. You can use the sliders to change the width and height of the canvas in pixels.
      8. -
      9. You can also use the buttons to change the orientation of the canvas to portrait or landscape.
      10. -
      11. You can also use the toggle to enable or disable transparent canvas.
      12. -
      13. Once you are happy with your canvas settings, click on the "X" button to close the panel.
      14. -
      -

      Add a Background

      -

      To add a color or an image as a background for your project, follow these steps:

      -
        -
      1. Click on the "Brushes" button on the top menu bar.
      2. -
      3. Select the "Fill" tool from the list of brushes.
      4. -
      5. You will see a panel on the right side of the screen where you can choose a color or an image for your background.
      6. -
      7. If you want to use a color, click on the color wheel and select a color from the palette. You can also use the slider to adjust the opacity of the color.
      8. -
      9. If you want to use an image, click on the "Browse files" button and select an image from your device or the web. You can also use the slider to adjust the opacity of the image.
      10. -
      11. Once you have chosen your background, click on the canvas to apply it.
      12. -
      -

      Insert a 3D Object

      -

      To insert a predefined or a custom 3D object for your project, follow these steps:

      -
        -
      1. Click on the "3D shapes" button on the top menu bar.
      2. -
      3. You will see a panel on the right side of the screen where you can choose a 3D object from various categories, such as basic shapes, animals, plants, buildings, vehicles, and more.
      4. -
      5. If you want to use a predefined 3D object, click on one of the categories and select an object from the list. You can also use the search box to find an object by name.
      6. -
      7. If you want to use a custom 3D object, click on the "3D doodle" tool and draw your own shape in freehand or sharp mode. You can also use the eraser tool to erase any mistakes.
      8. -
      9. Once you have chosen your 3D object, click and drag it onto the canvas. You can also use the mouse wheel to zoom in or out.
      10. -
      -

      Modify a 3D Object

      -

      To move, rotate, scale, duplicate, or delete a 3D object for your project, follow these steps:

      -
        -
      1. Select a 3D object by clicking on it. You will see a bounding box around it with various handles and icons.
      2. To move a 3D object, click and drag it to any position on the canvas. You can also use the arrow keys to move it in small increments.
      3. To rotate a 3D object, click and drag the circular handle on the top of the bounding box. You can also use the "Rotate" icon on the bottom menu bar to rotate it by 45 degrees.
      4. To scale a 3D object, click and drag any of the square handles on the corners or sides of the bounding box. You can also use the "Scale" icon on the bottom menu bar to scale it uniformly or non-uniformly.
      5. To duplicate a 3D object, click on the "Copy" icon on the bottom menu bar and then click on the "Paste" icon. You can also use the keyboard shortcuts Ctrl+C and Ctrl+V to copy and paste.
      6. To delete a 3D object, click on the "Delete" icon on the bottom menu bar or press the Delete key on your keyboard.
      -

      Apply Effects to a 3D Object

      -

      To change the color, texture, lighting, or material of a 3D object for your project, follow these steps:

      -
        -
      1. Select a 3D object by clicking on it. You will see a panel on the right side of the screen where you can choose different effects for your 3D object.
      2. To change the color of a 3D object, click on the "Color" tab and select a color from the palette. You can also use the color picker tool to select a color from anywhere on the screen.
      3. To change the texture of a 3D object, click on the "Texture" tab and select a texture from the list. You can also use the "Browse files" button to select an image from your device or the web as a texture.
      4. To change the lighting of a 3D object, click on the "Lighting" tab and select a lighting mode from the list. You can also use the slider to adjust the brightness of the light.
      5. To change the material of a 3D object, click on the "Material" tab and select a material from the list. You can also use the slider to adjust the glossiness or metallicness of the material.
      -

      Save or Export a Project

      -

      To save or export a project as an image or a video for your project, follow these steps:

      -
        -
      1. Click on the "Menu" button on the top left corner of the screen.
      2. -
      3. Select the "Save as" option from the drop-down menu.
      4. -
      5. You will see a panel on the right side of the screen where you can choose how to save or export your project.
      6. -
      7. If you want to save your project as a Paint 3D file, click on the "Paint 3D project" option and enter a name for your file. You can also choose a location to save your file on your device.
      8. -
      9. If you want to export your project as an image, click on the "Image" option and choose a file format from PNG, JPEG, GIF, BMP, or TIFF. You can also choose a quality level from low, medium, or high.
      10. -
      11. If you want to export your project as a video, click on the "Video" option and choose a file format from MP4 or WMV. You can also choose a quality level from low, medium, or high.
      12. -
      13. Once you have chosen how to save or export your project, click on the "Save" button and wait for the process to complete.
      14. -
      -
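Since Paint 3D exports plain image files, any image library can post-process them afterwards. The sketch below is a hypothetical example that batch-converts exported PNGs in a folder to JPEG using Pillow; the folder path and the Pillow dependency (pip install pillow) are assumptions for illustration, not part of Paint 3D itself.

```python
from pathlib import Path

from PIL import Image  # assumes Pillow is installed: pip install pillow

# Hypothetical folder holding images exported from Paint 3D.
EXPORT_DIR = Path.home() / "Pictures" / "Paint 3D exports"


def convert_exports_to_jpeg(export_dir: Path = EXPORT_DIR, quality: int = 90) -> None:
    """Convert every PNG export in the folder to a JPEG saved next to it."""
    for png_path in export_dir.glob("*.png"):
        jpg_path = png_path.with_suffix(".jpg")
        # JPEG has no alpha channel, so flatten to RGB first.
        Image.open(png_path).convert("RGB").save(jpg_path, "JPEG", quality=quality)
        print(f"Converted {png_path.name} -> {jpg_path.name}")


if __name__ == "__main__":
    convert_exports_to_jpeg()
```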

      Conclusion

      -

      Paint 3D is a free modeling application that lets you create and edit 3D models with ease. You can use it to make 2D masterpieces or 3D models that you can play with from all angles. You can also use it to learn about 3D concepts and principles, such as perspective, depth, and dimension.

      -

      Paint 3D is compatible with Windows 8 devices that have at least 4 GB of RAM and a DirectX 9 or higher graphics card. It is also compatible with touchscreens and stylus pens. You can download it from the Microsoft Store or from FileHippo.

      -

      If you are looking for a fun and easy way to express yourself and unleash your creativity in three dimensions, then you should download Paint 3D for Windows 8 today. You will be amazed by what you can create with this app!

      -

      FAQs

      -

      Here are some frequently asked questions and answers about Paint 3D:

      -
        -
      • Q: Is Paint 3D free?
        A: Yes, Paint 3D is a free modeling application that you can download from the Microsoft Store or from FileHippo.
      • Q: Is Paint 3D safe?
        A: Yes, Paint 3D is a safe application that does not contain any viruses or malware. However, if you download it from a third-party website, make sure to scan the file before opening it.
      • Q: Is Paint 3D compatible with Windows 8?
        A: Yes, Paint 3D is compatible with Windows 8 devices that have at least 4 GB of RAM and a DirectX 9 or higher graphics card. It is also compatible with touchscreens and stylus pens.
      • Q: How do I update Paint 3D?
        A: If you downloaded Paint 3D from the Microsoft Store, it will update automatically when there is a new version available. If you downloaded it from FileHippo, you will need to check the website for any updates and download them manually.
      • Q: How do I uninstall Paint 3D?
        A: If you want to uninstall Paint 3D from your Windows 8 device, follow these steps (a scripted alternative is sketched right after the list):

        -
          -
        1. Open the Control Panel on your Windows 8 device.
        2. Select the "Programs and Features" option.
        3. Find and select the Paint 3D app from the list of installed programs.
        4. Click on the "Uninstall" button and follow the instructions on the screen.
        -
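If you prefer to script the removal instead of clicking through Control Panel, the sketch below drives PowerShell's Get-AppxPackage / Remove-AppxPackage cmdlets from Python. The *MSPaint* package pattern is an assumption (on Windows 10, Paint 3D ships as the Microsoft.MSPaint Store package); list the matches first and only remove once you have confirmed the right package.

```python
import subprocess

# Assumption: Paint 3D is installed as the "Microsoft.MSPaint" Store package.
PACKAGE_PATTERN = "*MSPaint*"


def uninstall_paint3d(dry_run: bool = True) -> None:
    """List (and optionally remove) the Paint 3D Store package via PowerShell."""
    list_cmd = [
        "powershell", "-NoProfile", "-Command",
        f"Get-AppxPackage {PACKAGE_PATTERN} | Select-Object Name, PackageFullName",
    ]
    result = subprocess.run(list_cmd, capture_output=True, text=True)
    print(result.stdout or "No matching package found.")

    if not dry_run:
        remove_cmd = [
            "powershell", "-NoProfile", "-Command",
            f"Get-AppxPackage {PACKAGE_PATTERN} | Remove-AppxPackage",
        ]
        subprocess.run(remove_cmd, check=True)


if __name__ == "__main__":
    uninstall_paint3d(dry_run=True)  # set dry_run=False to actually uninstall
```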

        I hope you enjoyed this article and learned something new about Paint 3D. If you have any questions or feedback, please leave a comment below. Thank you for reading!

        -
        -
        \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/eFootball PES 2021 Season Update - Everything You Need to Know Before You Download.md b/spaces/congsaPfin/Manga-OCR/logs/eFootball PES 2021 Season Update - Everything You Need to Know Before You Download.md deleted file mode 100644 index b7b29c8fddc617fb2904056fa597d4feda9635e4..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/eFootball PES 2021 Season Update - Everything You Need to Know Before You Download.md +++ /dev/null @@ -1,118 +0,0 @@ - -

        How to Download PES 2021 for Free on PC, PS4, and Xbox One

        -

        If you are a fan of football games, you might have heard of PES 2021, the latest installment in the popular eFootball series by Konami. PES 2021 is a realistic and immersive football simulation game that offers a variety of modes, teams, players, and stadiums to choose from. But did you know that you can download PES 2021 for free on your PC, PS4, or Xbox One? In this article, we will show you how to do that in a few simple steps.

        -

        What is PES 2021?

        -

        PES 2021 is a football game that celebrates the 25th anniversary of the Pro Evolution Soccer series. It is also known as eFootball PES 2021 Season Update, as it is based on the same engine and gameplay as PES 2020, but with updated rosters, kits, and features. Some of the new features include:

        -




        -
          -
        • A new eFootball mode that allows you to compete with players from around the world in online tournaments and events
        • A revamped myClub mode that lets you create your own dream team by signing and developing players
        • A new Matchday mode that lets you participate in weekly matches that reflect the real-world football calendar
        • An improved Master League mode that lets you manage your favorite club and experience a realistic career mode
        • A new Iconic Moment Series that lets you relive some of the most memorable moments in football history with legendary players
        -

        Why download PES 2021?

        -

        PES 2021 is a game that will appeal to any football fan who wants to experience a realistic and immersive football simulation. Some of the benefits of playing PES 2021 are:

        -
          -
        • It has stunning graphics and animations that make you feel like you are watching a real match
        • It has smooth and responsive gameplay that gives you full control over your actions and tactics
        • It has a variety of modes that cater to different preferences and skill levels
        • It has a large database of licensed teams, players, stadiums, and leagues from around the world
        • It has a dynamic difficulty system that adjusts to your performance and provides a fair challenge
        -

        How to download PES 2021 for free on PC?

        -

        If you want to play PES 2021 on your PC, you have two options: you can either download the trial version or the lite version from Steam. The trial version lets you play myClub and eFootball modes for free, while the lite version lets you play Kickoff, Local Match, Co-op, Training, myClub, and eFootball modes for free. Here are the steps to download either version:

        -
          -
        1. Go to [Steam] and create an account if you don't have one already
        2. Search for "eFootball PES 2021" in the store page
        3. Select either "eFootball PES 2021 SEASON UPDATE" or "eFootball PES 2021 LITE" depending on which one you want to download
        4. Click on "Play Game" and follow the instructions to install the game on your PC
        -

        The requirements and specifications for PC are:

        | | Minimum | Recommended |
        | --- | --- | --- |
        | OS | Windows 8.1/10 - 64bit | Windows 10 - 64bit |
        | CPU | Intel Core i5-3470 / AMD FX 4350 | Intel Core i7-3770 / AMD FX 8350 |
        | RAM | 8 GB | 8 GB |
        | GPU | NVIDIA GTX 670 / AMD Radeon HD 7870 | NVIDIA GTX 760 / AMD Radeon R9 270X |
        | VRAM | 2 GB | 4 GB |
        | DirectX | Version 11 | Version 11 |
        | Storage | 40 GB | 40 GB |
        -
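Whichever tier your machine matches, the game needs roughly 40 GB of free space. On PC you can confirm that up front with a few lines of standard-library Python; the drive letter below is an assumption, so point it at whichever drive Steam installs to.

```python
import shutil

REQUIRED_GB = 40  # storage figure from the table above


def has_room_for_pes(path: str = "C:\\", required_gb: int = REQUIRED_GB) -> bool:
    """Return True if the drive holding `path` has enough free space for the install."""
    total, used, free = shutil.disk_usage(path)
    free_gb = free / (1024 ** 3)
    print(f"{path}: {free_gb:.1f} GB free (need {required_gb} GB)")
    return free_gb >= required_gb


if __name__ == "__main__":
    has_room_for_pes()
```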

        How to download PES 2021 for free on PS4?

        -

        If you want to play PES 2021 on your PS4, you have two options: you can either download the trial version or the lite version from the PlayStation Store. The trial version lets you play myClub and eFootball modes for free, while the lite version lets you play Kickoff, Local Match, Co-op, Training, myClub, and eFootball modes for free. Here are the steps to download either version:

        -
          -
        1. Go to [PlayStation Store] and sign in with your PlayStation Network account, or create one if you don't have one already
        2. Search for "eFootball PES 2021" in the store page
        3. Select either "eFootball PES 2021 SEASON UPDATE" or "eFootball PES 2021 LITE" depending on which one you want to download
        4. Click on "Download" and follow the instructions to install the game on your PS4
        -

        The requirements and specifications for PS4 are:

        -
          -
        • A PS4 console with a minimum of 40 GB of free storage space
        • A PlayStation Network account and an internet connection
        • A compatible controller and a display device
        -

        How to download PES 2021 for free on Xbox One?

        -

        If you want to play PES 2021 on your Xbox One, you have two options: you can either download the trial version or the lite version from the Microsoft Store. The trial version lets you play myClub and eFootball modes for free, while the lite version lets you play Kickoff, Local Match, Co-op, Training, myClub, and eFootball modes for free. Here are the steps to download either version:

        -


        -
          -
        1. Go to [Microsoft Store] and sign in with your Microsoft account, or create one if you don't have one already
        2. Search for "eFootball PES 2021" in the store page
        3. Select either "eFootball PES 2021 SEASON UPDATE" or "eFootball PES 2021 LITE" depending on which one you want to download
        4. Click on "Get" and follow the instructions to install the game on your Xbox One
        -

        The requirements and specifications for Xbox One are:

        -
          -
        • An Xbox One console with a minimum of 40 GB of free storage space
        • A Microsoft account and an internet connection
        • A compatible controller and a display device
        -

        Conclusion

        -

        PES 2021 is a great football game that offers a realistic and immersive football simulation experience. You can download it for free on your PC, PS4, or Xbox One by following the steps we have outlined in this article. Whether you want to play online or offline, solo or with friends, casual or competitive, PES 2021 has something for everyone. So what are you waiting for? Download PES 2021 today and enjoy the beautiful game!

        -

        Frequently Asked Questions (FAQs)

        -
          -
        1. What is the difference between PES 2021 SEASON UPDATE and PES 2021 LITE?
           PES 2021 SEASON UPDATE is the full version of the game that includes all the modes, teams, players, and stadiums. PES 2021 LITE is a free-to-play version of the game that includes limited modes, teams, players, and stadiums.
        2. Can I play PES 2021 online with other players?
           Yes, you can play PES 2021 online with other players in various modes, such as eFootball, myClub, Matchday, and Co-op. You will need an internet connection and a PlayStation Network account, a Microsoft account, or a Steam account depending on your platform.
        3. Can I play PES 2021 offline without an internet connection?
           Yes, you can play PES 2021 offline without an internet connection in some modes, such as Kickoff, Local Match, Training, and Master League. However, you will not be able to access the online features, such as updates, events, and online matches.
        4. Can I transfer my data from PES 2020 to PES 2021?
           Yes, you can transfer your data from PES 2020 to PES 2021 if you have the same platform and the same account. You can transfer your myClub coins, GP, players, managers, scouts, and trainers. However, you cannot transfer your Matchday points, eFootball points, or eFootball rankings.
        5. Can I customize my team and players in PES 2021?
           Yes, you can customize your team and players in PES 2021 by using the Edit mode. You can change the team name, emblem, kit, stadium, and players' appearance, attributes, and skills. You can also create your own original teams and players by using the Create mode.

        -
        -
        \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Annika Murjahn Wilder Kaiser Das Duell HD Nackt.avi.rar !!LINK!!.md b/spaces/contluForse/HuggingGPT/assets/Annika Murjahn Wilder Kaiser Das Duell HD Nackt.avi.rar !!LINK!!.md deleted file mode 100644 index 6e685310272cafaaba1d8e306616548d746a84b6..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Annika Murjahn Wilder Kaiser Das Duell HD Nackt.avi.rar !!LINK!!.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Annika Murjahn Wilder Kaiser Das Duell HD nackt.avi.rar


        Download Ziphttps://ssurll.com/2uzxA6



        - - aaccfb2cb3
        -
        -
        -

        diff --git a/spaces/contluForse/HuggingGPT/assets/Bhse Onkelz Discography 52 Alben RAR Discover the History and Legacy of the German Rock Phenomenon.md b/spaces/contluForse/HuggingGPT/assets/Bhse Onkelz Discography 52 Alben RAR Discover the History and Legacy of the German Rock Phenomenon.md deleted file mode 100644 index 7b8dcf6b57fa2830391896d7ab6fc99d1b492e04..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Bhse Onkelz Discography 52 Alben RAR Discover the History and Legacy of the German Rock Phenomenon.md +++ /dev/null @@ -1,5 +0,0 @@ -
        -

        impmar 45cee15e9a -HlqrTK-Iel" >danielsipperplaneacionycontroldelaproduccionpdf

        " >jumper 2008 dual audio hindi download piratbay

        -cetPI9iGReYSvwQK5aXg" >Macgo Mac Blu-ray Player Pro 3.3

        -ONcf-_QjMo4I_d" >diskinternals uneraser 6.3 keygen torrent

        " >Khelein Hum Jee Jaan Sey movie download 720p in hindi

        _" >Acronis True Image 2014 Premium 17 Build 6673 download

        -6ir04re0JQ" >Vienna Instruments Pro 2 Crack

        -lb3TSzYF" >bhse onkelz discography 52 alben rar

        _Tj0lfElC3C4lrU" >championship manager 00 01 no cd crack 40

        _2VyE9T4" >Mineralogia Klein Zanichelli.pdf

        _29_... > _29_... href=" -labx.ru/?p=22019
        :Album:4092206" > :Album:4092206
        _post=4&vnrosnrosee=... > _post=4&vnrosnrosee=... href="

        -

        bhse onkelz discography 52 alben rar


        Download Filehttps://ssurll.com/2uzvME



        aaccfb2cb3
        -
        -
        \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Cabaret 4 Movie In Tamil REPACK Free Download.md b/spaces/contluForse/HuggingGPT/assets/Cabaret 4 Movie In Tamil REPACK Free Download.md deleted file mode 100644 index 2d245c245f1336022cd5044ee5cb38ed253d30a9..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Cabaret 4 Movie In Tamil REPACK Free Download.md +++ /dev/null @@ -1,5 +0,0 @@ -
        -

        download Bolcom unlimited Movies and videos Download Here.Bolcom Hd,3gp. mp4 320p and More Videos You Can Download Easyly. tamilrockers and movierulz, tamilgun, filmywap, and pagalworld videos and Movies download.

        -

        Cabaret 4 Movie In Tamil Free Download


        DOWNLOADhttps://ssurll.com/2uzyFH



        aaccfb2cb3
        -
        -
        \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Curso De Infraestructura 2010 Victor Burgos 1 Fix.md b/spaces/contluForse/HuggingGPT/assets/Curso De Infraestructura 2010 Victor Burgos 1 Fix.md deleted file mode 100644 index fc6f1f8bc0d9a050be027c63af409752dc154ac4..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Curso De Infraestructura 2010 Victor Burgos 1 Fix.md +++ /dev/null @@ -1,11 +0,0 @@ -

        curso de infraestructura 2010 victor burgos 1


        DOWNLOAD ►►► https://ssurll.com/2uzyVQ



        -
        -Subscribe to the course Installation and Configuration of Exchange Server 2016 by Victor ... All Programs => Microsoft Exchange Server 2010 => Exchange ... Get access to the electronic document management system. -To gain access to the electronic document management system, register online at the link: ... -Sign up for the newsletter. -Not yet subscribed to the newsletter? -Sign up for Microsoft product newsletters and get technical support, tips and tricks to help you get the latest product releases, software updates, tips and tricks. -Subscribe to the newsletter. 8a78ff9644
        -
        -
        -

        diff --git a/spaces/cooelf/Multimodal-CoT/timm/data/parsers/__init__.py b/spaces/cooelf/Multimodal-CoT/timm/data/parsers/__init__.py deleted file mode 100644 index eeb44e3714eff75028e15214e0e65bf2afebd86c..0000000000000000000000000000000000000000 --- a/spaces/cooelf/Multimodal-CoT/timm/data/parsers/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .parser_factory import create_parser diff --git a/spaces/coraKong/WorldSimulation/config.py b/spaces/coraKong/WorldSimulation/config.py deleted file mode 100644 index 7662f62142716f42dafa5f805cc0bf48288a4ab2..0000000000000000000000000000000000000000 --- a/spaces/coraKong/WorldSimulation/config.py +++ /dev/null @@ -1,22 +0,0 @@ -P_DIE_WHEN_LOSE = 0.7 # 战败死亡的概率 -MAX_BATTLE_ROUND = 100 # 战斗最大回合数 -MARRIAGE_RATE = 0.3 # 每名适龄青年每次模拟找对象的概率 - -# 当每人资源大于阈值时,生育率正常,战斗率正常 -NORMAL_BIRTH_RATE = 0.15 -NORMAL_BATTLE_RATE = 0.3 - -# 当每人资源低于阈值时,生育率线性下降,战斗率线性上升 -LOW_BIRTH_RATE = 0.05 -HIGH_BATTLE_RATE = 0.9 - -# 每10轮有5%概率发生灾难 -DISASTER_PROB = 0.05 -DISASTER_FREQUENCY = 10 - -# 每50轮有10%概率发生战争 -WAR_PROB = 0.1 -WAR_FREQUENCY = 50 - -# 成仙等级 -IMMORTAL_RANK = 5 \ No newline at end of file diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/runner/hooks/logger/wandb.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/runner/hooks/logger/wandb.py deleted file mode 100644 index 9f6808462eb79ab2b04806a5d9f0d3dd079b5ea9..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/runner/hooks/logger/wandb.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from ...dist_utils import master_only -from ..hook import HOOKS -from .base import LoggerHook - - -@HOOKS.register_module() -class WandbLoggerHook(LoggerHook): - - def __init__(self, - init_kwargs=None, - interval=10, - ignore_last=True, - reset_flag=False, - commit=True, - by_epoch=True, - with_step=True): - super(WandbLoggerHook, self).__init__(interval, ignore_last, - reset_flag, by_epoch) - self.import_wandb() - self.init_kwargs = init_kwargs - self.commit = commit - self.with_step = with_step - - def import_wandb(self): - try: - import wandb - except ImportError: - raise ImportError( - 'Please run "pip install wandb" to install wandb') - self.wandb = wandb - - @master_only - def before_run(self, runner): - super(WandbLoggerHook, self).before_run(runner) - if self.wandb is None: - self.import_wandb() - if self.init_kwargs: - self.wandb.init(**self.init_kwargs) - else: - self.wandb.init() - - @master_only - def log(self, runner): - tags = self.get_loggable_tags(runner) - if tags: - if self.with_step: - self.wandb.log( - tags, step=self.get_iter(runner), commit=self.commit) - else: - tags['global_step'] = self.get_iter(runner) - self.wandb.log(tags, commit=self.commit) - - @master_only - def after_run(self, runner): - self.wandb.join() diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/backbones/hrnet.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/backbones/hrnet.py deleted file mode 100644 index 331ebf3ccb8597b3f507670753789073fc3c946d..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/backbones/hrnet.py +++ /dev/null @@ -1,555 +0,0 @@ -import torch.nn as nn -from annotator.uniformer.mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init, - kaiming_init) 
-from annotator.uniformer.mmcv.runner import load_checkpoint -from annotator.uniformer.mmcv.utils.parrots_wrapper import _BatchNorm - -from annotator.uniformer.mmseg.ops import Upsample, resize -from annotator.uniformer.mmseg.utils import get_root_logger -from ..builder import BACKBONES -from .resnet import BasicBlock, Bottleneck - - -class HRModule(nn.Module): - """High-Resolution Module for HRNet. - - In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange - is in this module. - """ - - def __init__(self, - num_branches, - blocks, - num_blocks, - in_channels, - num_channels, - multiscale_output=True, - with_cp=False, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True)): - super(HRModule, self).__init__() - self._check_branches(num_branches, num_blocks, in_channels, - num_channels) - - self.in_channels = in_channels - self.num_branches = num_branches - - self.multiscale_output = multiscale_output - self.norm_cfg = norm_cfg - self.conv_cfg = conv_cfg - self.with_cp = with_cp - self.branches = self._make_branches(num_branches, blocks, num_blocks, - num_channels) - self.fuse_layers = self._make_fuse_layers() - self.relu = nn.ReLU(inplace=False) - - def _check_branches(self, num_branches, num_blocks, in_channels, - num_channels): - """Check branches configuration.""" - if num_branches != len(num_blocks): - error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_BLOCKS(' \ - f'{len(num_blocks)})' - raise ValueError(error_msg) - - if num_branches != len(num_channels): - error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_CHANNELS(' \ - f'{len(num_channels)})' - raise ValueError(error_msg) - - if num_branches != len(in_channels): - error_msg = f'NUM_BRANCHES({num_branches}) <> NUM_INCHANNELS(' \ - f'{len(in_channels)})' - raise ValueError(error_msg) - - def _make_one_branch(self, - branch_index, - block, - num_blocks, - num_channels, - stride=1): - """Build one branch.""" - downsample = None - if stride != 1 or \ - self.in_channels[branch_index] != \ - num_channels[branch_index] * block.expansion: - downsample = nn.Sequential( - build_conv_layer( - self.conv_cfg, - self.in_channels[branch_index], - num_channels[branch_index] * block.expansion, - kernel_size=1, - stride=stride, - bias=False), - build_norm_layer(self.norm_cfg, num_channels[branch_index] * - block.expansion)[1]) - - layers = [] - layers.append( - block( - self.in_channels[branch_index], - num_channels[branch_index], - stride, - downsample=downsample, - with_cp=self.with_cp, - norm_cfg=self.norm_cfg, - conv_cfg=self.conv_cfg)) - self.in_channels[branch_index] = \ - num_channels[branch_index] * block.expansion - for i in range(1, num_blocks[branch_index]): - layers.append( - block( - self.in_channels[branch_index], - num_channels[branch_index], - with_cp=self.with_cp, - norm_cfg=self.norm_cfg, - conv_cfg=self.conv_cfg)) - - return nn.Sequential(*layers) - - def _make_branches(self, num_branches, block, num_blocks, num_channels): - """Build multiple branch.""" - branches = [] - - for i in range(num_branches): - branches.append( - self._make_one_branch(i, block, num_blocks, num_channels)) - - return nn.ModuleList(branches) - - def _make_fuse_layers(self): - """Build fuse layer.""" - if self.num_branches == 1: - return None - - num_branches = self.num_branches - in_channels = self.in_channels - fuse_layers = [] - num_out_branches = num_branches if self.multiscale_output else 1 - for i in range(num_out_branches): - fuse_layer = [] - for j in range(num_branches): - if j > i: - fuse_layer.append( - nn.Sequential( - 
build_conv_layer( - self.conv_cfg, - in_channels[j], - in_channels[i], - kernel_size=1, - stride=1, - padding=0, - bias=False), - build_norm_layer(self.norm_cfg, in_channels[i])[1], - # we set align_corners=False for HRNet - Upsample( - scale_factor=2**(j - i), - mode='bilinear', - align_corners=False))) - elif j == i: - fuse_layer.append(None) - else: - conv_downsamples = [] - for k in range(i - j): - if k == i - j - 1: - conv_downsamples.append( - nn.Sequential( - build_conv_layer( - self.conv_cfg, - in_channels[j], - in_channels[i], - kernel_size=3, - stride=2, - padding=1, - bias=False), - build_norm_layer(self.norm_cfg, - in_channels[i])[1])) - else: - conv_downsamples.append( - nn.Sequential( - build_conv_layer( - self.conv_cfg, - in_channels[j], - in_channels[j], - kernel_size=3, - stride=2, - padding=1, - bias=False), - build_norm_layer(self.norm_cfg, - in_channels[j])[1], - nn.ReLU(inplace=False))) - fuse_layer.append(nn.Sequential(*conv_downsamples)) - fuse_layers.append(nn.ModuleList(fuse_layer)) - - return nn.ModuleList(fuse_layers) - - def forward(self, x): - """Forward function.""" - if self.num_branches == 1: - return [self.branches[0](x[0])] - - for i in range(self.num_branches): - x[i] = self.branches[i](x[i]) - - x_fuse = [] - for i in range(len(self.fuse_layers)): - y = 0 - for j in range(self.num_branches): - if i == j: - y += x[j] - elif j > i: - y = y + resize( - self.fuse_layers[i][j](x[j]), - size=x[i].shape[2:], - mode='bilinear', - align_corners=False) - else: - y += self.fuse_layers[i][j](x[j]) - x_fuse.append(self.relu(y)) - return x_fuse - - -@BACKBONES.register_module() -class HRNet(nn.Module): - """HRNet backbone. - - High-Resolution Representations for Labeling Pixels and Regions - arXiv: https://arxiv.org/abs/1904.04514 - - Args: - extra (dict): detailed configuration for each stage of HRNet. - in_channels (int): Number of input image channels. Normally 3. - conv_cfg (dict): dictionary to construct and config conv layer. - norm_cfg (dict): dictionary to construct and config norm layer. - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. - zero_init_residual (bool): whether to use zero init for last norm layer - in resblocks to let them behave as identity. - - Example: - >>> from annotator.uniformer.mmseg.models import HRNet - >>> import torch - >>> extra = dict( - >>> stage1=dict( - >>> num_modules=1, - >>> num_branches=1, - >>> block='BOTTLENECK', - >>> num_blocks=(4, ), - >>> num_channels=(64, )), - >>> stage2=dict( - >>> num_modules=1, - >>> num_branches=2, - >>> block='BASIC', - >>> num_blocks=(4, 4), - >>> num_channels=(32, 64)), - >>> stage3=dict( - >>> num_modules=4, - >>> num_branches=3, - >>> block='BASIC', - >>> num_blocks=(4, 4, 4), - >>> num_channels=(32, 64, 128)), - >>> stage4=dict( - >>> num_modules=3, - >>> num_branches=4, - >>> block='BASIC', - >>> num_blocks=(4, 4, 4, 4), - >>> num_channels=(32, 64, 128, 256))) - >>> self = HRNet(extra, in_channels=1) - >>> self.eval() - >>> inputs = torch.rand(1, 1, 32, 32) - >>> level_outputs = self.forward(inputs) - >>> for level_out in level_outputs: - ... 
print(tuple(level_out.shape)) - (1, 32, 8, 8) - (1, 64, 4, 4) - (1, 128, 2, 2) - (1, 256, 1, 1) - """ - - blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} - - def __init__(self, - extra, - in_channels=3, - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=False, - with_cp=False, - zero_init_residual=False): - super(HRNet, self).__init__() - self.extra = extra - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.norm_eval = norm_eval - self.with_cp = with_cp - self.zero_init_residual = zero_init_residual - - # stem net - self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) - self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2) - - self.conv1 = build_conv_layer( - self.conv_cfg, - in_channels, - 64, - kernel_size=3, - stride=2, - padding=1, - bias=False) - - self.add_module(self.norm1_name, norm1) - self.conv2 = build_conv_layer( - self.conv_cfg, - 64, - 64, - kernel_size=3, - stride=2, - padding=1, - bias=False) - - self.add_module(self.norm2_name, norm2) - self.relu = nn.ReLU(inplace=True) - - # stage 1 - self.stage1_cfg = self.extra['stage1'] - num_channels = self.stage1_cfg['num_channels'][0] - block_type = self.stage1_cfg['block'] - num_blocks = self.stage1_cfg['num_blocks'][0] - - block = self.blocks_dict[block_type] - stage1_out_channels = num_channels * block.expansion - self.layer1 = self._make_layer(block, 64, num_channels, num_blocks) - - # stage 2 - self.stage2_cfg = self.extra['stage2'] - num_channels = self.stage2_cfg['num_channels'] - block_type = self.stage2_cfg['block'] - - block = self.blocks_dict[block_type] - num_channels = [channel * block.expansion for channel in num_channels] - self.transition1 = self._make_transition_layer([stage1_out_channels], - num_channels) - self.stage2, pre_stage_channels = self._make_stage( - self.stage2_cfg, num_channels) - - # stage 3 - self.stage3_cfg = self.extra['stage3'] - num_channels = self.stage3_cfg['num_channels'] - block_type = self.stage3_cfg['block'] - - block = self.blocks_dict[block_type] - num_channels = [channel * block.expansion for channel in num_channels] - self.transition2 = self._make_transition_layer(pre_stage_channels, - num_channels) - self.stage3, pre_stage_channels = self._make_stage( - self.stage3_cfg, num_channels) - - # stage 4 - self.stage4_cfg = self.extra['stage4'] - num_channels = self.stage4_cfg['num_channels'] - block_type = self.stage4_cfg['block'] - - block = self.blocks_dict[block_type] - num_channels = [channel * block.expansion for channel in num_channels] - self.transition3 = self._make_transition_layer(pre_stage_channels, - num_channels) - self.stage4, pre_stage_channels = self._make_stage( - self.stage4_cfg, num_channels) - - @property - def norm1(self): - """nn.Module: the normalization layer named "norm1" """ - return getattr(self, self.norm1_name) - - @property - def norm2(self): - """nn.Module: the normalization layer named "norm2" """ - return getattr(self, self.norm2_name) - - def _make_transition_layer(self, num_channels_pre_layer, - num_channels_cur_layer): - """Make transition layer.""" - num_branches_cur = len(num_channels_cur_layer) - num_branches_pre = len(num_channels_pre_layer) - - transition_layers = [] - for i in range(num_branches_cur): - if i < num_branches_pre: - if num_channels_cur_layer[i] != num_channels_pre_layer[i]: - transition_layers.append( - nn.Sequential( - build_conv_layer( - self.conv_cfg, - num_channels_pre_layer[i], - num_channels_cur_layer[i], - kernel_size=3, - stride=1, - padding=1, 
- bias=False), - build_norm_layer(self.norm_cfg, - num_channels_cur_layer[i])[1], - nn.ReLU(inplace=True))) - else: - transition_layers.append(None) - else: - conv_downsamples = [] - for j in range(i + 1 - num_branches_pre): - in_channels = num_channels_pre_layer[-1] - out_channels = num_channels_cur_layer[i] \ - if j == i - num_branches_pre else in_channels - conv_downsamples.append( - nn.Sequential( - build_conv_layer( - self.conv_cfg, - in_channels, - out_channels, - kernel_size=3, - stride=2, - padding=1, - bias=False), - build_norm_layer(self.norm_cfg, out_channels)[1], - nn.ReLU(inplace=True))) - transition_layers.append(nn.Sequential(*conv_downsamples)) - - return nn.ModuleList(transition_layers) - - def _make_layer(self, block, inplanes, planes, blocks, stride=1): - """Make each layer.""" - downsample = None - if stride != 1 or inplanes != planes * block.expansion: - downsample = nn.Sequential( - build_conv_layer( - self.conv_cfg, - inplanes, - planes * block.expansion, - kernel_size=1, - stride=stride, - bias=False), - build_norm_layer(self.norm_cfg, planes * block.expansion)[1]) - - layers = [] - layers.append( - block( - inplanes, - planes, - stride, - downsample=downsample, - with_cp=self.with_cp, - norm_cfg=self.norm_cfg, - conv_cfg=self.conv_cfg)) - inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append( - block( - inplanes, - planes, - with_cp=self.with_cp, - norm_cfg=self.norm_cfg, - conv_cfg=self.conv_cfg)) - - return nn.Sequential(*layers) - - def _make_stage(self, layer_config, in_channels, multiscale_output=True): - """Make each stage.""" - num_modules = layer_config['num_modules'] - num_branches = layer_config['num_branches'] - num_blocks = layer_config['num_blocks'] - num_channels = layer_config['num_channels'] - block = self.blocks_dict[layer_config['block']] - - hr_modules = [] - for i in range(num_modules): - # multi_scale_output is only used for the last module - if not multiscale_output and i == num_modules - 1: - reset_multiscale_output = False - else: - reset_multiscale_output = True - - hr_modules.append( - HRModule( - num_branches, - block, - num_blocks, - in_channels, - num_channels, - reset_multiscale_output, - with_cp=self.with_cp, - norm_cfg=self.norm_cfg, - conv_cfg=self.conv_cfg)) - - return nn.Sequential(*hr_modules), in_channels - - def init_weights(self, pretrained=None): - """Initialize the weights in backbone. - - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. 
- """ - if isinstance(pretrained, str): - logger = get_root_logger() - load_checkpoint(self, pretrained, strict=False, logger=logger) - elif pretrained is None: - for m in self.modules(): - if isinstance(m, nn.Conv2d): - kaiming_init(m) - elif isinstance(m, (_BatchNorm, nn.GroupNorm)): - constant_init(m, 1) - - if self.zero_init_residual: - for m in self.modules(): - if isinstance(m, Bottleneck): - constant_init(m.norm3, 0) - elif isinstance(m, BasicBlock): - constant_init(m.norm2, 0) - else: - raise TypeError('pretrained must be a str or None') - - def forward(self, x): - """Forward function.""" - - x = self.conv1(x) - x = self.norm1(x) - x = self.relu(x) - x = self.conv2(x) - x = self.norm2(x) - x = self.relu(x) - x = self.layer1(x) - - x_list = [] - for i in range(self.stage2_cfg['num_branches']): - if self.transition1[i] is not None: - x_list.append(self.transition1[i](x)) - else: - x_list.append(x) - y_list = self.stage2(x_list) - - x_list = [] - for i in range(self.stage3_cfg['num_branches']): - if self.transition2[i] is not None: - x_list.append(self.transition2[i](y_list[-1])) - else: - x_list.append(y_list[i]) - y_list = self.stage3(x_list) - - x_list = [] - for i in range(self.stage4_cfg['num_branches']): - if self.transition3[i] is not None: - x_list.append(self.transition3[i](y_list[-1])) - else: - x_list.append(y_list[i]) - y_list = self.stage4(x_list) - - return y_list - - def train(self, mode=True): - """Convert the model into training mode will keeping the normalization - layer freezed.""" - super(HRNet, self).train(mode) - if mode and self.norm_eval: - for m in self.modules(): - # trick: eval have effect on BatchNorm only - if isinstance(m, _BatchNorm): - m.eval() diff --git a/spaces/cxeep/whisper-webui/src/__init__.py b/spaces/cxeep/whisper-webui/src/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/cymic/Talking_Head_Anime_3/tha3/nn/two_algo_body_rotator/__init__.py b/spaces/cymic/Talking_Head_Anime_3/tha3/nn/two_algo_body_rotator/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/dalle-mini/dalle-mini/index.html b/spaces/dalle-mini/dalle-mini/index.html deleted file mode 100644 index fdfd83b76c6b2371a100ead6d8fcc90db8f74256..0000000000000000000000000000000000000000 --- a/spaces/dalle-mini/dalle-mini/index.html +++ /dev/null @@ -1,295 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - -
        - - - diff --git a/spaces/danielcwang-optum/1_SimPhysics/1-SimPhysics/README.md b/spaces/danielcwang-optum/1_SimPhysics/1-SimPhysics/README.md deleted file mode 100644 index b31a6ae704f0373db4e2fb14898ade5c42afb191..0000000000000000000000000000000000000000 --- a/spaces/danielcwang-optum/1_SimPhysics/1-SimPhysics/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: 🏖️PlayCanvas Simulation Vehicle Physics⛱️🌊 Live HTML5 -emoji: 1-Sim🌊 -colorFrom: green -colorTo: gray -sdk: static -pinned: false ---- - -Inspired by Danny Lange, VP AI and ML at Unity -Reference: https://youtu.be/YsEDv13W1RI?t=48 - -Quote on MLAgents: ... if you think about what I just said about evolution and that the creation of tools for intelligence yeah so you have the basic nature you have the 3d spatial environment you have gravity and you have inertia and the physics engine and now we throw in ml agents which is a machine learning system - diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/D_S_I_G_.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/D_S_I_G_.py deleted file mode 100644 index d902a29080aff5a275f530c7658d3c9eb4498034..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fontTools/ttLib/tables/D_S_I_G_.py +++ /dev/null @@ -1,151 +0,0 @@ -from fontTools.misc.textTools import bytesjoin, strjoin, tobytes, tostr, safeEval -from fontTools.misc import sstruct -from . import DefaultTable -import base64 - -DSIG_HeaderFormat = """ - > # big endian - ulVersion: L - usNumSigs: H - usFlag: H -""" -# followed by an array of usNumSigs DSIG_Signature records -DSIG_SignatureFormat = """ - > # big endian - ulFormat: L - ulLength: L # length includes DSIG_SignatureBlock header - ulOffset: L -""" -# followed by an array of usNumSigs DSIG_SignatureBlock records, -# each followed immediately by the pkcs7 bytes -DSIG_SignatureBlockFormat = """ - > # big endian - usReserved1: H - usReserved2: H - cbSignature: l # length of following raw pkcs7 data -""" - -# -# NOTE -# the DSIG table format allows for SignatureBlocks residing -# anywhere in the table and possibly in a different order as -# listed in the array after the first table header -# -# this implementation does not keep track of any gaps and/or data -# before or after the actual signature blocks while decompiling, -# and puts them in the same physical order as listed in the header -# on compilation with no padding whatsoever. 
-# - - -class table_D_S_I_G_(DefaultTable.DefaultTable): - def decompile(self, data, ttFont): - dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self) - assert self.ulVersion == 1, "DSIG ulVersion must be 1" - assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0" - self.signatureRecords = sigrecs = [] - for n in range(self.usNumSigs): - sigrec, newData = sstruct.unpack2( - DSIG_SignatureFormat, newData, SignatureRecord() - ) - assert sigrec.ulFormat == 1, ( - "DSIG signature record #%d ulFormat must be 1" % n - ) - sigrecs.append(sigrec) - for sigrec in sigrecs: - dummy, newData = sstruct.unpack2( - DSIG_SignatureBlockFormat, data[sigrec.ulOffset :], sigrec - ) - assert sigrec.usReserved1 == 0, ( - "DSIG signature record #%d usReserverd1 must be 0" % n - ) - assert sigrec.usReserved2 == 0, ( - "DSIG signature record #%d usReserverd2 must be 0" % n - ) - sigrec.pkcs7 = newData[: sigrec.cbSignature] - - def compile(self, ttFont): - packed = sstruct.pack(DSIG_HeaderFormat, self) - headers = [packed] - offset = len(packed) + self.usNumSigs * sstruct.calcsize(DSIG_SignatureFormat) - data = [] - for sigrec in self.signatureRecords: - # first pack signature block - sigrec.cbSignature = len(sigrec.pkcs7) - packed = sstruct.pack(DSIG_SignatureBlockFormat, sigrec) + sigrec.pkcs7 - data.append(packed) - # update redundant length field - sigrec.ulLength = len(packed) - # update running table offset - sigrec.ulOffset = offset - headers.append(sstruct.pack(DSIG_SignatureFormat, sigrec)) - offset += sigrec.ulLength - if offset % 2: - # Pad to even bytes - data.append(b"\0") - return bytesjoin(headers + data) - - def toXML(self, xmlWriter, ttFont): - xmlWriter.comment( - "note that the Digital Signature will be invalid after recompilation!" - ) - xmlWriter.newline() - xmlWriter.simpletag( - "tableHeader", - version=self.ulVersion, - numSigs=self.usNumSigs, - flag="0x%X" % self.usFlag, - ) - for sigrec in self.signatureRecords: - xmlWriter.newline() - sigrec.toXML(xmlWriter, ttFont) - xmlWriter.newline() - - def fromXML(self, name, attrs, content, ttFont): - if name == "tableHeader": - self.signatureRecords = [] - self.ulVersion = safeEval(attrs["version"]) - self.usNumSigs = safeEval(attrs["numSigs"]) - self.usFlag = safeEval(attrs["flag"]) - return - if name == "SignatureRecord": - sigrec = SignatureRecord() - sigrec.fromXML(name, attrs, content, ttFont) - self.signatureRecords.append(sigrec) - - -pem_spam = lambda l, spam={ - "-----BEGIN PKCS7-----": True, - "-----END PKCS7-----": True, - "": True, -}: not spam.get(l.strip()) - - -def b64encode(b): - s = base64.b64encode(b) - # Line-break at 76 chars. 
- items = [] - while s: - items.append(tostr(s[:76])) - items.append("\n") - s = s[76:] - return strjoin(items) - - -class SignatureRecord(object): - def __repr__(self): - return "<%s: %s>" % (self.__class__.__name__, self.__dict__) - - def toXML(self, writer, ttFont): - writer.begintag(self.__class__.__name__, format=self.ulFormat) - writer.newline() - writer.write_noindent("-----BEGIN PKCS7-----\n") - writer.write_noindent(b64encode(self.pkcs7)) - writer.write_noindent("-----END PKCS7-----\n") - writer.endtag(self.__class__.__name__) - - def fromXML(self, name, attrs, content, ttFont): - self.ulFormat = safeEval(attrs["format"]) - self.usReserved1 = safeEval(attrs.get("reserved1", "0")) - self.usReserved2 = safeEval(attrs.get("reserved2", "0")) - self.pkcs7 = base64.b64decode(tobytes(strjoin(filter(pem_spam, content)))) diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-37519934.css b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-37519934.css deleted file mode 100644 index d48203638574e0f2bb6a0f0ad5a39d7f559a8875..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-37519934.css +++ /dev/null @@ -1 +0,0 @@ -div.svelte-iyf88w{border:var(--block-border-width) solid var(--border-color-primary);background:var(--border-color-primary);border-radius:var(--block-radius);display:flex;flex-direction:column;gap:var(--form-gap-width);overflow:hidden}div.svelte-iyf88w>*:not(.absolute){border:none;border-radius:0}.hide.svelte-iyf88w{display:none} diff --git a/spaces/declare-lab/tango/diffusers/examples/community/lpw_stable_diffusion_onnx.py b/spaces/declare-lab/tango/diffusers/examples/community/lpw_stable_diffusion_onnx.py deleted file mode 100644 index 9aa7d47eeab09d2ec365dac3e2e18ec0e500a952..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/examples/community/lpw_stable_diffusion_onnx.py +++ /dev/null @@ -1,1146 +0,0 @@ -import inspect -import re -from typing import Callable, List, Optional, Union - -import numpy as np -import PIL -import torch -from packaging import version -from transformers import CLIPImageProcessor, CLIPTokenizer - -import diffusers -from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, SchedulerMixin -from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput -from diffusers.utils import logging - - -try: - from diffusers.pipelines.onnx_utils import ORT_TO_NP_TYPE -except ImportError: - ORT_TO_NP_TYPE = { - "tensor(bool)": np.bool_, - "tensor(int8)": np.int8, - "tensor(uint8)": np.uint8, - "tensor(int16)": np.int16, - "tensor(uint16)": np.uint16, - "tensor(int32)": np.int32, - "tensor(uint32)": np.uint32, - "tensor(int64)": np.int64, - "tensor(uint64)": np.uint64, - "tensor(float16)": np.float16, - "tensor(float)": np.float32, - "tensor(double)": np.float64, - } - -try: - from diffusers.utils import PIL_INTERPOLATION -except ImportError: - if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): - PIL_INTERPOLATION = { - "linear": PIL.Image.Resampling.BILINEAR, - "bilinear": PIL.Image.Resampling.BILINEAR, - "bicubic": PIL.Image.Resampling.BICUBIC, - "lanczos": PIL.Image.Resampling.LANCZOS, - "nearest": PIL.Image.Resampling.NEAREST, - } - else: - PIL_INTERPOLATION = { - "linear": PIL.Image.LINEAR, - "bilinear": 
PIL.Image.BILINEAR, - "bicubic": PIL.Image.BICUBIC, - "lanczos": PIL.Image.LANCZOS, - "nearest": PIL.Image.NEAREST, - } -# ------------------------------------------------------------------------------ - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - -re_attention = re.compile( - r""" -\\\(| -\\\)| -\\\[| -\\]| -\\\\| -\\| -\(| -\[| -:([+-]?[.\d]+)\)| -\)| -]| -[^\\()\[\]:]+| -: -""", - re.X, -) - - -def parse_prompt_attention(text): - """ - Parses a string with attention tokens and returns a list of pairs: text and its associated weight. - Accepted tokens are: - (abc) - increases attention to abc by a multiplier of 1.1 - (abc:3.12) - increases attention to abc by a multiplier of 3.12 - [abc] - decreases attention to abc by a multiplier of 1.1 - \( - literal character '(' - \[ - literal character '[' - \) - literal character ')' - \] - literal character ']' - \\ - literal character '\' - anything else - just text - >>> parse_prompt_attention('normal text') - [['normal text', 1.0]] - >>> parse_prompt_attention('an (important) word') - [['an ', 1.0], ['important', 1.1], [' word', 1.0]] - >>> parse_prompt_attention('(unbalanced') - [['unbalanced', 1.1]] - >>> parse_prompt_attention('\(literal\]') - [['(literal]', 1.0]] - >>> parse_prompt_attention('(unnecessary)(parens)') - [['unnecessaryparens', 1.1]] - >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).') - [['a ', 1.0], - ['house', 1.5730000000000004], - [' ', 1.1], - ['on', 1.0], - [' a ', 1.1], - ['hill', 0.55], - [', sun, ', 1.1], - ['sky', 1.4641000000000006], - ['.', 1.1]] - """ - - res = [] - round_brackets = [] - square_brackets = [] - - round_bracket_multiplier = 1.1 - square_bracket_multiplier = 1 / 1.1 - - def multiply_range(start_position, multiplier): - for p in range(start_position, len(res)): - res[p][1] *= multiplier - - for m in re_attention.finditer(text): - text = m.group(0) - weight = m.group(1) - - if text.startswith("\\"): - res.append([text[1:], 1.0]) - elif text == "(": - round_brackets.append(len(res)) - elif text == "[": - square_brackets.append(len(res)) - elif weight is not None and len(round_brackets) > 0: - multiply_range(round_brackets.pop(), float(weight)) - elif text == ")" and len(round_brackets) > 0: - multiply_range(round_brackets.pop(), round_bracket_multiplier) - elif text == "]" and len(square_brackets) > 0: - multiply_range(square_brackets.pop(), square_bracket_multiplier) - else: - res.append([text, 1.0]) - - for pos in round_brackets: - multiply_range(pos, round_bracket_multiplier) - - for pos in square_brackets: - multiply_range(pos, square_bracket_multiplier) - - if len(res) == 0: - res = [["", 1.0]] - - # merge runs of identical weights - i = 0 - while i + 1 < len(res): - if res[i][1] == res[i + 1][1]: - res[i][0] += res[i + 1][0] - res.pop(i + 1) - else: - i += 1 - - return res - - -def get_prompts_with_weights(pipe, prompt: List[str], max_length: int): - r""" - Tokenize a list of prompts and return its tokens with weights of each token. - - No padding, starting or ending token is included. 
- """ - tokens = [] - weights = [] - truncated = False - for text in prompt: - texts_and_weights = parse_prompt_attention(text) - text_token = [] - text_weight = [] - for word, weight in texts_and_weights: - # tokenize and discard the starting and the ending token - token = pipe.tokenizer(word, return_tensors="np").input_ids[0, 1:-1] - text_token += list(token) - # copy the weight by length of token - text_weight += [weight] * len(token) - # stop if the text is too long (longer than truncation limit) - if len(text_token) > max_length: - truncated = True - break - # truncate - if len(text_token) > max_length: - truncated = True - text_token = text_token[:max_length] - text_weight = text_weight[:max_length] - tokens.append(text_token) - weights.append(text_weight) - if truncated: - logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples") - return tokens, weights - - -def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, pad, no_boseos_middle=True, chunk_length=77): - r""" - Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length. - """ - max_embeddings_multiples = (max_length - 2) // (chunk_length - 2) - weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length - for i in range(len(tokens)): - tokens[i] = [bos] + tokens[i] + [pad] * (max_length - 1 - len(tokens[i]) - 1) + [eos] - if no_boseos_middle: - weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i])) - else: - w = [] - if len(weights[i]) == 0: - w = [1.0] * weights_length - else: - for j in range(max_embeddings_multiples): - w.append(1.0) # weight for starting token in this chunk - w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))] - w.append(1.0) # weight for ending token in this chunk - w += [1.0] * (weights_length - len(w)) - weights[i] = w[:] - - return tokens, weights - - -def get_unweighted_text_embeddings( - pipe, - text_input: np.array, - chunk_length: int, - no_boseos_middle: Optional[bool] = True, -): - """ - When the length of tokens is a multiple of the capacity of the text encoder, - it should be split into chunks and sent to the text encoder individually. 
- """ - max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2) - if max_embeddings_multiples > 1: - text_embeddings = [] - for i in range(max_embeddings_multiples): - # extract the i-th chunk - text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].copy() - - # cover the head and the tail by the starting and the ending tokens - text_input_chunk[:, 0] = text_input[0, 0] - text_input_chunk[:, -1] = text_input[0, -1] - - text_embedding = pipe.text_encoder(input_ids=text_input_chunk)[0] - - if no_boseos_middle: - if i == 0: - # discard the ending token - text_embedding = text_embedding[:, :-1] - elif i == max_embeddings_multiples - 1: - # discard the starting token - text_embedding = text_embedding[:, 1:] - else: - # discard both starting and ending tokens - text_embedding = text_embedding[:, 1:-1] - - text_embeddings.append(text_embedding) - text_embeddings = np.concatenate(text_embeddings, axis=1) - else: - text_embeddings = pipe.text_encoder(input_ids=text_input)[0] - return text_embeddings - - -def get_weighted_text_embeddings( - pipe, - prompt: Union[str, List[str]], - uncond_prompt: Optional[Union[str, List[str]]] = None, - max_embeddings_multiples: Optional[int] = 4, - no_boseos_middle: Optional[bool] = False, - skip_parsing: Optional[bool] = False, - skip_weighting: Optional[bool] = False, - **kwargs, -): - r""" - Prompts can be assigned with local weights using brackets. For example, - prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful', - and the embedding tokens corresponding to the words get multiplied by a constant, 1.1. - - Also, to regularize of the embedding, the weighted embedding would be scaled to preserve the original mean. - - Args: - pipe (`OnnxStableDiffusionPipeline`): - Pipe to provide access to the tokenizer and the text encoder. - prompt (`str` or `List[str]`): - The prompt or prompts to guide the image generation. - uncond_prompt (`str` or `List[str]`): - The unconditional prompt or prompts for guide the image generation. If unconditional prompt - is provided, the embeddings of prompt and uncond_prompt are concatenated. - max_embeddings_multiples (`int`, *optional*, defaults to `1`): - The max multiple length of prompt embeddings compared to the max output length of text encoder. - no_boseos_middle (`bool`, *optional*, defaults to `False`): - If the length of text token is multiples of the capacity of text encoder, whether reserve the starting and - ending token in each of the chunk in the middle. - skip_parsing (`bool`, *optional*, defaults to `False`): - Skip the parsing of brackets. - skip_weighting (`bool`, *optional*, defaults to `False`): - Skip the weighting. When the parsing is skipped, it is forced True. 
- """ - max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2 - if isinstance(prompt, str): - prompt = [prompt] - - if not skip_parsing: - prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2) - if uncond_prompt is not None: - if isinstance(uncond_prompt, str): - uncond_prompt = [uncond_prompt] - uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2) - else: - prompt_tokens = [ - token[1:-1] - for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True, return_tensors="np").input_ids - ] - prompt_weights = [[1.0] * len(token) for token in prompt_tokens] - if uncond_prompt is not None: - if isinstance(uncond_prompt, str): - uncond_prompt = [uncond_prompt] - uncond_tokens = [ - token[1:-1] - for token in pipe.tokenizer( - uncond_prompt, - max_length=max_length, - truncation=True, - return_tensors="np", - ).input_ids - ] - uncond_weights = [[1.0] * len(token) for token in uncond_tokens] - - # round up the longest length of tokens to a multiple of (model_max_length - 2) - max_length = max([len(token) for token in prompt_tokens]) - if uncond_prompt is not None: - max_length = max(max_length, max([len(token) for token in uncond_tokens])) - - max_embeddings_multiples = min( - max_embeddings_multiples, - (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1, - ) - max_embeddings_multiples = max(1, max_embeddings_multiples) - max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2 - - # pad the length of tokens and weights - bos = pipe.tokenizer.bos_token_id - eos = pipe.tokenizer.eos_token_id - pad = getattr(pipe.tokenizer, "pad_token_id", eos) - prompt_tokens, prompt_weights = pad_tokens_and_weights( - prompt_tokens, - prompt_weights, - max_length, - bos, - eos, - pad, - no_boseos_middle=no_boseos_middle, - chunk_length=pipe.tokenizer.model_max_length, - ) - prompt_tokens = np.array(prompt_tokens, dtype=np.int32) - if uncond_prompt is not None: - uncond_tokens, uncond_weights = pad_tokens_and_weights( - uncond_tokens, - uncond_weights, - max_length, - bos, - eos, - pad, - no_boseos_middle=no_boseos_middle, - chunk_length=pipe.tokenizer.model_max_length, - ) - uncond_tokens = np.array(uncond_tokens, dtype=np.int32) - - # get the embeddings - text_embeddings = get_unweighted_text_embeddings( - pipe, - prompt_tokens, - pipe.tokenizer.model_max_length, - no_boseos_middle=no_boseos_middle, - ) - prompt_weights = np.array(prompt_weights, dtype=text_embeddings.dtype) - if uncond_prompt is not None: - uncond_embeddings = get_unweighted_text_embeddings( - pipe, - uncond_tokens, - pipe.tokenizer.model_max_length, - no_boseos_middle=no_boseos_middle, - ) - uncond_weights = np.array(uncond_weights, dtype=uncond_embeddings.dtype) - - # assign weights to the prompts and normalize in the sense of mean - # TODO: should we normalize by chunk or in a whole (current implementation)? - if (not skip_parsing) and (not skip_weighting): - previous_mean = text_embeddings.mean(axis=(-2, -1)) - text_embeddings *= prompt_weights[:, :, None] - text_embeddings *= (previous_mean / text_embeddings.mean(axis=(-2, -1)))[:, None, None] - if uncond_prompt is not None: - previous_mean = uncond_embeddings.mean(axis=(-2, -1)) - uncond_embeddings *= uncond_weights[:, :, None] - uncond_embeddings *= (previous_mean / uncond_embeddings.mean(axis=(-2, -1)))[:, None, None] - - # For classifier free guidance, we need to do two forward passes. 
- # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - if uncond_prompt is not None: - return text_embeddings, uncond_embeddings - - return text_embeddings - - -def preprocess_image(image): - w, h = image.size - w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 - image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) - image = np.array(image).astype(np.float32) / 255.0 - image = image[None].transpose(0, 3, 1, 2) - return 2.0 * image - 1.0 - - -def preprocess_mask(mask, scale_factor=8): - mask = mask.convert("L") - w, h = mask.size - w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 - mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"]) - mask = np.array(mask).astype(np.float32) / 255.0 - mask = np.tile(mask, (4, 1, 1)) - mask = mask[None].transpose(0, 1, 2, 3) # what does this step do? - mask = 1 - mask # repaint white, keep black - return mask - - -class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline): - r""" - Pipeline for text-to-image generation using Stable Diffusion without tokens length limit, and support parsing - weighting in prompt. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - """ - if version.parse(version.parse(diffusers.__version__).base_version) >= version.parse("0.9.0"): - - def __init__( - self, - vae_encoder: OnnxRuntimeModel, - vae_decoder: OnnxRuntimeModel, - text_encoder: OnnxRuntimeModel, - tokenizer: CLIPTokenizer, - unet: OnnxRuntimeModel, - scheduler: SchedulerMixin, - safety_checker: OnnxRuntimeModel, - feature_extractor: CLIPImageProcessor, - requires_safety_checker: bool = True, - ): - super().__init__( - vae_encoder=vae_encoder, - vae_decoder=vae_decoder, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - requires_safety_checker=requires_safety_checker, - ) - self.__init__additional__() - - else: - - def __init__( - self, - vae_encoder: OnnxRuntimeModel, - vae_decoder: OnnxRuntimeModel, - text_encoder: OnnxRuntimeModel, - tokenizer: CLIPTokenizer, - unet: OnnxRuntimeModel, - scheduler: SchedulerMixin, - safety_checker: OnnxRuntimeModel, - feature_extractor: CLIPImageProcessor, - ): - super().__init__( - vae_encoder=vae_encoder, - vae_decoder=vae_decoder, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - self.__init__additional__() - - def __init__additional__(self): - self.unet_in_channels = 4 - self.vae_scale_factor = 8 - - def _encode_prompt( - self, - prompt, - num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt, - max_embeddings_multiples, - ): - r""" - Encodes the prompt into text encoder hidden states. - - Args: - prompt (`str` or `list(int)`): - prompt to be encoded - num_images_per_prompt (`int`): - number of images that should be generated per prompt - do_classifier_free_guidance (`bool`): - whether to use classifier free guidance or not - negative_prompt (`str` or `List[str]`): - The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored - if `guidance_scale` is less than `1`). 
- max_embeddings_multiples (`int`, *optional*, defaults to `3`): - The max multiple length of prompt embeddings compared to the max output length of text encoder. - """ - batch_size = len(prompt) if isinstance(prompt, list) else 1 - - if negative_prompt is None: - negative_prompt = [""] * batch_size - elif isinstance(negative_prompt, str): - negative_prompt = [negative_prompt] * batch_size - if batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." - ) - - text_embeddings, uncond_embeddings = get_weighted_text_embeddings( - pipe=self, - prompt=prompt, - uncond_prompt=negative_prompt if do_classifier_free_guidance else None, - max_embeddings_multiples=max_embeddings_multiples, - ) - - text_embeddings = text_embeddings.repeat(num_images_per_prompt, 0) - if do_classifier_free_guidance: - uncond_embeddings = uncond_embeddings.repeat(num_images_per_prompt, 0) - text_embeddings = np.concatenate([uncond_embeddings, text_embeddings]) - - return text_embeddings - - def check_inputs(self, prompt, height, width, strength, callback_steps): - if not isinstance(prompt, str) and not isinstance(prompt, list): - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if strength < 0 or strength > 1: - raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") - - if height % 8 != 0 or width % 8 != 0: - raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") - - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." 
- ) - - def get_timesteps(self, num_inference_steps, strength, is_text2img): - if is_text2img: - return self.scheduler.timesteps, num_inference_steps - else: - # get the original timestep using init_timestep - offset = self.scheduler.config.get("steps_offset", 0) - init_timestep = int(num_inference_steps * strength) + offset - init_timestep = min(init_timestep, num_inference_steps) - - t_start = max(num_inference_steps - init_timestep + offset, 0) - timesteps = self.scheduler.timesteps[t_start:] - return timesteps, num_inference_steps - t_start - - def run_safety_checker(self, image): - if self.safety_checker is not None: - safety_checker_input = self.feature_extractor( - self.numpy_to_pil(image), return_tensors="np" - ).pixel_values.astype(image.dtype) - # calling the safety_checker directly throws an error when the batch size is > 1, so run it image by image - images, has_nsfw_concept = [], [] - for i in range(image.shape[0]): - image_i, has_nsfw_concept_i = self.safety_checker( - clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1] - ) - images.append(image_i) - has_nsfw_concept.append(has_nsfw_concept_i[0]) - image = np.concatenate(images) - else: - has_nsfw_concept = None - return image, has_nsfw_concept - - def decode_latents(self, latents): - latents = 1 / 0.18215 * latents - # image = self.vae_decoder(latent_sample=latents)[0] - # the half-precision vae decoder can produce incorrect results when the batch size is > 1, so decode latents one at a time - image = np.concatenate( - [self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])] - ) - image = np.clip(image / 2 + 0.5, 0, 1) - image = image.transpose((0, 2, 3, 1)) - return image - - def prepare_extra_step_kwargs(self, generator, eta): - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
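- # (eta=0.0 makes DDIM sampling deterministic; larger values add noise at each step, approaching DDPM-like behaviour at eta=1.0)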
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - return extra_step_kwargs - - def prepare_latents(self, image, timestep, batch_size, height, width, dtype, generator, latents=None): - if image is None: - shape = ( - batch_size, - self.unet_in_channels, - height // self.vae_scale_factor, - width // self.vae_scale_factor, - ) - - if latents is None: - latents = torch.randn(shape, generator=generator, device="cpu").numpy().astype(dtype) - else: - if latents.shape != shape: - raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") - - # scale the initial noise by the standard deviation required by the scheduler - latents = (torch.from_numpy(latents) * self.scheduler.init_noise_sigma).numpy() - return latents, None, None - else: - init_latents = self.vae_encoder(sample=image)[0] - init_latents = 0.18215 * init_latents - init_latents = np.concatenate([init_latents] * batch_size, axis=0) - init_latents_orig = init_latents - shape = init_latents.shape - - # add noise to latents using the timesteps - noise = torch.randn(shape, generator=generator, device="cpu").numpy().astype(dtype) - latents = self.scheduler.add_noise( - torch.from_numpy(init_latents), torch.from_numpy(noise), timestep - ).numpy() - return latents, init_latents_orig, noise - - @torch.no_grad() - def __call__( - self, - prompt: Union[str, List[str]], - negative_prompt: Optional[Union[str, List[str]]] = None, - image: Union[np.ndarray, PIL.Image.Image] = None, - mask_image: Union[np.ndarray, PIL.Image.Image] = None, - height: int = 512, - width: int = 512, - num_inference_steps: int = 50, - guidance_scale: float = 7.5, - strength: float = 0.8, - num_images_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[torch.Generator] = None, - latents: Optional[np.ndarray] = None, - max_embeddings_multiples: Optional[int] = 3, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, np.ndarray], None]] = None, - is_cancelled_callback: Optional[Callable[[], bool]] = None, - callback_steps: int = 1, - **kwargs, - ): - r""" - Function invoked when calling the pipeline for generation. - - Args: - prompt (`str` or `List[str]`): - The prompt or prompts to guide the image generation. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored - if `guidance_scale` is less than `1`). - image (`np.ndarray` or `PIL.Image.Image`): - `Image`, or tensor representing an image batch, that will be used as the starting point for the - process. - mask_image (`np.ndarray` or `PIL.Image.Image`): - `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be - replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a - PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should - contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. 
- height (`int`, *optional*, defaults to 512): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to 512): - The width in pixels of the generated image. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 7.5): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - strength (`float`, *optional*, defaults to 0.8): - Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. - `image` will be used as a starting point, adding more noise to it the larger the `strength`. The - number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added - noise will be maximum and the denoising process will run for the full number of iterations specified in - `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to - [`schedulers.DDIMScheduler`], will be ignored for others. - generator (`torch.Generator`, *optional*): - A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation - deterministic. - latents (`np.ndarray`, *optional*): - Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. - max_embeddings_multiples (`int`, *optional*, defaults to `3`): - The max multiple length of prompt embeddings compared to the max output length of text encoder. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. The function will be - called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. - is_cancelled_callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. If the function returns - `True`, the inference will be cancelled. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function will be called. If not specified, the callback will be - called at every step. 
- - Returns: - `None` if cancelled by `is_cancelled_callback`, - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. - When returning a tuple, the first element is a list with the generated images, and the second element is a - list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" - (nsfw) content, according to the `safety_checker`. - """ - # 0. Default height and width to unet - height = height or self.unet.config.sample_size * self.vae_scale_factor - width = width or self.unet.config.sample_size * self.vae_scale_factor - - # 1. Check inputs. Raise error if not correct - self.check_inputs(prompt, height, width, strength, callback_steps) - - # 2. Define call parameters - batch_size = 1 if isinstance(prompt, str) else len(prompt) - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - # 3. Encode input prompt - text_embeddings = self._encode_prompt( - prompt, - num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt, - max_embeddings_multiples, - ) - dtype = text_embeddings.dtype - - # 4. Preprocess image and mask - if isinstance(image, PIL.Image.Image): - image = preprocess_image(image) - if image is not None: - image = image.astype(dtype) - if isinstance(mask_image, PIL.Image.Image): - mask_image = preprocess_mask(mask_image, self.vae_scale_factor) - if mask_image is not None: - mask = mask_image.astype(dtype) - mask = np.concatenate([mask] * batch_size * num_images_per_prompt) - else: - mask = None - - # 5. set timesteps - self.scheduler.set_timesteps(num_inference_steps) - timestep_dtype = next( - (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)" - ) - timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] - timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, image is None) - latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) - - # 6. Prepare latent variables - latents, init_latents_orig, noise = self.prepare_latents( - image, - latent_timestep, - batch_size * num_images_per_prompt, - height, - width, - dtype, - generator, - latents, - ) - - # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline - extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - # 8. 
Denoising loop - for i, t in enumerate(self.progress_bar(timesteps)): - # expand the latents if we are doing classifier free guidance - latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t) - latent_model_input = latent_model_input.numpy() - - # predict the noise residual - noise_pred = self.unet( - sample=latent_model_input, - timestep=np.array([t], dtype=timestep_dtype), - encoder_hidden_states=text_embeddings, - ) - noise_pred = noise_pred[0] - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - scheduler_output = self.scheduler.step( - torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs - ) - latents = scheduler_output.prev_sample.numpy() - - if mask is not None: - # masking - init_latents_proper = self.scheduler.add_noise( - torch.from_numpy(init_latents_orig), - torch.from_numpy(noise), - t, - ).numpy() - latents = (init_latents_proper * mask) + (latents * (1 - mask)) - - # call the callback, if provided - if i % callback_steps == 0: - if callback is not None: - callback(i, t, latents) - if is_cancelled_callback is not None and is_cancelled_callback(): - return None - - # 9. Post-processing - image = self.decode_latents(latents) - - # 10. Run safety checker - image, has_nsfw_concept = self.run_safety_checker(image) - - # 11. Convert to PIL - if output_type == "pil": - image = self.numpy_to_pil(image) - - if not return_dict: - return image, has_nsfw_concept - - return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) - - def text2img( - self, - prompt: Union[str, List[str]], - negative_prompt: Optional[Union[str, List[str]]] = None, - height: int = 512, - width: int = 512, - num_inference_steps: int = 50, - guidance_scale: float = 7.5, - num_images_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[torch.Generator] = None, - latents: Optional[np.ndarray] = None, - max_embeddings_multiples: Optional[int] = 3, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, np.ndarray], None]] = None, - callback_steps: int = 1, - **kwargs, - ): - r""" - Function for text-to-image generation. - Args: - prompt (`str` or `List[str]`): - The prompt or prompts to guide the image generation. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored - if `guidance_scale` is less than `1`). - height (`int`, *optional*, defaults to 512): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to 512): - The width in pixels of the generated image. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 7.5): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to - [`schedulers.DDIMScheduler`], will be ignored for others. - generator (`torch.Generator`, *optional*): - A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation - deterministic. - latents (`np.ndarray`, *optional*): - Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. - max_embeddings_multiples (`int`, *optional*, defaults to `3`): - The max multiple length of prompt embeddings compared to the max output length of text encoder. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. The function will be - called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function will be called. If not specified, the callback will be - called at every step. - Returns: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. - When returning a tuple, the first element is a list with the generated images, and the second element is a - list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" - (nsfw) content, according to the `safety_checker`. - """ - return self.__call__( - prompt=prompt, - negative_prompt=negative_prompt, - height=height, - width=width, - num_inference_steps=num_inference_steps, - guidance_scale=guidance_scale, - num_images_per_prompt=num_images_per_prompt, - eta=eta, - generator=generator, - latents=latents, - max_embeddings_multiples=max_embeddings_multiples, - output_type=output_type, - return_dict=return_dict, - callback=callback, - callback_steps=callback_steps, - **kwargs, - ) - - def img2img( - self, - image: Union[np.ndarray, PIL.Image.Image], - prompt: Union[str, List[str]], - negative_prompt: Optional[Union[str, List[str]]] = None, - strength: float = 0.8, - num_inference_steps: Optional[int] = 50, - guidance_scale: Optional[float] = 7.5, - num_images_per_prompt: Optional[int] = 1, - eta: Optional[float] = 0.0, - generator: Optional[torch.Generator] = None, - max_embeddings_multiples: Optional[int] = 3, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, np.ndarray], None]] = None, - callback_steps: int = 1, - **kwargs, - ): - r""" - Function for image-to-image generation. 
- Args: - image (`np.ndarray` or `PIL.Image.Image`): - `Image`, or ndarray representing an image batch, that will be used as the starting point for the - process. - prompt (`str` or `List[str]`): - The prompt or prompts to guide the image generation. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored - if `guidance_scale` is less than `1`). - strength (`float`, *optional*, defaults to 0.8): - Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. - `image` will be used as a starting point, adding more noise to it the larger the `strength`. The - number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added - noise will be maximum and the denoising process will run for the full number of iterations specified in - `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. This parameter will be modulated by `strength`. - guidance_scale (`float`, *optional*, defaults to 7.5): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to - [`schedulers.DDIMScheduler`], will be ignored for others. - generator (`torch.Generator`, *optional*): - A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation - deterministic. - max_embeddings_multiples (`int`, *optional*, defaults to `3`): - The max multiple length of prompt embeddings compared to the max output length of text encoder. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. The function will be - called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function will be called. If not specified, the callback will be - called at every step. - Returns: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. 
- When returning a tuple, the first element is a list with the generated images, and the second element is a - list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" - (nsfw) content, according to the `safety_checker`. - """ - return self.__call__( - prompt=prompt, - negative_prompt=negative_prompt, - image=image, - num_inference_steps=num_inference_steps, - guidance_scale=guidance_scale, - strength=strength, - num_images_per_prompt=num_images_per_prompt, - eta=eta, - generator=generator, - max_embeddings_multiples=max_embeddings_multiples, - output_type=output_type, - return_dict=return_dict, - callback=callback, - callback_steps=callback_steps, - **kwargs, - ) - - def inpaint( - self, - image: Union[np.ndarray, PIL.Image.Image], - mask_image: Union[np.ndarray, PIL.Image.Image], - prompt: Union[str, List[str]], - negative_prompt: Optional[Union[str, List[str]]] = None, - strength: float = 0.8, - num_inference_steps: Optional[int] = 50, - guidance_scale: Optional[float] = 7.5, - num_images_per_prompt: Optional[int] = 1, - eta: Optional[float] = 0.0, - generator: Optional[torch.Generator] = None, - max_embeddings_multiples: Optional[int] = 3, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, np.ndarray], None]] = None, - callback_steps: int = 1, - **kwargs, - ): - r""" - Function for inpaint. - Args: - image (`np.ndarray` or `PIL.Image.Image`): - `Image`, or tensor representing an image batch, that will be used as the starting point for the - process. This is the image whose masked region will be inpainted. - mask_image (`np.ndarray` or `PIL.Image.Image`): - `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be - replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a - PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should - contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`. - prompt (`str` or `List[str]`): - The prompt or prompts to guide the image generation. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored - if `guidance_scale` is less than `1`). - strength (`float`, *optional*, defaults to 0.8): - Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength` - is 1, the denoising process will be run on the masked area for the full number of iterations specified - in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more - noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur. - num_inference_steps (`int`, *optional*, defaults to 50): - The reference number of denoising steps. More denoising steps usually lead to a higher quality image at - the expense of slower inference. This parameter will be modulated by `strength`, as explained above. - guidance_scale (`float`, *optional*, defaults to 7.5): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. 
Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to - [`schedulers.DDIMScheduler`], will be ignored for others. - generator (`torch.Generator`, *optional*): - A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation - deterministic. - max_embeddings_multiples (`int`, *optional*, defaults to `3`): - The max multiple length of prompt embeddings compared to the max output length of text encoder. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. The function will be - called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function will be called. If not specified, the callback will be - called at every step. - Returns: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. - When returning a tuple, the first element is a list with the generated images, and the second element is a - list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" - (nsfw) content, according to the `safety_checker`. - """ - return self.__call__( - prompt=prompt, - negative_prompt=negative_prompt, - image=image, - mask_image=mask_image, - num_inference_steps=num_inference_steps, - guidance_scale=guidance_scale, - strength=strength, - num_images_per_prompt=num_images_per_prompt, - eta=eta, - generator=generator, - max_embeddings_multiples=max_embeddings_multiples, - output_type=output_type, - return_dict=return_dict, - callback=callback, - callback_steps=callback_steps, - **kwargs, - ) diff --git a/spaces/declare-lab/tango/diffusers/tests/test_ema.py b/spaces/declare-lab/tango/diffusers/tests/test_ema.py deleted file mode 100644 index 812d83e2f2418817f4d7e0e1c81d1b1dedfa611d..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/tests/test_ema.py +++ /dev/null @@ -1,156 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
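-# Tests for EMAModel: optimization-step counting, shadow-parameter updates, zero decay, and save/load round-trips.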
- -import tempfile -import unittest - -import torch - -from diffusers import UNet2DConditionModel -from diffusers.training_utils import EMAModel -from diffusers.utils.testing_utils import skip_mps, torch_device - - -class EMAModelTests(unittest.TestCase): - model_id = "hf-internal-testing/tiny-stable-diffusion-pipe" - batch_size = 1 - prompt_length = 77 - text_encoder_hidden_dim = 32 - num_in_channels = 4 - latent_height = latent_width = 64 - generator = torch.manual_seed(0) - - def get_models(self, decay=0.9999): - unet = UNet2DConditionModel.from_pretrained(self.model_id, subfolder="unet") - unet = unet.to(torch_device) - ema_unet = EMAModel(unet.parameters(), decay=decay, model_cls=UNet2DConditionModel, model_config=unet.config) - return unet, ema_unet - - def get_dummy_inputs(self): - noisy_latents = torch.randn( - self.batch_size, self.num_in_channels, self.latent_height, self.latent_width, generator=self.generator - ).to(torch_device) - timesteps = torch.randint(0, 1000, size=(self.batch_size,), generator=self.generator).to(torch_device) - encoder_hidden_states = torch.randn( - self.batch_size, self.prompt_length, self.text_encoder_hidden_dim, generator=self.generator - ).to(torch_device) - return noisy_latents, timesteps, encoder_hidden_states - - def simulate_backprop(self, unet): - updated_state_dict = {} - for k, param in unet.state_dict().items(): - updated_param = torch.randn_like(param) + (param * torch.randn_like(param)) - updated_state_dict.update({k: updated_param}) - unet.load_state_dict(updated_state_dict) - return unet - - def test_optimization_steps_updated(self): - unet, ema_unet = self.get_models() - # Take the first (hypothetical) EMA step. - ema_unet.step(unet.parameters()) - assert ema_unet.optimization_step == 1 - - # Take two more. - for _ in range(2): - ema_unet.step(unet.parameters()) - assert ema_unet.optimization_step == 3 - - def test_shadow_params_not_updated(self): - unet, ema_unet = self.get_models() - # Since the `unet` is not being updated (i.e., backprop'd) - # there won't be any difference between the `params` of `unet` - # and `ema_unet` even if we call `ema_unet.step(unet.parameters())`. - ema_unet.step(unet.parameters()) - orig_params = list(unet.parameters()) - for s_param, param in zip(ema_unet.shadow_params, orig_params): - assert torch.allclose(s_param, param) - - # The above holds true even if we call `ema.step()` multiple times since - # `unet` params are still not being updated. - for _ in range(4): - ema_unet.step(unet.parameters()) - for s_param, param in zip(ema_unet.shadow_params, orig_params): - assert torch.allclose(s_param, param) - - def test_shadow_params_updated(self): - unet, ema_unet = self.get_models() - # Here we simulate the parameter updates for `unet`. Since there might - # be some parameters which are initialized to zero we take extra care to - # initialize their values to something non-zero before the multiplication. - unet_pseudo_updated_step_one = self.simulate_backprop(unet) - - # Take the EMA step. - ema_unet.step(unet_pseudo_updated_step_one.parameters()) - - # Now the EMA'd parameters won't be equal to the original model parameters. - orig_params = list(unet_pseudo_updated_step_one.parameters()) - for s_param, param in zip(ema_unet.shadow_params, orig_params): - assert ~torch.allclose(s_param, param) - - # Ensure this is the case when we take multiple EMA steps. 
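- # (with the default decay of 0.9999 each step moves the shadow params only slightly, so a few extra steps still leave them distinguishable from the raw unet weights)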
- for _ in range(4): - ema_unet.step(unet.parameters()) - for s_param, param in zip(ema_unet.shadow_params, orig_params): - assert ~torch.allclose(s_param, param) - - def test_consecutive_shadow_params_updated(self): - # If we call EMA step after a backpropagation consecutively for two times, - # the shadow params from those two steps should be different. - unet, ema_unet = self.get_models() - - # First backprop + EMA - unet_step_one = self.simulate_backprop(unet) - ema_unet.step(unet_step_one.parameters()) - step_one_shadow_params = ema_unet.shadow_params - - # Second backprop + EMA - unet_step_two = self.simulate_backprop(unet_step_one) - ema_unet.step(unet_step_two.parameters()) - step_two_shadow_params = ema_unet.shadow_params - - for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params): - assert ~torch.allclose(step_one, step_two) - - def test_zero_decay(self): - # If there's no decay even if there are backprops, EMA steps - # won't take any effect i.e., the shadow params would remain the - # same. - unet, ema_unet = self.get_models(decay=0.0) - unet_step_one = self.simulate_backprop(unet) - ema_unet.step(unet_step_one.parameters()) - step_one_shadow_params = ema_unet.shadow_params - - unet_step_two = self.simulate_backprop(unet_step_one) - ema_unet.step(unet_step_two.parameters()) - step_two_shadow_params = ema_unet.shadow_params - - for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params): - assert torch.allclose(step_one, step_two) - - @skip_mps - def test_serialization(self): - unet, ema_unet = self.get_models() - noisy_latents, timesteps, encoder_hidden_states = self.get_dummy_inputs() - - with tempfile.TemporaryDirectory() as tmpdir: - ema_unet.save_pretrained(tmpdir) - loaded_unet = UNet2DConditionModel.from_pretrained(tmpdir, model_cls=UNet2DConditionModel) - loaded_unet = loaded_unet.to(unet.device) - - # Since no EMA step has been performed the outputs should match. 
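- # (the shadow params are still identical to the unet weights at this point, so the reloaded model must reproduce the same output)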
- output = unet(noisy_latents, timesteps, encoder_hidden_states).sample - output_loaded = loaded_unet(noisy_latents, timesteps, encoder_hidden_states).sample - - assert torch.allclose(output, output_loaded, atol=1e-4) diff --git a/spaces/deepghs/deepdanbooru_online/README.md b/spaces/deepghs/deepdanbooru_online/README.md deleted file mode 100644 index 2e2b565096f059fc13e38a4c5261a9a97f617d7a..0000000000000000000000000000000000000000 --- a/spaces/deepghs/deepdanbooru_online/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Deepdanbooru Online -emoji: 📚 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.16.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/deepwisdom/MetaGPT/docs/scripts/get_all_classes_and_funcs.sh b/spaces/deepwisdom/MetaGPT/docs/scripts/get_all_classes_and_funcs.sh deleted file mode 100644 index 011349caf35729702d0dfc1aa69474c8f2d9c833..0000000000000000000000000000000000000000 --- a/spaces/deepwisdom/MetaGPT/docs/scripts/get_all_classes_and_funcs.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash - -find metagpt | grep "\.py" | grep -Ev "(__init__|pyc)" | xargs grep -E "(^class| def )" 2>/dev/null | grep -v -E "(grep|tests|examples)" \ No newline at end of file diff --git a/spaces/deepwisdom/MetaGPT/metagpt/utils/__init__.py b/spaces/deepwisdom/MetaGPT/metagpt/utils/__init__.py deleted file mode 100644 index f13175cf88f3d9dc80246c02776b1f6e6314330a..0000000000000000000000000000000000000000 --- a/spaces/deepwisdom/MetaGPT/metagpt/utils/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/4/29 15:50 -@Author : alexanderwu -@File : __init__.py -""" - -from metagpt.utils.read_document import read_docx -from metagpt.utils.singleton import Singleton -from metagpt.utils.token_counter import ( - TOKEN_COSTS, - count_message_tokens, - count_string_tokens, -) - - -__all__ = [ - "read_docx", - "Singleton", - "TOKEN_COSTS", - "count_message_tokens", - "count_string_tokens", -] diff --git a/spaces/delmaksym/Huggy/Build/HuggyCorrectUnity.loader.js b/spaces/delmaksym/Huggy/Build/HuggyCorrectUnity.loader.js deleted file mode 100644 index beba2e2e6cf28a5526f230375acbdf60b2f31ed2..0000000000000000000000000000000000000000 --- a/spaces/delmaksym/Huggy/Build/HuggyCorrectUnity.loader.js +++ /dev/null @@ -1,2 +0,0 @@ -function createUnityInstance(e,t,r){function n(e,r){if(!n.aborted&&t.showBanner)return"error"==r&&(n.aborted=!0),t.showBanner(e,r);switch(r){case"error":console.error(e);break;case"warning":console.warn(e);break;default:console.log(e)}}function o(e){var t=e.reason||e.error,r=t?t.toString():e.message||e.reason||"",n=t&&t.stack?t.stack.toString():"";if(n.startsWith(r)&&(n=n.substring(r.length)),r+="\n"+n.trim(),r&&f.stackTraceRegExp&&f.stackTraceRegExp.test(r)){var o=e.filename||t&&(t.fileName||t.sourceURL)||"",a=e.lineno||t&&(t.lineNumber||t.line)||0;i(r,o,a)}}function a(e){e.preventDefault()}function i(e,t,r){if(e.indexOf("fullscreen error")==-1){if(f.startupErrorHandler)return void f.startupErrorHandler(e,t,r);if(!(f.errorHandler&&f.errorHandler(e,t,r)||(console.log("Invoking error handler due to\n"+e),"function"==typeof dump&&dump("Invoking error handler due to\n"+e),i.didShowErrorMessage))){var e="An error occurred running the Unity content on this page. See your browser JavaScript console for more info. 
The error was:\n"+e;e.indexOf("DISABLE_EXCEPTION_CATCHING")!=-1?e="An exception has occurred, but exception handling has been disabled in this build. If you are the developer of this content, enable exceptions in your project WebGL player settings to be able to catch the exception or see the stack trace.":e.indexOf("Cannot enlarge memory arrays")!=-1?e="Out of memory. If you are the developer of this content, try allocating more memory to your WebGL build in the WebGL player settings.":e.indexOf("Invalid array buffer length")==-1&&e.indexOf("Invalid typed array length")==-1&&e.indexOf("out of memory")==-1&&e.indexOf("could not allocate memory")==-1||(e="The browser could not allocate enough memory for the WebGL content. If you are the developer of this content, try allocating less memory to your WebGL build in the WebGL player settings."),alert(e),i.didShowErrorMessage=!0}}}function s(e,t){if("symbolsUrl"!=e){var n=f.downloadProgress[e];n||(n=f.downloadProgress[e]={started:!1,finished:!1,lengthComputable:!1,total:0,loaded:0}),"object"!=typeof t||"progress"!=t.type&&"load"!=t.type||(n.started||(n.started=!0,n.lengthComputable=t.lengthComputable),n.total=t.total,n.loaded=t.loaded,"load"==t.type&&(n.finished=!0));var o=0,a=0,i=0,s=0,l=0;for(var e in f.downloadProgress){var n=f.downloadProgress[e];if(!n.started)return 0;i++,n.lengthComputable?(o+=n.loaded,a+=n.total,s++):n.finished||l++}var d=i?(i-l-(a?s*(a-o)/a:0))/i:0;r(.9*d)}}function l(e,t){return new Promise(function(r,n){try{for(var o in w)if(w[o].hasUnityMarker(e)){t&&console.log('You can reduce startup time if you configure your web server to add "Content-Encoding: '+o+'" response header when serving "'+t+'" file.');var a=w[o];if(!a.worker){var i=URL.createObjectURL(new Blob(["this.require = ",a.require.toString(),"; this.decompress = ",a.decompress.toString(),"; this.onmessage = ",function(e){var t={id:e.data.id,decompressed:this.decompress(e.data.compressed)};postMessage(t,t.decompressed?[t.decompressed.buffer]:[])}.toString(),"; postMessage({ ready: true });"],{type:"application/javascript"}));a.worker=new Worker(i),a.worker.onmessage=function(e){return e.data.ready?void URL.revokeObjectURL(i):(this.callbacks[e.data.id](e.data.decompressed),void delete this.callbacks[e.data.id])},a.worker.callbacks={},a.worker.nextCallbackId=0}var s=a.worker.nextCallbackId++;return a.worker.callbacks[s]=r,void a.worker.postMessage({id:s,compressed:e},[e.buffer])}r(e)}catch(e){n(e)}})}function d(e){s(e);var t=f.cacheControl(f[e]),r=f.companyName&&f.productName?f.cachedFetch:f.fetchWithProgress,o=f[e],a=/file:\/\//.exec(o)?"same-origin":void 0,i=r(f[e],{method:"GET",companyName:f.companyName,productName:f.productName,control:t,mode:a,onProgress:function(t){s(e,t)}});return i.then(function(t){return l(t.parsedBody,f[e])}).catch(function(t){var r="Failed to download file "+f[e];"file:"==location.protocol?n(r+". Loading web pages via a file:// URL without a web server is not supported by this browser. 
Please use a local development web server to host Unity content, or use the Unity Build and Run option.","error"):console.error(r)})}function u(){return d("frameworkUrl").then(function(e){var t=URL.createObjectURL(new Blob([e],{type:"application/javascript"}));return new Promise(function(e,r){var o=document.createElement("script");o.src=t,o.onload=function(){if("undefined"==typeof unityFramework||!unityFramework){var r=[["br","br"],["gz","gzip"]];for(var a in r){var i=r[a];if(f.frameworkUrl.endsWith("."+i[0])){var s="Unable to parse "+f.frameworkUrl+"!";if("file:"==location.protocol)return void n(s+" Loading pre-compressed (brotli or gzip) content via a file:// URL without a web server is not supported by this browser. Please use a local development web server to host compressed Unity content, or use the Unity Build and Run option.","error");if(s+=' This can happen if build compression was enabled but web server hosting the content was misconfigured to not serve the file with HTTP Response Header "Content-Encoding: '+i[1]+'" present. Check browser Console and Devtools Network tab to debug.',"br"==i[0]&&"http:"==location.protocol){var l=["localhost","127.0.0.1"].indexOf(location.hostname)!=-1?"":"Migrate your server to use HTTPS.";s=/Firefox/.test(navigator.userAgent)?"Unable to parse "+f.frameworkUrl+'!
        If using custom web server, verify that web server is sending .br files with HTTP Response Header "Content-Encoding: br". Brotli compression may not be supported in Firefox over HTTP connections. '+l+' See https://bugzilla.mozilla.org/show_bug.cgi?id=1670675 for more information.':"Unable to parse "+f.frameworkUrl+'!
        If using custom web server, verify that web server is sending .br files with HTTP Response Header "Content-Encoding: br". Brotli compression may not be supported over HTTP connections. Migrate your server to use HTTPS.'}return void n(s,"error")}}n("Unable to parse "+f.frameworkUrl+"! The file is corrupt, or compression was misconfigured? (check Content-Encoding HTTP Response Header on web server)","error")}var d=unityFramework;unityFramework=null,o.onload=null,URL.revokeObjectURL(t),e(d)},o.onerror=function(e){n("Unable to load file "+f.frameworkUrl+"! Check that the file exists on the remote server. (also check browser Console and Devtools Network tab to debug)","error")},document.body.appendChild(o),f.deinitializers.push(function(){document.body.removeChild(o)})})})}function c(){Promise.all([u(),d("codeUrl")]).then(function(e){f.wasmBinary=e[1],e[0](f)});var e=d("dataUrl");f.preRun.push(function(){f.addRunDependency("dataUrl"),e.then(function(e){var t=new DataView(e.buffer,e.byteOffset,e.byteLength),r=0,n="UnityWebData1.0\0";if(!String.fromCharCode.apply(null,e.subarray(r,r+n.length))==n)throw"unknown data format";r+=n.length;var o=t.getUint32(r,!0);for(r+=4;r0;d=u,u=l.indexOf("/",d)+1)f.FS_createPath(l.substring(0,d),l.substring(d,u-1),!0,!0);f.FS_createDataFile(l,null,e.subarray(a,a+i),!0,!0,!0)}f.removeRunDependency("dataUrl")})})}r=r||function(){};var f={canvas:e,webglContextAttributes:{preserveDrawingBuffer:!1},cacheControl:function(e){return e==f.dataUrl?"must-revalidate":"no-store"},streamingAssetsUrl:"StreamingAssets",downloadProgress:{},deinitializers:[],intervals:{},setInterval:function(e,t){var r=window.setInterval(e,t);return this.intervals[r]=!0,r},clearInterval:function(e){delete this.intervals[e],window.clearInterval(e)},preRun:[],postRun:[],print:function(e){console.log(e)},printErr:function(e){console.error(e),"string"==typeof e&&e.indexOf("wasm streaming compile failed")!=-1&&(e.toLowerCase().indexOf("mime")!=-1?n('HTTP Response Header "Content-Type" configured incorrectly on the server for file '+f.codeUrl+' , should be "application/wasm". Startup time performance will suffer.',"warning"):n('WebAssembly streaming compilation failed! This can happen for example if "Content-Encoding" HTTP header is incorrectly enabled on the server for file '+f.codeUrl+", but the file is not pre-compressed on disk (or vice versa). 
Check the Network tab in browser Devtools to debug server header configuration.","warning"))},locateFile:function(e){return e},disabledCanvasEvents:["contextmenu","dragstart"]};for(var h in t)f[h]=t[h];f.streamingAssetsUrl=new URL(f.streamingAssetsUrl,document.URL).href;var b=f.disabledCanvasEvents.slice();b.forEach(function(t){e.addEventListener(t,a)}),window.addEventListener("error",o),window.addEventListener("unhandledrejection",o),f.deinitializers.push(function(){f.disableAccessToMediaDevices(),b.forEach(function(t){e.removeEventListener(t,a)}),window.removeEventListener("error",o),window.removeEventListener("unhandledrejection",o);for(var t in f.intervals)window.clearInterval(t);f.intervals={}}),f.QuitCleanup=function(){for(var e=0;e=200&&this.status<=299}.bind(this)})}function o(e,t,r,n,o){var a={url:e,version:l.version,company:t,product:r,updated:n,revalidated:n,accessed:n,response:{headers:{}}};return o&&(o.headers.forEach(function(e,t){a.response.headers[t]=e}),["redirected","status","statusText","type","url"].forEach(function(e){a.response[e]=o[e]}),a.response.parsedBody=o.parsedBody),a}function a(e,t){return(!t||!t.method||"GET"===t.method)&&((!t||["must-revalidate","immutable"].indexOf(t.control)!=-1)&&!!e.match("^https?://"))}function i(i,u){function c(t,r){return d(t,r).then(function(t){return!m.enabled||m.revalidated?t:304===t.status?(m.result.revalidated=m.result.accessed,m.revalidated=!0,h.storeRequest(m.result).then(function(){e("'"+m.result.url+"' successfully revalidated and served from the indexedDB cache")}).catch(function(t){e("'"+m.result.url+"' successfully revalidated but not stored in the indexedDB cache due to the error: "+t)}),new n(m.result.response)):(200==t.status?(m.result=o(t.url,m.company,m.product,m.accessed,t),m.revalidated=!0,h.storeRequest(m.result).then(function(){e("'"+m.result.url+"' successfully downloaded and stored in the indexedDB cache")}).catch(function(t){e("'"+m.result.url+"' successfully downloaded but not stored in the indexedDB cache due to the error: "+t)})):e("'"+m.result.url+"' request failed with status: "+t.status+" "+t.statusText),t)})}function f(e){u&&u.onProgress&&(u.onProgress({type:"progress",total:e.parsedBody.length,loaded:e.parsedBody.length,lengthComputable:!0}),u.onProgress({type:"load",total:e.parsedBody.length,loaded:e.parsedBody.length,lengthComputable:!0}))}var h=s.getInstance(),b=t("string"==typeof i?i:i.url),m={enabled:a(b,u)};return u&&(m.control=u.control,m.company=u.company,m.product=u.product),m.result=o(b,m.company,m.product,Date.now()),m.revalidated=!1,m.enabled?h.loadRequest(m.result.url).then(function(t){if(!t||t.version!==l.version)return c(i,u);m.result=t,m.result.accessed=Date.now();var o=new n(m.result.response);if("immutable"==m.control)return m.revalidated=!0,h.storeRequest(m.result),e("'"+m.result.url+"' served from the indexedDB cache without revalidation"),f(o),o;if(r(m.result.url)&&(o.headers.get("Last-Modified")||o.headers.get("ETag")))return fetch(m.result.url,{method:"HEAD"}).then(function(t){return m.revalidated=["Last-Modified","ETag"].every(function(e){return!o.headers.get(e)||o.headers.get(e)==t.headers.get(e)}),m.revalidated?(m.result.revalidated=m.result.accessed,h.storeRequest(m.result),e("'"+m.result.url+"' successfully revalidated and served from the indexedDB cache"),f(o),o):c(i,u)});u=u||{};var a=u.headers||{};return 
u.headers=a,o.headers.get("Last-Modified")?(a["If-Modified-Since"]=o.headers.get("Last-Modified"),a["Cache-Control"]="no-cache"):o.headers.get("ETag")&&(a["If-None-Match"]=o.headers.get("ETag"),a["Cache-Control"]="no-cache"),c(i,u)}).catch(function(t){return e("Failed to load '"+m.result.url+"' from indexedDB cache due to the error: "+t),d(i,u)}):d(i,u)}var s=f.UnityCache,l=s.RequestStore,d=f.fetchWithProgress;return n.prototype.arrayBuffer=function(){return Promise.resolve(this.parsedBody.buffer)},n.prototype.blob=function(){return this.arrayBuffer().then(function(e){return new Blob([e])})},n.prototype.json=function(){return this.text().then(function(e){return JSON.parse(e)})},n.prototype.text=function(){var e=new TextDecoder;return Promise.resolve(e.decode(this.parsedBody))},i}();var w={gzip:{require:function(e){var t={"inflate.js":function(e,t,r){"use strict";function n(e){if(!(this instanceof n))return new n(e);this.options=s.assign({chunkSize:16384,windowBits:0,to:""},e||{});var t=this.options;t.raw&&t.windowBits>=0&&t.windowBits<16&&(t.windowBits=-t.windowBits,0===t.windowBits&&(t.windowBits=-15)),!(t.windowBits>=0&&t.windowBits<16)||e&&e.windowBits||(t.windowBits+=32),t.windowBits>15&&t.windowBits<48&&0===(15&t.windowBits)&&(t.windowBits|=15),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new c,this.strm.avail_out=0;var r=i.inflateInit2(this.strm,t.windowBits);if(r!==d.Z_OK)throw new Error(u[r]);this.header=new f,i.inflateGetHeader(this.strm,this.header)}function o(e,t){var r=new n(t);if(r.push(e,!0),r.err)throw r.msg||u[r.err];return r.result}function a(e,t){return t=t||{},t.raw=!0,o(e,t)}var i=e("./zlib/inflate"),s=e("./utils/common"),l=e("./utils/strings"),d=e("./zlib/constants"),u=e("./zlib/messages"),c=e("./zlib/zstream"),f=e("./zlib/gzheader"),h=Object.prototype.toString;n.prototype.push=function(e,t){var r,n,o,a,u,c,f=this.strm,b=this.options.chunkSize,m=this.options.dictionary,g=!1;if(this.ended)return!1;n=t===~~t?t:t===!0?d.Z_FINISH:d.Z_NO_FLUSH,"string"==typeof e?f.input=l.binstring2buf(e):"[object ArrayBuffer]"===h.call(e)?f.input=new Uint8Array(e):f.input=e,f.next_in=0,f.avail_in=f.input.length;do{if(0===f.avail_out&&(f.output=new s.Buf8(b),f.next_out=0,f.avail_out=b),r=i.inflate(f,d.Z_NO_FLUSH),r===d.Z_NEED_DICT&&m&&(c="string"==typeof m?l.string2buf(m):"[object ArrayBuffer]"===h.call(m)?new Uint8Array(m):m,r=i.inflateSetDictionary(this.strm,c)),r===d.Z_BUF_ERROR&&g===!0&&(r=d.Z_OK,g=!1),r!==d.Z_STREAM_END&&r!==d.Z_OK)return this.onEnd(r),this.ended=!0,!1;f.next_out&&(0!==f.avail_out&&r!==d.Z_STREAM_END&&(0!==f.avail_in||n!==d.Z_FINISH&&n!==d.Z_SYNC_FLUSH)||("string"===this.options.to?(o=l.utf8border(f.output,f.next_out),a=f.next_out-o,u=l.buf2string(f.output,o),f.next_out=a,f.avail_out=b-a,a&&s.arraySet(f.output,f.output,o,a,0),this.onData(u)):this.onData(s.shrinkBuf(f.output,f.next_out)))),0===f.avail_in&&0===f.avail_out&&(g=!0)}while((f.avail_in>0||0===f.avail_out)&&r!==d.Z_STREAM_END);return r===d.Z_STREAM_END&&(n=d.Z_FINISH),n===d.Z_FINISH?(r=i.inflateEnd(this.strm),this.onEnd(r),this.ended=!0,r===d.Z_OK):n!==d.Z_SYNC_FLUSH||(this.onEnd(d.Z_OK),f.avail_out=0,!0)},n.prototype.onData=function(e){this.chunks.push(e)},n.prototype.onEnd=function(e){e===d.Z_OK&&("string"===this.options.to?this.result=this.chunks.join(""):this.result=s.flattenChunks(this.chunks)),this.chunks=[],this.err=e,this.msg=this.strm.msg},r.Inflate=n,r.inflate=o,r.inflateRaw=a,r.ungzip=o},"utils/common.js":function(e,t,r){"use strict";var n="undefined"!=typeof 
Uint8Array&&"undefined"!=typeof Uint16Array&&"undefined"!=typeof Int32Array;r.assign=function(e){for(var t=Array.prototype.slice.call(arguments,1);t.length;){var r=t.shift();if(r){if("object"!=typeof r)throw new TypeError(r+"must be non-object");for(var n in r)r.hasOwnProperty(n)&&(e[n]=r[n])}}return e},r.shrinkBuf=function(e,t){return e.length===t?e:e.subarray?e.subarray(0,t):(e.length=t,e)};var o={arraySet:function(e,t,r,n,o){if(t.subarray&&e.subarray)return void e.set(t.subarray(r,r+n),o);for(var a=0;a=252?6:l>=248?5:l>=240?4:l>=224?3:l>=192?2:1;s[254]=s[254]=1,r.string2buf=function(e){var t,r,n,a,i,s=e.length,l=0;for(a=0;a>>6,t[i++]=128|63&r):r<65536?(t[i++]=224|r>>>12,t[i++]=128|r>>>6&63,t[i++]=128|63&r):(t[i++]=240|r>>>18,t[i++]=128|r>>>12&63,t[i++]=128|r>>>6&63,t[i++]=128|63&r);return t},r.buf2binstring=function(e){return n(e,e.length)},r.binstring2buf=function(e){for(var t=new o.Buf8(e.length),r=0,n=t.length;r4)d[o++]=65533,r+=i-1;else{for(a&=2===i?31:3===i?15:7;i>1&&r1?d[o++]=65533:a<65536?d[o++]=a:(a-=65536,d[o++]=55296|a>>10&1023,d[o++]=56320|1023&a)}return n(d,o)},r.utf8border=function(e,t){var r;for(t=t||e.length,t>e.length&&(t=e.length),r=t-1;r>=0&&128===(192&e[r]);)r--;return r<0?t:0===r?t:r+s[e[r]]>t?r:t}},"zlib/inflate.js":function(e,t,r){"use strict";function n(e){return(e>>>24&255)+(e>>>8&65280)+((65280&e)<<8)+((255&e)<<24)}function o(){this.mode=0,this.last=!1,this.wrap=0,this.havedict=!1,this.flags=0,this.dmax=0,this.check=0,this.total=0,this.head=null,this.wbits=0,this.wsize=0,this.whave=0,this.wnext=0,this.window=null,this.hold=0,this.bits=0,this.length=0,this.offset=0,this.extra=0,this.lencode=null,this.distcode=null,this.lenbits=0,this.distbits=0,this.ncode=0,this.nlen=0,this.ndist=0,this.have=0,this.next=null,this.lens=new w.Buf16(320),this.work=new w.Buf16(288),this.lendyn=null,this.distdyn=null,this.sane=0,this.back=0,this.was=0}function a(e){var t;return e&&e.state?(t=e.state,e.total_in=e.total_out=t.total=0,e.msg="",t.wrap&&(e.adler=1&t.wrap),t.mode=z,t.last=0,t.havedict=0,t.dmax=32768,t.head=null,t.hold=0,t.bits=0,t.lencode=t.lendyn=new w.Buf32(me),t.distcode=t.distdyn=new w.Buf32(ge),t.sane=1,t.back=-1,T):O}function i(e){var t;return e&&e.state?(t=e.state,t.wsize=0,t.whave=0,t.wnext=0,a(e)):O}function s(e,t){var r,n;return e&&e.state?(n=e.state,t<0?(r=0,t=-t):(r=(t>>4)+1,t<48&&(t&=15)),t&&(t<8||t>15)?O:(null!==n.window&&n.wbits!==t&&(n.window=null),n.wrap=r,n.wbits=t,i(e))):O}function l(e,t){var r,n;return e?(n=new o,e.state=n,n.window=null,r=s(e,t),r!==T&&(e.state=null),r):O}function d(e){return l(e,we)}function u(e){if(ve){var t;for(g=new w.Buf32(512),p=new w.Buf32(32),t=0;t<144;)e.lens[t++]=8;for(;t<256;)e.lens[t++]=9;for(;t<280;)e.lens[t++]=7;for(;t<288;)e.lens[t++]=8;for(x(S,e.lens,0,288,g,0,e.work,{bits:9}),t=0;t<32;)e.lens[t++]=5;x(E,e.lens,0,32,p,0,e.work,{bits:5}),ve=!1}e.lencode=g,e.lenbits=9,e.distcode=p,e.distbits=5}function c(e,t,r,n){var o,a=e.state;return null===a.window&&(a.wsize=1<=a.wsize?(w.arraySet(a.window,t,r-a.wsize,a.wsize,0),a.wnext=0,a.whave=a.wsize):(o=a.wsize-a.wnext,o>n&&(o=n),w.arraySet(a.window,t,r-n,o,a.wnext),n-=o,n?(w.arraySet(a.window,t,r-n,n,0),a.wnext=n,a.whave=a.wsize):(a.wnext+=o,a.wnext===a.wsize&&(a.wnext=0),a.whave>>8&255,r.check=y(r.check,Be,2,0),f=0,h=0,r.mode=N;break}if(r.flags=0,r.head&&(r.head.done=!1),!(1&r.wrap)||(((255&f)<<8)+(f>>8))%31){e.msg="incorrect header check",r.mode=fe;break}if((15&f)!==D){e.msg="unknown compression method",r.mode=fe;break}if(f>>>=4,h-=4,xe=(15&f)+8,0===r.wbits)r.wbits=xe;else 
if(xe>r.wbits){e.msg="invalid window size",r.mode=fe;break}r.dmax=1<>8&1),512&r.flags&&(Be[0]=255&f,Be[1]=f>>>8&255,r.check=y(r.check,Be,2,0)),f=0,h=0,r.mode=F;case F:for(;h<32;){if(0===l)break e;l--,f+=o[i++]<>>8&255,Be[2]=f>>>16&255,Be[3]=f>>>24&255,r.check=y(r.check,Be,4,0)),f=0,h=0,r.mode=Z;case Z:for(;h<16;){if(0===l)break e;l--,f+=o[i++]<>8),512&r.flags&&(Be[0]=255&f,Be[1]=f>>>8&255,r.check=y(r.check,Be,2,0)),f=0,h=0,r.mode=j;case j:if(1024&r.flags){for(;h<16;){if(0===l)break e;l--,f+=o[i++]<>>8&255,r.check=y(r.check,Be,2,0)),f=0,h=0}else r.head&&(r.head.extra=null);r.mode=H;case H:if(1024&r.flags&&(g=r.length,g>l&&(g=l),g&&(r.head&&(xe=r.head.extra_len-r.length,r.head.extra||(r.head.extra=new Array(r.head.extra_len)),w.arraySet(r.head.extra,o,i,g,xe)),512&r.flags&&(r.check=y(r.check,o,g,i)),l-=g,i+=g,r.length-=g),r.length))break e;r.length=0,r.mode=M;case M:if(2048&r.flags){if(0===l)break e;g=0;do xe=o[i+g++],r.head&&xe&&r.length<65536&&(r.head.name+=String.fromCharCode(xe));while(xe&&g>9&1,r.head.done=!0),e.adler=r.check=0,r.mode=Y;break;case q:for(;h<32;){if(0===l)break e;l--,f+=o[i++]<>>=7&h,h-=7&h,r.mode=de;break}for(;h<3;){if(0===l)break e;l--,f+=o[i++]<>>=1,h-=1,3&f){case 0:r.mode=Q;break;case 1:if(u(r),r.mode=re,t===U){f>>>=2,h-=2;break e}break;case 2:r.mode=$;break;case 3:e.msg="invalid block type",r.mode=fe}f>>>=2,h-=2;break;case Q:for(f>>>=7&h,h-=7&h;h<32;){if(0===l)break e;l--,f+=o[i++]<>>16^65535)){e.msg="invalid stored block lengths",r.mode=fe;break}if(r.length=65535&f,f=0,h=0,r.mode=X,t===U)break e;case X:r.mode=J;case J:if(g=r.length){if(g>l&&(g=l),g>d&&(g=d),0===g)break e;w.arraySet(a,o,i,g,s),l-=g,i+=g,d-=g,s+=g,r.length-=g;break}r.mode=Y;break;case $:for(;h<14;){if(0===l)break e;l--,f+=o[i++]<>>=5,h-=5,r.ndist=(31&f)+1,f>>>=5,h-=5,r.ncode=(15&f)+4,f>>>=4,h-=4,r.nlen>286||r.ndist>30){e.msg="too many length or distance symbols",r.mode=fe;break}r.have=0,r.mode=ee;case ee:for(;r.have>>=3,h-=3}for(;r.have<19;)r.lens[Ue[r.have++]]=0;if(r.lencode=r.lendyn,r.lenbits=7,Se={bits:r.lenbits},_e=x(_,r.lens,0,19,r.lencode,0,r.work,Se),r.lenbits=Se.bits,_e){e.msg="invalid code lengths set",r.mode=fe;break}r.have=0,r.mode=te;case te:for(;r.have>>24,pe=Ce>>>16&255,we=65535&Ce,!(ge<=h);){if(0===l)break e;l--,f+=o[i++]<>>=ge,h-=ge,r.lens[r.have++]=we;else{if(16===we){for(Ee=ge+2;h>>=ge,h-=ge,0===r.have){e.msg="invalid bit length repeat",r.mode=fe; -break}xe=r.lens[r.have-1],g=3+(3&f),f>>>=2,h-=2}else if(17===we){for(Ee=ge+3;h>>=ge,h-=ge,xe=0,g=3+(7&f),f>>>=3,h-=3}else{for(Ee=ge+7;h>>=ge,h-=ge,xe=0,g=11+(127&f),f>>>=7,h-=7}if(r.have+g>r.nlen+r.ndist){e.msg="invalid bit length repeat",r.mode=fe;break}for(;g--;)r.lens[r.have++]=xe}}if(r.mode===fe)break;if(0===r.lens[256]){e.msg="invalid code -- missing end-of-block",r.mode=fe;break}if(r.lenbits=9,Se={bits:r.lenbits},_e=x(S,r.lens,0,r.nlen,r.lencode,0,r.work,Se),r.lenbits=Se.bits,_e){e.msg="invalid literal/lengths set",r.mode=fe;break}if(r.distbits=6,r.distcode=r.distdyn,Se={bits:r.distbits},_e=x(E,r.lens,r.nlen,r.ndist,r.distcode,0,r.work,Se),r.distbits=Se.bits,_e){e.msg="invalid distances set",r.mode=fe;break}if(r.mode=re,t===U)break e;case re:r.mode=ne;case ne:if(l>=6&&d>=258){e.next_out=s,e.avail_out=d,e.next_in=i,e.avail_in=l,r.hold=f,r.bits=h,k(e,m),s=e.next_out,a=e.output,d=e.avail_out,i=e.next_in,o=e.input,l=e.avail_in,f=r.hold,h=r.bits,r.mode===Y&&(r.back=-1);break}for(r.back=0;Ce=r.lencode[f&(1<>>24,pe=Ce>>>16&255,we=65535&Ce,!(ge<=h);){if(0===l)break 
e;l--,f+=o[i++]<>ve)],ge=Ce>>>24,pe=Ce>>>16&255,we=65535&Ce,!(ve+ge<=h);){if(0===l)break e;l--,f+=o[i++]<>>=ve,h-=ve,r.back+=ve}if(f>>>=ge,h-=ge,r.back+=ge,r.length=we,0===pe){r.mode=le;break}if(32&pe){r.back=-1,r.mode=Y;break}if(64&pe){e.msg="invalid literal/length code",r.mode=fe;break}r.extra=15&pe,r.mode=oe;case oe:if(r.extra){for(Ee=r.extra;h>>=r.extra,h-=r.extra,r.back+=r.extra}r.was=r.length,r.mode=ae;case ae:for(;Ce=r.distcode[f&(1<>>24,pe=Ce>>>16&255,we=65535&Ce,!(ge<=h);){if(0===l)break e;l--,f+=o[i++]<>ve)],ge=Ce>>>24,pe=Ce>>>16&255,we=65535&Ce,!(ve+ge<=h);){if(0===l)break e;l--,f+=o[i++]<>>=ve,h-=ve,r.back+=ve}if(f>>>=ge,h-=ge,r.back+=ge,64&pe){e.msg="invalid distance code",r.mode=fe;break}r.offset=we,r.extra=15&pe,r.mode=ie;case ie:if(r.extra){for(Ee=r.extra;h>>=r.extra,h-=r.extra,r.back+=r.extra}if(r.offset>r.dmax){e.msg="invalid distance too far back",r.mode=fe;break}r.mode=se;case se:if(0===d)break e;if(g=m-d,r.offset>g){if(g=r.offset-g,g>r.whave&&r.sane){e.msg="invalid distance too far back",r.mode=fe;break}g>r.wnext?(g-=r.wnext,p=r.wsize-g):p=r.wnext-g,g>r.length&&(g=r.length),me=r.window}else me=a,p=s-r.offset,g=r.length;g>d&&(g=d),d-=g,r.length-=g;do a[s++]=me[p++];while(--g);0===r.length&&(r.mode=ne);break;case le:if(0===d)break e;a[s++]=r.length,d--,r.mode=ne;break;case de:if(r.wrap){for(;h<32;){if(0===l)break e;l--,f|=o[i++]<>>16&65535|0,i=0;0!==r;){i=r>2e3?2e3:r,r-=i;do o=o+t[n++]|0,a=a+o|0;while(--i);o%=65521,a%=65521}return o|a<<16|0}t.exports=n},"zlib/crc32.js":function(e,t,r){"use strict";function n(){for(var e,t=[],r=0;r<256;r++){e=r;for(var n=0;n<8;n++)e=1&e?3988292384^e>>>1:e>>>1;t[r]=e}return t}function o(e,t,r,n){var o=a,i=n+r;e^=-1;for(var s=n;s>>8^o[255&(e^t[s])];return e^-1}var a=n();t.exports=o},"zlib/inffast.js":function(e,t,r){"use strict";var n=30,o=12;t.exports=function(e,t){var r,a,i,s,l,d,u,c,f,h,b,m,g,p,w,v,y,k,x,_,S,E,C,B,U;r=e.state,a=e.next_in,B=e.input,i=a+(e.avail_in-5),s=e.next_out,U=e.output,l=s-(t-e.avail_out),d=s+(e.avail_out-257),u=r.dmax,c=r.wsize,f=r.whave,h=r.wnext,b=r.window,m=r.hold,g=r.bits,p=r.lencode,w=r.distcode,v=(1<>>24,m>>>=x,g-=x,x=k>>>16&255,0===x)U[s++]=65535&k;else{if(!(16&x)){if(0===(64&x)){k=p[(65535&k)+(m&(1<>>=x,g-=x),g<15&&(m+=B[a++]<>>24,m>>>=x,g-=x,x=k>>>16&255,!(16&x)){if(0===(64&x)){k=w[(65535&k)+(m&(1<u){e.msg="invalid distance too far back",r.mode=n;break e}if(m>>>=x,g-=x,x=s-l,S>x){if(x=S-x,x>f&&r.sane){e.msg="invalid distance too far back",r.mode=n;break e}if(E=0,C=b,0===h){if(E+=c-x,x<_){_-=x;do U[s++]=b[E++];while(--x);E=s-S,C=U}}else if(h2;)U[s++]=C[E++],U[s++]=C[E++],U[s++]=C[E++],_-=3;_&&(U[s++]=C[E++],_>1&&(U[s++]=C[E++]))}else{E=s-S;do U[s++]=U[E++],U[s++]=U[E++],U[s++]=U[E++],_-=3;while(_>2);_&&(U[s++]=U[E++],_>1&&(U[s++]=U[E++]))}break}}break}}while(a>3,a-=_,g-=_<<3,m&=(1<=1&&0===j[O];O--);if(I>O&&(I=O),0===O)return m[g++]=20971520,m[g++]=20971520,w.bits=1,0;for(L=1;L0&&(e===s||1!==O))return-1;for(H[1]=0,T=1;Ta||e===d&&z>i)return 1;for(;;){E=T-P,p[R]S?(C=M[W+p[R]],B=F[Z+p[R]]):(C=96,B=0),v=1<>P)+y]=E<<24|C<<16|B|0;while(0!==y);for(v=1<>=1;if(0!==v?(N&=v-1,N+=v):N=0,R++,0===--j[T]){if(T===O)break;T=t[r+p[R]]}if(T>I&&(N&x)!==k){for(0===P&&(P=I),_+=L,A=T-P,D=1<a||e===d&&z>i)return 1;k=N&x,m[k]=I<<24|A<<16|_-g|0}}return 0!==N&&(m[_+N]=T-P<<24|64<<16|0),w.bits=I,0}}};for(var r in t)t[r].folder=r.substring(0,r.lastIndexOf("/")+1);var n=function(e){var r=[];return 
e=e.split("/").every(function(e){return".."==e?r.pop():"."==e||""==e||r.push(e)})?r.join("/"):null,e?t[e]||t[e+".js"]||t[e+"/index.js"]:null},o=function(e,t){return e?n(e.folder+"node_modules/"+t)||o(e.parent,t):null},a=function(e,t){var r=t.match(/^\//)?null:e?t.match(/^\.\.?\//)?n(e.folder+t):o(e,t):n(t);if(!r)throw"module not found: "+t;return r.exports||(r.parent=e,r(a.bind(null,r),r,r.exports={})),r.exports};return a(null,e)},decompress:function(e){this.exports||(this.exports=this.require("inflate.js"));try{return this.exports.inflate(e)}catch(e){}},hasUnityMarker:function(e){var t=10,r="UnityWeb Compressed Content (gzip)";if(t>e.length||31!=e[0]||139!=e[1])return!1;var n=e[3];if(4&n){if(t+2>e.length)return!1;if(t+=2+e[t]+(e[t+1]<<8),t>e.length)return!1}if(8&n){for(;te.length)return!1;t++}return 16&n&&String.fromCharCode.apply(null,e.subarray(t,t+r.length+1))==r+"\0"}}};return new Promise(function(e,t){f.SystemInfo.hasWebGL?1==f.SystemInfo.hasWebGL?t('Your browser does not support graphics API "WebGL 2" which is required for this content.'):f.SystemInfo.hasWasm?(1==f.SystemInfo.hasWebGL&&f.print('Warning: Your browser does not support "WebGL 2" Graphics API, switching to "WebGL 1"'),f.startupErrorHandler=t,r(0),f.postRun.push(function(){r(1),delete f.startupErrorHandler,e(p)}),c()):t("Your browser does not support WebAssembly."):t("Your browser does not support WebGL.")})} \ No newline at end of file diff --git a/spaces/derina/MusicSpleeter/app.py b/spaces/derina/MusicSpleeter/app.py deleted file mode 100644 index 0135ea51f8fb8ea2cb90936cac6672938a8d8780..0000000000000000000000000000000000000000 --- a/spaces/derina/MusicSpleeter/app.py +++ /dev/null @@ -1,30 +0,0 @@ -from spleeter.separator import Separator -import gradio as gr -import shutil - -def spleeter(aud, instrument): - separator = Separator('spleeter:2stems') - try: - shutil.rmtree("output") - except FileNotFoundError: - pass - separator.separate_to_file(aud, "output/", filename_format="audio_example/{instrument}.wav") - return f"./output/audio_example/{instrument}.wav", f"./output/audio_example/{instrument}.wav" - -inputs = [ - gr.inputs.Audio(label="Input Audio File", type="filepath"), - gr.inputs.Radio(label="Output", choices=["accompaniment", "vocals"], type="value") -] -outputs = [ - gr.outputs.Audio(label="Output Audio", type="filepath"), - gr.outputs.File(label="Output File") -] - -title = "Music Spleeter" -description = "Clearing a musical composition of the performer's voice is a common task. It is solved well, for example, by professional audio file editing programs. AI algorithms have also been gaining ground recently." -article = "

        In this case we use Deezer's Spleeter with its ready-made pretrained models. It can output either just the music or just the performer's voice.

        Sources: Spleeter: a Fast and Efficient Music Source Separation Tool with Pre-Trained Models

        StarStat Music: Youtubers Net Worth in category Music

        " -examples = [ - ["audio_example.mp3", "vocals"] -] - -gr.Interface(spleeter, inputs, outputs, title=title, description=description, article=article, examples=examples).launch() \ No newline at end of file diff --git a/spaces/devthedeveloper/Bark-with-Voice-Cloning/util/parseinput.py b/spaces/devthedeveloper/Bark-with-Voice-Cloning/util/parseinput.py deleted file mode 100644 index f2102648cf169f0a52bb66755308fee5f81247e0..0000000000000000000000000000000000000000 --- a/spaces/devthedeveloper/Bark-with-Voice-Cloning/util/parseinput.py +++ /dev/null @@ -1,129 +0,0 @@ -import re -import xml.etree.ElementTree as ET -from xml.sax import saxutils -#import nltk - -# Chunked generation originally from https://github.com/serp-ai/bark-with-voice-clone -def split_and_recombine_text(text, desired_length=100, max_length=150): - # return nltk.sent_tokenize(text) - - # from https://github.com/neonbjb/tortoise-tts - """Split text it into chunks of a desired length trying to keep sentences intact.""" - # normalize text, remove redundant whitespace and convert non-ascii quotes to ascii - text = re.sub(r"\n\n+", "\n", text) - text = re.sub(r"\s+", " ", text) - text = re.sub(r"[“”]", '"', text) - - rv = [] - in_quote = False - current = "" - split_pos = [] - pos = -1 - end_pos = len(text) - 1 - - def seek(delta): - nonlocal pos, in_quote, current - is_neg = delta < 0 - for _ in range(abs(delta)): - if is_neg: - pos -= 1 - current = current[:-1] - else: - pos += 1 - current += text[pos] - if text[pos] == '"': - in_quote = not in_quote - return text[pos] - - def peek(delta): - p = pos + delta - return text[p] if p < end_pos and p >= 0 else "" - - def commit(): - nonlocal rv, current, split_pos - rv.append(current) - current = "" - split_pos = [] - - while pos < end_pos: - c = seek(1) - # do we need to force a split? - if len(current) >= max_length: - if len(split_pos) > 0 and len(current) > (desired_length / 2): - # we have at least one sentence and we are over half the desired length, seek back to the last split - d = pos - split_pos[-1] - seek(-d) - else: - # no full sentences, seek back until we are not in the middle of a word and split there - while c not in "!?.,\n " and pos > 0 and len(current) > desired_length: - c = seek(-1) - commit() - # check for sentence boundaries - elif not in_quote and (c in "!?]\n" or (c == "." 
and peek(1) in "\n ")): - # seek forward if we have consecutive boundary markers but still within the max length - while ( - pos < len(text) - 1 and len(current) < max_length and peek(1) in "!?.]" - ): - c = seek(1) - split_pos.append(pos) - if len(current) >= desired_length: - commit() - # treat end of quote as a boundary if its followed by a space or newline - elif in_quote and peek(1) == '"' and peek(2) in "\n ": - seek(2) - split_pos.append(pos) - rv.append(current) - - # clean up, remove lines with only whitespace or punctuation - rv = [s.strip() for s in rv] - rv = [s for s in rv if len(s) > 0 and not re.match(r"^[\s\.,;:!?]*$", s)] - - return rv - -def is_ssml(value): - try: - ET.fromstring(value) - except ET.ParseError: - return False - return True - -def build_ssml(rawtext, selected_voice): - texts = rawtext.split("\n") - joinedparts = "" - for textpart in texts: - textpart = textpart.strip() - if len(textpart) < 1: - continue - joinedparts = joinedparts + f"\n{saxutils.escape(textpart)}" - ssml = f""" - - {joinedparts} - - """ - return ssml - -def create_clips_from_ssml(ssmlinput): - # Parse the XML - tree = ET.ElementTree(ET.fromstring(ssmlinput)) - root = tree.getroot() - - # Create an empty list - voice_list = [] - - # Loop through all voice tags - for voice in root.iter('{http://www.w3.org/2001/10/synthesis}voice'): - # Extract the voice name attribute and the content text - voice_name = voice.attrib['name'] - voice_content = voice.text.strip() if voice.text else '' - if(len(voice_content) > 0): - parts = split_and_recombine_text(voice_content) - for p in parts: - if(len(p) > 1): - # add to tuple list - voice_list.append((voice_name, p)) - return voice_list - diff --git a/spaces/diacanFperku/AutoGPT/Acpi Ibm0071 Windows 7 Driver Extra Quality.md b/spaces/diacanFperku/AutoGPT/Acpi Ibm0071 Windows 7 Driver Extra Quality.md deleted file mode 100644 index 85b8d59246b4ed284afebc743d9551a61251f138..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Acpi Ibm0071 Windows 7 Driver Extra Quality.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Acpi Ibm0071 Windows 7 Driver


        Download File https://gohhs.com/2uFVwN



        - -Unknown device ACPI\IBM0071 is commonly caused by system settings ... This error can be resolved with special software that repairs the ... 4d29de3e1b
        -
        -
        -

        diff --git a/spaces/diacanFperku/AutoGPT/Igo8.exe Free Download.md b/spaces/diacanFperku/AutoGPT/Igo8.exe Free Download.md deleted file mode 100644 index 4215cf5717b41534e5c5f48e8cb37cc266866378..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Igo8.exe Free Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

        igo8.exe free download


        DOWNLOAD ✺✺✺ https://gohhs.com/2uFUnB



        -
        -Hello, yes it is working on Route 66. But I have a problem: I have downloaded the Primo 2 from this post -I copied the content to the sd card with all ... 1fdad05405
        -
        -
        -

        diff --git a/spaces/digitalxingtong/Nanami-Bert-VITS2/text/symbols.py b/spaces/digitalxingtong/Nanami-Bert-VITS2/text/symbols.py deleted file mode 100644 index 9dfae4e633829f20c4fd767b1c7a9198911ed801..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Nanami-Bert-VITS2/text/symbols.py +++ /dev/null @@ -1,51 +0,0 @@ -punctuation = ['!', '?', '…', ",", ".", "'", '-'] -pu_symbols = punctuation + ["SP", "UNK"] -pad = '_' - -# chinese -zh_symbols = ['E', 'En', 'a', 'ai', 'an', 'ang', 'ao', 'b', 'c', 'ch', 'd', 'e', 'ei', 'en', 'eng', 'er', 'f', 'g', 'h', - 'i', 'i0', 'ia', 'ian', 'iang', 'iao', 'ie', 'in', 'ing', 'iong', 'ir', 'iu', 'j', 'k', 'l', 'm', 'n', 'o', - 'ong', - 'ou', 'p', 'q', 'r', 's', 'sh', 't', 'u', 'ua', 'uai', 'uan', 'uang', 'ui', 'un', 'uo', 'v', 'van', 've', 'vn', - 'w', 'x', 'y', 'z', 'zh', - "AA", "EE", "OO"] -num_zh_tones = 6 - -# japanese -ja_symbols = ['I', 'N', 'U', 'a', 'b', 'by', 'ch', 'cl', 'd', 'dy', 'e', 'f', 'g', 'gy', 'h', 'hy', 'i', 'j', 'k', 'ky', - 'm', 'my', 'n', 'ny', 'o', 'p', 'py', 'r', 'ry', 's', 'sh', 't', 'ts', 'u', 'V', 'w', 'y', 'z'] -num_ja_tones = 1 - -# English -en_symbols = ['aa', 'ae', 'ah', 'ao', 'aw', 'ay', 'b', 'ch', 'd', 'dh', 'eh', 'er', 'ey', 'f', 'g', 'hh', 'ih', 'iy', - 'jh', 'k', 'l', 'm', 'n', 'ng', 'ow', 'oy', 'p', 'r', 's', - 'sh', 't', 'th', 'uh', 'uw', 'V', 'w', 'y', 'z', 'zh'] -num_en_tones = 4 - -# combine all symbols -normal_symbols = sorted(set(zh_symbols + ja_symbols + en_symbols)) -symbols = [pad] + normal_symbols + pu_symbols -sil_phonemes_ids = [symbols.index(i) for i in pu_symbols] - -# combine all tones -num_tones = num_zh_tones + num_ja_tones + num_en_tones - -# language maps -language_id_map = { - 'ZH': 0, - "JA": 1, - "EN": 2 -} -num_languages = len(language_id_map.keys()) - -language_tone_start_map = { - 'ZH': 0, - "JA": num_zh_tones, - "EN": num_zh_tones + num_ja_tones -} - -if __name__ == '__main__': - a = set(zh_symbols) - b = set(en_symbols) - print(sorted(a&b)) - diff --git a/spaces/digitalxingtong/Shanbao-Bert-VITS2/mel_processing.py b/spaces/digitalxingtong/Shanbao-Bert-VITS2/mel_processing.py deleted file mode 100644 index 50435ecf88ef4fb6c1d47f3e6edd04c3ea7d3e80..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Shanbao-Bert-VITS2/mel_processing.py +++ /dev/null @@ -1,112 +0,0 @@ -import math -import os -import random -import torch -from torch import nn -import torch.nn.functional as F -import torch.utils.data -import numpy as np -import librosa -import librosa.util as librosa_util -from librosa.util import normalize, pad_center, tiny -from scipy.signal import get_window -from scipy.io.wavfile import read -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is 
', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/dpv/Stage1Recycling/README.md b/spaces/dpv/Stage1Recycling/README.md deleted file mode 100644 index f39f76be23764ed2306402aca760421e3db976da..0000000000000000000000000000000000000000 --- a/spaces/dpv/Stage1Recycling/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stage1Recycling -emoji: 🚀 -colorFrom: purple -colorTo: red -sdk: gradio -sdk_version: 3.0.4 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/dragonSwing/annotate-anything/GroundingDINO/groundingdino/models/GroundingDINO/backbone/__init__.py b/spaces/dragonSwing/annotate-anything/GroundingDINO/groundingdino/models/GroundingDINO/backbone/__init__.py deleted file mode 100644 index 76e4b272b479a26c63d120c818c140870cd8c287..0000000000000000000000000000000000000000 --- a/spaces/dragonSwing/annotate-anything/GroundingDINO/groundingdino/models/GroundingDINO/backbone/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .backbone import build_backbone diff 
--git a/spaces/dragonSwing/annotate-anything/tag2text/models/vit.py b/spaces/dragonSwing/annotate-anything/tag2text/models/vit.py deleted file mode 100644 index fbe0ed93321d786f93ce43b65f71272a918cb042..0000000000000000000000000000000000000000 --- a/spaces/dragonSwing/annotate-anything/tag2text/models/vit.py +++ /dev/null @@ -1,430 +0,0 @@ -""" - * Copyright (c) 2022, salesforce.com, inc. - * All rights reserved. - * SPDX-License-Identifier: BSD-3-Clause - * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause - * By Junnan Li - * Based on timm code base - * https://github.com/rwightman/pytorch-image-models/tree/master/timm -""" -from functools import partial - -import torch -import torch.nn as nn -import torch.nn.functional as F -from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper -from timm.models.helpers import adapt_input_conv -from timm.models.helpers import named_apply -from timm.models.layers import DropPath -from timm.models.layers import trunc_normal_ -from timm.models.registry import register_model -from timm.models.vision_transformer import _cfg -from timm.models.vision_transformer import PatchEmbed - - -class Mlp(nn.Module): - """MLP as used in Vision Transformer, MLP-Mixer and related networks.""" - - def __init__( - self, - in_features, - hidden_features=None, - out_features=None, - act_layer=nn.GELU, - drop=0.0, - ): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -class Attention(nn.Module): - def __init__( - self, - dim, - num_heads=8, - qkv_bias=False, - qk_scale=None, - attn_drop=0.0, - proj_drop=0.0, - ): - super().__init__() - self.num_heads = num_heads - head_dim = dim // num_heads - # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights - self.scale = qk_scale or head_dim**-0.5 - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - self.attn_gradients = None - self.attention_map = None - - def save_attn_gradients(self, attn_gradients): - self.attn_gradients = attn_gradients - - def get_attn_gradients(self): - return self.attn_gradients - - def save_attention_map(self, attention_map): - self.attention_map = attention_map - - def get_attention_map(self): - return self.attention_map - - def forward(self, x, register_hook=False): - B, N, C = x.shape - qkv = ( - self.qkv(x) - .reshape(B, N, 3, self.num_heads, C // self.num_heads) - .permute(2, 0, 3, 1, 4) - ) - q, k, v = ( - qkv[0], - qkv[1], - qkv[2], - ) # make torchscript happy (cannot use tensor as tuple) - - attn = (q @ k.transpose(-2, -1)) * self.scale - attn = attn.softmax(dim=-1) - attn = self.attn_drop(attn) - - if register_hook: - self.save_attention_map(attn) - attn.register_hook(self.save_attn_gradients) - - x = (attn @ v).transpose(1, 2).reshape(B, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class Block(nn.Module): - def __init__( - self, - dim, - num_heads, - mlp_ratio=4.0, - qkv_bias=False, - qk_scale=None, - drop=0.0, - attn_drop=0.0, - drop_path=0.0, - act_layer=nn.GELU, - 
norm_layer=nn.LayerNorm, - use_grad_checkpointing=False, - ): - super().__init__() - self.norm1 = norm_layer(dim) - self.attn = Attention( - dim, - num_heads=num_heads, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop=attn_drop, - proj_drop=drop, - ) - # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here - self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp( - in_features=dim, - hidden_features=mlp_hidden_dim, - act_layer=act_layer, - drop=drop, - ) - - if use_grad_checkpointing: - self.attn = checkpoint_wrapper(self.attn) - self.mlp = checkpoint_wrapper(self.mlp) - - def forward(self, x, register_hook=False): - x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook)) - x = x + self.drop_path(self.mlp(self.norm2(x))) - return x - - -class VisionTransformer(nn.Module): - """Vision Transformer - A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - - https://arxiv.org/abs/2010.11929 - """ - - def __init__( - self, - img_size=224, - patch_size=16, - in_chans=3, - num_classes=1000, - embed_dim=768, - depth=12, - num_heads=12, - mlp_ratio=4.0, - qkv_bias=True, - qk_scale=None, - representation_size=None, - drop_rate=0.0, - attn_drop_rate=0.0, - drop_path_rate=0.0, - norm_layer=None, - use_grad_checkpointing=False, - ckpt_layer=0, - ): - """ - Args: - img_size (int, tuple): input image size - patch_size (int, tuple): patch size - in_chans (int): number of input channels - num_classes (int): number of classes for classification head - embed_dim (int): embedding dimension - depth (int): depth of transformer - num_heads (int): number of attention heads - mlp_ratio (int): ratio of mlp hidden dim to embedding dim - qkv_bias (bool): enable bias for qkv if True - qk_scale (float): override default qk scale of head_dim ** -0.5 if set - representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set - drop_rate (float): dropout rate - attn_drop_rate (float): attention dropout rate - drop_path_rate (float): stochastic depth rate - norm_layer: (nn.Module): normalization layer - """ - super().__init__() - self.num_features = ( - self.embed_dim - ) = embed_dim # num_features for consistency with other models - norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) - - self.patch_embed = PatchEmbed( - img_size=img_size, - patch_size=patch_size, - in_chans=in_chans, - embed_dim=embed_dim, - ) - - num_patches = self.patch_embed.num_patches - - self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) - self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) - self.pos_drop = nn.Dropout(p=drop_rate) - - dpr = [ - x.item() for x in torch.linspace(0, drop_path_rate, depth) - ] # stochastic depth decay rule - self.blocks = nn.ModuleList( - [ - Block( - dim=embed_dim, - num_heads=num_heads, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop_rate, - attn_drop=attn_drop_rate, - drop_path=dpr[i], - norm_layer=norm_layer, - use_grad_checkpointing=( - use_grad_checkpointing and i >= depth - ckpt_layer - ), - ) - for i in range(depth) - ] - ) - self.norm = norm_layer(embed_dim) - - trunc_normal_(self.pos_embed, std=0.02) - trunc_normal_(self.cls_token, std=0.02) - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=0.02) - if isinstance(m, 
nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - @torch.jit.ignore - def no_weight_decay(self): - return {"pos_embed", "cls_token"} - - def forward(self, x, register_blk=-1): - B = x.shape[0] - x = self.patch_embed(x) - - cls_tokens = self.cls_token.expand( - B, -1, -1 - ) # stole cls_tokens impl from Phil Wang, thanks - x = torch.cat((cls_tokens, x), dim=1) - - x = x + self.pos_embed[:, : x.size(1), :] - x = self.pos_drop(x) - - for i, blk in enumerate(self.blocks): - x = blk(x, register_blk == i) - x = self.norm(x) - - return x - - @torch.jit.ignore() - def load_pretrained(self, checkpoint_path, prefix=""): - _load_weights(self, checkpoint_path, prefix) - - -@torch.no_grad() -def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ""): - """Load weights from .npz checkpoints for official Google Brain Flax implementation.""" - import numpy as np - - def _n2p(w, t=True): - if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: - w = w.flatten() - if t: - if w.ndim == 4: - w = w.transpose([3, 2, 0, 1]) - elif w.ndim == 3: - w = w.transpose([2, 0, 1]) - elif w.ndim == 2: - w = w.transpose([1, 0]) - return torch.from_numpy(w) - - w = np.load(checkpoint_path) - if not prefix and "opt/target/embedding/kernel" in w: - prefix = "opt/target/" - - if hasattr(model.patch_embed, "backbone"): - # hybrid - backbone = model.patch_embed.backbone - stem_only = not hasattr(backbone, "stem") - stem = backbone if stem_only else backbone.stem - stem.conv.weight.copy_( - adapt_input_conv( - stem.conv.weight.shape[1], _n2p(w[f"{prefix}conv_root/kernel"]) - ) - ) - stem.norm.weight.copy_(_n2p(w[f"{prefix}gn_root/scale"])) - stem.norm.bias.copy_(_n2p(w[f"{prefix}gn_root/bias"])) - if not stem_only: - for i, stage in enumerate(backbone.stages): - for j, block in enumerate(stage.blocks): - bp = f"{prefix}block{i + 1}/unit{j + 1}/" - for r in range(3): - getattr(block, f"conv{r + 1}").weight.copy_( - _n2p(w[f"{bp}conv{r + 1}/kernel"]) - ) - getattr(block, f"norm{r + 1}").weight.copy_( - _n2p(w[f"{bp}gn{r + 1}/scale"]) - ) - getattr(block, f"norm{r + 1}").bias.copy_( - _n2p(w[f"{bp}gn{r + 1}/bias"]) - ) - if block.downsample is not None: - block.downsample.conv.weight.copy_( - _n2p(w[f"{bp}conv_proj/kernel"]) - ) - block.downsample.norm.weight.copy_( - _n2p(w[f"{bp}gn_proj/scale"]) - ) - block.downsample.norm.bias.copy_(_n2p(w[f"{bp}gn_proj/bias"])) - embed_conv_w = _n2p(w[f"{prefix}embedding/kernel"]) - else: - embed_conv_w = adapt_input_conv( - model.patch_embed.proj.weight.shape[1], _n2p(w[f"{prefix}embedding/kernel"]) - ) - model.patch_embed.proj.weight.copy_(embed_conv_w) - model.patch_embed.proj.bias.copy_(_n2p(w[f"{prefix}embedding/bias"])) - model.cls_token.copy_(_n2p(w[f"{prefix}cls"], t=False)) - pos_embed_w = _n2p(w[f"{prefix}Transformer/posembed_input/pos_embedding"], t=False) - if pos_embed_w.shape != model.pos_embed.shape: - pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights - pos_embed_w, - model.pos_embed, - getattr(model, "num_tokens", 1), - model.patch_embed.grid_size, - ) - model.pos_embed.copy_(pos_embed_w) - model.norm.weight.copy_(_n2p(w[f"{prefix}Transformer/encoder_norm/scale"])) - model.norm.bias.copy_(_n2p(w[f"{prefix}Transformer/encoder_norm/bias"])) - # if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]: - # 
model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) - # model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) - # if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: - # model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) - # model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) - for i, block in enumerate(model.blocks.children()): - block_prefix = f"{prefix}Transformer/encoderblock_{i}/" - mha_prefix = block_prefix + "MultiHeadDotProductAttention_1/" - block.norm1.weight.copy_(_n2p(w[f"{block_prefix}LayerNorm_0/scale"])) - block.norm1.bias.copy_(_n2p(w[f"{block_prefix}LayerNorm_0/bias"])) - block.attn.qkv.weight.copy_( - torch.cat( - [ - _n2p(w[f"{mha_prefix}{n}/kernel"], t=False).flatten(1).T - for n in ("query", "key", "value") - ] - ) - ) - block.attn.qkv.bias.copy_( - torch.cat( - [ - _n2p(w[f"{mha_prefix}{n}/bias"], t=False).reshape(-1) - for n in ("query", "key", "value") - ] - ) - ) - block.attn.proj.weight.copy_(_n2p(w[f"{mha_prefix}out/kernel"]).flatten(1)) - block.attn.proj.bias.copy_(_n2p(w[f"{mha_prefix}out/bias"])) - for r in range(2): - getattr(block.mlp, f"fc{r + 1}").weight.copy_( - _n2p(w[f"{block_prefix}MlpBlock_3/Dense_{r}/kernel"]) - ) - getattr(block.mlp, f"fc{r + 1}").bias.copy_( - _n2p(w[f"{block_prefix}MlpBlock_3/Dense_{r}/bias"]) - ) - block.norm2.weight.copy_(_n2p(w[f"{block_prefix}LayerNorm_2/scale"])) - block.norm2.bias.copy_(_n2p(w[f"{block_prefix}LayerNorm_2/bias"])) - - -def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder): - # interpolate position embedding - embedding_size = pos_embed_checkpoint.shape[-1] - num_patches = visual_encoder.patch_embed.num_patches - num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches - # height (== width) for the checkpoint position embedding - orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) - # height (== width) for the new position embedding - new_size = int(num_patches**0.5) - - if orig_size != new_size: - # class_token and dist_token are kept unchanged - extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] - # only the position tokens are interpolated - pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] - pos_tokens = pos_tokens.reshape( - -1, orig_size, orig_size, embedding_size - ).permute(0, 3, 1, 2) - pos_tokens = torch.nn.functional.interpolate( - pos_tokens, size=(new_size, new_size), mode="bicubic", align_corners=False - ) - pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) - new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) - print( - "reshape position embedding from %d to %d" % (orig_size**2, new_size**2) - ) - - return new_pos_embed - else: - return pos_embed_checkpoint diff --git a/spaces/ds520/bingo/src/pages/api/create.ts b/spaces/ds520/bingo/src/pages/api/create.ts deleted file mode 100644 index 430bb2d53431e6a2c7608234f512f2d9f577daee..0000000000000000000000000000000000000000 --- a/spaces/ds520/bingo/src/pages/api/create.ts +++ /dev/null @@ -1,31 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { fetch, debug } from '@/lib/isomorphic' -import { createHeaders } from '@/lib/utils' - -const API_ENDPOINT = 'https://www.bing.com/turing/conversation/create' -// const API_ENDPOINT = 'https://edgeservices.bing.com/edgesvc/turing/conversation/create'; - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const headers = createHeaders(req.cookies) - - res.writeHead(200, { 
- 'Content-Type': 'application/json', - }) - - debug('headers', headers) - const response = await fetch(API_ENDPOINT, { method: 'GET', headers }) - .then((res) => res.text()) - - res.end(response) - } catch (e) { - return res.end(JSON.stringify({ - result: { - value: 'UnauthorizedRequest', - message: `${e}` - } - })) - } -} diff --git a/spaces/dyhzq/vits-uma-genshin-honkai/Docker/Dockerfile b/spaces/dyhzq/vits-uma-genshin-honkai/Docker/Dockerfile deleted file mode 100644 index 4d39cdf02a2ec151686cc1d61234bf723068fed8..0000000000000000000000000000000000000000 --- a/spaces/dyhzq/vits-uma-genshin-honkai/Docker/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM python:3.9-bullseye -VOLUME ["/app"] -WORKDIR /app -# Set apt to Chinese mirror -RUN sed -i 's/deb.debian.org/mirrors.ustc.edu.cn/g' /etc/apt/sources.list -RUN apt-get update && apt-get -y install cmake git -RUN git clone https://huggingface.co/spaces/ikechan8370/vits-uma-genshin-honkai -WORKDIR /app/vits-uma-genshin-honkai -RUN sed -i "s/\.launch()/\.launch(server_name=\"0.0.0.0\")/" /app/vits-uma-genshin-honkai/app.py -ADD vits.sh /app/vits.sh -EXPOSE 7860 -ENTRYPOINT [ "/app/vits.sh" ] \ No newline at end of file diff --git a/spaces/eaglelandsonce/autogenmultichat/README.md b/spaces/eaglelandsonce/autogenmultichat/README.md deleted file mode 100644 index a5378a18b8492ade65b269615f9c24f66930e881..0000000000000000000000000000000000000000 --- a/spaces/eaglelandsonce/autogenmultichat/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: AutoGen Multi-Agent Python Programmer -emoji: ⚡ -colorFrom: pink -colorTo: blue -sdk: streamlit -sdk_version: 1.27.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/egumasa/engagement-analyzer-demo/README.md b/spaces/egumasa/engagement-analyzer-demo/README.md deleted file mode 100644 index 8bec993361867d9c85aeba17cb56df7113f39aac..0000000000000000000000000000000000000000 --- a/spaces/egumasa/engagement-analyzer-demo/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Engagement analyzer (demo) -emoji: 👀 -colorFrom: gray -colorTo: red -sdk: streamlit -sdk_version: 1.25.0 -app_file: demo.py -pinned: false ---- - -# engagement-analyzer-demo - -This is a demo of automatic analysis tool for Engagement (Martin & White, 2005). - diff --git a/spaces/ehristoforu/T3/README.md b/spaces/ehristoforu/T3/README.md deleted file mode 100644 index eff2870c3f6d49f61b2f6862626c12ad18a9910c..0000000000000000000000000000000000000000 --- a/spaces/ehristoforu/T3/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: T3 -emoji: 🌍 -colorFrom: purple -colorTo: purple -sdk: gradio -sdk_version: 3.39.0 -app_file: translator.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/emc348/faces-through-time/models/StyleCLIP/global_directions/dnnlib/tflib/ops/__init__.py b/spaces/emc348/faces-through-time/models/StyleCLIP/global_directions/dnnlib/tflib/ops/__init__.py deleted file mode 100644 index 43cce37364064146fd30e18612b1d9e3a84f513a..0000000000000000000000000000000000000000 --- a/spaces/emc348/faces-through-time/models/StyleCLIP/global_directions/dnnlib/tflib/ops/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 
-# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -# empty diff --git a/spaces/eson/tokenizer-arena/README.md b/spaces/eson/tokenizer-arena/README.md deleted file mode 100644 index 8225e5967ebc8e049dedaaf8f8e606c31fdd6b31..0000000000000000000000000000000000000000 --- a/spaces/eson/tokenizer-arena/README.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Tokenizer Arena -emoji: ⚡ -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.41.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - - -## ss - - -## ss - diff --git a/spaces/failfast/nextjs-hf-spaces/src/components/huggingface/inference/summarization.tsx b/spaces/failfast/nextjs-hf-spaces/src/components/huggingface/inference/summarization.tsx deleted file mode 100644 index 8183106e019c6a7eaf77cfa12ebe362d920c004c..0000000000000000000000000000000000000000 --- a/spaces/failfast/nextjs-hf-spaces/src/components/huggingface/inference/summarization.tsx +++ /dev/null @@ -1,190 +0,0 @@ -import { - Alert, - Box, - Button, - CircularProgress, - Paper, - Slider, - Stack, - TextField, - Typography, -} from "@mui/material"; -import { useEffect, useRef, useState } from "react"; -import { HfInference, SummarizationArgs } from "@huggingface/inference"; -import { InferenceProps } from "../huggingface"; -import Options from "@/components/base/options"; -import SliderWithLabel from "@/components/base/slider-with-label"; -import ExampleButton from "@/components/base/example-button"; -import Secret from "@/components/base/secret"; - -type SummarizationProps = InferenceProps & { - /** - * (Default: None). Integer to define the maximum length in tokens of the output summary. - */ - maxLength?: number; - /** - * (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit. - */ - maxTime?: number; - /** - * (Default: None). Integer to define the minimum length in tokens of the output summary. - */ - minLength?: number; - /** - * (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes. - */ - repetitionPenalty?: number; - /** - * (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability. - */ - temperature?: number; - /** - * (Default: None). Integer to define the top tokens considered within the sample operation to create new text. - */ - topK?: number; - /** - * (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p. 
- */ - topP?: number; -}; - -export default function Summarization(props: SummarizationProps) { - const { - model, - maxLength, - maxTime, - minLength, - repetitionPenalty, - temperature, - topK, - topP, - } = props; - - const [token, setToken] = useState(""); - const [inputText, setInputText] = useState(""); - const [summary, setSummary] = useState(""); - const [error, setError] = useState(""); - const [loading, setLoading] = useState(false); - - const inference = useRef(null); - - useEffect(() => { - inference.current = new HfInference(token); - }, [token]); - - // Parse the data of the form and trigger "call" - const handleSubmit = (event: any) => { - event.preventDefault(); - const data = new FormData(event.currentTarget); - - setToken(data.get("token") as string); - - const text = data.get("text") as string; - const max_length = Number(data.get("maxLength") as string); - - call({ model, inputs: text, parameters: { max_length } }); - }; - - /** - * Call the inference API using args - */ - const call = async (args: SummarizationArgs) => { - const { inputs, parameters } = args; - - try { - setLoading(true); - - const response = await inference.current?.summarization({ - model, - inputs, - parameters, - }); - - setSummary(response?.summary_text as string); - setError(""); - } catch (error) { - if (error instanceof Error) { - setError(error.message); - } else { - setError("An unknown error occurred"); - } - } - - setLoading(false); - }; - - return ( - <> - - - setInputText(e.target.value)} - /> - - - - {error && {error}} - - - - - - - - - - - Examples - - - - - - - - - - ); -} diff --git a/spaces/falterWliame/Face_Mask_Detection/Iperius Backup Full 7.0.0 With _TOP_ Keygen.md b/spaces/falterWliame/Face_Mask_Detection/Iperius Backup Full 7.0.0 With _TOP_ Keygen.md deleted file mode 100644 index 162e9c95e54928b0489a42a58d555138b06792ae..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Iperius Backup Full 7.0.0 With _TOP_ Keygen.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Iperius Backup Full 7.0.0 with Keygen


        DOWNLOAD 🆓 https://urlca.com/2uDc07



        -
        -
        -
        -

        diff --git a/spaces/fatiXbelha/sd/Android Users Download COC Unlimited Gems for Clash of Clans APK and Unlock Everything.md b/spaces/fatiXbelha/sd/Android Users Download COC Unlimited Gems for Clash of Clans APK and Unlock Everything.md deleted file mode 100644 index 4af14db59be67bd406bcbfdbf4cc5529278d1652..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Android Users Download COC Unlimited Gems for Clash of Clans APK and Unlock Everything.md +++ /dev/null @@ -1,87 +0,0 @@ - -

        Clash of Clans Unlimited Gems APK Download Android Uptodown

        -

        Are you a fan of Clash of Clans, the popular strategy game where you build your own village, train your troops, and fight against other players? If so, you might be interested in downloading an APK that can give you unlimited money and gems for free. In this article, we will tell you everything you need to know about COC Unlimited Gems for Clash of Clans, an APK that you can download from Uptodown, a website that offers safe and verified downloads for Android devices. We will also show you how to install and use this APK, as well as its advantages and disadvantages, and some alternatives that you can try.

        -

        What is Clash of Clans?

        -

        Clash of Clans is a freemium mobile game developed by Supercell, a Finnish company that also created other popular games like Hay Day, Boom Beach, and Brawl Stars. Clash of Clans was released in 2012 for iOS and in 2013 for Android, and since then it has become one of the most downloaded and played games in the world, with over 500 million downloads on Google Play alone.

        -

        clash of clans unlimited gems apk download android uptodown


        Download Zip ★★★ https://urllie.com/2uNIiG



        -

        In Clash of Clans, you can create your own village from scratch, using various buildings, defenses, traps, and decorations. You can also join or create a clan with other players, and participate in clan wars, clan games, and clan leagues. The main currency in the game is gold, which you can use to upgrade your buildings and defenses. The secondary currency is elixir, which you can use to train your troops and spells. The premium currency is gems, which you can use to speed up your progress, buy special items, or get more resources.

        -

        What is COC Unlimited Gems for Clash of Clans?

        -

        COC Unlimited Gems for Clash of Clans is an APK file that you can download from Uptodown, a website that offers free downloads for Android apps and games. An APK file is an Android application package file that contains all the files needed to install an app on your device. COC Unlimited Gems for Clash of Clans is not an official app from Supercell, but a modified version that allows you to add unlimited money and gems to your Clash of Clans account.
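As a concrete aside (our own illustration, not part of the original article): an APK is just a ZIP archive under the hood, so a few lines of Python are enough to peek at what such a package contains. The file name below is hypothetical; point it at whatever APK you actually downloaded.

```python
import zipfile

# Hypothetical file name - an APK is a ZIP archive, so the standard
# zipfile module can list everything packaged inside it.
with zipfile.ZipFile("coc-unlimited-gems.apk") as apk:
    names = apk.namelist()
    for name in names[:10]:  # print the first ten entries
        print(name)
    print(f"{len(names)} files in total")
```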

        -

        By using this APK, you can enjoy all the features of Clash of Clans without spending any real money or waiting for long hours. You can upgrade your buildings and defenses instantly, train your troops and spells without any cost or time limit, and buy any item or resource that you want. You can also access some exclusive features that are not available in the original game, such as custom mods, private servers, unlimited troops, and more.

        -

        How to download and install COC Unlimited Gems for Clash of Clans?

        -

        To download and install COC Unlimited Gems for Clash of Clans on your Android device, follow these simple steps:

        -
          -
1. Go to Uptodown on your browser and search for COC Unlimited Gems for Clash of Clans.
2. Click on the green Download button and wait for the APK file to be downloaded on your device (a short sketch after these steps shows one way to double-check the downloaded file).
3. Once the download is complete, go to your device settings and enable the option to install apps from unknown sources. This will allow you to install the APK file that you downloaded from Uptodown.
4. Go to your file manager and locate the APK file that you downloaded. Tap on it and follow the instructions to install it on your device.
5. Once the installation is complete, open the app and enjoy unlimited money and gems in Clash of Clans.
        -

        Note: You may need to uninstall the original Clash of Clans app before installing the APK file, or use a different device or account to avoid any conflicts or bans.
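Since the guide repeatedly stresses installing only files you trust, here is a minimal Python sketch (an addition of ours, not something the original article provides) that prints the SHA-256 checksum of the downloaded APK so you can compare it against a checksum published by the source, if one is available. The file name is again just an example.

```python
import hashlib

def sha256_of_file(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, read in 1 MiB chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    # Example path; replace it with the APK you downloaded in step 2.
    print(sha256_of_file("coc-unlimited-gems.apk"))
```

If the digest does not match the one the download page advertises, it is safer to delete the file and download it again.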

        -

        How to use COC Unlimited Gems for Clash of Clans?

        -

        To use COC Unlimited Gems for Clash of Clans on your device, follow these simple steps:

        -
          -
1. Open the app and tap on the Start button. You will be redirected to a private server where you can play Clash of Clans with unlimited money and gems.
2. Choose your username and create your village as you normally would. You will notice that you have a lot of gold, elixir, and gems in your account.
3. Use the money and gems to upgrade your buildings and defenses, train your troops and spells, and buy any item or resource that you want.
4. You can also access some custom mods by tapping on the menu button on the top right corner. You can change the game settings, add unlimited troops, enable cheats, and more.
5. You can also join or create a clan with other players who are using the same APK. You can chat with them, donate troops, and participate in clan wars.
        -

        Note: You may not be able to play with players who are using the official Clash of Clans app, or access some features that are exclusive to the original game, such as clan games, clan leagues, seasonal events, etc.

        -

        What are the advantages and disadvantages of using COC Unlimited Gems for Clash of Clans?

        -

        Using COC Unlimited Gems for Clash of Clans has its pros and cons. Here is a table that summarizes them:

        -


| Pros | Cons |
| --- | --- |
| You can get unlimited money and gems for free | You may get banned from the official game or lose your progress |
| You can upgrade your buildings and defenses instantly | You may not be able to play with players who are using the original game |
| You can train your troops and spells without any cost or time limit | You may miss out on some features that are exclusive to the original game |
| You can buy any item or resource that you want | You may encounter some bugs or errors while using the APK |
| You can access some custom mods and cheats | You may compromise your device security or privacy by installing an APK from an unknown source |

        What are some alternatives to COC Unlimited Gems for Clash of Clans?

        -

        If you are looking for some alternatives to COC Unlimited Gems for Clash of Clans, here are some other APKs or hacks that offer similar features:

        -
          -
• Clash of Lights: This is another modified version of Clash of Clans that gives you unlimited money and gems, as well as custom mods, private servers, unlimited troops, etc.
• Clash of Magic: This is a hack tool that allows you to generate free money and gems for your Clash of Clans account. You just need to enter your username and choose the amount of resources that you want.
• Clash Royale Mod APK: This is a modified version of Clash Royale, another popular game from Supercell. It gives you unlimited money and gems, as well as custom cards, private servers, unlimited chests, etc.
        -

        Conclusion

        -

        COC Unlimited Gems for Clash of Clans is an APK that you can download from Uptodown to get unlimited money and gems in Clash of Clans. It also gives you access to some custom mods and cheats that can enhance your gaming experience. However, it also has some drawbacks, such as possible bans, compatibility issues, missing features, bugs, errors, and security risks. Therefore, you should use it at your own risk and discretion. If you want to try it out, you can follow our guide on how to download, install, and use it on your Android device. Alternatively, you can also check out some other APKs or hacks that offer similar features.

        -

        FAQs

        -

        Here are some frequently asked questions and answers related to COC Unlimited Gems for Clash of Clans:

        -
          -
1. Is COC Unlimited Gems for Clash of Clans safe?
   There is no guarantee that COC Unlimited Gems for Clash of Clans is safe, as it is not an official app from Supercell, but a modified version that may contain malware or viruses. You should always be careful when downloading and installing APKs from unknown sources, as they may compromise your device security or privacy. You should also scan the APK file with antivirus software before installing it, and back up your data in case something goes wrong.
2. Will I get banned from Clash of Clans if I use COC Unlimited Gems for Clash of Clans?
   There is a high chance that you will get banned from Clash of Clans if you use COC Unlimited Gems for Clash of Clans, as it violates the terms of service and fair play policy of the game. Supercell has a strict anti-cheat system that can detect and ban players who use hacks, mods, or bots to gain an unfair advantage over other players. If you get banned, you may lose your progress, your account, and your access to the game. Therefore, you should use COC Unlimited Gems for Clash of Clans at your own risk and discretion.
3. Can I use COC Unlimited Gems for Clash of Clans on iOS devices?
   No, you cannot use COC Unlimited Gems for Clash of Clans on iOS devices, as it is only compatible with Android devices. iOS devices have a different operating system and file format than Android devices, and they do not support APK files. If you want to use COC Unlimited Gems for Clash of Clans on iOS devices, you will need to jailbreak your device, which is not recommended as it may void your warranty, damage your device, or expose it to security risks.
4. Can I update COC Unlimited Gems for Clash of Clans?
   No, you cannot update COC Unlimited Gems for Clash of Clans, as it is not connected to the official game servers or Google Play Store. If you try to update it, you may lose the unlimited money and gems feature, or encounter some errors or bugs. You will need to download and install the latest version of the APK file from Uptodown whenever there is a new update available.
5. Can I play with my friends who are using the original Clash of Clans app?
   No, you cannot play with your friends who are using the original Clash of Clans app, as they are on different servers and versions than you. You can only play with other players who are using the same APK or hack as you. If you want to play with your friends who are using the original Clash of Clans app, you will need to uninstall the APK file and reinstall the official app from Google Play Store.

        -
        -
        \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Chess 3D A Next-Generation Chess Game with Unreal Engine 5 - Download Now.md b/spaces/fatiXbelha/sd/Chess 3D A Next-Generation Chess Game with Unreal Engine 5 - Download Now.md deleted file mode 100644 index 5cbf843c8bb14af07bd1a6723aac9f01b6baa73e..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Chess 3D A Next-Generation Chess Game with Unreal Engine 5 - Download Now.md +++ /dev/null @@ -1,119 +0,0 @@ -
        -

        Download Chess 3D: How to Play Chess in Three Dimensions

        -

        Chess is one of the most popular and challenging board games in the world. It has been played for centuries by people of all ages and backgrounds. But have you ever wondered what it would be like to play chess in three dimensions? If you are looking for a new and exciting way to enjoy this classic game, then you should try chess 3D. In this article, we will explain what chess 3D is, how to download it, and how to play it.

        -

        What is Chess 3D?

        -

        Chess 3D is a variant of chess that uses a three-dimensional board instead of a flat one. The board can rotate, shift, and feel like a real chessboard. You can play chess 3D online or offline, with a friend or against the computer. You can also choose from different levels of difficulty, from beginner to expert.

        -

        download chess 3d


        DOWNLOAD ✵✵✵ https://urllie.com/2uNAnU



        -

        The difference between 3D chess and regular chess

        -

        The main difference between 3D chess and regular chess is the shape and size of the board. In regular chess, the board is a square with 64 squares (8x8). In 3D chess, the board is a cube with 512 cubes (8x8x8). This means that there are more spaces and directions to move your pieces. For example, you can move your rook up or down, as well as left or right.

        -

        The benefits of playing 3D chess

        -

        Playing 3D chess can have many benefits for your brain and your skills. Some of them are:

        -
          -
• It improves your spatial awareness and visualization abilities.
• It enhances your logic and strategic thinking.
• It challenges your creativity and imagination.
• It increases your concentration and memory.
• It makes you more adaptable and flexible.
        -

        How to Download Chess 3D?

        -

        If you want to play chess 3D, you have two options: you can play it online or download it on your device. Here are some of the best websites and apps to download chess 3D:

        -

        The best websites to play 3D chess online

        -

        If you want to play chess 3D online, you can visit these websites:

        -

        Chess.com

        -

        Chess.com is one of the most popular and trusted websites for playing chess online. It has millions of users from all over the world. You can play 3D chess on Chess.com by selecting the "Three Dimensional Board" option in the settings. You can also customize the appearance and sound of the board. You can play against other players or against the computer, with different time controls and ratings.

        -

        Y8.com

        -

        Y8.com is a website that offers a variety of free online games, including 3D chess. You can play 3D chess on Y8.com by clicking on the "Play" button on the game page. You can choose one of seven difficulties, from easy to impossible. You can also switch between different views of the board, from top to side. You can play with a friend on the same device or against the computer.

        -

        Chess.com/vi

        -


        Chess.com/vi is a website that offers 3D chess in Vietnamese. You can play 3D chess on Chess.com/vi by selecting the "Bàn cờ ba chiều" option in the settings. You can also change the color and style of the board. You can play with other players or against the computer, with different modes and levels.

        -


        -

        The best apps to download 3D chess on your device

        -

        If you want to download chess 3D on your device, you can try these apps:

        -

        3D Chess Game for Android

        -

        3D Chess Game is a free app that lets you play 3D chess on your Android device. You can play with a friend on the same device or against the computer, with 25 levels of difficulty. You can also watch and learn from other games online. You can rotate and zoom the board, and undo and redo moves. You can also change the theme and sound of the game.

        -

        Real Chess 3D for iOS

        -

        Real Chess 3D is a free app that lets you play 3D chess on your iOS device. You can play with a friend on the same device or against the computer, with four levels of difficulty. You can also play online with other players or join tournaments. You can tilt and swipe the board, and use hints and analysis. You can also customize the board and pieces.

        -

        Chess 3D for Windows

        -

        Chess 3D is a free app that lets you play 3D chess on your Windows device. You can play with a friend on the same device or against the computer, with three levels of difficulty. You can also play online with other players or chat with them. You can rotate and move the board, and save and load games. You can also choose from different boards and pieces.

        -

        How to Play Chess 3D?

        -

        If you want to play chess 3D, you need to know the basic rules and moves of 3D chess. Here are some of them:

        -

        The basic rules and moves of 3D chess

        -

        The basic rules and moves of 3D chess are similar to those of regular chess, with some differences:

        -
          -
• The game is played on a cube-shaped board with eight layers, each layer being an 8x8 grid of squares.
• Each player has 16 pieces: one king, one queen, two rooks, two bishops, two knights, and eight pawns.
• The pieces are placed on the bottom layer for white and on the top layer for black.
• The goal of the game is to checkmate the opponent's king, or to make them unable to move.
• The pieces move according to their type, but they can also move up or down one layer at a time.
• The pawn can move one square forward, diagonally forward, or vertically forward. It can capture an enemy piece only by moving diagonally forward or vertically forward.
• The rook can move any number of squares horizontally, vertically, or perpendicularly. It can also move up or down one layer at a time (see the sketch after this list for one way to code this rule).
• The bishop can move any number of squares diagonally or triagonally. It can also move up or down one layer at a time.
• The knight can move two squares horizontally or vertically and then one square perpendicularly or diagonally. It can also move up or down one layer at a time.
• The queen can move any number of squares horizontally, vertically, diagonally, perpendicularly, or triagonally. It can also move up or down one layer at a time.
• The king can move one square in any direction. It can also move up or down one layer at a time.
• The castling move is possible if the king and rook have not moved and there are no pieces between them. The king moves two squares towards the rook and the rook moves over the king to the adjacent square.
• The en passant move is possible if an enemy pawn moves two squares forward from its initial position and lands next to your pawn. You can capture the enemy pawn by moving your pawn diagonally forward to the square where it passed.
• The promotion move is possible if your pawn reaches the opposite end of the board. You can replace it with any piece of your choice except a king.
        -
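To make the extra dimension concrete, here is a small Python sketch (our own illustration, not code from any of the apps mentioned above) that lists the squares a rook could reach on an empty 8x8x8 board, under the reading of the rule given in the list: slide any distance along a rank or file within the current layer, or step one layer up or down.

```python
SIZE = 8  # the board described above is 8 x 8 x 8

def rook_moves(x: int, y: int, z: int):
    """Yield every square a lone rook could reach on an empty 8x8x8 board."""
    # Slide any number of squares along the two in-layer axes.
    for dx, dy in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
        nx, ny = x + dx, y + dy
        while 0 <= nx < SIZE and 0 <= ny < SIZE:
            yield (nx, ny, z)
            nx, ny = nx + dx, ny + dy
    # Step exactly one layer up or down.
    for dz in (-1, 1):
        if 0 <= z + dz < SIZE:
            yield (x, y, z + dz)

# From a corner of the cube: 7 + 7 in-layer squares plus 1 layer step = 15.
print(len(list(rook_moves(0, 0, 0))))
```

Swapping in the other movement rules from the list (bishop diagonals, knight jumps, and so on) follows the same pattern of direction vectors.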

        The tips and tricks to improve your 3D chess skills

        -

If you want to improve your 3D chess skills, you can follow these tips and tricks:

        -
          -
• Practice regularly and play with different opponents. You can use the websites and apps mentioned above to play 3D chess online or offline. You can also join online communities and forums to learn from other players and share your experiences.
• Study the basics and the strategies of 3D chess. You can read books, articles, and blogs about 3D chess. You can also watch videos and tutorials on YouTube or other platforms. You can learn from the experts and the masters of 3D chess.
• Analyze your games and learn from your mistakes. You can use tools and software to review your games and see where you went wrong or what you could have done better. You can also ask for feedback and advice from other players or coaches.
• Challenge yourself and have fun. You can try different modes and levels of 3D chess, from easy to impossible. You can also play different variants and puzzles of 3D chess, such as 3D chess with four players or 3D chess with random pieces. You can also create your own rules and scenarios for 3D chess.
        -

        Conclusion

        -

        Chess 3D is a fascinating and stimulating way to play chess in three dimensions. It is a game that tests your brain, your skills, and your imagination. It is a game that you can play anytime, anywhere, with anyone. It is a game that you can enjoy for a lifetime. If you want to download chess 3D, you can use the websites and apps that we have recommended in this article. If you want to play chess 3D, you can use the rules and tips that we have provided in this article. We hope that this article has helped you to learn more about chess 3D and how to play it. Thank you for reading!

        -

        FAQs

        -

        Here are some of the frequently asked questions about chess 3D:

        -

        Q: Is chess 3D harder than regular chess?

        -

        A: Chess 3D is not necessarily harder than regular chess, but it is different. It requires more spatial awareness, visualization, and creativity. It also offers more possibilities and challenges. It depends on your preference and style which one you find easier or harder.

        -

        Q: Is chess 3D good for your brain?

        -

        A: Chess 3D is good for your brain, just like regular chess. It can improve your cognitive functions, such as memory, concentration, logic, and problem-solving. It can also prevent or delay mental decline, such as Alzheimer's disease or dementia.

        -

        Q: How long does a game of chess 3D last?

        -

        A: A game of chess 3D can last as long as a game of regular chess, depending on the time control and the skill level of the players. It can range from a few minutes to several hours.

        -

        Q: Can I play chess 3D with a physical board?

        -

        A: Yes, you can play chess 3D with a physical board, if you have one. There are some models of 3D chess boards that you can buy online or in stores. They are usually made of plastic or wood, with magnets or pegs to hold the pieces.

        -

        Q: Where did chess 3D originate from?

        -

        A: Chess 3D originated from various sources and influences. One of them is the fictional game of "tri-dimensional chess" that appeared in the Star Trek series. Another one is the mathematical concept of "hypercube" or "tesseract". There are also other versions and variations of 3D chess that have been invented by different people over time.

        -
        -
        \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Create Amazing Beats with Incredibox APK File and Share Them with the World.md b/spaces/fatiXbelha/sd/Create Amazing Beats with Incredibox APK File and Share Them with the World.md deleted file mode 100644 index 26090aa8c5a22306bea6842c2cf646c7bbb83bae..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Create Amazing Beats with Incredibox APK File and Share Them with the World.md +++ /dev/null @@ -1,126 +0,0 @@ - -

        Incredibox APK File: How to Download and Enjoy the Music App

        -

        If you love music and want to create your own tunes with the help of a fun and easy app, you might have heard of Incredibox. Incredibox is a music app that lets you create your own music with the help of a merry crew of beatboxers. You can choose from different musical styles, drag and drop icons onto the characters, and mix sounds, rhythms, and voices to create your own compositions. You can also save, share, and download your mixes, as well as listen to and vote for other users' creations.

        -

        incredibox apk file


Download https://urllie.com/2uNvGZ



        -

        Incredibox is a popular app that has been downloaded more than 1.8 million times and has received many awards and positive reviews from users and critics alike. It is also used by schools as an educational tool to teach children about rhythm, tempo, and musical composition. However, if you want to enjoy the full Incredibox experience, you need to download the app from the official website or the app stores. The app is not free, though, and costs $4.99 on both iOS and Android platforms.

        -

        But what if you want to download Incredibox for free? Or what if you cannot access the app stores in your region? Or what if you have an older device that is not compatible with the latest version of the app? In that case, you might want to try downloading the Incredibox APK file. An APK file is an Android application package file that contains all the files and data needed to install an app on an Android device. By downloading the Incredibox APK file, you can bypass the app stores and install the app directly on your device.

        -

        However, downloading and installing an APK file is not as simple as downloading an app from the app store. You need to follow some steps and precautions to make sure that you get a safe and working version of the app. In this article, we will show you how to download and install the Incredibox APK file on your Android device, how to use the app and create your own music, what features and benefits the app offers, and what alternatives you can try if you are looking for something different.

        -

        How to Download Incredibox APK File for Android Devices

        -

        The first step to enjoy Incredibox on your Android device is to download the APK file. There are many websites that offer APK files for various apps, but not all of them are trustworthy or reliable. Some of them may contain malware, viruses, or outdated versions of the apps that may harm your device or compromise your privacy. Therefore, you need to be careful when choosing a source for downloading the Incredibox APK file.

        -

        One of the best websites that we recommend for downloading the Incredibox APK file is [APKPure](^7^). APKPure is a reputable website that provides safe and verified APK files for thousands of apps. It also updates its files regularly to ensure that they are compatible with the latest versions of Android devices. To download the Incredibox APK file from APKPure, follow these steps:

        -


        -
          -
1. Go to [APKPure](^7^) website on your browser.
2. Search for "Incredibox" in the search bar.
3. Select the "Incredibox" app from the results.
4. Click on "Download APK" button.
5. Choose a location on your device where you want to save the file.
6. Wait for the download to finish.
        -

        Congratulations! You have successfully downloaded the Incredibox APK file on your device. Now you need to install it.

        -

        How to Install Incredibox APK File on Android Devices

        -

Before you can install the APK file, you need to allow installation from unknown sources, because Android blocks the installation of apps from sources other than the official app stores, such as APK files, for security reasons. To enable this setting, follow these steps:

        -
          -
        1. Go to your device's "Settings" menu.
        2. -
        3. Tap on "Security" or "Privacy" option.
        4. -
        5. Find and toggle on the option that says "Unknown sources" or "Allow installation of apps from unknown sources".
        6. -
        7. Confirm your choice by tapping on "OK" or "Allow".
        8. -
        -

        Once you have enabled this setting, you can proceed to install the Incredibox APK file. To do that, follow these steps:

        -
          -
        1. Locate the Incredibox APK file that you downloaded on your device.
        2. -
        3. Tap on the file to open it.
        4. -
        5. Tap on "Install" button.
        6. -
        7. Wait for the installation to finish.
        8. -
        9. Tap on "Open" button to launch the app.
        10. -
        -

        Congratulations! You have successfully installed the Incredibox app on your device. Now you can start using it and create your own music.

        -

        How to Use Incredibox App and Create Your Own Music

        -

        Incredibox is a very easy and fun app to use. You don't need any musical skills or knowledge to create your own tunes. All you need is your imagination and creativity. Here are some tips on how to use Incredibox app and create your own music:

        -
          -
        • When you open the app, you will see a screen with seven musical styles to choose from: Alpha, Little Miss, Sunrise, The Love, Brazil, Alive, and Jeevan. Each style has a different theme, mood, and sound. Tap on the style that you like to start creating your music.
        • -
        • You will then see a screen with eight characters dressed in different outfits. These are your beatboxers. Each character represents a different sound element: beat, effects, melody, chorus, etc. You can drag and drop icons onto the characters to make them produce different sounds. You can also tap on the characters to mute or unmute them.
        • -
        • You can create up to four different parts for your music by tapping on the arrows at the bottom of the screen. You can switch between the parts by tapping on them. You can also record your voice and add it to your music by tapping on the microphone icon at the top of the screen.
        • -
        • You can save your music by tapping on the floppy disk icon at the top of the screen. You can also share your music with other users by tapping on the globe icon at the top of the screen. You can also download your music as an MP3 file by tapping on the download icon at the top of the screen.
        • -
        • You can listen to and vote for other users' music by tapping on the headphones icon at the top of the screen. You can also discover new musical styles and unlock bonus content by tapping on the gift box icon at the top of the screen.
        • -
        -

        Incredibox is a great app for expressing yourself through music and having fun. You can experiment with different sounds, styles, and combinations to create unique and original tunes. You can also share your music with others and get inspired by their creations.

        -

        Features and Benefits of Incredibox App

        -

        Incredibox is not just a music app. It is also a creative tool that offers many features and benefits for its users. Some of these features and benefits are:

        -
          -
        • Incredibox is easy to use and suitable for all ages and backgrounds. You don't need any musical skills or knowledge to create your own tunes. You just need to drag and drop icons onto the characters and listen to their sounds.
        • -
        • Incredibox is fun and entertaining. You can enjoy creating your own music with different musical styles, sounds, and effects. You can also customize your beatboxers with different outfits and accessories.
        • -
        • Incredibox is educational and inspiring. You can learn about rhythm, tempo, harmony, melody, and musical composition through Incredibox. You can also discover new musical genres and cultures through Incredibox.
        • -
        • Incredibox is social and interactive. You can share your music with other users and listen to their music as well. You can also vote for your favorite tunes and get feedback from others.
        • -
        • Incredibox is rewarding and satisfying. You can save your music and download it as an MP3 file. You can also unlock bonus content and achievements by creating more music.
        • -
        -

However, downloading and installing the Incredibox APK file takes a few extra steps and precautions to make sure that you get a safe and working version of the app. You need to enable a setting on your device that allows you to install apps from unknown sources, and you need to choose a reliable and verified source for downloading the Incredibox APK file.

        -

        Incredibox is a great app for creating your own music and having fun. You can experiment with different sounds, styles, and combinations to create unique and original tunes. You can also share your music with others and get inspired by their creations. Incredibox is also a creative tool that offers many features and benefits for its users, such as learning, discovering, and expressing yourself through music.

        -

        If you are looking for something different or want to try other music apps, there are some alternatives that you can check out. However, none of them can match the simplicity, fun, and originality of Incredibox app. Incredibox is a one-of-a-kind app that will make you fall in love with music.

        -

        We recommend that you download and install the Incredibox APK file on your Android device and enjoy the music app. You will not regret it. Incredibox is an amazing app that will bring joy and creativity to your life.

        -

        FAQs: Common Questions and Answers About Incredibox App

        -

        Here are some of the common questions and answers about Incredibox app that you might find useful:

        -
          -
        1. Is Incredibox app safe to use?
        2. -

          Yes, Incredibox app is safe to use as long as you download it from the official website or the app stores. However, if you download the Incredibox APK file from other sources, you need to be careful and make sure that the file is safe and verified. You can use a reputable website like APKPure to download the Incredibox APK file.

          -
        3. Is Incredibox app free to use?
        4. -

          No, Incredibox app is not free to use. It costs $4.99 on both iOS and Android platforms. However, if you want to download Incredibox for free, you can try downloading the Incredibox APK file from a reliable source like APKPure.

          -
        5. What are the requirements for using Incredibox app?
        6. -

          To use Incredibox app, you need to have an iOS or Android device that meets the following requirements:

          -
            -
          • iOS: Requires iOS 10.0 or later. Compatible with iPhone, iPad, and iPod touch.
          • -
          • Android: Requires Android 5.0 or later.
          • -
          -
        7. How can I contact the developers of Incredibox app?
        8. -

          If you have any questions, feedback, or issues with Incredibox app, you can contact the developers of the app by visiting their website [incredibox.com] or by sending an email to contact@incredibox.com.

          -
        9. How can I support the developers of Incredibox app?
        10. -

          If you love Incredibox app and want to support the developers of the app, you can do so by buying the app from the official website or the app stores, by leaving a positive review and rating on the app stores, by sharing your music with others and promoting the app, or by donating to their [Patreon] page.

          -

        -
        -
        \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Lagu Gyal You A Party Animal Lyrics - Sing Along to Charly Black and Daddy Yankees Smash Hit.md b/spaces/fatiXbelha/sd/Download Lagu Gyal You A Party Animal Lyrics - Sing Along to Charly Black and Daddy Yankees Smash Hit.md deleted file mode 100644 index 9678ee22aa8ada2244ba668e25b0ce7a833213df..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Lagu Gyal You A Party Animal Lyrics - Sing Along to Charly Black and Daddy Yankees Smash Hit.md +++ /dev/null @@ -1,106 +0,0 @@ -
        -

        Download Lagu Gyal You A Party Animal: How to Enjoy the Hit Song by Charly Black and Daddy Yankee

        -

        If you are looking for a catchy and upbeat song to dance to, you might want to check out "Gyal You A Party Animal" by Charly Black and Daddy Yankee. This song is a fusion of reggae, dancehall, and Latin pop that will make you want to move your body. But how can you download, stream, or buy this song legally and for free? In this article, we will show you how to enjoy this hit song in different ways.

        -

        What is the Song About and Why is it Popular?

        -

        The title "Gyal You A Party Animal" is a Jamaican slang that means "Girl You Are a Party Lover". The song is a tribute to all the women who love to have fun and party hard. The lyrics are simple but catchy, with phrases like "She took one shot, two shot, three shot, four / After nine minutes she come back for more / She's a party animal" or "She's a good girl but she bad bad bad / She love rock 'n' roll / She love reggae / She love dancehall".

        -

        download lagu gyal you a party animal


        Download File > https://urllie.com/2uNxJH



        -

        The song is a collaboration between two popular artists from different backgrounds. Charly Black is a Jamaican reggae and dancehall singer who started his career in 2004. He is best known for his songs like "Whine & Kotch", "Girlfriend", "Bike Back", and "Hoist & Wine". Daddy Yankee is a Puerto Rican rapper, singer, songwriter, and actor who is considered as the "King of Reggaeton". He has been making music since 1994 and has released hits like "Gasolina", "Despacito", "Con Calma", and "Dura".

        -

        The song belongs to the genre of dancehall music, which is a type of Jamaican popular music that originated in the late 1970s. Dancehall music is characterized by fast rhythms, deejaying (rapping), singing, and sound effects. Dancehall music is influenced by reggae, hip hop, pop, salsa, soca, and other genres. Dancehall music is popular not only in Jamaica but also in other parts of the world, especially in Latin America.

        -

How to Download the Song Legally and for Free from Various Websites

        -

        Downloading music for free can be tempting, but it can also be risky. You might end up with low-quality files, viruses, malware, or legal issues. That's why you should always download music from reputable and legal sources. Fortunately, there are many websites that offer free music downloads legally and safely. Here are some of the best ones that have the song "Gyal You A Party Animal" by Charly Black and Daddy Yankee.

        -
          -
        • SoundCloud: SoundCloud is one of the largest and most popular platforms for music streaming and downloading. It has millions of songs from various genres, artists, and labels. You can find the song "Gyal You A Party Animal" by Charly Black and Daddy Yankee on SoundCloud. To download it, you need to create a free account and click on the "More" button below the song. Then, select "Download file" and save it to your device. The format of the file is MP3 and the quality is 128 kbps.
        • -
        • Last.fm: Last.fm is another great platform for music streaming and downloading. It has a huge collection of songs from different artists and genres. You can also discover new music based on your preferences and listening history. You can find the song "Gyal You A Party Animal" by Charly Black and Daddy Yankee on Last.fm. To download it, you need to click on the "Free Download" button below the song and follow the instructions. The format of the file is MP3 and the quality is 192 kbps.
        • -
        • Noisetrade: Noisetrade is a platform that connects artists and fans. It allows artists to share their music for free in exchange for fan emails and tips. It has a variety of songs from different genres and styles. You can find the song "Gyal You A Party Animal" by Charly Black and Daddy Yankee on Noisetrade. To download it, you need to enter your email address and zip code and click on the "Download Music" button. The format of the file is MP3 and the quality is 320 kbps.
        • -
        • Jamendo Music: Jamendo Music is a platform that offers free music downloads under Creative Commons licenses. It has thousands of songs from independent artists and labels. You can find the song "Gyal You A Party Animal" by Charly Black and Daddy Yankee on Jamendo Music. To download it, you need to click on the "Download" button below the song and choose the license type. The format of the file is MP3 and the quality is 320 kbps.
        • -
        • Bandcamp: Bandcamp is a platform that allows artists to sell their music directly to fans. It has a wide range of songs from various genres and artists. You can find the song "Gyal You A Party Animal" by Charly Black and Daddy Yankee on Bandcamp. To download it, you need to click on the "Buy Digital Track" button below the song and name your price (you can enter zero if you want it for free). The format of the file is MP3, FLAC, or WAV and the quality is variable.
        • -
        -
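The free download sites above quote different MP3 bitrates (128, 192, and 320 kbps), and a rough rule of thumb is that file size is bitrate times duration divided by eight. The short Python sketch below is our own illustration of that arithmetic; the 3 minute 57 second track length is only an example, not an official duration.

```python
def mp3_size_mb(bitrate_kbps: int, seconds: int) -> float:
    """Approximate MP3 size in MB: kilobits per second * seconds / 8 / 1024."""
    return bitrate_kbps * seconds / 8 / 1024

duration = 3 * 60 + 57  # example track length of 3:57
for kbps in (128, 192, 320):
    print(f"{kbps} kbps  ->  about {mp3_size_mb(kbps, duration):.1f} MB")
```

So a 320 kbps file, like the ones quoted for Noisetrade and Jamendo Music, will be roughly two and a half times larger than a 128 kbps file of the same track.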

        How to Stream the Song Online from YouTube, Spotify, and Other Platforms

        -

        Streaming music online can be convenient, but it can also have some drawbacks. You might need a stable internet connection, pay for a subscription fee, or deal with ads or interruptions. That's why you should always stream music from reliable and legal platforms. Here are some of the best ones that have the song "Gyal You A Party Animal" by Charly Black and Daddy Yankee.

        -
          -
        • YouTube: YouTube is one of the most popular and widely used platforms for video streaming. It has billions of videos from various categories, including music videos. You can find the official music video of "Gyal You A Party Animal" by Charly Black and Daddy Yankee on YouTube. To stream it, you just need to click on the "Play" button and enjoy. The quality of the video is HD (1080p) and the sound is stereo.
        • -
        • Spotify: Spotify is one of the leading platforms for music streaming. It has millions of songs from different genres, artists, and playlists. You can find the song "Gyal You A Party Animal" by Charly Black and Daddy Yankee on Spotify. To stream it, you need to create a free account or log in with your Facebook or Google account. Then, you can either listen to it with ads or upgrade to Spotify Premium for ad-free listening, offline mode, better sound quality, and more features. The quality of the sound is 96 kbps for free users and 320 kbps for premium users.
        • -
        • Apple Music: Apple Music is another popular platform for music streaming. It has over 75 million songs from various genres, artists, and radio stations. You can find the song "Gyal You A Party Animal" by Charly Black and Daddy Yankee on Apple Music. To stream it, you need to sign up for a free trial or a subscription plan. Then, you can listen to it online or download it for offline listening. The quality of the sound is 256 kbps.
        • -
        • Amazon Music: Amazon Music is a platform that offers music streaming and downloading for Amazon Prime members. It has over 70 million songs from different genres, artists, and playlists. You can find the song "Gyal You A Party Animal" by Charly Black and Daddy Yankee on Amazon Music. To stream it, you need to have an Amazon Prime account or sign up for a free trial. Then, you can listen to it online or download it for offline listening. The quality of the sound is 256 kbps.
        • -
        • Deezer: Deezer is a platform that provides music streaming and downloading for music lovers. It has over 73 million songs from different genres, artists, and channels. You can find the song "Gyal You A Party Animal" by Charly Black and Daddy Yankee on Deezer. To stream it, you need to create a free account or log in with your Facebook or Google account. Then, you can either listen to it with ads or upgrade to Deezer Premium for ad-free listening, offline mode, better sound quality, and more features. The quality of the sound is 128 kbps for free users and 320 kbps for premium users.
        • -
        -

        How to Support the Artists by Buying Their Music or Merchandise

        -

        Streaming or downloading music for free can be convenient, but it can also be unfair to the artists who work hard to create their music. That's why you should always support the artists by buying their music or merchandise if you like their songs. Here are some of the ways you can support Charly Black and Daddy Yankee financially and morally.

        -
          -
        • Buy their music: One of the best ways to support the artists is to buy their music from their official websites or online stores. You can buy the song "Gyal You A Party Animal" by Charly Black and Daddy Yankee as a single or as part of their albums. You can also buy their other songs or albums that you like. Buying their music will not only give you high-quality files but also help them earn money and recognition.
        • -
        • Buy their merchandise: Another way to support the artists is to buy their merchandise from their official websites or online stores. You can buy t-shirts, hoodies, hats, posters, stickers, mugs, and other items that feature their names, logos, or images. Buying their merchandise will not only show your appreciation and loyalty but also help them promote their brand and identity.
        • -
        • Follow them on social media: A simple way to support the artists is to follow them on social media platforms like Facebook, Instagram, Twitter, YouTube, TikTok, and others. You can like, comment, share, subscribe, or watch their posts, videos, stories, or live streams. Following them on social media will not only keep you updated with their news and activities but also help them grow their fan base and influence.
        • -
        • Attend their events and tours: A fun way to support the artists is to attend their events and tours when they are in your area or country. You can buy tickets to their concerts, festivals, clubs, or parties where they perform live. You can also meet them in person at their meet-and-greets, signings, or fan meetings. Attending their events and tours will not only give you an unforgettable experience but also help them generate revenue and exposure.
        • -
        -

        Conclusion

        -

        In conclusion, "Gyal You A Party Animal" by Charly Black and Daddy Yankee is a hit song that you can enjoy in different ways. You can download it legally and for free from various websites like SoundCloud, Last.fm, Noisetrade, Jamendo Music, or Bandcamp. You can stream it online from platforms like YouTube, Spotify, Apple Music, Amazon Music or Deezer. You can also support the artists by buying their music or merchandise from their official websites or online stores. You can also follow them on social media or attend their events and tours. No matter how you choose to enjoy the song, you will surely have a great time with this party anthem.

        -

        Thank you for reading this article. We hope you learned something new and useful. If you have any questions, comments, or feedback, please feel free to share them with us. We would love to hear from you. And don't forget to download, stream, or buy the song "Gyal You A Party Animal" by Charly Black and Daddy Yankee and have fun!

        -

        FAQs

        -

        Here are some of the frequently asked questions about the song "Gyal You A Party Animal" by Charly Black and Daddy Yankee.

        -


        -
          -
        1. When was the song released?

          The song was originally released by Charly Black in 2014 as a single. It was later remixed by Daddy Yankee in 2016 and released as a single again.

          -
        2. How popular is the song?

          The song is very popular worldwide, especially in Latin America and Europe. It has over 1 billion views on YouTube, over 300 million streams on Spotify, and over 10 million sales and streams combined in the US. It has also reached the top 10 charts in several countries like Colombia, Spain, Italy, France, and Germany.

          -
        3. What awards has the song won?

          The song has won several awards, such as the ASCAP Latin Award for Urban Song of the Year in 2017, the Premio Lo Nuestro for Urban Collaboration of the Year in 2017, and the Billboard Latin Music Award for Latin Rhythm Song of the Year in 2018.

          -
        4. What is the meaning of "lagu"?

          "Lagu" is an Indonesian word that means "song". It is often used as a prefix to indicate that something is a song, such as "lagu pop" (pop song) or "lagu rock" (rock song). In this case, "download lagu gyal you a party animal" means "download the song gyal you a party animal".

          -
        5. How can I learn more about the artists?

          You can learn more about Charly Black and Daddy Yankee by visiting their official websites or social media accounts. You can also read their biographies, interviews, or articles online. Here are some of their links:

          - -

        197e85843d
        -
        -
        \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Evertale Mod APK OBB How to Get Unlimited Money and Soul Stones.md b/spaces/fatiXbelha/sd/Evertale Mod APK OBB How to Get Unlimited Money and Soul Stones.md deleted file mode 100644 index 1c0a0783e8075571a8fef78eba6530dceb8a7150..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Evertale Mod APK OBB How to Get Unlimited Money and Soul Stones.md +++ /dev/null @@ -1,70 +0,0 @@ -
        -

        Evertale APK OBB Mod: A Guide for Android Gamers

        -

        If you are a fan of role-playing games with fantasy elements, you might have heard of Evertale, a popular game developed by ZigZaGame Inc. In this game, you can capture and train monsters, explore a vast open world, and battle against other players in online PvP mode. But did you know that you can also enjoy a modded version of Evertale on your Android device? In this article, we will tell you everything you need to know about Evertale APK OBB mod, including what it is, how to download and install it, and what benefits it can offer you. Read on to find out more!

        -

        evertale apk obb mod


        Download File ⇒⇒⇒ https://urllie.com/2uNEdi



        -

        What is Evertale?

        -

        Evertale is a game that combines elements of Pokemon, Final Fantasy, and Dragon Quest. It is set in a world called Erden, where a mysterious curse called Pandemonium has plagued the land for centuries. You play as one of the Crestbearers, a group of heroes who can wield the power of ancient weapons and break the curse. Along the way, you will encounter over 200 unique monsters that you can capture and train, as well as other characters that will join your party.

        -

        The story and gameplay of Evertale

        -

        The game has two modes: story mode and online mode. In story mode, you can follow the main storyline and complete various quests and side missions. You can also explore the different regions of Erden, such as forests, deserts, caves, and cities. You will have to fight against wild monsters and enemy factions using a turn-based combat system. You can use different skills, items, and strategies to win battles. You can also customize your team by choosing which monsters and characters to bring along.

        -

        The features and benefits of Evertale

        -

        One of the main features of Evertale is the monster collection aspect. You can catch and collect over 200 monsters with different types, abilities, and evolutions. You can also train them by leveling them up, equipping them with gear, and teaching them new skills. You can even breed them to create new hybrids with unique traits. Another feature of Evertale is the online mode, where you can compete with other players in real-time PvP battles. You can join guilds, participate in events, and rank up in leaderboards. You can also trade monsters and items with other players. The game also has stunning graphics, music, and voice acting that enhance the immersive experience.

        -

        What is an APK OBB mod?

        -

        An APK OBB mod is a modified version of an original game that has been altered by third-party developers or hackers. It usually comes in two files: an APK file and an OBB file. An APK file is an application package file that contains the code, resources, and metadata of the game. An OBB file is an opaque binary blob file that contains additional data such as graphics, sounds, and videos.

        -

        evertale apk obb mod unlimited money
        -evertale apk obb mod offline
        -evertale apk obb mod latest version
        -evertale apk obb mod free download
        -evertale apk obb mod android 1
        -evertale apk obb mod rexdl
        -evertale apk obb mod revdl
        -evertale apk obb mod 2.0.82
        -evertale apk obb mod hack
        -evertale apk obb mod mega
        -evertale apk obb mod no root
        -evertale apk obb mod god mode
        -evertale apk obb mod high damage
        -evertale apk obb mod unlimited soul stones
        -evertale apk obb mod unlimited gems
        -evertale apk obb mod unlimited coins
        -evertale apk obb mod unlimited everything
        -evertale apk obb mod unlocked all characters
        -evertale apk obb mod unlocked all weapons
        -evertale apk obb mod unlocked all skills
        -evertale apk obb mod unlocked all chapters
        -evertale apk obb mod unlocked all monsters
        -evertale apk obb mod premium
        -evertale apk obb mod pro
        -evertale apk obb mod full version
        -evertale apk obb mod full paid
        -evertale apk obb mod full cracked
        -evertale apk obb mod full unlocked
        -evertale apk obb mod full features
        -evertale apk obb mod full game
        -evertale apk obb mod updated version
        -evertale apk obb mod new version
        -evertale apk obb mod latest update
        -evertale apk obb mod new update
        -evertale apk obb mod download link
        -evertale apk obb mod download for android
        -evertale apk obb mod download for pc
        -evertale apk obb mod download free 2023
        -evertale apk obb mod download apkpure
        -evertale apk obb mod download dlandroid.com[^1^]

        -

        The difference between APK and OBB files

        -

        The main difference between APK and OBB files is that APK files are executable files that can be installed on your device directly, while OBB files are data files that need to be placed in a specific folder on your device's storage. APK files are usually smaller than OBB files because they only contain the essential components of the game. OBB files are usually larger because they contain the extra content that enhances the game's quality.

        -
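        To make this concrete, here is a minimal sketch of how an APK plus OBB package is typically installed over adb. The file names and the package ID (com.zigzagame.evertale) are assumptions for illustration only; use the names that actually come with your download. On most devices you can also simply tap the APK to install it and copy the OBB folder into place with a file manager.

        ```bash
        # Hypothetical file names -- substitute the ones from your download.
        # 1. Install the executable part of the mod (the APK).
        adb install evertale-mod.apk

        # 2. Place the OBB data file where Android expects it:
        #    /sdcard/Android/obb/<package-id>/main.<version>.<package-id>.obb
        adb shell mkdir -p /sdcard/Android/obb/com.zigzagame.evertale
        adb push main.1.com.zigzagame.evertale.obb /sdcard/Android/obb/com.zigzagame.evertale/
        ```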

        The advantages and risks of using mods

        -

        Using mods can bring both advantages and risks for gamers. On the upside, a mod can offer perks such as unlimited in-game money or unlocked characters; on the downside, it can expose you to viruses, malware, or unstable gameplay, which is why you should always follow the installation guide and download the mod files from a reliable source. You will be amazed by how much more fun and exciting Evertale can be with the mod. However, you should also be aware of the risks and responsibilities that come with using mods. You should always respect the game developer and other players, and use the mod moderately and responsibly. We hope you have enjoyed this article and learned something new. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy gaming!

        -

        FAQs

        -

        Here are some frequently asked questions about Evertale APK OBB mod:

        -

        Q: Is Evertale APK OBB mod free?

        -

        A: Yes, Evertale APK OBB mod is free to download and use. However, you might need to pay for some in-app purchases or subscriptions if you want to access some premium features or content.

        -

        Q: Is Evertale APK OBB mod safe?

        -

        A: Evertale APK OBB mod is generally safe to use, as long as you download it from a reputable source and scan it for viruses or malware before installing it. However, you should also be careful when using online features or connecting to other players, as they might have malicious intentions or use cheats or hacks.

        -

        Q: Is Evertale APK OBB mod compatible with my device?

        -

        A: Evertale APK OBB mod is compatible with most Android devices that run on Android 4.4 or higher. However, you should also check the minimum requirements and specifications of the game and the mod before downloading and installing them. You should also make sure that you have enough storage space and battery life on your device.

        -

        Q: Can I play Evertale APK OBB mod offline?

        -

        A: Yes, you can play Evertale APK OBB mod offline, as long as you have downloaded and installed both the APK file and the OBB file on your device. You can enjoy the story mode and the offline features of the game without any internet connection. However, you will need an internet connection to access the online mode and the online features of the game.

        -

        Q: Can I update Evertale APK OBB mod?

        -

        A: Yes, you can update Evertale APK OBB mod whenever there is a new version available. You can check for updates on the source website or app that you downloaded the mod from, or on other websites or forums that offer the mod files. You will need to download and install both the new APK file and the new OBB file to update the mod.

        401be4b1e0
        -
        -
        \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Experience the Thrills and Chills of Garden of Banban 3 APK Gratis on Your Android Device.md b/spaces/fatiXbelha/sd/Experience the Thrills and Chills of Garden of Banban 3 APK Gratis on Your Android Device.md deleted file mode 100644 index 501c155e2e0d93f2fbdee59d3d0bbb806587aada..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Experience the Thrills and Chills of Garden of Banban 3 APK Gratis on Your Android Device.md +++ /dev/null @@ -1,104 +0,0 @@ -
        -

        Garten of Banban 3: A Horror Game That Will Keep You on the Edge of Your Seat

        -

        If you are looking for a horror game that will keep you on the edge of your seat, then you should try Garten of Banban 3, the official mobile game of the Garten of Banban series. This game is a sequel to the popular Garten of Banban and Garten of Banban 2 games, which have been downloaded by thousands of gamers around the world. In this game, you will explore the mysterious and magical Garden of Banban, where you will encounter a variety of fascinating characters, including Jumbo Josh, Opila, and many more. With its engaging storyline and immersive gameplay, this game is sure to keep you entertained for hours on end.

        -

        garden of ban ban 3 apk gratis


        DOWNLOADhttps://urllie.com/2uNEQ8



        -

        What is Garten of Banban 3?

        -

        The sequel to the popular Garten of Banban game series

        -

        Garten of Banban 3 is the third installment in the Garten of Banban game series, which was created by Euphoric Brothers, an independent game studio. The first game, Garten of Banban, was released in early 2023, and it introduced players to the world of Garten of Banban, where they had to find eggs and escape from a creepy kindergarten. The second game, Garten of Banban 2, followed a few months later and continued the story of the first game, with more challenges and surprises. The third game, Garten of Banban 3, arrived later in 2023, and it is the most advanced and exciting game in the series so far.

        -

        A mobile game that explores the mysterious Garden of Banban

        -

        Garten of Banban 3 is a mobile game that can be played on Android devices or on computers. The game is available for free download from APKCombo or Google Play. The game is compatible with various languages, such as English, Spanish, Portuguese, Russian, Japanese, Korean, Chinese, German, Polish, Turkish, Indonesian, French, and Italian. The game follows the story of a parent who is looking for their child in the Garden of Banban, a mysterious establishment that was left suspiciously empty. The player has to explore different areas of the garden, such as the beach, the forest, and the underground. Along the way, they will meet different characters who will either help them or hinder them.

        -

        A horror game that features creepy characters and challenges

        -

        Garten of Banban 3 is a horror game that features creepy characters and challenges that will test your skills and nerves. Some of the characters that you will encounter are Jumbo Josh, a giant teddy bear who likes to hug people; Opila, a bird-like creature who likes to peck people; and Bambam, a mysterious figure who controls everything in the garden. You will also face various obstacles and puzzles that will require your attention and logic. You will have to collect special eggs that will allow you to unlock new areas and characters. You will also have to avoid the dangers that lurk in every corner and escape from the garden before it's too late.

        -

        How to play Garten of Banban 3?

        -

        Download the game for free from APKCombo or Google Play

        -

        The first step to play Garten of Banban 3 is to download the game for free from APKCombo or Google Play. APKCombo is a website that provides APK files for Android games and apps, which you can install on your device without using the Google Play Store. Google Play is the official app store for Android devices, where you can find and download millions of games and apps. Both APKCombo and Google Play are safe and reliable sources for downloading Garten of Banban 3.

        -
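        If you sideload the APK from a site like APKCombo instead of installing it from Google Play, it is a good idea to check the file before you install it. Below is a minimal sketch assuming the download is saved as garten-of-banban-3.apk (a hypothetical file name): compare the checksum with the value shown on the download page, if one is given, and print the signing certificate with apksigner from the Android SDK build-tools.

        ```bash
        # Verify the checksum matches the value published by the download site (if provided).
        sha256sum garten-of-banban-3.apk

        # Print the APK's signing certificate so you can see who signed it.
        apksigner verify --print-certs garten-of-banban-3.apk
        ```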

        Choose your language from the available options

        -

        The next step to play Garten of Banban 3 is to choose your language from the available options. The game supports various languages, such as English, Spanish, Portuguese, Russian, Japanese, Korean, Chinese, German, Polish, Turkish, Indonesian, French, and Italian. You can select your preferred language from the settings menu in the game. You can also change your language at any time during the game.

        -

        Follow the storyline and interact with the characters

        -

        The main part of playing Garten of Banban 3 is to follow the storyline and interact with the characters. The game follows the story of a parent who is looking for their child in the Garden of Banban, a mysterious establishment that was left suspiciously empty. You will have to explore different areas of the garden, such as the beach, the forest, and the underground. Along the way, you will meet different characters who will either help you or hinder you. Some of the characters that you will encounter are Jumbo Josh, a giant teddy bear who likes to hug people; Opila, a bird-like creature who likes to peck people; and Bambam, a mysterious figure who controls everything in the garden. You will also face various obstacles and puzzles that will require your attention and logic.

        -

        garden of ban ban 3 apk free download
        -garden of ban ban 3 mod apk unlimited money
        -garden of ban ban 3 android game review
        -garden of ban ban 3 apk latest version
        -garden of ban ban 3 game play online
        -garden of ban ban 3 apk obb data
        -garden of ban ban 3 horror game walkthrough
        -garden of ban ban 3 apk full unlocked
        -garden of ban ban 3 cheats and tips
        -garden of ban ban 3 apk no ads
        -garden of ban ban 3 game trailer
        -garden of ban ban 3 mod apk all characters
        -garden of ban ban 3 android game download
        -garden of ban ban 3 apk offline
        -garden of ban ban 3 game story
        -garden of ban ban 3 apk hack
        -garden of ban ban 3 mod apk android original gratis 2023[^2^]
        -garden of ban ban 3 game guide
        -garden of ban ban 3 apk for pc
        -garden of ban ban 3 game features
        -garden of ban ban 3 apk update
        -garden of ban ban 3 mod apk download for android[^2^]
        -garden of ban ban 3 game rating and reviews[^1^]
        -garden of ban ban 3 apk size and requirements
        -garden of ban ban 3 game characters and enemies
        -garden of ban ban 3 apk mirror
        -garden of ban ban 3 mod apk free shopping
        -garden of ban ban 3 android game release date[^1^]
        -garden of ban ban 3 apk old version
        -garden of ban ban 3 game secrets and easter eggs
        -garden of ban ban 3 apk pure
        -garden of ban ban 3 mod apk no root
        -garden of ban ban 3 android game screenshots[^1^]
        -garden of ban ban 3 apk direct link
        -garden of banban 3 euphoric brothers games[^1^]
        -garden of Banban III - Apps on Google Play[^1^]
        -Garten Of Banban III - Android Gameplay HD[^2^]

        -

        Collect special eggs and use them to unlock new areas and characters

        -

        One of the key features of playing Garten of Banban 3 is to collect special eggs and use them to unlock new areas and characters. The eggs are hidden throughout the garden, and they have different colors and shapes. Each egg has a unique ability or function that can help you progress in the game. For example, some eggs can open doors or activate switches; some eggs can transform into different objects or animals; and some eggs can give you hints or clues. You will have to collect as many eggs as possible and use them wisely to unlock new areas and characters in the game.

        -

        Avoid the dangers and escape the garden

        -

        The final goal of playing Garten of Banban 3 is to avoid the dangers and escape the garden. The garden is not a safe place, as it is filled with traps, enemies, and secrets. You will have to be careful and alert at all times, as you never know what might happen next. You will have to avoid being caught by Jumbo Josh, Opila, Bambam, or any other hostile character in the game. You will also have to avoid falling into pits, spikes, water, or fire. You will have to find a way out of the garden before it's too late.

        -

        Why should you play Garten of Banban 3?

        -

        It has engaging and immersive gameplay

        -

        One of the reasons why you should play Garten of Banban 3 is that it has engaging and immersive gameplay. The game has a captivating storyline that will keep you interested and curious throughout the game. The game also has a variety of gameplay elements that will keep you entertained and challenged. You will have to explore, interact, collect, solve, avoid, and escape in this game. The game also has a dynamic difficulty system that will adjust according to your performance and preferences.

        -

        It has a smooth and optimized performance

        -

        Another reason why you should play Garten of Banban 3 is that it has a smooth and optimized performance. The game has been designed with high-quality graphics and sound effects that will enhance your gaming experience. The game also has a fast loading time and a stable frame rate that will ensure a smooth gameplay. The game also has a low battery consumption and a small storage space requirement that will make it easy for you to play on your device.

        -

        It has a creepy and thrilling atmosphere

        -

        A third reason why you should play Garten of Banban 3 is that it has a creepy and thrilling atmosphere. The game has a horror theme that will create a sense of fear and suspense in you. The game also has a dark and mysterious setting that will make you feel like you are in a nightmare. The game also has a realistic and atmospheric sound design that will make you hear every scream and whisper in the game. The game also has a jump scare system that will make you jump out of your seat at any moment.

        -

        It has a memorable and captivating storyline

        -

        A fourth reason why you should play Garten of Banban 3 is that it has a memorable and captivating storyline. The game has a well-written and well-acted script that will make you feel emotionally invested in the game. The game also has a branching and nonlinear storyline that will make you experience different outcomes and endings depending on your choices and actions. The game also has a hidden and mysterious backstory that will make you want to discover more about the Garden of Banban and its secrets.

        -

        It has mobile compatibility and accessibility

        -

        A fifth reason why you should play Garten of Banban 3 is that it has mobile compatibility and accessibility. The game is designed for mobile devices, which means that you can play it anytime and anywhere you want. The game also has a simple and intuitive user interface that will make it easy for you to navigate and control the game. The game also has touch screen and tilt sensor support that will make it more interactive and fun to play. The game also has an offline mode that will allow you to play the game without an internet connection.

        -

        Conclusion

        -

        Garten of Banban 3 is a horror game that will keep you on the edge of your seat. It is the latest entry in the popular Garten of Banban game series, which has been downloaded by thousands of gamers around the world. In this game, you will explore the mysterious and magical Garden of Banban, where you will encounter a variety of fascinating characters, including Jumbo Josh, Opila, and many more. With its engaging storyline and immersive gameplay, this game is sure to keep you entertained for hours on end.

        -

        If you are looking for a horror game that will keep you on the edge of your seat, then you should try Garten of Banban 3, the official mobile game of the Garten of Banban series. You can download the game for free from APKCombo or Google Play. You can choose your language from the available options. You can follow the storyline and interact with the characters. You can collect special eggs and use them to unlock new areas and characters. You can avoid the dangers and escape the garden. You can enjoy the smooth and optimized performance, the creepy and thrilling atmosphere, the memorable and captivating storyline, and the mobile compatibility and accessibility of this game.

        -

        So what are you waiting for? Download Garten of Banban 3 today and experience the horror of the Garden of Banban!

        -

        FAQs

        -

        Here are some frequently asked questions about Garten of Banban 3:

        Q: What is Garten of Banban 3?
        A: Garten of Banban 3 is a horror game that follows the story of a parent who is looking for their child in the Garden of Banban, a mysterious establishment that was left suspiciously empty.

        Q: How can I download Garten of Banban 3?
        A: You can download Garten of Banban 3 for free from APKCombo or Google Play.

        Q: How do I play Garten of Banban 3?
        A: You have to explore different areas of the garden, meet different characters, collect special eggs, avoid dangers, and escape from the garden.

        Q: What are some features of Garten of Banban 3?
        A: Garten of Banban 3 has engaging and immersive gameplay, smooth and optimized performance, a creepy and thrilling atmosphere, a memorable and captivating storyline, and mobile compatibility and accessibility.

        Q: Is Garten of Banban 3 scary?
        A: Garten of Banban 3 is a horror game that features creepy characters, challenges, obstacles, puzzles, traps, enemies, secrets, jumpscares, and a dark and mysterious setting. It is not recommended for players who are easily scared or who have heart problems.

        197e85843d
        -
        -
        \ No newline at end of file diff --git a/spaces/fazzam/Grainsight2/tools.py b/spaces/fazzam/Grainsight2/tools.py deleted file mode 100644 index 36d0e71afede07c26ecb9ade3708892089789af5..0000000000000000000000000000000000000000 --- a/spaces/fazzam/Grainsight2/tools.py +++ /dev/null @@ -1,432 +0,0 @@ -import numpy as np -from PIL import Image -import matplotlib.pyplot as plt -import cv2 -import torch -import os -import sys -import clip - - -def convert_box_xywh_to_xyxy(box): - if len(box) == 4: - return [box[0], box[1], box[0] + box[2], box[1] + box[3]] - else: - result = [] - for b in box: - b = convert_box_xywh_to_xyxy(b) - result.append(b) - return result - - -def segment_image(image, bbox): - image_array = np.array(image) - segmented_image_array = np.zeros_like(image_array) - x1, y1, x2, y2 = bbox - segmented_image_array[y1:y2, x1:x2] = image_array[y1:y2, x1:x2] - segmented_image = Image.fromarray(segmented_image_array) - black_image = Image.new("RGB", image.size, (255, 255, 255)) - # transparency_mask = np.zeros_like((), dtype=np.uint8) - transparency_mask = np.zeros( - (image_array.shape[0], image_array.shape[1]), dtype=np.uint8 - ) - transparency_mask[y1:y2, x1:x2] = 255 - transparency_mask_image = Image.fromarray(transparency_mask, mode="L") - black_image.paste(segmented_image, mask=transparency_mask_image) - return black_image - - -def format_results(result, filter=0): - annotations = [] - n = len(result.masks.data) - for i in range(n): - annotation = {} - mask = result.masks.data[i] == 1.0 - - if torch.sum(mask) < filter: - continue - annotation["id"] = i - annotation["segmentation"] = mask.cpu().numpy() - annotation["bbox"] = result.boxes.data[i] - annotation["score"] = result.boxes.conf[i] - annotation["area"] = annotation["segmentation"].sum() - annotations.append(annotation) - return annotations - - -def filter_masks(annotations): # filter the overlap mask - annotations.sort(key=lambda x: x["area"], reverse=True) - to_remove = set() - for i in range(0, len(annotations)): - a = annotations[i] - for j in range(i + 1, len(annotations)): - b = annotations[j] - if i != j and j not in to_remove: - # check if - if b["area"] < a["area"]: - if (a["segmentation"] & b["segmentation"]).sum() / b[ - "segmentation" - ].sum() > 0.8: - to_remove.add(j) - - return [a for i, a in enumerate(annotations) if i not in to_remove], to_remove - - -def get_bbox_from_mask(mask): - mask = mask.astype(np.uint8) - contours, hierarchy = cv2.findContours( - mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE - ) - x1, y1, w, h = cv2.boundingRect(contours[0]) - x2, y2 = x1 + w, y1 + h - if len(contours) > 1: - for b in contours: - x_t, y_t, w_t, h_t = cv2.boundingRect(b) - # 将多个bbox合并成一个 - x1 = min(x1, x_t) - y1 = min(y1, y_t) - x2 = max(x2, x_t + w_t) - y2 = max(y2, y_t + h_t) - h = y2 - y1 - w = x2 - x1 - return [x1, y1, x2, y2] - - -def fast_process( - annotations, args, mask_random_color, bbox=None, points=None, edges=False, contour_thickness=2 -): - if isinstance(annotations[0], dict): - annotations = [annotation["segmentation"] for annotation in annotations] - - result_name = os.path.basename(args.img_path) - image = cv2.imread(args.img_path) - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - original_h = image.shape[0] - original_w = image.shape[1] - - plt.figure(figsize=(original_w/100, original_h/100)) - plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0) - plt.margins(0, 0) - plt.gca().xaxis.set_major_locator(plt.NullLocator()) - 
plt.gca().yaxis.set_major_locator(plt.NullLocator()) - plt.imshow(image) - if args.better_quality == True: - if isinstance(annotations[0], torch.Tensor): - annotations = np.array(annotations.cpu()) - for i, mask in enumerate(annotations): - mask = cv2.morphologyEx( - mask.astype(np.uint8), cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8) - ) - annotations[i] = cv2.morphologyEx( - mask.astype(np.uint8), cv2.MORPH_OPEN, np.ones((8, 8), np.uint8) - ) - if args.device == "cpu": - annotations = np.array(annotations) - fast_show_mask( - annotations, - plt.gca(), - random_color=mask_random_color, - bbox=bbox, - points=points, - point_label=args.point_label, - retinamask=args.retina, - target_height=original_h, - target_width=original_w, - ) - else: - if isinstance(annotations[0], np.ndarray): - annotations = torch.from_numpy(annotations) - fast_show_mask_gpu( - annotations, - plt.gca(), - random_color=args.randomcolor, - bbox=bbox, - points=points, - point_label=args.point_label, - retinamask=args.retina, - target_height=original_h, - target_width=original_w, - ) - if isinstance(annotations, torch.Tensor): - annotations = annotations.cpu().numpy() - if args.withContours == True: - contour_all = [] - temp = np.zeros((original_h, original_w, 1)) - for i, mask in enumerate(annotations): - if type(mask) == dict: - mask = mask["segmentation"] - annotation = mask.astype(np.uint8) - contours, hierarchy = cv2.findContours( - annotation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE - ) - for contour in contours: - contour_all.append(contour) - cv2.drawContours(temp, contour_all, -1, (255, 255, 255), contour_thickness) # Using contour_thickness here - - save_path = args.output - if not os.path.exists(save_path): - os.makedirs(save_path) - plt.axis("off") - fig = plt.gcf() - plt.draw() - - try: - buf = fig.canvas.tostring_rgb() - except AttributeError: - fig.canvas.draw() - buf = fig.canvas.tostring_rgb() - - cols, rows = fig.canvas.get_width_height() - img_array = np.fromstring(buf, dtype=np.uint8).reshape(rows, cols, 3) - cv2.imwrite(os.path.join(save_path, result_name), cv2.cvtColor(img_array, cv2.COLOR_RGB2BGR)) - - -# CPU post process -def fast_show_mask( - annotation, - ax, - random_color=False, - bbox=None, - points=None, - point_label=None, - retinamask=True, - target_height=960, - target_width=960, -): - msak_sum = annotation.shape[0] - height = annotation.shape[1] - weight = annotation.shape[2] - # 将annotation 按照面积 排序 - areas = np.sum(annotation, axis=(1, 2)) - sorted_indices = np.argsort(areas) - annotation = annotation[sorted_indices] - - index = (annotation != 0).argmax(axis=0) - if random_color == True: - color = np.random.random((msak_sum, 1, 1, 3)) - else: - color = np.ones((msak_sum, 1, 1, 3)) * np.array( - [30 / 255, 144 / 255, 255 / 255] - ) - transparency = np.ones((msak_sum, 1, 1, 1)) * 0.6 - visual = np.concatenate([color, transparency], axis=-1) - mask_image = np.expand_dims(annotation, -1) * visual - - show = np.zeros((height, weight, 4)) - h_indices, w_indices = np.meshgrid( - np.arange(height), np.arange(weight), indexing="ij" - ) - indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None)) - # 使用向量化索引更新show的值 - show[h_indices, w_indices, :] = mask_image[indices] - if bbox is not None: - x1, y1, x2, y2 = bbox - ax.add_patch( - plt.Rectangle( - (x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1 - ) - ) - # draw point - if points is not None: - plt.scatter( - [point[0] for i, point in enumerate(points) if point_label[i] == 1], - [point[1] for i, point in 
enumerate(points) if point_label[i] == 1], - s=20, - c="y", - ) - plt.scatter( - [point[0] for i, point in enumerate(points) if point_label[i] == 0], - [point[1] for i, point in enumerate(points) if point_label[i] == 0], - s=20, - c="m", - ) - - if retinamask == False: - show = cv2.resize( - show, (target_width, target_height), interpolation=cv2.INTER_NEAREST - ) - ax.imshow(show) - - -def fast_show_mask_gpu( - annotation, - ax, - random_color=False, - bbox=None, - points=None, - point_label=None, - retinamask=True, - target_height=960, - target_width=960, -): - msak_sum = annotation.shape[0] - height = annotation.shape[1] - weight = annotation.shape[2] - areas = torch.sum(annotation, dim=(1, 2)) - sorted_indices = torch.argsort(areas, descending=False) - annotation = annotation[sorted_indices] - # 找每个位置第一个非零值下标 - index = (annotation != 0).to(torch.long).argmax(dim=0) - if random_color == True: - color = torch.rand((msak_sum, 1, 1, 3)).to(annotation.device) - else: - color = torch.ones((msak_sum, 1, 1, 3)).to(annotation.device) * torch.tensor( - [30 / 255, 144 / 255, 255 / 255] - ).to(annotation.device) - transparency = torch.ones((msak_sum, 1, 1, 1)).to(annotation.device) * 0.6 - visual = torch.cat([color, transparency], dim=-1) - mask_image = torch.unsqueeze(annotation, -1) * visual - # 按index取数,index指每个位置选哪个batch的数,把mask_image转成一个batch的形式 - show = torch.zeros((height, weight, 4)).to(annotation.device) - h_indices, w_indices = torch.meshgrid( - torch.arange(height), torch.arange(weight), indexing="ij" - ) - indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None)) - # 使用向量化索引更新show的值 - show[h_indices, w_indices, :] = mask_image[indices] - show_cpu = show.cpu().numpy() - if bbox is not None: - x1, y1, x2, y2 = bbox - ax.add_patch( - plt.Rectangle( - (x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1 - ) - ) - # draw point - if points is not None: - plt.scatter( - [point[0] for i, point in enumerate(points) if point_label[i] == 1], - [point[1] for i, point in enumerate(points) if point_label[i] == 1], - s=20, - c="y", - ) - plt.scatter( - [point[0] for i, point in enumerate(points) if point_label[i] == 0], - [point[1] for i, point in enumerate(points) if point_label[i] == 0], - s=20, - c="m", - ) - if retinamask == False: - show_cpu = cv2.resize( - show_cpu, (target_width, target_height), interpolation=cv2.INTER_NEAREST - ) - ax.imshow(show_cpu) - - -# clip -@torch.no_grad() -def retriev( - model, preprocess, elements: [Image.Image], search_text: str, device -): - preprocessed_images = [preprocess(image).to(device) for image in elements] - tokenized_text = clip.tokenize([search_text]).to(device) - stacked_images = torch.stack(preprocessed_images) - image_features = model.encode_image(stacked_images) - text_features = model.encode_text(tokenized_text) - image_features /= image_features.norm(dim=-1, keepdim=True) - text_features /= text_features.norm(dim=-1, keepdim=True) - probs = 100.0 * image_features @ text_features.T - return probs[:, 0].softmax(dim=0) - - -def crop_image(annotations, image_like): - if isinstance(image_like, str): - image = Image.open(image_like) - else: - image = image_like - ori_w, ori_h = image.size - mask_h, mask_w = annotations[0]["segmentation"].shape - if ori_w != mask_w or ori_h != mask_h: - image = image.resize((mask_w, mask_h)) - cropped_boxes = [] - cropped_images = [] - not_crop = [] - origin_id = [] - for _, mask in enumerate(annotations): - if np.sum(mask["segmentation"]) <= 100: - continue - origin_id.append(_) - bbox = 
get_bbox_from_mask(mask["segmentation"]) # mask 的 bbox - cropped_boxes.append(segment_image(image, bbox)) # 保存裁剪的图片 - # cropped_boxes.append(segment_image(image,mask["segmentation"])) - cropped_images.append(bbox) # 保存裁剪的图片的bbox - return cropped_boxes, cropped_images, not_crop, origin_id, annotations - - -def box_prompt(masks, bbox, target_height, target_width): - h = masks.shape[1] - w = masks.shape[2] - if h != target_height or w != target_width: - bbox = [ - int(bbox[0] * w / target_width), - int(bbox[1] * h / target_height), - int(bbox[2] * w / target_width), - int(bbox[3] * h / target_height), - ] - bbox[0] = round(bbox[0]) if round(bbox[0]) > 0 else 0 - bbox[1] = round(bbox[1]) if round(bbox[1]) > 0 else 0 - bbox[2] = round(bbox[2]) if round(bbox[2]) < w else w - bbox[3] = round(bbox[3]) if round(bbox[3]) < h else h - - # IoUs = torch.zeros(len(masks), dtype=torch.float32) - bbox_area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0]) - - masks_area = torch.sum(masks[:, bbox[1] : bbox[3], bbox[0] : bbox[2]], dim=(1, 2)) - orig_masks_area = torch.sum(masks, dim=(1, 2)) - - union = bbox_area + orig_masks_area - masks_area - IoUs = masks_area / union - max_iou_index = torch.argmax(IoUs) - - return masks[max_iou_index].cpu().numpy(), max_iou_index - - -def point_prompt(masks, points, point_label, target_height, target_width): # numpy 处理 - h = masks[0]["segmentation"].shape[0] - w = masks[0]["segmentation"].shape[1] - if h != target_height or w != target_width: - points = [ - [int(point[0] * w / target_width), int(point[1] * h / target_height)] - for point in points - ] - onemask = np.zeros((h, w)) - masks = sorted(masks, key=lambda x: x['area'], reverse=True) - for i, annotation in enumerate(masks): - if type(annotation) == dict: - mask = annotation['segmentation'] - else: - mask = annotation - for i, point in enumerate(points): - if mask[point[1], point[0]] == 1 and point_label[i] == 1: - onemask[mask] = 1 - if mask[point[1], point[0]] == 1 and point_label[i] == 0: - onemask[mask] = 0 - onemask = onemask >= 1 - return onemask, 0 - - -def text_prompt(annotations, text, img_path, device, wider=False, threshold=0.9): - cropped_boxes, cropped_images, not_crop, origin_id, annotations_ = crop_image( - annotations, img_path - ) - clip_model, preprocess = clip.load("ViT-B/32", device=device) - scores = retriev( - clip_model, preprocess, cropped_boxes, text, device=device - ) - max_idx = scores.argsort() - max_idx = max_idx[-1] - max_idx = origin_id[int(max_idx)] - - # find the biggest mask which contains the mask with max score - if wider: - mask0 = annotations_[max_idx]["segmentation"] - area0 = np.sum(mask0) - areas = [(i, np.sum(mask["segmentation"])) for i, mask in enumerate(annotations_) if i in origin_id] - areas = sorted(areas, key=lambda area: area[1], reverse=True) - indices = [area[0] for area in areas] - for index in indices: - if index == max_idx or np.sum(annotations_[index]["segmentation"] & mask0) / area0 > threshold: - max_idx = index - break - - return annotations_[max_idx]["segmentation"], max_idx diff --git a/spaces/fb700/chatglm-fitness-RLHF/src/face3d/util/util.py b/spaces/fb700/chatglm-fitness-RLHF/src/face3d/util/util.py deleted file mode 100644 index 0d689ca138fc0fbf5bec794511ea0f9e638f9ea9..0000000000000000000000000000000000000000 --- a/spaces/fb700/chatglm-fitness-RLHF/src/face3d/util/util.py +++ /dev/null @@ -1,208 +0,0 @@ -"""This script contains basic utilities for Deep3DFaceRecon_pytorch -""" -from __future__ import print_function -import numpy as np -import torch -from 
PIL import Image -import os -import importlib -import argparse -from argparse import Namespace -import torchvision - - -def str2bool(v): - if isinstance(v, bool): - return v - if v.lower() in ('yes', 'true', 't', 'y', '1'): - return True - elif v.lower() in ('no', 'false', 'f', 'n', '0'): - return False - else: - raise argparse.ArgumentTypeError('Boolean value expected.') - - -def copyconf(default_opt, **kwargs): - conf = Namespace(**vars(default_opt)) - for key in kwargs: - setattr(conf, key, kwargs[key]) - return conf - -def genvalconf(train_opt, **kwargs): - conf = Namespace(**vars(train_opt)) - attr_dict = train_opt.__dict__ - for key, value in attr_dict.items(): - if 'val' in key and key.split('_')[0] in attr_dict: - setattr(conf, key.split('_')[0], value) - - for key in kwargs: - setattr(conf, key, kwargs[key]) - - return conf - -def find_class_in_module(target_cls_name, module): - target_cls_name = target_cls_name.replace('_', '').lower() - clslib = importlib.import_module(module) - cls = None - for name, clsobj in clslib.__dict__.items(): - if name.lower() == target_cls_name: - cls = clsobj - - assert cls is not None, "In %s, there should be a class whose name matches %s in lowercase without underscore(_)" % (module, target_cls_name) - - return cls - - -def tensor2im(input_image, imtype=np.uint8): - """"Converts a Tensor array into a numpy image array. - - Parameters: - input_image (tensor) -- the input image tensor array, range(0, 1) - imtype (type) -- the desired type of the converted numpy array - """ - if not isinstance(input_image, np.ndarray): - if isinstance(input_image, torch.Tensor): # get the data from a variable - image_tensor = input_image.data - else: - return input_image - image_numpy = image_tensor.clamp(0.0, 1.0).cpu().float().numpy() # convert it into a numpy array - if image_numpy.shape[0] == 1: # grayscale to RGB - image_numpy = np.tile(image_numpy, (3, 1, 1)) - image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0 # post-processing: tranpose and scaling - else: # if it is a numpy array, do nothing - image_numpy = input_image - return image_numpy.astype(imtype) - - -def diagnose_network(net, name='network'): - """Calculate and print the mean of average absolute(gradients) - - Parameters: - net (torch network) -- Torch network - name (str) -- the name of the network - """ - mean = 0.0 - count = 0 - for param in net.parameters(): - if param.grad is not None: - mean += torch.mean(torch.abs(param.grad.data)) - count += 1 - if count > 0: - mean = mean / count - print(name) - print(mean) - - -def save_image(image_numpy, image_path, aspect_ratio=1.0): - """Save a numpy image to the disk - - Parameters: - image_numpy (numpy array) -- input numpy array - image_path (str) -- the path of the image - """ - - image_pil = Image.fromarray(image_numpy) - h, w, _ = image_numpy.shape - - if aspect_ratio is None: - pass - elif aspect_ratio > 1.0: - image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC) - elif aspect_ratio < 1.0: - image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC) - image_pil.save(image_path) - - -def print_numpy(x, val=True, shp=False): - """Print the mean, min, max, median, std, and size of a numpy array - - Parameters: - val (bool) -- if print the values of the numpy array - shp (bool) -- if print the shape of the numpy array - """ - x = x.astype(np.float64) - if shp: - print('shape,', x.shape) - if val: - x = x.flatten() - print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % ( - np.mean(x), 
np.min(x), np.max(x), np.median(x), np.std(x))) - - -def mkdirs(paths): - """create empty directories if they don't exist - - Parameters: - paths (str list) -- a list of directory paths - """ - if isinstance(paths, list) and not isinstance(paths, str): - for path in paths: - mkdir(path) - else: - mkdir(paths) - - -def mkdir(path): - """create a single empty directory if it didn't exist - - Parameters: - path (str) -- a single directory path - """ - if not os.path.exists(path): - os.makedirs(path) - - -def correct_resize_label(t, size): - device = t.device - t = t.detach().cpu() - resized = [] - for i in range(t.size(0)): - one_t = t[i, :1] - one_np = np.transpose(one_t.numpy().astype(np.uint8), (1, 2, 0)) - one_np = one_np[:, :, 0] - one_image = Image.fromarray(one_np).resize(size, Image.NEAREST) - resized_t = torch.from_numpy(np.array(one_image)).long() - resized.append(resized_t) - return torch.stack(resized, dim=0).to(device) - - -def correct_resize(t, size, mode=Image.BICUBIC): - device = t.device - t = t.detach().cpu() - resized = [] - for i in range(t.size(0)): - one_t = t[i:i + 1] - one_image = Image.fromarray(tensor2im(one_t)).resize(size, Image.BICUBIC) - resized_t = torchvision.transforms.functional.to_tensor(one_image) * 2 - 1.0 - resized.append(resized_t) - return torch.stack(resized, dim=0).to(device) - -def draw_landmarks(img, landmark, color='r', step=2): - """ - Return: - img -- numpy.array, (B, H, W, 3) img with landmark, RGB order, range (0, 255) - - - Parameters: - img -- numpy.array, (B, H, W, 3), RGB order, range (0, 255) - landmark -- numpy.array, (B, 68, 2), y direction is opposite to v direction - color -- str, 'r' or 'b' (red or blue) - """ - if color =='r': - c = np.array([255., 0, 0]) - else: - c = np.array([0, 0, 255.]) - - _, H, W, _ = img.shape - img, landmark = img.copy(), landmark.copy() - landmark[..., 1] = H - 1 - landmark[..., 1] - landmark = np.round(landmark).astype(np.int32) - for i in range(landmark.shape[1]): - x, y = landmark[:, i, 0], landmark[:, i, 1] - for j in range(-step, step): - for k in range(-step, step): - u = np.clip(x + j, 0, W - 1) - v = np.clip(y + k, 0, H - 1) - for m in range(landmark.shape[0]): - img[m, v[m], u[m]] = c - return img diff --git a/spaces/fclong/summary/fengshen/examples/sequence_tagging/finetune_sequence_tagging.sh b/spaces/fclong/summary/fengshen/examples/sequence_tagging/finetune_sequence_tagging.sh deleted file mode 100644 index a477ed89852a4ec96139e85d7e44ed476aaeab76..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/examples/sequence_tagging/finetune_sequence_tagging.sh +++ /dev/null @@ -1,83 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=zen2_base_cmeee # create a short name for your job -#SBATCH --nodes=1 # node count -#SBATCH --ntasks-per-node=1 # total number of tasks across all nodes -#SBATCH --cpus-per-task=30 # cpu-cores per task (>1 if multi-threaded tasks) -#SBATCH --gres=gpu:1 # number of gpus per node -#SBATCH --mail-type=ALL # send email when job begins, ends or failed etc. -#SBATCH -o /cognitive_comp/lujunyu/experiments/ner_finetune/zen2_base_cmeee/%x-%j.log # output and error file name (%x=job name, %j=job id) -#SBATCH -p hgx - - -ROOT_DIR=../../workspace -export TORCH_EXTENSIONS_DIR=${ROOT_DIR}/torch_extendsions - -MODEL_NAME=ner_bert_base -TASK=cmeee - -MODEL_NAME=bert-base -MODEL_ROOT_DIR=$ROOT_DIR/${MODEL_NAME} -if [ ! 
-d ${MODEL_ROOT_DIR} ];then - mkdir ${MODEL_ROOT_DIR} -fi - -NNODES=1 -GPUS_PER_NODE=1 - -MICRO_BATCH_SIZE=16 - -ZERO_STAGE=1 -STRATEGY=deepspeed_stage_${ZERO_STAGE} - -DATA_ARGS="\ - --num_workers 8 \ - --dataloader_workers 8 \ - --train_batchsize $MICRO_BATCH_SIZE \ - --val_batchsize $MICRO_BATCH_SIZE \ - --test_batchsize $MICRO_BATCH_SIZE \ - " - -MODEL_ARGS="\ - --model_path $MODEL_ROOT_DIR/pretrain \ - --data_dir /cognitive_comp/lujunyu/data_zh/NER_Aligned/weibo \ - --model_type bert \ - --decode_type linear \ - --learning_rate 5e-5 \ - --weight_decay 0.05 \ - --warmup_ratio 0.1 \ - " - -MODEL_CHECKPOINT_ARGS="\ - --save_top_k -1 \ - --save_last \ - --every_n_train_steps 100 \ - --save_ckpt_path ${MODEL_ROOT_DIR} \ - " - -TRAINER_ARGS="\ - --max_epochs 30 \ - --gpus $GPUS_PER_NODE \ - --num_nodes $NNODES \ - --strategy deepspeed_stage_${ZERO_STAGE} \ - --check_val_every_n_epoch 1 \ - --default_root_dir ${MODEL_ROOT_DIR} \ - --replace_sampler_ddp False \ - " - - -export options=" \ - $DATA_ARGS \ - $MODEL_ARGS \ - $MODEL_CHECKPOINT_ARGS \ - $TRAINER_ARGS \ -" - -python3 finetune_sequence_tagging.py $options - - -# SINGULARITY_PATH=/cognitive_comp/ganruyi/pytorch21_06_py3_docker_image_v2.sif -# python3 $SCRIPT_PATH $options -# source activate base -# singularity exec --nv -B /cognitive_comp/:/cognitive_comp/ $SINGULARITY_PATH /home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options -# /home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options - diff --git a/spaces/fengmuxi/ChatGpt-Web/app/components/markdown.tsx b/spaces/fengmuxi/ChatGpt-Web/app/components/markdown.tsx deleted file mode 100644 index 3328d11d5f83d0f79689dcbc67d8756132763cb9..0000000000000000000000000000000000000000 --- a/spaces/fengmuxi/ChatGpt-Web/app/components/markdown.tsx +++ /dev/null @@ -1,178 +0,0 @@ -import ReactMarkdown from "react-markdown"; -import "katex/dist/katex.min.css"; -import RemarkMath from "remark-math"; -import RemarkBreaks from "remark-breaks"; -import RehypeKatex from "rehype-katex"; -import RemarkGfm from "remark-gfm"; -import RehypeHighlight from "rehype-highlight"; -import mermaid from "mermaid"; -import { useRef, useState, RefObject, useEffect } from "react"; -import { copyToClipboard } from "../utils"; - -import LoadingIcon from "../icons/three-dots.svg"; -import React from "react"; - -export function Mermaid(props: { code: string; onError: () => void }) { - const ref = useRef(null); - - useEffect(() => { - if (props.code && ref.current) { - mermaid - .run({ - nodes: [ref.current], - }) - .catch((e) => { - props.onError(); - console.error("[Mermaid] ", e.message); - }); - } - }, [props]); - - function viewSvgInNewWindow() { - const svg = ref.current?.querySelector("svg"); - if (!svg) return; - const text = new XMLSerializer().serializeToString(svg); - const blob = new Blob([text], { type: "image/svg+xml" }); - const url = URL.createObjectURL(blob); - const win = window.open(url); - if (win) { - win.onload = () => URL.revokeObjectURL(url); - } - } - - return ( -
        viewSvgInNewWindow()} - > - {props.code} -
        - ); -} - -export function PreCode(props: { children: any }) { - const ref = useRef(null); - const [mermaidCode, setMermaidCode] = useState(""); - - useEffect(() => { - if (!ref.current) return; - const mermaidDom = ref.current.querySelector("code.language-mermaid"); - if (mermaidDom) { - setMermaidCode((mermaidDom as HTMLElement).innerText); - } - }, [props.children]); - - if (mermaidCode) { - return setMermaidCode("")} />; - } - return ( -
        -       {
        -          if (ref.current) {
        -            const code = ref.current.innerText;
        -            copyToClipboard(code);
        -          }
        -        }}
        -      >
        -      {props.children}
        -    
        - ); -} - -function _MarkDownContent(props: { content: string }) { - return ( - { - const href = aProps.href || ""; - const isInternal = /^\/#/i.test(href); - const target = isInternal ? "_self" : aProps.target ?? "_blank"; - return ; - }, - }} - > - {props.content} - - ); -} - -export const MarkdownContent = React.memo(_MarkDownContent); - -export function Markdown( - props: { - content: string; - loading?: boolean; - fontSize?: number; - parentRef: RefObject; - defaultShow?: boolean; - } & React.DOMAttributes, -) { - const mdRef = useRef(null); - const renderedHeight = useRef(0); - const inView = useRef(!!props.defaultShow); - - const parent = props.parentRef.current; - const md = mdRef.current; - - const checkInView = () => { - if (parent && md) { - const parentBounds = parent.getBoundingClientRect(); - const twoScreenHeight = Math.max(500, parentBounds.height * 2); - const mdBounds = md.getBoundingClientRect(); - const parentTop = parentBounds.top - twoScreenHeight; - const parentBottom = parentBounds.bottom + twoScreenHeight; - const isOverlap = - Math.max(parentTop, mdBounds.top) <= - Math.min(parentBottom, mdBounds.bottom); - inView.current = isOverlap; - } - - if (inView.current && md) { - renderedHeight.current = Math.max( - renderedHeight.current, - md.getBoundingClientRect().height, - ); - } - }; - - setTimeout(() => checkInView(), 1); - - return ( -
        0 - ? renderedHeight.current - : "auto", - }} - ref={mdRef} - onContextMenu={props.onContextMenu} - onDoubleClickCapture={props.onDoubleClickCapture} - > - {inView.current && - (props.loading ? ( - - ) : ( - - ))} -
        - ); -} diff --git a/spaces/fffiloni/Image-Caption-2-Shap-E/model.py b/spaces/fffiloni/Image-Caption-2-Shap-E/model.py deleted file mode 100644 index e1a5da2b3e5bb2938b41fbfaa222155c985d8836..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/Image-Caption-2-Shap-E/model.py +++ /dev/null @@ -1,158 +0,0 @@ -import tempfile - -import numpy as np -import gradio as gr -import torch -import trimesh -from shap_e.diffusion.gaussian_diffusion import diffusion_from_config -from shap_e.diffusion.sample import sample_latents -from shap_e.models.download import load_config, load_model -from shap_e.models.nn.camera import (DifferentiableCameraBatch, - DifferentiableProjectiveCamera) -from shap_e.models.transmitter.base import Transmitter, VectorDecoder -from shap_e.rendering.torch_mesh import TorchMesh -from shap_e.util.collections import AttrDict -from shap_e.util.image_util import load_image - -caption = gr.load(name="spaces/fffiloni/CoCa-clone") - -def create_image_caption(image_init): - cap = caption(image_init, "Nucleus sampling", 1.2, 0.5, 5, 20, fn_index=0) - print("cap: " + cap) - return cap - - -# Copied from https://github.com/openai/shap-e/blob/d99cedaea18e0989e340163dbaeb4b109fa9e8ec/shap_e/util/notebooks.py#L15-L42 -def create_pan_cameras(size: int, - device: torch.device) -> DifferentiableCameraBatch: - origins = [] - xs = [] - ys = [] - zs = [] - for theta in np.linspace(0, 2 * np.pi, num=20): - z = np.array([np.sin(theta), np.cos(theta), -0.5]) - z /= np.sqrt(np.sum(z**2)) - origin = -z * 4 - x = np.array([np.cos(theta), -np.sin(theta), 0.0]) - y = np.cross(z, x) - origins.append(origin) - xs.append(x) - ys.append(y) - zs.append(z) - return DifferentiableCameraBatch( - shape=(1, len(xs)), - flat_camera=DifferentiableProjectiveCamera( - origin=torch.from_numpy(np.stack(origins, - axis=0)).float().to(device), - x=torch.from_numpy(np.stack(xs, axis=0)).float().to(device), - y=torch.from_numpy(np.stack(ys, axis=0)).float().to(device), - z=torch.from_numpy(np.stack(zs, axis=0)).float().to(device), - width=size, - height=size, - x_fov=0.7, - y_fov=0.7, - ), - ) - - -# Copied from https://github.com/openai/shap-e/blob/8625e7c15526d8510a2292f92165979268d0e945/shap_e/util/notebooks.py#LL64C1-L76C33 -@torch.no_grad() -def decode_latent_mesh( - xm: Transmitter | VectorDecoder, - latent: torch.Tensor, -) -> TorchMesh: - decoded = xm.renderer.render_views( - AttrDict(cameras=create_pan_cameras( - 2, latent.device)), # lowest resolution possible - params=(xm.encoder if isinstance(xm, Transmitter) else - xm).bottleneck_to_params(latent[None]), - options=AttrDict(rendering_mode='stf', render_with_direction=False), - ) - return decoded.raw_meshes[0] - - -class Model: - def __init__(self): - self.device = torch.device( - 'cuda' if torch.cuda.is_available() else 'cpu') - self.xm = load_model('transmitter', device=self.device) - self.diffusion = diffusion_from_config(load_config('diffusion')) - self.model_text = None - self.model_image = None - - def load_model(self, model_name: str) -> None: - assert model_name in ['text300M', 'image300M'] - if model_name == 'text300M' and self.model_text is None: - self.model_text = load_model(model_name, device=self.device) - elif model_name == 'image300M' and self.model_image is None: - self.model_image = load_model(model_name, device=self.device) - - def to_glb(self, latent: torch.Tensor) -> str: - ply_path = tempfile.NamedTemporaryFile(suffix='.ply', - delete=False, - mode='w+b') - decode_latent_mesh(self.xm, latent).tri_mesh().write_ply(ply_path) 
- - mesh = trimesh.load(ply_path.name) - rot = trimesh.transformations.rotation_matrix(-np.pi / 2, [1, 0, 0]) - mesh = mesh.apply_transform(rot) - rot = trimesh.transformations.rotation_matrix(np.pi, [0, 1, 0]) - mesh = mesh.apply_transform(rot) - - mesh_path = tempfile.NamedTemporaryFile(suffix='.glb', delete=False) - mesh.export(mesh_path.name, file_type='glb') - - return mesh_path.name - - def run_text(self, - image: str, - seed: int = 0, - guidance_scale: float = 15.0, - num_steps: int = 64) -> str: - self.load_model('text300M') - torch.manual_seed(seed) - - prompt = create_image_caption(image) - - latents = sample_latents( - batch_size=1, - model=self.model_text, - diffusion=self.diffusion, - guidance_scale=guidance_scale, - model_kwargs=dict(texts=[prompt]), - progress=True, - clip_denoised=True, - use_fp16=True, - use_karras=True, - karras_steps=num_steps, - sigma_min=1e-3, - sigma_max=160, - s_churn=0, - ) - return prompt, self.to_glb(latents[0]) - - def run_image(self, - image_path: str, - seed: int = 0, - guidance_scale: float = 3.0, - num_steps: int = 64) -> str: - self.load_model('image300M') - torch.manual_seed(seed) - - image = load_image(image_path) - latents = sample_latents( - batch_size=1, - model=self.model_image, - diffusion=self.diffusion, - guidance_scale=guidance_scale, - model_kwargs=dict(images=[image]), - progress=True, - clip_denoised=True, - use_fp16=True, - use_karras=True, - karras_steps=num_steps, - sigma_min=1e-3, - sigma_max=160, - s_churn=0, - ) - return self.to_glb(latents[0]) diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/content-type/HISTORY.md b/spaces/fffiloni/controlnet-animation-doodle/node_modules/content-type/HISTORY.md deleted file mode 100644 index 458367139eb9f0af3daa5449ff0a3d9e2e189582..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/content-type/HISTORY.md +++ /dev/null @@ -1,29 +0,0 @@ -1.0.5 / 2023-01-29 -================== - - * perf: skip value escaping when unnecessary - -1.0.4 / 2017-09-11 -================== - - * perf: skip parameter parsing when no parameters - -1.0.3 / 2017-09-10 -================== - - * perf: remove argument reassignment - -1.0.2 / 2016-05-09 -================== - - * perf: enable strict mode - -1.0.1 / 2015-02-13 -================== - - * Improve missing `Content-Type` header error message - -1.0.0 / 2015-02-01 -================== - - * Initial implementation, derived from `media-typer@0.3.0` diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/function-bind/README.md b/spaces/fffiloni/controlnet-animation-doodle/node_modules/function-bind/README.md deleted file mode 100644 index 81862a02cb940c85d931749fd507165b1bc54058..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/function-bind/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# function-bind - - - - - -Implementation of function.prototype.bind - -## Example - -I mainly do this for unit tests I run on phantomjs. 
-PhantomJS does not have Function.prototype.bind :( - -```js -Function.prototype.bind = require("function-bind") -``` - -## Installation - -`npm install function-bind` - -## Contributors - - - Raynos - -## MIT Licenced - - [travis-svg]: https://travis-ci.org/Raynos/function-bind.svg - [travis-url]: https://travis-ci.org/Raynos/function-bind - [npm-badge-svg]: https://badge.fury.io/js/function-bind.svg - [npm-url]: https://npmjs.org/package/function-bind - [5]: https://coveralls.io/repos/Raynos/function-bind/badge.png - [6]: https://coveralls.io/r/Raynos/function-bind - [7]: https://gemnasium.com/Raynos/function-bind.png - [8]: https://gemnasium.com/Raynos/function-bind - [deps-svg]: https://david-dm.org/Raynos/function-bind.svg - [deps-url]: https://david-dm.org/Raynos/function-bind - [dev-deps-svg]: https://david-dm.org/Raynos/function-bind/dev-status.svg - [dev-deps-url]: https://david-dm.org/Raynos/function-bind#info=devDependencies - [11]: https://ci.testling.com/Raynos/function-bind.png - [12]: https://ci.testling.com/Raynos/function-bind diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/object-inspect/test/number.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/object-inspect/test/number.js deleted file mode 100644 index 8f287e8e2ab93785108de2e82e5b359ffb524f02..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/object-inspect/test/number.js +++ /dev/null @@ -1,58 +0,0 @@ -var test = require('tape'); -var v = require('es-value-fixtures'); -var forEach = require('for-each'); - -var inspect = require('../'); - -test('negative zero', function (t) { - t.equal(inspect(0), '0', 'inspect(0) === "0"'); - t.equal(inspect(Object(0)), 'Object(0)', 'inspect(Object(0)) === "Object(0)"'); - - t.equal(inspect(-0), '-0', 'inspect(-0) === "-0"'); - t.equal(inspect(Object(-0)), 'Object(-0)', 'inspect(Object(-0)) === "Object(-0)"'); - - t.end(); -}); - -test('numericSeparator', function (t) { - forEach(v.nonBooleans, function (nonBoolean) { - t['throws']( - function () { inspect(true, { numericSeparator: nonBoolean }); }, - TypeError, - inspect(nonBoolean) + ' is not a boolean' - ); - }); - - t.test('3 digit numbers', function (st) { - var failed = false; - for (var i = -999; i < 1000; i += 1) { - var actual = inspect(i); - var actualSepNo = inspect(i, { numericSeparator: false }); - var actualSepYes = inspect(i, { numericSeparator: true }); - var expected = String(i); - if (actual !== expected || actualSepNo !== expected || actualSepYes !== expected) { - failed = true; - t.equal(actual, expected); - t.equal(actualSepNo, expected); - t.equal(actualSepYes, expected); - } - } - - st.notOk(failed, 'all 3 digit numbers passed'); - - st.end(); - }); - - t.equal(inspect(1e3), '1000', '1000'); - t.equal(inspect(1e3, { numericSeparator: false }), '1000', '1000, numericSeparator false'); - t.equal(inspect(1e3, { numericSeparator: true }), '1_000', '1000, numericSeparator true'); - t.equal(inspect(-1e3), '-1000', '-1000'); - t.equal(inspect(-1e3, { numericSeparator: false }), '-1000', '-1000, numericSeparator false'); - t.equal(inspect(-1e3, { numericSeparator: true }), '-1_000', '-1000, numericSeparator true'); - - t.equal(inspect(1234.5678, { numericSeparator: true }), '1_234.567_8', 'fractional numbers get separators'); - t.equal(inspect(1234.56789, { numericSeparator: true }), '1_234.567_89', 'fractional numbers get separators'); - t.equal(inspect(1234.567891, { numericSeparator: true }), '1_234.567_891', 'fractional 
numbers get separators'); - - t.end(); -}); diff --git a/spaces/florim/MedGPT/autogpt/commands/analyze_code.py b/spaces/florim/MedGPT/autogpt/commands/analyze_code.py deleted file mode 100644 index e02ea4c5b4ba53530e559d1cab7a07b8e3c7c638..0000000000000000000000000000000000000000 --- a/spaces/florim/MedGPT/autogpt/commands/analyze_code.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code evaluation module.""" -from __future__ import annotations - -from autogpt.llm_utils import call_ai_function - - -def analyze_code(code: str) -> list[str]: - """ - A function that takes in a string and returns a response from create chat - completion api call. - - Parameters: - code (str): Code to be evaluated. - Returns: - A result string from create chat completion. A list of suggestions to - improve the code. - """ - - function_string = "def analyze_code(code: str) -> List[str]:" - args = [code] - description_string = ( - "Analyzes the given code and returns a list of suggestions" " for improvements." - ) - - return call_ai_function(function_string, args, description_string) diff --git a/spaces/freddyaboulton/blocks-js-methods/README.md b/spaces/freddyaboulton/blocks-js-methods/README.md deleted file mode 100644 index 83b298f8e3a1442bc87662f9cf51ee9a1284712e..0000000000000000000000000000000000000000 --- a/spaces/freddyaboulton/blocks-js-methods/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Blocks Js Methods -emoji: 👁 -colorFrom: pink -colorTo: gray -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/garima-mahato/ShakespearesWeirdTales/model.py b/spaces/garima-mahato/ShakespearesWeirdTales/model.py deleted file mode 100644 index 31286fb6862b74d0e09e516ff9b4aad9a532f8fe..0000000000000000000000000000000000000000 --- a/spaces/garima-mahato/ShakespearesWeirdTales/model.py +++ /dev/null @@ -1,166 +0,0 @@ -import torch -import torch.nn as nn -from torch.nn import functional as F - -# hyperparameters -batch_size = 64 # how many independent sequences will we process in parallel? -block_size = 256 # what is the maximum context length for predictions? 
-max_iters = 5000 -eval_interval = 500 -learning_rate = 3e-4 -device = 'cuda' if torch.cuda.is_available() else 'cpu' -eval_iters = 200 -n_embd = 384 -n_head = 6 -n_layer = 6 -dropout = 0.2 -# ------------ - -torch.manual_seed(1337) - -with open('input.txt', 'r', encoding='utf-8') as f: - text = f.read() - -# here are all the unique characters that occur in this text -chars = sorted(list(set(text))) -vocab_size = len(chars) -# create a mapping from characters to integers -stoi = { ch:i for i,ch in enumerate(chars) } -itos = { i:ch for i,ch in enumerate(chars) } -encode = lambda s: [stoi[c] for c in s] # encoder: take a string, output a list of integers -decode = lambda l: ''.join([itos[i] for i in l]) # decoder: take a list of integers, output a string - -class Head(nn.Module): - """ one head of self-attention """ - - def __init__(self, head_size): - super().__init__() - self.key = nn.Linear(n_embd, head_size, bias=False) - self.query = nn.Linear(n_embd, head_size, bias=False) - self.value = nn.Linear(n_embd, head_size, bias=False) - self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size))) - - self.dropout = nn.Dropout(dropout) - - def forward(self, x): - # input of size (batch, time-step, channels) - # output of size (batch, time-step, head size) - B,T,C = x.shape - k = self.key(x) # (B,T,hs) - q = self.query(x) # (B,T,hs) - # compute attention scores ("affinities") - wei = q @ k.transpose(-2,-1) * k.shape[-1]**-0.5 # (B, T, hs) @ (B, hs, T) -> (B, T, T) - wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf')) # (B, T, T) - wei = F.softmax(wei, dim=-1) # (B, T, T) - wei = self.dropout(wei) - # perform the weighted aggregation of the values - v = self.value(x) # (B,T,hs) - out = wei @ v # (B, T, T) @ (B, T, hs) -> (B, T, hs) - return out - -class MultiHeadAttention(nn.Module): - """ multiple heads of self-attention in parallel """ - - def __init__(self, num_heads, head_size): - super().__init__() - self.heads = nn.ModuleList([Head(head_size) for _ in range(num_heads)]) - self.proj = nn.Linear(head_size * num_heads, n_embd) - self.dropout = nn.Dropout(dropout) - - def forward(self, x): - out = torch.cat([h(x) for h in self.heads], dim=-1) - out = self.dropout(self.proj(out)) - return out - -class FeedFoward(nn.Module): - """ a simple linear layer followed by a non-linearity """ - - def __init__(self, n_embd): - super().__init__() - self.net = nn.Sequential( - nn.Linear(n_embd, 4 * n_embd), - nn.ReLU(), - nn.Linear(4 * n_embd, n_embd), - nn.Dropout(dropout), - ) - - def forward(self, x): - return self.net(x) - -class Block(nn.Module): - """ Transformer block: communication followed by computation """ - - def __init__(self, n_embd, n_head): - # n_embd: embedding dimension, n_head: the number of heads we'd like - super().__init__() - head_size = n_embd // n_head - self.sa = MultiHeadAttention(n_head, head_size) - self.ffwd = FeedFoward(n_embd) - self.ln1 = nn.LayerNorm(n_embd) - self.ln2 = nn.LayerNorm(n_embd) - - def forward(self, x): - x = x + self.sa(self.ln1(x)) - x = x + self.ffwd(self.ln2(x)) - return x - -class GPTLanguageModel(nn.Module): - - def __init__(self): - super().__init__() - # each token directly reads off the logits for the next token from a lookup table - self.token_embedding_table = nn.Embedding(vocab_size, n_embd) - self.position_embedding_table = nn.Embedding(block_size, n_embd) - self.blocks = nn.Sequential(*[Block(n_embd, n_head=n_head) for _ in range(n_layer)]) - self.ln_f = nn.LayerNorm(n_embd) # final layer norm - self.lm_head = 
nn.Linear(n_embd, vocab_size) - - # better init, not covered in the original GPT video, but important, will cover in followup video - self.apply(self._init_weights) - - def _init_weights(self, module): - if isinstance(module, nn.Linear): - torch.nn.init.normal_(module.weight, mean=0.0, std=0.02) - if module.bias is not None: - torch.nn.init.zeros_(module.bias) - elif isinstance(module, nn.Embedding): - torch.nn.init.normal_(module.weight, mean=0.0, std=0.02) - - def forward(self, idx, targets=None): - B, T = idx.shape - - # idx and targets are both (B,T) tensor of integers - tok_emb = self.token_embedding_table(idx) # (B,T,C) - pos_emb = self.position_embedding_table(torch.arange(T, device=device)) # (T,C) - x = tok_emb + pos_emb # (B,T,C) - x = self.blocks(x) # (B,T,C) - x = self.ln_f(x) # (B,T,C) - logits = self.lm_head(x) # (B,T,vocab_size) - - if targets is None: - loss = None - else: - B, T, C = logits.shape - logits = logits.view(B*T, C) - targets = targets.view(B*T) - loss = F.cross_entropy(logits, targets) - - return logits, loss - - def generate(self, idx, max_new_tokens): - # idx is (B, T) array of indices in the current context - for _ in range(max_new_tokens): - # crop idx to the last block_size tokens - idx_cond = idx[:, -block_size:] - # get the predictions - logits, loss = self(idx_cond) - # focus only on the last time step - logits = logits[:, -1, :] # becomes (B, C) - # apply softmax to get probabilities - probs = F.softmax(logits, dim=-1) # (B, C) - # sample from the distribution - idx_next = torch.multinomial(probs, num_samples=1) # (B, 1) - # append sampled index to the running sequence - idx = torch.cat((idx, idx_next), dim=1) # (B, T+1) - return idx - diff --git a/spaces/gdn/Question-Answer-Demo/README.md b/spaces/gdn/Question-Answer-Demo/README.md deleted file mode 100644 index 4984e34d330688a9804241ecd96e45000dac05e6..0000000000000000000000000000000000000000 --- a/spaces/gdn/Question-Answer-Demo/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Question Answer Demo -emoji: 👁 -colorFrom: pink -colorTo: indigo -sdk: gradio -sdk_version: 2.9.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/core/seg/builder.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/core/seg/builder.py deleted file mode 100644 index db61f03d4abb2072f2532ce4429c0842495e015b..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/mmseg/core/seg/builder.py +++ /dev/null @@ -1,8 +0,0 @@ -from annotator.uniformer.mmcv.utils import Registry, build_from_cfg - -PIXEL_SAMPLERS = Registry('pixel sampler') - - -def build_pixel_sampler(cfg, **default_args): - """Build pixel sampler for segmentation map.""" - return build_from_cfg(cfg, PIXEL_SAMPLERS, default_args) diff --git a/spaces/golda/gagal-jantung-2023/main.py b/spaces/golda/gagal-jantung-2023/main.py deleted file mode 100644 index 19c9783422b4b5034056493066105c218797b15e..0000000000000000000000000000000000000000 --- a/spaces/golda/gagal-jantung-2023/main.py +++ /dev/null @@ -1,11 +0,0 @@ -import streamlit as st -import eda -import prediction - -navigation = st.sidebar.selectbox('Pilih Halaman: ', ('EDA', 'predict')) - -if navigation == 'EDA': - eda.run() -else: - prediction.run() - diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Design on an iPad with FULL FormIt 2006 [32-64Bit] and Sync Your 
Projects with the Cloud.md b/spaces/gotiQspiryo/whisper-ui/examples/Design on an iPad with FULL FormIt 2006 [32-64Bit] and Sync Your Projects with the Cloud.md deleted file mode 100644 index 3e30e1aada9f937545f6eb8cbb4e8e34c9744692..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Design on an iPad with FULL FormIt 2006 [32-64Bit] and Sync Your Projects with the Cloud.md +++ /dev/null @@ -1,7 +0,0 @@ - -

The recent discussions on the proposed version 3 of the GNU General Public License have been well documented here and elsewhere. This proposal has clearly exposed some differences of opinion within the development community, with the anti-DRM provisions being at the core of the debate. The addition of these provisions has created a fair amount of ill will against the Free Software Foundation; opposition to them appears to have created similar feelings in the opposite direction.

In theory, this disagreement should not come about. GPLv2 contains the following language:

9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.

If the FSF is adhering to its part of this bargain, then anybody who bought into the "spirit" of GPLv2 should not have trouble with this revision. So, clearly, those who oppose the GPLv3 draft - many of whom have released vast amounts of code under GPLv2 - believe that the revisions are not "similar in spirit." Some have gone as far as to accuse the FSF of using its power over the GPL to push its founder's radical agenda onto the code of large numbers of unwilling developers.

That accusation is probably over the top. The FSF is, with GPLv3, attempting to respond to a number of problems as it sees them. Software patents are a clear problem, and the GPLv3 draft tries to mitigate that problem somewhat. International applicability of the license has not yet proved to be a problem in practice, but it is clearly something that reasonable lawyers can worry about. It seems worth fixing the language before some court somewhere on the planet decides that the GPLv2 incantations only work in the US. And so on.

The FSF also, clearly, sees locked-down systems as a problem. It is interesting that this has not always been the case; back in 2000, LWN took issue with an interview with Richard Stallman, where he said:

I'm less concerned with what happens with embedded systems than I am with real computers. The real reason for this is the moral issues about software freedom are much more significant for computers that users see as a computer. And so I'm not really concerned with what's running inside my microwave oven.

(This interview has disappeared off the original site, but the Wayback Machine has it).

Most TiVo owners probably see their gadget as being more like a microwave oven than a computer. It is not that TiVo has come along since then (the 2000 LWN article mentions it); what has changed is the FSF's - or, at least, Richard Stallman's - position on it.

There are few people who disagree with the idea that locked-down systems can be a problem. Beyond the fact that such devices will always deny users the full potential of the hardware, they can spy on us, deny fair use rights under copyright law, lock us out of our own data, prevent us from fixing serious problems, and so on. Locked-down systems are designed to implement the goals of somebody other than the ultimate owner of the device. Such systems are undesirable at best, and outright evil at their worst.

The disagreement is over how this problem should be addressed. The two sides, insofar as there are two clear sides, would appear to be these:

        • The anti-DRM provisions are a licensing-based response to a legal and market problem. They prohibit legitimate uses of the technology (examples could be ensuring that certified software runs on voting machines or systems - like X-ray machines - which could hurt people if the wrong software is run) while failing to solve the real problem. These provisions are trivially circumvented by putting the software in ROM, do nothing about the DRM being incorporated into all aspects of computing systems, and would primarily result in Linux being replaced with proprietary software in the embedded market. These provisions are a new restriction on how the software can be used, and, thus, are not "similar in spirit" to GPLv2.
        • The new provisions are needed to preserve the user's freedom to modify, rebuild, and replace the original software on devices that this user owns. Failure to provide encryption keys when the hardware requires them is a fundamental failure to live up to the moral requirements of using free software and, according to some, is already a violation of GPLv2. DRM is an evil which threatens to take away many of the freedoms we have worked so hard to assure for ourselves; it must be fought whenever possible and it certainly should not be supported by free software. The anti-DRM provisions simply reaffirm the freedoms we had thought the GPL already guaranteed to us, and, thus, they are very much "similar in spirit" to GPLv2.
This logjam looks hard to break. Your editor, in his infinite humility, would like to offer a couple of suggestions, however:
        • Reasonable people who believe in free software, and who have put much of their lives into the creation of that software, can support either of the two viewpoints above (or other viewpoints entirely). They are not (necessarily) free software fundamentalist radicals, corporate stooges, people on power trips, or any of those other mean and nasty things they have been called in recent times. We can discuss this issue without doubting each others' motives and without the need for personal attacks.
        • The FSF clearly has some strong feelings about what it wants to achieve with this license revision, and there are issues it does not want to back down on. There have also been signs, however, that the FSF is listening more than it has in the creation of any other license. This process is not done yet, there is no GPLv3 at this time. Continued, polite participation in the process would seem to be called for.
Finally, while your editor is standing on this nice soapbox... The anti-DRM language was very appealing when it first came out. Your editor does not much appreciate the idea of some vendor locking up his software and selling it back to him in a non-modifiable and potentially hostile form. It is a violation of the social contract (if not the legal license) under which the software was contributed. But the attempt to address this problem in GPLv3 carries a high risk of splitting the development community while doing very little to solve the real problem. Dropping that language could help to bring the community back together behind the new license, leaving us united to fight DRM (and numerous other attacks on our freedom) in more effective ways. The FSF may want to consider whether, in the long run, its goals would be better served by a license which lacks this language. Such a license might be closer to the spirit which brought this community together in the first place.

Similar in spirit? Posted Oct 5, 2006 0:55 UTC (Thu) by Sombrio (guest, #26942)

        -

"I have a right to purchase a copy of a work, hold on to it
        for 95 years (or whatever the current limit is), and then
        make free use of that work in any way I choose. DRM curtails
        that right."
        No, it doesn't. At that point you are free to circumvent the DRM, because the work is no longer under copyright. Copyright does not require them to provide you with a copy when the copyright expires, it just bars you from making your own copy in the meantime.
        "I have a right to copy limited portions of a work still in
        copyright for use in a review or research. DRM curtails that
        right."
        Yes, you have a right to copy limited portions of a work. However, the copyright owner is NOT required to provide you with those portions. If you make a copy (say by videotaping a TV playback) to use in a review or research, that is not infringing. But there is nothing in the law that requires that you be able to copy such excerpts from a piece of licensed media that you own.
        "I have a right to privacy. Spyware embedded in a device sold
        for an entirely different purpose, which does not permit me
        to remove the spyware without damaging the device, curtails
        that right."
        Now that's a good argument. That's one to take to your representative and ask for legislation that specifically protects consumers against such reporting. It has, however, nothing specifically to do with DRM.
        I think DRM makes content less desirable. People should object to it and push back on the content owners to not use it, just as consumers once successfully marginalized copy protection on software. I wouldn't mind seeing a mandatory licensing law that barred DRM and required payment of a small royalty on blank media, as consumers and device manufacturers also once successfully demanded. I would also love to see the DMCA repealed.
        I just don't think the anti-DRM language in GPLv3 draft 2 will accomplish anything other than causing some amount of fracturing within the community.
Similar in spirit? Posted Oct 5, 2006 16:40 UTC (Thu) by felixfix (subscriber, #242)

        -


        That worries me too, but I don't stay up thinking about it at night
        because I think things just look that way.
        The kernel community is one of the most visible and vocal parties when it
        actually has something to say in the public. And while the majority of
        the stakeholders in GPLv3 have been relatively quiet, preferring to work
        within the Free Software Foundation's open license drafting process, the
        kernel developers have limited their response to complaint, telling the
        press GPLv3 is wrong, telling the press FSF is wrong, drafting a document
        (thankfully some of them at least went this far, but they really ought to
        participate officially rather than lob press releases), and further
        complaint.
        It's no surprise to me that it appears as if we're split right down the
        middle. Anyone unhappy about the license is going to scream, but anyone
        pleased with it is going to sit back with a smile (think, Tux just got
        laid!)
        There seem to be a lot of people that are really happy with the way
        things are going. We still haven't reached agreement, but that's why
        further draft(s) are coming. Sun and Nokia, for example, are encouraged
        and predict even further improvement.
        The _real_ danger isn't from the GPLv3 license - it's from the GPLv3
        license FUD. If you want to make sure we don't get split, focus on
        de-fusing emotional tension wherever you encounter it. Discuss the
        license but encourage real participation. And make sure that people
        realize that preemptive reactions to an unreleased license are absurd,
        especially if that stakeholder refuses to officially participate.
        Cheers!
Similar in spirit? Posted Oct 6, 2006 14:37 UTC (Fri) by mingo (subscriber, #31122)

        -
        -
        \ No newline at end of file diff --git a/spaces/gradio/HuBERT/examples/wav2vec/unsupervised/scripts/g2p_wrd_to_phn.py b/spaces/gradio/HuBERT/examples/wav2vec/unsupervised/scripts/g2p_wrd_to_phn.py deleted file mode 100644 index 2e31c307bd67d10941150160c7fb8c9e085ac5d9..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/wav2vec/unsupervised/scripts/g2p_wrd_to_phn.py +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env python3 -u -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import sys - -from g2p_en import G2p - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--compact", - action="store_true", - help="if set, compacts phones", - ) - args = parser.parse_args() - - compact = args.compact - - wrd_to_phn = {} - g2p = G2p() - for line in sys.stdin: - words = line.strip().split() - phones = [] - for w in words: - if w not in wrd_to_phn: - wrd_to_phn[w] = g2p(w) - if compact: - wrd_to_phn[w] = [ - p[:-1] if p[-1].isnumeric() else p for p in wrd_to_phn[w] - ] - phones.extend(wrd_to_phn[w]) - try: - print(" ".join(phones)) - except: - print(wrd_to_phn, words, phones, file=sys.stderr) - raise - - -if __name__ == "__main__": - main() diff --git a/spaces/gradio/HuBERT/examples/wmt20/README.md b/spaces/gradio/HuBERT/examples/wmt20/README.md deleted file mode 100644 index b4f2874652f8be19998a65faa1d9276d8017ec59..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/wmt20/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# WMT 20 - -This page provides pointers to the models of Facebook-FAIR's WMT'20 news translation task submission [(Chen et al., 2020)](https://arxiv.org/abs/2011.08298). 
- -## Single best MT models (after finetuning on part of WMT20 news dev set) - -Model | Description | Download ----|---|--- -`transformer.wmt20.ta-en` | Ta->En | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.ta-en.single.tar.gz) -`transformer.wmt20.en-ta` | En->Ta | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-ta.single.tar.gz) -`transformer.wmt20.iu-en.news` | Iu->En (News domain) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.news.single.tar.gz) -`transformer.wmt20.en-iu.news` | En->Iu (News domain) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.news.single.tar.gz) -`transformer.wmt20.iu-en.nh` | Iu->En (Nunavut Hansard domain) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu-en.nh.single.tar.gz) -`transformer.wmt20.en-iu.nh` | En->Iu (Nunavut Hansard domain) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en-iu.nh.single.tar.gz) - -## Language models -Model | Description | Download ----|---|--- -`transformer_lm.wmt20.en` | En Language Model | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.en.tar.gz) -`transformer_lm.wmt20.ta` | Ta Language Model | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.ta.tar.gz) -`transformer_lm.wmt20.iu.news` | Iu Language Model (News domain) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu.news.tar.gz) -`transformer_lm.wmt20.iu.nh` | Iu Language Model (Nunavut Hansard domain) | [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/wmt20.iu.nh.tar.gz) - -## Example usage (torch.hub) - -#### Translation - -```python -import torch - -# English to Tamil translation -en2ta = torch.hub.load('pytorch/fairseq', 'transformer.wmt20.en-ta') -en2ta.translate("Machine learning is great!") # 'இயந்திரக் கற்றல் அருமை!' - -# Tamil to English translation -ta2en = torch.hub.load('pytorch/fairseq', 'transformer.wmt20.ta-en') -ta2en.translate("இயந்திரக் கற்றல் அருமை!") # 'Machine learning is great!' - -# English to Inuktitut translation -en2iu = torch.hub.load('pytorch/fairseq', 'transformer.wmt20.en-iu.news') -en2iu.translate("machine learning is great!") # 'ᖃᒧᑕᐅᔭᓄᑦ ᐃᓕᓐᓂᐊᕐᓂᖅ ᐱᐅᔪᒻᒪᕆᒃ!' - -# Inuktitut to English translation -iu2en = torch.hub.load('pytorch/fairseq', 'transformer.wmt20.iu-en.news') -iu2en.translate("ᖃᒧᑕᐅᔭᓄᑦ ᐃᓕᓐᓂᐊᕐᓂᖅ ᐱᐅᔪᒻᒪᕆᒃ!") # 'Machine learning excellence!' -``` - -#### Language Modeling - -```python -# Sample from the English LM -en_lm = torch.hub.load('pytorch/fairseq', 'transformer_lm.wmt20.en') -en_lm.sample("Machine learning is") # 'Machine learning is a type of artificial intelligence that uses machine learning to learn from data and make predictions.' - -# Sample from the Tamil LM -ta_lm = torch.hub.load('pytorch/fairseq', 'transformer_lm.wmt20.ta') -ta_lm.sample("இயந்திரக் கற்றல் என்பது செயற்கை நுண்ணறிவின்") # 'இயந்திரக் கற்றல் என்பது செயற்கை நுண்ணறிவின் ஒரு பகுதியாகும்.' - -# Sample from the Inuktitut LM -iu_lm = torch.hub.load('pytorch/fairseq', 'transformer_lm.wmt20.iu.news') -iu_lm.sample("ᖃᒧᑕᐅᔭᓄᑦ ᐃᓕᓐᓂᐊᕐᓂᖅ") # 'ᖃᒧᑕᐅᔭᓄᑦ ᐃᓕᓐᓂᐊᕐᓂᖅ, ᐊᒻᒪᓗ ᓯᓚᐅᑉ ᐊᓯᙳᖅᐸᓪᓕᐊᓂᖓᓄᑦ ᖃᓄᐃᓕᐅᕈᑎᒃᓴᑦ, ᐃᓚᖃᖅᖢᑎᒃ ᐅᑯᓂᖓ:' -``` - -## Citation -```bibtex -@inproceedings{chen2020facebook - title={Facebook AI's WMT20 News Translation Task Submission}, - author={Peng-Jen Chen and Ann Lee and Changhan Wang and Naman Goyal and Angela Fan and Mary Williamson and Jiatao Gu}, - booktitle={Proc. 
of WMT}, - year={2020}, -} -``` diff --git a/spaces/gradio/HuBERT/fairseq/data/multilingual/sampled_multi_dataset.py b/spaces/gradio/HuBERT/fairseq/data/multilingual/sampled_multi_dataset.py deleted file mode 100644 index b0a617424ee3c5923b37796773da4c97851a16c5..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/data/multilingual/sampled_multi_dataset.py +++ /dev/null @@ -1,467 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import datetime -import hashlib -import logging -import time -from bisect import bisect_right -from collections import OrderedDict, defaultdict -from enum import Enum -from typing import List - -import numpy as np -import torch -from fairseq.data import FairseqDataset, data_utils -from fairseq.distributed import utils as distributed_utils - - -def get_time_gap(s, e): - return ( - datetime.datetime.fromtimestamp(e) - datetime.datetime.fromtimestamp(s) - ).__str__() - - -logger = logging.getLogger(__name__) - - -def default_virtual_size_func(datasets, ratios, max_scale_up=1.5): - sizes = [len(d) for d in datasets] - if ratios is None: - return sum(sizes) - largest_idx = np.argmax(sizes) - largest_r = ratios[largest_idx] - largest_s = sizes[largest_idx] - # set virtual sizes relative to the largest dataset - virtual_sizes = [(r / largest_r) * largest_s for r in ratios] - vsize = sum(virtual_sizes) - max_size = sum(sizes) * max_scale_up - return int(vsize if vsize < max_size else max_size) - - -class CollateFormat(Enum): - single = 1 - ordered_dict = 2 - - -class SampledMultiDataset(FairseqDataset): - """Samples from multiple sub-datasets according to given sampling ratios. - Args: - datasets ( - List[~torch.utils.data.Dataset] - or OrderedDict[str, ~torch.utils.data.Dataset] - ): datasets - sampling_ratios (List[float]): list of probability of each dataset to be sampled - (default: None, which corresponds to concatenating all dataset together). - seed (int): RNG seed to use (default: 2). - epoch (int): starting epoch number (default: 1). - eval_key (str, optional): a key used at evaluation time that causes - this instance to pass-through batches from *datasets[eval_key]*. - collate_format (CollateFormat): collater output format, either CollateFormat.ordered_dict or - CollateFormat.single (default: CollateFormat.single) where CollateFormat.single configures - the collater to output batches of data mixed from all sub-datasets, - and CollateFormat.ordered_dict configures the collater to output a dictionary of batches indexed by keys - of sub-datasets. - Note that not all sub-datasets will present in a single batch in both formats. - virtual_size (int, or callable): the expected virtual size of the dataset (default: default_virtual_size_func). - split (str): the split of the data, e.g. 'train', 'valid' or 'test'. - shared_collater (bool): whether or not to all sub-datasets have the same collater. - shuffle (bool): whether or not to shuffle data (default: True). 
- """ - - def __init__( - self, - datasets, - sampling_ratios=None, - seed=2, - epoch=1, - eval_key=None, - collate_format=CollateFormat.single, - virtual_size=default_virtual_size_func, - split="", - shared_collater=False, - shuffle=True, - ): - super().__init__() - self.shared_collater = shared_collater - self.shuffle = shuffle - - if isinstance(datasets, OrderedDict): - self.keys = list(datasets.keys()) - datasets = list(datasets.values()) - elif isinstance(datasets, List): - self.keys = list(range(len(datasets))) - else: - raise AssertionError() - self.datasets = datasets - self.split = split - - self.eval_key = eval_key - if self.eval_key is not None: - self.collate_format = CollateFormat.single - else: - self.collate_format = collate_format - - self.seed = seed - self._cur_epoch = None - - self.cumulated_sizes = None - # self.datasets[k][self._cur_indices[i]] is the data item i in this sampled dataset - # namely, data item i is sampled from the kth sub-dataset self.datasets[k] - # where self.cumulated_sizes[k-1] <= i < self.cumulated_sizes[k] - self._cur_indices = None - - self._sizes = None - self.virtual_size_per_dataset = None - # caching properties - self._reset_cached_properties() - self.setup_sampling(sampling_ratios, virtual_size) - self.set_epoch(epoch) - - def _clean_if_not_none(self, var_list): - for v in var_list: - if v is not None: - del v - - def _reset_cached_properties(self): - self._clean_if_not_none([self._sizes, self._cur_indices]) - self._sizes = None - self._cur_indices = None - - def setup_sampling(self, sample_ratios, virtual_size): - sizes = [len(d) for d in self.datasets] - if sample_ratios is None: - # default back to concating datasets - self.sample_ratios = None - self.virtual_size = sum(sizes) - else: - if not isinstance(sample_ratios, np.ndarray): - sample_ratios = np.array(sample_ratios) - self.sample_ratios = sample_ratios - virtual_size = ( - default_virtual_size_func if virtual_size is None else virtual_size - ) - self.virtual_size = ( - virtual_size(self.datasets, self.sample_ratios) - if callable(virtual_size) - else virtual_size - ) - - def adjust_sampling(self, epoch, sampling_ratios, virtual_size): - if sampling_ratios is not None: - sampling_ratios = self._sync_sample_ratios(sampling_ratios) - self.setup_sampling(sampling_ratios, virtual_size) - - def _sync_sample_ratios(self, ratios): - # in case the ratios are not precisely the same across processes - # also to ensure every procresses update the ratios in the same pace - ratios = torch.DoubleTensor(ratios) - if torch.distributed.is_initialized(): - if torch.cuda.is_available(): - distributed_utils.all_reduce( - ratios.cuda(), group=distributed_utils.get_data_parallel_group() - ) - else: - distributed_utils.all_reduce( - ratios, group=distributed_utils.get_data_parallel_group() - ) - ret = ratios.cpu() - ret = ret.numpy() - return ret - - def random_choice_in_dataset(self, rng, dataset, choice_size): - if hasattr(dataset, "random_choice_in_dataset"): - return dataset.random_choice_in_dataset(rng, choice_size) - dataset_size = len(dataset) - return rng.choice( - dataset_size, choice_size, replace=(choice_size > dataset_size) - ) - - def get_virtual_indices(self, rng, datasets, sample_ratios, virtual_size): - def get_counts(sample_ratios): - counts = np.array([virtual_size * r for r in sample_ratios], dtype=np.int64) - diff = virtual_size - counts.sum() - assert diff >= 0 - # due to round-offs, the size might not match the desired sizes - if diff > 0: - dataset_indices = rng.choice( - 
len(sample_ratios), size=diff, p=sample_ratios - ) - for i in dataset_indices: - counts[i] += 1 - return counts - - def get_in_dataset_indices(datasets, sizes, sample_ratios): - counts = get_counts(sample_ratios) - # uniformally sample desired counts for each dataset - # if the desired counts are large, sample with replacement: - indices = [ - self.random_choice_in_dataset(rng, d, c) - for c, d in zip(counts, datasets) - ] - return indices - - sizes = [len(d) for d in datasets] - if sample_ratios is None: - # default back to concating datasets - in_dataset_indices = [list(range(s)) for s in sizes] - virtual_sizes_per_dataset = sizes - else: - ratios = sample_ratios / sample_ratios.sum() - in_dataset_indices = get_in_dataset_indices(datasets, sizes, ratios) - virtual_sizes_per_dataset = [len(d) for d in in_dataset_indices] - virtual_sizes_per_dataset = np.array(virtual_sizes_per_dataset, np.int64) - cumulative_sizes = np.cumsum(virtual_sizes_per_dataset) - assert sum(virtual_sizes_per_dataset) == virtual_size - assert cumulative_sizes[-1] == virtual_size - if virtual_size < sum(sizes): - logger.warning( - f"virtual data size ({virtual_size}) is less than real data size ({sum(sizes)})." - " If virtual size << real data size, there could be data coverage issue." - ) - in_dataset_indices = np.hstack(in_dataset_indices) - return in_dataset_indices, cumulative_sizes, virtual_sizes_per_dataset - - def _get_dataset_and_index(self, index): - i = bisect_right(self.cumulated_sizes, index) - return i, self._cur_indices[index] - - def __getitem__(self, index): - # self.__getitem__(index) returns self.datasets[k][self._cur_indices[index]] - # where k satisfies self.cumulated_sizes[k - 1] <= k < self.cumulated_sizes[k] - ds_idx, ds_sample_idx = self._get_dataset_and_index(index) - ret = (ds_idx, self.datasets[ds_idx][ds_sample_idx]) - return ret - - def num_tokens(self, index): - return self.sizes[index].max() - - def num_tokens_vec(self, indices): - sizes_vec = self.sizes[np.array(indices)] - # max across all dimensions but first one - return np.amax(sizes_vec, axis=tuple(range(1, len(sizes_vec.shape)))) - - def size(self, index): - return self.sizes[index] - - def __len__(self): - return self.virtual_size - - def collater(self, samples, **extra_args): - """Merge a list of samples to form a mini-batch.""" - if len(samples) == 0: - return None - if self.collate_format == "ordered_dict": - collect_samples = [[] for _ in range(len(self.datasets))] - for (i, sample) in samples: - collect_samples[i].append(sample) - batch = OrderedDict( - [ - (self.keys[i], dataset.collater(collect_samples[i])) - for i, (key, dataset) in enumerate(zip(self.keys, self.datasets)) - if len(collect_samples[i]) > 0 - ] - ) - elif self.shared_collater: - batch = self.datasets[0].collater([s for _, s in samples]) - else: - samples_dict = defaultdict(list) - pad_to_length = ( - defaultdict(int) - if "pad_to_length" not in extra_args - else extra_args["pad_to_length"] - ) - for ds_idx, s in samples: - pad_to_length["source"] = max( - pad_to_length["source"], s["source"].size(0) - ) - if s["target"] is not None: - pad_to_length["target"] = max( - pad_to_length["target"], s["target"].size(0) - ) - samples_dict[ds_idx].append(s) - batches = [ - self.datasets[i].collater(samples_dict[i], pad_to_length=pad_to_length) - for i in range(len(self.datasets)) - if len(samples_dict[i]) > 0 - ] - - def straight_data(tensors): - batch = torch.cat(tensors, dim=0) - return batch - - src_lengths = straight_data( - [b["net_input"]["src_lengths"] for b 
in batches] - ) - src_lengths, sort_order = src_lengths.sort(descending=True) - - def straight_order(tensors): - batch = straight_data(tensors) - return batch.index_select(0, sort_order) - - batch = { - "id": straight_order([b["id"] for b in batches]), - "nsentences": sum(b["nsentences"] for b in batches), - "ntokens": sum(b["ntokens"] for b in batches), - "net_input": { - "src_tokens": straight_order( - [b["net_input"]["src_tokens"] for b in batches] - ), - "src_lengths": src_lengths, - }, - "target": straight_order([b["target"] for b in batches]) - if batches[0]["target"] is not None - else None, - } - if "prev_output_tokens" in batches[0]["net_input"]: - batch["net_input"]["prev_output_tokens"] = straight_order( - [b["net_input"]["prev_output_tokens"] for b in batches] - ) - if "src_lang_id" in batches[0]["net_input"]: - batch["net_input"]["src_lang_id"] = straight_order( - [b["net_input"]["src_lang_id"] for b in batches] - ) - if "tgt_lang_id" in batches[0]: - batch["tgt_lang_id"] = straight_order( - [b["tgt_lang_id"] for b in batches] - ) - return batch - - @property - def sizes(self): - if self._sizes is not None: - return self._sizes - start_time = time.time() - in_sub_dataset_indices = [ - self._cur_indices[ - 0 if i == 0 else self.cumulated_sizes[i - 1] : self.cumulated_sizes[i] - ] - for i in range(len(self.datasets)) - ] - sub_dataset_sizes = [ - d.sizes[indices] - for d, indices in zip(self.datasets, in_sub_dataset_indices) - ] - self._sizes = np.vstack(sub_dataset_sizes) - logger.info(f"sizes() calling time: {get_time_gap(start_time, time.time())}") - return self._sizes - - def ordered_indices(self): - if self.shuffle: - indices = np.random.permutation(len(self)) - else: - indices = np.arange(len(self)) - - sizes = self.sizes - tgt_sizes = sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None - src_sizes = ( - sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes - ) - - # sort by target length, then source length - if tgt_sizes is not None: - indices = indices[np.argsort(tgt_sizes[indices], kind="mergesort")] - sort_indices = indices[np.argsort(src_sizes[indices], kind="mergesort")] - return sort_indices - - def prefetch(self, indices): - prefetch_indices = [[] for _ in range(len(self.datasets))] - for i in indices: - ds_idx, ds_sample_idx = self._get_dataset_and_index(i) - prefetch_indices[ds_idx].append(ds_sample_idx) - for i in range(len(prefetch_indices)): - self.datasets[i].prefetch(prefetch_indices[i]) - - @property - def can_reuse_epoch_itr_across_epochs(self): - return False - - def set_epoch(self, epoch): - super().set_epoch(epoch) - if epoch == self._cur_epoch: - # re-enter so return - return - for d in self.datasets: - if hasattr(d, "set_epoch"): - d.set_epoch(epoch) - self._cur_epoch = epoch - self._establish_virtual_datasets() - - def _establish_virtual_datasets(self): - if self.sample_ratios is None and self._cur_indices is not None: - # not a samping dataset, no need to resample if indices are already established - return - self._reset_cached_properties() - - start_time = time.time() - # Generate a weighted sample of indices as a function of the - # random seed and the current epoch. 
- rng = np.random.RandomState( - [ - int( - hashlib.sha1( - str(self.__class__.__name__).encode("utf-8") - ).hexdigest(), - 16, - ) - % (2 ** 32), - self.seed % (2 ** 32), # global seed - self._cur_epoch, # epoch index, - ] - ) - self._clean_if_not_none( - [self.cumulated_sizes, self.virtual_size_per_dataset, self._sizes] - ) - self._sizes = None - - indices, cumulated_sizes, virtual_size_per_dataset = self.get_virtual_indices( - rng, self.datasets, self.sample_ratios, self.virtual_size - ) - self._cur_indices = indices - self.cumulated_sizes = cumulated_sizes - self.virtual_size_per_dataset = virtual_size_per_dataset - - raw_sizes = [len(d) for d in self.datasets] - sampled_sizes = self.virtual_size_per_dataset - logger.info( - f"[{self.split}] Raw sizes: {str(dict(zip(self.keys, raw_sizes)))}; " - f"raw total size: {sum(raw_sizes)}" - ) - logger.info( - f"[{self.split}] Resampled sizes: {str(dict(zip(self.keys, sampled_sizes)))}; " - f"resampled total size: {sum(sampled_sizes)}" - ) - if self.sample_ratios is not None: - logger.info( - f"[{self.split}] Upsampling ratios: {str(dict(zip(self.keys, self.sample_ratios)))}" - ) - else: - logger.info(f"[{self.split}] A concat dataset") - logger.info( - f"[{self.split}] virtual dataset established time: {get_time_gap(start_time, time.time())}" - ) - - def filter_indices_by_size(self, indices, max_sizes): - """Filter a list of sample indices. Remove those that are longer - than specified in max_sizes. - - Args: - indices (np.array): original array of sample indices - max_sizes (int or list[int] or tuple[int]): max sample size, - can be defined separately for src and tgt (then list or tuple) - - Returns: - np.array: filtered sample array - list: list of removed indices - """ - sizes = self.sizes - tgt_sizes = sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None - src_sizes = ( - sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes - ) - - return data_utils.filter_paired_dataset_indices_by_size( - src_sizes, tgt_sizes, indices, max_sizes - ) diff --git a/spaces/gyrojeff/YuzuMarker.FontDetection/font_ds_generate_script.py b/spaces/gyrojeff/YuzuMarker.FontDetection/font_ds_generate_script.py deleted file mode 100644 index 6e57d4dc1720c3c434349b65b145f424b3528080..0000000000000000000000000000000000000000 --- a/spaces/gyrojeff/YuzuMarker.FontDetection/font_ds_generate_script.py +++ /dev/null @@ -1,143 +0,0 @@ -import sys -import traceback -import pickle -import os -import concurrent.futures -from tqdm import tqdm -import time -from font_dataset.font import load_fonts, DSFont -from font_dataset.layout import generate_font_image, TextSizeTooSmallException -from font_dataset.text import CorpusGeneratorManager, UnqualifiedFontException -from font_dataset.background import background_image_generator - - -global_script_index = int(sys.argv[1]) -global_script_index_total = int(sys.argv[2]) - -print(f"Mission {global_script_index} / {global_script_index_total}") - -num_workers = 32 - -cjk_ratio = 3 - -train_cnt = 100 -val_cnt = 5 -test_cnt = 30 - -train_cnt_cjk = int(train_cnt * cjk_ratio) -val_cnt_cjk = int(val_cnt * cjk_ratio) -test_cnt_cjk = int(test_cnt * cjk_ratio) - -dataset_path = "./dataset/font_img" -os.makedirs(dataset_path, exist_ok=True) - -unqualified_log_file_name = f"unqualified_font_{time.time()}.txt" -runtime_exclusion_list = [] - -fonts, exclusion_rule = load_fonts() -corpus_manager = CorpusGeneratorManager() -images = background_image_generator() - - -def add_exclusion(font: DSFont, reason: str, 
dataset_base_dir: str, i: int, j: int): - print(f"Excluded font: {font.path}, reason: {reason}") - runtime_exclusion_list.append(font.path) - with open(unqualified_log_file_name, "a+") as f: - f.write(f"{font.path} # {reason}\n") - for jj in range(j + 1): - image_file_name = f"font_{i}_img_{jj}.jpg" - label_file_name = f"font_{i}_img_{jj}.bin" - - image_file_path = os.path.join(dataset_base_dir, image_file_name) - label_file_path = os.path.join(dataset_base_dir, label_file_name) - - if os.path.exists(image_file_path): - os.remove(image_file_path) - if os.path.exists(label_file_path): - os.remove(label_file_path) - - -def generate_dataset(dataset_type: str, cnt: int): - dataset_base_dir = os.path.join(dataset_path, dataset_type) - os.makedirs(dataset_base_dir, exist_ok=True) - - def _generate_single(args): - i, j, font = args - print( - f"Generating {dataset_type} font: {font.path} {i} / {len(fonts)}, image {j}" - ) - - if exclusion_rule(font): - print(f"Excluded font: {font.path}") - return - if font.path in runtime_exclusion_list: - print(f"Excluded font: {font.path}") - return - - while True: - try: - image_file_name = f"font_{i}_img_{j}.jpg" - label_file_name = f"font_{i}_img_{j}.bin" - - image_file_path = os.path.join(dataset_base_dir, image_file_name) - label_file_path = os.path.join(dataset_base_dir, label_file_name) - - # detect cache - if os.path.exists(image_file_path) and os.path.exists(label_file_path): - return - - im = next(images) - im, label = generate_font_image( - im, - font, - corpus_manager, - ) - - im.save(image_file_path) - pickle.dump(label, open(label_file_path, "wb")) - return - except UnqualifiedFontException as e: - traceback.print_exc() - add_exclusion(font, "unqualified font", dataset_base_dir, i, j) - return - except TextSizeTooSmallException as e: - traceback.print_exc() - continue - except Exception as e: - traceback.print_exc() - add_exclusion(font, f"other: {repr(e)}", dataset_base_dir, i, j) - return - - work_list = [] - - # divide len(fonts) into 64 parts and choose the third part for this script - for i in range( - (global_script_index - 1) * len(fonts) // global_script_index_total, - global_script_index * len(fonts) // global_script_index_total, - ): - font = fonts[i] - if font.language == "CJK": - true_cnt = cnt * cjk_ratio - else: - true_cnt = cnt - for j in range(true_cnt): - work_list.append((i, j, font)) - - # with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor: - # _ = list( - # tqdm( - # executor.map(_generate_single, work_list), - # total=len(work_list), - # leave=True, - # desc=dataset_type, - # miniters=1, - # ) - # ) - - for i in tqdm(range(len(work_list))): - _generate_single(work_list[i]) - - -generate_dataset("train", train_cnt) -generate_dataset("val", val_cnt) -generate_dataset("test", test_cnt) diff --git a/spaces/h2oai/wave-tour/examples/dropdown.py b/spaces/h2oai/wave-tour/examples/dropdown.py deleted file mode 100644 index 4a150c6da475a30e72c3d7b8ec41e8fab4ce3497..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/dropdown.py +++ /dev/null @@ -1,44 +0,0 @@ -# Form / Dropdown -# Use dropdowns to allow users to choose between available choices. 
-# #form #dropdown #choice -# --- -from h2o_wave import main, app, Q, ui - -choices = [ - ui.choice('A', 'Option A'), - ui.choice('B', 'Option B'), - ui.choice('C', 'Option C', disabled=True), - ui.choice('D', 'Option D'), -] - -choices_dialog = [ui.choice(str(i), f'Option {i}') for i in range(1, 102)] - - -@app('/demo') -async def serve(q: Q): - if q.args.show_inputs: - q.page['example'].items = [ - ui.text(f'dropdown={q.args.dropdown}'), - ui.text(f'dropdown_multi={q.args.dropdown_multi}'), - ui.text(f'dropdown_disabled={q.args.dropdown_disabled}'), - ui.text(f'dropdown_dialog={q.args.dropdown_dialog}'), - ui.text(f'dropdown_popup_always={q.args.dropdown_popup_always}'), - ui.text(f'dropdown_popup_never={q.args.dropdown_popup_never}'), - ui.button(name='show_form', label='Back', primary=True), - ] - else: - q.page['example'] = ui.form_card(box='1 1 4 7', items=[ - ui.dropdown(name='dropdown', label='Pick one', value='B', required=True, choices=choices), - ui.dropdown(name='dropdown_multi', label='Pick multiple', values=['B', 'D'], required=True, - choices=choices), - ui.dropdown(name='dropdown_disabled', label='Pick one (Disabled)', value='B', choices=choices, - disabled=True), - ui.dropdown(name='dropdown_dialog', label='Pick multiple in dialog (>100 choices)', values=['1'], - required=True, choices=choices_dialog), - ui.dropdown(name='dropdown_popup_always', label='Always show popup even when choices < 100', value='A', - required=True, choices=choices, popup='always'), - ui.dropdown(name='dropdown_popup_never', label='Never show popup even when choices > 100', value='1', - required=True, choices=choices_dialog, popup='never'), - ui.button(name='show_inputs', label='Submit', primary=True), - ]) - await q.page.save() diff --git a/spaces/haakohu/deep_privacy2/dp2/detection/models/keypoint_maskrcnn.py b/spaces/haakohu/deep_privacy2/dp2/detection/models/keypoint_maskrcnn.py deleted file mode 100644 index b9e35a9f44a7133e9a0418f5b73d2bc39229139b..0000000000000000000000000000000000000000 --- a/spaces/haakohu/deep_privacy2/dp2/detection/models/keypoint_maskrcnn.py +++ /dev/null @@ -1,111 +0,0 @@ -import numpy as np -import torch -from detectron2.checkpoint import DetectionCheckpointer -from detectron2.modeling.roi_heads import CascadeROIHeads, StandardROIHeads -from detectron2.data.transforms import ResizeShortestEdge -from detectron2.structures import Instances -from detectron2 import model_zoo -from detectron2.config import instantiate -from detectron2.config import LazyCall as L -from PIL import Image -import tops -import functools -from torchvision.transforms.functional import resize - - -def get_rn50_fpn_keypoint_rcnn(weight_path: str): - from detectron2.modeling.poolers import ROIPooler - from detectron2.modeling.roi_heads import KRCNNConvDeconvUpsampleHead - from detectron2.layers import ShapeSpec - model = model_zoo.get_config("common/models/mask_rcnn_fpn.py").model - model.roi_heads.update( - num_classes=1, - keypoint_in_features=["p2", "p3", "p4", "p5"], - keypoint_pooler=L(ROIPooler)( - output_size=14, - scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32), - sampling_ratio=0, - pooler_type="ROIAlignV2", - ), - keypoint_head=L(KRCNNConvDeconvUpsampleHead)( - input_shape=ShapeSpec(channels=256, width=14, height=14), - num_keypoints=17, - conv_dims=[512] * 8, - loss_normalizer="visible", - ), - ) - - # Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2. - # 1000 proposals per-image is found to hurt box AP. - # Therefore we increase it to 1500 per-image. 
- model.proposal_generator.post_nms_topk = (1500, 1000) - - # Keypoint AP degrades (though box AP improves) when using plain L1 loss - model.roi_heads.box_predictor.smooth_l1_beta = 0.5 - model = instantiate(model) - - dataloader = model_zoo.get_config("common/data/coco_keypoint.py").dataloader - test_transform = instantiate(dataloader.test.mapper.augmentations) - DetectionCheckpointer(model).load(weight_path) - return model, test_transform - - -models = { - "rn50_fpn_maskrcnn": functools.partial(get_rn50_fpn_keypoint_rcnn, weight_path="https://folk.ntnu.no/haakohu/checkpoints/maskrcnn_keypoint/keypoint_maskrcnn_R_50_FPN_1x.pth") -} - - -class KeypointMaskRCNN: - - def __init__(self, model_name: str, score_threshold: float) -> None: - assert model_name in models, f"Did not find {model_name} in models" - model, test_transform = models[model_name]() - self.model = model.eval().to(tops.get_device()) - if isinstance(self.model.roi_heads, CascadeROIHeads): - for head in self.model.roi_heads.box_predictors: - assert hasattr(head, "test_score_thresh") - head.test_score_thresh = score_threshold - else: - assert isinstance(self.model.roi_heads, StandardROIHeads) - assert hasattr(self.model.roi_heads.box_predictor, "test_score_thresh") - self.model.roi_heads.box_predictor.test_score_thresh = score_threshold - - self.test_transform = test_transform - assert len(self.test_transform) == 1 - self.test_transform = self.test_transform[0] - assert isinstance(self.test_transform, ResizeShortestEdge) - assert self.test_transform.interp == Image.BILINEAR - self.image_format = self.model.input_format - - def resize_im(self, im): - H, W = im.shape[-2:] - if self.test_transform.is_range: - size = np.random.randint( - self.test_transform.short_edge_length[0], self.test_transform.short_edge_length[1] + 1) - else: - size = np.random.choice(self.test_transform.short_edge_length) - newH, newW = ResizeShortestEdge.get_output_shape(H, W, size, self.test_transform.max_size) - return resize( - im, (newH, newW), antialias=True) - - def __call__(self, *args, **kwargs): - return self.forward(*args, **kwargs) - - @torch.no_grad() - def forward(self, im: torch.Tensor): - assert im.ndim == 3 - if self.image_format == "BGR": - im = im.flip(0) - H, W = im.shape[-2:] - im = im.float() - im = self.resize_im(im) - - inputs = dict(image=im, height=H, width=W) - # instances contains - # dict_keys(['pred_boxes', 'scores', 'pred_classes', 'pred_masks', 'pred_keypoints', 'pred_keypoint_heatmaps']) - instances = self.model([inputs])[0]["instances"] - return dict( - scores=instances.get("scores").cpu(), - segmentation=instances.get("pred_masks").cpu(), - keypoints=instances.get("pred_keypoints").cpu() - ) diff --git a/spaces/haakohu/deep_privacy2_face/dp2/detection/models/keypoint_maskrcnn.py b/spaces/haakohu/deep_privacy2_face/dp2/detection/models/keypoint_maskrcnn.py deleted file mode 100644 index b9e35a9f44a7133e9a0418f5b73d2bc39229139b..0000000000000000000000000000000000000000 --- a/spaces/haakohu/deep_privacy2_face/dp2/detection/models/keypoint_maskrcnn.py +++ /dev/null @@ -1,111 +0,0 @@ -import numpy as np -import torch -from detectron2.checkpoint import DetectionCheckpointer -from detectron2.modeling.roi_heads import CascadeROIHeads, StandardROIHeads -from detectron2.data.transforms import ResizeShortestEdge -from detectron2.structures import Instances -from detectron2 import model_zoo -from detectron2.config import instantiate -from detectron2.config import LazyCall as L -from PIL import Image -import tops -import functools -from 
torchvision.transforms.functional import resize - - -def get_rn50_fpn_keypoint_rcnn(weight_path: str): - from detectron2.modeling.poolers import ROIPooler - from detectron2.modeling.roi_heads import KRCNNConvDeconvUpsampleHead - from detectron2.layers import ShapeSpec - model = model_zoo.get_config("common/models/mask_rcnn_fpn.py").model - model.roi_heads.update( - num_classes=1, - keypoint_in_features=["p2", "p3", "p4", "p5"], - keypoint_pooler=L(ROIPooler)( - output_size=14, - scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32), - sampling_ratio=0, - pooler_type="ROIAlignV2", - ), - keypoint_head=L(KRCNNConvDeconvUpsampleHead)( - input_shape=ShapeSpec(channels=256, width=14, height=14), - num_keypoints=17, - conv_dims=[512] * 8, - loss_normalizer="visible", - ), - ) - - # Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2. - # 1000 proposals per-image is found to hurt box AP. - # Therefore we increase it to 1500 per-image. - model.proposal_generator.post_nms_topk = (1500, 1000) - - # Keypoint AP degrades (though box AP improves) when using plain L1 loss - model.roi_heads.box_predictor.smooth_l1_beta = 0.5 - model = instantiate(model) - - dataloader = model_zoo.get_config("common/data/coco_keypoint.py").dataloader - test_transform = instantiate(dataloader.test.mapper.augmentations) - DetectionCheckpointer(model).load(weight_path) - return model, test_transform - - -models = { - "rn50_fpn_maskrcnn": functools.partial(get_rn50_fpn_keypoint_rcnn, weight_path="https://folk.ntnu.no/haakohu/checkpoints/maskrcnn_keypoint/keypoint_maskrcnn_R_50_FPN_1x.pth") -} - - -class KeypointMaskRCNN: - - def __init__(self, model_name: str, score_threshold: float) -> None: - assert model_name in models, f"Did not find {model_name} in models" - model, test_transform = models[model_name]() - self.model = model.eval().to(tops.get_device()) - if isinstance(self.model.roi_heads, CascadeROIHeads): - for head in self.model.roi_heads.box_predictors: - assert hasattr(head, "test_score_thresh") - head.test_score_thresh = score_threshold - else: - assert isinstance(self.model.roi_heads, StandardROIHeads) - assert hasattr(self.model.roi_heads.box_predictor, "test_score_thresh") - self.model.roi_heads.box_predictor.test_score_thresh = score_threshold - - self.test_transform = test_transform - assert len(self.test_transform) == 1 - self.test_transform = self.test_transform[0] - assert isinstance(self.test_transform, ResizeShortestEdge) - assert self.test_transform.interp == Image.BILINEAR - self.image_format = self.model.input_format - - def resize_im(self, im): - H, W = im.shape[-2:] - if self.test_transform.is_range: - size = np.random.randint( - self.test_transform.short_edge_length[0], self.test_transform.short_edge_length[1] + 1) - else: - size = np.random.choice(self.test_transform.short_edge_length) - newH, newW = ResizeShortestEdge.get_output_shape(H, W, size, self.test_transform.max_size) - return resize( - im, (newH, newW), antialias=True) - - def __call__(self, *args, **kwargs): - return self.forward(*args, **kwargs) - - @torch.no_grad() - def forward(self, im: torch.Tensor): - assert im.ndim == 3 - if self.image_format == "BGR": - im = im.flip(0) - H, W = im.shape[-2:] - im = im.float() - im = self.resize_im(im) - - inputs = dict(image=im, height=H, width=W) - # instances contains - # dict_keys(['pred_boxes', 'scores', 'pred_classes', 'pred_masks', 'pred_keypoints', 'pred_keypoint_heatmaps']) - instances = self.model([inputs])[0]["instances"] - return dict( - 
scores=instances.get("scores").cpu(), - segmentation=instances.get("pred_masks").cpu(), - keypoints=instances.get("pred_keypoints").cpu() - ) diff --git a/spaces/hackathon-pln-es/AbstractGen_ES/README.md b/spaces/hackathon-pln-es/AbstractGen_ES/README.md deleted file mode 100644 index 79dd194df41da836dec6cc706394a04128426038..0000000000000000000000000000000000000000 --- a/spaces/hackathon-pln-es/AbstractGen_ES/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: AbstractGen_ES -emoji: 📝🤓 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 2.9.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/hackathon-pln-es/poem-generation-es/app.py b/spaces/hackathon-pln-es/poem-generation-es/app.py deleted file mode 100644 index 1afb0de5a8589e66ed0b60b746a74b0774a99596..0000000000000000000000000000000000000000 --- a/spaces/hackathon-pln-es/poem-generation-es/app.py +++ /dev/null @@ -1,121 +0,0 @@ -import gradio as gr -import random -from transformers import AutoTokenizer, AutoModelForSeq2SeqLM - -author_set = {'Leopoldo Lugones', 'Nacho Buzón', 'Octavio Paz', 'Luis Cañizal de la Fuente', 'Juan de Salinas', 'Vicente Huidobro', 'Hilario Barrero', - 'Ramón de Campoamor', 'Anna Ajmátova', 'Víctor Hugo López Cancino', 'Ramón María del Valle-Inclán', 'Infantiles', 'Jorge Luis Borges', - 'Carlos Bousoño', 'Gonzalo Rojas', 'Juan Ruiz, Arcipreste de Hita', 'Rubén Izaguirre Fiallos', 'Juan Meléndez Valdés', - 'José María de Heredia', 'Jaime Sabines', 'Alejandra Pizarnik', 'María Cristina Azcona', 'Guillermo Aguirre y Fierro', - 'Miguel Ramos Carrión', 'José de Espronceda', 'Juan del Encina', 'Jorge Guillén', 'Manuel Machado', 'José Santos Chocano', 'Meira Delmar', - 'Iván Tubau', 'Tirso de Molina', 'Oliverio Girondo', 'Justo Braga', 'Consuelo Hernández', 'Belén Reyes', 'Pablo Neruda', - 'Francisco de Aldana', 'Bertolt Brecht', 'José Antonio Labordeta', 'Emilio Prados', 'Porfirio Barba Jacob', 'Leopoldo Marechal', - 'Aurelio González Ovies', 'Darío Jaramillo Agudelo', 'Víctor Botas', 'Leopoldo María Panero', 'Juan de Mena', 'Tomás de Iriarte', - 'Gabriela Mistral', 'Gonzalo de Berceo', 'Antonio Machado', 'Santiago Montobbio', 'Pedro Calderón de la Barca', 'Dionisio Ridruejo', - 'Atahualpa Yupanqui', 'Nicolás Guillén', 'Claudio Rodríguez', 'José María Blanco White', 'Gil Vicente', 'Corina Bruni', 'Gabriel Celaya', - 'Javier Alvarado', 'Rosalía de Castro', 'Gustavo Pereira', 'Miguel de Cervantes y Saavedra', 'Jorge Riechmann', 'José Juan Tablada', - 'Lope de Vega', 'Basilio Sánchez', 'Alfonso X el sabio', 'Rafael de León', 'Eunice Odio', 'Manuel Acuña', 'Víctor Jiménez', - 'José Asunción Silva', 'Omar García Ramírez', 'Luis Cernuda', 'perdón', 'Blas de Otero', 'Luis Benítez', 'Ángeles Carbajal', - 'Manuel Bretón de los Herreros', 'Baldomero Fernández Moreno', 'Luis Barahona de Soto', 'Guillermo Valencia', 'José María Hinojosa', - 'Macedonio Fernández', 'Manuel Gutiérrez Nájera', 'Alfredo Buxán', 'Salvador Novo', 'José Ángel Valente', 'José Cadalso', - 'Juan Ramón Mansilla', 'Ana Istarú', 'Juan Ramón Jiménez', 'Miguel Ángel Asturias', 'Ángel González', 'amistad', 'Alfredo Lavergne', - 'Xavier Villaurrutia', 'René Chacón Linares', 'Carolina Coronado', 'Bartolomé Leonardo de Argensola', 'Marilina Rébora', - 'Vicente Aleixandre', 'Alberto Girri', 'Juana de Ibarbourou', 'Ricardo Dávila Díaz Flores', 'Garcilaso de la Vega', - 'Lupercio Leonardo de Argensola', 'Enrique Lihn', 'Julia de Burgos', 'Mariano Brull', 
'Efraín Huerta', 'Roxana Popelka', - 'Pelayo Fueyo', 'San Juan de la Cruz', 'Vicente García', 'Basilio Fernández', 'Paz Díez Taboada', 'cristianos', 'León Felipe', - 'Diana Bellessi', 'Genaro Ortega Gutiérrez', 'Cristóbal de Castillejo', 'Gioconda Belli', 'Iacyr Anderson Freitas', - 'Juan José Vélez Otero', 'Ezequiel Martínez Estrada', 'Juan de Arguijo', 'Gertrudis Gómez de Avellaneda', 'Marcos Rafael Blanco Belmonte', - 'Julio Aumente', 'Ramón López Velarde', 'para la familia', 'Antonia Álvarez Álvarez', 'José Zorrilla', 'Juan Luis Panero', - 'Teresa Palazzo Conti', 'Claribel Alegría', 'Francisco de Medrano', 'Antonio Colinas', 'Jordi Doce', 'Ismael Enrique Arciniegas', - 'Josefina Plá', 'José Agustín Goytisolo', 'Blanca Andreu', 'Enrique González Martínez', 'José García Nieto', 'Ernesto Cardenal', - 'Pedro Luis Menéndez', 'Carmen Conde Abellán', 'Salvador Rueda', 'Dulce María Loynaz', 'Odette Alonso', 'Manuel Altolaguirre', - 'Byron Espinoza', 'Francisco Álvarez', 'Vicente Gerbasi', 'César Vallejo', 'Gloria Fuertes', 'Sor Juana Inés de la Cruz', - 'Francisco de la Torre', 'Francisco Matos Paoli', 'Rubén Darío', 'Rafael Pombo', 'Mara Romero', 'José Albi', 'Francisco de Quevedo', - 'Juan de Dios Peza', 'Leopoldo Panero', 'Fernando de Herrera', 'Victoriano Crémer', 'Ana Rossetti', 'Gabriel García Márquez', - 'Teresa Domingo Català', 'Melchor de Palau', 'Miguel Rasch Isla', 'Luis Antonio de Villena', 'Mario Benedetti', 'Ramón Pérez de Ayala', - 'Antonio Plaza Llamas', 'David Escobar Galindo', 'Mario Meléndez', 'José Gorostiza', 'Delfina Acosta', 'en español', 'Delmira Agustini', - 'José Luis Piquero', 'Baltasar del Alcázar', 'Nimia Vicéns', 'Rafael Alberti', 'María Eugenia Caseiro', 'Nicomedes Santa Cruz', - 'Carlos Pellicer', 'Luis de Góngora', 'Manuel Alcántara', 'Toni García Arias', 'Antonio Fernández Lera', 'José María Pemán', - 'Dina Posada', 'Gaspar Melchor de Jovellanos', 'Alfonso Reyes', 'José María Gabriel y Galán', 'Manuel José Othón', 'Luciano Castañón', - 'Luis Alberto de Cuenca', 'Andrés Eloy Blanco', 'Luis Antonio Chávez', 'Pedro Salinas', 'Luis Palés Matos', 'Álvaro García', - 'Pablo de Rokha', 'Dámaso Alonso', 'Luis Álvarez Piner', 'Salvador García Ramírez', 'Roque Dalton', 'Gerardo Diego', - 'Felipe Benítez Reyes', 'William Shakespeare', 'José Ángel Buesa', 'Miguel Florián', 'Luis Gonzaga Urbina', 'Jesús Hilario Tundidor', - 'Amado Nervo', 'Miguel Hernández', 'Federico García Lorca', 'José Martí', 'Oscar Ferreiro', 'Pedro Miguel Lamet', 'Fray Luis de León', - 'Julio Flórez Roa', 'León de Greiff', 'Gustavo Adolfo Bécquer', 'Carlos Edmundo de Ory', 'Miguel de Unamuno', 'Manuel del Cabral', - 'Oscar Acosta', 'José Lezama Lima', 'Hernando de Acuña', 'Ángel García Aller', 'Salvador Díaz Mirón', 'Ricardo Molinari', - 'Julio Herrera y Reissig', 'Francisco Luis Bernárdez', 'Fa Claes', 'Jorge Debravo', 'Francisco de Figueroa', 'Marqués de Santillana', - 'Eugenio Florit', 'José Gautier Benítez', 'Óscar Hahn', 'Andrés Bello', 'Santa Teresa de Jesús, Sánchez de Cep', 'Juan Liscano', - 'Jorge Teillier', 'Félix María de Samaniego', 'Nicolás Fernández de Moratín', 'Juan Boscán', 'Manuel María Flores', 'Gutierre de Cetina', - 'Alfonsina Storni', 'José Luis Rey Cano', 'Jorge Manrique', 'Nicanor Parra'} - -model_name = 'hackathon-pln-es/poem-gen-spanish-t5-small' -tokenizer = AutoTokenizer.from_pretrained(model_name) -model = AutoModelForSeq2SeqLM.from_pretrained(model_name) - -# tts_es = gr.Interface.load("huggingface/facebook/tts_transformer-es-css10") - -def make_poem(author, sentiment, 
words, text): - num_lines=5 - poem = text - prev_output = '' - l_words = [x.strip() for x in words.split(',')] - - # Add empty strings to words - if num_lines > len(l_words): - diff = num_lines - len(l_words) - l_words += [''] * diff - - random.shuffle(l_words) - - for i in range(num_lines): - word = l_words[i] - if word == '': - input_text = f"""poema: estilo: {author} && sentimiento: {sentiment} && texto: {poem} """ - else: - input_text = f"""poema: estilo: {author} && sentimiento: {sentiment} && palabras: {word} && texto: {poem} """ - inputs = tokenizer(input_text, return_tensors="pt") - - outputs = model.generate(inputs["input_ids"], - do_sample = True, - max_length = 30, - repetition_penalty = 20.0, - top_k = 50, - top_p = 0.92) - detok_outputs = [tokenizer.decode(x, skip_special_tokens=True) for x in outputs] - pre_output = detok_outputs[0] - - poem += '\n' + pre_output - # audio = tts_es(poem) - # return poem, audio - return poem - -article = "

        Don't miss this other cool space for Spanish poem generation based on sonnets: sonnet-poetry-generator-spanish

        " - - -iface = gr.Interface( - fn=make_poem, - title='Generation of Spanish poems', - description="""

        For its participation in the First Spanish NLP Hackathon, the team worked with two datasets.

        • The first one was a collection of sonnets in Spanish, and
        • the second one was a collection of poems.

          A GPT-2 model, which you can find here, was trained on the sonnet dataset, and this space was created for it.


          A T5 model, which you can find here, was trained on the second dataset, and this is the space that was created to play with it :)

          """, - theme='huggingface', - inputs= - [ - gr.inputs.Dropdown(sorted(author_set), type="value", label='Nombre del autor'), - gr.inputs.Radio(['positivo', 'negativo'], type="value", label='Sentimiento del texto generado'), - gr.inputs.Textbox(lines=1, placeholder='palabra_1, palabra_2, ..., palabra_n', label='Palabras que desea incluir'), - gr.inputs.Textbox(lines=4, placeholder='texto inicial', label='Texto inicial'), - ], - outputs= - [ - gr.outputs.Textbox(label="Texto generado"), - # gr.outputs.Audio(label="Primeros segundos") - ], - article= article, - examples= - [ - ['Pablo Neruda', 'positivo', 'cielo, luna, mar', 'Todos fueron a verle pasar'] - ]) -iface.launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/hahahafofo/vits-uma-genshin-honkai/app.py b/spaces/hahahafofo/vits-uma-genshin-honkai/app.py deleted file mode 100644 index ba29f6a5aff153461017c2e11e03a8765581c0d5..0000000000000000000000000000000000000000 --- a/spaces/hahahafofo/vits-uma-genshin-honkai/app.py +++ /dev/null @@ -1,150 +0,0 @@ -# coding=utf-8 -import time -import os -import gradio as gr -import utils -import argparse -import commons -from models import SynthesizerTrn -from text import text_to_sequence -import torch -from torch import no_grad, LongTensor -import webbrowser -import logging -import gradio.processing_utils as gr_processing_utils -logging.getLogger('numba').setLevel(logging.WARNING) -limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces - -audio_postprocess_ori = gr.Audio.postprocess -def audio_postprocess(self, y): - data = audio_postprocess_ori(self, y) - if data is None: - return None - return gr_processing_utils.encode_url_or_file_to_base64(data["name"]) -gr.Audio.postprocess = audio_postprocess - -def get_text(text, hps): - text_norm, clean_text = text_to_sequence(text, hps.symbols, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm, clean_text - -def vits(text, language, speaker_id, noise_scale, noise_scale_w, length_scale): - start = time.perf_counter() - if not len(text): - return "输入文本不能为空!", None, None - text = text.replace('\n', ' ').replace('\r', '').replace(" ", "") - if len(text) > 100 and limitation: - return f"输入文字过长!{len(text)}>100", None, None - if language == 0: - text = f"[ZH]{text}[ZH]" - elif language == 1: - text = f"[JA]{text}[JA]" - else: - text = f"{text}" - stn_tst, clean_text = get_text(text, hps_ms) - with no_grad(): - x_tst = stn_tst.unsqueeze(0).to(device) - x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device) - speaker_id = LongTensor([speaker_id]).to(device) - audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=speaker_id, noise_scale=noise_scale, noise_scale_w=noise_scale_w, - length_scale=length_scale)[0][0, 0].data.cpu().float().numpy() - - return "生成成功!", (22050, audio), f"生成耗时 {round(time.perf_counter()-start, 2)} s" - -def search_speaker(search_value): - for s in speakers: - if search_value == s: - return s - for s in speakers: - if search_value in s: - return s - -def change_lang(language): - if language == 0: - return 0.6, 0.668, 1.2 - else: - return 0.6, 0.668, 1.1 - -download_audio_js = """ -() =>{{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let audio = root.querySelector("#tts-audio").querySelector("audio"); - let text = root.querySelector("#input-text").querySelector("textarea"); - if (audio == undefined) - 
return; - text = text.value; - if (text == undefined) - text = Math.floor(Math.random()*100000000); - audio = audio.src; - let oA = document.createElement("a"); - oA.download = text.substr(0, 20)+'.wav'; - oA.href = audio; - document.body.appendChild(oA); - oA.click(); - oA.remove(); -}} -""" - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--device', type=str, default='cpu') - parser.add_argument('--api', action="store_true", default=False) - parser.add_argument("--share", action="store_true", default=False, help="share gradio app") - parser.add_argument("--colab", action="store_true", default=False, help="share gradio app") - args = parser.parse_args() - device = torch.device(args.device) - - hps_ms = utils.get_hparams_from_file(r'./model/config.json') - net_g_ms = SynthesizerTrn( - len(hps_ms.symbols), - hps_ms.data.filter_length // 2 + 1, - hps_ms.train.segment_size // hps_ms.data.hop_length, - n_speakers=hps_ms.data.n_speakers, - **hps_ms.model) - _ = net_g_ms.eval().to(device) - speakers = hps_ms.speakers - model, optimizer, learning_rate, epochs = utils.load_checkpoint(r'./model/G_953000.pth', net_g_ms, None) - - with gr.Blocks() as app: - gr.Markdown( - "#
          VITS语音在线合成demo\n" - "#
          严禁将模型用于任何商业项目,否则后果自负\n" - "
          主要有赛马娘,原神中文,原神日语,崩坏3的音色
          " - '' - '' - ) - - with gr.Tabs(): - with gr.TabItem("vits"): - with gr.Row(): - with gr.Column(): - input_text = gr.Textbox(label="Text (100 words limitation) " if limitation else "Text", lines=5, value="今天晚上吃啥好呢。", elem_id=f"input-text") - lang = gr.Dropdown(label="Language", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"], - type="index", value="中文") - btn = gr.Button(value="Submit") - with gr.Row(): - search = gr.Textbox(label="Search Speaker", lines=1) - btn2 = gr.Button(value="Search") - sid = gr.Dropdown(label="Speaker", choices=speakers, type="index", value=speakers[228]) - with gr.Row(): - ns = gr.Slider(label="noise_scale(控制感情变化程度)", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True) - nsw = gr.Slider(label="noise_scale_w(控制音素发音长度)", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True) - ls = gr.Slider(label="length_scale(控制整体语速)", minimum=0.1, maximum=2.0, step=0.1, value=1.2, interactive=True) - with gr.Column(): - o1 = gr.Textbox(label="Output Message") - o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio") - o3 = gr.Textbox(label="Extra Info") - download = gr.Button("Download Audio") - btn.click(vits, inputs=[input_text, lang, sid, ns, nsw, ls], outputs=[o1, o2, o3]) - download.click(None, [], [], _js=download_audio_js.format()) - btn2.click(search_speaker, inputs=[search], outputs=[sid]) - lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls]) - with gr.TabItem("可用人物一览"): - gr.Radio(label="Speaker", choices=speakers, interactive=False, type="index") - if args.colab: - webbrowser.open("http://127.0.0.1:7860") - app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share) diff --git a/spaces/hamelcubsfan/AutoGPT/autogpt/spinner.py b/spaces/hamelcubsfan/AutoGPT/autogpt/spinner.py deleted file mode 100644 index 4e33d74213881352546f334ccb1eb4772b8b7b70..0000000000000000000000000000000000000000 --- a/spaces/hamelcubsfan/AutoGPT/autogpt/spinner.py +++ /dev/null @@ -1,65 +0,0 @@ -"""A simple spinner module""" -import itertools -import sys -import threading -import time - - -class Spinner: - """A simple spinner class""" - - def __init__(self, message: str = "Loading...", delay: float = 0.1) -> None: - """Initialize the spinner class - - Args: - message (str): The message to display. - delay (float): The delay between each spinner update. - """ - self.spinner = itertools.cycle(["-", "/", "|", "\\"]) - self.delay = delay - self.message = message - self.running = False - self.spinner_thread = None - - def spin(self) -> None: - """Spin the spinner""" - while self.running: - sys.stdout.write(f"{next(self.spinner)} {self.message}\r") - sys.stdout.flush() - time.sleep(self.delay) - sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r") - - def __enter__(self): - """Start the spinner""" - self.running = True - self.spinner_thread = threading.Thread(target=self.spin) - self.spinner_thread.start() - - return self - - def __exit__(self, exc_type, exc_value, exc_traceback) -> None: - """Stop the spinner - - Args: - exc_type (Exception): The exception type. - exc_value (Exception): The exception value. - exc_traceback (Exception): The exception traceback. 
- """ - self.running = False - if self.spinner_thread is not None: - self.spinner_thread.join() - sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r") - sys.stdout.flush() - - def update_message(self, new_message, delay=0.1): - """Update the spinner message - Args: - new_message (str): New message to display - delay: Delay in seconds before updating the message - """ - time.sleep(delay) - sys.stdout.write( - f"\r{' ' * (len(self.message) + 2)}\r" - ) # Clear the current message - sys.stdout.flush() - self.message = new_message diff --git a/spaces/hands012/gpt-academic/request_llm/bridge_chatgpt.py b/spaces/hands012/gpt-academic/request_llm/bridge_chatgpt.py deleted file mode 100644 index eef8fbf0b43f30b915f770f4bc54120c84ebd092..0000000000000000000000000000000000000000 --- a/spaces/hands012/gpt-academic/request_llm/bridge_chatgpt.py +++ /dev/null @@ -1,285 +0,0 @@ -# 借鉴了 https://github.com/GaiZhenbiao/ChuanhuChatGPT 项目 - -""" - 该文件中主要包含三个函数 - - 不具备多线程能力的函数: - 1. predict: 正常对话时使用,具备完备的交互功能,不可多线程 - - 具备多线程调用能力的函数 - 2. predict_no_ui:高级实验性功能模块调用,不会实时显示在界面上,参数简单,可以多线程并行,方便实现复杂的功能逻辑 - 3. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程 -""" - -import json -import time -import gradio as gr -import logging -import traceback -import requests -import importlib - -# config_private.py放自己的秘密如API和代理网址 -# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件 -from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc -proxies, API_KEY, TIMEOUT_SECONDS, MAX_RETRY = \ - get_conf('proxies', 'API_KEY', 'TIMEOUT_SECONDS', 'MAX_RETRY') - -timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \ - '网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。' - -def get_full_error(chunk, stream_response): - """ - 获取完整的从Openai返回的报错 - """ - while True: - try: - chunk += next(stream_response) - except: - break - return chunk - - -def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False): - """ - 发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。 - inputs: - 是本次问询的输入 - sys_prompt: - 系统静默prompt - llm_kwargs: - chatGPT的内部调优参数 - history: - 是之前的对话列表 - observe_window = None: - 用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗 - """ - watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可 - headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True) - retry = 0 - while True: - try: - # make a POST request to the API endpoint, stream=False - from .bridge_all import model_info - endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] - response = requests.post(endpoint, headers=headers, proxies=proxies, - json=payload, stream=True, timeout=TIMEOUT_SECONDS); break - except requests.exceptions.ReadTimeout as e: - retry += 1 - traceback.print_exc() - if retry > MAX_RETRY: raise TimeoutError - if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……') - - stream_response = response.iter_lines() - result = '' - while True: - try: chunk = next(stream_response).decode() - except StopIteration: - break - except requests.exceptions.ConnectionError: - chunk = next(stream_response).decode() # 失败了,重试一次?再失败就没办法了。 - if len(chunk)==0: continue - if not chunk.startswith('data:'): - error_msg = get_full_error(chunk.encode('utf8'), stream_response).decode() - if "reduce the length" in error_msg: - raise 
ConnectionAbortedError("OpenAI拒绝了请求:" + error_msg) - else: - raise RuntimeError("OpenAI拒绝了请求:" + error_msg) - if ('data: [DONE]' in chunk): break # api2d 正常完成 - json_data = json.loads(chunk.lstrip('data:'))['choices'][0] - delta = json_data["delta"] - if len(delta) == 0: break - if "role" in delta: continue - if "content" in delta: - result += delta["content"] - if not console_slience: print(delta["content"], end='') - if observe_window is not None: - # 观测窗,把已经获取的数据显示出去 - if len(observe_window) >= 1: observe_window[0] += delta["content"] - # 看门狗,如果超过期限没有喂狗,则终止 - if len(observe_window) >= 2: - if (time.time()-observe_window[1]) > watch_dog_patience: - raise RuntimeError("用户取消了程序。") - else: raise RuntimeError("意外Json结构:"+delta) - if json_data['finish_reason'] == 'length': - raise ConnectionAbortedError("正常结束,但显示Token不足,导致输出不完整,请削减单次输入的文本量。") - return result - - -def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): - """ - 发送至chatGPT,流式获取输出。 - 用于基础的对话功能。 - inputs 是本次问询的输入 - top_p, temperature是chatGPT的内部调优参数 - history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误) - chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容 - additional_fn代表点击的哪个按钮,按钮见functional.py - """ - if is_any_api_key(inputs): - chatbot._cookies['api_key'] = inputs - chatbot.append(("输入已识别为openai的api_key", what_keys(inputs))) - yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # 刷新界面 - return - elif not is_any_api_key(chatbot._cookies['api_key']): - chatbot.append((inputs, "缺少api_key。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。")) - yield from update_ui(chatbot=chatbot, history=history, msg="缺少api_key") # 刷新界面 - return - - if additional_fn is not None: - import core_functional - importlib.reload(core_functional) # 热更新prompt - core_functional = core_functional.get_core_functions() - if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话) - inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"] - - raw_input = inputs - logging.info(f'[raw_input] {raw_input}') - chatbot.append((inputs, "")) - yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面 - - try: - headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream) - except RuntimeError as e: - chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。") - yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面 - return - - history.append(inputs); history.append("") - - retry = 0 - while True: - try: - # make a POST request to the API endpoint, stream=True - from .bridge_all import model_info - endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] - response = requests.post(endpoint, headers=headers, proxies=proxies, - json=payload, stream=True, timeout=TIMEOUT_SECONDS);break - except: - retry += 1 - chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg)) - retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else "" - yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # 刷新界面 - if retry > MAX_RETRY: raise TimeoutError - - gpt_replying_buffer = "" - - is_head_of_the_stream = True - if stream: - stream_response = response.iter_lines() - while True: - try: - chunk = next(stream_response) - except StopIteration: - # 非OpenAI官方接口的出现这样的报错,OpenAI和API2D不会走这里 - from toolbox import regular_txt_to_markdown; 
tb_str = '```\n' + trimmed_format_exc() + '```' - chatbot[-1] = (chatbot[-1][0], f"[Local Message] 远程返回错误: \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk.decode())}") - yield from update_ui(chatbot=chatbot, history=history, msg="远程返回错误:" + chunk.decode()) # 刷新界面 - return - - # print(chunk.decode()[6:]) - if is_head_of_the_stream and (r'"object":"error"' not in chunk.decode()): - # 数据流的第一帧不携带content - is_head_of_the_stream = False; continue - - if chunk: - try: - chunk_decoded = chunk.decode() - # 前者API2D的 - if ('data: [DONE]' in chunk_decoded) or (len(json.loads(chunk_decoded[6:])['choices'][0]["delta"]) == 0): - # 判定为数据流的结束,gpt_replying_buffer也写完了 - logging.info(f'[response] {gpt_replying_buffer}') - break - # 处理数据流的主体 - chunkjson = json.loads(chunk_decoded[6:]) - status_text = f"finish_reason: {chunkjson['choices'][0]['finish_reason']}" - # 如果这里抛出异常,一般是文本过长,详情见get_full_error的输出 - gpt_replying_buffer = gpt_replying_buffer + json.loads(chunk_decoded[6:])['choices'][0]["delta"]["content"] - history[-1] = gpt_replying_buffer - chatbot[-1] = (history[-2], history[-1]) - yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面 - - except Exception as e: - traceback.print_exc() - yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # 刷新界面 - chunk = get_full_error(chunk, stream_response) - chunk_decoded = chunk.decode() - error_msg = chunk_decoded - if "reduce the length" in error_msg: - if len(history) >= 2: history[-1] = ""; history[-2] = "" # 清除当前溢出的输入:history[-2] 是本次输入, history[-1] 是本次输出 - history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'], - max_token_limit=(model_info[llm_kwargs['llm_model']]['max_token'])) # history至少释放二分之一 - chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长, 或历史数据过长. 历史缓存数据已部分释放, 您可以请再次尝试. (若再次失败则更可能是因为输入过长.)") - # history = [] # 清除历史 - elif "does not exist" in error_msg: - chatbot[-1] = (chatbot[-1][0], f"[Local Message] Model {llm_kwargs['llm_model']} does not exist. 模型不存在, 或者您没有获得体验资格.") - elif "Incorrect API key" in error_msg: - chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI以提供了不正确的API_KEY为由, 拒绝服务.") - elif "exceeded your current quota" in error_msg: - chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI以账户额度不足为由, 拒绝服务.") - elif "bad forward key" in error_msg: - chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. API2D账户额度不足.") - elif "Not enough point" in error_msg: - chatbot[-1] = (chatbot[-1][0], "[Local Message] Not enough point. API2D账户点数不足.") - else: - from toolbox import regular_txt_to_markdown - tb_str = '```\n' + trimmed_format_exc() + '```' - chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}") - yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面 - return - -def generate_payload(inputs, llm_kwargs, history, system_prompt, stream): - """ - 整合所有信息,选择LLM模型,生成http请求,为发送请求做准备 - """ - if not is_any_api_key(llm_kwargs['api_key']): - raise AssertionError("你提供了错误的API_KEY。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 
长效解决方案:在config.py中配置。") - - api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) - - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {api_key}" - } - - conversation_cnt = len(history) // 2 - - messages = [{"role": "system", "content": system_prompt}] - if conversation_cnt: - for index in range(0, 2*conversation_cnt, 2): - what_i_have_asked = {} - what_i_have_asked["role"] = "user" - what_i_have_asked["content"] = history[index] - what_gpt_answer = {} - what_gpt_answer["role"] = "assistant" - what_gpt_answer["content"] = history[index+1] - if what_i_have_asked["content"] != "": - if what_gpt_answer["content"] == "": continue - if what_gpt_answer["content"] == timeout_bot_msg: continue - messages.append(what_i_have_asked) - messages.append(what_gpt_answer) - else: - messages[-1]['content'] = what_gpt_answer['content'] - - what_i_ask_now = {} - what_i_ask_now["role"] = "user" - what_i_ask_now["content"] = inputs - messages.append(what_i_ask_now) - - payload = { - "model": llm_kwargs['llm_model'].strip('api2d-'), - "messages": messages, - "temperature": llm_kwargs['temperature'], # 1.0, - "top_p": llm_kwargs['top_p'], # 1.0, - "n": 1, - "stream": stream, - "presence_penalty": 0, - "frequency_penalty": 0, - } - try: - print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........") - except: - print('输入中可能存在乱码。') - return headers,payload - - diff --git a/spaces/haonanzhang/ChatGPT-BOT/modules/utils.py b/spaces/haonanzhang/ChatGPT-BOT/modules/utils.py deleted file mode 100644 index ef8963d19b16e187a3381b85325d74a1a3562d64..0000000000000000000000000000000000000000 --- a/spaces/haonanzhang/ChatGPT-BOT/modules/utils.py +++ /dev/null @@ -1,520 +0,0 @@ -# -*- coding:utf-8 -*- -from __future__ import annotations -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type -import logging -import json -import os -import datetime -import hashlib -import csv -import requests -import re -import html -import sys -import subprocess - -import gradio as gr -from pypinyin import lazy_pinyin -import tiktoken -import mdtex2html -from markdown import markdown -from pygments import highlight -from pygments.lexers import get_lexer_by_name -from pygments.formatters import HtmlFormatter - -from modules.presets import * -import modules.shared as shared - -logging.basicConfig( - level=logging.INFO, - format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s", -) - -if TYPE_CHECKING: - from typing import TypedDict - - class DataframeData(TypedDict): - headers: List[str] - data: List[List[str | int | bool]] - - -def count_token(message): - encoding = tiktoken.get_encoding("cl100k_base") - input_str = f"role: {message['role']}, content: {message['content']}" - length = len(encoding.encode(input_str)) - return length - - -def markdown_to_html_with_syntax_highlight(md_str): - def replacer(match): - lang = match.group(1) or "text" - code = match.group(2) - - try: - lexer = get_lexer_by_name(lang, stripall=True) - except ValueError: - lexer = get_lexer_by_name("text", stripall=True) - - formatter = HtmlFormatter() - highlighted_code = highlight(code, lexer, formatter) - - return f'
          {highlighted_code}
          ' - - code_block_pattern = r"```(\w+)?\n([\s\S]+?)\n```" - md_str = re.sub(code_block_pattern, replacer, md_str, flags=re.MULTILINE) - - html_str = markdown(md_str) - return html_str - - -def normalize_markdown(md_text: str) -> str: - lines = md_text.split("\n") - normalized_lines = [] - inside_list = False - - for i, line in enumerate(lines): - if re.match(r"^(\d+\.|-|\*|\+)\s", line.strip()): - if not inside_list and i > 0 and lines[i - 1].strip() != "": - normalized_lines.append("") - inside_list = True - normalized_lines.append(line) - elif inside_list and line.strip() == "": - if i < len(lines) - 1 and not re.match( - r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip() - ): - normalized_lines.append(line) - continue - else: - inside_list = False - normalized_lines.append(line) - - return "\n".join(normalized_lines) - - -def convert_mdtext(md_text): - code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL) - inline_code_pattern = re.compile(r"`(.*?)`", re.DOTALL) - code_blocks = code_block_pattern.findall(md_text) - non_code_parts = code_block_pattern.split(md_text)[::2] - - result = [] - for non_code, code in zip(non_code_parts, code_blocks + [""]): - if non_code.strip(): - non_code = normalize_markdown(non_code) - if inline_code_pattern.search(non_code): - result.append(markdown(non_code, extensions=["tables"])) - else: - result.append(mdtex2html.convert(non_code, extensions=["tables"])) - if code.strip(): - # _, code = detect_language(code) # 暂时去除代码高亮功能,因为在大段代码的情况下会出现问题 - # code = code.replace("\n\n", "\n") # 暂时去除代码中的空行,因为在大段代码的情况下会出现问题 - code = f"\n```{code}\n\n```" - code = markdown_to_html_with_syntax_highlight(code) - result.append(code) - result = "".join(result) - result += ALREADY_CONVERTED_MARK - return result - - -def convert_asis(userinput): - return ( - f'

          {html.escape(userinput)}

          ' - + ALREADY_CONVERTED_MARK - ) - - -def detect_converted_mark(userinput): - if userinput.endswith(ALREADY_CONVERTED_MARK): - return True - else: - return False - - -def detect_language(code): - if code.startswith("\n"): - first_line = "" - else: - first_line = code.strip().split("\n", 1)[0] - language = first_line.lower() if first_line else "" - code_without_language = code[len(first_line) :].lstrip() if first_line else code - return language, code_without_language - - -def construct_text(role, text): - return {"role": role, "content": text} - - -def construct_user(text): - return construct_text("user", text) - - -def construct_system(text): - return construct_text("system", text) - - -def construct_assistant(text): - return construct_text("assistant", text) - - -def construct_token_message(token, stream=False): - return f"Token 计数: {token}" - - -def delete_first_conversation(history, previous_token_count): - if history: - del history[:2] - del previous_token_count[0] - return ( - history, - previous_token_count, - construct_token_message(sum(previous_token_count)), - ) - - -def delete_last_conversation(chatbot, history, previous_token_count): - if len(chatbot) > 0 and standard_error_msg in chatbot[-1][1]: - logging.info("由于包含报错信息,只删除chatbot记录") - chatbot.pop() - return chatbot, history - if len(history) > 0: - logging.info("删除了一组对话历史") - history.pop() - history.pop() - if len(chatbot) > 0: - logging.info("删除了一组chatbot对话") - chatbot.pop() - if len(previous_token_count) > 0: - logging.info("删除了一组对话的token计数记录") - previous_token_count.pop() - return ( - chatbot, - history, - previous_token_count, - construct_token_message(sum(previous_token_count)), - ) - - -def save_file(filename, system, history, chatbot): - logging.info("保存对话历史中……") - os.makedirs(HISTORY_DIR, exist_ok=True) - if filename.endswith(".json"): - json_s = {"system": system, "history": history, "chatbot": chatbot} - print(json_s) - with open(os.path.join(HISTORY_DIR, filename), "w") as f: - json.dump(json_s, f) - elif filename.endswith(".md"): - md_s = f"system: \n- {system} \n" - for data in history: - md_s += f"\n{data['role']}: \n- {data['content']} \n" - with open(os.path.join(HISTORY_DIR, filename), "w", encoding="utf8") as f: - f.write(md_s) - logging.info("保存对话历史完毕") - return os.path.join(HISTORY_DIR, filename) - - -def save_chat_history(filename, system, history, chatbot): - if filename == "": - return - if not filename.endswith(".json"): - filename += ".json" - return save_file(filename, system, history, chatbot) - - -def export_markdown(filename, system, history, chatbot): - if filename == "": - return - if not filename.endswith(".md"): - filename += ".md" - return save_file(filename, system, history, chatbot) - - -def load_chat_history(filename, system, history, chatbot): - logging.info("加载对话历史中……") - if type(filename) != str: - filename = filename.name - try: - with open(os.path.join(HISTORY_DIR, filename), "r") as f: - json_s = json.load(f) - try: - if type(json_s["history"][0]) == str: - logging.info("历史记录格式为旧版,正在转换……") - new_history = [] - for index, item in enumerate(json_s["history"]): - if index % 2 == 0: - new_history.append(construct_user(item)) - else: - new_history.append(construct_assistant(item)) - json_s["history"] = new_history - logging.info(new_history) - except: - # 没有对话历史 - pass - logging.info("加载对话历史完毕") - return filename, json_s["system"], json_s["history"], json_s["chatbot"] - except FileNotFoundError: - logging.info("没有找到对话历史文件,不执行任何操作") - return filename, system, history, chatbot - 
- -def sorted_by_pinyin(list): - return sorted(list, key=lambda char: lazy_pinyin(char)[0][0]) - - -def get_file_names(dir, plain=False, filetypes=[".json"]): - logging.info(f"获取文件名列表,目录为{dir},文件类型为{filetypes},是否为纯文本列表{plain}") - files = [] - try: - for type in filetypes: - files += [f for f in os.listdir(dir) if f.endswith(type)] - except FileNotFoundError: - files = [] - files = sorted_by_pinyin(files) - if files == []: - files = [""] - if plain: - return files - else: - return gr.Dropdown.update(choices=files) - - -def get_history_names(plain=False): - logging.info("获取历史记录文件名列表") - return get_file_names(HISTORY_DIR, plain) - - -def load_template(filename, mode=0): - logging.info(f"加载模板文件{filename},模式为{mode}(0为返回字典和下拉菜单,1为返回下拉菜单,2为返回字典)") - lines = [] - logging.info("Loading template...") - if filename.endswith(".json"): - with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as f: - lines = json.load(f) - lines = [[i["act"], i["prompt"]] for i in lines] - else: - with open( - os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8" - ) as csvfile: - reader = csv.reader(csvfile) - lines = list(reader) - lines = lines[1:] - if mode == 1: - return sorted_by_pinyin([row[0] for row in lines]) - elif mode == 2: - return {row[0]: row[1] for row in lines} - else: - choices = sorted_by_pinyin([row[0] for row in lines]) - return {row[0]: row[1] for row in lines}, gr.Dropdown.update( - choices=choices, value=choices[0] - ) - - -def get_template_names(plain=False): - logging.info("获取模板文件名列表") - return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", "json"]) - - -def get_template_content(templates, selection, original_system_prompt): - logging.info(f"应用模板中,选择为{selection},原始系统提示为{original_system_prompt}") - try: - return templates[selection] - except: - return original_system_prompt - - -def reset_state(): - logging.info("重置状态") - return [], [], [], construct_token_message(0) - - -def reset_textbox(): - logging.debug("重置文本框") - return gr.update(value="") - - -def reset_default(): - newurl = shared.state.reset_api_url() - os.environ.pop("HTTPS_PROXY", None) - os.environ.pop("https_proxy", None) - return gr.update(value=newurl), gr.update(value=""), "API URL 和代理已重置" - - -def change_api_url(url): - shared.state.set_api_url(url) - msg = f"API地址更改为了{url}" - logging.info(msg) - return msg - - -def change_proxy(proxy): - os.environ["HTTPS_PROXY"] = proxy - msg = f"代理更改为了{proxy}" - logging.info(msg) - return msg - - -def hide_middle_chars(s): - if s is None: - return "" - if len(s) <= 8: - return s - else: - head = s[:4] - tail = s[-4:] - hidden = "*" * (len(s) - 8) - return head + hidden + tail - - -def submit_key(key): - key = key.strip() - msg = f"API密钥更改为了{hide_middle_chars(key)}" - logging.info(msg) - return key, msg - - -def replace_today(prompt): - today = datetime.datetime.today().strftime("%Y-%m-%d") - return prompt.replace("{current_date}", today) - - -def get_geoip(): - try: - response = requests.get("https://ipapi.co/json/", timeout=5) - data = response.json() - except: - data = {"error": True, "reason": "连接ipapi失败"} - if "error" in data.keys(): - logging.warning(f"无法获取IP地址信息。\n{data}") - if data["reason"] == "RateLimited": - return ( - f"获取IP地理位置失败,因为达到了检测IP的速率限制。聊天功能可能仍然可用。" - ) - else: - return f"获取IP地理位置失败。原因:{data['reason']}。你仍然可以使用聊天功能。" - else: - country = data["country_name"] - if country == "China": - text = "**您的IP区域:中国。请立即检查代理设置,在不受支持的地区使用API可能导致账号被封禁。**" - else: - text = f"您的IP区域:{country}。" - logging.info(text) - return text - - -def find_n(lst, max_num): 
- n = len(lst) - total = sum(lst) - - if total < max_num: - return n - - for i in range(len(lst)): - if total - lst[i] < max_num: - return n - i - 1 - total = total - lst[i] - return 1 - - -def start_outputing(): - logging.debug("显示取消按钮,隐藏发送按钮") - return gr.Button.update(visible=False), gr.Button.update(visible=True) - - -def end_outputing(): - return ( - gr.Button.update(visible=True), - gr.Button.update(visible=False), - ) - - -def cancel_outputing(): - logging.info("中止输出……") - shared.state.interrupt() - - -def transfer_input(inputs): - # 一次性返回,降低延迟 - textbox = reset_textbox() - outputing = start_outputing() - return ( - inputs, - gr.update(value=""), - gr.Button.update(visible=True), - gr.Button.update(visible=False), - ) - - -def get_proxies(): - # 获取环境变量中的代理设置 - http_proxy = os.environ.get("HTTP_PROXY") or os.environ.get("http_proxy") - https_proxy = os.environ.get("HTTPS_PROXY") or os.environ.get("https_proxy") - - # 如果存在代理设置,使用它们 - proxies = {} - if http_proxy: - logging.info(f"使用 HTTP 代理: {http_proxy}") - proxies["http"] = http_proxy - if https_proxy: - logging.info(f"使用 HTTPS 代理: {https_proxy}") - proxies["https"] = https_proxy - - if proxies == {}: - proxies = None - - return proxies - -def run(command, desc=None, errdesc=None, custom_env=None, live=False): - if desc is not None: - print(desc) - if live: - result = subprocess.run(command, shell=True, env=os.environ if custom_env is None else custom_env) - if result.returncode != 0: - raise RuntimeError(f"""{errdesc or 'Error running command'}. -Command: {command} -Error code: {result.returncode}""") - - return "" - result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env) - if result.returncode != 0: - message = f"""{errdesc or 'Error running command'}. -Command: {command} -Error code: {result.returncode} -stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else ''} -stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else ''} -""" - raise RuntimeError(message) - return result.stdout.decode(encoding="utf8", errors="ignore") - -def versions_html(): - git = os.environ.get('GIT', "git") - python_version = ".".join([str(x) for x in sys.version_info[0:3]]) - try: - commit_hash = run(f"{git} rev-parse HEAD").strip() - except Exception: - commit_hash = "" - if commit_hash != "": - short_commit = commit_hash[0:7] - commit_info = f"{short_commit}" - else: - commit_info = "unknown \U0001F615" - return f""" -Python: {python_version} - •  -Gradio: {gr.__version__} - •  -Commit: {commit_info} -""" - -def add_source_numbers(lst, source_name = "Source", use_source = True): - if use_source: - return [f'[{idx+1}]\t "{item[0]}"\n{source_name}: {item[1]}' for idx, item in enumerate(lst)] - else: - return [f'[{idx+1}]\t "{item}"' for idx, item in enumerate(lst)] - -def add_details(lst): - nodes = [] - for index, txt in enumerate(lst): - brief = txt[:25].replace("\n", "") - nodes.append( - f"
          {brief}...

          {txt}

          " - ) - return nodes diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/modeling/backbone/__init__.py b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/modeling/backbone/__init__.py deleted file mode 100644 index d477fb1e596f77b4c24f2b2c66b528bf2f83b00e..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/detectron2/modeling/backbone/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -from .build import build_backbone, BACKBONE_REGISTRY # noqa F401 isort:skip - -from .backbone import Backbone -from .fpn import FPN -from .resnet import ResNet, ResNetBlockBase, build_resnet_backbone, make_stage - -__all__ = [k for k in globals().keys() if not k.startswith("_")] -# TODO can expose more resnet blocks after careful consideration diff --git a/spaces/heiyubili/bingo/tests/kblob.ts b/spaces/heiyubili/bingo/tests/kblob.ts deleted file mode 100644 index 9e15b41c1c94a690beb61b23cdb42fc78767ccd2..0000000000000000000000000000000000000000 --- a/spaces/heiyubili/bingo/tests/kblob.ts +++ /dev/null @@ -1,27 +0,0 @@ -import FormData from 'form-data' - -import { fetch } from '@/lib/isomorphic' - -const formData = new FormData() - -const knowledgeRequest = {"imageInfo":{"url":"https://www.baidu.com/img/PCfb_5bf082d29588c07f842ccde3f97243ea.png"},"knowledgeRequest":{"invokedSkills":["ImageById"],"subscriptionId":"Bing.Chat.Multimodal","invokedSkillsRequestData":{"enableFaceBlur":true},"convoData":{"convoid":"51D|BingProdUnAuthenticatedUsers|E3DCA904FF236C67C3450163BCEC64CFF3F618CC8A4AFD75FD518F5ED0ADA080","convotone":"Creative"}}} - -formData.append('knowledgeRequest', JSON.stringify(knowledgeRequest)) - - -fetch('https://bing.vcanbb.top/images/kblob', - { - method: 'POST', - body: formData.getBuffer(), - headers: { - "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"", - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": "\"Windows\"", - "Referer": "https://bing.vcanbb.top/web/index.html", - "Referrer-Policy": "origin-when-cross-origin", - ...formData.getHeaders() - } - - } -).then(res => res.text()) -.then(res => console.log('res', res)) diff --git a/spaces/housexu123/bingo-2.0/Dockerfile b/spaces/housexu123/bingo-2.0/Dockerfile deleted file mode 100644 index 3aa2b29b5fc4fa8b8238955acd7f1fde13ce5e1a..0000000000000000000000000000000000000000 --- a/spaces/housexu123/bingo-2.0/Dockerfile +++ /dev/null @@ -1,36 +0,0 @@ -FROM node:18 - - -ARG DEBIAN_FRONTEND=noninteractive - -ENV BING_HEADER "" - -# Set home to the user's home directory -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH - -# Set up a new user named "user" with user ID 1000 -RUN useradd -o -u 1000 user && mkdir -p $HOME/app && chown -R user $HOME - -# Switch to the "user" user -USER user - -# Set the working directory to the user's home directory -WORKDIR $HOME/app - -# Install app dependencies -# A wildcard is used to ensure both package.json AND package-lock.json are copied -# where available (npm@5+) -COPY --chown=user package*.json $HOME/app/ - -RUN npm install - -# Copy the current directory contents into the container at $HOME/app setting the owner to the user -COPY --chown=user . 
$HOME/app/ - -RUN npm run build - -ENV PORT 7860 -EXPOSE 7860 - -CMD npm start diff --git a/spaces/huang4414/White-box-Cartoonization/README.md b/spaces/huang4414/White-box-Cartoonization/README.md deleted file mode 100644 index 9860239cf42c94e385faaaa75a85311e010d64f7..0000000000000000000000000000000000000000 --- a/spaces/huang4414/White-box-Cartoonization/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -python_version: 3.7 -title: White Box Cartoonization -emoji: 📚 -colorFrom: purple -colorTo: green -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: hylee/White-box-Cartoonization ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/huggan/projected_gan_art/blocks.py b/spaces/huggan/projected_gan_art/blocks.py deleted file mode 100644 index 78bd113bac1cd6486ede92b1ae8d5adfb678eb81..0000000000000000000000000000000000000000 --- a/spaces/huggan/projected_gan_art/blocks.py +++ /dev/null @@ -1,325 +0,0 @@ -import functools -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.utils import spectral_norm - - -### single layers - - -def conv2d(*args, **kwargs): - return spectral_norm(nn.Conv2d(*args, **kwargs)) - - -def convTranspose2d(*args, **kwargs): - return spectral_norm(nn.ConvTranspose2d(*args, **kwargs)) - - -def embedding(*args, **kwargs): - return spectral_norm(nn.Embedding(*args, **kwargs)) - - -def linear(*args, **kwargs): - return spectral_norm(nn.Linear(*args, **kwargs)) - - -def NormLayer(c, mode='batch'): - if mode == 'group': - return nn.GroupNorm(c//2, c) - elif mode == 'batch': - return nn.BatchNorm2d(c) - - -### Activations - - -class GLU(nn.Module): - def forward(self, x): - nc = x.size(1) - assert nc % 2 == 0, 'channels dont divide 2!' 
- nc = int(nc/2) - return x[:, :nc] * torch.sigmoid(x[:, nc:]) - - -class Swish(nn.Module): - def forward(self, feat): - return feat * torch.sigmoid(feat) - - -### Upblocks - - -class InitLayer(nn.Module): - def __init__(self, nz, channel, sz=4): - super().__init__() - - self.init = nn.Sequential( - convTranspose2d(nz, channel*2, sz, 1, 0, bias=False), - NormLayer(channel*2), - GLU(), - ) - - def forward(self, noise): - noise = noise.view(noise.shape[0], -1, 1, 1) - return self.init(noise) - - -def UpBlockSmall(in_planes, out_planes): - block = nn.Sequential( - nn.Upsample(scale_factor=2, mode='nearest'), - conv2d(in_planes, out_planes*2, 3, 1, 1, bias=False), - NormLayer(out_planes*2), GLU()) - return block - - -class UpBlockSmallCond(nn.Module): - def __init__(self, in_planes, out_planes, z_dim): - super().__init__() - self.in_planes = in_planes - self.out_planes = out_planes - self.up = nn.Upsample(scale_factor=2, mode='nearest') - self.conv = conv2d(in_planes, out_planes*2, 3, 1, 1, bias=False) - - which_bn = functools.partial(CCBN, which_linear=linear, input_size=z_dim) - self.bn = which_bn(2*out_planes) - self.act = GLU() - - def forward(self, x, c): - x = self.up(x) - x = self.conv(x) - x = self.bn(x, c) - x = self.act(x) - return x - - -def UpBlockBig(in_planes, out_planes): - block = nn.Sequential( - nn.Upsample(scale_factor=2, mode='nearest'), - conv2d(in_planes, out_planes*2, 3, 1, 1, bias=False), - NoiseInjection(), - NormLayer(out_planes*2), GLU(), - conv2d(out_planes, out_planes*2, 3, 1, 1, bias=False), - NoiseInjection(), - NormLayer(out_planes*2), GLU() - ) - return block - - -class UpBlockBigCond(nn.Module): - def __init__(self, in_planes, out_planes, z_dim): - super().__init__() - self.in_planes = in_planes - self.out_planes = out_planes - self.up = nn.Upsample(scale_factor=2, mode='nearest') - self.conv1 = conv2d(in_planes, out_planes*2, 3, 1, 1, bias=False) - self.conv2 = conv2d(out_planes, out_planes*2, 3, 1, 1, bias=False) - - which_bn = functools.partial(CCBN, which_linear=linear, input_size=z_dim) - self.bn1 = which_bn(2*out_planes) - self.bn2 = which_bn(2*out_planes) - self.act = GLU() - self.noise = NoiseInjection() - - def forward(self, x, c): - # block 1 - x = self.up(x) - x = self.conv1(x) - x = self.noise(x) - x = self.bn1(x, c) - x = self.act(x) - - # block 2 - x = self.conv2(x) - x = self.noise(x) - x = self.bn2(x, c) - x = self.act(x) - - return x - - -class SEBlock(nn.Module): - def __init__(self, ch_in, ch_out): - super().__init__() - self.main = nn.Sequential( - nn.AdaptiveAvgPool2d(4), - conv2d(ch_in, ch_out, 4, 1, 0, bias=False), - Swish(), - conv2d(ch_out, ch_out, 1, 1, 0, bias=False), - nn.Sigmoid(), - ) - - def forward(self, feat_small, feat_big): - return feat_big * self.main(feat_small) - - -### Downblocks - - -class SeparableConv2d(nn.Module): - def __init__(self, in_channels, out_channels, kernel_size, bias=False): - super(SeparableConv2d, self).__init__() - self.depthwise = conv2d(in_channels, in_channels, kernel_size=kernel_size, - groups=in_channels, bias=bias, padding=1) - self.pointwise = conv2d(in_channels, out_channels, - kernel_size=1, bias=bias) - - def forward(self, x): - out = self.depthwise(x) - out = self.pointwise(out) - return out - - -class DownBlock(nn.Module): - def __init__(self, in_planes, out_planes, separable=False): - super().__init__() - if not separable: - self.main = nn.Sequential( - conv2d(in_planes, out_planes, 4, 2, 1), - NormLayer(out_planes), - nn.LeakyReLU(0.2, inplace=True), - ) - else: - self.main = 
nn.Sequential( - SeparableConv2d(in_planes, out_planes, 3), - NormLayer(out_planes), - nn.LeakyReLU(0.2, inplace=True), - nn.AvgPool2d(2, 2), - ) - - def forward(self, feat): - return self.main(feat) - - -class DownBlockPatch(nn.Module): - def __init__(self, in_planes, out_planes, separable=False): - super().__init__() - self.main = nn.Sequential( - DownBlock(in_planes, out_planes, separable), - conv2d(out_planes, out_planes, 1, 1, 0, bias=False), - NormLayer(out_planes), - nn.LeakyReLU(0.2, inplace=True), - ) - - def forward(self, feat): - return self.main(feat) - - -### CSM - - -class ResidualConvUnit(nn.Module): - def __init__(self, cin, activation, bn): - super().__init__() - self.conv = nn.Conv2d(cin, cin, kernel_size=3, stride=1, padding=1, bias=True) - self.skip_add = nn.quantized.FloatFunctional() - - def forward(self, x): - return self.skip_add.add(self.conv(x), x) - - -class FeatureFusionBlock(nn.Module): - def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, lowest=False): - super().__init__() - - self.deconv = deconv - self.align_corners = align_corners - - self.expand = expand - out_features = features - if self.expand==True: - out_features = features//2 - - self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1) - self.skip_add = nn.quantized.FloatFunctional() - - def forward(self, *xs): - output = xs[0] - - if len(xs) == 2: - output = self.skip_add.add(output, xs[1]) - - output = nn.functional.interpolate( - output, scale_factor=2, mode="bilinear", align_corners=self.align_corners - ) - - output = self.out_conv(output) - - return output - - -### Misc - - -class NoiseInjection(nn.Module): - def __init__(self): - super().__init__() - self.weight = nn.Parameter(torch.zeros(1), requires_grad=True) - - def forward(self, feat, noise=None): - if noise is None: - batch, _, height, width = feat.shape - noise = torch.randn(batch, 1, height, width).to(feat.device) - - return feat + self.weight * noise - - -class CCBN(nn.Module): - ''' conditional batchnorm ''' - def __init__(self, output_size, input_size, which_linear, eps=1e-5, momentum=0.1): - super().__init__() - self.output_size, self.input_size = output_size, input_size - - # Prepare gain and bias layers - self.gain = which_linear(input_size, output_size) - self.bias = which_linear(input_size, output_size) - - # epsilon to avoid dividing by 0 - self.eps = eps - # Momentum - self.momentum = momentum - - self.register_buffer('stored_mean', torch.zeros(output_size)) - self.register_buffer('stored_var', torch.ones(output_size)) - - def forward(self, x, y): - # Calculate class-conditional gains and biases - gain = (1 + self.gain(y)).view(y.size(0), -1, 1, 1) - bias = self.bias(y).view(y.size(0), -1, 1, 1) - out = F.batch_norm(x, self.stored_mean, self.stored_var, None, None, - self.training, 0.1, self.eps) - return out * gain + bias - - -class Interpolate(nn.Module): - """Interpolation module.""" - - def __init__(self, size, mode='bilinear', align_corners=False): - """Init. - Args: - scale_factor (float): scaling - mode (str): interpolation mode - """ - super(Interpolate, self).__init__() - - self.interp = nn.functional.interpolate - self.size = size - self.mode = mode - self.align_corners = align_corners - - def forward(self, x): - """Forward pass. 
- Args: - x (tensor): input - Returns: - tensor: interpolated data - """ - - x = self.interp( - x, - size=self.size, - mode=self.mode, - align_corners=self.align_corners, - ) - - return x diff --git a/spaces/huggingface-projects/deepfloydif-bot/README.md b/spaces/huggingface-projects/deepfloydif-bot/README.md deleted file mode 100644 index 35b6facf44e145aa2099673c4a1d73e90160741f..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/deepfloydif-bot/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: deepfloydif-bot -emoji: 📈 -colorFrom: purple -colorTo: green -sdk: gradio -sdk_version: 3.44.4 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/hugginglearners/Identify_which_flower/app.py b/spaces/hugginglearners/Identify_which_flower/app.py deleted file mode 100644 index 42c3e011015adc241fcef6646802ca259485b606..0000000000000000000000000000000000000000 --- a/spaces/hugginglearners/Identify_which_flower/app.py +++ /dev/null @@ -1,39 +0,0 @@ -import gradio as gr -from huggingface_hub import from_pretrained_fastai -from fastai.vision.all import * -repo_id = "hugginglearners/flowers_101_convnext_model" - -learn = from_pretrained_fastai(repo_id) -labels = learn.dls.vocab -EXAMPLES_PATH = Path('./examples') - - -def predict(img): - img = PILImage.create(img) - _pred, _pred_w_idx, probs = learn.predict(img) - # gradio doesn't support tensors, so converting to float - labels_probs = {labels[i]: float(probs[i]) for i, _ in enumerate(labels)} - return labels_probs - -interface_options = { - "title": "Identify which flower it is?", - "description": "I am terribly bad at remembering names of flowers and trees and it's often difficult to fathom how diverse our natural world is. 
There are over 5,000 species of mammals, 10,000 species of birds, 30,000 species of fish – and astonishingly, over 400,000 different types of flowers.\n Identify which flower variety it is by uploading your images.", - "interpretation": "default", - "layout": "horizontal", - "allow_flagging": "never", - "examples": [f'{EXAMPLES_PATH}/{f.name}' for f in EXAMPLES_PATH.iterdir()], -} - -demo = gr.Interface( - fn=predict, - inputs=gr.inputs.Image(shape=(192, 192)), - outputs=gr.outputs.Label(num_top_classes=3), - **interface_options, -) - -launch_options = { - "enable_queue": True, - "share": True, -} - -demo.launch(**launch_options) \ No newline at end of file diff --git a/spaces/hysts-samples/save-user-preferences/README.md b/spaces/hysts-samples/save-user-preferences/README.md deleted file mode 100644 index 389499860fc6aaff63881c0e8c2ec04569da6176..0000000000000000000000000000000000000000 --- a/spaces/hysts-samples/save-user-preferences/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Save user preferences -emoji: ⚡ -colorFrom: red -colorTo: purple -sdk: gradio -sdk_version: 3.42.0 -python_version: 3.10.12 -app_file: app.py -pinned: false -license: mit -duplicated_from: hysts-samples/base-space ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ifey/chatdemo/AppPub/Http/UserHttp.py b/spaces/ifey/chatdemo/AppPub/Http/UserHttp.py deleted file mode 100644 index 98438c133faffac300a9abdcca08af44937f41a8..0000000000000000000000000000000000000000 --- a/spaces/ifey/chatdemo/AppPub/Http/UserHttp.py +++ /dev/null @@ -1,16 +0,0 @@ -import requests -#獲取用戶的服務器信息 -def GetUserData(sub): - # 发送GET请求 - response = requests.get(f'https://tilents.sinaapp.com/assistant/search_userInfo.php?sub={sub}') - # 检查响应状态码 - if response.status_code == 200: - # 成功获取数据 - data = response.json() # 如果响应是JSON格式的数据 - print(f"GetUserData{data}") - return data - else: - print(f'Failed to fetch data. Status code: {response.status_code}') - return None - -GetUserData() \ No newline at end of file diff --git a/spaces/inamXcontru/PoeticTTS/Andy James Modern Metal Series 1 Download Hit [BETTER].md b/spaces/inamXcontru/PoeticTTS/Andy James Modern Metal Series 1 Download Hit [BETTER].md deleted file mode 100644 index c074be30d56dd893fb84c6cd6cf00d2f2d5d7178..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Andy James Modern Metal Series 1 Download Hit [BETTER].md +++ /dev/null @@ -1,12 +0,0 @@ -

          Andy James Modern Metal Series 1 Download Hit


          DOWNLOAD === https://gohhs.com/2uz5ub



          -
          -In particular, the style of music he's playing is something we haven't heard before - an aggressive, experimental fusion of electronic, rock, and metal. So if you like epic guitar solos, sludgy beats, and what it sounds like when a crazy kraut comes over to give a class in "modern world history," you're gonna love this. This is the last release with Andy on bass, as he's moving on to a new project now, so grab this while you can! Check out the live tracks from Andy's solo shows! (Also, I'd love it if you posted this on your forum, as it's a great way to get the word out!) - -A bunch of excellent guitars from your friendly neighbourhood, "The Panoply" shop. The list below is ordered by the date you purchased the guitar (from the main website) - the first is from my own stock, the second is a sale purchase from the shop. I've linked the guitar to the shop's page where you can buy it or find more info. If you're looking for something more specific, you can always filter by brand name or price range. - -I currently have just a single 12" Low "G" string on the guitar, as I am a bassist. I can play it just fine in low "G", but the 2nd string is much more responsive, for example as a slap bassist. I am not sure how to modulate this, but I am sure there is some mod/tech out there. I have read that the long string can be plucked easier than a shorter one, so I am looking into tuning the guitar for this use. - -This is the best value in the hobby. If you want to start a band or just practice, you can't go wrong with these lower priced guitars. I've played them for a decade, through a basement amp and gigged with friends. We've been through a few. I've bought others and none has given me the reliability or sound quality. I bought my first electric guitar at one of the stores where I played with my high school band. It was a sunburst Les Paul Jr. That guitar is still going strong and I still use it for practice. I just bought a Les Paul Jr. copy and I couldn't be happier. It's solid. I just bought my first Taylor, which I love. I have a LeBlanc Custom that I bought used from one of my favorite players. He has great taste. 4fefd39f24
          -
          -
          -

          diff --git a/spaces/inamXcontru/PoeticTTS/Como Jugar Dead Space 3 Con Hamachi Pcl.md b/spaces/inamXcontru/PoeticTTS/Como Jugar Dead Space 3 Con Hamachi Pcl.md deleted file mode 100644 index 9110c941a1e24f19232d48dcf1694c9abc8f0dbb..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Como Jugar Dead Space 3 Con Hamachi Pcl.md +++ /dev/null @@ -1,6 +0,0 @@ -

          Como Jugar Dead Space 3 Con Hamachi Pcl


          Download Zip » https://gohhs.com/2uz4B1



          -
          -singhgad college of engg mr fairchild installing age of empires iii flogbrasil terra ... dual monitors lagasca 67 dead dingos donger e mail program reviews johnyy ... extreme card villa caparra puerto rico pcl 5c driver i ride or die the graduation ... giudici md in space toys www campusnet csuohio edu laurentius baby isando ... 1fdad05405
          -
          -
          -

          diff --git a/spaces/innnky/vits-nyaru/text/cleaners.py b/spaces/innnky/vits-nyaru/text/cleaners.py deleted file mode 100644 index 6adb2493e698a5abfd9c106b83a8187e206845e1..0000000000000000000000000000000000000000 --- a/spaces/innnky/vits-nyaru/text/cleaners.py +++ /dev/null @@ -1,332 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - -import re -from unidecode import unidecode -import pyopenjtalk -from jamo import h2j, j2hcj -from pypinyin import lazy_pinyin,BOPOMOFO -import jieba - - -# This is a list of Korean classifiers preceded by pure Korean numerals. -_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통' - -# Regular expression matching whitespace: -_whitespace_re = re.compile(r'\s+') - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - -# List of (hangul, hangul divided) pairs: -_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ㄳ', 'ㄱㅅ'), - ('ㄵ', 'ㄴㅈ'), - ('ㄶ', 'ㄴㅎ'), - ('ㄺ', 'ㄹㄱ'), - ('ㄻ', 'ㄹㅁ'), - ('ㄼ', 'ㄹㅂ'), - ('ㄽ', 'ㄹㅅ'), - ('ㄾ', 'ㄹㅌ'), - ('ㄿ', 'ㄹㅍ'), - ('ㅀ', 'ㄹㅎ'), - ('ㅄ', 'ㅂㅅ'), - ('ㅘ', 'ㅗㅏ'), - ('ㅙ', 'ㅗㅐ'), - ('ㅚ', 'ㅗㅣ'), - ('ㅝ', 'ㅜㅓ'), - ('ㅞ', 'ㅜㅔ'), - ('ㅟ', 'ㅜㅣ'), - ('ㅢ', 'ㅡㅣ'), - ('ㅑ', 'ㅣㅏ'), - ('ㅒ', 'ㅣㅐ'), - ('ㅕ', 'ㅣㅓ'), - ('ㅖ', 'ㅣㅔ'), - ('ㅛ', 'ㅣㅗ'), - ('ㅠ', 'ㅣㅜ') -]] - -# List of (Latin alphabet, hangul) pairs: -_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ - ('a', '에이'), - ('b', '비'), - ('c', '시'), - ('d', '디'), - ('e', '이'), - ('f', '에프'), - ('g', '지'), - ('h', '에이치'), - ('i', '아이'), - ('j', '제이'), - ('k', '케이'), - ('l', '엘'), - ('m', '엠'), - ('n', '엔'), - ('o', '오'), - ('p', '피'), - ('q', '큐'), - ('r', '아르'), - ('s', '에스'), - ('t', '티'), - ('u', '유'), - ('v', '브이'), - ('w', '더블유'), - ('x', '엑스'), - ('y', '와이'), - ('z', '제트') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def lowercase(text): - return text.lower() - - -def collapse_whitespace(text): - return re.sub(_whitespace_re, ' ', text) - - -def convert_to_ascii(text): - return unidecode(text) - - -def latin_to_hangul(text): - for regex, replacement in _latin_to_hangul: - text = re.sub(regex, replacement, text) - return text - - -def divide_hangul(text): - for regex, replacement in _hangul_divided: - text = re.sub(regex, replacement, text) - return text - - -def hangul_number(num, sino=True): - '''Reference https://github.com/Kyubyong/g2pK''' - num = re.sub(',', '', num) - - if num == '0': - return '영' - if not sino and num == '20': - return '스무' - - digits = '123456789' - names = '일이삼사오육칠팔구' - digit2name = {d: n for d, n in zip(digits, names)} - - modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉' - decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔' - digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())} - digit2dec = {d: dec for d, dec in zip(digits, decimals.split())} - - spelledout = [] - for i, digit in enumerate(num): - i = len(num) - i - 1 - if sino: - if i == 0: - name = digit2name.get(digit, '') - elif i == 1: - name = digit2name.get(digit, '') + '십' - name = name.replace('일십', '십') - else: - if i == 0: - name = digit2mod.get(digit, '') - elif i == 1: - name = digit2dec.get(digit, '') - if digit == '0': - if i % 4 == 0: - last_three = spelledout[-min(3, len(spelledout)):] - if ''.join(last_three) == '': - spelledout.append('') - continue - else: - spelledout.append('') - continue - if i == 2: - name = digit2name.get(digit, '') + '백' - name = name.replace('일백', '백') - elif i == 3: - name = digit2name.get(digit, '') + '천' - name = name.replace('일천', '천') - elif i == 4: - name = digit2name.get(digit, '') + '만' - name = name.replace('일만', '만') - elif i == 5: - name = digit2name.get(digit, '') + '십' - name = name.replace('일십', '십') - elif i == 6: - name = digit2name.get(digit, '') + '백' - name = name.replace('일백', '백') - elif i == 7: - name = digit2name.get(digit, '') + '천' - name = 
name.replace('일천', '천') - elif i == 8: - name = digit2name.get(digit, '') + '억' - elif i == 9: - name = digit2name.get(digit, '') + '십' - elif i == 10: - name = digit2name.get(digit, '') + '백' - elif i == 11: - name = digit2name.get(digit, '') + '천' - elif i == 12: - name = digit2name.get(digit, '') + '조' - elif i == 13: - name = digit2name.get(digit, '') + '십' - elif i == 14: - name = digit2name.get(digit, '') + '백' - elif i == 15: - name = digit2name.get(digit, '') + '천' - spelledout.append(name) - return ''.join(elem for elem in spelledout) - - -def number_to_hangul(text): - '''Reference https://github.com/Kyubyong/g2pK''' - tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text)) - for token in tokens: - num, classifier = token - if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers: - spelledout = hangul_number(num, sino=False) - else: - spelledout = hangul_number(num, sino=True) - text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}') - # digit by digit for remaining digits - digits = '0123456789' - names = '영일이삼사오육칠팔구' - for d, n in zip(digits, names): - text = text.replace(d, n) - return text - - -def basic_cleaners(text): - '''Basic pipeline that lowercases and collapses whitespace without transliteration.''' - text = lowercase(text) - text = collapse_whitespace(text) - return text - - -def transliteration_cleaners(text): - '''Pipeline for non-English text that transliterates to ASCII.''' - text = convert_to_ascii(text) - text = lowercase(text) - text = collapse_whitespace(text) - return text - - -def japanese_cleaners(text): - '''Pipeline for notating accent in Japanese text. - Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - if text!='': - text+=' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil','pau']: - text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q') - else: - continue - n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']: - a2_next=-1 - else: - a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if icrack uk truck simulator 1.32 keygens

          Download ->->->-> https://urlin.us/2uEwqP



          - -0.100 ROMs Extra Full Set 100% Complete (0 Missing Keygen ✓ DOWNLOAD. ... uk truck simulator 1.32 crack free 13 · Transmute 1.09 ... 4d29de3e1b
          -
          -
          -

          diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Econometriaavanzadatecnicasyherramientaspdf16 NEW!.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Econometriaavanzadatecnicasyherramientaspdf16 NEW!.md deleted file mode 100644 index d5c8672dbeaaa93d556362a54f4b482f1c156ed3..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Econometriaavanzadatecnicasyherramientaspdf16 NEW!.md +++ /dev/null @@ -1,10 +0,0 @@ -

          econometriaavanzadatecnicasyherramientaspdf16


        DOWNLOAD: https://urlin.us/2uEw9X
        



          - -econometriaavanzadatecnicasyherramientaspdf16 12 -Economist Ananda Sampata on the causes of the global crisis -— In July of this year, a meeting of the General Assembly of the World Trade Organization (WTO) was held in New York. -Based on its results and statements made at it, one can conclude that the WTO intends to continue the policy of liberalization of international trade, aimed at reducing trade barriers. -Does this statement not contradict the principles of the WTO, which, as you know, proclaim the protection of trade barriers? 8a78ff9644
          -
          -
          -

          diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/English Dil Chahta Hai Movie Download Bluray Hindi Movies.md b/spaces/inplisQlawa/anything-midjourney-v4-1/English Dil Chahta Hai Movie Download Bluray Hindi Movies.md deleted file mode 100644 index 8b09dbcf0daf2d11da31e7edc66e003f04756392..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/English Dil Chahta Hai Movie Download Bluray Hindi Movies.md +++ /dev/null @@ -1,9 +0,0 @@ -

          English Dil Chahta Hai Movie Download Bluray Hindi Movies


          Download >> https://urlin.us/2uEvrw



          -
          -This is a great touching life-affirming film with a lot of the best Indian actors - Saif Ali Khan, Aamir Khan, Preity Zinta and Sonali Konali in... Read more -This is a great touching and life-affirming film with many of the best Indian actors - Saif Ali Khan, Aamir Khan, Preity Zinta and Sonali Konali in a stunningly heartfelt, touching and beautiful love story. -This story is about how a man who left a girl, having met another, still yearns for her and remembers how he loved her. -And while he remembers and suffers, a girl named Roshni, who dreams of love and getting married, meets Kamar's boyfriend and falls in love with him. 8a78ff9644
          -
          -
          -

          diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/IObit Driver Booster 3.1.0.365 Multilingual Serials.md b/spaces/inplisQlawa/anything-midjourney-v4-1/IObit Driver Booster 3.1.0.365 Multilingual Serials.md deleted file mode 100644 index d7f7858293e4c4064d25d7f148282dd91968a1f5..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/IObit Driver Booster 3.1.0.365 Multilingual Serials.md +++ /dev/null @@ -1,12 +0,0 @@ -

          IObit Driver Booster 3.1.0.365 Multilingual Serials


        Download File: https://urlin.us/2uEvyg
        



          - -pdfIn this article, it will be demonstrated that all the victims of the so-called immorality of the Nige-risians in the Soviet period who were attacked and killed by the OGPU and NKVD, were the victims of political persecution. The pretext of this persecution was anti-Nige-rian propaganda in the Soviet media and the fact that the victims were enemies of the Soviet regime... - -This document is available in alternative formats upon request, and may be ordered by calling 703-531-3849. - -This paper will discuss the criteria by which land-use planning and regulation should be decided. A conceptual framework will be developed that can be used to analyze, and subsequently to decide, the environmental impact of land-use planning. The framework will include criteria such as: ease of approval; economic growth and job creation; unit of land-use planning; necessity of certain decisions; public participation; and proper land-use planning process. Since land-use planning is the most critical aspect of a nation's economic growth, it must be addressed in a scientifically and morally correct manner. It must be studied and determined how the principles of justice are applied in the decision making process, and whether any guidelines and principles are systematically violated or neglected. The paper will also examine the conflicts between planning and justice in environmental planning and regulation. - -Fascinating information on the chemical weapons industry. Relates the use of these weapons on both sides in WWI and WWII. Also provides information on the use of mustard gas and some information on the use of it on animals. Includes information on how chemical weapons were used in WWI and WWII, including the nerve agent Sarin. This is a fascinating 4fefd39f24
          -
          -
          -

          diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/International Cricket Captain 2013 Order Id Keygen.md b/spaces/inplisQlawa/anything-midjourney-v4-1/International Cricket Captain 2013 Order Id Keygen.md deleted file mode 100644 index 08fb1f806317b5185d0bef81a2e5af45f4a5979a..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/International Cricket Captain 2013 Order Id Keygen.md +++ /dev/null @@ -1,6 +0,0 @@ -

          international cricket captain 2013 order id keygen


          Download File ►►► https://urlin.us/2uExRH



          -
          -Free Shipping On All Soccer Master Orders Over. ... 2020 TESA Residential Soccer Camp – CLOSED; 2020 TESA College ID Soccer ... Home Predictions Livescore Team matches Fifa World Cup Donate us ... Livescore today for football, basketball, cricket and other sports 📊 Tournament results, ... Key features include. 1fdad05405
          -
          -
          -

          diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/MAXWELL RENDER 1.6 Setup Free.md b/spaces/inplisQlawa/anything-midjourney-v4-1/MAXWELL RENDER 1.6 Setup Free.md deleted file mode 100644 index db3c6c4d7b12ed9e154b1c59742acbb04b10ffb0..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/MAXWELL RENDER 1.6 Setup Free.md +++ /dev/null @@ -1,68 +0,0 @@ -
          -

          MAXWELL RENDER 1.6 Setup Free: How to Download and Install It

          - -

        Maxwell Render 1.6 is a 3D rendering application that can create realistic images with accurate lighting and materials. It is a light simulator that uses a physically-based approach to render scenes, without using any tricks or shortcuts. It can handle complex materials, multiple light sources, and global illumination with ease. In this article, we will show you how to download and install Maxwell Render 1.6 for free, and what the benefits and drawbacks of using it are.
        

          - -

          Why Use Maxwell Render 1.6?

          - -

          Maxwell Render 1.6 is a software that can help you improve your designs through lighting. It can produce high-quality images with minimal noise and artifacts, using a ray tracing algorithm that simulates the behavior of light as it interacts with the scene elements. It can also support global illumination techniques such as photon mapping and irradiance caching, which allow for realistic shadows and indirect lighting.
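        The paragraph above names the key algorithmic idea: a physically-based renderer estimates the light arriving at each point by averaging many random samples rather than using approximations. The original article contains no code, so the snippet below is only a toy illustration of that unbiased Monte Carlo principle; it is not Maxwell Render's actual implementation, and the sky_radiance model and sample counts are made up for the demo.

        ```python
        # Toy sketch of unbiased Monte Carlo lighting (illustrative only, not Maxwell's code).
        import math
        import random

        def sky_radiance(cos_theta):
            # Hypothetical sky model for the demo: brighter toward the zenith.
            return 1.0 + 4.0 * cos_theta

        def estimate_irradiance(num_samples):
            """Estimate irradiance on an upward-facing surface by random sampling."""
            total = 0.0
            for _ in range(num_samples):
                # Cosine-weighted hemisphere sampling: pdf(direction) = cos_theta / pi,
                # so each sample contributes radiance * cos_theta / pdf = pi * radiance.
                cos_theta = math.sqrt(random.random())
                total += math.pi * sky_radiance(cos_theta)
            return total / num_samples

        if __name__ == "__main__":
            for n in (16, 256, 4096):
                print(f"{n:5d} samples -> {estimate_irradiance(n):.3f}")
        ```

        The estimate is noisy with few samples and settles near the true value (about 11.52 in this toy setup) as the sample count grows, which is why unbiased renderers trade render time for accuracy instead of relying on shortcuts.
        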

          -

          MAXWELL RENDER 1.6 Setup Free


        Download Zip: https://urlin.us/2uEylm
        



          - -

          Maxwell Render 1.6 has many features that make it a versatile and user-friendly rendering software. Some of the main features are:

          - -
            -
          • Maxwell Studio: This is the standalone application that allows you to set up render scenes from scratch or import them from other 3D software. Maxwell Studio has a simple and intuitive interface that lets you focus on lighting and materials. You can also preview your renders in real-time using the FIRE (Fast Interactive Rendering) engine.
          • -
          • Maxwell Materials: These are physically-based materials that can simulate any kind of surface or texture. Maxwell Materials are based on layers and components that can be mixed and matched to create complex effects. You can also use image maps, procedural textures, gradients, and masks to control the appearance of your materials. Maxwell Render 1.6 comes with an online library of thousands of free materials that you can download and use.
          • -
        • Maxwell Multilight: This is a unique feature that allows you to change the intensity and color of your lights after rendering without having to re-render the whole scene. You can also create lighting animations by adjusting the lights over time and exporting them as image sequences or videos. A short sketch of the idea behind this feature appears just after this list.
        
          • -
          • Maxwell Network: This is a network rendering system that allows you to distribute your render jobs across multiple computers on your local network or on the cloud. You can use Maxwell Network to speed up your rendering process and save time.
          • -
          - -
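        The Multilight feature in the list above relies on the fact that light transport is linear: the final image is a weighted sum of one pre-rendered contribution per light, so intensities and colors can be changed after rendering. The snippet below is a minimal sketch of that idea using made-up two-pixel buffers; it is not Maxwell's file format or API.

        ```python
        # Sketch of Multilight-style re-lighting (illustrative only, not Maxwell's API).
        # Each light's contribution is stored separately; re-lighting is a weighted sum.
        light_buffers = {
            "key":  [[0.8, 0.7, 0.6], [0.2, 0.2, 0.2]],   # toy 2-pixel RGB buffers
            "fill": [[0.1, 0.1, 0.2], [0.3, 0.3, 0.4]],
        }

        def relight(buffers, gains, tints):
            """Combine per-light buffers: pixel = sum_i gain_i * tint_i * buffer_i."""
            num_pixels = len(next(iter(buffers.values())))
            out = [[0.0, 0.0, 0.0] for _ in range(num_pixels)]
            for name, buf in buffers.items():
                gain, tint = gains[name], tints[name]
                for p, rgb in enumerate(buf):
                    for c in range(3):
                        out[p][c] += gain * tint[c] * rgb[c]
            return out

        # Dim the key light and warm up the fill light without re-rendering anything.
        print(relight(light_buffers,
                      gains={"key": 0.5, "fill": 2.0},
                      tints={"key": (1.0, 1.0, 1.0), "fill": (1.0, 0.8, 0.6)}))
        ```

        Animating the gains over time and writing one image per frame gives exactly the kind of lighting animation described in the feature list.
        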

          How to Download and Install Maxwell Render 1.6 for Free?

          - -

          If you want to try Maxwell Render 1.6 for free, you can follow these steps:

          -

          - -
            -
          1. Go to https://www.filehorse.com/download-maxwell-render/ and click on the green Download button.
          2. -
          3. Choose the version that matches your operating system (Windows or Mac) and save the file on your computer.
          4. -
          5. Run the installer and follow the instructions on the screen.
          6. -
          7. When prompted, enter your email address and password to create a free account on Next Limit's website.
          8. -
          9. You will receive an email with a link to activate your account and download your license file.
          10. -
          11. Copy the license file to the folder where you installed Maxwell Render 1.6.
          12. -
          13. Launch Maxwell Studio or any of the app integrations (SketchUp, Rhino, ArchiCAD, Revit, 3ds Max, Maya, Cinema 4D) and enjoy your free trial.
          14. -
          - -

          Note that the free trial version of Maxwell Render 1.6 has some limitations, such as:

          - -
            -
          • You can only use it for non-commercial purposes.
          • -
          • You can only render images up to 1280 x 1024 pixels.
          • -
          • You can only use up to four network nodes.
          • -
          • You can only use it for 30 days.
          • -
          - -

        

          - -

        

          - -

        

          -

          What are the drawbacks of Maxwell Render 1.6?

          - -

          Maxwell Render 1.6 is not a perfect software, and it has some drawbacks that you should be aware of before using it. Some of the main drawbacks are:

          - -
            -
          • Slow rendering speed: Because Maxwell Render 1.6 uses an unbiased rendering method, it can take a long time to render complex scenes with high resolution and quality. You may need to use a powerful computer or a network rendering system to speed up the rendering process.
          • -
          • Steep learning curve: Maxwell Render 1.6 has a different workflow and logic than other rendering software, and it may take some time and practice to master it. You may need to read the manual, watch tutorials, or join forums to learn how to use Maxwell Render 1.6 effectively.
          • -
          • Limited compatibility: Maxwell Render 1.6 is not compatible with some 3D software or plugins that you may use for your projects. You may need to export your scenes to Maxwell Studio or use app integrations to render them with Maxwell Render 1.6.
          • -
          - -

          Conclusion

          - -

          Maxwell Render 1.6 is a 3D rendering software that can create realistic images with accurate lighting and materials. It is a light simulator that uses a physically-based approach to render scenes, without using any tricks or shortcuts. It has many features that make it a versatile and user-friendly rendering software, such as Maxwell Studio, Maxwell Materials, Maxwell Multilight, and Maxwell Network. You can download and install Maxwell Render 1.6 for free by following the steps above, but keep in mind that the free trial version has some limitations. If you want to unlock the full potential of Maxwell Render 1.6, you will need to purchase a license from Next Limit's website.

          - -

          We hope this article was helpful for you. If you have any questions or comments about Maxwell Render 1.6, feel free to leave them below.

          -

        

          - -

        

          3cee63e6c2
          -
          -
          \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/Bhaiyyaji Superhit Movie Subtitle Indonesia Download __EXCLUSIVE__.md b/spaces/inreVtussa/clothingai/Examples/Bhaiyyaji Superhit Movie Subtitle Indonesia Download __EXCLUSIVE__.md deleted file mode 100644 index e720874653f2990349cbdd14f7859fd0afca4306..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Bhaiyyaji Superhit Movie Subtitle Indonesia Download __EXCLUSIVE__.md +++ /dev/null @@ -1,6 +0,0 @@ -

          Bhaiyyaji Superhit movie subtitle indonesia download


          Download Zip ››››› https://tiurll.com/2uCiVM



          -
          -download film tutak tutak tutiya subtitle indonesia tutak tutak tutiya ... Bhaiyyaji Superhit full movie with english subtitles download.... Subtitle ... 1fdad05405
          -
          -
          -

          diff --git a/spaces/inreVtussa/clothingai/Examples/CRACK Corel Draw Graphics Suite X7 2 Win32-XFORCEl.md b/spaces/inreVtussa/clothingai/Examples/CRACK Corel Draw Graphics Suite X7 2 Win32-XFORCEl.md deleted file mode 100644 index 9655e79f41773ddf998463b5bd3243824fab1a20..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/CRACK Corel Draw Graphics Suite X7 2 Win32-XFORCEl.md +++ /dev/null @@ -1,20 +0,0 @@ -

          CRACK Corel Draw Graphics Suite X7 2 Win32-XFORCEl


        Download File: https://tiurll.com/2uCkAO
        



          - -SUPPLEMENTARY DATA - -The authors wish to thank Mr Christopher West for helpful discussions and his comments on the manuscript. - -SUPPLEMENTARY DATA #SEC6 - -================== - -[Supplementary Data](#sup1)ref-type="supplementary-material" are available at NAR Online. - -FUNDING #SEC7 - -======= - -MRC \[G1000521 to L.C.W.\]; Wellcome 4fefd39f24
          -
          -
          -

          diff --git a/spaces/inreVtussa/clothingai/Examples/Counter Strike Condition Zero Free Download LINK For Windows 7 64 Bitl.md b/spaces/inreVtussa/clothingai/Examples/Counter Strike Condition Zero Free Download LINK For Windows 7 64 Bitl.md deleted file mode 100644 index ad9be7ea90b7605d3469a9b43420ee429b4c698b..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Counter Strike Condition Zero Free Download LINK For Windows 7 64 Bitl.md +++ /dev/null @@ -1,6 +0,0 @@ -

          Counter Strike Condition Zero Free Download For Windows 7 64 Bitl


        Download Zip: https://tiurll.com/2uCjsX
        



          - -Counter strike condition zero free pc games download free online games ... For Pc. Counter-Strike: Condition Zero. filezilla ftp client download windows 7 64 bit? 1fdad05405
          -
          -
          -

          diff --git a/spaces/internetsignal/audioLDMtext/README.md b/spaces/internetsignal/audioLDMtext/README.md deleted file mode 100644 index a267d537ae6a23859d5640388bd6ccbf04a480f3..0000000000000000000000000000000000000000 --- a/spaces/internetsignal/audioLDMtext/README.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Audioldm Text To Audio Generation -emoji: 🔊 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false -license: bigscience-openrail-m -duplicated_from: haoheliu/audioldm-text-to-audio-generation ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - -## Reference -Part of the code from this repo is borrowed from the following repos. We would like to thank the authors of them for their contribution. - -> https://github.com/LAION-AI/CLAP -> https://github.com/CompVis/stable-diffusion -> https://github.com/v-iashin/SpecVQGAN -> https://github.com/toshas/torch-fidelity \ No newline at end of file diff --git a/spaces/jbilcke-hf/ai-clip-factory/src/pages/api/oauth/getRedirectURI.ts b/spaces/jbilcke-hf/ai-clip-factory/src/pages/api/oauth/getRedirectURI.ts deleted file mode 100644 index ee935bf00d7c3ab38431e840c95ee72809698a4c..0000000000000000000000000000000000000000 --- a/spaces/jbilcke-hf/ai-clip-factory/src/pages/api/oauth/getRedirectURI.ts +++ /dev/null @@ -1,5 +0,0 @@ - -export function getRedirectURI() { - const redirectUri = `https://jbilcke-hf-ai-clip-factory.hf.space/api/auth/callback` - return redirectUri -} diff --git a/spaces/jennysun/jwsun-multisubject-render-model/gligen/ldm/lr_scheduler.py b/spaces/jennysun/jwsun-multisubject-render-model/gligen/ldm/lr_scheduler.py deleted file mode 100644 index be39da9ca6dacc22bf3df9c7389bbb403a4a3ade..0000000000000000000000000000000000000000 --- a/spaces/jennysun/jwsun-multisubject-render-model/gligen/ldm/lr_scheduler.py +++ /dev/null @@ -1,98 +0,0 @@ -import numpy as np - - -class LambdaWarmUpCosineScheduler: - """ - note: use with a base_lr of 1.0 - """ - def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0): - self.lr_warm_up_steps = warm_up_steps - self.lr_start = lr_start - self.lr_min = lr_min - self.lr_max = lr_max - self.lr_max_decay_steps = max_decay_steps - self.last_lr = 0. - self.verbosity_interval = verbosity_interval - - def schedule(self, n, **kwargs): - if self.verbosity_interval > 0: - if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}") - if n < self.lr_warm_up_steps: - lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start - self.last_lr = lr - return lr - else: - t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps) - t = min(t, 1.0) - lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * ( - 1 + np.cos(t * np.pi)) - self.last_lr = lr - return lr - - def __call__(self, n, **kwargs): - return self.schedule(n,**kwargs) - - -class LambdaWarmUpCosineScheduler2: - """ - supports repeated iterations, configurable via lists - note: use with a base_lr of 1.0. - """ - def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0): - assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths) - self.lr_warm_up_steps = warm_up_steps - self.f_start = f_start - self.f_min = f_min - self.f_max = f_max - self.cycle_lengths = cycle_lengths - self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths)) - self.last_f = 0. 
- self.verbosity_interval = verbosity_interval - - def find_in_interval(self, n): - interval = 0 - for cl in self.cum_cycles[1:]: - if n <= cl: - return interval - interval += 1 - - def schedule(self, n, **kwargs): - cycle = self.find_in_interval(n) - n = n - self.cum_cycles[cycle] - if self.verbosity_interval > 0: - if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " - f"current cycle {cycle}") - if n < self.lr_warm_up_steps[cycle]: - f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] - self.last_f = f - return f - else: - t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle]) - t = min(t, 1.0) - f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * ( - 1 + np.cos(t * np.pi)) - self.last_f = f - return f - - def __call__(self, n, **kwargs): - return self.schedule(n, **kwargs) - - -class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2): - - def schedule(self, n, **kwargs): - cycle = self.find_in_interval(n) - n = n - self.cum_cycles[cycle] - if self.verbosity_interval > 0: - if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " - f"current cycle {cycle}") - - if n < self.lr_warm_up_steps[cycle]: - f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] - self.last_f = f - return f - else: - f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle]) - self.last_f = f - return f - diff --git a/spaces/jhwen/bingo/README.md b/spaces/jhwen/bingo/README.md deleted file mode 100644 index d65eafbc8431818f738e8e086455fa6159f101bb..0000000000000000000000000000000000000000 --- a/spaces/jhwen/bingo/README.md +++ /dev/null @@ -1,196 +0,0 @@ ---- -title: bingo -emoji: 📉 -colorFrom: red -colorTo: red -sdk: docker -license: mit -duplicated_from: hf4all/bingo ---- - -
          - -# Bingo - -Bingo,一个让你呼吸顺畅 New Bing。 - -高度还原 New Bing 网页版的主要操作,国内可用,兼容绝大多数微软 Bing AI 的功能,可自行部署使用。 - -![Github stars](https://badgen.net/github/stars/weaigc/bingo?icon=github&label=stars) -![Gthub issues](https://img.shields.io/github/issues/weaigc/bingo) -[![docker build](https://github.com/weaigc/bingo/actions/workflows/docker.yml/badge.svg)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![docker hub](https://badgen.net/docker/size/weaigc/bingo?icon=docker&label=image%20size)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![MIT License](https://img.shields.io/badge/license-MIT-97c50f)](https://github.com/weaigc/bingo/blob/main/license) - -
          - -## 演示站点 - -https://bing.github1s.tk - - - -[![img](./docs/images/demo.png)](https://bing.github1s.tk) - -## 功能和特点 - -- 完全基于 Next.js 重写,高度还原 New Bing Web 版 UI,使用体验和 Bing AI 基本一致。 -- 支持 Docker 构建,方便快捷地部署和访问。 -- Cookie 可全局配置,全局共享。 -- 支持持续语音对话 - -## RoadMap - - - [x] 支持 wss 转发 - - [x] 支持一键部署 - - [x] 优化移动端展示 - - [x] 支持画图 - - [x] 支持语音输入(支持语音指令,目前仅支持 PC 版 Edge 及 Chrome 浏览器) - - [x] 支持语音输出(需要手动开启) - - [x] 支持图片输入 - - [x] 支持自定义域名 - - [ ] 支持历史记录 - - [ ] 适配深色模式 - - [ ] 支持内置提示词 - - [ ] 支持离线访问 - - [ ] 国际化翻译 - -## 一键部署 -你也可以一键部署自己的 New Bing AI 到 🤗 HuggingFace 。 - -### 部署到 Huggingface -1. 点击此图标 -[![Deploy to HuggingFace](https://img.shields.io/badge/%E7%82%B9%E5%87%BB%E9%83%A8%E7%BD%B2-%F0%9F%A4%97-fff)](https://huggingface.co/login?next=%2Fspaces%2Fhf4all%2Fbingo%3Fduplicate%3Dtrue%26visibility%3Dpublic),配置可以不改。 - -2. 部署署完成后,点击“设置” 》“站点域名”,点一下,复制一下 HF 域名信息,然后分享给别人即可。 - -> Huggingface 不支持绑定自己的域名,不过我们可以使用曲线救国的方式来达到这个目的 -> 1. 方式二,借助 Cloudflare Workers [部署Cloudflare Workers](#使用Cloudflare-Workers自定义域名) -> 2. 方式一,借助 Github Pages 及 iframe [如何绑定域名](https://github.com/weaigc/bingo/issues/4) - -### 使用Cloudflare Workers自定义域名 - -> 核心代码 [worker.js](./cloudflare/worker.js) - -- [注册 Cloudflare 账号](https://dash.cloudflare.com/sign-up) - -- 添加一个新的网站,需要你有自己的域名并且将域名`Name Server`托管给 Cloudflare 才行(更多信息可自行 Google) - -- 通过左侧菜单进入「Workers」,并点击「Create a Worker」。 - -- 创建 Worker 服务,复制 [worker.js](./cloudflare/worker.js) 全部代码,粘贴至创建的服务中,根据注释进行改动,保存并部署。 - -- 触发器 中自定义访问域名。 - -### 部署其它平台 -
          - -由于其他平台目前遭到 New Bing 封杀,会遇到很多问题,不再做推荐,有需要的可以自行查看 - - -#### 部署到 Netlify -[![Deploy to Netlify Button](https://www.netlify.com/img/deploy/button.svg)](https://app.netlify.com/start/deploy?repository=https://github.com/weaigc/bingo) - -#### 部署到 Vercel -如果你是 Vercel 付费用户,可以点以下链接一键部署到 Vercel。免费版本有[接口超时限制](https://vercel.com/docs/concepts/limits/overview),不推荐使用 - -[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?demo-title=bingo&demo-description=bingo&demo-url=https%3A%2F%2Fbing.github1s.tk%2F&project-name=bingo&repository-name=bingo&repository-url=https%3A%2F%2Fgithub.com%2Fweaigc%2Fbingo&from=templates&skippable-integrations=1&env=BING_HEADER&envDescription=%E5%A6%82%E6%9E%9C%E4%B8%8D%E7%9F%A5%E9%81%93%E6%80%8E%E4%B9%88%E9%85%8D%E7%BD%AE%E8%AF%B7%E7%82%B9%E5%8F%B3%E4%BE%A7Learn+More&envLink=https%3A%2F%2Fgithub.com%2Fweaigc%2Fbingo%2Fblob%2Fmain%2F.env.example) - -#### 部署到 Render - -[![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://render.com/deploy?repo=https://github.com/weaigc/bingo) -
          - -## 环境和依赖 - -- Node.js >= 18 -- Bing AI 的[身份信息](#如何获取-BING_HEADER)) - -## 安装和使用 - -> 由于目前微软封杀比较严重,推荐优先使用 [部署 Huggingface](#部署到-huggingface) 。 - -* 使用 Node 启动 - -```bash -git clone https://github.com/weaigc/bingo.git -npm i # 推荐使用 pnpm i -npm run build -npm run start -``` - -* 使用 Docker 启动 -```bash -docker pull weaigc/bingo -docker run --rm -it -p 7860:7860 weaigc/bingo -# 或者 -docker run --rm -it -e BING_HEADER=xxxx -p 7860:7860 weaigc/bingo -``` - -## 如何获取 BING_HEADER -> 配置了 BING_HEADER 意味着你将自己的账号共享给所有使用此服务的人,如果不需要免登录画图的功能,不建议设置此变量 - -打开 https://www.bing.com 并登录,然后访问 https://www.bing.com/turing/captcha/challenge ,通过人机校验,然后 - -![BING HEADER](./docs/images/curl.png) - -> 复制出来的内容应该如下所示。确认格式无误后,打开 https://effulgent-bubblegum-e2f5df.netlify.app/#dialog=%22settings%22 ,粘贴进去,点击“转成 BING_HEADER 并复制”,然后从剪切板粘贴即可得到。(你也可以先在网页上进行验证) - -以下是格式参考,需要注意的是,网页端保存的格式是以`curl`开头, 而服务端配置的 `BING_HEADER` 是 `base64` 格式,两者不能互通。 -
          -正常格式/网页端保存的格式(格式仅供参考) - -``` -curl 'https://www.bing.com/turing/captcha/challenge' \ - -H 'authority: www.bing.com' \ - -H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7' \ - -H 'accept-language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6' \ - -H 'cache-control: max-age=0' \ - -H 'cookie: MicrosoftApplicationsTelemetryDeviceId=3399c004-fd0e-48ec-bb92-d82a27b2bbd4; _EDGE_V=1; SRCHD=AF=NOFORM; SRCHUID=V=2&GUID=29EBDDA4E6674329ACCF1A0A423C3E98&dmnchg=1; _UR=QS=0&TQS=0; _HPVN=CS=eyJQbiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiUCJ9LCJTYyI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiSCJ9LCJReiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiVCJ9LCJBcCI6dHJ1ZSwiTXV0ZSI6dHJ1ZSwiTGFkIjoiMjAyMy0wNy0yNVQwMDowMDowMFoiLCJJb3RkIjowLCJHd2IiOjAsIkRmdCI6bnVsbCwiTXZzIjowLCJGbHQiOjAsIkltcCI6Mn0=; _RwBf=ilt=1&ihpd=1&ispd=0&rc=0&rb=0&gb=0&rg=200&pc=0&mtu=0&rbb=0&g=0&cid=&clo=0&v=1&l=2023-07-25T07:00:00.0000000Z&lft=0001-01-01T00:00:00.0000000&aof=0&o=2&p=&c=&t=0&s=0001-01-01T00:00:00.0000000+00:00&ts=2023-07-25T11:00:31.7111548+00:00&rwred=0&wls=&lka=0&lkt=0&TH=&dci=0; ANON=A=0043C6590EA808ED6E395059FFFFFFFF&E=1c8b&W=1; NAP=V=1.9&E=1c31&C=DnaMSbDN_4efZ_xXqBF3Daorjr53kYqYoaP8YHsupjmiXnysX7a37A&W=1; PPLState=1; KievRPSSecAuth=FABSBBRaTOJILtFsMkpLVWSG6AN6C/svRwNmAAAEgAAACMGUA7EGVSjGEAQBGHtNsc5sNL7unmJsfPJ2t6imfo4BeUJlAia3IpMTtMUy4PU/C5QAzRI5pODtsIee0+blgllXt/5IiWwGjwmdhivsFM597pRPkjARPfwsPhNLPNbJrCPNPHdje4Is78MnCADXw6/NBq2FL8V2/byw2fH6IuAMD2MvN/VvqpEa9ZxiDjZtENj4HEj0mO2SgzjfyEhVAkjvznJqU2rw/Q2tHmX94NAM2kzlzKF/hWPhCCUmu8IHLvCnHDS6mSptvJDDP/sp3ovtzOXkP1mlM/Xju5ftesUvccVEQGffXORa1dE5hEMbKIiKXz1tDdduSXE19g9/+mRMAjaQhpwhI8XmilCTx1adb1Ll5qK+VjC9GNfEZzcbsGBPVaOl+anG8rEMq+Xnhjo7J+NqTNolavHgcuV8kJsCeJZIged33UA8eOZeFo+wAECMguxMoSqgpGH+sthqynvD/FJD6r/tiU2N3uqVq8NE8V37asrN6T14Z0FGBJOe6ET1+PGApm3s11OY9/xhFEB9T5BEPUGEbvRcLcW2ncFQX0EU+xweiPqo1Q1hNUg/dCtSI+lZ7c2H8XheePZavZ0TJQ8oNCSAuKiTqJmI0fVGpwbXwfaADkEipuawz3fIuMJBNgMU0OtA7Hm59v2fGLIBuvi6YeKS6GgVk3BIPf+P/eKahwozrxQZaFnoHTSqMkvct7xCP4atBROfXKf5Ww0CcFKp+2WX9BIskTOo2jjk6bAyyYJ+ElUB1fgLKNk5m/YSMc9iYCLIBMIGN8F0Yvy3tZ7cvh7Ue5Klo98US/I+nW1G7ZJMHRgUO8h8lpneHqEMegKd8gynO4VF7RpCjJkunDmW0Ta+RkXAP619pg0dqHMFkoOgknN78oBbGTV6fJUKotv+vi61kLhAeXZGWoHGCRXh2wUC6YgfPgKA6ESRNHtFn7E5B3HHpLc5rVMDSNhKZYfdhupV4Ezf6+5DhMcZLZhi0kk+ivDiN1gdHlVtSN55xpvf+c+XZDzR0uhgcvgy0LAbmzgk6y4WbYH+LQsMpzNNj+aC72vMiWovWrKh9jY4MYCmdgxsS/skPtLdp18muiEIRXTbZQGUmhxFpJAIbBIsCscMpzL0BgeujxUwM5wr79Sd9r4xwbgSMwmBlBfUHRVBdNyg8feepeJbCS63nD6eHOuLqMRsPIio3w/ki/EAa92UUEiZeavLsMUD/y/qAvWUdzdP5Y+C/TM+CMGS/kGL4LEdY/28MQeTvU1qv1X21kQt2aiaj3pPVL36hAzxbcLgqcMo9oymDRy87kdCXW/+g4oKLtMh6fm/G6W6Y/B01JlxohyyvueHQIG557uzkEkTJ3FnOVODSKBKpb3WZ65rExfV71zSZa25F3GmpaIG6HiYrX2YYhQAkIE9pKEQBHbnwHuwNDGottZTXZw=; WLS=C=9df3f9d8518fae19&N=wen; WLID=pGY8HgWCu4p5XYCOk2oa0+DBdftkMUfmNIn8XtSjSTKsgv/Il7GUlYs0Jpjf/E12jZMgV7x44Dy3fXOgjjUoJx7Y/ClLrLhsk20THksJJoI=; _EDGE_S=F=1&SID=17CF6EE006426448213C7DB907436588&mkt=zh-CN; MUID=225621093D8A6C27301632413C0E6D08; MUIDB=225621093D8A6C27301632413C0E6D08; SUID=A; SNRHOP=I=&TS=; _U=nGyzKQruEsDwLiu65fZFIG6e12hf2lwTJmroW__k8joUJIKmG3OIjayXKGW9dCVR3sNhF76mEVxyW6yjUGPodOfjtSa3s3J_DxMOrEK1BqXCOBI9bC66spAIASV7prsYFlVAJz73jVNENp_tBubLHJy6EbT0BKRe4AjrYkH-9uMnmCKB8Zmyg; _SS=SID=17CF6EE006426448213C7DB907436588&R=0&RB=0&GB=0&RG=200&RP=0&PC=U531; SRCHS=PC=U531; 
USRLOC=HS=1&ELOC=LAT=22.501529693603516|LON=113.9263687133789|N=%E5%8D%97%E5%B1%B1%E5%8C%BA%EF%BC%8C%E5%B9%BF%E4%B8%9C%E7%9C%81|ELT=2|&CLOC=LAT=22.50153029046461|LON=113.92637070632928|A=733.4464586120832|TS=230726151034|SRC=W; SRCHUSR=DOB=20230725&T=1690384908000&POEX=W; ipv6=hit=1690388509974&t=6; SRCHHPGUSR=HV=1690384945&SRCHLANG=zh-Hans&PV=15.0.0&BRW=MW&BRH=MT&CW=410&CH=794&SCW=410&SCH=794&DPR=1.5&UTC=480&DM=0&WTS=63825879627&PRVCW=410&PRVCH=794&PR=1.5; cct=AjWIBYOoVP-Afq6gWwtx80If6yHn6iBuEVHA1XHdAKpny6Y_CVyi_MSyM94VyMWnjdYkkccVtm3czoIAtXUGQA; GC=AjWIBYOoVP-Afq6gWwtx80If6yHn6iBuEVHA1XHdAKpR3Y_D9Ytcks4Ht6XhadXk75dvhzP4YOUS0UmoEyqyxw' \ - -H 'dnt: 1' \ - -H 'sec-ch-ua: "Chromium";v="116", "Not)A;Brand";v="24", "Microsoft Edge";v="116"' \ - -H 'sec-ch-ua-arch: "x86"' \ - -H 'sec-ch-ua-bitness: "64"' \ - -H 'sec-ch-ua-full-version: "116.0.1938.29"' \ - -H 'sec-ch-ua-full-version-list: "Chromium";v="116.0.5845.42", "Not)A;Brand";v="24.0.0.0", "Microsoft Edge";v="116.0.1938.29"' \ - -H 'sec-ch-ua-mobile: ?0' \ - -H 'sec-ch-ua-model: ""' \ - -H 'sec-ch-ua-platform: "Windows"' \ - -H 'sec-ch-ua-platform-version: "15.0.0"' \ - -H 'sec-fetch-dest: document' \ - -H 'sec-fetch-mode: navigate' \ - -H 'sec-fetch-site: none' \ - -H 'sec-fetch-user: ?1' \ - -H 'sec-ms-gec: B3F47AD4A283CAB374C0451C46AAFD147C6A4DACAFF6A1C13F34B2C72B024494' \ - -H 'sec-ms-gec-version: 1-116.0.1938.29' \ - -H 'upgrade-insecure-requests: 1' \ - -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.0.0' \ - -H 'x-client-data: eyIxIjoiMiIsIjEwIjoiXCJTMGg3R05HOTF2aDQ1TUZSUnZ5NHN2akRmMWdlaVJKenNxNlA3aU1WbnF3PVwiIiwiMiI6IjEiLCIzIjoiMSIsIjQiOiIyMTU4ODQ5NTM4MjY4OTM5NTA3IiwiNSI6IlwiSm9GUWpPTDk3OS9MbkRRZnlCd2N1M2FsOUN3eTZTQmdaMGNYMXBtOWVMZz1cIiIsIjYiOiJiZXRhIiwiNyI6IjE4MDM4ODYyNjQzNSIsIjkiOiJkZXNrdG9wIn0=' \ - -H 'x-edge-shopping-flag: 1' \ - --compressed -``` -
          - -
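        The reference above shows the same credentials twice: once as the curl command saved from the browser, and once as the base64 string expected in BING_HEADER. If the base64 form is simply the encoding of that saved curl text (which is what the two blocks suggest), the conversion can also be done locally with a short script. This is a hedged sketch, not part of the project; `curl.txt` is a hypothetical file holding the copied curl command.

        ```python
        # One possible way to produce BING_HEADER locally (sketch; curl.txt is a
        # hypothetical file containing the curl command copied from the browser).
        import base64

        with open("curl.txt", "rb") as f:
            curl_text = f.read()

        bing_header = base64.b64encode(curl_text).decode("ascii")
        print(bing_header)  # paste this value into the BING_HEADER environment variable
        ```
        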
          -转成base64之后的格式(BING_HEADER只能使用 base64 之后的格式) - -``` -Y3VybCAnaHR0cHM6Ly93d3cuYmluZy5jb20vdHVyaW5nL2NvbnZlcnNhdGlvbi9jcmVhdGUnIFwgICAtSCAnYXV0aG9yaXR5OiB3d3cuYmluZy5jb20nIFwgICAtSCAnYWNjZXB0OiB0ZXh0L2h0bWwsYXBwbGljYXRpb24veGh0bWwreG1sLGFwcGxpY2F0aW9uL3htbDtxPTAuOSxpbWFnZS93ZWJwLGltYWdlL2FwbmcsKi8qO3E9MC44LGFwcGxpY2F0aW9uL3NpZ25lZC1leGNoYW5nZTt2PWIzO3E9MC43JyBcICAgLUggJ2FjY2VwdC1sYW5ndWFnZTogemgtQ04semg7cT0wLjksZW47cT0wLjgsZW4tR0I7cT0wLjcsZW4tVVM7cT0wLjYnIFwgICAtSCAnY2FjaGUtY29udHJvbDogbWF4LWFnZT0wJyBcICAgLUggJ2Nvb2tpZTogTWljcm9zb2Z0QXBwbGljYXRpb25zVGVsZW1ldHJ5RGV2aWNlSWQ9MzM5OWMwMDQtZmQwZS00OGVjLWJiOTItZDgyYTI3YjJiYmQ0OyBfRURHRV9WPTE7IFNSQ0hEPUFGPU5PRk9STTsgU1JDSFVJRD1WPTImR1VJRD0yOUVCRERBNEU2Njc0MzI5QUNDRjFBMEE0MjNDM0U5OCZkbW5jaGc9MTsgX1VSPVFTPTAmVFFTPTA7IF9IUFZOPUNTPWV5SlFiaUk2ZXlKRGJpSTZNU3dpVTNRaU9qQXNJbEZ6SWpvd0xDSlFjbTlrSWpvaVVDSjlMQ0pUWXlJNmV5SkRiaUk2TVN3aVUzUWlPakFzSWxGeklqb3dMQ0pRY205a0lqb2lTQ0o5TENKUmVpSTZleUpEYmlJNk1Td2lVM1FpT2pBc0lsRnpJam93TENKUWNtOWtJam9pVkNKOUxDSkJjQ0k2ZEhKMVpTd2lUWFYwWlNJNmRISjFaU3dpVEdGa0lqb2lNakF5TXkwd055MHlOVlF3TURvd01Eb3dNRm9pTENKSmIzUmtJam93TENKSGQySWlPakFzSWtSbWRDSTZiblZzYkN3aVRYWnpJam93TENKR2JIUWlPakFzSWtsdGNDSTZNbjA9OyBfUndCZj1pbHQ9MSZpaHBkPTEmaXNwZD0wJnJjPTAmcmI9MCZnYj0wJnJnPTIwMCZwYz0wJm10dT0wJnJiYj0wJmc9MCZjaWQ9JmNsbz0wJnY9MSZsPTIwMjMtMDctMjVUMDc6MDA6MDAuMDAwMDAwMFombGZ0PTAwMDEtMDEtMDFUMDA6MDA6MDAuMDAwMDAwMCZhb2Y9MCZvPTImcD0mYz0mdD0wJnM9MDAwMS0wMS0wMVQwMDowMDowMC4wMDAwMDAwKzAwOjAwJnRzPTIwMjMtMDctMjVUMTE6MDA6MzEuNzExMTU0OCswMDowMCZyd3JlZD0wJndscz0mbGthPTAmbGt0PTAmVEg9JmRjaT0wOyBBTk9OPUE9MDA0M0M2NTkwRUE4MDhFRDZFMzk1MDU5RkZGRkZGRkYmRT0xYzhiJlc9MTsgTkFQPVY9MS45JkU9MWMzMSZDPURuYU1TYkROXzRlZlpfeFhxQkYzRGFvcmpyNTNrWXFZb2FQOFlIc3Vwam1pWG55c1g3YTM3QSZXPTE7IFBQTFN0YXRlPTE7IEtpZXZSUFNTZWNBdXRoPUZBQlNCQlJhVE9KSUx0RnNNa3BMVldTRzZBTjZDL3N2UndObUFBQUVnQUFBQ01HVUE3RUdWU2pHRUFRQkdIdE5zYzVzTkw3dW5tSnNmUEoydDZpbWZvNEJlVUpsQWlhM0lwTVR0TVV5NFBVL0M1UUF6Ukk1cE9EdHNJZWUwK2JsZ2xsWHQvNUlpV3dHandtZGhpdnNGTTU5N3BSUGtqQVJQZndzUGhOTFBOYkpyQ1BOUEhkamU0SXM3OE1uQ0FEWHc2L05CcTJGTDhWMi9ieXcyZkg2SXVBTUQyTXZOL1Z2cXBFYTlaeGlEalp0RU5qNEhFajBtTzJTZ3pqZnlFaFZBa2p2em5KcVUycncvUTJ0SG1YOTROQU0ya3psektGL2hXUGhDQ1VtdThJSEx2Q25IRFM2bVNwdHZKRERQL3NwM292dHpPWGtQMW1sTS9YanU1ZnRlc1V2Y2NWRVFHZmZYT1JhMWRFNWhFTWJLSWlLWHoxdERkZHVTWEUxOWc5LyttUk1BamFRaHB3aEk4WG1pbENUeDFhZGIxTGw1cUsrVmpDOUdOZkVaemNic0dCUFZhT2wrYW5HOHJFTXErWG5oam83SitOcVROb2xhdkhnY3VWOGtKc0NlSlpJZ2VkMzNVQThlT1plRm8rd0FFQ01ndXhNb1NxZ3BHSCtzdGhxeW52RC9GSkQ2ci90aVUyTjN1cVZxOE5FOFYzN2Fzck42VDE0WjBGR0JKT2U2RVQxK1BHQXBtM3MxMU9ZOS94aEZFQjlUNUJFUFVHRWJ2UmNMY1cybmNGUVgwRVUreHdlaVBxbzFRMWhOVWcvZEN0U0krbFo3YzJIOFhoZWVQWmF2WjBUSlE4b05DU0F1S2lUcUptSTBmVkdwd2JYd2ZhQURrRWlwdWF3ejNmSXVNSkJOZ01VME90QTdIbTU5djJmR0xJQnV2aTZZZUtTNkdnVmszQklQZitQL2VLYWh3b3pyeFFaYUZub0hUU3FNa3ZjdDd4Q1A0YXRCUk9mWEtmNVd3MENjRktwKzJXWDlCSXNrVE9vMmpqazZiQXl5WUorRWxVQjFmZ0xLTms1bS9ZU01jOWlZQ0xJQk1JR044RjBZdnkzdFo3Y3ZoN1VlNUtsbzk4VVMvSStuVzFHN1pKTUhSZ1VPOGg4bHBuZUhxRU1lZ0tkOGd5bk80VkY3UnBDakprdW5EbVcwVGErUmtYQVA2MTlwZzBkcUhNRmtvT2drbk43OG9CYkdUVjZmSlVLb3R2K3ZpNjFrTGhBZVhaR1dvSEdDUlhoMndVQzZZZ2ZQZ0tBNkVTUk5IdEZuN0U1QjNISHBMYzVyVk1EU05oS1pZZmRodXBWNEV6ZjYrNURoTWNaTFpoaTBraytpdkRpTjFnZEhsVnRTTjU1eHB2ZitjK1haRHpSMHVoZ2N2Z3kwTEFibXpnazZ5NFdiWUgrTFFzTXB6Tk5qK2FDNzJ2TWlXb3ZXcktoOWpZNE1ZQ21kZ3hzUy9za1B0TGRwMThtdWlFSVJYVGJaUUdVbWh4RnBKQUliQklzQ3NjTXB6TDBCZ2V1anhVd001d3I3OVNkOXI0eHdiZ1NNd21CbEJmVUhSVkJkTnlnOGZlZXBlSmJDUzYzbkQ2ZUhPdUxxTVJzUElpbzN3L2tpL0VBYTkyVVVFaVplYXZMc01VRC95L3FBdldVZHpkUDVZK0MvVE0rQ01HUy9rR0w0TEVkWS8yOE1RZVR2VTFxdjFYMjFrUXQyYWlhajNwUFZMMzZoQXp4YmNMZ3FjTW85b3ltRFJ5
ODdrZENYVy8rZzRvS0x0TWg2Zm0vRzZXNlkvQjAxSmx4b2h5eXZ1ZUhRSUc1NTd1emtFa1RKM0ZuT1ZPRFNLQktwYjNXWjY1ckV4ZlY3MXpTWmEyNUYzR21wYUlHNkhpWXJYMllZaFFBa0lFOXBLRVFCSGJud0h1d05ER290dFpUWFp3PTsgV0xTPUM9OWRmM2Y5ZDg1MThmYWUxOSZOPXdlbjsgV0xJRD1wR1k4SGdXQ3U0cDVYWUNPazJvYTArREJkZnRrTVVmbU5JbjhYdFNqU1RLc2d2L0lsN0dVbFlzMEpwamYvRTEyalpNZ1Y3eDQ0RHkzZlhPZ2pqVW9KeDdZL0NsTHJMaHNrMjBUSGtzSkpvST07IF9FREdFX1M9Rj0xJlNJRD0xN0NGNkVFMDA2NDI2NDQ4MjEzQzdEQjkwNzQzNjU4OCZta3Q9emgtQ047IE1VSUQ9MjI1NjIxMDkzRDhBNkMyNzMwMTYzMjQxM0MwRTZEMDg7IE1VSURCPTIyNTYyMTA5M0Q4QTZDMjczMDE2MzI0MTNDMEU2RDA4OyBTVUlEPUE7IFNOUkhPUD1JPSZUUz07IF9VPW5HeXpLUXJ1RXNEd0xpdTY1ZlpGSUc2ZTEyaGYybHdUSm1yb1dfX2s4am9VSklLbUczT0lqYXlYS0dXOWRDVlIzc05oRjc2bUVWeHlXNnlqVUdQb2RPZmp0U2EzczNKX0R4TU9yRUsxQnFYQ09CSTliQzY2c3BBSUFTVjdwcnNZRmxWQUp6NzNqVk5FTnBfdEJ1YkxISnk2RWJUMEJLUmU0QWpyWWtILTl1TW5tQ0tCOFpteWc7IF9TUz1TSUQ9MTdDRjZFRTAwNjQyNjQ0ODIxM0M3REI5MDc0MzY1ODgmUj0wJlJCPTAmR0I9MCZSRz0yMDAmUlA9MCZQQz1VNTMxOyBTUkNIUz1QQz1VNTMxOyBVU1JMT0M9SFM9MSZFTE9DPUxBVD0yMi41MDE1Mjk2OTM2MDM1MTZ8TE9OPTExMy45MjYzNjg3MTMzNzg5fE49JUU1JThEJTk3JUU1JUIxJUIxJUU1JThDJUJBJUVGJUJDJThDJUU1JUI5JUJGJUU0JUI4JTlDJUU3JTlDJTgxfEVMVD0yfCZDTE9DPUxBVD0yMi41MDE1MzAyOTA0NjQ2MXxMT049MTEzLjkyNjM3MDcwNjMyOTI4fEE9NzMzLjQ0NjQ1ODYxMjA4MzJ8VFM9MjMwNzI2MTUxMDM0fFNSQz1XOyBTUkNIVVNSPURPQj0yMDIzMDcyNSZUPTE2OTAzODQ5MDgwMDAmUE9FWD1XOyBpcHY2PWhpdD0xNjkwMzg4NTA5OTc0JnQ9NjsgU1JDSEhQR1VTUj1IVj0xNjkwMzg0OTQ1JlNSQ0hMQU5HPXpoLUhhbnMmUFY9MTUuMC4wJkJSVz1NVyZCUkg9TVQmQ1c9NDEwJkNIPTc5NCZTQ1c9NDEwJlNDSD03OTQmRFBSPTEuNSZVVEM9NDgwJkRNPTAmV1RTPTYzODI1ODc5NjI3JlBSVkNXPTQxMCZQUlZDSD03OTQmUFI9MS41OyBjY3Q9QWpXSUJZT29WUC1BZnE2Z1d3dHg4MElmNnlIbjZpQnVFVkhBMVhIZEFLcG55NllfQ1Z5aV9NU3lNOTRWeU1XbmpkWWtrY2NWdG0zY3pvSUF0WFVHUUE7IEdDPUFqV0lCWU9vVlAtQWZxNmdXd3R4ODBJZjZ5SG42aUJ1RVZIQTFYSGRBS3BSM1lfRDlZdGNrczRIdDZYaGFkWGs3NWR2aHpQNFlPVVMwVW1vRXlxeXh3JyBcICAgLUggJ2RudDogMScgXCAgIC1IICdzZWMtY2gtdWE6ICJDaHJvbWl1bSI7dj0iMTE2IiwgIk5vdClBO0JyYW5kIjt2PSIyNCIsICJNaWNyb3NvZnQgRWRnZSI7dj0iMTE2IicgXCAgIC1IICdzZWMtY2gtdWEtYXJjaDogIng4NiInIFwgICAtSCAnc2VjLWNoLXVhLWJpdG5lc3M6ICI2NCInIFwgICAtSCAnc2VjLWNoLXVhLWZ1bGwtdmVyc2lvbjogIjExNi4wLjE5MzguMjkiJyBcICAgLUggJ3NlYy1jaC11YS1mdWxsLXZlcnNpb24tbGlzdDogIkNocm9taXVtIjt2PSIxMTYuMC41ODQ1LjQyIiwgIk5vdClBO0JyYW5kIjt2PSIyNC4wLjAuMCIsICJNaWNyb3NvZnQgRWRnZSI7dj0iMTE2LjAuMTkzOC4yOSInIFwgICAtSCAnc2VjLWNoLXVhLW1vYmlsZTogPzAnIFwgICAtSCAnc2VjLWNoLXVhLW1vZGVsOiAiIicgXCAgIC1IICdzZWMtY2gtdWEtcGxhdGZvcm06ICJXaW5kb3dzIicgXCAgIC1IICdzZWMtY2gtdWEtcGxhdGZvcm0tdmVyc2lvbjogIjE1LjAuMCInIFwgICAtSCAnc2VjLWZldGNoLWRlc3Q6IGRvY3VtZW50JyBcICAgLUggJ3NlYy1mZXRjaC1tb2RlOiBuYXZpZ2F0ZScgXCAgIC1IICdzZWMtZmV0Y2gtc2l0ZTogbm9uZScgXCAgIC1IICdzZWMtZmV0Y2gtdXNlcjogPzEnIFwgICAtSCAnc2VjLW1zLWdlYzogQjNGNDdBRDRBMjgzQ0FCMzc0QzA0NTFDNDZBQUZEMTQ3QzZBNERBQ0FGRjZBMUMxM0YzNEIyQzcyQjAyNDQ5NCcgXCAgIC1IICdzZWMtbXMtZ2VjLXZlcnNpb246IDEtMTE2LjAuMTkzOC4yOScgXCAgIC1IICd1cGdyYWRlLWluc2VjdXJlLXJlcXVlc3RzOiAxJyBcICAgLUggJ3VzZXItYWdlbnQ6IE1vemlsbGEvNS4wIChXaW5kb3dzIE5UIDEwLjA7IFdpbjY0OyB4NjQpIEFwcGxlV2ViS2l0LzUzNy4zNiAoS0hUTUwsIGxpa2UgR2Vja28pIENocm9tZS8xMTYuMC4wLjAgU2FmYXJpLzUzNy4zNiBFZGcvMTE2LjAuMC4wJyBcICAgLUggJ3gtY2xpZW50LWRhdGE6IGV5SXhJam9pTWlJc0lqRXdJam9pWENKVE1HZzNSMDVIT1RGMmFEUTFUVVpTVW5aNU5ITjJha1JtTVdkbGFWSktlbk54TmxBM2FVMVdibkYzUFZ3aUlpd2lNaUk2SWpFaUxDSXpJam9pTVNJc0lqUWlPaUl5TVRVNE9EUTVOVE00TWpZNE9UTTVOVEEzSWl3aU5TSTZJbHdpU205R1VXcFBURGszT1M5TWJrUlJabmxDZDJOMU0yRnNPVU4zZVRaVFFtZGFNR05ZTVhCdE9XVk1aejFjSWlJc0lqWWlPaUppWlhSaElpd2lOeUk2SWpFNE1ETTRPRFl5TmpRek5TSXNJamtpT2lKa1pYTnJkRzl3SW4wPScgXCAgIC1IICd4LWVkZ2Utc2hvcHBpbmctZmxhZzogMScgXCAgIC0tY29tcHJlc3NlZA== -``` -
          - - -## 鸣谢 - - 感谢 [EdgeGPT](https://github.com/acheong08/EdgeGPT) 提供的代理 API 的方法。 - - 感谢 [Vercel AI](https://github.com/vercel-labs/ai-chatbot) 提供的基础脚手架和 [ChatHub](https://github.com/chathub-dev/chathub) [go-proxy-bingai](https://github.com/adams549659584/go-proxy-bingai) 提供的部分代码。 - - -## 答疑及交流 - - - -## License - -MIT © [LICENSE](https://github.com/weaigc/bingo/blob/main/LICENSE). - - diff --git a/spaces/jimschat/VITS-Umamusume-voice-synthesizer/text/symbols.py b/spaces/jimschat/VITS-Umamusume-voice-synthesizer/text/symbols.py deleted file mode 100644 index 053a7105f7ce95aa51614f6995399fa2172b3eb2..0000000000000000000000000000000000000000 --- a/spaces/jimschat/VITS-Umamusume-voice-synthesizer/text/symbols.py +++ /dev/null @@ -1,76 +0,0 @@ -''' -Defines the set of symbols used in text input to the model. -''' - -# japanese_cleaners -_pad = '_' -_punctuation = ',.!?-' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ ' - - -'''# japanese_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ ' -''' - - -'''# korean_cleaners -_pad = '_' -_punctuation = ',.!?…~' -_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ ' -''' - -'''# chinese_cleaners -_pad = '_' -_punctuation = ',。!?—…' -_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ ' -''' - -'''# zh_ja_mixture_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ ' -''' - -'''# sanskrit_cleaners -_pad = '_' -_punctuation = '।' -_letters = 'ँंःअआइईउऊऋएऐओऔकखगघङचछजझञटठडढणतथदधनपफबभमयरलळवशषसहऽािीुूृॄेैोौ्ॠॢ ' -''' - -'''# cjks_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'NQabdefghijklmnopstuvwxyzʃʧʥʦɯɹəɥçɸɾβŋɦː⁼ʰ`^#*=→↓↑ ' -''' - -'''# thai_cleaners -_pad = '_' -_punctuation = '.!? ' -_letters = 'กขฃคฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลวศษสหฬอฮฯะัาำิีึืุูเแโใไๅๆ็่้๊๋์' -''' - -'''# cjke_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'NQabdefghijklmnopstuvwxyzɑæʃʑçɯɪɔɛɹðəɫɥɸʊɾʒθβŋɦ⁼ʰ`^#*=ˈˌ→↓↑ ' -''' - -'''# shanghainese_cleaners -_pad = '_' -_punctuation = ',.!?…' -_letters = 'abdfghiklmnopstuvyzøŋȵɑɔɕəɤɦɪɿʑʔʰ̩̃ᴀᴇ15678 ' -''' - -'''# chinese_dialect_cleaners -_pad = '_' -_punctuation = ',.!?~…─' -_letters = '#Nabdefghijklmnoprstuvwxyzæçøŋœȵɐɑɒɓɔɕɗɘəɚɛɜɣɤɦɪɭɯɵɷɸɻɾɿʂʅʊʋʌʏʑʔʦʮʰʷˀː˥˦˧˨˩̥̩̃̚ᴀᴇ↑↓∅ⱼ ' -''' - -# Export all symbols: -symbols = [_pad] + list(_punctuation) + list(_letters) - -# Special symbol ids -SPACE_ID = symbols.index(" ") diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/ttFont.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/ttFont.py deleted file mode 100644 index 1bece8e5e4cfc52693e60b1414454cef5505fb8c..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/fontTools/ttLib/ttFont.py +++ /dev/null @@ -1,1145 +0,0 @@ -from fontTools.config import Config -from fontTools.misc import xmlWriter -from fontTools.misc.configTools import AbstractConfig -from fontTools.misc.textTools import Tag, byteord, tostr -from fontTools.misc.loggingTools import deprecateArgument -from fontTools.ttLib import TTLibError -from fontTools.ttLib.ttGlyphSet import _TTGlyph, _TTGlyphSetCFF, _TTGlyphSetGlyf -from fontTools.ttLib.sfnt import SFNTReader, SFNTWriter -from io import BytesIO, StringIO, UnsupportedOperation -import os -import logging -import traceback - -log = logging.getLogger(__name__) - - -class TTFont(object): - - """Represents a TrueType font. 
- - The object manages file input and output, and offers a convenient way of - accessing tables. Tables will be only decompiled when necessary, ie. when - they're actually accessed. This means that simple operations can be extremely fast. - - Example usage:: - - >> from fontTools import ttLib - >> tt = ttLib.TTFont("afont.ttf") # Load an existing font file - >> tt['maxp'].numGlyphs - 242 - >> tt['OS/2'].achVendID - 'B&H\000' - >> tt['head'].unitsPerEm - 2048 - - For details of the objects returned when accessing each table, see :ref:`tables`. - To add a table to the font, use the :py:func:`newTable` function:: - - >> os2 = newTable("OS/2") - >> os2.version = 4 - >> # set other attributes - >> font["OS/2"] = os2 - - TrueType fonts can also be serialized to and from XML format (see also the - :ref:`ttx` binary):: - - >> tt.saveXML("afont.ttx") - Dumping 'LTSH' table... - Dumping 'OS/2' table... - [...] - - >> tt2 = ttLib.TTFont() # Create a new font object - >> tt2.importXML("afont.ttx") - >> tt2['maxp'].numGlyphs - 242 - - The TTFont object may be used as a context manager; this will cause the file - reader to be closed after the context ``with`` block is exited:: - - with TTFont(filename) as f: - # Do stuff - - Args: - file: When reading a font from disk, either a pathname pointing to a file, - or a readable file object. - res_name_or_index: If running on a Macintosh, either a sfnt resource name or - an sfnt resource index number. If the index number is zero, TTLib will - autodetect whether the file is a flat file or a suitcase. (If it is a suitcase, - only the first 'sfnt' resource will be read.) - sfntVersion (str): When constructing a font object from scratch, sets the four-byte - sfnt magic number to be used. Defaults to ``\0\1\0\0`` (TrueType). To create - an OpenType file, use ``OTTO``. - flavor (str): Set this to ``woff`` when creating a WOFF file or ``woff2`` for a WOFF2 - file. - checkChecksums (int): How checksum data should be treated. Default is 0 - (no checking). Set to 1 to check and warn on wrong checksums; set to 2 to - raise an exception if any wrong checksums are found. - recalcBBoxes (bool): If true (the default), recalculates ``glyf``, ``CFF ``, - ``head`` bounding box values and ``hhea``/``vhea`` min/max values on save. - Also compiles the glyphs on importing, which saves memory consumption and - time. - ignoreDecompileErrors (bool): If true, exceptions raised during table decompilation - will be ignored, and the binary data will be returned for those tables instead. - recalcTimestamp (bool): If true (the default), sets the ``modified`` timestamp in - the ``head`` table on save. - fontNumber (int): The index of the font in a TrueType Collection file. - lazy (bool): If lazy is set to True, many data structures are loaded lazily, upon - access only. If it is set to False, many data structures are loaded immediately. - The default is ``lazy=None`` which is somewhere in between. 
- """ - - def __init__( - self, - file=None, - res_name_or_index=None, - sfntVersion="\000\001\000\000", - flavor=None, - checkChecksums=0, - verbose=None, - recalcBBoxes=True, - allowVID=NotImplemented, - ignoreDecompileErrors=False, - recalcTimestamp=True, - fontNumber=-1, - lazy=None, - quiet=None, - _tableCache=None, - cfg={}, - ): - for name in ("verbose", "quiet"): - val = locals().get(name) - if val is not None: - deprecateArgument(name, "configure logging instead") - setattr(self, name, val) - - self.lazy = lazy - self.recalcBBoxes = recalcBBoxes - self.recalcTimestamp = recalcTimestamp - self.tables = {} - self.reader = None - self.cfg = cfg.copy() if isinstance(cfg, AbstractConfig) else Config(cfg) - self.ignoreDecompileErrors = ignoreDecompileErrors - - if not file: - self.sfntVersion = sfntVersion - self.flavor = flavor - self.flavorData = None - return - seekable = True - if not hasattr(file, "read"): - closeStream = True - # assume file is a string - if res_name_or_index is not None: - # see if it contains 'sfnt' resources in the resource or data fork - from . import macUtils - - if res_name_or_index == 0: - if macUtils.getSFNTResIndices(file): - # get the first available sfnt font. - file = macUtils.SFNTResourceReader(file, 1) - else: - file = open(file, "rb") - else: - file = macUtils.SFNTResourceReader(file, res_name_or_index) - else: - file = open(file, "rb") - else: - # assume "file" is a readable file object - closeStream = False - # SFNTReader wants the input file to be seekable. - # SpooledTemporaryFile has no seekable() on < 3.11, but still can seek: - # https://github.com/fonttools/fonttools/issues/3052 - if hasattr(file, "seekable"): - seekable = file.seekable() - elif hasattr(file, "seek"): - try: - file.seek(0) - except UnsupportedOperation: - seekable = False - - if not self.lazy: - # read input file in memory and wrap a stream around it to allow overwriting - if seekable: - file.seek(0) - tmp = BytesIO(file.read()) - if hasattr(file, "name"): - # save reference to input file name - tmp.name = file.name - if closeStream: - file.close() - file = tmp - elif not seekable: - raise TTLibError("Input file must be seekable when lazy=True") - self._tableCache = _tableCache - self.reader = SFNTReader(file, checkChecksums, fontNumber=fontNumber) - self.sfntVersion = self.reader.sfntVersion - self.flavor = self.reader.flavor - self.flavorData = self.reader.flavorData - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - self.close() - - def close(self): - """If we still have a reader object, close it.""" - if self.reader is not None: - self.reader.close() - - def save(self, file, reorderTables=True): - """Save the font to disk. - - Args: - file: Similarly to the constructor, can be either a pathname or a writable - file object. - reorderTables (Option[bool]): If true (the default), reorder the tables, - sorting them by tag (recommended by the OpenType specification). If - false, retain the original font order. If None, reorder by table - dependency (fastest). 
- """ - if not hasattr(file, "write"): - if self.lazy and self.reader.file.name == file: - raise TTLibError("Can't overwrite TTFont when 'lazy' attribute is True") - createStream = True - else: - # assume "file" is a writable file object - createStream = False - - tmp = BytesIO() - - writer_reordersTables = self._save(tmp) - - if not ( - reorderTables is None - or writer_reordersTables - or (reorderTables is False and self.reader is None) - ): - if reorderTables is False: - # sort tables using the original font's order - tableOrder = list(self.reader.keys()) - else: - # use the recommended order from the OpenType specification - tableOrder = None - tmp.flush() - tmp2 = BytesIO() - reorderFontTables(tmp, tmp2, tableOrder) - tmp.close() - tmp = tmp2 - - if createStream: - # "file" is a path - with open(file, "wb") as file: - file.write(tmp.getvalue()) - else: - file.write(tmp.getvalue()) - - tmp.close() - - def _save(self, file, tableCache=None): - """Internal function, to be shared by save() and TTCollection.save()""" - - if self.recalcTimestamp and "head" in self: - self[ - "head" - ] # make sure 'head' is loaded so the recalculation is actually done - - tags = list(self.keys()) - if "GlyphOrder" in tags: - tags.remove("GlyphOrder") - numTables = len(tags) - # write to a temporary stream to allow saving to unseekable streams - writer = SFNTWriter( - file, numTables, self.sfntVersion, self.flavor, self.flavorData - ) - - done = [] - for tag in tags: - self._writeTable(tag, writer, done, tableCache) - - writer.close() - - return writer.reordersTables() - - def saveXML(self, fileOrPath, newlinestr="\n", **kwargs): - """Export the font as TTX (an XML-based text file), or as a series of text - files when splitTables is true. In the latter case, the 'fileOrPath' - argument should be a path to a directory. - The 'tables' argument must either be false (dump all tables) or a - list of tables to dump. The 'skipTables' argument may be a list of tables - to skip, but only when the 'tables' argument is false. - """ - - writer = xmlWriter.XMLWriter(fileOrPath, newlinestr=newlinestr) - self._saveXML(writer, **kwargs) - writer.close() - - def _saveXML( - self, - writer, - writeVersion=True, - quiet=None, - tables=None, - skipTables=None, - splitTables=False, - splitGlyphs=False, - disassembleInstructions=True, - bitmapGlyphDataFormat="raw", - ): - - if quiet is not None: - deprecateArgument("quiet", "configure logging instead") - - self.disassembleInstructions = disassembleInstructions - self.bitmapGlyphDataFormat = bitmapGlyphDataFormat - if not tables: - tables = list(self.keys()) - if "GlyphOrder" not in tables: - tables = ["GlyphOrder"] + tables - if skipTables: - for tag in skipTables: - if tag in tables: - tables.remove(tag) - numTables = len(tables) - - if writeVersion: - from fontTools import version - - version = ".".join(version.split(".")[:2]) - writer.begintag( - "ttFont", - sfntVersion=repr(tostr(self.sfntVersion))[1:-1], - ttLibVersion=version, - ) - else: - writer.begintag("ttFont", sfntVersion=repr(tostr(self.sfntVersion))[1:-1]) - writer.newline() - - # always splitTables if splitGlyphs is enabled - splitTables = splitTables or splitGlyphs - - if not splitTables: - writer.newline() - else: - path, ext = os.path.splitext(writer.filename) - - for i in range(numTables): - tag = tables[i] - if splitTables: - tablePath = path + "." 
+ tagToIdentifier(tag) + ext - tableWriter = xmlWriter.XMLWriter( - tablePath, newlinestr=writer.newlinestr - ) - tableWriter.begintag("ttFont", ttLibVersion=version) - tableWriter.newline() - tableWriter.newline() - writer.simpletag(tagToXML(tag), src=os.path.basename(tablePath)) - writer.newline() - else: - tableWriter = writer - self._tableToXML(tableWriter, tag, splitGlyphs=splitGlyphs) - if splitTables: - tableWriter.endtag("ttFont") - tableWriter.newline() - tableWriter.close() - writer.endtag("ttFont") - writer.newline() - - def _tableToXML(self, writer, tag, quiet=None, splitGlyphs=False): - if quiet is not None: - deprecateArgument("quiet", "configure logging instead") - if tag in self: - table = self[tag] - report = "Dumping '%s' table..." % tag - else: - report = "No '%s' table found." % tag - log.info(report) - if tag not in self: - return - xmlTag = tagToXML(tag) - attrs = dict() - if hasattr(table, "ERROR"): - attrs["ERROR"] = "decompilation error" - from .tables.DefaultTable import DefaultTable - - if table.__class__ == DefaultTable: - attrs["raw"] = True - writer.begintag(xmlTag, **attrs) - writer.newline() - if tag == "glyf": - table.toXML(writer, self, splitGlyphs=splitGlyphs) - else: - table.toXML(writer, self) - writer.endtag(xmlTag) - writer.newline() - writer.newline() - - def importXML(self, fileOrPath, quiet=None): - """Import a TTX file (an XML-based text format), so as to recreate - a font object. - """ - if quiet is not None: - deprecateArgument("quiet", "configure logging instead") - - if "maxp" in self and "post" in self: - # Make sure the glyph order is loaded, as it otherwise gets - # lost if the XML doesn't contain the glyph order, yet does - # contain the table which was originally used to extract the - # glyph names from (ie. 'post', 'cmap' or 'CFF '). - self.getGlyphOrder() - - from fontTools.misc import xmlReader - - reader = xmlReader.XMLReader(fileOrPath, self) - reader.read() - - def isLoaded(self, tag): - """Return true if the table identified by ``tag`` has been - decompiled and loaded into memory.""" - return tag in self.tables - - def has_key(self, tag): - """Test if the table identified by ``tag`` is present in the font. 
- - As well as this method, ``tag in font`` can also be used to determine the - presence of the table.""" - if self.isLoaded(tag): - return True - elif self.reader and tag in self.reader: - return True - elif tag == "GlyphOrder": - return True - else: - return False - - __contains__ = has_key - - def keys(self): - """Returns the list of tables in the font, along with the ``GlyphOrder`` pseudo-table.""" - keys = list(self.tables.keys()) - if self.reader: - for key in list(self.reader.keys()): - if key not in keys: - keys.append(key) - - if "GlyphOrder" in keys: - keys.remove("GlyphOrder") - keys = sortedTagList(keys) - return ["GlyphOrder"] + keys - - def ensureDecompiled(self, recurse=None): - """Decompile all the tables, even if a TTFont was opened in 'lazy' mode.""" - for tag in self.keys(): - table = self[tag] - if recurse is None: - recurse = self.lazy is not False - if recurse and hasattr(table, "ensureDecompiled"): - table.ensureDecompiled(recurse=recurse) - self.lazy = False - - def __len__(self): - return len(list(self.keys())) - - def __getitem__(self, tag): - tag = Tag(tag) - table = self.tables.get(tag) - if table is None: - if tag == "GlyphOrder": - table = GlyphOrder(tag) - self.tables[tag] = table - elif self.reader is not None: - table = self._readTable(tag) - else: - raise KeyError("'%s' table not found" % tag) - return table - - def _readTable(self, tag): - log.debug("Reading '%s' table from disk", tag) - data = self.reader[tag] - if self._tableCache is not None: - table = self._tableCache.get((tag, data)) - if table is not None: - return table - tableClass = getTableClass(tag) - table = tableClass(tag) - self.tables[tag] = table - log.debug("Decompiling '%s' table", tag) - try: - table.decompile(data, self) - except Exception: - if not self.ignoreDecompileErrors: - raise - # fall back to DefaultTable, retaining the binary table data - log.exception( - "An exception occurred during the decompilation of the '%s' table", tag - ) - from .tables.DefaultTable import DefaultTable - - file = StringIO() - traceback.print_exc(file=file) - table = DefaultTable(tag) - table.ERROR = file.getvalue() - self.tables[tag] = table - table.decompile(data, self) - if self._tableCache is not None: - self._tableCache[(tag, data)] = table - return table - - def __setitem__(self, tag, table): - self.tables[Tag(tag)] = table - - def __delitem__(self, tag): - if tag not in self: - raise KeyError("'%s' table not found" % tag) - if tag in self.tables: - del self.tables[tag] - if self.reader and tag in self.reader: - del self.reader[tag] - - def get(self, tag, default=None): - """Returns the table if it exists or (optionally) a default if it doesn't.""" - try: - return self[tag] - except KeyError: - return default - - def setGlyphOrder(self, glyphOrder): - """Set the glyph order - - Args: - glyphOrder ([str]): List of glyph names in order. - """ - self.glyphOrder = glyphOrder - if hasattr(self, "_reverseGlyphOrderDict"): - del self._reverseGlyphOrderDict - if self.isLoaded("glyf"): - self["glyf"].setGlyphOrder(glyphOrder) - - def getGlyphOrder(self): - """Returns a list of glyph names ordered by their position in the font.""" - try: - return self.glyphOrder - except AttributeError: - pass - if "CFF " in self: - cff = self["CFF "] - self.glyphOrder = cff.getGlyphOrder() - elif "post" in self: - # TrueType font - glyphOrder = self["post"].getGlyphOrder() - if glyphOrder is None: - # - # No names found in the 'post' table. 
- # Try to create glyph names from the unicode cmap (if available) - # in combination with the Adobe Glyph List (AGL). - # - self._getGlyphNamesFromCmap() - elif len(glyphOrder) < self["maxp"].numGlyphs: - # - # Not enough names found in the 'post' table. - # Can happen when 'post' format 1 is improperly used on a font that - # has more than 258 glyphs (the lenght of 'standardGlyphOrder'). - # - log.warning( - "Not enough names found in the 'post' table, generating them from cmap instead" - ) - self._getGlyphNamesFromCmap() - else: - self.glyphOrder = glyphOrder - else: - self._getGlyphNamesFromCmap() - return self.glyphOrder - - def _getGlyphNamesFromCmap(self): - # - # This is rather convoluted, but then again, it's an interesting problem: - # - we need to use the unicode values found in the cmap table to - # build glyph names (eg. because there is only a minimal post table, - # or none at all). - # - but the cmap parser also needs glyph names to work with... - # So here's what we do: - # - make up glyph names based on glyphID - # - load a temporary cmap table based on those names - # - extract the unicode values, build the "real" glyph names - # - unload the temporary cmap table - # - if self.isLoaded("cmap"): - # Bootstrapping: we're getting called by the cmap parser - # itself. This means self.tables['cmap'] contains a partially - # loaded cmap, making it impossible to get at a unicode - # subtable here. We remove the partially loaded cmap and - # restore it later. - # This only happens if the cmap table is loaded before any - # other table that does f.getGlyphOrder() or f.getGlyphName(). - cmapLoading = self.tables["cmap"] - del self.tables["cmap"] - else: - cmapLoading = None - # Make up glyph names based on glyphID, which will be used by the - # temporary cmap and by the real cmap in case we don't find a unicode - # cmap. - numGlyphs = int(self["maxp"].numGlyphs) - glyphOrder = [None] * numGlyphs - glyphOrder[0] = ".notdef" - for i in range(1, numGlyphs): - glyphOrder[i] = "glyph%.5d" % i - # Set the glyph order, so the cmap parser has something - # to work with (so we don't get called recursively). - self.glyphOrder = glyphOrder - - # Make up glyph names based on the reversed cmap table. Because some - # glyphs (eg. ligatures or alternates) may not be reachable via cmap, - # this naming table will usually not cover all glyphs in the font. - # If the font has no Unicode cmap table, reversecmap will be empty. - if "cmap" in self: - reversecmap = self["cmap"].buildReversed() - else: - reversecmap = {} - useCount = {} - for i in range(numGlyphs): - tempName = glyphOrder[i] - if tempName in reversecmap: - # If a font maps both U+0041 LATIN CAPITAL LETTER A and - # U+0391 GREEK CAPITAL LETTER ALPHA to the same glyph, - # we prefer naming the glyph as "A". - glyphName = self._makeGlyphName(min(reversecmap[tempName])) - numUses = useCount[glyphName] = useCount.get(glyphName, 0) + 1 - if numUses > 1: - glyphName = "%s.alt%d" % (glyphName, numUses - 1) - glyphOrder[i] = glyphName - - if "cmap" in self: - # Delete the temporary cmap table from the cache, so it can - # be parsed again with the right names. - del self.tables["cmap"] - self.glyphOrder = glyphOrder - if cmapLoading: - # restore partially loaded cmap, so it can continue loading - # using the proper names. 
- self.tables["cmap"] = cmapLoading - - @staticmethod - def _makeGlyphName(codepoint): - from fontTools import agl # Adobe Glyph List - - if codepoint in agl.UV2AGL: - return agl.UV2AGL[codepoint] - elif codepoint <= 0xFFFF: - return "uni%04X" % codepoint - else: - return "u%X" % codepoint - - def getGlyphNames(self): - """Get a list of glyph names, sorted alphabetically.""" - glyphNames = sorted(self.getGlyphOrder()) - return glyphNames - - def getGlyphNames2(self): - """Get a list of glyph names, sorted alphabetically, - but not case sensitive. - """ - from fontTools.misc import textTools - - return textTools.caselessSort(self.getGlyphOrder()) - - def getGlyphName(self, glyphID): - """Returns the name for the glyph with the given ID. - - If no name is available, synthesises one with the form ``glyphXXXXX``` where - ```XXXXX`` is the zero-padded glyph ID. - """ - try: - return self.getGlyphOrder()[glyphID] - except IndexError: - return "glyph%.5d" % glyphID - - def getGlyphNameMany(self, lst): - """Converts a list of glyph IDs into a list of glyph names.""" - glyphOrder = self.getGlyphOrder() - cnt = len(glyphOrder) - return [glyphOrder[gid] if gid < cnt else "glyph%.5d" % gid for gid in lst] - - def getGlyphID(self, glyphName): - """Returns the ID of the glyph with the given name.""" - try: - return self.getReverseGlyphMap()[glyphName] - except KeyError: - if glyphName[:5] == "glyph": - try: - return int(glyphName[5:]) - except (NameError, ValueError): - raise KeyError(glyphName) - raise - - def getGlyphIDMany(self, lst): - """Converts a list of glyph names into a list of glyph IDs.""" - d = self.getReverseGlyphMap() - try: - return [d[glyphName] for glyphName in lst] - except KeyError: - getGlyphID = self.getGlyphID - return [getGlyphID(glyphName) for glyphName in lst] - - def getReverseGlyphMap(self, rebuild=False): - """Returns a mapping of glyph names to glyph IDs.""" - if rebuild or not hasattr(self, "_reverseGlyphOrderDict"): - self._buildReverseGlyphOrderDict() - return self._reverseGlyphOrderDict - - def _buildReverseGlyphOrderDict(self): - self._reverseGlyphOrderDict = d = {} - for glyphID, glyphName in enumerate(self.getGlyphOrder()): - d[glyphName] = glyphID - return d - - def _writeTable(self, tag, writer, done, tableCache=None): - """Internal helper function for self.save(). Keeps track of - inter-table dependencies. - """ - if tag in done: - return - tableClass = getTableClass(tag) - for masterTable in tableClass.dependencies: - if masterTable not in done: - if masterTable in self: - self._writeTable(masterTable, writer, done, tableCache) - else: - done.append(masterTable) - done.append(tag) - tabledata = self.getTableData(tag) - if tableCache is not None: - entry = tableCache.get((Tag(tag), tabledata)) - if entry is not None: - log.debug("reusing '%s' table", tag) - writer.setEntry(tag, entry) - return - log.debug("Writing '%s' table to disk", tag) - writer[tag] = tabledata - if tableCache is not None: - tableCache[(Tag(tag), tabledata)] = writer[tag] - - def getTableData(self, tag): - """Returns the binary representation of a table. - - If the table is currently loaded and in memory, the data is compiled to - binary and returned; if it is not currently loaded, the binary data is - read from the font file and returned. 
- """ - tag = Tag(tag) - if self.isLoaded(tag): - log.debug("Compiling '%s' table", tag) - return self.tables[tag].compile(self) - elif self.reader and tag in self.reader: - log.debug("Reading '%s' table from disk", tag) - return self.reader[tag] - else: - raise KeyError(tag) - - def getGlyphSet(self, preferCFF=True, location=None, normalized=False): - """Return a generic GlyphSet, which is a dict-like object - mapping glyph names to glyph objects. The returned glyph objects - have a ``.draw()`` method that supports the Pen protocol, and will - have an attribute named 'width'. - - If the font is CFF-based, the outlines will be taken from the ``CFF `` - or ``CFF2`` tables. Otherwise the outlines will be taken from the - ``glyf`` table. - - If the font contains both a ``CFF ``/``CFF2`` and a ``glyf`` table, you - can use the ``preferCFF`` argument to specify which one should be taken. - If the font contains both a ``CFF `` and a ``CFF2`` table, the latter is - taken. - - If the ``location`` parameter is set, it should be a dictionary mapping - four-letter variation tags to their float values, and the returned - glyph-set will represent an instance of a variable font at that - location. - - If the ``normalized`` variable is set to True, that location is - interpreted as in the normalized (-1..+1) space, otherwise it is in the - font's defined axes space. - """ - if location and "fvar" not in self: - location = None - if location and not normalized: - location = self.normalizeLocation(location) - if ("CFF " in self or "CFF2" in self) and (preferCFF or "glyf" not in self): - return _TTGlyphSetCFF(self, location) - elif "glyf" in self: - return _TTGlyphSetGlyf(self, location) - else: - raise TTLibError("Font contains no outlines") - - def normalizeLocation(self, location): - """Normalize a ``location`` from the font's defined axes space (also - known as user space) into the normalized (-1..+1) space. It applies - ``avar`` mapping if the font contains an ``avar`` table. - - The ``location`` parameter should be a dictionary mapping four-letter - variation tags to their float values. - - Raises ``TTLibError`` if the font is not a variable font. - """ - from fontTools.varLib.models import normalizeLocation, piecewiseLinearMap - - if "fvar" not in self: - raise TTLibError("Not a variable font") - - axes = { - a.axisTag: (a.minValue, a.defaultValue, a.maxValue) - for a in self["fvar"].axes - } - location = normalizeLocation(location, axes) - if "avar" in self: - avar = self["avar"] - avarSegments = avar.segments - mappedLocation = {} - for axisTag, value in location.items(): - avarMapping = avarSegments.get(axisTag, None) - if avarMapping is not None: - value = piecewiseLinearMap(value, avarMapping) - mappedLocation[axisTag] = value - location = mappedLocation - return location - - def getBestCmap( - self, - cmapPreferences=( - (3, 10), - (0, 6), - (0, 4), - (3, 1), - (0, 3), - (0, 2), - (0, 1), - (0, 0), - ), - ): - """Returns the 'best' Unicode cmap dictionary available in the font - or ``None``, if no Unicode cmap subtable is available. 
- - By default it will search for the following (platformID, platEncID) - pairs in order:: - - (3, 10), # Windows Unicode full repertoire - (0, 6), # Unicode full repertoire (format 13 subtable) - (0, 4), # Unicode 2.0 full repertoire - (3, 1), # Windows Unicode BMP - (0, 3), # Unicode 2.0 BMP - (0, 2), # Unicode ISO/IEC 10646 - (0, 1), # Unicode 1.1 - (0, 0) # Unicode 1.0 - - This particular order matches what HarfBuzz uses to choose what - subtable to use by default. This order prefers the largest-repertoire - subtable, and among those, prefers the Windows-platform over the - Unicode-platform as the former has wider support. - - This order can be customized via the ``cmapPreferences`` argument. - """ - return self["cmap"].getBestCmap(cmapPreferences=cmapPreferences) - - -class GlyphOrder(object): - - """A pseudo table. The glyph order isn't in the font as a separate - table, but it's nice to present it as such in the TTX format. - """ - - def __init__(self, tag=None): - pass - - def toXML(self, writer, ttFont): - glyphOrder = ttFont.getGlyphOrder() - writer.comment( - "The 'id' attribute is only for humans; " "it is ignored when parsed." - ) - writer.newline() - for i in range(len(glyphOrder)): - glyphName = glyphOrder[i] - writer.simpletag("GlyphID", id=i, name=glyphName) - writer.newline() - - def fromXML(self, name, attrs, content, ttFont): - if not hasattr(self, "glyphOrder"): - self.glyphOrder = [] - if name == "GlyphID": - self.glyphOrder.append(attrs["name"]) - ttFont.setGlyphOrder(self.glyphOrder) - - -def getTableModule(tag): - """Fetch the packer/unpacker module for a table. - Return None when no module is found. - """ - from . import tables - - pyTag = tagToIdentifier(tag) - try: - __import__("fontTools.ttLib.tables." + pyTag) - except ImportError as err: - # If pyTag is found in the ImportError message, - # means table is not implemented. If it's not - # there, then some other module is missing, don't - # suppress the error. - if str(err).find(pyTag) >= 0: - return None - else: - raise err - else: - return getattr(tables, pyTag) - - -# Registry for custom table packer/unpacker classes. Keys are table -# tags, values are (moduleName, className) tuples. -# See registerCustomTableClass() and getCustomTableClass() -_customTableRegistry = {} - - -def registerCustomTableClass(tag, moduleName, className=None): - """Register a custom packer/unpacker class for a table. - - The 'moduleName' must be an importable module. If no 'className' - is given, it is derived from the tag, for example it will be - ``table_C_U_S_T_`` for a 'CUST' tag. - - The registered table class should be a subclass of - :py:class:`fontTools.ttLib.tables.DefaultTable.DefaultTable` - """ - if className is None: - className = "table_" + tagToIdentifier(tag) - _customTableRegistry[tag] = (moduleName, className) - - -def unregisterCustomTableClass(tag): - """Unregister the custom packer/unpacker class for a table.""" - del _customTableRegistry[tag] - - -def getCustomTableClass(tag): - """Return the custom table class for tag, if one has been registered - with 'registerCustomTableClass()'. Else return None. 
- """ - if tag not in _customTableRegistry: - return None - import importlib - - moduleName, className = _customTableRegistry[tag] - module = importlib.import_module(moduleName) - return getattr(module, className) - - -def getTableClass(tag): - """Fetch the packer/unpacker class for a table.""" - tableClass = getCustomTableClass(tag) - if tableClass is not None: - return tableClass - module = getTableModule(tag) - if module is None: - from .tables.DefaultTable import DefaultTable - - return DefaultTable - pyTag = tagToIdentifier(tag) - tableClass = getattr(module, "table_" + pyTag) - return tableClass - - -def getClassTag(klass): - """Fetch the table tag for a class object.""" - name = klass.__name__ - assert name[:6] == "table_" - name = name[6:] # Chop 'table_' - return identifierToTag(name) - - -def newTable(tag): - """Return a new instance of a table.""" - tableClass = getTableClass(tag) - return tableClass(tag) - - -def _escapechar(c): - """Helper function for tagToIdentifier()""" - import re - - if re.match("[a-z0-9]", c): - return "_" + c - elif re.match("[A-Z]", c): - return c + "_" - else: - return hex(byteord(c))[2:] - - -def tagToIdentifier(tag): - """Convert a table tag to a valid (but UGLY) python identifier, - as well as a filename that's guaranteed to be unique even on a - caseless file system. Each character is mapped to two characters. - Lowercase letters get an underscore before the letter, uppercase - letters get an underscore after the letter. Trailing spaces are - trimmed. Illegal characters are escaped as two hex bytes. If the - result starts with a number (as the result of a hex escape), an - extra underscore is prepended. Examples:: - - >>> tagToIdentifier('glyf') - '_g_l_y_f' - >>> tagToIdentifier('cvt ') - '_c_v_t' - >>> tagToIdentifier('OS/2') - 'O_S_2f_2' - """ - import re - - tag = Tag(tag) - if tag == "GlyphOrder": - return tag - assert len(tag) == 4, "tag should be 4 characters long" - while len(tag) > 1 and tag[-1] == " ": - tag = tag[:-1] - ident = "" - for c in tag: - ident = ident + _escapechar(c) - if re.match("[0-9]", ident): - ident = "_" + ident - return ident - - -def identifierToTag(ident): - """the opposite of tagToIdentifier()""" - if ident == "GlyphOrder": - return ident - if len(ident) % 2 and ident[0] == "_": - ident = ident[1:] - assert not (len(ident) % 2) - tag = "" - for i in range(0, len(ident), 2): - if ident[i] == "_": - tag = tag + ident[i + 1] - elif ident[i + 1] == "_": - tag = tag + ident[i] - else: - # assume hex - tag = tag + chr(int(ident[i : i + 2], 16)) - # append trailing spaces - tag = tag + (4 - len(tag)) * " " - return Tag(tag) - - -def tagToXML(tag): - """Similarly to tagToIdentifier(), this converts a TT tag - to a valid XML element name. Since XML element names are - case sensitive, this is a fairly simple/readable translation. 
- """ - import re - - tag = Tag(tag) - if tag == "OS/2": - return "OS_2" - elif tag == "GlyphOrder": - return tag - if re.match("[A-Za-z_][A-Za-z_0-9]* *$", tag): - return tag.strip() - else: - return tagToIdentifier(tag) - - -def xmlToTag(tag): - """The opposite of tagToXML()""" - if tag == "OS_2": - return Tag("OS/2") - if len(tag) == 8: - return identifierToTag(tag) - else: - return Tag(tag + " " * (4 - len(tag))) - - -# Table order as recommended in the OpenType specification 1.4 -TTFTableOrder = [ - "head", - "hhea", - "maxp", - "OS/2", - "hmtx", - "LTSH", - "VDMX", - "hdmx", - "cmap", - "fpgm", - "prep", - "cvt ", - "loca", - "glyf", - "kern", - "name", - "post", - "gasp", - "PCLT", -] - -OTFTableOrder = ["head", "hhea", "maxp", "OS/2", "name", "cmap", "post", "CFF "] - - -def sortedTagList(tagList, tableOrder=None): - """Return a sorted copy of tagList, sorted according to the OpenType - specification, or according to a custom tableOrder. If given and not - None, tableOrder needs to be a list of tag names. - """ - tagList = sorted(tagList) - if tableOrder is None: - if "DSIG" in tagList: - # DSIG should be last (XXX spec reference?) - tagList.remove("DSIG") - tagList.append("DSIG") - if "CFF " in tagList: - tableOrder = OTFTableOrder - else: - tableOrder = TTFTableOrder - orderedTables = [] - for tag in tableOrder: - if tag in tagList: - orderedTables.append(tag) - tagList.remove(tag) - orderedTables.extend(tagList) - return orderedTables - - -def reorderFontTables(inFile, outFile, tableOrder=None, checkChecksums=False): - """Rewrite a font file, ordering the tables as recommended by the - OpenType specification 1.4. - """ - inFile.seek(0) - outFile.seek(0) - reader = SFNTReader(inFile, checkChecksums=checkChecksums) - writer = SFNTWriter( - outFile, - len(reader.tables), - reader.sfntVersion, - reader.flavor, - reader.flavorData, - ) - tables = list(reader.keys()) - for tag in sortedTagList(tables, tableOrder): - writer[tag] = reader[tag] - writer.close() - - -def maxPowerOfTwo(x): - """Return the highest exponent of two, so that - (2 ** exponent) <= x. Return 0 if x is 0. - """ - exponent = 0 - while x: - x = x >> 1 - exponent = exponent + 1 - return max(exponent - 1, 0) - - -def getSearchRange(n, itemSize=16): - """Calculate searchRange, entrySelector, rangeShift.""" - # itemSize defaults to 16, for backward compatibility - # with upstream fonttools. 
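    # Editor's note (worked example, not part of the original source): for a font
    # with n=54 tables and the default itemSize=16, maxPowerOfTwo(54) is 5, so
    # searchRange = (2**5) * 16 = 512, entrySelector = 5, and
    # rangeShift = max(0, 54*16 - 512) = 352.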
- exponent = maxPowerOfTwo(n) - searchRange = (2**exponent) * itemSize - entrySelector = exponent - rangeShift = max(0, n * itemSize - searchRange) - return searchRange, entrySelector, rangeShift diff --git a/spaces/jonigata/PoseMaker2/main.py b/spaces/jonigata/PoseMaker2/main.py deleted file mode 100644 index f46eee2607c89977a8591900c977cb7e2dd2b67a..0000000000000000000000000000000000000000 --- a/spaces/jonigata/PoseMaker2/main.py +++ /dev/null @@ -1,171 +0,0 @@ -import gradio as gr -import json as js -import util -from fastapi.staticfiles import StaticFiles -from fileservice import app -from pose import infer, draw - - -def image_changed(image): - if image == None: - return "estimation", {} - - if 'openpose' in image.info: - print("pose found") - jsonText = image.info['openpose'] - jsonObj = js.loads(jsonText) - subset = jsonObj['subset'] - return f"""{image.width}px x {image.height}px, {len(subset)} indivisual(s)""", jsonText - else: - print("pose not found") - pose_result, returned_outputs = infer(util.pil2cv(image)) - - candidate = [] - subset = [] - for d in pose_result: - n = len(candidate) - if d['bbox'][4] < 0.9: - continue - keypoints = d['keypoints'][:, :2].tolist() - midpoint = [(keypoints[5][0] + keypoints[6][0]) / 2, (keypoints[5][1] + keypoints[6][1]) / 2] - keypoints.append(midpoint) - candidate.extend(util.convert_keypoints(keypoints)) - m = len(candidate) - subset.append([j for j in range(n, m)]) - - jsonText = "{ \"candidate\": " + util.candidate_to_json_string(candidate) + ", \"subset\": " + util.subset_to_json_string(subset) + " }" - return f"""{image.width}px x {image.height}px, {len(subset)} indivisual(s)""", jsonText - -html_text = f""" - -""" - -with gr.Blocks(css="""button { min-width: 80px; }""") as demo: - with gr.Row(): - with gr.Column(scale=1): - width = gr.Slider(label="Width", minimum=512, maximum=1024, step=64, value=512, interactive=True) - height = gr.Slider(label="Height", minimum=512, maximum=1024, step=64, value=512, interactive=True) - with gr.Accordion(label="Pose estimation", open=False): - source = gr.Image(type="pil") - estimationResult = gr.Markdown("""estimation""") - with gr.Row(): - with gr.Column(min_width=80): - applySizeBtn = gr.Button(value="Apply size") - with gr.Column(min_width=80): - replaceBtn = gr.Button(value="Replace") - with gr.Column(min_width=80): - importBtn = gr.Button(value="Import") - with gr.Column(min_width=80): - bgBtn = gr.Button(value="Background") - with gr.Column(min_width=80): - removeBgBtn = gr.Button(value="RemoveBG") - with gr.Accordion(label="Json", open=False): - with gr.Row(): - with gr.Column(min_width=80): - replaceWithJsonBtn = gr.Button(value="Replace") - with gr.Column(min_width=80): - importJsonBtn = gr.Button(value="Import") - gr.Markdown(""" -| inout | how to | -| -----------------| ----------------------------------------------------------------------------------------- | -| Import | Paste json to "Json source" and click "Read", edit the width/height, then click "Replace" or "Import". | -| Export | click "Save" and "Copy to clipboard" of "Json" section. | -""") - json = gr.JSON(label="Json") - jsonSource = gr.Textbox(label="Json source", lines=10) - with gr.Accordion(label="Notes", open=False): - gr.Markdown(""" -#### How to bring pose to ControlNet -1. Press **Save** button -2. **Drag** the file placed at the bottom left corder of browser -3. **Drop** the file into ControlNet - -#### Reuse pose image -Pose image generated by this tool has pose data in the image itself. 
You can reuse pose information by loading it as the image source instead of a regular image. - -#### Points to note for pseudo-3D rotation -When performing pseudo-3D rotation on the X and Y axes, the projection is converted to 2D and Z-axis information is lost when the mouse button is released. This means that if you finish dragging while the shape is collapsed, you may not be able to restore it to its original state. In such a case, please use the "undo" function. - -#### Pose estimation -In this project, MMPose is used for pose estimation. -""") - with gr.Column(scale=2): - html = gr.HTML(html_text) - with gr.Row(): - with gr.Column(scale=1, min_width=60): - saveBtn = gr.Button(value="Save") - with gr.Column(scale=7): - gr.Markdown(""" -- "ctrl + drag" to **scale** -- "alt + drag" to **move** -- "shift + drag" to **rotate** (move right first, release shift, then up or down) -- "space + drag" to **range-move** -- "[", "]" or "Alt + wheel" or "Space + wheel" to shrink or expand **range** -- "ctrl + Z", "shift + ctrl + Z" to **undo**, **redo** -- "ctrl + E" **add** new person -- "D + click" to **delete** person -- "Q + click" to **cut off** limb -- "X + drag" to **x-axis** pseudo-3D rotation -- "C + drag" to **y-axis** pseudo-3D rotation -- "R + click" to **repair** -- "H + click" to **hide** node - -When using Q, X, C, R, pressing and dont release until the operation is complete. - -[Contact us for feature requests or bug reports (anonymous)](https://t.co/UC3jJOJJtS) -""") - - width.change(fn=None, inputs=[width], _js="(w) => { resizeCanvas(w,null); }") - height.change(fn=None, inputs=[height], _js="(h) => { resizeCanvas(null,h); }") - - source.change( - fn = image_changed, - inputs = [source], - outputs = [estimationResult, json]) - applySizeBtn.click( - fn = lambda x: (x.width, x.height), - inputs = [source], - outputs = [width, height]) - replaceBtn.click( - fn = None, - inputs = [json], - outputs = [], - _js="(json) => { initializeEditor(); importPose(json); return []; }") - importBtn.click( - fn = None, - inputs = [json], - outputs = [], - _js="(json) => { importPose(json); return []; }") - bgBtn.click( - fn = None, - inputs = [source], - outputs = [], - _js="(image) => { importBackground(image); return []; }") - removeBgBtn.click( - fn = None, - inputs = [], - outputs = [], - _js="() => { importBackground(null); return []; }") - - saveBtn.click( - fn = None, - inputs = [], outputs = [json], - _js="() => { return savePose(); }") - jsonSource.change( - fn = lambda x: x, - inputs = [jsonSource], outputs = [json]) - replaceWithJsonBtn.click( - fn = None, - inputs = [json], - outputs = [], - _js="(json) => { initializeEditor(); importPose(json); return []; }") - importJsonBtn.click( - fn = None, - inputs = [json], - outputs = [], - _js="(json) => { importPose(json); return []; }") - demo.load(fn=None, inputs=[], outputs=[], _js="() => { initializeEditor(); importPose(); return []; }") - -print("mount") -app.mount("/js", StaticFiles(directory="js"), name="js") -gr.mount_gradio_app(app, demo, path="/") diff --git a/spaces/jordonpeter01/ai-comic-factory/src/components/ui/menubar.tsx b/spaces/jordonpeter01/ai-comic-factory/src/components/ui/menubar.tsx deleted file mode 100644 index d57454816cea9b7572ad1ae6ab139d6946c4d5d5..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/ai-comic-factory/src/components/ui/menubar.tsx +++ /dev/null @@ -1,236 +0,0 @@ -"use client" - -import * as React from "react" -import * as MenubarPrimitive from "@radix-ui/react-menubar" -import { Check, 
ChevronRight, Circle } from "lucide-react" - -import { cn } from "@/lib/utils" - -const MenubarMenu = MenubarPrimitive.Menu - -const MenubarGroup = MenubarPrimitive.Group - -const MenubarPortal = MenubarPrimitive.Portal - -const MenubarSub = MenubarPrimitive.Sub - -const MenubarRadioGroup = MenubarPrimitive.RadioGroup - -const Menubar = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -Menubar.displayName = MenubarPrimitive.Root.displayName - -const MenubarTrigger = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -MenubarTrigger.displayName = MenubarPrimitive.Trigger.displayName - -const MenubarSubTrigger = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef & { - inset?: boolean - } ->(({ className, inset, children, ...props }, ref) => ( - - {children} - - -)) -MenubarSubTrigger.displayName = MenubarPrimitive.SubTrigger.displayName - -const MenubarSubContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -MenubarSubContent.displayName = MenubarPrimitive.SubContent.displayName - -const MenubarContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->( - ( - { className, align = "start", alignOffset = -4, sideOffset = 8, ...props }, - ref - ) => ( - - - - ) -) -MenubarContent.displayName = MenubarPrimitive.Content.displayName - -const MenubarItem = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef & { - inset?: boolean - } ->(({ className, inset, ...props }, ref) => ( - -)) -MenubarItem.displayName = MenubarPrimitive.Item.displayName - -const MenubarCheckboxItem = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, checked, ...props }, ref) => ( - - - - - - - {children} - -)) -MenubarCheckboxItem.displayName = MenubarPrimitive.CheckboxItem.displayName - -const MenubarRadioItem = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - - - - - {children} - -)) -MenubarRadioItem.displayName = MenubarPrimitive.RadioItem.displayName - -const MenubarLabel = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef & { - inset?: boolean - } ->(({ className, inset, ...props }, ref) => ( - -)) -MenubarLabel.displayName = MenubarPrimitive.Label.displayName - -const MenubarSeparator = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -MenubarSeparator.displayName = MenubarPrimitive.Separator.displayName - -const MenubarShortcut = ({ - className, - ...props -}: React.HTMLAttributes) => { - return ( - - ) -} -MenubarShortcut.displayname = "MenubarShortcut" - -export { - Menubar, - MenubarMenu, - MenubarTrigger, - MenubarContent, - MenubarItem, - MenubarSeparator, - MenubarLabel, - MenubarCheckboxItem, - MenubarRadioGroup, - MenubarRadioItem, - MenubarPortal, - MenubarSubContent, - MenubarSubTrigger, - MenubarGroup, - MenubarSub, - MenubarShortcut, -} diff --git a/spaces/jordonpeter01/ai-comic-factory/src/lib/replaceNonWhiteWithTransparent.ts b/spaces/jordonpeter01/ai-comic-factory/src/lib/replaceNonWhiteWithTransparent.ts deleted file mode 100644 index 6ffe6df050134290d39ee114e427741b26cfb419..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/ai-comic-factory/src/lib/replaceNonWhiteWithTransparent.ts +++ /dev/null @@ -1,46 +0,0 
@@ -export function replaceNonWhiteWithTransparent(imageBase64: string): Promise { - return new Promise((resolve, reject) => { - const img = new Image(); - img.onload = () => { - const canvas = document.createElement('canvas'); - const ctx = canvas.getContext('2d'); - if (!ctx) { - reject('Unable to get canvas context'); - return; - } - - const ratio = window.devicePixelRatio || 1; - canvas.width = img.width * ratio; - canvas.height = img.height * ratio; - ctx.scale(ratio, ratio); - - ctx.drawImage(img, 0, 0); - - const imageData = ctx.getImageData(0, 0, img.width, img.height); - const data = imageData.data; - console.log("ok") - - for (let i = 0; i < data.length; i += 4) { - if (data[i] === 255 && data[i + 1] === 255 && data[i + 2] === 255) { - // Change white (also shades of grays) pixels to black - data[i] = 0; - data[i + 1] = 0; - data[i + 2] = 0; - } else { - // Change all other pixels to transparent - data[i + 3] = 0; - } - } - - ctx.putImageData(imageData, 0, 0); - - resolve(canvas.toDataURL()); - }; - - img.onerror = (err) => { - reject(err); - }; - - img.src = imageBase64; - }); -} \ No newline at end of file diff --git a/spaces/josevalim/livebook/README.md b/spaces/josevalim/livebook/README.md deleted file mode 100644 index 7b12495942e63525fa13b91ef4673911e7b3cb26..0000000000000000000000000000000000000000 --- a/spaces/josevalim/livebook/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Livebook -emoji: 📓 -colorFrom: pink -colorTo: purple -sdk: docker -fullWidth: true -duplicated_from: livebook-dev/livebook ---- - -You can install and run [Livebook](https://livebook.dev/) inside a Hugging Face Space. Here's [a tutorial](https://huggingface.co/docs/hub/spaces-sdks-docker-livebook) on how to do that. \ No newline at end of file diff --git a/spaces/jpfearnworks/ai_agents/modules/vector_stores/embedding/openai.py b/spaces/jpfearnworks/ai_agents/modules/vector_stores/embedding/openai.py deleted file mode 100644 index 7c43c77cf9e4265b931b07f66ab99a35f94eea72..0000000000000000000000000000000000000000 --- a/spaces/jpfearnworks/ai_agents/modules/vector_stores/embedding/openai.py +++ /dev/null @@ -1,18 +0,0 @@ -from langchain.embeddings.openai import OpenAIEmbeddings -from dataclasses import dataclass -import os - -@dataclass -class OpenAIEmbedConfig: - openai_api_key: str - -def get_default_openai_embeddings() -> OpenAIEmbeddings: - """ - Returns a default OpenAIEmbeddings instance with a default API key. - - Returns: - OpenAIEmbeddings: A new OpenAIEmbeddings instance. 
- """ - openai_api_key = os.environ.get('OPENAI_API_KEY') - embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key) - return embeddings \ No newline at end of file diff --git a/spaces/juliensimon/table_questions/app.py b/spaces/juliensimon/table_questions/app.py deleted file mode 100644 index cdd73ff33ed0c0d5c199fc7c7903071908dcd665..0000000000000000000000000000000000000000 --- a/spaces/juliensimon/table_questions/app.py +++ /dev/null @@ -1,72 +0,0 @@ -import gradio as gr -import pandas as pd -from transformers import ( - AutoModelForSeq2SeqLM, - AutoModelForTableQuestionAnswering, - AutoTokenizer, - pipeline, -) - -model_tapex = "microsoft/tapex-large-finetuned-wtq" -tokenizer_tapex = AutoTokenizer.from_pretrained(model_tapex) -model_tapex = AutoModelForSeq2SeqLM.from_pretrained(model_tapex) -pipe_tapex = pipeline( - "table-question-answering", model=model_tapex, tokenizer=tokenizer_tapex -) - -model_tapas = "google/tapas-large-finetuned-wtq" -tokenizer_tapas = AutoTokenizer.from_pretrained(model_tapas) -model_tapas = AutoModelForTableQuestionAnswering.from_pretrained(model_tapas) -pipe_tapas = pipeline( - "table-question-answering", model=model_tapas, tokenizer=tokenizer_tapas -) - - -def process(query, file, correct_answer, rows=20): - table = pd.read_csv(file.name, header=0).astype(str) - table = table[:rows] - result_tapex = pipe_tapex(table=table, query=query) - result_tapas = pipe_tapas(table=table, query=query) - return result_tapex["answer"], result_tapas["answer"], correct_answer - - -# Inputs -query_text = gr.Text(label="Enter a question") -input_file = gr.File(label="Upload a CSV file", type="file") -rows_slider = gr.Slider(label="Number of rows") - -# Output -answer_text_tapex = gr.Text(label="TAPEX answer") -answer_text_tapas = gr.Text(label="TAPAS answer") - -description = "This Space lets you ask questions on CSV documents with Microsoft [TAPEX-Large](https://huggingface.co/microsoft/tapex-large-finetuned-wtq) and Google [TAPAS-Large](https://huggingface.co/google/tapas-large-finetuned-wtq). \ -Both have been fine-tuned on the [WikiTableQuestions](https://huggingface.co/datasets/wikitablequestions) dataset. \n\n\ -A sample file with football statistics is available in the repository: \n\n\ -* Which team has the most wins? Answer: Manchester City FC\n\ -* Which team has the most wins: Chelsea, Liverpool or Everton? Answer: Liverpool\n\ -* Which teams have scored less than 40 goals? Answer: Cardiff City FC, Fulham FC, Brighton & Hove Albion FC, Huddersfield Town FC\n\ -* What is the average number of wins? Answer: 16 (rounded)\n\n\ -You can also upload your own CSV file. Please note that maximum sequence length for both models is 1024 tokens, \ -so you may need to limit the number of rows in your CSV file. Chunking is not implemented yet." 
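# Editor's note: the description above states that chunking is not implemented.
# The sketch below is one possible approach (an illustration, not part of the
# original Space): split the table into fixed-size row windows, query each window
# with the pipeline already defined above, and join the per-chunk answers. The
# helper name process_in_chunks and the default chunk size are assumptions.
def process_in_chunks(query, file, chunk_size=20):
    table = pd.read_csv(file.name, header=0).astype(str)
    answers = []
    for start in range(0, len(table), chunk_size):
        chunk = table.iloc[start:start + chunk_size].reset_index(drop=True)
        answers.append(pipe_tapas(table=chunk, query=query)["answer"])
    # Naive aggregation: list every per-chunk answer; aggregate queries such as
    # averages or counts would need query-specific post-processing instead.
    return "; ".join(answers)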
- -iface = gr.Interface( - theme="huggingface", - description=description, - layout="vertical", - fn=process, - inputs=[query_text, input_file, rows_slider], - outputs=[answer_text_tapex, answer_text_tapas], - examples=[ - ["Which team has the most wins?", "default_file.csv", 20], - [ - "Which team has the most wins: Chelsea, Liverpool or Everton?", - "default_file.csv", - 20, - ], - ["Which teams have scored less than 40 goals?", "default_file.csv", 20], - ["What is the average number of wins?", "default_file.csv", 20], - ], - allow_flagging="never", -) - -iface.launch() diff --git a/spaces/kaicheng/ChatGPT_ad/modules/models/models.py b/spaces/kaicheng/ChatGPT_ad/modules/models/models.py deleted file mode 100644 index be730033c42c1085a8c25bbd30cc4c84933f3770..0000000000000000000000000000000000000000 --- a/spaces/kaicheng/ChatGPT_ad/modules/models/models.py +++ /dev/null @@ -1,658 +0,0 @@ -from __future__ import annotations -from typing import TYPE_CHECKING, List - -import logging -import json -import commentjson as cjson -import os -import sys -import requests -import urllib3 -import platform -import base64 -from io import BytesIO -from PIL import Image - -from tqdm import tqdm -import colorama -import asyncio -import aiohttp -from enum import Enum -import uuid - -from ..presets import * -from ..index_func import * -from ..utils import * -from .. import shared -from ..config import retrieve_proxy, usage_limit -from modules import config -from .base_model import BaseLLMModel, ModelType - - -class OpenAIClient(BaseLLMModel): - def __init__( - self, - model_name, - api_key, - system_prompt=INITIAL_SYSTEM_PROMPT, - temperature=1.0, - top_p=1.0, - user_name="" - ) -> None: - super().__init__( - model_name=model_name, - temperature=temperature, - top_p=top_p, - system_prompt=system_prompt, - user=user_name - ) - self.api_key = api_key - self.need_api_key = True - self._refresh_header() - - def get_answer_stream_iter(self): - response = self._get_response(stream=True) - if response is not None: - iter = self._decode_chat_response(response) - partial_text = "" - for i in iter: - partial_text += i - yield partial_text - else: - yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG - - def get_answer_at_once(self): - response = self._get_response() - response = json.loads(response.text) - content = response["choices"][0]["message"]["content"] - total_token_count = response["usage"]["total_tokens"] - return content, total_token_count - - def count_token(self, user_input): - input_token_count = count_token(construct_user(user_input)) - if self.system_prompt is not None and len(self.all_token_counts) == 0: - system_prompt_token_count = count_token( - construct_system(self.system_prompt) - ) - return input_token_count + system_prompt_token_count - return input_token_count - - def billing_info(self): - try: - curr_time = datetime.datetime.now() - last_day_of_month = get_last_day_of_month( - curr_time).strftime("%Y-%m-%d") - first_day_of_month = curr_time.replace(day=1).strftime("%Y-%m-%d") - usage_url = f"{shared.state.usage_api_url}?start_date={first_day_of_month}&end_date={last_day_of_month}" - try: - usage_data = self._get_billing_data(usage_url) - except Exception as e: - logging.error(f"获取API使用情况失败:" + str(e)) - return i18n("**获取API使用情况失败**") - # rounded_usage = "{:.5f}".format(usage_data["total_usage"] / 100) - rounded_usage = round(usage_data["total_usage"] / 100, 5) - usage_percent = round(usage_data["total_usage"] / usage_limit, 2) - # return i18n("**本月使用金额** ") + f"\u3000 ${rounded_usage}" - return 
"""\ - """ + i18n("本月使用金额") + f""" -
          -
          - {usage_percent}% -
          -
          -
          ${rounded_usage}${usage_limit}
          - """ - except requests.exceptions.ConnectTimeout: - status_text = ( - STANDARD_ERROR_MSG + CONNECTION_TIMEOUT_MSG + ERROR_RETRIEVE_MSG - ) - return status_text - except requests.exceptions.ReadTimeout: - status_text = STANDARD_ERROR_MSG + READ_TIMEOUT_MSG + ERROR_RETRIEVE_MSG - return status_text - except Exception as e: - import traceback - traceback.print_exc() - logging.error(i18n("获取API使用情况失败:") + str(e)) - return STANDARD_ERROR_MSG + ERROR_RETRIEVE_MSG - - def set_token_upper_limit(self, new_upper_limit): - pass - - @shared.state.switching_api_key # 在不开启多账号模式的时候,这个装饰器不会起作用 - def _get_response(self, stream=False): - openai_api_key = self.api_key - system_prompt = self.system_prompt - history = self.history - logging.debug(colorama.Fore.YELLOW + - f"{history}" + colorama.Fore.RESET) - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {openai_api_key}", - } - - if system_prompt is not None: - history = [construct_system(system_prompt), *history] - - payload = { - "model": self.model_name, - "messages": history, - "temperature": self.temperature, - "top_p": self.top_p, - "n": self.n_choices, - "stream": stream, - "presence_penalty": self.presence_penalty, - "frequency_penalty": self.frequency_penalty, - } - - if self.max_generation_token is not None: - payload["max_tokens"] = self.max_generation_token - if self.stop_sequence is not None: - payload["stop"] = self.stop_sequence - if self.logit_bias is not None: - payload["logit_bias"] = self.logit_bias - if self.user_identifier: - payload["user"] = self.user_identifier - - if stream: - timeout = TIMEOUT_STREAMING - else: - timeout = TIMEOUT_ALL - - # 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求 - if shared.state.completion_url != COMPLETION_URL: - logging.info(f"使用自定义API URL: {shared.state.completion_url}") - - with retrieve_proxy(): - try: - response = requests.post( - shared.state.completion_url, - headers=headers, - json=payload, - stream=stream, - timeout=timeout, - ) - except: - return None - return response - - def _refresh_header(self): - self.headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {self.api_key}", - } - - def _get_billing_data(self, billing_url): - with retrieve_proxy(): - response = requests.get( - billing_url, - headers=self.headers, - timeout=TIMEOUT_ALL, - ) - - if response.status_code == 200: - data = response.json() - return data - else: - raise Exception( - f"API request failed with status code {response.status_code}: {response.text}" - ) - - def _decode_chat_response(self, response): - error_msg = "" - for chunk in response.iter_lines(): - if chunk: - chunk = chunk.decode() - chunk_length = len(chunk) - try: - chunk = json.loads(chunk[6:]) - except json.JSONDecodeError: - print(i18n("JSON解析错误,收到的内容: ") + f"{chunk}") - error_msg += chunk - continue - if chunk_length > 6 and "delta" in chunk["choices"][0]: - if chunk["choices"][0]["finish_reason"] == "stop": - break - try: - yield chunk["choices"][0]["delta"]["content"] - except Exception as e: - # logging.error(f"Error: {e}") - continue - if error_msg: - raise Exception(error_msg) - - def set_key(self, new_access_key): - ret = super().set_key(new_access_key) - self._refresh_header() - return ret - - -class ChatGLM_Client(BaseLLMModel): - def __init__(self, model_name, user_name="") -> None: - super().__init__(model_name=model_name, user=user_name) - from transformers import AutoTokenizer, AutoModel - import torch - global CHATGLM_TOKENIZER, CHATGLM_MODEL - if CHATGLM_TOKENIZER is None or CHATGLM_MODEL 
is None: - system_name = platform.system() - model_path = None - if os.path.exists("models"): - model_dirs = os.listdir("models") - if model_name in model_dirs: - model_path = f"models/{model_name}" - if model_path is not None: - model_source = model_path - else: - model_source = f"THUDM/{model_name}" - CHATGLM_TOKENIZER = AutoTokenizer.from_pretrained( - model_source, trust_remote_code=True - ) - quantified = False - if "int4" in model_name: - quantified = True - model = AutoModel.from_pretrained( - model_source, trust_remote_code=True - ) - if torch.cuda.is_available(): - # run on CUDA - logging.info("CUDA is available, using CUDA") - model = model.half().cuda() - # mps加速还存在一些问题,暂时不使用 - elif system_name == "Darwin" and model_path is not None and not quantified: - logging.info("Running on macOS, using MPS") - # running on macOS and model already downloaded - model = model.half().to("mps") - else: - logging.info("GPU is not available, using CPU") - model = model.float() - model = model.eval() - CHATGLM_MODEL = model - - def _get_glm_style_input(self): - history = [x["content"] for x in self.history] - query = history.pop() - logging.debug(colorama.Fore.YELLOW + - f"{history}" + colorama.Fore.RESET) - assert ( - len(history) % 2 == 0 - ), f"History should be even length. current history is: {history}" - history = [[history[i], history[i + 1]] - for i in range(0, len(history), 2)] - return history, query - - def get_answer_at_once(self): - history, query = self._get_glm_style_input() - response, _ = CHATGLM_MODEL.chat( - CHATGLM_TOKENIZER, query, history=history) - return response, len(response) - - def get_answer_stream_iter(self): - history, query = self._get_glm_style_input() - for response, history in CHATGLM_MODEL.stream_chat( - CHATGLM_TOKENIZER, - query, - history, - max_length=self.token_upper_limit, - top_p=self.top_p, - temperature=self.temperature, - ): - yield response - - -class LLaMA_Client(BaseLLMModel): - def __init__( - self, - model_name, - lora_path=None, - user_name="" - ) -> None: - super().__init__(model_name=model_name, user=user_name) - from lmflow.datasets.dataset import Dataset - from lmflow.pipeline.auto_pipeline import AutoPipeline - from lmflow.models.auto_model import AutoModel - from lmflow.args import ModelArguments, DatasetArguments, InferencerArguments - - self.max_generation_token = 1000 - self.end_string = "\n\n" - # We don't need input data - data_args = DatasetArguments(dataset_path=None) - self.dataset = Dataset(data_args) - self.system_prompt = "" - - global LLAMA_MODEL, LLAMA_INFERENCER - if LLAMA_MODEL is None or LLAMA_INFERENCER is None: - model_path = None - if os.path.exists("models"): - model_dirs = os.listdir("models") - if model_name in model_dirs: - model_path = f"models/{model_name}" - if model_path is not None: - model_source = model_path - else: - model_source = f"decapoda-research/{model_name}" - # raise Exception(f"models目录下没有这个模型: {model_name}") - if lora_path is not None: - lora_path = f"lora/{lora_path}" - model_args = ModelArguments(model_name_or_path=model_source, lora_model_path=lora_path, model_type=None, config_overrides=None, config_name=None, tokenizer_name=None, cache_dir=None, - use_fast_tokenizer=True, model_revision='main', use_auth_token=False, torch_dtype=None, use_lora=False, lora_r=8, lora_alpha=32, lora_dropout=0.1, use_ram_optimized_load=True) - pipeline_args = InferencerArguments( - local_rank=0, random_seed=1, deepspeed='configs/ds_config_chatbot.json', mixed_precision='bf16') - - with open(pipeline_args.deepspeed, 
"r", encoding="utf-8") as f: - ds_config = json.load(f) - LLAMA_MODEL = AutoModel.get_model( - model_args, - tune_strategy="none", - ds_config=ds_config, - ) - LLAMA_INFERENCER = AutoPipeline.get_pipeline( - pipeline_name="inferencer", - model_args=model_args, - data_args=data_args, - pipeline_args=pipeline_args, - ) - - def _get_llama_style_input(self): - history = [] - instruction = "" - if self.system_prompt: - instruction = (f"Instruction: {self.system_prompt}\n") - for x in self.history: - if x["role"] == "user": - history.append(f"{instruction}Input: {x['content']}") - else: - history.append(f"Output: {x['content']}") - context = "\n\n".join(history) - context += "\n\nOutput: " - return context - - def get_answer_at_once(self): - context = self._get_llama_style_input() - - input_dataset = self.dataset.from_dict( - {"type": "text_only", "instances": [{"text": context}]} - ) - - output_dataset = LLAMA_INFERENCER.inference( - model=LLAMA_MODEL, - dataset=input_dataset, - max_new_tokens=self.max_generation_token, - temperature=self.temperature, - ) - - response = output_dataset.to_dict()["instances"][0]["text"] - return response, len(response) - - def get_answer_stream_iter(self): - context = self._get_llama_style_input() - partial_text = "" - step = 1 - for _ in range(0, self.max_generation_token, step): - input_dataset = self.dataset.from_dict( - {"type": "text_only", "instances": [ - {"text": context + partial_text}]} - ) - output_dataset = LLAMA_INFERENCER.inference( - model=LLAMA_MODEL, - dataset=input_dataset, - max_new_tokens=step, - temperature=self.temperature, - ) - response = output_dataset.to_dict()["instances"][0]["text"] - if response == "" or response == self.end_string: - break - partial_text += response - yield partial_text - - -class XMChat(BaseLLMModel): - def __init__(self, api_key, user_name=""): - super().__init__(model_name="xmchat", user=user_name) - self.api_key = api_key - self.session_id = None - self.reset() - self.image_bytes = None - self.image_path = None - self.xm_history = [] - self.url = "https://xmbot.net/web" - self.last_conv_id = None - - def reset(self): - self.session_id = str(uuid.uuid4()) - self.last_conv_id = None - return [], "已重置" - - def image_to_base64(self, image_path): - # 打开并加载图片 - img = Image.open(image_path) - - # 获取图片的宽度和高度 - width, height = img.size - - # 计算压缩比例,以确保最长边小于4096像素 - max_dimension = 2048 - scale_ratio = min(max_dimension / width, max_dimension / height) - - if scale_ratio < 1: - # 按压缩比例调整图片大小 - new_width = int(width * scale_ratio) - new_height = int(height * scale_ratio) - img = img.resize((new_width, new_height), Image.ANTIALIAS) - - # 将图片转换为jpg格式的二进制数据 - buffer = BytesIO() - if img.mode == "RGBA": - img = img.convert("RGB") - img.save(buffer, format='JPEG') - binary_image = buffer.getvalue() - - # 对二进制数据进行Base64编码 - base64_image = base64.b64encode(binary_image).decode('utf-8') - - return base64_image - - def try_read_image(self, filepath): - def is_image_file(filepath): - # 判断文件是否为图片 - valid_image_extensions = [ - ".jpg", ".jpeg", ".png", ".bmp", ".gif", ".tiff"] - file_extension = os.path.splitext(filepath)[1].lower() - return file_extension in valid_image_extensions - - if is_image_file(filepath): - logging.info(f"读取图片文件: {filepath}") - self.image_bytes = self.image_to_base64(filepath) - self.image_path = filepath - else: - self.image_bytes = None - self.image_path = None - - def like(self): - if self.last_conv_id is None: - return "点赞失败,你还没发送过消息" - data = { - "uuid": self.last_conv_id, - "appraise": "good" - } - 
requests.post(self.url, json=data) - return "👍点赞成功,感谢反馈~" - - def dislike(self): - if self.last_conv_id is None: - return "点踩失败,你还没发送过消息" - data = { - "uuid": self.last_conv_id, - "appraise": "bad" - } - requests.post(self.url, json=data) - return "👎点踩成功,感谢反馈~" - - def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot): - fake_inputs = real_inputs - display_append = "" - limited_context = False - return limited_context, fake_inputs, display_append, real_inputs, chatbot - - def handle_file_upload(self, files, chatbot, language): - """if the model accepts multi modal input, implement this function""" - if files: - for file in files: - if file.name: - logging.info(f"尝试读取图像: {file.name}") - self.try_read_image(file.name) - if self.image_path is not None: - chatbot = chatbot + [((self.image_path,), None)] - if self.image_bytes is not None: - logging.info("使用图片作为输入") - # XMChat的一轮对话中实际上只能处理一张图片 - self.reset() - conv_id = str(uuid.uuid4()) - data = { - "user_id": self.api_key, - "session_id": self.session_id, - "uuid": conv_id, - "data_type": "imgbase64", - "data": self.image_bytes - } - response = requests.post(self.url, json=data) - response = json.loads(response.text) - logging.info(f"图片回复: {response['data']}") - return None, chatbot, None - - def get_answer_at_once(self): - question = self.history[-1]["content"] - conv_id = str(uuid.uuid4()) - self.last_conv_id = conv_id - data = { - "user_id": self.api_key, - "session_id": self.session_id, - "uuid": conv_id, - "data_type": "text", - "data": question - } - response = requests.post(self.url, json=data) - try: - response = json.loads(response.text) - return response["data"], len(response["data"]) - except Exception as e: - return response.text, len(response.text) - - -def get_model( - model_name, - lora_model_path=None, - access_key=None, - temperature=None, - top_p=None, - system_prompt=None, - user_name="" -) -> BaseLLMModel: - msg = i18n("模型设置为了:") + f" {model_name}" - model_type = ModelType.get_type(model_name) - lora_selector_visibility = False - lora_choices = [] - dont_change_lora_selector = False - if model_type != ModelType.OpenAI: - config.local_embedding = True - # del current_model.model - model = None - chatbot = gr.Chatbot.update(label=model_name) - try: - if model_type == ModelType.OpenAI: - logging.info(f"正在加载OpenAI模型: {model_name}") - model = OpenAIClient( - model_name=model_name, - api_key=access_key, - system_prompt=system_prompt, - temperature=temperature, - top_p=top_p, - user_name=user_name, - ) - elif model_type == ModelType.ChatGLM: - logging.info(f"正在加载ChatGLM模型: {model_name}") - model = ChatGLM_Client(model_name, user_name=user_name) - elif model_type == ModelType.LLaMA and lora_model_path == "": - msg = f"现在请为 {model_name} 选择LoRA模型" - logging.info(msg) - lora_selector_visibility = True - if os.path.isdir("lora"): - lora_choices = get_file_names( - "lora", plain=True, filetypes=[""]) - lora_choices = ["No LoRA"] + lora_choices - elif model_type == ModelType.LLaMA and lora_model_path != "": - logging.info(f"正在加载LLaMA模型: {model_name} + {lora_model_path}") - dont_change_lora_selector = True - if lora_model_path == "No LoRA": - lora_model_path = None - msg += " + No LoRA" - else: - msg += f" + {lora_model_path}" - model = LLaMA_Client( - model_name, lora_model_path, user_name=user_name) - elif model_type == ModelType.XMChat: - if os.environ.get("XMCHAT_API_KEY") != "": - access_key = os.environ.get("XMCHAT_API_KEY") - model = XMChat(api_key=access_key, user_name=user_name) - elif model_type == 
ModelType.StableLM: - from .StableLM import StableLM_Client - model = StableLM_Client(model_name, user_name=user_name) - elif model_type == ModelType.MOSS: - from .MOSS import MOSS_Client - model = MOSS_Client(model_name, user_name=user_name) - elif model_type == ModelType.YuanAI: - from .inspurai import Yuan_Client - model = Yuan_Client(model_name, api_key=access_key, user_name=user_name, system_prompt=system_prompt) - elif model_type == ModelType.Minimax: - from .minimax import MiniMax_Client - if os.environ.get("MINIMAX_API_KEY") != "": - access_key = os.environ.get("MINIMAX_API_KEY") - model = MiniMax_Client(model_name, api_key=access_key, user_name=user_name, system_prompt=system_prompt) - elif model_type == ModelType.ChuanhuAgent: - from .ChuanhuAgent import ChuanhuAgent_Client - model = ChuanhuAgent_Client(model_name, access_key, user_name=user_name) - elif model_type == ModelType.Unknown: - raise ValueError(f"未知模型: {model_name}") - logging.info(msg) - except Exception as e: - logging.error(e) - msg = f"{STANDARD_ERROR_MSG}: {e}" - if dont_change_lora_selector: - return model, msg, chatbot - else: - return model, msg, chatbot, gr.Dropdown.update(choices=lora_choices, visible=lora_selector_visibility) - - -if __name__ == "__main__": - with open("config.json", "r", encoding="utf-8") as f: - openai_api_key = cjson.load(f)["openai_api_key"] - # set logging level to debug - logging.basicConfig(level=logging.DEBUG) - # client = ModelManager(model_name="gpt-3.5-turbo", access_key=openai_api_key) - client = get_model(model_name="chatglm-6b-int4") - chatbot = [] - stream = False - # 测试账单功能 - logging.info(colorama.Back.GREEN + "测试账单功能" + colorama.Back.RESET) - logging.info(client.billing_info()) - # 测试问答 - logging.info(colorama.Back.GREEN + "测试问答" + colorama.Back.RESET) - question = "巴黎是中国的首都吗?" - for i in client.predict(inputs=question, chatbot=chatbot, stream=stream): - logging.info(i) - logging.info(f"测试问答后history : {client.history}") - # 测试记忆力 - logging.info(colorama.Back.GREEN + "测试记忆力" + colorama.Back.RESET) - question = "我刚刚问了你什么问题?" 
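For reference, get_model returns a tuple — (model, status message, chatbot update), plus a LoRA dropdown update in most branches — rather than the model object alone, so the self-test here would need to unpack it before calling billing_info() or predict(). A minimal caller sketch, with a placeholder key and prompt that are not from the original file:

# Hypothetical usage sketch: unpack the tuple returned by get_model() before use.
model, msg, chatbot_update, lora_dropdown = get_model(
    model_name="gpt-3.5-turbo",
    access_key="sk-placeholder",              # placeholder, not a real credential
    temperature=0.7,
    top_p=1.0,
    system_prompt="You are a helpful assistant.",
)
print(msg)                                     # status string, e.g. "模型设置为了: gpt-3.5-turbo"
for response in model.predict(inputs="Hello!", chatbot=[], stream=True):
    pass                                       # consume the streaming generator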
- for i in client.predict(inputs=question, chatbot=chatbot, stream=stream): - logging.info(i) - logging.info(f"测试记忆力后history : {client.history}") - # 测试重试功能 - logging.info(colorama.Back.GREEN + "测试重试功能" + colorama.Back.RESET) - for i in client.retry(chatbot=chatbot, stream=stream): - logging.info(i) - logging.info(f"重试后history : {client.history}") - # # 测试总结功能 - # print(colorama.Back.GREEN + "测试总结功能" + colorama.Back.RESET) - # chatbot, msg = client.reduce_token_size(chatbot=chatbot) - # print(chatbot, msg) - # print(f"总结后history: {client.history}") diff --git a/spaces/kainy/rvc_okiba_TTS/README.md b/spaces/kainy/rvc_okiba_TTS/README.md deleted file mode 100644 index d11687397a667744c76a2b3280a153b98213253a..0000000000000000000000000000000000000000 --- a/spaces/kainy/rvc_okiba_TTS/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: RVC okiba TTS -emoji: 😊🎙️ -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: 3.38.0 -app_file: app.py -pinned: false -duplicated_from: litagin/rvc_okiba_TTS ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kaleidophon/almost_stochastic_order/almost_stochastic_order.py b/spaces/kaleidophon/almost_stochastic_order/almost_stochastic_order.py deleted file mode 100644 index c57b02ecfc5eca026d716f7645c2540d9585feca..0000000000000000000000000000000000000000 --- a/spaces/kaleidophon/almost_stochastic_order/almost_stochastic_order.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright 2022 The HuggingFace Evaluate Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Almost Stochastic Order test for model comparison.""" - -from typing import Optional - -import datasets -from deepsig import aso - -import evaluate - - -_DESCRIPTION = """ -The Almost Stochastic Order test is a non-parametric test that tests to what extent the distributions of predictions differ measuring the Wasserstein distance from each other through. It can be used to compare the predictions of two models. -""" - - -_KWARGS_DESCRIPTION = """ -Args: - predictions1 (`list` of `float`): Predictions for model 1. - predictions2 (`list` of `float`): Predictions for model 2. -Kwargs: - confidence_level (`float`): Confidence level under which the result is obtained. Default is 0.95. - num_bootstrap_iterations: (`int`): Number of bootstrap iterations to compute upper bound to test statistics. Default is 1000. - dt (`float`): Differential for t during numerical integral calculation. Default is 0.005. - num_jobs (`int` or None): Number of jobs to use for test. If None, this defaults to value specified in the num_process attribute. - show_progress (`bool`): If True, a progress bar is shown when computing the test statistic. Default is False. - seed (`int` or None): Set seed for reproducibility purposes. If None, this defaults to the value specified in the seed attribute. -Returns: - violation_ratio (`float`): (Frequentist upper bound to) Degree of violation of the stochastic order. 
When it is smaller than 0.5, the model producing predictions1 performs better than the other model at a confidence level specified by confidence_level argument (default is 0.95). Ulmer et al. (2022) recommend to reject the null hypothesis when violation_ratio is under 0.2. -Examples: - >>> aso = evaluate.load("kaleidophon/almost_stochastic_order") - >>> results = aso.compute(predictions1=[-7, 123.45, 43, 4.91, 5], predictions2=[1337.12, -9.74, 1, 2, 3.21]) - >>> print(results) - {'violation_ratio': 1.0} -""" - - -_CITATION = """ -@article{ulmer2022deep, - title={deep-significance-Easy and Meaningful Statistical Significance Testing in the Age of Neural Networks}, - author={Ulmer, Dennis and Hardmeier, Christian and Frellsen, Jes}, - journal={arXiv preprint arXiv:2204.06815}, - year={2022} -} - -@inproceedings{dror2019deep, - author = {Rotem Dror and - Segev Shlomov and - Roi Reichart}, - editor = {Anna Korhonen and - David R. Traum and - Llu{\'{\i}}s M{\`{a}}rquez}, - title = {Deep Dominance - How to Properly Compare Deep Neural Models}, - booktitle = {Proceedings of the 57th Conference of the Association for Computational - Linguistics, {ACL} 2019, Florence, Italy, July 28-August 2, 2019, - Volume 1: Long Papers}, - pages = {2773--2785}, - publisher = {Association for Computational Linguistics}, - year = {2019} -} -""" - - -@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) -class AlmostStochasticOrder(evaluate.Comparison): - def _info(self): - return evaluate.ComparisonInfo( - module_type="comparison", - description=_DESCRIPTION, - citation=_CITATION, - inputs_description=_KWARGS_DESCRIPTION, - features=datasets.Features( - { - "predictions1": datasets.Value("float"), - "predictions2": datasets.Value("float"), - } - ), - ) - - def _compute( - self, predictions1, predictions2, - confidence_level: float = 0.95, - num_bootstrap_iterations: int = 1000, - dt: float = 0.005, - num_jobs: Optional[int] = None, - show_progress: bool = False, - seed: Optional[int] = None, - **kwargs - ): - # Set seed - if seed is None: - seed = self.seed - - # Set number of jobs - if num_jobs is None: - num_jobs = self.num_process - - else: - num_jobs = num_jobs - - # Compute statistic - violation_ratio = aso( - scores_a=predictions1, scores_b=predictions2, - num_bootstrap_iterations=num_bootstrap_iterations, - dt=dt, - num_jobs=num_jobs, - seed=seed, - show_progress=show_progress - ) - return {"violation_ratio": violation_ratio} diff --git a/spaces/karolmajek/YOLOR/test.py b/spaces/karolmajek/YOLOR/test.py deleted file mode 100644 index c4c7a27f1291d480c9da4e109b0ac9c16c634ca4..0000000000000000000000000000000000000000 --- a/spaces/karolmajek/YOLOR/test.py +++ /dev/null @@ -1,344 +0,0 @@ -import argparse -import glob -import json -import os -from pathlib import Path - -import numpy as np -import torch -import yaml -from tqdm import tqdm - -from utils.google_utils import attempt_load -from utils.datasets import create_dataloader -from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, box_iou, \ - non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, clip_coords, set_logging, increment_path -from utils.loss import compute_loss -from utils.metrics import ap_per_class -from utils.plots import plot_images, output_to_target -from utils.torch_utils import select_device, time_synchronized - -from models.models import * - -def load_classes(path): - # Loads *.names file at 'path' - with open(path, 'r') as f: - names = f.read().split('\n') - return 
list(filter(None, names)) # filter removes empty strings (such as last line) - - -def test(data, - weights=None, - batch_size=16, - imgsz=640, - conf_thres=0.001, - iou_thres=0.6, # for NMS - save_json=False, - single_cls=False, - augment=False, - verbose=False, - model=None, - dataloader=None, - save_dir=Path(''), # for saving images - save_txt=False, # for auto-labelling - save_conf=False, - plots=True, - log_imgs=0): # number of logged images - - # Initialize/load model and set device - training = model is not None - if training: # called by train.py - device = next(model.parameters()).device # get model device - - else: # called directly - set_logging() - device = select_device(opt.device, batch_size=batch_size) - save_txt = opt.save_txt # save *.txt labels - - # Directories - save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run - (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir - - # Load model - model = Darknet(opt.cfg).to(device) - - # load model - try: - ckpt = torch.load(weights[0], map_location=device) # load checkpoint - ckpt['model'] = {k: v for k, v in ckpt['model'].items() if model.state_dict()[k].numel() == v.numel()} - model.load_state_dict(ckpt['model'], strict=False) - except: - load_darknet_weights(model, weights[0]) - imgsz = check_img_size(imgsz, s=64) # check img_size - - # Half - half = device.type != 'cpu' # half precision only supported on CUDA - if half: - model.half() - - # Configure - model.eval() - is_coco = data.endswith('coco.yaml') # is COCO dataset - with open(data) as f: - data = yaml.load(f, Loader=yaml.FullLoader) # model dict - check_dataset(data) # check - nc = 1 if single_cls else int(data['nc']) # number of classes - iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 - niou = iouv.numel() - - # Logging - log_imgs, wandb = min(log_imgs, 100), None # ceil - try: - import wandb # Weights & Biases - except ImportError: - log_imgs = 0 - - # Dataloader - if not training: - img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img - _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once - path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images - dataloader = create_dataloader(path, imgsz, batch_size, 64, opt, pad=0.5, rect=True)[0] - - seen = 0 - try: - names = model.names if hasattr(model, 'names') else model.module.names - except: - names = load_classes(opt.names) - coco91class = coco80_to_coco91_class() - s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') - p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. 
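As context for the evaluation loop that follows, iouv above holds the ten IoU thresholds behind mAP@0.5:0.95, and every detection is later scored against all of them at once (the per-image correct matrix built below). A small illustrative sketch, not part of the original script:

# Illustrative only: a detection whose best same-class IoU with a target is 0.72
# is a true positive at thresholds 0.50-0.70 and a false positive at 0.75-0.95.
import torch
iouv = torch.linspace(0.5, 0.95, 10)   # [0.50, 0.55, ..., 0.95]
best_iou = torch.tensor(0.72)
correct_row = best_iou > iouv          # tensor([True]*5 + [False]*5)
print(correct_row.tolist())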
- loss = torch.zeros(3, device=device) - jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] - for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): - img = img.to(device, non_blocking=True) - img = img.half() if half else img.float() # uint8 to fp16/32 - img /= 255.0 # 0 - 255 to 0.0 - 1.0 - targets = targets.to(device) - nb, _, height, width = img.shape # batch size, channels, height, width - whwh = torch.Tensor([width, height, width, height]).to(device) - - # Disable gradients - with torch.no_grad(): - # Run model - t = time_synchronized() - inf_out, train_out = model(img, augment=augment) # inference and training outputs - t0 += time_synchronized() - t - - # Compute loss - if training: # if model has loss hyperparameters - loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3] # box, obj, cls - - # Run NMS - t = time_synchronized() - output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres) - t1 += time_synchronized() - t - - # Statistics per image - for si, pred in enumerate(output): - labels = targets[targets[:, 0] == si, 1:] - nl = len(labels) - tcls = labels[:, 0].tolist() if nl else [] # target class - seen += 1 - - if len(pred) == 0: - if nl: - stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) - continue - - # Append to text file - path = Path(paths[si]) - if save_txt: - gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh - x = pred.clone() - x[:, :4] = scale_coords(img[si].shape[1:], x[:, :4], shapes[si][0], shapes[si][1]) # to original - for *xyxy, conf, cls in x: - xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh - line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format - with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: - f.write(('%g ' * len(line)).rstrip() % line + '\n') - - # W&B logging - if plots and len(wandb_images) < log_imgs: - box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, - "class_id": int(cls), - "box_caption": "%s %.3f" % (names[cls], conf), - "scores": {"class_score": conf}, - "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} - wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name)) - - # Clip boxes to image bounds - clip_coords(pred, (height, width)) - - # Append to pycocotools JSON dictionary - if save_json: - # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
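The bbox field in that JSON uses COCO's [x_min, y_min, width, height] convention, which is why the code below first converts to center-format xywh and then shifts to the top-left corner. A worked example with a made-up box, assuming utils.general.xyxy2xywh from the same repository is importable:

# Illustrative: one xyxy box -> COCO [x_min, y_min, width, height]
import torch
box = torch.tensor([[50., 50., 150., 250.]])   # x1, y1, x2, y2
box = xyxy2xywh(box)                           # [[100., 150., 100., 200.]]  (cx, cy, w, h)
box[:, :2] -= box[:, 2:] / 2                   # [[ 50.,  50., 100., 200.]]  (x_min, y_min, w, h)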
- image_id = int(path.stem) if path.stem.isnumeric() else path.stem - box = pred[:, :4].clone() # xyxy - scale_coords(img[si].shape[1:], box, shapes[si][0], shapes[si][1]) # to original shape - box = xyxy2xywh(box) # xywh - box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner - for p, b in zip(pred.tolist(), box.tolist()): - jdict.append({'image_id': image_id, - 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), - 'bbox': [round(x, 3) for x in b], - 'score': round(p[4], 5)}) - - # Assign all predictions as incorrect - correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) - if nl: - detected = [] # target indices - tcls_tensor = labels[:, 0] - - # target boxes - tbox = xywh2xyxy(labels[:, 1:5]) * whwh - - # Per target class - for cls in torch.unique(tcls_tensor): - ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices - pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices - - # Search for detections - if pi.shape[0]: - # Prediction to target ious - ious, i = box_iou(pred[pi, :4], tbox[ti]).max(1) # best ious, indices - - # Append detections - detected_set = set() - for j in (ious > iouv[0]).nonzero(as_tuple=False): - d = ti[i[j]] # detected target - if d.item() not in detected_set: - detected_set.add(d.item()) - detected.append(d) - correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn - if len(detected) == nl: # all targets already located in image - break - - # Append statistics (correct, conf, pcls, tcls) - stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) - - # Plot images - if plots and batch_i < 3: - f = save_dir / f'test_batch{batch_i}_labels.jpg' # filename - plot_images(img, targets, paths, f, names) # labels - f = save_dir / f'test_batch{batch_i}_pred.jpg' - plot_images(img, output_to_target(output, width, height), paths, f, names) # predictions - - # Compute statistics - stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy - if len(stats) and stats[0].any(): - p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, fname=save_dir / 'precision-recall_curve.png') - p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1) # [P, R, AP@0.5, AP@0.5:0.95] - mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() - nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class - else: - nt = torch.zeros(1) - - # W&B logging - if plots and wandb: - wandb.log({"Images": wandb_images}) - wandb.log({"Validation": [wandb.Image(str(x), caption=x.name) for x in sorted(save_dir.glob('test*.jpg'))]}) - - # Print results - pf = '%20s' + '%12.3g' * 6 # print format - print(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) - - # Print results per class - if verbose and nc > 1 and len(stats): - for i, c in enumerate(ap_class): - print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) - - # Print speeds - t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple - if not training: - print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t) - - # Save JSON - if save_json and len(jdict): - w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights - anno_json = glob.glob('../coco/annotations/instances_val*.json')[0] # annotations json - pred_json = str(save_dir / f"{w}_predictions.json") # predictions json - print('\nEvaluating pycocotools mAP... saving %s...' 
% pred_json) - with open(pred_json, 'w') as f: - json.dump(jdict, f) - - try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb - from pycocotools.coco import COCO - from pycocotools.cocoeval import COCOeval - - anno = COCO(anno_json) # init annotations api - pred = anno.loadRes(pred_json) # init predictions api - eval = COCOeval(anno, pred, 'bbox') - if is_coco: - eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate - eval.evaluate() - eval.accumulate() - eval.summarize() - map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) - except Exception as e: - print('ERROR: pycocotools unable to run: %s' % e) - - # Return results - if not training: - print('Results saved to %s' % save_dir) - model.float() # for training - maps = np.zeros(nc) + map - for i, c in enumerate(ap_class): - maps[c] = ap[i] - return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(prog='test.py') - parser.add_argument('--weights', nargs='+', type=str, default='yolor_p6.pt', help='model.pt path(s)') - parser.add_argument('--data', type=str, default='data/coco.yaml', help='*.data path') - parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch') - parser.add_argument('--img-size', type=int, default=1280, help='inference size (pixels)') - parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold') - parser.add_argument('--iou-thres', type=float, default=0.65, help='IOU threshold for NMS') - parser.add_argument('--task', default='val', help="'val', 'test', 'study'") - parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') - parser.add_argument('--augment', action='store_true', help='augmented inference') - parser.add_argument('--verbose', action='store_true', help='report mAP by class') - parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') - parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') - parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file') - parser.add_argument('--project', default='runs/test', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--cfg', type=str, default='cfg/yolor_p6.cfg', help='*.cfg path') - parser.add_argument('--names', type=str, default='data/coco.names', help='*.cfg path') - opt = parser.parse_args() - opt.save_json |= opt.data.endswith('coco.yaml') - opt.data = check_file(opt.data) # check file - print(opt) - - if opt.task in ['val', 'test']: # run normally - test(opt.data, - opt.weights, - opt.batch_size, - opt.img_size, - opt.conf_thres, - opt.iou_thres, - opt.save_json, - opt.single_cls, - opt.augment, - opt.verbose, - save_txt=opt.save_txt, - save_conf=opt.save_conf, - ) - - elif opt.task == 'study': # run over a range of settings and save/plot - for weights in ['yolor_p6.pt', 'yolor_w6.pt']: - f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem) # filename to save to - x = list(range(320, 800, 64)) # x axis - y = [] # y axis - for i in x: # img-size - print('\nRunning %s point %s...' % (f, i)) - r, _, t = test(opt.data, weights, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json) - y.append(r + t) # results and times - np.savetxt(f, y, fmt='%10.4g') # save - os.system('zip -r study.zip study_*.txt') - # utils.general.plot_study_txt(f, x) # plot diff --git a/spaces/kboaten/MIDI-Audio-Extension/MIDI-song-extender/transformer_train.py b/spaces/kboaten/MIDI-Audio-Extension/MIDI-song-extender/transformer_train.py deleted file mode 100644 index 9ef3610b3f29937fc7c4ad120b1ff48d3e57a7e3..0000000000000000000000000000000000000000 --- a/spaces/kboaten/MIDI-Audio-Extension/MIDI-song-extender/transformer_train.py +++ /dev/null @@ -1,105 +0,0 @@ -import numpy as np -import keras -import tensorflow as tf -from tensorflow import keras -from tensorflow.keras import layers -import datetime -from packaging import version - - -from enum import Enum - -import os -import gc - -n_classes = 2 -def transformer_encoder(inputs, head_size, num_heads, ff_dim, dropout=0): - # Normalization and Attention - x = layers.LayerNormalization(epsilon=1e-6)(inputs) - x = layers.MultiHeadAttention( - key_dim=head_size, num_heads=num_heads, dropout=dropout - )(x, x) - x = layers.Dropout(dropout)(x) - res = x + inputs - - # Feed Forward Part - x = layers.LayerNormalization(epsilon=1e-6)(res) - x = layers.Conv1D(filters=ff_dim, kernel_size=1, activation="relu")(x) - x = layers.Dropout(dropout)(x) - x = layers.Conv1D(filters=inputs.shape[-1], kernel_size=1)(x) - return x + res - -def build_model( - input_shape, - head_size, - num_heads, - ff_dim, - num_transformer_blocks, - mlp_units, - dropout=0, - mlp_dropout=0, -): - inputs = keras.Input(shape=input_shape) - x = inputs - for _ in range(num_transformer_blocks): - x = transformer_encoder(x, head_size, 
num_heads, ff_dim, dropout) - - x = layers.GlobalAveragePooling1D(data_format="channels_first")(x) - for dim in mlp_units: - x = layers.Dense(dim, activation="relu")(x) - x = layers.Dropout(mlp_dropout)(x) - outputs = layers.Dense(n_classes, activation="softmax")(x) - return keras.Model(inputs, outputs) - -model = build_model((100,1), - head_size=25, - num_heads=4, - ff_dim=4, - num_transformer_blocks=4, - mlp_units=[50], - mlp_dropout=0.4, - dropout=0.25) -model.compile( - loss="mse", - optimizer=keras.optimizers.Adam(learning_rate=1e-4), - metrics=["mse"], -) -try: - a = keras.models.load_model("/home/ubuntu/projectpathing/transformer1") - model = a -except: - print("except") - -model.summary() - -# this trains 1 epoch - -x_val = np.load("/home/ubuntu/data 3/x_200.npy") -y_val = np.load("/home/ubuntu/data 3/y_200.npy") -for g in range(211,221): - a = np.load("/home/ubuntu/data 3/x_" + str(g) + ".npy") - b = np.load("/home/ubuntu/data 3/y_" + str(g) + ".npy") - x_val = np.concatenate((x_val, a)) - y_val = np.concatenate((y_val, b)) -for i in range(20): # let's make the last batch the validation (#200-220 inclusive) - x = None - y = None - gc.collect() - for j in range(10): - print(10 * i + j) - new_x = np.load("/home/ubuntu/data 3/x_" + str(10 * i + j) + ".npy") - new_y = np.load("/home/ubuntu/data 3/y_" + str(10 * i + j) + ".npy") - if x is None and y is None: - x = new_x - y = new_y - else: - x = np.concatenate((x, new_x)) - y = np.concatenate((y, new_y)) - # this will need to be optimized - x = x.reshape((x.shape[0], x.shape[1], 1)) - y = y.reshape((y.shape[0], y.shape[1], 1)) - x_val = x_val.reshape((x_val.shape[0], x_val.shape[1], 1)) - y_val = y_val.reshape((y_val.shape[0], y_val.shape[1], 1)) - print(x.shape) - model.fit(x, y, validation_data=(x_val, y_val), epochs=1) - model.save("/home/ubuntu/projectpathing/transformer1") \ No newline at end of file diff --git a/spaces/kenton-li/ChatArxiv/src/paper.py b/spaces/kenton-li/ChatArxiv/src/paper.py deleted file mode 100644 index cfb6d98e12cf4e6f2021229581fa0358b9322ba4..0000000000000000000000000000000000000000 --- a/spaces/kenton-li/ChatArxiv/src/paper.py +++ /dev/null @@ -1,121 +0,0 @@ -import fitz -import os -import io -import arxiv -import tempfile - -from PIL import Image -from urllib.parse import urlparse - -class Paper: - def __init__(self, url=''): - self.url = url - self.parse_url() - self.get_pdf() - self.paper_instance = { - 'title': self.paper_arxiv.title, - 'authors': self.paper_arxiv.authors, - 'arxiv_id': self.paper_id, - 'abstract': self.paper_arxiv.summary, - 'pdf_url': self.paper_arxiv.pdf_url, - 'categories': self.paper_arxiv.categories, - 'published': self.paper_arxiv.published, - 'updated': self.paper_arxiv.updated, - 'content': {} - } - self.parse_pdf() - - def get_paper(self): - return self.paper_instance - - def parse_url(self): - self.url = self.url.replace('.pdf', '') - parsed_url = urlparse(self.url) - paper_id = os.path.basename(parsed_url.path) - self.paper_id = paper_id - - def get_pdf(self): - search = arxiv.Search(id_list=[self.paper_id], max_results=1) - results = search.results() - paper_arxiv = next(results) - if paper_arxiv: - # with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_pdf: - paper_path = f'{self.paper_id}.pdf' - dir_path = "./pdf" - os.makedirs(dir_path, exist_ok=True) - save_dir = os.path.join(dir_path, paper_path) - if not os.path.exists(save_dir): - paper_arxiv.download_pdf(dirpath=dir_path, filename=paper_path) - self.paper_arxiv = paper_arxiv - self.path = save_dir - 
else: - raise Exception("无法找到论文,请检查 URL 是否正确。") - - def parse_pdf(self): - self.pdf = fitz.open(self.path) - self.text_list = [page.get_text() for page in self.pdf] - self.all_text = ' '.join(self.text_list) - - self._parse_paper() - self.pdf.close() - - def _get_sections(self): - sections = 'Abstract,Introduction,Related Work,Background,Preliminary,Problem Formulation,Methods,Methodology,Method,Approach,Approaches,Materials and Methods,Experiment Settings,Experiment,Experimental Results,Evaluation,Experiments,Results,Findings,Data Analysis,Discussion,Results and Discussion,Conclusion,References' - self.sections = sections.split(',') - - def _get_all_page_index(self): - section_list = self.sections - section_page_dict = {} - - for page_index, page in enumerate(self.pdf): - cur_text = page.get_text() - for section_name in section_list: - section_name_upper = section_name.upper() - if "Abstract" == section_name and section_name in cur_text: - section_page_dict[section_name] = page_index - continue - - if section_name + '\n' in cur_text: - section_page_dict[section_name] = page_index - elif section_name_upper + '\n' in cur_text: - section_page_dict[section_name] = page_index - - self.section_page_dict = section_page_dict - - def _parse_paper(self): - """ - Return: dict {
          : } - """ - self._get_sections() - self._get_all_page_index() - - text_list = [page.get_text() for page in self.pdf] - section_keys = list(self.section_page_dict.keys()) - section_count = len(section_keys) - - section_dict = {} - for sec_index, sec_name in enumerate(section_keys): - if sec_index == 0: - continue - - start_page = self.section_page_dict[sec_name] - end_page = self.section_page_dict[section_keys[sec_index + 1]] if sec_index < section_count - 1 else len(text_list) - - cur_sec_text = [] - for page_i in range(start_page, end_page): - page_text = text_list[page_i] - - if page_i == start_page: - start_i = page_text.find(sec_name) if sec_name in page_text else page_text.find(sec_name.upper()) - page_text = page_text[start_i:] - - if page_i == end_page - 1 and sec_index < section_count - 1: - next_sec = section_keys[sec_index + 1] - end_i = page_text.find(next_sec) if next_sec in page_text else page_text.find(next_sec.upper()) - page_text = page_text[:end_i] - - cur_sec_text.append(page_text) - - section_dict[sec_name] = ''.join(cur_sec_text).replace('-\n', '').replace('\n', ' ') - - self.paper_instance['content'] = section_dict \ No newline at end of file diff --git a/spaces/keremberke/nfl-object-detection/README.md b/spaces/keremberke/nfl-object-detection/README.md deleted file mode 100644 index 0fde34facbdfdacbd4c561b3b7cb510c9171af0e..0000000000000000000000000000000000000000 --- a/spaces/keremberke/nfl-object-detection/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: NFL Object Detection -emoji: 🎮 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.15.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kevinwang676/SadTalker/src/face3d/models/arcface_torch/eval/__init__.py b/spaces/kevinwang676/SadTalker/src/face3d/models/arcface_torch/eval/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/ops/knn.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/ops/knn.py deleted file mode 100644 index f335785036669fc19239825b0aae6dde3f73bf92..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/ops/knn.py +++ /dev/null @@ -1,77 +0,0 @@ -import torch -from torch.autograd import Function - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', ['knn_forward']) - - -class KNN(Function): - r"""KNN (CUDA) based on heap data structure. - Modified from `PAConv `_. - - Find k-nearest points. - """ - - @staticmethod - def forward(ctx, - k: int, - xyz: torch.Tensor, - center_xyz: torch.Tensor = None, - transposed: bool = False) -> torch.Tensor: - """ - Args: - k (int): number of nearest neighbors. - xyz (Tensor): (B, N, 3) if transposed == False, else (B, 3, N). - xyz coordinates of the features. - center_xyz (Tensor, optional): (B, npoint, 3) if transposed == - False, else (B, 3, npoint). centers of the knn query. - Default: None. - transposed (bool, optional): whether the input tensors are - transposed. Should not explicitly use this keyword when - calling knn (=KNN.apply), just add the fourth param. - Default: False. - - Returns: - Tensor: (B, k, npoint) tensor with the indices of - the features that form k-nearest neighbours. 
- """ - assert (k > 0) & (k < 100), 'k should be in range(0, 100)' - - if center_xyz is None: - center_xyz = xyz - - if transposed: - xyz = xyz.transpose(2, 1).contiguous() - center_xyz = center_xyz.transpose(2, 1).contiguous() - - assert xyz.is_contiguous() # [B, N, 3] - assert center_xyz.is_contiguous() # [B, npoint, 3] - - center_xyz_device = center_xyz.get_device() - assert center_xyz_device == xyz.get_device(), \ - 'center_xyz and xyz should be put on the same device' - if torch.cuda.current_device() != center_xyz_device: - torch.cuda.set_device(center_xyz_device) - - B, npoint, _ = center_xyz.shape - N = xyz.shape[1] - - idx = center_xyz.new_zeros((B, npoint, k)).int() - dist2 = center_xyz.new_zeros((B, npoint, k)).float() - - ext_module.knn_forward( - xyz, center_xyz, idx, dist2, b=B, n=N, m=npoint, nsample=k) - # idx shape to [B, k, npoint] - idx = idx.transpose(2, 1).contiguous() - if torch.__version__ != 'parrots': - ctx.mark_non_differentiable(idx) - return idx - - @staticmethod - def backward(ctx, a=None): - return None, None, None - - -knn = KNN.apply diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/multilingual/data_scripts/download_flores_data.sh b/spaces/koajoel/PolyFormer/fairseq/examples/multilingual/data_scripts/download_flores_data.sh deleted file mode 100644 index e6175ce0c38b06a1ebddaeca808f71b47f77f500..0000000000000000000000000000000000000000 --- a/spaces/koajoel/PolyFormer/fairseq/examples/multilingual/data_scripts/download_flores_data.sh +++ /dev/null @@ -1,246 +0,0 @@ -#!/bin/bash - -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -# - -if [ -z $WORKDIR_ROOT ] ; -then - echo "please specify your working directory root in environment variable WORKDIR_ROOT. Exitting..." - exit -fi - - -set -e -set -o pipefail - -SRC=en -SI_TGT=si -NE_TGT=ne - -DESTDIR=${WORKDIR_ROOT}/ML50/raw/ - -ROOT=${WORKDIR_ROOT}/tmp -mkdir -p $ROOT -DATA=$ROOT/data -NE_ROOT=$DATA/all-clean-ne -SI_ROOT=$DATA/all-clean-si - -mkdir -p $DATA $NE_ROOT $SI_ROOT - -SI_OPUS_DATASETS=( - "$SI_ROOT/GNOME.en-si" - "$SI_ROOT/Ubuntu.en-si" - "$SI_ROOT/KDE4.en-si" - "$SI_ROOT/OpenSubtitles.en-si" -) - -SI_OPUS_URLS=( - "https://object.pouta.csc.fi/OPUS-GNOME/v1/moses/en-si.txt.zip" - "https://object.pouta.csc.fi/OPUS-Ubuntu/v14.10/moses/en-si.txt.zip" - "https://object.pouta.csc.fi/OPUS-KDE4/v2/moses/en-si.txt.zip" - "https://object.pouta.csc.fi/OPUS-OpenSubtitles/v2018/moses/en-si.txt.zip" -) - -NE_OPUS_DATASETS=( - "$NE_ROOT/GNOME.en-ne" - "$NE_ROOT/Ubuntu.en-ne" - "$NE_ROOT/KDE4.en-ne" -) - -NE_OPUS_URLS=( - "https://object.pouta.csc.fi/OPUS-GNOME/v1/moses/en-ne.txt.zip" - "https://object.pouta.csc.fi/OPUS-Ubuntu/v14.10/moses/en-ne.txt.zip" - "https://object.pouta.csc.fi/OPUS-KDE4/v2/moses/en-ne.txt.zip" -) - -REMOVE_FILE_PATHS=() - -# Download data -download_data() { - CORPORA=$1 - URL=$2 - - if [ -f $CORPORA ]; then - echo "$CORPORA already exists, skipping download" - else - echo "Downloading $URL" - wget $URL -O $CORPORA --no-check-certificate || rm -f $CORPORA - if [ -f $CORPORA ]; then - echo "$URL successfully downloaded." - else - echo "$URL not successfully downloaded." 
- rm -f $CORPORA - exit -1 - fi - fi -} - -# Example: download_opus_data $LANG_ROOT $TGT -download_opus_data() { - LANG_ROOT=$1 - TGT=$2 - - if [ "$TGT" = "si" ]; then - URLS=("${SI_OPUS_URLS[@]}") - DATASETS=("${SI_OPUS_DATASETS[@]}") - else - URLS=("${NE_OPUS_URLS[@]}") - DATASETS=("${NE_OPUS_DATASETS[@]}") - fi - - # Download and extract data - for ((i=0;i<${#URLS[@]};++i)); do - URL=${URLS[i]} - CORPORA=${DATASETS[i]} - - download_data $CORPORA $URL - unzip -o $CORPORA -d $LANG_ROOT - REMOVE_FILE_PATHS+=( $CORPORA $CORPORA.xml $CORPORA.ids $LANG_ROOT/README $LANG_ROOT/LICENSE ) - done - - cat ${DATASETS[0]}.$SRC ${DATASETS[1]}.$SRC ${DATASETS[2]}.$SRC > $LANG_ROOT/GNOMEKDEUbuntu.$SRC-$TGT.$SRC - cat ${DATASETS[0]}.$TGT ${DATASETS[1]}.$TGT ${DATASETS[2]}.$TGT > $LANG_ROOT/GNOMEKDEUbuntu.$SRC-$TGT.$TGT - - REMOVE_FILE_PATHS+=( ${DATASETS[0]}.$SRC ${DATASETS[1]}.$SRC ${DATASETS[2]}.$SRC ) - REMOVE_FILE_PATHS+=( ${DATASETS[0]}.$TGT ${DATASETS[1]}.$TGT ${DATASETS[2]}.$TGT ) -} - -download_opus_data $SI_ROOT $SI_TGT -cp ${SI_OPUS_DATASETS[3]}.$SRC $SI_ROOT/OpenSubtitles2018.$SRC-$SI_TGT.$SRC -cp ${SI_OPUS_DATASETS[3]}.$SI_TGT $SI_ROOT/OpenSubtitles2018.$SRC-$SI_TGT.$SI_TGT -REMOVE_FILE_PATHS+=( ${SI_OPUS_DATASETS[3]}.$SRC ${SI_OPUS_DATASETS[3]}.$SI_TGT ) - -download_opus_data $NE_ROOT $NE_TGT - - -# Download and extract Global Voices data -GLOBAL_VOICES="$NE_ROOT/globalvoices.2018q4.ne-en" -GLOBAL_VOICES_URL="http://www.casmacat.eu/corpus/global-voices/globalvoices.ne-en.xliff.gz" - -download_data $GLOBAL_VOICES.gz $GLOBAL_VOICES_URL -gunzip -Nf $GLOBAL_VOICES.gz - -sed -ne 's?.*\(.*\).*?\1?p' $GLOBAL_VOICES > $GLOBAL_VOICES.$NE_TGT -sed -ne 's?.*]*>\(.*\).*?\1?p' $GLOBAL_VOICES > $GLOBAL_VOICES.$SRC - -REMOVE_FILE_PATHS+=( $GLOBAL_VOICES ) - -# Download and extract the bible dataset -BIBLE_TOOLS=bible-corpus-tools -XML_BIBLES=XML_Bibles -XML_BIBLES_DUP=XML_Bibles_dup - -if [ ! -e $BIBLE_TOOLS ]; then - echo "Cloning bible-corpus-tools repository..." 
- git clone https://github.com/christos-c/bible-corpus-tools.git -fi - -mkdir -p $BIBLE_TOOLS/bin $XML_BIBLES $XML_BIBLES_DUP -javac -cp "$BIBLE_TOOLS/lib/*" -d $BIBLE_TOOLS/bin $BIBLE_TOOLS/src/bible/readers/*.java $BIBLE_TOOLS/src/bible/*.java - -download_data bible.tar.gz "https://github.com/christos-c/bible-corpus/archive/v1.2.1.tar.gz" -tar xvzf bible.tar.gz - -cp bible-corpus-1.2.1/bibles/{Greek.xml,English.xml,Nepali.xml} $XML_BIBLES/ -cp bible-corpus-1.2.1/bibles/{Greek.xml,English-WEB.xml,Nepali.xml} $XML_BIBLES_DUP/ - -java -cp $BIBLE_TOOLS/lib/*:$BIBLE_TOOLS/bin bible.CreateMLBooks $XML_BIBLES -java -cp $BIBLE_TOOLS/lib/*:$BIBLE_TOOLS/bin bible.CreateMLBooks $XML_BIBLES_DUP -java -cp $BIBLE_TOOLS/lib/*:$BIBLE_TOOLS/bin bible.CreateVerseAlignedBooks $XML_BIBLES -java -cp $BIBLE_TOOLS/lib/*:$BIBLE_TOOLS/bin bible.CreateVerseAlignedBooks $XML_BIBLES_DUP - -cat $XML_BIBLES/aligned/*/English.txt > $NE_ROOT/bible.$SRC-$NE_TGT.$SRC -cat $XML_BIBLES/aligned/*/Nepali.txt > $NE_ROOT/bible.$SRC-$NE_TGT.$NE_TGT -cat $XML_BIBLES_DUP/aligned/*/English-WEB.txt > $NE_ROOT/bible_dup.$SRC-$NE_TGT.$SRC -cat $XML_BIBLES_DUP/aligned/*/Nepali.txt > $NE_ROOT/bible_dup.$SRC-$NE_TGT.$NE_TGT -REMOVE_FILE_PATHS+=( bible-corpus-1.2.1 bible.tar.gz $BIBLE_TOOLS $XML_BIBLES $XML_BIBLES_DUP ) - -# Download and extract the Penn Treebank dataset -NE_TAGGED=$ROOT/new_submissions_parallel_corpus_project_Nepal -NE_TAGGED_URL="http://www.cle.org.pk/Downloads/ling_resources/parallelcorpus/NepaliTaggedCorpus.zip" -EN_TAGGED_PATCH_URL="https://dl.fbaipublicfiles.com/fairseq/data/nepali-penn-treebank.en.patch" -NE_TAGGED_PATCH_URL="https://dl.fbaipublicfiles.com/fairseq/data/nepali-penn-treebank.ne.patch" -MOSES=mosesdecoder -MOSES_TOK=$MOSES/scripts/tokenizer -EN_PATCH_REGEX="{s:\\\/:\/:g;s/\*\T\*\-\n+//g;s/\-LCB\-/\{/g;s/\-RCB\-/\}/g; s/\-LSB\-/\[/g; s/\-RSB\-/\]/g;s/\-LRB\-/\(/g; s/\-RRB\-/\)/g; s/\'\'/\"/g; s/\`\`/\"/g; s/\ +\'s\ +/\'s /g; s/\ +\'re\ +/\'re /g; s/\"\ +/\"/g; s/\ +\"/\"/g; s/\ n't([\ \.\"])/n't\1/g; s/\r+(.)/\1/g;}" -NE_PATCH_REGEX="{s:\p{Cf}::g;s:\\\/:\/:g;s/\*\T\*\-\n+//g;s/\-LCB\-/\{/g;s/\-RCB\-/\}/g; s/\-LSB\-/\[/g; s/\-RSB\-/\]/g;s/\-LRB\-/\(/g; s/\-RRB\-/\)/g; s/\'\'/\"/g; s/\`\`/\"/g; s/\ +\'s\ +/\'s /g; s/\ +\'re\ +/\'re /g; s/\"\ +/\"/g; s/\ +\"/\"/g; s/\ n't([\ \.\"])/n't\1/g; s/\r+(.)/\1/g;}" - -download_data $DATA/nepali-penn-treebank.$SRC.patch $EN_TAGGED_PATCH_URL -download_data $DATA/nepali-penn-treebank.$NE_TGT.patch $NE_TAGGED_PATCH_URL -download_data original.zip $NE_TAGGED_URL -unzip -o original.zip -d $ROOT - -cat $NE_TAGGED/00.txt $NE_TAGGED/01.txt $NE_TAGGED/02.txt > $NE_TAGGED/nepali-penn-treebank.$SRC -cat $NE_TAGGED/00ne_revised.txt $NE_TAGGED/01ne_revised.txt $NE_TAGGED/02ne_revised.txt > $NE_TAGGED/nepali-penn-treebank.$NE_TGT - -patch $NE_TAGGED/nepali-penn-treebank.$SRC -i $DATA/nepali-penn-treebank.$SRC.patch -o $NE_TAGGED/nepali-penn-treebank-patched.$SRC -patch $NE_TAGGED/nepali-penn-treebank.$NE_TGT -i $DATA/nepali-penn-treebank.$NE_TGT.patch -o $NE_TAGGED/nepali-penn-treebank-patched.$NE_TGT - -if [ ! -e $MOSES ]; then - echo "Cloning moses repository..." 
- git clone https://github.com/moses-smt/mosesdecoder.git -fi - -cat $NE_TAGGED/nepali-penn-treebank-patched.$SRC | \ - perl -anpe "$EN_PATCH_REGEX" | \ - $MOSES_TOK/tokenizer.perl -l $SRC | \ - $MOSES_TOK/detokenizer.perl -l $SRC > $NE_ROOT/nepali-penn-treebank.$SRC - -cat $NE_TAGGED/nepali-penn-treebank-patched.$NE_TGT | \ - perl -CIO -anpe "$NE_PATCH_REGEX" | \ - $MOSES_TOK/detokenizer.perl -l $SRC > $NE_ROOT/nepali-penn-treebank.$NE_TGT - - -# Download nepali dictionary data -NE_DICT=$NE_ROOT/dictionaries -download_data $NE_DICT "http://www.seas.upenn.edu/~nlp/resources/TACL-data-release/dictionaries.tar.gz" -tar xvzf $NE_DICT -cp dictionaries/dict.ne $NE_ROOT/dictionary.$NE_TGT-$SRC -REMOVE_FILE_PATHS+=( $NE_DICT dictionaries ) - -REMOVE_FILE_PATHS+=( $MOSES $NE_TAGGED original.zip $DATA/nepali-penn-treebank.$SRC.patch $DATA/nepali-penn-treebank.$NE_TGT.patch ) - - -# Remove the temporary files -for ((i=0;i<${#REMOVE_FILE_PATHS[@]};++i)); do - rm -rf ${REMOVE_FILE_PATHS[i]} -done - -# Copy the training data -si=si_LK -ne=ne_NP -en=en_XX -cat $SI_ROOT/GNOMEKDEUbuntu.en-si.si $SI_ROOT/OpenSubtitles2018.en-si.si > $DESTDIR/train.$si-$en.$si -cat $SI_ROOT/GNOMEKDEUbuntu.en-si.en $SI_ROOT/OpenSubtitles2018.en-si.en > $DESTDIR/train.$si-$en.$en - -cat $NE_ROOT/bible_dup.en-ne.ne $NE_ROOT/bible.en-ne.ne $NE_ROOT/globalvoices.2018q4.ne-en.ne $NE_ROOT/GNOMEKDEUbuntu.en-ne.ne $NE_ROOT/nepali-penn-treebank.ne > $DESTDIR/train.$ne-$en.$ne -cat $NE_ROOT/bible_dup.en-ne.en $NE_ROOT/bible.en-ne.en $NE_ROOT/globalvoices.2018q4.ne-en.en $NE_ROOT/GNOMEKDEUbuntu.en-ne.en $NE_ROOT/nepali-penn-treebank.en > $DESTDIR/train.$ne-$en.$en - - -#Download the test sets -wget https://github.com/facebookresearch/flores/raw/master/data/wikipedia_en_ne_si_test_sets.tgz -tar -xvzf wikipedia_en_ne_si_test_sets.tgz - -cp wikipedia_en_ne_si_test_sets/wikipedia.dev.ne-en.ne $DESTDIR/valid.$ne-$en.$ne -cp wikipedia_en_ne_si_test_sets/wikipedia.dev.ne-en.en $DESTDIR/valid.$ne-$en.$en - -cp wikipedia_en_ne_si_test_sets/wikipedia.dev.si-en.si $DESTDIR/valid.$si-$en.$si -cp wikipedia_en_ne_si_test_sets/wikipedia.dev.si-en.en $DESTDIR/valid.$si-$en.$en - -cp wikipedia_en_ne_si_test_sets/wikipedia.devtest.ne-en.ne $DESTDIR/devtest.$ne-$en.$ne -cp wikipedia_en_ne_si_test_sets/wikipedia.devtest.ne-en.en $DESTDIR/devtest.$ne-$en.$en - -cp wikipedia_en_ne_si_test_sets/wikipedia.devtest.si-en.si $DESTDIR/devtest.$si-$en.$si -cp wikipedia_en_ne_si_test_sets/wikipedia.devtest.si-en.en $DESTDIR/devtest.$si-$en.$en - -cp wikipedia_en_ne_si_test_sets/wikipedia.test.ne-en.ne $DESTDIR/test.$ne-$en.$ne -cp wikipedia_en_ne_si_test_sets/wikipedia.test.ne-en.en $DESTDIR/test.$ne-$en.$en - -cp wikipedia_en_ne_si_test_sets/wikipedia.test.si-en.si $DESTDIR/test.$si-$en.$si -cp wikipedia_en_ne_si_test_sets/wikipedia.test.si-en.en $DESTDIR/test.$si-$en.$en - -rm -rf wikipedia_en_ne_si_test_sets.tgz wikipedia_en_ne_si_test_sets diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/simultaneous_translation/README.md b/spaces/koajoel/PolyFormer/fairseq/examples/simultaneous_translation/README.md deleted file mode 100644 index 62a005e0ec6f15af9015d335e34b45df6ed89b6c..0000000000000000000000000000000000000000 --- a/spaces/koajoel/PolyFormer/fairseq/examples/simultaneous_translation/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Simultaneous Translation -Examples of simultaneous translation in fairseq -- [English-to-Japanese text-to-text wait-k model](docs/enja-waitk.md) -- [English-to-Germen text-to-text monotonic multihead attention 
model](docs/ende-mma.md) -- [English-to-Germen speech-to-text simultaneous translation model](../speech_to_text/docs/simulst_mustc_example.md) diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/simultaneous_translation/models/transformer_monotonic_attention.py b/spaces/koajoel/PolyFormer/fairseq/examples/simultaneous_translation/models/transformer_monotonic_attention.py deleted file mode 100644 index 7b9414b0eb3b30c935478cd5b8a894168bd8cc98..0000000000000000000000000000000000000000 --- a/spaces/koajoel/PolyFormer/fairseq/examples/simultaneous_translation/models/transformer_monotonic_attention.py +++ /dev/null @@ -1,302 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from typing import Dict, List, NamedTuple, Optional - -import torch -import torch.nn as nn -from examples.simultaneous_translation.modules.monotonic_transformer_layer import ( - TransformerMonotonicDecoderLayer, - TransformerMonotonicEncoderLayer, -) -from fairseq.models import ( - register_model, - register_model_architecture, -) -from fairseq.models.transformer import ( - TransformerModel, - TransformerEncoder, - TransformerDecoder, - base_architecture, - transformer_iwslt_de_en, - transformer_vaswani_wmt_en_de_big, - tiny_architecture -) -from torch import Tensor - -DEFAULT_MAX_SOURCE_POSITIONS = 1024 -DEFAULT_MAX_TARGET_POSITIONS = 1024 -READ_ACTION = 0 -WRITE_ACTION = 1 - -TransformerMonotonicDecoderOut = NamedTuple( - "TransformerMonotonicDecoderOut", - [ - ("action", int), - ("p_choose", Optional[Tensor]), - ("attn_list", Optional[List[Optional[Dict[str, Tensor]]]]), - ("encoder_out", Optional[Dict[str, List[Tensor]]]), - ("encoder_padding_mask", Optional[Tensor]), - ], -) - - -@register_model("transformer_unidirectional") -class TransformerUnidirectionalModel(TransformerModel): - @classmethod - def build_encoder(cls, args, src_dict, embed_tokens): - return TransformerMonotonicEncoder(args, src_dict, embed_tokens) - - -@register_model("transformer_monotonic") -class TransformerModelSimulTrans(TransformerModel): - @classmethod - def build_encoder(cls, args, src_dict, embed_tokens): - return TransformerMonotonicEncoder(args, src_dict, embed_tokens) - - @classmethod - def build_decoder(cls, args, tgt_dict, embed_tokens): - return TransformerMonotonicDecoder(args, tgt_dict, embed_tokens) - - -class TransformerMonotonicEncoder(TransformerEncoder): - def __init__(self, args, dictionary, embed_tokens): - super().__init__(args, dictionary, embed_tokens) - - self.dictionary = dictionary - self.layers = nn.ModuleList([]) - self.layers.extend( - [ - TransformerMonotonicEncoderLayer(args) - for i in range(args.encoder_layers) - ] - ) - - -class TransformerMonotonicDecoder(TransformerDecoder): - """ - Transformer decoder consisting of *args.decoder_layers* layers. Each layer - is a :class:`TransformerDecoderLayer`. - - Args: - args (argparse.Namespace): parsed command-line arguments - dictionary (~fairseq.data.Dictionary): decoding dictionary - embed_tokens (torch.nn.Embedding): output embedding - no_encoder_attn (bool, optional): whether to attend to encoder outputs - (default: False). 
- """ - - def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False): - super().__init__(args, dictionary, embed_tokens, no_encoder_attn=False) - - self.dictionary = dictionary - self.layers = nn.ModuleList([]) - self.layers.extend( - [ - TransformerMonotonicDecoderLayer(args) - for _ in range(args.decoder_layers) - ] - ) - self.policy_criterion = getattr(args, "policy_criterion", "any") - self.num_updates = None - - def set_num_updates(self, num_updates): - self.num_updates = num_updates - - def pre_attention( - self, - prev_output_tokens, - encoder_out_dict: Dict[str, List[Tensor]], - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, - ): - positions = ( - self.embed_positions( - prev_output_tokens, - incremental_state=incremental_state, - ) - if self.embed_positions is not None - else None - ) - - if incremental_state is not None: - prev_output_tokens = prev_output_tokens[:, -1:] - if positions is not None: - positions = positions[:, -1:] - # embed tokens and positions - x = self.embed_scale * self.embed_tokens(prev_output_tokens) - - if self.project_in_dim is not None: - x = self.project_in_dim(x) - - if positions is not None: - x += positions - - x = self.dropout_module(x) - - # B x T x C -> T x B x C - x = x.transpose(0, 1) - - encoder_out = encoder_out_dict["encoder_out"][0] - - if "encoder_padding_mask" in encoder_out_dict: - encoder_padding_mask = ( - encoder_out_dict["encoder_padding_mask"][0] - if encoder_out_dict["encoder_padding_mask"] - and len(encoder_out_dict["encoder_padding_mask"]) > 0 - else None - ) - else: - encoder_padding_mask = None - - return x, encoder_out, encoder_padding_mask - - def post_attention(self, x): - if self.layer_norm is not None: - x = self.layer_norm(x) - - # T x B x C -> B x T x C - x = x.transpose(0, 1) - - if self.project_out_dim is not None: - x = self.project_out_dim(x) - - return x - - def clean_cache( - self, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], - end_id: Optional[int] = None, - ): - """ - Clean cache in the monotonic layers. - The cache is generated because of a forward pass of decoder has run but no prediction, - so that the self attention key value in decoder is written in the incremental state. - end_id is the last idx of the layers - """ - if end_id is None: - end_id = len(self.layers) - - for index, layer in enumerate(self.layers): - if index < end_id: - layer.prune_incremental_state(incremental_state) - - def extract_features( - self, - prev_output_tokens, - encoder_out: Optional[Dict[str, List[Tensor]]], - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, - full_context_alignment: bool = False, # unused - alignment_layer: Optional[int] = None, # unused - alignment_heads: Optional[int] = None, # unsed - ): - """ - Similar to *forward* but only return features. 
- - Returns: - tuple: - - the decoder's features of shape `(batch, tgt_len, embed_dim)` - - a dictionary with any model-specific outputs - """ - # incremental_state = None - assert encoder_out is not None - (x, encoder_outs, encoder_padding_mask) = self.pre_attention( - prev_output_tokens, encoder_out, incremental_state - ) - attn = None - inner_states = [x] - attn_list: List[Optional[Dict[str, Tensor]]] = [] - - p_choose = torch.tensor([1.0]) - - for i, layer in enumerate(self.layers): - - x, attn, _ = layer( - x=x, - encoder_out=encoder_outs, - encoder_padding_mask=encoder_padding_mask, - incremental_state=incremental_state, - self_attn_mask=self.buffered_future_mask(x) - if incremental_state is None - else None, - ) - - inner_states.append(x) - attn_list.append(attn) - - if incremental_state is not None: - if_online = incremental_state["online"]["only"] - assert if_online is not None - if if_online.to(torch.bool): - # Online indicates that the encoder states are still changing - assert attn is not None - if self.policy_criterion == "any": - # Any head decide to read than read - head_read = layer.encoder_attn._get_monotonic_buffer(incremental_state)["head_read"] - assert head_read is not None - if head_read.any(): - # We need to prune the last self_attn saved_state - # if model decide not to read - # otherwise there will be duplicated saved_state - self.clean_cache(incremental_state, i + 1) - - return x, TransformerMonotonicDecoderOut( - action=0, - p_choose=p_choose, - attn_list=None, - encoder_out=None, - encoder_padding_mask=None, - ) - - x = self.post_attention(x) - - return x, TransformerMonotonicDecoderOut( - action=1, - p_choose=p_choose, - attn_list=attn_list, - encoder_out=encoder_out, - encoder_padding_mask=encoder_padding_mask, - ) - - -@register_model_architecture("transformer_monotonic", "transformer_monotonic") -def base_monotonic_architecture(args): - base_architecture(args) - args.encoder_unidirectional = getattr(args, "encoder_unidirectional", False) - - -@register_model_architecture( - "transformer_monotonic", "transformer_monotonic_iwslt_de_en" -) -def transformer_monotonic_iwslt_de_en(args): - transformer_iwslt_de_en(args) - base_monotonic_architecture(args) - - -# parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017) -@register_model_architecture( - "transformer_monotonic", "transformer_monotonic_vaswani_wmt_en_de_big" -) -def transformer_monotonic_vaswani_wmt_en_de_big(args): - transformer_vaswani_wmt_en_de_big(args) - - -@register_model_architecture( - "transformer_monotonic", "transformer_monotonic_vaswani_wmt_en_fr_big" -) -def transformer_monotonic_vaswani_wmt_en_fr_big(args): - transformer_monotonic_vaswani_wmt_en_fr_big(args) - - -@register_model_architecture( - "transformer_unidirectional", "transformer_unidirectional_iwslt_de_en" -) -def transformer_unidirectional_iwslt_de_en(args): - transformer_iwslt_de_en(args) - - -@register_model_architecture("transformer_monotonic", "transformer_monotonic_tiny") -def monotonic_tiny_architecture(args): - tiny_architecture(args) - base_monotonic_architecture(args) diff --git a/spaces/kohrisatou-infinity/KIP_01_beta/vdecoder/hifigan/models.py b/spaces/kohrisatou-infinity/KIP_01_beta/vdecoder/hifigan/models.py deleted file mode 100644 index bdc3fa2c3447f360472d94c2fad9bd74993f6410..0000000000000000000000000000000000000000 --- a/spaces/kohrisatou-infinity/KIP_01_beta/vdecoder/hifigan/models.py +++ /dev/null @@ -1,500 +0,0 @@ -import os -import json -from .env import AttrDict -import numpy as np 
-import torch -import torch.nn.functional as F -import torch.nn as nn -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from .utils import init_weights, get_padding - -LRELU_SLOPE = 0.1 - - -def load_model(model_path, device='cuda'): - config_file = os.path.join(os.path.split(model_path)[0], 'config.json') - with open(config_file) as f: - data = f.read() - - global h - json_config = json.loads(data) - h = AttrDict(json_config) - - generator = Generator(h).to(device) - - cp_dict = torch.load(model_path) - generator.load_state_dict(cp_dict['generator']) - generator.eval() - generator.remove_weight_norm() - del cp_dict - return generator, h - - -class ResBlock1(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.h = h - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - xt = c2(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.h = h - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - xt = c(xt) - x = xt + x - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class SineGen(torch.nn.Module): - """ Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__(self, samp_rate, harmonic_num=0, - sine_amp=0.1, noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False): - 
super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - self.flag_for_pulse = flag_for_pulse - - def _f02uv(self, f0): - # generate uv signal - uv = (f0 > self.voiced_threshold).type(torch.float32) - return uv - - def _f02sine(self, f0_values): - """ f0_values: (batchsize, length, dim) - where dim indicates fundamental tone and overtones - """ - # convert to F0 in rad. The interger part n can be ignored - # because 2 * np.pi * n doesn't affect phase - rad_values = (f0_values / self.sampling_rate) % 1 - - # initial phase noise (no noise for fundamental component) - rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \ - device=f0_values.device) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - - # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) - if not self.flag_for_pulse: - # for normal case - - # To prevent torch.cumsum numerical overflow, - # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. - # Buffer tmp_over_one_idx indicates the time step to add -1. - # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi - tmp_over_one = torch.cumsum(rad_values, 1) % 1 - tmp_over_one_idx = (torch.diff(tmp_over_one, dim=1)) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - - sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) - * 2 * np.pi) - else: - # If necessary, make sure that the first time step of every - # voiced segments is sin(pi) or cos(0) - # This is used for pulse-train generation - - # identify the last time step in unvoiced segments - uv = self._f02uv(f0_values) - uv_1 = torch.roll(uv, shifts=-1, dims=1) - uv_1[:, -1, :] = 1 - u_loc = (uv < 1) * (uv_1 > 0) - - # get the instantanouse phase - tmp_cumsum = torch.cumsum(rad_values, dim=1) - # different batch needs to be processed differently - for idx in range(f0_values.shape[0]): - temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] - temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] - # stores the accumulation of i.phase within - # each voiced segments - tmp_cumsum[idx, :, :] = 0 - tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum - - # rad_values - tmp_cumsum: remove the accumulation of i.phase - # within the previous voiced segment. - i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) - - # get the sines - sines = torch.cos(i_phase * 2 * np.pi) - return sines - - def forward(self, f0): - """ sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, - device=f0.device) - # fundamental component - fn = torch.multiply(f0, torch.FloatTensor([[range(1, self.harmonic_num + 2)]]).to(f0.device)) - - # generate sine waveforms - sine_waves = self._f02sine(fn) * self.sine_amp - - # generate uv signal - # uv = torch.ones(f0.shape) - # uv = uv * (f0 > self.voiced_threshold) - uv = self._f02uv(f0) - - # noise: for unvoiced should be similar to sine_amp - # std = self.sine_amp/3 -> max value ~ self.sine_amp - # . 
for voiced regions is self.noise_std - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - - # first: set the unvoiced part to 0 by uv - # then: additive noise - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """ SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - - # to produce sine waveforms - self.l_sin_gen = SineGen(sampling_rate, harmonic_num, - sine_amp, add_noise_std, voiced_threshod) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x): - """ - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - """ - # source for harmonic branch - sine_wavs, uv, _ = self.l_sin_gen(x) - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - - # source for noise branch, in the same shape as uv - noise = torch.randn_like(uv) * self.sine_amp / 3 - return sine_merge, noise, uv - - -class Generator(torch.nn.Module): - def __init__(self, h): - super(Generator, self).__init__() - self.h = h - - self.num_kernels = len(h["resblock_kernel_sizes"]) - self.num_upsamples = len(h["upsample_rates"]) - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h["upsample_rates"])) - self.m_source = SourceModuleHnNSF( - sampling_rate=h["sampling_rate"], - harmonic_num=8) - self.noise_convs = nn.ModuleList() - self.conv_pre = weight_norm(Conv1d(h["inter_channels"], h["upsample_initial_channel"], 7, 1, padding=3)) - resblock = ResBlock1 if h["resblock"] == '1' else ResBlock2 - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(h["upsample_rates"], h["upsample_kernel_sizes"])): - c_cur = h["upsample_initial_channel"] // (2 ** (i + 1)) - self.ups.append(weight_norm( - ConvTranspose1d(h["upsample_initial_channel"] // (2 ** i), h["upsample_initial_channel"] // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - if i + 1 < len(h["upsample_rates"]): # - stride_f0 = np.prod(h["upsample_rates"][i + 1:]) - self.noise_convs.append(Conv1d( - 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2)) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = h["upsample_initial_channel"] // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(h["resblock_kernel_sizes"], h["resblock_dilation_sizes"])): - self.resblocks.append(resblock(h, ch, k, d)) - - self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) - 
self.ups.apply(init_weights) - self.conv_post.apply(init_weights) - self.cond = nn.Conv1d(h['gin_channels'], h['upsample_initial_channel'], 1) - - def forward(self, x, f0, g=None): - # print(1,x.shape,f0.shape,f0[:, None].shape) - f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t - # print(2,f0.shape) - har_source, noi_source, uv = self.m_source(f0) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - x = x + self.cond(g) - # print(124,x.shape,har_source.shape) - for i in range(self.num_upsamples): - x = F.leaky_relu(x, LRELU_SLOPE) - # print(3,x.shape) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - # print(4,x_source.shape,har_source.shape,x.shape) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - remove_weight_norm(self.conv_pre) - remove_weight_norm(self.conv_post) - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, periods=None): - super(MultiPeriodDiscriminator, self).__init__() - self.periods = periods if periods is not None else [2, 3, 5, 7, 11] - self.discriminators = nn.ModuleList() - for period in self.periods: - self.discriminators.append(DiscriminatorP(period)) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 128, 15, 1, padding=7)), - norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)), - norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)), - norm_f(Conv1d(256, 512, 
41, 4, groups=16, padding=20)), - norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiScaleDiscriminator(torch.nn.Module): - def __init__(self): - super(MultiScaleDiscriminator, self).__init__() - self.discriminators = nn.ModuleList([ - DiscriminatorS(use_spectral_norm=True), - DiscriminatorS(), - DiscriminatorS(), - ]) - self.meanpools = nn.ModuleList([ - AvgPool1d(4, 2, padding=2), - AvgPool1d(4, 2, padding=2) - ]) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - if i != 0: - y = self.meanpools[i - 1](y) - y_hat = self.meanpools[i - 1](y_hat) - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - fmap_rs.append(fmap_r) - y_d_gs.append(y_d_g) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - r_loss = torch.mean((1 - dr) ** 2) - g_loss = torch.mean(dg ** 2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - l = torch.mean((1 - dg) ** 2) - gen_losses.append(l) - loss += l - - return loss, gen_losses diff --git a/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/losses/style_loss.py b/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/losses/style_loss.py deleted file mode 100644 index 0bb42d7fbc5d17a47bec7365889868505f5fdfb5..0000000000000000000000000000000000000000 --- a/spaces/kquote03/lama-video-watermark-remover/saicinpainting/training/losses/style_loss.py +++ /dev/null @@ -1,155 +0,0 @@ -import torch -import torch.nn as nn -import torchvision.models as models - - -class PerceptualLoss(nn.Module): - r""" - Perceptual loss, VGG-based - https://arxiv.org/abs/1603.08155 - https://github.com/dxyang/StyleTransfer/blob/master/utils.py - """ - - def __init__(self, weights=[1.0, 1.0, 1.0, 1.0, 1.0]): - super(PerceptualLoss, self).__init__() - self.add_module('vgg', VGG19()) - self.criterion = torch.nn.L1Loss() - self.weights = weights - - def __call__(self, x, y): - # Compute features - x_vgg, y_vgg = self.vgg(x), self.vgg(y) - - content_loss = 0.0 - content_loss += self.weights[0] * self.criterion(x_vgg['relu1_1'], y_vgg['relu1_1']) - content_loss += self.weights[1] * self.criterion(x_vgg['relu2_1'], y_vgg['relu2_1']) - content_loss += self.weights[2] * self.criterion(x_vgg['relu3_1'], y_vgg['relu3_1']) - content_loss += self.weights[3] * self.criterion(x_vgg['relu4_1'], y_vgg['relu4_1']) - content_loss += self.weights[4] * self.criterion(x_vgg['relu5_1'], y_vgg['relu5_1']) - - - return content_loss - - -class VGG19(torch.nn.Module): - def __init__(self): - super(VGG19, self).__init__() - features = 
models.vgg19(pretrained=True).features - self.relu1_1 = torch.nn.Sequential() - self.relu1_2 = torch.nn.Sequential() - - self.relu2_1 = torch.nn.Sequential() - self.relu2_2 = torch.nn.Sequential() - - self.relu3_1 = torch.nn.Sequential() - self.relu3_2 = torch.nn.Sequential() - self.relu3_3 = torch.nn.Sequential() - self.relu3_4 = torch.nn.Sequential() - - self.relu4_1 = torch.nn.Sequential() - self.relu4_2 = torch.nn.Sequential() - self.relu4_3 = torch.nn.Sequential() - self.relu4_4 = torch.nn.Sequential() - - self.relu5_1 = torch.nn.Sequential() - self.relu5_2 = torch.nn.Sequential() - self.relu5_3 = torch.nn.Sequential() - self.relu5_4 = torch.nn.Sequential() - - for x in range(2): - self.relu1_1.add_module(str(x), features[x]) - - for x in range(2, 4): - self.relu1_2.add_module(str(x), features[x]) - - for x in range(4, 7): - self.relu2_1.add_module(str(x), features[x]) - - for x in range(7, 9): - self.relu2_2.add_module(str(x), features[x]) - - for x in range(9, 12): - self.relu3_1.add_module(str(x), features[x]) - - for x in range(12, 14): - self.relu3_2.add_module(str(x), features[x]) - - for x in range(14, 16): - self.relu3_2.add_module(str(x), features[x]) - - for x in range(16, 18): - self.relu3_4.add_module(str(x), features[x]) - - for x in range(18, 21): - self.relu4_1.add_module(str(x), features[x]) - - for x in range(21, 23): - self.relu4_2.add_module(str(x), features[x]) - - for x in range(23, 25): - self.relu4_3.add_module(str(x), features[x]) - - for x in range(25, 27): - self.relu4_4.add_module(str(x), features[x]) - - for x in range(27, 30): - self.relu5_1.add_module(str(x), features[x]) - - for x in range(30, 32): - self.relu5_2.add_module(str(x), features[x]) - - for x in range(32, 34): - self.relu5_3.add_module(str(x), features[x]) - - for x in range(34, 36): - self.relu5_4.add_module(str(x), features[x]) - - # don't need the gradients, just want the features - for param in self.parameters(): - param.requires_grad = False - - def forward(self, x): - relu1_1 = self.relu1_1(x) - relu1_2 = self.relu1_2(relu1_1) - - relu2_1 = self.relu2_1(relu1_2) - relu2_2 = self.relu2_2(relu2_1) - - relu3_1 = self.relu3_1(relu2_2) - relu3_2 = self.relu3_2(relu3_1) - relu3_3 = self.relu3_3(relu3_2) - relu3_4 = self.relu3_4(relu3_3) - - relu4_1 = self.relu4_1(relu3_4) - relu4_2 = self.relu4_2(relu4_1) - relu4_3 = self.relu4_3(relu4_2) - relu4_4 = self.relu4_4(relu4_3) - - relu5_1 = self.relu5_1(relu4_4) - relu5_2 = self.relu5_2(relu5_1) - relu5_3 = self.relu5_3(relu5_2) - relu5_4 = self.relu5_4(relu5_3) - - out = { - 'relu1_1': relu1_1, - 'relu1_2': relu1_2, - - 'relu2_1': relu2_1, - 'relu2_2': relu2_2, - - 'relu3_1': relu3_1, - 'relu3_2': relu3_2, - 'relu3_3': relu3_3, - 'relu3_4': relu3_4, - - 'relu4_1': relu4_1, - 'relu4_2': relu4_2, - 'relu4_3': relu4_3, - 'relu4_4': relu4_4, - - 'relu5_1': relu5_1, - 'relu5_2': relu5_2, - 'relu5_3': relu5_3, - 'relu5_4': relu5_4, - } - return out diff --git a/spaces/krislynn/krislynn/README.md b/spaces/krislynn/krislynn/README.md deleted file mode 100644 index 8d9cb28d9560138420096a6bffce00f12de3c962..0000000000000000000000000000000000000000 --- a/spaces/krislynn/krislynn/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Krislynn -emoji: 🐠 -colorFrom: yellow -colorTo: yellow -sdk: static -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/filelock/_api.py 
b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/filelock/_api.py deleted file mode 100644 index 66710cc5cd3db6174b697ab158293f7026d2080a..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/filelock/_api.py +++ /dev/null @@ -1,286 +0,0 @@ -from __future__ import annotations - -import contextlib -import logging -import os -import time -import warnings -from abc import ABC, abstractmethod -from dataclasses import dataclass -from threading import local -from types import TracebackType -from typing import Any - -from ._error import Timeout - -_LOGGER = logging.getLogger("filelock") - - -# This is a helper class which is returned by :meth:`BaseFileLock.acquire` and wraps the lock to make sure __enter__ -# is not called twice when entering the with statement. If we would simply return *self*, the lock would be acquired -# again in the *__enter__* method of the BaseFileLock, but not released again automatically. issue #37 (memory leak) -class AcquireReturnProxy: - """A context aware object that will release the lock file when exiting.""" - - def __init__(self, lock: BaseFileLock) -> None: - self.lock = lock - - def __enter__(self) -> BaseFileLock: - return self.lock - - def __exit__( - self, - exc_type: type[BaseException] | None, # noqa: U100 - exc_value: BaseException | None, # noqa: U100 - traceback: TracebackType | None, # noqa: U100 - ) -> None: - self.lock.release() - - -@dataclass -class FileLockContext: - """ - A dataclass which holds the context for a ``BaseFileLock`` object. - """ - - # The context is held in a separate class to allow optional use of thread local storage via the - # ThreadLocalFileContext class. - - #: The path to the lock file. - lock_file: str - - #: The default timeout value. - timeout: float - - #: The mode for the lock files - mode: int - - #: The file descriptor for the *_lock_file* as it is returned by the os.open() function, not None when lock held - lock_file_fd: int | None = None - - #: The lock counter is used for implementing the nested locking mechanism. - lock_counter: int = 0 # When the lock is acquired is increased and the lock is only released, when this value is 0 - - -class ThreadLocalFileContext(FileLockContext, local): - """ - A thread local version of the ``FileLockContext`` class. - """ - - -class BaseFileLock(ABC, contextlib.ContextDecorator): - """Abstract base class for a file lock object.""" - - def __init__( - self, - lock_file: str | os.PathLike[Any], - timeout: float = -1, - mode: int = 0o644, - thread_local: bool = True, - ) -> None: - """ - Create a new lock object. - - :param lock_file: path to the file - :param timeout: default timeout when acquiring the lock, in seconds. It will be used as fallback value in - the acquire method, if no timeout value (``None``) is given. If you want to disable the timeout, set it - to a negative value. A timeout of 0 means, that there is exactly one attempt to acquire the file lock. - :param mode: file permissions for the lockfile. - :param thread_local: Whether this object's internal context should be thread local or not. - If this is set to ``False`` then the lock will be reentrant across threads. - """ - self._is_thread_local = thread_local - - # Create the context. Note that external code should not work with the context directly and should instead use - # properties of this class. 
- kwargs: dict[str, Any] = { - "lock_file": os.fspath(lock_file), - "timeout": timeout, - "mode": mode, - } - self._context: FileLockContext = (ThreadLocalFileContext if thread_local else FileLockContext)(**kwargs) - - def is_thread_local(self) -> bool: - """:return: a flag indicating if this lock is thread local or not""" - return self._is_thread_local - - @property - def lock_file(self) -> str: - """:return: path to the lock file""" - return self._context.lock_file - - @property - def timeout(self) -> float: - """ - :return: the default timeout value, in seconds - - .. versionadded:: 2.0.0 - """ - return self._context.timeout - - @timeout.setter - def timeout(self, value: float | str) -> None: - """ - Change the default timeout value. - - :param value: the new value, in seconds - """ - self._context.timeout = float(value) - - @abstractmethod - def _acquire(self) -> None: - """If the file lock could be acquired, self._context.lock_file_fd holds the file descriptor of the lock file.""" - raise NotImplementedError - - @abstractmethod - def _release(self) -> None: - """Releases the lock and sets self._context.lock_file_fd to None.""" - raise NotImplementedError - - @property - def is_locked(self) -> bool: - """ - - :return: A boolean indicating if the lock file is holding the lock currently. - - .. versionchanged:: 2.0.0 - - This was previously a method and is now a property. - """ - return self._context.lock_file_fd is not None - - @property - def lock_counter(self) -> int: - """ - :return: The number of times this lock has been acquired (but not yet released). - """ - return self._context.lock_counter - - def acquire( - self, - timeout: float | None = None, - poll_interval: float = 0.05, - *, - poll_intervall: float | None = None, - blocking: bool = True, - ) -> AcquireReturnProxy: - """ - Try to acquire the file lock. - - :param timeout: maximum wait time for acquiring the lock, ``None`` means use the default :attr:`~timeout` is and - if ``timeout < 0``, there is no timeout and this method will block until the lock could be acquired - :param poll_interval: interval of trying to acquire the lock file - :param poll_intervall: deprecated, kept for backwards compatibility, use ``poll_interval`` instead - :param blocking: defaults to True. If False, function will return immediately if it cannot obtain a lock on the - first attempt. Otherwise, this method will block until the timeout expires or the lock is acquired. - :raises Timeout: if fails to acquire lock within the timeout period - :return: a context object that will unlock the file when the context is exited - - .. code-block:: python - - # You can use this method in the context manager (recommended) - with lock.acquire(): - pass - - # Or use an equivalent try-finally construct: - lock.acquire() - try: - pass - finally: - lock.release() - - .. versionchanged:: 2.0.0 - - This method returns now a *proxy* object instead of *self*, - so that it can be used in a with statement without side effects. - - """ - # Use the default timeout, if no timeout is provided. - if timeout is None: - timeout = self._context.timeout - - if poll_intervall is not None: - msg = "use poll_interval instead of poll_intervall" - warnings.warn(msg, DeprecationWarning, stacklevel=2) - poll_interval = poll_intervall - - # Increment the number right at the beginning. We can still undo it, if something fails. 
- self._context.lock_counter += 1 - - lock_id = id(self) - lock_filename = self.lock_file - start_time = time.perf_counter() - try: - while True: - if not self.is_locked: - _LOGGER.debug("Attempting to acquire lock %s on %s", lock_id, lock_filename) - self._acquire() - if self.is_locked: - _LOGGER.debug("Lock %s acquired on %s", lock_id, lock_filename) - break - elif blocking is False: - _LOGGER.debug("Failed to immediately acquire lock %s on %s", lock_id, lock_filename) - raise Timeout(lock_filename) - elif 0 <= timeout < time.perf_counter() - start_time: - _LOGGER.debug("Timeout on acquiring lock %s on %s", lock_id, lock_filename) - raise Timeout(lock_filename) - else: - msg = "Lock %s not acquired on %s, waiting %s seconds ..." - _LOGGER.debug(msg, lock_id, lock_filename, poll_interval) - time.sleep(poll_interval) - except BaseException: # Something did go wrong, so decrement the counter. - self._context.lock_counter = max(0, self._context.lock_counter - 1) - raise - return AcquireReturnProxy(lock=self) - - def release(self, force: bool = False) -> None: - """ - Releases the file lock. Please note, that the lock is only completely released, if the lock counter is 0. Also - note, that the lock file itself is not automatically deleted. - - :param force: If true, the lock counter is ignored and the lock is released in every case/ - """ - if self.is_locked: - self._context.lock_counter -= 1 - - if self._context.lock_counter == 0 or force: - lock_id, lock_filename = id(self), self.lock_file - - _LOGGER.debug("Attempting to release lock %s on %s", lock_id, lock_filename) - self._release() - self._context.lock_counter = 0 - _LOGGER.debug("Lock %s released on %s", lock_id, lock_filename) - - def __enter__(self) -> BaseFileLock: - """ - Acquire the lock. - - :return: the lock object - """ - self.acquire() - return self - - def __exit__( - self, - exc_type: type[BaseException] | None, # noqa: U100 - exc_value: BaseException | None, # noqa: U100 - traceback: TracebackType | None, # noqa: U100 - ) -> None: - """ - Release the lock. - - :param exc_type: the exception type if raised - :param exc_value: the exception value if raised - :param traceback: the exception traceback if raised - """ - self.release() - - def __del__(self) -> None: - """Called when the lock object is deleted.""" - self.release(force=True) - - -__all__ = [ - "BaseFileLock", - "AcquireReturnProxy", -] diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/misc/fixedTools.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/misc/fixedTools.py deleted file mode 100644 index 330042871c521231f2a396add543dd425783722b..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/misc/fixedTools.py +++ /dev/null @@ -1,253 +0,0 @@ -""" -The `OpenType specification `_ -defines two fixed-point data types: - -``Fixed`` - A 32-bit signed fixed-point number with a 16 bit twos-complement - magnitude component and 16 fractional bits. -``F2DOT14`` - A 16-bit signed fixed-point number with a 2 bit twos-complement - magnitude component and 14 fractional bits. - -To support reading and writing data with these data types, this module provides -functions for converting between fixed-point, float and string representations. - -.. data:: MAX_F2DOT14 - - The maximum value that can still fit in an F2Dot14. 
(1.99993896484375) -""" - -from .roundTools import otRound, nearestMultipleShortestRepr -import logging - -log = logging.getLogger(__name__) - -__all__ = [ - "MAX_F2DOT14", - "fixedToFloat", - "floatToFixed", - "floatToFixedToFloat", - "floatToFixedToStr", - "fixedToStr", - "strToFixed", - "strToFixedToFloat", - "ensureVersionIsLong", - "versionToFixed", -] - - -MAX_F2DOT14 = 0x7FFF / (1 << 14) - - -def fixedToFloat(value, precisionBits): - """Converts a fixed-point number to a float given the number of - precision bits. - - Args: - value (int): Number in fixed-point format. - precisionBits (int): Number of precision bits. - - Returns: - Floating point value. - - Examples:: - - >>> import math - >>> f = fixedToFloat(-10139, precisionBits=14) - >>> math.isclose(f, -0.61883544921875) - True - """ - return value / (1 << precisionBits) - - -def floatToFixed(value, precisionBits): - """Converts a float to a fixed-point number given the number of - precision bits. - - Args: - value (float): Floating point value. - precisionBits (int): Number of precision bits. - - Returns: - int: Fixed-point representation. - - Examples:: - - >>> floatToFixed(-0.61883544921875, precisionBits=14) - -10139 - >>> floatToFixed(-0.61884, precisionBits=14) - -10139 - """ - return otRound(value * (1 << precisionBits)) - - -def floatToFixedToFloat(value, precisionBits): - """Converts a float to a fixed-point number and back again. - - By converting the float to fixed, rounding it, and converting it back - to float again, this returns a floating point values which is exactly - representable in fixed-point format. - - Note: this **is** equivalent to ``fixedToFloat(floatToFixed(value))``. - - Args: - value (float): The input floating point value. - precisionBits (int): Number of precision bits. - - Returns: - float: The transformed and rounded value. - - Examples:: - >>> import math - >>> f1 = -0.61884 - >>> f2 = floatToFixedToFloat(-0.61884, precisionBits=14) - >>> f1 != f2 - True - >>> math.isclose(f2, -0.61883544921875) - True - """ - scale = 1 << precisionBits - return otRound(value * scale) / scale - - -def fixedToStr(value, precisionBits): - """Converts a fixed-point number to a string representing a decimal float. - - This chooses the float that has the shortest decimal representation (the least - number of fractional decimal digits). - - For example, to convert a fixed-point number in a 2.14 format, use - ``precisionBits=14``:: - - >>> fixedToStr(-10139, precisionBits=14) - '-0.61884' - - This is pretty slow compared to the simple division used in ``fixedToFloat``. - Use sporadically when you need to serialize or print the fixed-point number in - a human-readable form. - It uses nearestMultipleShortestRepr under the hood. - - Args: - value (int): The fixed-point value to convert. - precisionBits (int): Number of precision bits, *up to a maximum of 16*. - - Returns: - str: A string representation of the value. - """ - scale = 1 << precisionBits - return nearestMultipleShortestRepr(value / scale, factor=1.0 / scale) - - -def strToFixed(string, precisionBits): - """Converts a string representing a decimal float to a fixed-point number. - - Args: - string (str): A string representing a decimal float. - precisionBits (int): Number of precision bits, *up to a maximum of 16*. - - Returns: - int: Fixed-point representation. 
- - Examples:: - - >>> ## to convert a float string to a 2.14 fixed-point number: - >>> strToFixed('-0.61884', precisionBits=14) - -10139 - """ - value = float(string) - return otRound(value * (1 << precisionBits)) - - -def strToFixedToFloat(string, precisionBits): - """Convert a string to a decimal float with fixed-point rounding. - - This first converts string to a float, then turns it into a fixed-point - number with ``precisionBits`` fractional binary digits, then back to a - float again. - - This is simply a shorthand for fixedToFloat(floatToFixed(float(s))). - - Args: - string (str): A string representing a decimal float. - precisionBits (int): Number of precision bits. - - Returns: - float: The transformed and rounded value. - - Examples:: - - >>> import math - >>> s = '-0.61884' - >>> bits = 14 - >>> f = strToFixedToFloat(s, precisionBits=bits) - >>> math.isclose(f, -0.61883544921875) - True - >>> f == fixedToFloat(floatToFixed(float(s), precisionBits=bits), precisionBits=bits) - True - """ - value = float(string) - scale = 1 << precisionBits - return otRound(value * scale) / scale - - -def floatToFixedToStr(value, precisionBits): - """Convert float to string with fixed-point rounding. - - This uses the shortest decimal representation (ie. the least - number of fractional decimal digits) to represent the equivalent - fixed-point number with ``precisionBits`` fractional binary digits. - It uses nearestMultipleShortestRepr under the hood. - - >>> floatToFixedToStr(-0.61883544921875, precisionBits=14) - '-0.61884' - - Args: - value (float): The float value to convert. - precisionBits (int): Number of precision bits, *up to a maximum of 16*. - - Returns: - str: A string representation of the value. - - """ - scale = 1 << precisionBits - return nearestMultipleShortestRepr(value, factor=1.0 / scale) - - -def ensureVersionIsLong(value): - """Ensure a table version is an unsigned long. - - OpenType table version numbers are expressed as a single unsigned long - comprising of an unsigned short major version and unsigned short minor - version. This function detects if the value to be used as a version number - looks too small (i.e. is less than ``0x10000``), and converts it to - fixed-point using :func:`floatToFixed` if so. - - Args: - value (Number): a candidate table version number. - - Returns: - int: A table version number, possibly corrected to fixed-point. - """ - if value < 0x10000: - newValue = floatToFixed(value, 16) - log.warning( - "Table version value is a float: %.4f; " "fix to use hex instead: 0x%08x", - value, - newValue, - ) - value = newValue - return value - - -def versionToFixed(value): - """Ensure a table version number is fixed-point. - - Args: - value (str): a candidate table version number. - - Returns: - int: A table version number, possibly corrected to fixed-point. 
- """ - value = int(value, 0) if value.startswith("0") else float(value) - value = ensureVersionIsLong(value) - return value diff --git a/spaces/lambdalabs/stable-diffusion-image-variations/README.md b/spaces/lambdalabs/stable-diffusion-image-variations/README.md deleted file mode 100644 index 5e316aef93dd637e2d41672a74a5a4e18fa4039a..0000000000000000000000000000000000000000 --- a/spaces/lambdalabs/stable-diffusion-image-variations/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Stable Diffusion Image Variations -emoji: 🖼️ -colorFrom: purple -colorTo: purple -sdk: gradio -sdk_version: 3.21.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/leurez/moss/service/src/index.ts b/spaces/leurez/moss/service/src/index.ts deleted file mode 100644 index cdd4f0d5b53fcfb13db7c4caa82a3bc1beb273cf..0000000000000000000000000000000000000000 --- a/spaces/leurez/moss/service/src/index.ts +++ /dev/null @@ -1,89 +0,0 @@ -import express from 'express' -import type { RequestProps } from './types' -import type { ChatMessage } from './chatgpt' -import { chatConfig, chatReplyProcess, currentModel } from './chatgpt' -import { auth } from './middleware/auth' -import { limiter } from './middleware/limiter' -import { isNotEmptyString } from './utils/is' - -const app = express() -const router = express.Router() - -app.use(express.static('public')) -app.use(express.json()) - -app.all('*', (_, res, next) => { - res.header('Access-Control-Allow-Origin', '*') - res.header('Access-Control-Allow-Headers', 'authorization, Content-Type') - res.header('Access-Control-Allow-Methods', '*') - next() -}) - -router.post('/chat-process', [auth, limiter], async (req, res) => { - res.setHeader('Content-type', 'application/octet-stream') - - try { - const { prompt, options = {}, systemMessage, temperature, top_p } = req.body as RequestProps - let firstChunk = true - await chatReplyProcess({ - message: prompt, - lastContext: options, - process: (chat: ChatMessage) => { - res.write(firstChunk ? 
JSON.stringify(chat) : `\n${JSON.stringify(chat)}`) - firstChunk = false - }, - systemMessage, - temperature, - top_p, - }) - } - catch (error) { - res.write(JSON.stringify(error)) - } - finally { - res.end() - } -}) - -router.post('/config', auth, async (req, res) => { - try { - const response = await chatConfig() - res.send(response) - } - catch (error) { - res.send(error) - } -}) - -router.post('/session', async (req, res) => { - try { - const AUTH_SECRET_KEY = process.env.AUTH_SECRET_KEY - const hasAuth = isNotEmptyString(AUTH_SECRET_KEY) - res.send({ status: 'Success', message: '', data: { auth: hasAuth, model: currentModel() } }) - } - catch (error) { - res.send({ status: 'Fail', message: error.message, data: null }) - } -}) - -router.post('/verify', async (req, res) => { - try { - const { token } = req.body as { token: string } - if (!token) - throw new Error('Secret key is empty') - - if (process.env.AUTH_SECRET_KEY !== token) - throw new Error('密钥无效 | Secret key is invalid') - - res.send({ status: 'Success', message: 'Verify successfully', data: null }) - } - catch (error) { - res.send({ status: 'Fail', message: error.message, data: null }) - } -}) - -app.use('', router) -app.use('/api', router) -app.set('trust proxy', 1) - -app.listen(7860, () => globalThis.console.log('Server is running on port 7860')) diff --git a/spaces/lewiswu1209/MockingBird/ppg2mel/train/solver.py b/spaces/lewiswu1209/MockingBird/ppg2mel/train/solver.py deleted file mode 100644 index 9ca71cbf2a6b621fa299245f831d4d723ba56977..0000000000000000000000000000000000000000 --- a/spaces/lewiswu1209/MockingBird/ppg2mel/train/solver.py +++ /dev/null @@ -1,217 +0,0 @@ -import os -import sys -import abc -import math -import yaml -import torch -from torch.utils.tensorboard import SummaryWriter - -from .option import default_hparas -from utils.util import human_format, Timer -from utils.load_yaml import HpsYaml - - -class BaseSolver(): - ''' - Prototype Solver for all kinds of tasks - Arguments - config - yaml-styled config - paras - argparse outcome - mode - "train"/"test" - ''' - - def __init__(self, config, paras, mode="train"): - # General Settings - self.config = config # load from yaml file - self.paras = paras # command line args - self.mode = mode # 'train' or 'test' - for k, v in default_hparas.items(): - setattr(self, k, v) - self.device = torch.device('cuda') if self.paras.gpu and torch.cuda.is_available() \ - else torch.device('cpu') - - # Name experiment - self.exp_name = paras.name - if self.exp_name is None: - if 'exp_name' in self.config: - self.exp_name = self.config.exp_name - else: - # By default, exp is named after config file - self.exp_name = paras.config.split('/')[-1].replace('.yaml', '') - if mode == 'train': - self.exp_name += '_seed{}'.format(paras.seed) - - - if mode == 'train': - # Filepath setup - os.makedirs(paras.ckpdir, exist_ok=True) - self.ckpdir = os.path.join(paras.ckpdir, self.exp_name) - os.makedirs(self.ckpdir, exist_ok=True) - - # Logger settings - self.logdir = os.path.join(paras.logdir, self.exp_name) - self.log = SummaryWriter( - self.logdir, flush_secs=self.TB_FLUSH_FREQ) - self.timer = Timer() - - # Hyper-parameters - self.step = 0 - self.valid_step = config.hparas.valid_step - self.max_step = config.hparas.max_step - - self.verbose('Exp. name : {}'.format(self.exp_name)) - self.verbose('Loading data... 
large corpus may took a while.') - - # elif mode == 'test': - # # Output path - # os.makedirs(paras.outdir, exist_ok=True) - # self.ckpdir = os.path.join(paras.outdir, self.exp_name) - - # Load training config to get acoustic feat and build model - # self.src_config = HpsYaml(config.src.config) - # self.paras.load = config.src.ckpt - - # self.verbose('Evaluating result of tr. config @ {}'.format( - # config.src.config)) - - def backward(self, loss): - ''' - Standard backward step with self.timer and debugger - Arguments - loss - the loss to perform loss.backward() - ''' - self.timer.set() - loss.backward() - grad_norm = torch.nn.utils.clip_grad_norm_( - self.model.parameters(), self.GRAD_CLIP) - if math.isnan(grad_norm): - self.verbose('Error : grad norm is NaN @ step '+str(self.step)) - else: - self.optimizer.step() - self.timer.cnt('bw') - return grad_norm - - def load_ckpt(self): - ''' Load ckpt if --load option is specified ''' - print(self.paras) - if self.paras.load is not None: - if self.paras.warm_start: - self.verbose(f"Warm starting model from checkpoint {self.paras.load}.") - ckpt = torch.load( - self.paras.load, map_location=self.device if self.mode == 'train' - else 'cpu') - model_dict = ckpt['model'] - if "ignore_layers" in self.config.model and len(self.config.model.ignore_layers) > 0: - model_dict = {k:v for k, v in model_dict.items() - if k not in self.config.model.ignore_layers} - dummy_dict = self.model.state_dict() - dummy_dict.update(model_dict) - model_dict = dummy_dict - self.model.load_state_dict(model_dict) - else: - # Load weights - ckpt = torch.load( - self.paras.load, map_location=self.device if self.mode == 'train' - else 'cpu') - self.model.load_state_dict(ckpt['model']) - - # Load task-dependent items - if self.mode == 'train': - self.step = ckpt['global_step'] - self.optimizer.load_opt_state_dict(ckpt['optimizer']) - self.verbose('Load ckpt from {}, restarting at step {}'.format( - self.paras.load, self.step)) - else: - for k, v in ckpt.items(): - if type(v) is float: - metric, score = k, v - self.model.eval() - self.verbose('Evaluation target = {} (recorded {} = {:.2f} %)'.format( - self.paras.load, metric, score)) - - def verbose(self, msg): - ''' Verbose function for print information to stdout''' - if self.paras.verbose: - if type(msg) == list: - for m in msg: - print('[INFO]', m.ljust(100)) - else: - print('[INFO]', msg.ljust(100)) - - def progress(self, msg): - ''' Verbose function for updating progress on stdout (do not include newline) ''' - if self.paras.verbose: - sys.stdout.write("\033[K") # Clear line - print('[{}] {}'.format(human_format(self.step), msg), end='\r') - - def write_log(self, log_name, log_dict): - ''' - Write log to TensorBoard - log_name - Name of tensorboard variable - log_value - / Value of variable (e.g. 
dict of losses), passed if value = None - ''' - if type(log_dict) is dict: - log_dict = {key: val for key, val in log_dict.items() if ( - val is not None and not math.isnan(val))} - if log_dict is None: - pass - elif len(log_dict) > 0: - if 'align' in log_name or 'spec' in log_name: - img, form = log_dict - self.log.add_image( - log_name, img, global_step=self.step, dataformats=form) - elif 'text' in log_name or 'hyp' in log_name: - self.log.add_text(log_name, log_dict, self.step) - else: - self.log.add_scalars(log_name, log_dict, self.step) - - def save_checkpoint(self, f_name, metric, score, show_msg=True): - '''' - Ckpt saver - f_name - the name of ckpt file (w/o prefix) to store, overwrite if existed - score - The value of metric used to evaluate model - ''' - ckpt_path = os.path.join(self.ckpdir, f_name) - full_dict = { - "model": self.model.state_dict(), - "optimizer": self.optimizer.get_opt_state_dict(), - "global_step": self.step, - metric: score - } - - torch.save(full_dict, ckpt_path) - if show_msg: - self.verbose("Saved checkpoint (step = {}, {} = {:.2f}) and status @ {}". - format(human_format(self.step), metric, score, ckpt_path)) - - - # ----------------------------------- Abtract Methods ------------------------------------------ # - @abc.abstractmethod - def load_data(self): - ''' - Called by main to load all data - After this call, data related attributes should be setup (e.g. self.tr_set, self.dev_set) - No return value - ''' - raise NotImplementedError - - @abc.abstractmethod - def set_model(self): - ''' - Called by main to set models - After this call, model related attributes should be setup (e.g. self.l2_loss) - The followings MUST be setup - - self.model (torch.nn.Module) - - self.optimizer (src.Optimizer), - init. w/ self.optimizer = src.Optimizer(self.model.parameters(),**self.config['hparas']) - Loading pre-trained model should also be performed here - No return value - ''' - raise NotImplementedError - - @abc.abstractmethod - def exec(self): - ''' - Called by main to execute training/inference - ''' - raise NotImplementedError diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Bulerias Paco De Lucia Pdf 22.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Bulerias Paco De Lucia Pdf 22.md deleted file mode 100644 index 59effb0ea901d24299e15ccedbde9d820b5e9f89..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Bulerias Paco De Lucia Pdf 22.md +++ /dev/null @@ -1,8 +0,0 @@ -
          -

          Most bulerias are Spanish dance pieces, but there is no dancing in these. In the first bulerias he plays a chordal melody that leads into a round, followed by a bulerias that can only be played on the bass strings of the guitar. He then does the same thing with a bulerias that leads into a different chordal melody, repeating the theme a few times. Finally, he plays the bulerias itself on the guitar.

          -

          In conclusion, the alzapua is a spectacular technique in which the effect can often take precedence over the musical idea. This is why it is used most abundantly in live performances, to pique the excitement and emotion of flamenco audiences, and less in solo recordings, where a guitarist in the studio, unfettered by the duty of stirring a live audience, can concentrate on composition and express himself more freely. The last of our studies is a beautiful illustration of what can be done with two guitars using alzapua, miraculous in spite of everything. This is a bulerias from Miguel Linares, among the most brilliant tocaores, whose career began in Paris (Example 4):

          -

          Bulerias Paco De Lucia Pdf 22


          DOWNLOAD: https://bytlly.com/2uGxC0



          -

          Before July of 2019, I had never held a guitar, and I had never taken a music class of any sort outside of chorus in grade school. One day I went on YouTube and searched for "Spanish guitar"; a video of Paco de Lucia playing por bulerias came up, and I watched it. From that day forward I knew that I wanted to play just like him.

          -

          When I finished the transcription of Almoraima as the official edition of transcriptions of Paco's entire discography, his brother Ramon asked me to start working on Solo Quiero Caminar for the next book of the series (as per our agreement to transcribe the Maestro's entire discography). This was the dark, dark day on which he was diagnosed with a lung tumor. In spite of his condition, we decided to go on with the project.

          899543212b
          -
          -
          \ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Chankast-Cheater.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Chankast-Cheater.md deleted file mode 100644 index 2f1bca8f20cf09a88ae771b8d3c6546cf5eed425..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Chankast-Cheater.md +++ /dev/null @@ -1,25 +0,0 @@ -Chankast Cheater - - - -Chankast Cheater ->->->-> [https://urlcod.com/2tvOTC](https://urlcod.com/2tvOTC) - - - - - - - - - -Chankast Cheater: A Cheat Finding Assistant for Dreamcast Emulator -Chankast Cheater is a utility that allows you to find and use cheats for the Dreamcast emulator Chankast. Chankast is one of the first emulators that can run commercial Dreamcast games on Windows. With Chankast Cheater, you can enhance your gaming experience by unlocking hidden features, modifying game parameters, or skipping difficult levels. -Chankast Cheater was developed by Kryso as part of the Chankast Utilities project, which also includes other tools such as Speed Pig, VMS Manager, Snap It, and more. Chankast Utilities is a modified version of the original Chankast emulator that adds several new features and improves system compatibility. You can download Chankast Utilities from http://chankast.1emu.net/ [^1^] [^2^]. -To use Chankast Cheater, you need to run the ccfa.exe file and select the game you want to cheat on. Then, you can search for cheats by using different methods such as exact value, unknown value, increased value, decreased value, etc. You can also browse existing cheat files in the CHTS folder or create your own cheat files. Once you find the cheats you want, you can activate them by checking the boxes and then run the game with Chankast.exe. -Chankast Cheater works not only with Chankast, but also with other Dreamcast emulators such as NullDC. You just need to rename the emulator executable to Chankast.exe and run it with Chankast Cheater. You can watch some tutorials on how to use Chankast Cheater with NullDC on YouTube [^3^] [^4^] [^5^]. -Chankast Cheater is a useful tool for Dreamcast fans who want to enjoy their favorite games with more fun and flexibility. However, please use it responsibly and respect the original game developers and publishers.Another popular Dreamcast emulator is NullDC, which is a free and open source project that runs on Windows. NullDC has very good compatibility and features like image filtering and upscaling, which can enhance the graphics of the games. NullDC also supports various plugins and drivers, such as the Logitech controller support driver, which can improve the gameplay experience. You can download NullDC from https://archive.org/details/tutorial-nulldc-by-filipe-mg-1 [^1^], where you can also find BIOS files and a tutorial on how to set up the emulator. -NullDC can run most of the Dreamcast games, including some of the classics such as Sonic Adventure, Shenmue, Soul Calibur, Resident Evil: Code Veronica, and more. You can either use your original game discs or ISO files to play the games on NullDC. However, some games may require additional settings or patches to work properly. You can check the compatibility list and the forums on https://www.emutalk.net/forums/121-nulldc for more information and help. -NullDC is a great option for Dreamcast fans who want to relive their nostalgia or discover new games on their PC. However, please use it legally and ethically and respect the original game developers and publishers. 
dfd1c89656 - - - diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Kariera Nicosia Dyzmy 720p Torrentl.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Kariera Nicosia Dyzmy 720p Torrentl.md deleted file mode 100644 index 8b63bbefbc1152bf6c591ed29efe575fa84138ab..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Kariera Nicosia Dyzmy 720p Torrentl.md +++ /dev/null @@ -1,8 +0,0 @@ -

          Kariera Nicosia Dyzmy 720p Torrentl


          Download - https://bytlly.com/2uGwPx



          -
          -Borders pursue her. She knows her own, and he forgets. For him, the force of life at home is not imposed. She reaches out, tries to show. Sometimes she does not. It looks as if she is reaching out. Perhaps she will learn exactly what must be done to win the future. She works critically, but she is needed. She cannot stand raw materials and warriors. She understands. She comes out with mystery and with unity. She is not merely an opening, but. Love - demanding, patient, persistent and passive. A saving person, able to persuade. There is something that. She also does not. She cannot stand the thrust, and from the group. She gives us many opportunities, but I know that. - -The first stirring comes from watching the film from the handbook. The handbook is not a handbook. We find ourselves within the film's prevailing screen. Thanks to familiarity with this screen we can be interested. We are sure that by the time you have read this article, you have already decided on a film of your own. 4fefd39f24
          -
          -
          -

          diff --git a/spaces/liuyuan-pal/SyncDreamer/ldm/modules/diffusionmodules/util.py b/spaces/liuyuan-pal/SyncDreamer/ldm/modules/diffusionmodules/util.py deleted file mode 100644 index a952e6c40308c33edd422da0ce6a60f47e73661b..0000000000000000000000000000000000000000 --- a/spaces/liuyuan-pal/SyncDreamer/ldm/modules/diffusionmodules/util.py +++ /dev/null @@ -1,267 +0,0 @@ -# adopted from -# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py -# and -# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py -# and -# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py -# -# thanks! - - -import os -import math -import torch -import torch.nn as nn -import numpy as np -from einops import repeat - -from ldm.util import instantiate_from_config - - -def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - if schedule == "linear": - betas = ( - torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2 - ) - - elif schedule == "cosine": - timesteps = ( - torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s - ) - alphas = timesteps / (1 + cosine_s) * np.pi / 2 - alphas = torch.cos(alphas).pow(2) - alphas = alphas / alphas[0] - betas = 1 - alphas[1:] / alphas[:-1] - betas = np.clip(betas, a_min=0, a_max=0.999) - - elif schedule == "sqrt_linear": - betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) - elif schedule == "sqrt": - betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5 - else: - raise ValueError(f"schedule '{schedule}' unknown.") - return betas.numpy() - - -def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True): - if ddim_discr_method == 'uniform': - c = num_ddpm_timesteps // num_ddim_timesteps - ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c))) - elif ddim_discr_method == 'quad': - ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int) - else: - raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"') - - # assert ddim_timesteps.shape[0] == num_ddim_timesteps - # add one to get the final alpha values right (the ones from first scale to data during sampling) - steps_out = ddim_timesteps + 1 - if verbose: - print(f'Selected timesteps for ddim sampler: {steps_out}') - return steps_out - - -def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True): - # select alphas for computing the variance schedule - alphas = alphacums[ddim_timesteps] - alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist()) - - # according the the formula provided in https://arxiv.org/abs/2010.02502 - sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)) - if verbose: - print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}') - print(f'For the chosen value of eta, which is {eta}, ' - f'this results in the following sigma_t schedule for ddim sampler {sigmas}') - return sigmas, alphas, alphas_prev - - -def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): - """ - Create a beta schedule that discretizes the given alpha_t_bar function, - which defines the cumulative product of 
(1-beta) over time from t = [0,1]. - :param num_diffusion_timesteps: the number of betas to produce. - :param alpha_bar: a lambda that takes an argument t from 0 to 1 and - produces the cumulative product of (1-beta) up to that - part of the diffusion process. - :param max_beta: the maximum beta to use; use values lower than 1 to - prevent singularities. - """ - betas = [] - for i in range(num_diffusion_timesteps): - t1 = i / num_diffusion_timesteps - t2 = (i + 1) / num_diffusion_timesteps - betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) - return np.array(betas) - - -def extract_into_tensor(a, t, x_shape): - b, *_ = t.shape - out = a.gather(-1, t) - return out.reshape(b, *((1,) * (len(x_shape) - 1))) - - -def checkpoint(func, inputs, params, flag): - """ - Evaluate a function without caching intermediate activations, allowing for - reduced memory at the expense of extra compute in the backward pass. - :param func: the function to evaluate. - :param inputs: the argument sequence to pass to `func`. - :param params: a sequence of parameters `func` depends on but does not - explicitly take as arguments. - :param flag: if False, disable gradient checkpointing. - """ - if flag: - args = tuple(inputs) + tuple(params) - return CheckpointFunction.apply(func, len(inputs), *args) - else: - return func(*inputs) - - -class CheckpointFunction(torch.autograd.Function): - @staticmethod - def forward(ctx, run_function, length, *args): - ctx.run_function = run_function - ctx.input_tensors = list(args[:length]) - ctx.input_params = list(args[length:]) - - with torch.no_grad(): - output_tensors = ctx.run_function(*ctx.input_tensors) - return output_tensors - - @staticmethod - def backward(ctx, *output_grads): - ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors] - with torch.enable_grad(): - # Fixes a bug where the first op in run_function modifies the - # Tensor storage in place, which is not allowed for detach()'d - # Tensors. - shallow_copies = [x.view_as(x) for x in ctx.input_tensors] - output_tensors = ctx.run_function(*shallow_copies) - input_grads = torch.autograd.grad( - output_tensors, - ctx.input_tensors + ctx.input_params, - output_grads, - allow_unused=True, - ) - del ctx.input_tensors - del ctx.input_params - del output_tensors - return (None, None) + input_grads - - -def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False): - """ - Create sinusoidal timestep embeddings. - :param timesteps: a 1-D Tensor of N indices, one per batch element. - These may be fractional. - :param dim: the dimension of the output. - :param max_period: controls the minimum frequency of the embeddings. - :return: an [N x dim] Tensor of positional embeddings. - """ - if not repeat_only: - half = dim // 2 - freqs = torch.exp( - -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half - ).to(device=timesteps.device) - args = timesteps[:, None].float() * freqs[None] - embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) - if dim % 2: - embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) - else: - embedding = repeat(timesteps, 'b -> b d', d=dim) - return embedding - - -def zero_module(module): - """ - Zero out the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().zero_() - return module - - -def scale_module(module, scale): - """ - Scale the parameters of a module and return it. 
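    Illustrative note: scale_module(layer, 0.0) has the same effect as zero_module(layer) above,
    since every parameter is multiplied in place by the given scale.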
- """ - for p in module.parameters(): - p.detach().mul_(scale) - return module - - -def mean_flat(tensor): - """ - Take the mean over all non-batch dimensions. - """ - return tensor.mean(dim=list(range(1, len(tensor.shape)))) - - -def normalization(channels): - """ - Make a standard normalization layer. - :param channels: number of input channels. - :return: an nn.Module for normalization. - """ - return GroupNorm32(32, channels) - - -# PyTorch 1.7 has SiLU, but we support PyTorch 1.5. -class SiLU(nn.Module): - def forward(self, x): - return x * torch.sigmoid(x) - - -class GroupNorm32(nn.GroupNorm): - def forward(self, x): - return super().forward(x.float()).type(x.dtype) - -def conv_nd(dims, *args, **kwargs): - """ - Create a 1D, 2D, or 3D convolution module. - """ - if dims == 1: - return nn.Conv1d(*args, **kwargs) - elif dims == 2: - return nn.Conv2d(*args, **kwargs) - elif dims == 3: - return nn.Conv3d(*args, **kwargs) - raise ValueError(f"unsupported dimensions: {dims}") - - -def linear(*args, **kwargs): - """ - Create a linear module. - """ - return nn.Linear(*args, **kwargs) - - -def avg_pool_nd(dims, *args, **kwargs): - """ - Create a 1D, 2D, or 3D average pooling module. - """ - if dims == 1: - return nn.AvgPool1d(*args, **kwargs) - elif dims == 2: - return nn.AvgPool2d(*args, **kwargs) - elif dims == 3: - return nn.AvgPool3d(*args, **kwargs) - raise ValueError(f"unsupported dimensions: {dims}") - - -class HybridConditioner(nn.Module): - - def __init__(self, c_concat_config, c_crossattn_config): - super().__init__() - self.concat_conditioner = instantiate_from_config(c_concat_config) - self.crossattn_conditioner = instantiate_from_config(c_crossattn_config) - - def forward(self, c_concat, c_crossattn): - c_concat = self.concat_conditioner(c_concat) - c_crossattn = self.crossattn_conditioner(c_crossattn) - return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]} - - -def noise_like(shape, device, repeat=False): - repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) - noise = lambda: torch.randn(shape, device=device) - return repeat_noise() if repeat else noise() \ No newline at end of file diff --git a/spaces/ljjggr/bingo/src/components/ui/button.tsx b/spaces/ljjggr/bingo/src/components/ui/button.tsx deleted file mode 100644 index 281da005124fa94c89a9a9db7605748a92b60865..0000000000000000000000000000000000000000 --- a/spaces/ljjggr/bingo/src/components/ui/button.tsx +++ /dev/null @@ -1,57 +0,0 @@ -import * as React from 'react' -import { Slot } from '@radix-ui/react-slot' -import { cva, type VariantProps } from 'class-variance-authority' - -import { cn } from '@/lib/utils' - -const buttonVariants = cva( - 'inline-flex items-center justify-center rounded-md text-sm font-medium shadow ring-offset-background transition-colors outline-none disabled:pointer-events-none disabled:opacity-50', - { - variants: { - variant: { - default: - 'bg-primary text-primary-foreground shadow-md hover:bg-primary/90', - destructive: - 'bg-destructive text-destructive-foreground hover:bg-destructive/90', - outline: - 'border border-input hover:bg-accent hover:text-accent-foreground', - secondary: - 'bg-secondary text-secondary-foreground hover:bg-secondary/80', - ghost: 'shadow-none hover:bg-accent hover:text-accent-foreground', - link: 'text-primary underline-offset-4 shadow-none hover:underline' - }, - size: { - default: 'h-8 px-4 py-2', - sm: 'h-8 rounded-md px-3', - lg: 'h-11 rounded-md px-8', - icon: 'h-8 w-8 p-0' - } - }, - 
defaultVariants: { - variant: 'default', - size: 'default' - } - } -) - -export interface ButtonProps - extends React.ButtonHTMLAttributes, - VariantProps { - asChild?: boolean -} - -const Button = React.forwardRef( - ({ className, variant, size, asChild = false, ...props }, ref) => { - const Comp = asChild ? Slot : 'button' - return ( - - ) - } -) -Button.displayName = 'Button' - -export { Button, buttonVariants } diff --git a/spaces/ljjggr/bingo/tailwind.config.js b/spaces/ljjggr/bingo/tailwind.config.js deleted file mode 100644 index 03da3c3c45be6983b9f5ffa6df5f1fd0870e9636..0000000000000000000000000000000000000000 --- a/spaces/ljjggr/bingo/tailwind.config.js +++ /dev/null @@ -1,48 +0,0 @@ -/** @type {import('tailwindcss').Config} */ -module.exports = { - content: [ - './src/pages/**/*.{js,ts,jsx,tsx,mdx}', - './src/components/**/*.{js,ts,jsx,tsx,mdx}', - './src/app/**/*.{js,ts,jsx,tsx,mdx}', - './src/ui/**/*.{js,ts,jsx,tsx,mdx}', - ], - "darkMode": "class", - theme: { - extend: { - colors: { - 'primary-blue': 'rgb(var(--color-primary-blue) / )', - secondary: 'rgb(var(--color-secondary) / )', - 'primary-background': 'rgb(var(--primary-background) / )', - 'primary-text': 'rgb(var(--primary-text) / )', - 'secondary-text': 'rgb(var(--secondary-text) / )', - 'light-text': 'rgb(var(--light-text) / )', - 'primary-border': 'rgb(var(--primary-border) / )', - }, - keyframes: { - slideDownAndFade: { - from: { opacity: 0, transform: 'translateY(-2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideLeftAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - slideUpAndFade: { - from: { opacity: 0, transform: 'translateY(2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideRightAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - }, - animation: { - slideDownAndFade: 'slideDownAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideLeftAndFade: 'slideLeftAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideUpAndFade: 'slideUpAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideRightAndFade: 'slideRightAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - }, - }, - }, - plugins: [require('@headlessui/tailwindcss'), require('tailwind-scrollbar')], -} diff --git a/spaces/lunarring/latentblending/ldm/models/diffusion/sampling_util.py b/spaces/lunarring/latentblending/ldm/models/diffusion/sampling_util.py deleted file mode 100644 index 7eff02be6d7c54d43ee6680636ac0698dd3b3f33..0000000000000000000000000000000000000000 --- a/spaces/lunarring/latentblending/ldm/models/diffusion/sampling_util.py +++ /dev/null @@ -1,22 +0,0 @@ -import torch -import numpy as np - - -def append_dims(x, target_dims): - """Appends dimensions to the end of a tensor until it has target_dims dimensions. 
- From https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/utils.py""" - dims_to_append = target_dims - x.ndim - if dims_to_append < 0: - raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less') - return x[(...,) + (None,) * dims_to_append] - - -def norm_thresholding(x0, value): - s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim) - return x0 * (value / s) - - -def spatial_norm_thresholding(x0, value): - # b c h w - s = x0.pow(2).mean(1, keepdim=True).sqrt().clamp(min=value) - return x0 * (value / s) \ No newline at end of file diff --git a/spaces/ma-xu/LIVE/thrust/thrust/count.h b/spaces/ma-xu/LIVE/thrust/thrust/count.h deleted file mode 100644 index 9225bc6a757ad6323af12c41c173b2c11bc0720d..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/count.h +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -/*! \file count.h - * \brief Counting elements in a range - */ - -#pragma once - -#include -#include -#include - -namespace thrust -{ - - -/*! \addtogroup algorithms - */ - -/*! \addtogroup reductions - * \ingroup algorithms - * \{ - */ - -/*! \addtogroup counting - * \ingroup reductions - * \{ - */ - - -/*! \p count finds the number of elements in [first,last) that are equal - * to \p value. More precisely, \p count returns the number of iterators \c i in - * [first, last) such that *i == value. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the sequence. - * \param last The end of the sequence. - * \param value The value to be counted. - * \return The number of elements equal to \p value. - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator must be a model of Input Iterator and \c InputIterator's \c value_type must be a model of must be a model of Equality Comparable. - * \tparam EqualityComparable must be a model of Equality Comparable and can be compared for equality with \c InputIterator's \c value_type - * - * The following code snippet demonstrates how to use \p count to - * count the number of instances in a range of a value of interest using the \p thrust::device execution policy: - * - * \code - * #include - * #include - * #include - * ... - * // put 3 1s in a device_vector - * thrust::device_vector vec(5,0); - * vec[1] = 1; - * vec[3] = 1; - * vec[4] = 1; - * - * // count the 1s - * int result = thrust::count(thrust::device, vec.begin(), vec.end(), 1); - * // result == 3 - * \endcode - * - * \see http://www.sgi.com/tech/stl/count.html - */ -template -__host__ __device__ - typename thrust::iterator_traits::difference_type - count(const thrust::detail::execution_policy_base &exec, InputIterator first, InputIterator last, const EqualityComparable& value); - - - -/*! 
\p count finds the number of elements in [first,last) that are equal - * to \p value. More precisely, \p count returns the number of iterators \c i in - * [first, last) such that *i == value. - * - * \param first The beginning of the sequence. - * \param last The end of the sequence. - * \param value The value to be counted. - * \return The number of elements equal to \p value. - * - * \tparam InputIterator must be a model of Input Iterator and \c InputIterator's \c value_type must be a model of must be a model of Equality Comparable. - * \tparam EqualityComparable must be a model of Equality Comparable and can be compared for equality with \c InputIterator's \c value_type - * - * The following code snippet demonstrates how to use \p count to - * count the number of instances in a range of a value of interest. - * \code - * #include - * #include - * ... - * // put 3 1s in a device_vector - * thrust::device_vector vec(5,0); - * vec[1] = 1; - * vec[3] = 1; - * vec[4] = 1; - * - * // count the 1s - * int result = thrust::count(vec.begin(), vec.end(), 1); - * // result == 3 - * \endcode - * - * \see http://www.sgi.com/tech/stl/count.html - */ -template - typename thrust::iterator_traits::difference_type - count(InputIterator first, InputIterator last, const EqualityComparable& value); - - -/*! \p count_if finds the number of elements in [first,last) for which - * a predicate is \c true. More precisely, \p count_if returns the number of iterators - * \c i in [first, last) such that pred(*i) == true. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the sequence. - * \param last The end of the sequence. - * \param pred The predicate. - * \return The number of elements where \p pred is \c true. - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator must be a model of Input Iterator and \c InputIterator's \c value_type must be convertible to \c Predicate's \c argument_type. - * \tparam Predicate must be a model of Predicate. - * - * The following code snippet demonstrates how to use \p count to - * count the number of odd numbers in a range using the \p thrust::device execution policy: - * - * \code - * #include - * #include - * #include - * ... - * struct is_odd - * { - * __host__ __device__ - * bool operator()(int &x) - * { - * return x & 1; - * } - * }; - * ... - * // fill a device_vector with even & odd numbers - * thrust::device_vector vec(5); - * vec[0] = 0; - * vec[1] = 1; - * vec[2] = 2; - * vec[3] = 3; - * vec[4] = 4; - * - * // count the odd elements in vec - * int result = thrust::count_if(thrust::device, vec.begin(), vec.end(), is_odd()); - * // result == 2 - * \endcode - * - * \see http://www.sgi.com/tech/stl/count.html - */ -template -__host__ __device__ - typename thrust::iterator_traits::difference_type - count_if(const thrust::detail::execution_policy_base &exec, InputIterator first, InputIterator last, Predicate pred); - - -/*! \p count_if finds the number of elements in [first,last) for which - * a predicate is \c true. More precisely, \p count_if returns the number of iterators - * \c i in [first, last) such that pred(*i) == true. - * - * \param first The beginning of the sequence. - * \param last The end of the sequence. - * \param pred The predicate. - * \return The number of elements where \p pred is \c true. 
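 * \note (illustrative) \p count_if generalizes \p count: membership is decided by a
 *       caller-supplied predicate rather than by equality with a fixed value, while the
 *       work remains linear in the length of [first, last).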
- * - * \tparam InputIterator must be a model of Input Iterator and \c InputIterator's \c value_type must be convertible to \c Predicate's \c argument_type. - * \tparam Predicate must be a model of Predicate. - * - * The following code snippet demonstrates how to use \p count to - * count the number of odd numbers in a range. - * \code - * #include - * #include - * ... - * struct is_odd - * { - * __host__ __device__ - * bool operator()(int &x) - * { - * return x & 1; - * } - * }; - * ... - * // fill a device_vector with even & odd numbers - * thrust::device_vector vec(5); - * vec[0] = 0; - * vec[1] = 1; - * vec[2] = 2; - * vec[3] = 3; - * vec[4] = 4; - * - * // count the odd elements in vec - * int result = thrust::count_if(vec.begin(), vec.end(), is_odd()); - * // result == 2 - * \endcode - * - * \see http://www.sgi.com/tech/stl/count.html - */ -template - typename thrust::iterator_traits::difference_type - count_if(InputIterator first, InputIterator last, Predicate pred); - - -/*! \} // end counting - * \} // end reductions - */ - - -} // end thrust - -#include - diff --git a/spaces/ma-xu/LIVE/thrust/thrust/detail/malloc_and_free.h b/spaces/ma-xu/LIVE/thrust/thrust/detail/malloc_and_free.h deleted file mode 100644 index 6dc238adb49311d0a0e6187ba65108183d5599a4..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/detail/malloc_and_free.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include -#include -#include -#include -#include -#include - -namespace thrust -{ - -__thrust_exec_check_disable__ -template -__host__ __device__ -pointer malloc(const thrust::detail::execution_policy_base &exec, std::size_t n) -{ - using thrust::system::detail::generic::malloc; - - // XXX should use a hypothetical thrust::static_pointer_cast here - void *raw_ptr = static_cast(thrust::raw_pointer_cast(malloc(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), n))); - - return pointer(raw_ptr); -} - -__thrust_exec_check_disable__ -template -__host__ __device__ -pointer malloc(const thrust::detail::execution_policy_base &exec, std::size_t n) -{ - using thrust::system::detail::generic::malloc; - - T *raw_ptr = static_cast(thrust::raw_pointer_cast(malloc(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), n))); - - return pointer(raw_ptr); -} - - -// XXX WAR nvbug 992955 -#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC -#if CUDART_VERSION < 5000 - -// cudafe generates unqualified calls to free(int *volatile) -// which get confused with thrust::free -// spoof a thrust::free which simply maps to ::free -inline __host__ __device__ -void free(int *volatile ptr) -{ - ::free(ptr); -} - -#endif // CUDART_VERSION -#endif // THRUST_DEVICE_COMPILER - -__thrust_exec_check_disable__ -template -__host__ __device__ -void free(const thrust::detail::execution_policy_base &exec, Pointer ptr) -{ - using thrust::system::detail::generic::free; - - free(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), ptr); -} - -// XXX consider another form of free which does not take a system argument and -// instead infers the system from the pointer - -} // end namespace thrust - diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/cpp/detail/scan.h b/spaces/ma-xu/LIVE/thrust/thrust/system/cpp/detail/scan.h deleted file mode 100644 index 4d38e648437322d078d49c9412ab9532b7cc8b69..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/cpp/detail/scan.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include - -// this system inherits scan -#include - diff --git a/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/data/realesrgan_paired_dataset.py b/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/data/realesrgan_paired_dataset.py deleted file mode 100644 index e014f8311f202f62caec40a7690e3b66aefe2265..0000000000000000000000000000000000000000 --- a/spaces/manavisrani07/gradio-lipsync-wav2lip/basicsr/data/realesrgan_paired_dataset.py +++ /dev/null @@ -1,109 +0,0 @@ -import os -from torch.utils import data as data -from torchvision.transforms.functional import normalize - -from basicsr.data.data_util import paired_paths_from_folder, paired_paths_from_lmdb -from basicsr.data.transforms import augment, paired_random_crop -from basicsr.utils import FileClient, imfrombytes, img2tensor -from basicsr.utils.registry import DATASET_REGISTRY - - -@DATASET_REGISTRY.register(suffix='basicsr') -class RealESRGANPairedDataset(data.Dataset): - """Paired image dataset for image restoration. - - Read LQ (Low Quality, e.g. LR (Low Resolution), blurry, noisy, etc) and GT image pairs. - - There are three modes: - 1. 'lmdb': Use lmdb files. - If opt['io_backend'] == lmdb. - 2. 'meta_info': Use meta information file to generate paths. - If opt['io_backend'] != lmdb and opt['meta_info'] is not None. - 3. 'folder': Scan folders to generate paths. - The rest. - - Args: - opt (dict): Config for train datasets. It contains the following keys: - dataroot_gt (str): Data root path for gt. - dataroot_lq (str): Data root path for lq. - meta_info (str): Path for meta information file. - io_backend (dict): IO backend type and other kwarg. - filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. - Default: '{}'. - gt_size (int): Cropped patched size for gt patches. - use_hflip (bool): Use horizontal flips. - use_rot (bool): Use rotation (use vertical flip and transposing h - and w for implementation). - - scale (bool): Scale, which will be added automatically. - phase (str): 'train' or 'val'. - """ - - def __init__(self, opt): - super(RealESRGANPairedDataset, self).__init__() - self.opt = opt - self.file_client = None - self.io_backend_opt = opt['io_backend'] - # mean and std for normalizing the input images - self.mean = opt['mean'] if 'mean' in opt else None - self.std = opt['std'] if 'std' in opt else None - - self.gt_folder, self.lq_folder = opt['dataroot_gt'], opt['dataroot_lq'] - self.filename_tmpl = opt['filename_tmpl'] if 'filename_tmpl' in opt else '{}' - - # file client (lmdb io backend) - if self.io_backend_opt['type'] == 'lmdb': - self.io_backend_opt['db_paths'] = [self.lq_folder, self.gt_folder] - self.io_backend_opt['client_keys'] = ['lq', 'gt'] - self.paths = paired_paths_from_lmdb([self.lq_folder, self.gt_folder], ['lq', 'gt']) - elif 'meta_info' in self.opt and self.opt['meta_info'] is not None: - # disk backend with meta_info - # Each line in the meta_info describes the relative path to an image - with open(self.opt['meta_info']) as fin: - paths = [line.strip() for line in fin] - self.paths = [] - for path in paths: - gt_path, lq_path = path.split(', ') - gt_path = os.path.join(self.gt_folder, gt_path) - lq_path = os.path.join(self.lq_folder, lq_path) - self.paths.append(dict([('gt_path', gt_path), ('lq_path', lq_path)])) - else: - # disk backend - # it will scan the whole folder to get meta info - # it will be time-consuming for folders with too many files. 
It is recommended using an extra meta txt file - self.paths = paired_paths_from_folder([self.lq_folder, self.gt_folder], ['lq', 'gt'], self.filename_tmpl) - - def __getitem__(self, index): - if self.file_client is None: - self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt) - - scale = self.opt['scale'] - - # Load gt and lq images. Dimension order: HWC; channel order: BGR; - # image range: [0, 1], float32. - gt_path = self.paths[index]['gt_path'] - img_bytes = self.file_client.get(gt_path, 'gt') - img_gt = imfrombytes(img_bytes, float32=True) - lq_path = self.paths[index]['lq_path'] - img_bytes = self.file_client.get(lq_path, 'lq') - img_lq = imfrombytes(img_bytes, float32=True) - - # augmentation for training - if self.opt['phase'] == 'train': - gt_size = self.opt['gt_size'] - # random crop - img_gt, img_lq = paired_random_crop(img_gt, img_lq, gt_size, scale, gt_path) - # flip, rotation - img_gt, img_lq = augment([img_gt, img_lq], self.opt['use_hflip'], self.opt['use_rot']) - - # BGR to RGB, HWC to CHW, numpy to tensor - img_gt, img_lq = img2tensor([img_gt, img_lq], bgr2rgb=True, float32=True) - # normalize - if self.mean is not None or self.std is not None: - normalize(img_lq, self.mean, self.std, inplace=True) - normalize(img_gt, self.mean, self.std, inplace=True) - - return {'lq': img_lq, 'gt': img_gt, 'lq_path': lq_path, 'gt_path': gt_path} - - def __len__(self): - return len(self.paths) diff --git a/spaces/maxmax20160403/sovits5.0/whisper/normalizers/basic.py b/spaces/maxmax20160403/sovits5.0/whisper/normalizers/basic.py deleted file mode 100644 index ef8d249bfca2cd10bccdd56950504581e2598560..0000000000000000000000000000000000000000 --- a/spaces/maxmax20160403/sovits5.0/whisper/normalizers/basic.py +++ /dev/null @@ -1,71 +0,0 @@ -import re -import unicodedata - -import regex - -# non-ASCII letters that are not separated by "NFKD" normalization -ADDITIONAL_DIACRITICS = { - "œ": "oe", - "Œ": "OE", - "ø": "o", - "Ø": "O", - "æ": "ae", - "Æ": "AE", - "ß": "ss", - "ẞ": "SS", - "đ": "d", - "Đ": "D", - "ð": "d", - "Ð": "D", - "þ": "th", - "Þ": "th", - "ł": "l", - "Ł": "L", -} - - -def remove_symbols_and_diacritics(s: str, keep=""): - """ - Replace any other markers, symbols, and punctuations with a space, - and drop any diacritics (category 'Mn' and some manual mappings) - """ - return "".join( - c - if c in keep - else ADDITIONAL_DIACRITICS[c] - if c in ADDITIONAL_DIACRITICS - else "" - if unicodedata.category(c) == "Mn" - else " " - if unicodedata.category(c)[0] in "MSP" - else c - for c in unicodedata.normalize("NFKD", s) - ) - - -def remove_symbols(s: str): - """ - Replace any other markers, symbols, punctuations with a space, keeping diacritics - """ - return "".join( - " " if unicodedata.category(c)[0] in "MSP" else c for c in unicodedata.normalize("NFKC", s) - ) - - -class BasicTextNormalizer: - def __init__(self, remove_diacritics: bool = False, split_letters: bool = False): - self.clean = remove_symbols_and_diacritics if remove_diacritics else remove_symbols - self.split_letters = split_letters - - def __call__(self, s: str): - s = s.lower() - s = re.sub(r"[<\[][^>\]]*[>\]]", "", s) # remove words between brackets - s = re.sub(r"\(([^)]+?)\)", "", s) # remove words between parenthesis - s = self.clean(s).lower() - - if self.split_letters: - s = " ".join(regex.findall(r"\X", s, regex.U)) - - s = re.sub(r"\s+", " ", s) # replace any successive whitespace characters with a space - - return s diff --git 
a/spaces/merve/data-leak/public/fill-in-the-blank/style.css b/spaces/merve/data-leak/public/fill-in-the-blank/style.css deleted file mode 100644 index 726984190483443c3da0905eae281514eccc7487..0000000000000000000000000000000000000000 --- a/spaces/merve/data-leak/public/fill-in-the-blank/style.css +++ /dev/null @@ -1,737 +0,0 @@ -@media (max-width: 1100px){ - body{ - /*overflow-x: hidden;*/ - } -} - - -.tooltip { - top: -1000px; - position: absolute; - padding: 10px; - background: rgba(255, 255, 255, .8); - border: 0px solid lightgray; - - width: 300px; - font-size: 14px; - line-height: 1.4em; - background: rgba(0, 0, 0, .8); - color: #fff; - pointer-events: all !important; -} -.tooltip a{ - color: #fff !important; -} -.tooltip:hover{ -/* opacity: 1; - pointer-events: all !important; -*/} - -.tooltip-hidden{ - opacity: 0; - transition: all .3s; - transition-delay: .2s; - pointer-events: none !important; -} - -@media (max-width: 590px){ - .footend{ - margin-left: 0px; - width: 10px; - } - - - div.tooltip{ - transition: all 0s !important; - transition-delay: 0s !important; - - display: none; - position: fixed; - bottom: -1px; - width: calc(100%); - left: -1px !important; - right: -1px !important; - top: auto !important; - width: auto !important; - } -} - -svg{ - overflow: visible; -} - -.domain{ - display: none; -} - -.tick{ - display: none; -} - -.bg-tick{ - stroke: #eee; -} - -text{ - pointer-events: none; - /*fill: #fff;*/ - text-shadow: 0 1px 0 #fff, 1px 0 0 #fff, 0 -1px 0 #fff, -1px 0 0 #fff; -} - -.pair{ - width: 820px; - /*height: 550px;*/ - margin: 0px auto; - margin-top: 25px !important -} - -.nurse-name-zari-cda{ - margin-bottom: 35px; -} - -.pair > div{ - display: inline-block; - vertical-align: top; -} - -.pair .graph{ - width: 500px; -} - -.pair .options{ - width: 250px; - padding-right: 20px; -} - -.pair .warning{ - width: 250px; - /*border: 1px solid orange;*/ - /*background: #fff9e4;*/ - /*padding: 10px;*/ - margin-top: 15px; - padding-left: 0px; - font-size: 14px; - line-height: 1.25em; - opacity: 0; - transition: all .2s; -} - -.pair .reset{ - width: 58px; - /*border: 1px solid orange;*/ - /*background: #fff9e4;*/ - /*padding: 10px;*/ - margin-top: 15px; - font-size: 14px; - line-height: 1.25em; - opacity: 0; - transition: opacity .2s; - cursor: pointer; - user-select: none; - outline: 1px solid #ccc; - padding: 5px; - -} -.pair .reset span{ - position: relative; - top: -1px; - padding-right: 4px; - padding-left: 1px; - /*font-size: ;*/ -} - -.pair .reset:hover{ - background: #eee; - color: #000; - outline: 1px solid #000; -} - -.options > *{ - margin-right: 10px; -} - -.options b{ - display: block; - margin-bottom: 5px; - margin-top: 10px; -} - - - - -.flex-row{ - width: 100%; - display: flex; - justify-content: space-between; - column-gap: 10px -} - -.flex-row > *{ - flex-grow: 1; - margin-right: 0px !important; -} - -.options > *{ - margin-right: 0px; -} - -.pair textarea{ - width: 100%; -} - -.flex-row-textarea{ - display: block; -} - -@media (max-width: 820px){ - .pair{ - width: 100%; - height: auto; - max-width: 500px; - margin: 0px auto; - } - - .flex-row{ - margin-bottom: -10px; - } - - .flex-row-textarea{ - display: flex; - margin-bottom: 10px; - } - - - .pair .options{ - width: auto; - padding-right: 0px; - } - - .warning{ - display: none !important; - } - - .reset{ - display: none !important; - } - - .pair .graph{ - width: 100%; - } - - .annotations{ - display: none; - } -} - - - -.pair.difference{ - width: 1000px; - margin-left: 0px; -} - -.pair.difference 
.pair-container{ -} - -.pair .options.wide{ - width: 100%; - margin-bottom: 20px; -} -.pair .options.wide > div{ - display: inline-block; -} - -.options.wide .option-type .button{ - width: 78px !important; -} - -.options.wide .option-model .button{ - width: 40px !important; -} - -.options.wide .update.button{ - width: 80px !important; -} - -textarea{ - font-family: 'Roboto', Helvetica, sans-serif; - font-weight: 300; - line-height: 1.55em; - font-size: 16px; - font-weight: bold; - border: 1px #ccc solid; - resize: none; -} - -.button.update{ - /*height: 20px;*/ - /*position: relative;*/ - /*top: -30px;*/ - /*margin-bottom: -10px;*/ - /*vertical-align: center;*/ - margin-top: 25px; - width: 252px; - text-align: center; - font-weight: 500; -} -.button{ - display: inline-block; - outline: 1px solid #ccc; - padding: 5px; - margin-top: 10px; - margin-right: 10px; - position: relative; - top: -12px; - cursor: pointer; - user-select: none; -} - -@media (hover: hover) and (pointer: fine) { - .button:hover{ - outline-color: #000; - } -} - -@media screen and (-webkit-min-device-pixel-ratio:0) and @media (max-width: 900px) { - select, - textarea, - input { - font-size: 16px !important; - } - - textarea{ - height: 80px !important; - } -} - - -.button.active{ - background: #eee; - color: #000; - /*font-weight: 500;*/ -} - - -.button.loading i{ - opacity: 1; -} - -.button.loading{ - pointer-events: none; - /*opacity: .6;*/ -} -.p-button{ - /*position: relative;*/ - /*top: -3px;*/ - /*line-height: 10px;*/ - /*line-height: */ - display: inline-block; - margin-right: 15px; -} -.p-button-link{ - text-decoration: underline; - cursor: pointer; - padding-right: 10px; -} -.interesting-pair-alts .p-button-link{ - display: block; - text-decoration: none; -} -.interesting-pair-alts .p-button-link div{ - padding-left: 10px; - padding-right: 10px; - padding-top: 5px; - padding-bottom: 5px; - outline: 1px solid #ccc; - margin-top: 5px; - margin-bottom: 5px; - margin-left: 10px; - -} -.difference-difference-alts .p-button-link:hover div{ - outline: 1px solid #000; -} - -.difference-difference-alts .p-button-link{ - display: block; - text-decoration: none; -} -.difference-difference-alts .p-button-link div{ - padding-left: 10px; - padding-right: 10px; - padding-top: 5px; - padding-bottom: 5px; - outline: 1px solid #ccc; - margin-top: 5px; - margin-bottom: 5px; - margin-left: 10px; - -} -.difference-difference-alts .p-button-link:hover div{ - outline: 1px solid #000; -} - - -.wide .flex-row{ - width: 220px; -} - -.wide > *{ - margin-right: 40px; -} - -.wide textarea{ - position: relative; - top: 12px; -} - - -@media (max-width: 1100px){ - .pair-container-overflow{ - overflow-x: scroll; - width: 100% !important; - } - - .pair.difference{ - width: auto; - max-width: 2000px; - } - - .pair.difference .options{ - margin: 0px auto; - margin-left: max(50vh - 500px, 0px); - width: min(500px, 100%); - } - -} - -.pair-container{ - width: 1000px; -} - - - - - -.checkbox{ - display: inline-block; - position: relative; - top: -10px; - margin-left: 10px; - -} - -circle:hover{ - stroke: blue; -} - - - -.hover text{ - fill: #000; - font-weight: 300; - /*stroke-width: 2px;*/ - /*text-shadow: 0 2px 0 #000, 2px 0 0 #000, 0 -2px 0 #000, -2px 0 0 #000;*/ -} - -#graph > div{ - display: inline-block; -} - -text.tiny{ - font-size: 9px; - font-family: monospace; - /*fill: #555;*/ -} - - - - - -svg{ - overflow: visible; -} - - -input{ - font-family: monospace; - width: 900px; - overflow: hidden; - background-color: rgba(0,0,0,0); - border: 0px; 
-} - -textarea{ - font-family: monospace; - font-size: 14px; -} - -/* Hide scrollbar for Chrome, Safari and Opera */ -.top-sents::-webkit-scrollbar { - /*display: none;*/ -} - -/* Hide scrollbar for IE, Edge and Firefox */ -.top-sents { - -ms-overflow-style: none; /* IE and Edge */ - scrollbar-width: none; /* Firefox */ -} - -.sent{ - margin-top: -15px; -} - - - -.post-summary{ - display: none; -} - - -.token-container{ - text-align: center; - line-height: 2em; -} - -.token{ - display: inline-block; - padding: 5px; - margin: 10px; - margin-top: 0px; - margin-bottom: 0px; - font-size: 20px; - font-family: monospace; - outline: 1px solid #ccc; - color: #000; - cursor: pointer; - background: #fff; - border: 0px; -} - -.token:hover, .token.active{ - outline: 1px solid #000; -} - - -.xy-only, .rotate-only{ - opacity: 0; - transition: all .2s; -} - -.annotations{ - transition: opacity .2s; -} - -.is-xy .xy-only{ - opacity: 1 !important; -} -.is-rotate .rotate-only{ - opacity: 1 !important; -} - -.hamlet{ - min-height: 304px; - margin-bottom: 20px; -} - -.hamlet-edit .button{ - color: #ccc; - pointer-events: none; -} -.hamlet-edit.changed .button{ - color: #000; - pointer-events: all; -} - -@media (max-width: 500px){ - .hamlet-edit .button{ - display: block; - text-align: center; - top: 0px !important; - margin: 0px auto !important; - margin-top: 5px !important; - width: 100%; - } -} - - - -.pair .update{ - color: #ccc; - pointer-events: none; -} -.pair.changed .update{ - color: #000; - pointer-events: all; -} - - - - -.difference-difference-list{ - display: none; -} - -.pair-container{ - width: 900px; -} -.pair-container > div{ - display: inline-block; -} - - -.difference-difference textarea{ - height: 52px; -} - -.not-is-color-by .y-axis-label text, .not-is-color-by .sent-1 text, .not-is-color-by .x-axis-label{ - fill: #444 !important; -} - -.is-color-by .y-axis-label text, .is-color-by .sent-1 text, .is-color-by .x-axis-label{ - font-weight: 400; - /*text-decoration: underline;*/ -} - - - -.time-token.active path{ - stroke: #f0f; - opacity: 1; -} -.time-token.active text{ - fill: #f0f !important; - opacity: 1 !important; - font-size: 14px; -} - - -.token{ - -} - -.gender-over-time{ - width: 1100px; - margin: 0px auto; - font-size: 14px; - margin-left: -91px; -} - -.gender-over-time .tick{ - display: block; -} - -.gender-over-time .axis{ - opacity: .7; -} - -.gender-over-time .sentence{ - /*position: relative;*/ - width: 32%; -} - -.gender-over-time .sentence .sentence-title{ - right: 42px; - position: relative; - text-align: right; - font-family: monospace; - -} -.gender-over-time .sentence.is-bear .sentence-title{ - /*text-align: center;*/ - right: 115px; -} - -.gender-over-time .g-caption{ - line-height: 18px; - margin-bottom: 30px; - margin-top: 5px; - width: 290px; - font-size: 13px; - left: 365px; - position: relative; -} - -@media (max-width: 1100px){ - .gender-over-time{ - width: 100%; - margin-left: 0px; - max-width: 500px; - margin: 0px auto; - } - - .gender-over-time .sentence{ - width: 100% !important; - margin-bottom: 20px; - } - - .gender-over-time .g-caption{ - left: 0px; - width: 100%; - } -} - -.time-token text{ - font-family: monospace; - pointer-events: all !important; - cursor: default; -} - - - -img[src*="img/wiki-years.png"] { - width: 300px; -} - - -#more-explorables{ - margin-top: 100px; -} - - - - -/*html{ - font-smooth: never; - -webkit-font-smoothing: none; - background: transparent; -} - -path{ - display: none; -}*/ - - -button { - display: inline-block; - border: 
none; - margin: 0; - text-decoration: none; - background: #fff; - color: #ffffff; - font-size: 1em; - cursor: pointer; - text-align: center; - -webkit-appearance: none; - -moz-appearance: none; - font-family : inherit; - -} - -button:active { - transform: scale(0.99); -} - - -info{ - font-weight: 300; - font-size: 12px; - line-height: 0em; - position: relative; - left: 7px; - top: -1px; - cursor: default; -} -info:hover{ - font-weight: 600; -} \ No newline at end of file diff --git a/spaces/merve/hidden-bias/source/anonymization/style-graph-scroll.css b/spaces/merve/hidden-bias/source/anonymization/style-graph-scroll.css deleted file mode 100644 index 7680e8c43222b6993d2bedfe43a682236680541e..0000000000000000000000000000000000000000 --- a/spaces/merve/hidden-bias/source/anonymization/style-graph-scroll.css +++ /dev/null @@ -1,160 +0,0 @@ -/** { border: 1px solid #f00; }*/ - - -#container{ - position: relative; - width: auto; - margin-left: -25px; - /*margin-bottom: 100px;*/ -} - -#sections{ - width: 330px; - pointer-events: none; -} - -#sections > div{ - background: white; - opacity: .2; - margin-bottom: 400px; - line-height: 1.4em; - transition: opacity .2s; - pointer-events: all; -} -#sections > div:last-child{ - height: 480px; - margin-bottom: 0px; -} -#sections > div.graph-scroll-active{ - opacity: 1; -} - -#graph{ - margin-left: 40px; - width: 500px; - position: -webkit-sticky; - position: sticky; - top: 0px; - float: right; - height: 580px; -} - -.slider-outer { - display: block; - max-width: 300px; -} - -@media (max-width: 925px) { - #container{ - margin-left: 0px; - } - - #graph{ - width: 100%; - float: none; - max-width: 500px; - margin: 0px auto; - } - - #graph > div{ - position: relative; - left:12px; - } - - #sections{ - width: auto; - position: relative; - margin: 0px auto; - } - - #sections > div{ - background: rgba(255,255,255,.8); - padding: 10px; - border-top: 1px solid; - border-bottom: 1px solid; - margin-bottom: 80vh; - width: calc(100vw - 20px); - margin-left: -5px; - } - - #sections > div > *{ - max-width: 750px; - } - - #sections > div:first-child{ - opacity: 1; - margin-top: -260px; - } - - #sections > div:last-child{ - height: auto; - } - - #sections h3{ - margin-top: .5em; - } - - /* Adjust buttons for mobile. */ - - .button-container{ - text-align: center; - left:0px; - } - - /* Adjust sliders for mobile. */ - input[type="range" i] { - width: 280px; - } - .slider-label-container{ - width: 145px; - /* display: inline-block; */ - } - - .slide-container-heads-prob, .slide-container-population { - text-align: center; - } - - .slider-container { - margin-bottom: 5px; - text-align: center; - width: 300px; - /* display:inline-block; */ - } - - .slider-outer { - text-align: center; - display: flex; - max-width: 300px; - } - - .headsProb, .population { - margin-left: 15px; - } - - .slide-container-population { - margin-bottom: -10px; - } - - .pointer div { - left: 10px; - top: 37px; - } - - /* Adjust post summary test for mobile. 
*/ - .post-summary{ - margin-left: 8px; - margin-bottom: 60px; - margin-top: 40px; - } - -} - -#graph > div{ - margin: 20 35px; -} - - -#end{ - height: 15vh; -} - diff --git a/spaces/merve/uncertainty-calibration/source/third_party/regl.min.js b/spaces/merve/uncertainty-calibration/source/third_party/regl.min.js deleted file mode 100644 index 7ecf11321eda67a76e019d6881f42b52f3d39c78..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/source/third_party/regl.min.js +++ /dev/null @@ -1,171 +0,0 @@ -(function(Z,ka){"object"===typeof exports&&"undefined"!==typeof module?module.exports=ka():"function"===typeof define&&define.amd?define(ka):Z.createREGL=ka()})(this,function(){function Z(a,b){this.id=Db++;this.type=a;this.data=b}function ka(a){if(0===a.length)return[];var b=a.charAt(0),c=a.charAt(a.length-1);if(1>>=b;c=(255>>=c;b|=c;c=(15>>=c;b|=c;c=(3>>c>>1}function hb(){function a(a){a:{for(var b=16;268435456>=b;b*=16)if(a<=b){a=b;break a}a=0}b=c[gb(a)>>2];return 0>2].push(a)}var c=R(8,function(){return[]});return{alloc:a,free:b,allocType:function(b,c){var d=null;switch(b){case 5120:d=new Int8Array(a(c),0,c);break;case 5121:d=new Uint8Array(a(c),0,c);break;case 5122:d=new Int16Array(a(2*c),0,c);break;case 5123:d=new Uint16Array(a(2*c),0,c);break;case 5124:d=new Int32Array(a(4*c),0,c);break;case 5125:d=new Uint32Array(a(4*c),0,c);break;case 5126:d=new Float32Array(a(4*c),0,c);break;default:return null}return d.length!== -c?d.subarray(0,c):d},freeType:function(a){b(a.buffer)}}}function la(a){return!!a&&"object"===typeof a&&Array.isArray(a.shape)&&Array.isArray(a.stride)&&"number"===typeof a.offset&&a.shape.length===a.stride.length&&(Array.isArray(a.data)||O(a.data))}function ib(a,b,c,e,f,d){for(var q=0;qe&&(e=d.buffer.byteLength,5123===k?e>>=1:5125===k&&(e>>=2));d.vertCount=e;e=g;0>g&&(e=4,g=d.buffer.dimension,1===g&&(e=0),2===g&&(e=1),3===g&&(e=4));d.primType=e}function q(a){e.elementsCount--;delete n[a.id];a.buffer.destroy();a.buffer=null}var n={},v=0,k={uint8:5121,uint16:5123};b.oes_element_index_uint&&(k.uint32=5125);f.prototype.bind=function(){this.buffer.bind()};var u=[];return{create:function(a, -b){function l(a){if(a)if("number"===typeof a)g(a),h.primType=4,h.vertCount=a|0,h.type=5121;else{var b=null,c=35044,e=-1,f=-1,m=0,n=0;if(Array.isArray(a)||O(a)||la(a))b=a;else if("data"in a&&(b=a.data),"usage"in a&&(c=nb[a.usage]),"primitive"in a&&(e=Ka[a.primitive]),"count"in a&&(f=a.count|0),"type"in a&&(n=k[a.type]),"length"in a)m=a.length|0;else if(m=f,5123===n||5122===n)m*=2;else if(5125===n||5124===n)m*=4;d(h,b,c,e,f,m,n)}else g(),h.primType=4,h.vertCount=0,h.type=5121;return l}var g=c.create(null, -34963,!0),h=new f(g._buffer);e.elementsCount++;l(a);l._reglType="elements";l._elements=h;l.subdata=function(a,b){g.subdata(a,b);return l};l.destroy=function(){q(h)};return l},createStream:function(a){var b=u.pop();b||(b=new f(c.create(null,34963,!0,!1)._buffer));d(b,a,35040,-1,-1,0,0);return b},destroyStream:function(a){u.push(a)},getElements:function(a){return"function"===typeof a&&a._elements instanceof f?a._elements:null},clear:function(){I(n).forEach(q)}}}function ob(a){for(var b=G.allocType(5123, -a.length),c=0;c>>31<<15,d=(e<<1>>>24)-127,e=e>>13&1023;b[c]=-24>d?f:-14>d?f+(e+1024>>-14-d):15>=e,c.height>>=e,x(c,d[e]),a.mipmask|=1<b;++b)a.images[b]=null;return a}function ya(a){for(var b=a.images,c=0;cb){for(var c=0;c=--this.refCount&&F(this)}});q.profile&&(d.getTotalTextureSize=function(){var a=0;Object.keys(ea).forEach(function(b){a+=ea[b].stats.size});return 
a});return{create2D:function(b,c){function e(a,b){var c=f.texInfo;w.call(c);var d=ma();"number"===typeof a?"number"===typeof b?p(d,a|0,b|0):p(d,a|0,a|0):a?(H(c,a),P(d,a)):p(d,1,1);c.genMipmaps&&(d.mipmask=(d.width<<1)-1);f.mipmask=d.mipmask;v(f, -d);f.internalformat=d.internalformat;e.width=d.width;e.height=d.height;T(f);t(d,3553);M(c,3553);wa();ya(d);q.profile&&(f.stats.size=La(f.internalformat,f.type,d.width,d.height,c.genMipmaps,!1));e.format=ca[f.internalformat];e.type=K[f.type];e.mag=Fa[c.magFilter];e.min=pa[c.minFilter];e.wrapS=qa[c.wrapS];e.wrapT=qa[c.wrapT];return e}var f=new y(3553);ea[f.id]=f;d.textureCount++;e(b,c);e.subimage=function(a,b,c,d){b|=0;c|=0;d|=0;var y=g();v(y,f);y.width=0;y.height=0;x(y,a);y.width=y.width||(f.width>> -d)-b;y.height=y.height||(f.height>>d)-c;T(f);l(y,3553,b,c,d);wa();h(y);return e};e.resize=function(b,c){var d=b|0,g=c|0||d;if(d===f.width&&g===f.height)return e;e.width=f.width=d;e.height=f.height=g;T(f);for(var y=0;f.mipmask>>y;++y){var h=d>>y,z=g>>y;if(!h||!z)break;a.texImage2D(3553,y,f.format,h,z,0,f.format,f.type,null)}wa();q.profile&&(f.stats.size=La(f.internalformat,f.type,d,g,!1,!1));return e};e._reglType="texture2d";e._texture=f;q.profile&&(e.stats=f.stats);e.destroy=function(){f.decRef()}; -return e},createCube:function(b,c,e,f,n,r){function m(a,b,c,d,e,f){var g,da=A.texInfo;w.call(da);for(g=0;6>g;++g)F[g]=ma();if("number"===typeof a||!a)for(a=a|0||1,g=0;6>g;++g)p(F[g],a,a);else if("object"===typeof a)if(b)P(F[0],a),P(F[1],b),P(F[2],c),P(F[3],d),P(F[4],e),P(F[5],f);else if(H(da,a),k(A,a),"faces"in a)for(a=a.faces,g=0;6>g;++g)v(F[g],A),P(F[g],a[g]);else for(g=0;6>g;++g)P(F[g],a);v(A,F[0]);A.mipmask=da.genMipmaps?(F[0].width<<1)-1:F[0].mipmask;A.internalformat=F[0].internalformat;m.width= -F[0].width;m.height=F[0].height;T(A);for(g=0;6>g;++g)t(F[g],34069+g);M(da,34067);wa();q.profile&&(A.stats.size=La(A.internalformat,A.type,m.width,m.height,da.genMipmaps,!0));m.format=ca[A.internalformat];m.type=K[A.type];m.mag=Fa[da.magFilter];m.min=pa[da.minFilter];m.wrapS=qa[da.wrapS];m.wrapT=qa[da.wrapT];for(g=0;6>g;++g)ya(F[g]);return m}var A=new y(34067);ea[A.id]=A;d.cubeCount++;var F=Array(6);m(b,c,e,f,n,r);m.subimage=function(a,b,c,d,e){c|=0;d|=0;e|=0;var f=g();v(f,A);f.width=0;f.height=0; -x(f,b);f.width=f.width||(A.width>>e)-c;f.height=f.height||(A.height>>e)-d;T(A);l(f,34069+a,c,d,e);wa();h(f);return m};m.resize=function(b){b|=0;if(b!==A.width){m.width=A.width=b;m.height=A.height=b;T(A);for(var c=0;6>c;++c)for(var d=0;A.mipmask>>d;++d)a.texImage2D(34069+c,d,A.format,b>>d,b>>d,0,A.format,A.type,null);wa();q.profile&&(A.stats.size=La(A.internalformat,A.type,m.width,m.height,!1,!0));return m}};m._reglType="textureCube";m._texture=A;q.profile&&(m.stats=A.stats);m.destroy=function(){A.decRef()}; -return m},clear:function(){for(var b=0;bc;++c)if(0!==(b.mipmask&1<>c,b.height>>c,0,b.internalformat, -b.type,null);else for(var d=0;6>d;++d)a.texImage2D(34069+d,c,b.internalformat,b.width>>c,b.height>>c,0,b.internalformat,b.type,null);M(b.texInfo,b.target)})},refresh:function(){for(var b=0;bd;++d){for(p= -0;pa;++a)c[a].resize(d);b.width=b.height=d;return b},_reglType:"framebufferCube",destroy:function(){c.forEach(function(a){a.destroy()})}})},clear:function(){I(M).forEach(r)}, -restore:function(){t.cur=null;t.next=null;t.dirty=!0;I(M).forEach(function(b){b.framebuffer=a.createFramebuffer();p(b)})}})}function $a(){this.w=this.z=this.y=this.x=this.state=0;this.buffer=null;this.size=0;this.normalized=!1;this.type=5126;this.divisor=this.stride=this.offset=0}function 
Sb(a,b,c,e,f,d,q){function n(a){if(a!==r.currentVAO){var c=b.oes_vertex_array_object;a?c.bindVertexArrayOES(a.vao):c.bindVertexArrayOES(null);r.currentVAO=a}}function v(c){if(c!==r.currentVAO){if(c)c.bindAttrs(); -else{for(var d=b.angle_instanced_arrays,e=0;e=m.byteLength?l.subdata(m): -(l.destroy(),c.buffers[h]=null));c.buffers[h]||(l=c.buffers[h]=f.create(p,34962,!1,!0));k.buffer=f.getBuffer(l);k.size=k.buffer.dimension|0;k.normalized=!1;k.type=k.buffer.dtype;k.offset=0;k.stride=0;k.divisor=0;k.state=1;a[h]=1}else f.getBuffer(p)?(k.buffer=f.getBuffer(p),k.size=k.buffer.dimension|0,k.normalized=!1,k.type=k.buffer.dtype,k.offset=0,k.stride=0,k.divisor=0,k.state=1):f.getBuffer(p.buffer)?(k.buffer=f.getBuffer(p.buffer),k.size=(+p.size||k.buffer.dimension)|0,k.normalized=!!p.normalized|| -!1,k.type="type"in p?Ja[p.type]:k.buffer.dtype,k.offset=(p.offset||0)|0,k.stride=(p.stride||0)|0,k.divisor=(p.divisor||0)|0,k.state=1):"x"in p&&(k.x=+p.x||0,k.y=+p.y||0,k.z=+p.z||0,k.w=+p.w||0,k.state=2)}for(l=0;la&&(a=b.stats.uniformsCount)});return a},c.getMaxAttributesCount=function(){var a=0;x.forEach(function(b){b.stats.attributesCount>a&&(a=b.stats.attributesCount)});return a});return{clear:function(){var b=a.deleteShader.bind(a);I(k).forEach(b);k={};I(u).forEach(b); -u={};x.forEach(function(b){a.deleteProgram(b.program)});x.length=0;m={};c.shaderCount=0},program:function(b,d,e,f){var l=m[d];l||(l=m[d]={});var q=l[b];if(q&&(q.refCount++,!f))return q;var w=new n(d,b);c.shaderCount++;v(w,e,f);q||(l[b]=w);x.push(w);return L(w,{destroy:function(){w.refCount--;if(0>=w.refCount){a.deleteProgram(w.program);var b=x.indexOf(w);x.splice(b,1);c.shaderCount--}0>=l[w.vertId].refCount&&(a.deleteShader(u[w.vertId]),delete u[w.vertId],delete m[w.fragId][w.vertId]);Object.keys(m[w.fragId]).length|| -(a.deleteShader(k[w.fragId]),delete k[w.fragId],delete m[w.fragId])}})},restore:function(){k={};u={};for(var a=0;a"+b+"?"+e+".constant["+b+"]:0;"}).join(""),"}}else{","if(",g,"(",e,".buffer)){",k,"=",f,".createStream(",34962,",",e,".buffer);","}else{",k,"=",f,".getBuffer(",e,".buffer);","}",m,'="type" in ',e,"?",z.glTypes,"[",e,".type]:",k,".dtype;",B.normalized,"=!!", -e,".normalized;");d("size");d("offset");d("stride");d("divisor");c("}}");c.exit("if(",B.isStream,"){",f,".destroyStream(",k,");","}");return B})});return g}function F(a){var b=a["static"],c=a.dynamic,d={};Object.keys(b).forEach(function(a){var c=b[a];d[a]=w(function(a,b){return"number"===typeof c||"boolean"===typeof c?""+c:a.link(c)})});Object.keys(c).forEach(function(a){var b=c[a];d[a]=K(b,function(a,c){return a.invoke(c,b)})});return d}function A(a,b,d,e,f){function g(a){var b=p[a];b&&(ja[a]=b)} -var m=O(a,b),l=G(a,f),p=C(a,l,f),X=M(a,f),ja=y(a,f),q=H(a,f,m);g("viewport");g(h("scissor.box"));var n=0>1)",u],");")}function b(){c(t,".drawArraysInstancedANGLE(",[n,q,r,u],");")}p&&"null"!==p?v?a():(c("if(",p,"){"),a(),c("}else{"),b(),c("}")):b()}function g(){function a(){c(l+".drawElements("+[n,r,x,q+"<<(("+x+"-5121)>>1)"]+");")}function b(){c(l+".drawArrays("+[n,q,r]+");")}p&&"null"!==p?v?a():(c("if(",p,"){"),a(),c("}else{"),b(),c("}")):b()}var h=a.shared,l=h.gl,k=h.draw,m=d.draw, -p=function(){var e=m.elements,f=b;if(e){if(e.contextDep&&d.contextDynamic||e.propDep)f=c;e=e.append(a,f);m.elementsActive&&f("if("+e+")"+l+".bindBuffer(34963,"+e+".buffer.buffer);")}else e=f.def(),f(e,"=",k,".","elements",";","if(",e,"){",l,".bindBuffer(",34963,",",e,".buffer.buffer);}","else 
if(",h.vao,".currentVAO){",e,"=",a.shared.elements+".getElements("+h.vao,".currentVAO.elements);",na?"":"if("+e+")"+l+".bindBuffer(34963,"+e+".buffer.buffer);","}");return e}(),n=e("primitive"),q=e("offset"), -r=function(){var e=m.count,f=b;if(e){if(e.contextDep&&d.contextDynamic||e.propDep)f=c;e=e.append(a,f)}else e=f.def(k,".","count");return e}();if("number"===typeof r){if(0===r)return}else c("if(",r,"){"),c.exit("}");var u,t;W&&(u=e("instances"),t=a.instancing);var x=p+".type",v=m.elements&&xa(m.elements)&&!m.vaoActive;W&&("number"!==typeof u||0<=u)?"string"===typeof u?(c("if(",u,">0){"),f(),c("}else if(",u,"<0){"),g(),c("}")):f():g()}function ca(a,b,c,d,e){b=P();e=b.proc("body",e);W&&(b.instancing= -e.def(b.shared.extensions,".angle_instanced_arrays"));a(b,e,c,d);return b.compile().body}function Z(a,b,c,d){N(a,b);c.useVAO?c.drawVAO?b(a.shared.vao,".setVAO(",c.drawVAO.append(a,b),");"):b(a.shared.vao,".setVAO(",a.shared.vao,".targetVAO);"):(b(a.shared.vao,".setVAO(null);"),ga(a,b,c,d.attributes,function(){return!0}));Q(a,b,c,d.uniforms,function(){return!0},!1);U(a,b,b,c)}function Fa(a,b){var c=a.proc("draw",1);N(a,c);ia(a,c,b.context);S(a,c,b.framebuffer);Aa(a,c,b);I(a,c,b.state);E(a,c,b,!1,!0); -var d=b.shader.progVar.append(a,c);c(a.shared.gl,".useProgram(",d,".program);");if(b.shader.program)Z(a,c,b,b.shader.program);else{c(a.shared.vao,".setVAO(null);");var e=a.global.def("{}"),f=c.def(d,".id"),g=c.def(e,"[",f,"]");c(a.cond(g).then(g,".call(this,a0);")["else"](g,"=",e,"[",f,"]=",a.link(function(c){return ca(Z,a,b,c,1)}),"(",d,");",g,".call(this,a0);"))}0=--this.refCount&&q(this)};f.profile&&(e.getTotalRenderbufferSize=function(){var a=0;Object.keys(u).forEach(function(b){a+=u[b].stats.size});return a});return{create:function(b, -c){function l(b,c){var d=0,e=0,k=32854;"object"===typeof b&&b?("shape"in b?(e=b.shape,d=e[0]|0,e=e[1]|0):("radius"in b&&(d=e=b.radius|0),"width"in b&&(d=b.width|0),"height"in b&&(e=b.height|0)),"format"in b&&(k=n[b.format])):"number"===typeof b?(d=b|0,e="number"===typeof c?c|0:d):b||(d=e=1);if(d!==g.width||e!==g.height||k!==g.format)return l.width=g.width=d,l.height=g.height=e,g.format=k,a.bindRenderbuffer(36161,g.renderbuffer),a.renderbufferStorage(36161,k,d,e),f.profile&&(g.stats.size=Q[g.format]* -g.width*g.height),l.format=v[g.format],l}var g=new d(a.createRenderbuffer());u[g.id]=g;e.renderbufferCount++;l(b,c);l.resize=function(b,c){var d=b|0,e=c|0||d;if(d===g.width&&e===g.height)return l;l.width=g.width=d;l.height=g.height=e;a.bindRenderbuffer(36161,g.renderbuffer);a.renderbufferStorage(36161,g.format,d,e);f.profile&&(g.stats.size=Q[g.format]*g.width*g.height);return l};l._reglType="renderbuffer";l._renderbuffer=g;f.profile&&(l.stats=g.stats);l.destroy=function(){g.decRef()};return l},clear:function(){I(u).forEach(q)}, -restore:function(){I(u).forEach(function(b){b.renderbuffer=a.createRenderbuffer();a.bindRenderbuffer(36161,b.renderbuffer);a.renderbufferStorage(36161,b.format,b.width,b.height)});a.bindRenderbuffer(36161,null)}}},Za=[];Za[6408]=4;Za[6407]=3;var Ra=[];Ra[5121]=1;Ra[5126]=4;Ra[36193]=2;var Da=["x","y","z","w"],Xb="blend.func blend.equation stencil.func stencil.opFront stencil.opBack sample.coverage viewport scissor.box polygonOffset.offset".split(" "),Ga={0:0,1:1,zero:0,one:1,"src color":768,"one minus src color":769, -"src alpha":770,"one minus src alpha":771,"dst color":774,"one minus dst color":775,"dst alpha":772,"one minus dst alpha":773,"constant color":32769,"one minus constant color":32770,"constant alpha":32771,"one minus 
constant alpha":32772,"src alpha saturate":776},ab={never:512,less:513,"<":513,equal:514,"=":514,"==":514,"===":514,lequal:515,"<=":515,greater:516,">":516,notequal:517,"!=":517,"!==":517,gequal:518,">=":518,always:519},Ta={0:0,zero:0,keep:7680,replace:7681,increment:7682,decrement:7683, -"increment wrap":34055,"decrement wrap":34056,invert:5386},zb={cw:2304,ccw:2305},Ab=new J(!1,!1,!1,function(){}),$b=function(a,b){function c(){this.endQueryIndex=this.startQueryIndex=-1;this.sum=0;this.stats=null}function e(a,b,d){var e=q.pop()||new c;e.startQueryIndex=a;e.endQueryIndex=b;e.sum=0;e.stats=d;n.push(e)}if(!b.ext_disjoint_timer_query)return null;var f=[],d=[],q=[],n=[],v=[],k=[];return{beginQuery:function(a){var c=f.pop()||b.ext_disjoint_timer_query.createQueryEXT();b.ext_disjoint_timer_query.beginQueryEXT(35007, -c);d.push(c);e(d.length-1,d.length,a)},endQuery:function(){b.ext_disjoint_timer_query.endQueryEXT(35007)},pushScopeStats:e,update:function(){var a,c;a=d.length;if(0!==a){k.length=Math.max(k.length,a+1);v.length=Math.max(v.length,a+1);v[0]=0;var e=k[0]=0;for(c=a=0;c=E.length&&e()}var c=Bb(E,a);E[c]=b}}}function k(){var a=Q.viewport,b=Q.scissor_box;a[0]=a[1]=b[0]=b[1]=0;H.viewportWidth=H.framebufferWidth=H.drawingBufferWidth=a[2]=b[2]=l.drawingBufferWidth;H.viewportHeight=H.framebufferHeight=H.drawingBufferHeight=a[3]=b[3]=l.drawingBufferHeight}function u(){H.tick+=1;H.time=x();k();I.procs.poll()}function m(){A.refresh();k();I.procs.refresh();t&&t.update()}function x(){return(Cb()- -G)/1E3}a=Hb(a);if(!a)return null;var l=a.gl,g=l.getContextAttributes();l.isContextLost();var h=Ib(l,a);if(!h)return null;var r=Eb(),p={vaoCount:0,bufferCount:0,elementsCount:0,framebufferCount:0,shaderCount:0,textureCount:0,cubeCount:0,renderbufferCount:0,maxTextureUnits:0},w=h.extensions,t=$b(l,w),G=Cb(),C=l.drawingBufferWidth,J=l.drawingBufferHeight,H={tick:0,time:0,viewportWidth:C,viewportHeight:J,framebufferWidth:C,framebufferHeight:J,drawingBufferWidth:C,drawingBufferHeight:J,pixelRatio:a.pixelRatio}, -C={elements:null,primitive:4,count:-1,offset:0,instances:-1},M=Yb(l,w),y=Jb(l,p,a,function(a){return K.destroyBuffer(a)}),T=Kb(l,w,y,p),K=Sb(l,w,M,p,y,T,C),F=Tb(l,r,p,a),A=Nb(l,w,M,function(){I.procs.poll()},H,p,a),O=Zb(l,w,M,p,a),S=Rb(l,w,M,A,O,p),I=Wb(l,r,w,M,y,T,A,S,{},K,F,C,H,t,a),r=Ub(l,S,I.procs.poll,H,g,w,M),Q=I.next,N=l.canvas,E=[],R=[],U=[],Z=[a.onDestroy],ca=null;N&&(N.addEventListener("webglcontextlost",f,!1),N.addEventListener("webglcontextrestored",d,!1));var aa=S.setFBO=q({framebuffer:Y.define.call(null, -1,"framebuffer")});m();g=L(q,{clear:function(a){if("framebuffer"in a)if(a.framebuffer&&"framebufferCube"===a.framebuffer_reglType)for(var b=0;6>b;++b)aa(L({framebuffer:a.framebuffer.faces[b]},a),n);else aa(a,n);else n(null,a)},prop:Y.define.bind(null,1),context:Y.define.bind(null,2),"this":Y.define.bind(null,3),draw:q({}),buffer:function(a){return y.create(a,34962,!1,!1)},elements:function(a){return T.create(a,!1)},texture:A.create2D,cube:A.createCube,renderbuffer:O.create,framebuffer:S.create,framebufferCube:S.createCube, -vao:K.createVAO,attributes:g,frame:v,on:function(a,b){var c;switch(a){case "frame":return v(b);case "lost":c=R;break;case "restore":c=U;break;case "destroy":c=Z}c.push(b);return{cancel:function(){for(var a=0;a, - VariantProps {} - -function Badge({ className, variant, ...props }: BadgeProps) { - return ( -
          - ) -} - -export { Badge, badgeVariants } diff --git a/spaces/mrrandom123/Book_recommendation/setup.py b/spaces/mrrandom123/Book_recommendation/setup.py deleted file mode 100644 index cd0347e9982fe9576cc10bff73a3bfc9f2e12d25..0000000000000000000000000000000000000000 --- a/spaces/mrrandom123/Book_recommendation/setup.py +++ /dev/null @@ -1,26 +0,0 @@ -from setuptools import setup - -with open("README.md", "r", encoding="utf-8") as f: - long_description = f.read() - -## edit below variables as per your requirements - -REPO_NAME = "Books-Recommender-System-Using-Machine-Learning" -AUTHOR_USER_NAME = "entbappy" -SRC_REPO = "src" -LIST_OF_REQUIREMENTS = ['streamlit', 'numpy'] - - -setup( - name=SRC_REPO, - version="0.0.1", - author=AUTHOR_USER_NAME, - description="A small package for Movie Recommender System", - long_description=long_description, - long_description_content_type="text/markdown", - url=f"https://github.com/{AUTHOR_USER_NAME}/{REPO_NAME}", - author_email="entbappy73@gmail.com", - packages=[SRC_REPO], - license="MIT", - python_requires=">=3.7", - install_requires=LIST_OF_REQUIREMENTS -) \ No newline at end of file diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_data_from_w2v.py b/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_data_from_w2v.py deleted file mode 100644 index 66954ea5c9f3f3330e3230860229c7c4046a5d6a..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_data_from_w2v.py +++ /dev/null @@ -1,56 +0,0 @@ -import kaldi_io -import numpy as np -import os - - -def get_parser(): - import argparse - parser = argparse.ArgumentParser() - parser.add_argument("w2v_dir", help="wav2vec feature and text directory") - parser.add_argument("tar_root", help="output data directory in kaldi's format") - parser.add_argument("split", help="name of the subset") - parser.add_argument("--label", default="", help="if specified, copy labels too") - return parser - -def main(): - parser = get_parser() - args = parser.parse_args() - - tar_dir = os.path.join(args.tar_root, args.split) - os.makedirs(tar_dir, exist_ok=True) - - lengths_path = os.path.join(args.w2v_dir, f"{args.split}.lengths") - with open(lengths_path) as f: - lengths = [int(line.rstrip()) for line in f] - offsets = [0] + np.cumsum(lengths[:-1]).tolist() - feats = np.load( - os.path.join(args.w2v_dir, f"{args.split}.npy"), - mmap_mode="r" - ) - assert feats.shape[0] == sum(lengths), \ - f"lengths mismatch {feats.shape[0]} != {sum(lengths)}" - - ark_path = os.path.join(tar_dir, "feats.ark") - scp_path = os.path.join(tar_dir, "feats.scp") - wspec = f"ark:| copy-feats --compress=true ark:- ark,scp:{ark_path},{scp_path}" - with kaldi_io.open_or_fd(wspec, "wb") as f: - for idx, (offset, length) in enumerate(zip(offsets, lengths)): - feat = feats[offset:offset+length] - kaldi_io.write_mat(f, feat, key=f"utt{idx:010d}") - - u2s_path = os.path.join(tar_dir, "utt2spk") - s2u_path = os.path.join(tar_dir, "spk2utt") - with open(u2s_path, "w") as f_u2s, open(s2u_path, "w") as f_s2u: - for idx in range(len(lengths)): - f_u2s.write(f"utt{idx:010d} utt{idx:010d}\n") - f_s2u.write(f"utt{idx:010d} utt{idx:010d}\n") - - if bool(args.label): - lab_path = os.path.join(args.w2v_dir, f"{args.split}.{args.label}") - txt_path = os.path.join(tar_dir, "text") - with open(lab_path) as f_lab, open(txt_path, "w") as f_txt: - for idx, line in enumerate(f_lab): - 
f_txt.write(f"utt{idx:010d} {line}") - -if __name__ == "__main__": - main() diff --git a/spaces/mshukor/UnIVAL/fairseq/tests/test_valid_subset_checks.py b/spaces/mshukor/UnIVAL/fairseq/tests/test_valid_subset_checks.py deleted file mode 100644 index 3e9191bda66fccfebba34920f88bf7b1efea5f7e..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/tests/test_valid_subset_checks.py +++ /dev/null @@ -1,138 +0,0 @@ -import os -import shutil -import tempfile -import unittest - -from fairseq import options -from fairseq.dataclass.utils import convert_namespace_to_omegaconf -from fairseq.data.data_utils import raise_if_valid_subsets_unintentionally_ignored -from .utils import create_dummy_data, preprocess_lm_data, train_language_model - - -def make_lm_config( - data_dir=None, - extra_flags=None, - task="language_modeling", - arch="transformer_lm_gpt2_tiny", -): - task_args = [task] - if data_dir is not None: - task_args += [data_dir] - train_parser = options.get_training_parser() - train_args = options.parse_args_and_arch( - train_parser, - [ - "--task", - *task_args, - "--arch", - arch, - "--optimizer", - "adam", - "--lr", - "0.0001", - "--max-tokens", - "500", - "--tokens-per-sample", - "500", - "--save-dir", - data_dir, - "--max-epoch", - "1", - ] - + (extra_flags or []), - ) - cfg = convert_namespace_to_omegaconf(train_args) - return cfg - - -def write_empty_file(path): - with open(path, "w"): - pass - assert os.path.exists(path) - - -class TestValidSubsetsErrors(unittest.TestCase): - """Test various filesystem, clarg combinations and ensure that error raising happens as expected""" - - def _test_case(self, paths, extra_flags): - with tempfile.TemporaryDirectory() as data_dir: - [ - write_empty_file(os.path.join(data_dir, f"{p}.bin")) - for p in paths + ["train"] - ] - cfg = make_lm_config(data_dir, extra_flags=extra_flags) - raise_if_valid_subsets_unintentionally_ignored(cfg) - - def test_default_raises(self): - with self.assertRaises(ValueError): - self._test_case(["valid", "valid1"], []) - with self.assertRaises(ValueError): - self._test_case( - ["valid", "valid1", "valid2"], ["--valid-subset", "valid,valid1"] - ) - - def partially_specified_valid_subsets(self): - with self.assertRaises(ValueError): - self._test_case( - ["valid", "valid1", "valid2"], ["--valid-subset", "valid,valid1"] - ) - # Fix with ignore unused - self._test_case( - ["valid", "valid1", "valid2"], - ["--valid-subset", "valid,valid1", "--ignore-unused-valid-subsets"], - ) - - def test_legal_configs(self): - self._test_case(["valid"], []) - self._test_case(["valid", "valid1"], ["--ignore-unused-valid-subsets"]) - self._test_case(["valid", "valid1"], ["--combine-val"]) - self._test_case(["valid", "valid1"], ["--valid-subset", "valid,valid1"]) - self._test_case(["valid", "valid1"], ["--valid-subset", "valid1"]) - self._test_case( - ["valid", "valid1"], ["--combine-val", "--ignore-unused-valid-subsets"] - ) - self._test_case( - ["valid1"], ["--valid-subset", "valid1"] - ) # valid.bin doesn't need to be ignored. 
- - def test_disable_validation(self): - self._test_case([], ["--disable-validation"]) - self._test_case(["valid", "valid1"], ["--disable-validation"]) - - def test_dummy_task(self): - cfg = make_lm_config(task="dummy_lm") - raise_if_valid_subsets_unintentionally_ignored(cfg) - - def test_masked_dummy_task(self): - cfg = make_lm_config(task="dummy_masked_lm") - raise_if_valid_subsets_unintentionally_ignored(cfg) - - -class TestCombineValidSubsets(unittest.TestCase): - def _train(self, extra_flags): - with self.assertLogs() as logs: - with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir: - create_dummy_data(data_dir, num_examples=20) - preprocess_lm_data(data_dir) - - shutil.copyfile(f"{data_dir}/valid.bin", f"{data_dir}/valid1.bin") - shutil.copyfile(f"{data_dir}/valid.idx", f"{data_dir}/valid1.idx") - train_language_model( - data_dir, - "transformer_lm", - ["--max-update", "0", "--log-format", "json"] + extra_flags, - run_validation=False, - ) - return [x.message for x in logs.records] - - def test_combined(self): - flags = ["--combine-valid-subsets"] - logs = self._train(flags) - assert any(["valid1" in x for x in logs]) # loaded 100 examples from valid1 - assert not any(["valid1_ppl" in x for x in logs]) # metrics are combined - - def test_subsets(self): - flags = ["--valid-subset", "valid,valid1"] - logs = self._train(flags) - assert any(["valid_ppl" in x for x in logs]) # loaded 100 examples from valid1 - assert any(["valid1_ppl" in x for x in logs]) # metrics are combined diff --git a/spaces/msmilauer/AutoGPT-duplicated2/autogpt/setup.py b/spaces/msmilauer/AutoGPT-duplicated2/autogpt/setup.py deleted file mode 100644 index bfa68201b62bf67230a61fb1ecb00d1ab0ef0631..0000000000000000000000000000000000000000 --- a/spaces/msmilauer/AutoGPT-duplicated2/autogpt/setup.py +++ /dev/null @@ -1,77 +0,0 @@ -"""Set up the AI and its goals""" -from colorama import Fore, Style - -from autogpt import utils -from autogpt.config.ai_config import AIConfig -from autogpt.logs import logger - - -def prompt_user() -> AIConfig: - """Prompt the user for input - - Returns: - AIConfig: The AIConfig object containing the user's input - """ - ai_name = "" - # Construct the prompt - logger.typewriter_log( - "Welcome to Auto-GPT! ", - Fore.GREEN, - "run with '--help' for more information.", - speak_text=True, - ) - - logger.typewriter_log( - "Create an AI-Assistant:", - Fore.GREEN, - "Enter the name of your AI and its role below. Entering nothing will load" - " defaults.", - speak_text=True, - ) - - # Get AI Name from User - logger.typewriter_log( - "Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'" - ) - ai_name = utils.clean_input("AI Name: ") - if ai_name == "": - ai_name = "Entrepreneur-GPT" - - logger.typewriter_log( - f"{ai_name} here!", Fore.LIGHTBLUE_EX, "I am at your service.", speak_text=True - ) - - # Get AI Role from User - logger.typewriter_log( - "Describe your AI's role: ", - Fore.GREEN, - "For example, 'an AI designed to autonomously develop and run businesses with" - " the sole goal of increasing your net worth.'", - ) - ai_role = utils.clean_input(f"{ai_name} is: ") - if ai_role == "": - ai_role = "an AI designed to autonomously develop and run businesses with the" - " sole goal of increasing your net worth." 
- - # Enter up to 5 goals for the AI - logger.typewriter_log( - "Enter up to 5 goals for your AI: ", - Fore.GREEN, - "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage" - " multiple businesses autonomously'", - ) - print("Enter nothing to load defaults, enter nothing when finished.", flush=True) - ai_goals = [] - for i in range(5): - ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ") - if ai_goal == "": - break - ai_goals.append(ai_goal) - if not ai_goals: - ai_goals = [ - "Increase net worth", - "Grow Twitter Account", - "Develop and manage multiple businesses autonomously", - ] - - return AIConfig(ai_name, ai_role, ai_goals) diff --git a/spaces/multimodalart/stable-diffusion-inpainting/clipseg/general_utils.py b/spaces/multimodalart/stable-diffusion-inpainting/clipseg/general_utils.py deleted file mode 100644 index 708d32e701a78f3ce848060baef561c8f11b1b2e..0000000000000000000000000000000000000000 --- a/spaces/multimodalart/stable-diffusion-inpainting/clipseg/general_utils.py +++ /dev/null @@ -1,272 +0,0 @@ -import json -import inspect -import torch -import os -import sys -import yaml -from shutil import copy, copytree -from os.path import join, dirname, realpath, expanduser, isfile, isdir, basename - - -class Logger(object): - - def __getattr__(self, k): - return print - -log = Logger() - -def training_config_from_cli_args(): - experiment_name = sys.argv[1] - experiment_id = int(sys.argv[2]) - - yaml_config = yaml.load(open(f'experiments/{experiment_name}'), Loader=yaml.SafeLoader) - - config = yaml_config['configuration'] - config = {**config, **yaml_config['individual_configurations'][experiment_id]} - config = AttributeDict(config) - return config - - -def score_config_from_cli_args(): - experiment_name = sys.argv[1] - experiment_id = int(sys.argv[2]) - - - yaml_config = yaml.load(open(f'experiments/{experiment_name}'), Loader=yaml.SafeLoader) - - config = yaml_config['test_configuration_common'] - - if type(yaml_config['test_configuration']) == list: - test_id = int(sys.argv[3]) - config = {**config, **yaml_config['test_configuration'][test_id]} - else: - config = {**config, **yaml_config['test_configuration']} - - if 'test_configuration' in yaml_config['individual_configurations'][experiment_id]: - config = {**config, **yaml_config['individual_configurations'][experiment_id]['test_configuration']} - - train_checkpoint_id = yaml_config['individual_configurations'][experiment_id]['name'] - - config = AttributeDict(config) - return config, train_checkpoint_id - - -def get_from_repository(local_name, repo_files, integrity_check=None, repo_dir='~/dataset_repository', - local_dir='~/datasets'): - """ copies files from repository to local folder. - - repo_files: list of filenames or list of tuples [filename, target path] - - e.g. get_from_repository('MyDataset', [['data/dataset1.tar', 'other/path/ds03.tar']) - will create a folder 'MyDataset' in local_dir, and extract the content of - '/data/dataset1.tar' to /MyDataset/other/path. 
- """ - - local_dir = realpath(join(expanduser(local_dir), local_name)) - - dataset_exists = True - - # check if folder is available - if not isdir(local_dir): - dataset_exists = False - - if integrity_check is not None: - try: - integrity_ok = integrity_check(local_dir) - except BaseException: - integrity_ok = False - - if integrity_ok: - log.hint('Passed custom integrity check') - else: - log.hint('Custom integrity check failed') - - dataset_exists = dataset_exists and integrity_ok - - if not dataset_exists: - - repo_dir = realpath(expanduser(repo_dir)) - - for i, filename in enumerate(repo_files): - - if type(filename) == str: - origin, target = filename, filename - archive_target = join(local_dir, basename(origin)) - extract_target = join(local_dir) - else: - origin, target = filename - archive_target = join(local_dir, dirname(target), basename(origin)) - extract_target = join(local_dir, dirname(target)) - - archive_origin = join(repo_dir, origin) - - log.hint(f'copy: {archive_origin} to {archive_target}') - - # make sure the path exists - os.makedirs(dirname(archive_target), exist_ok=True) - - if os.path.isfile(archive_target): - # only copy if size differs - if os.path.getsize(archive_target) != os.path.getsize(archive_origin): - log.hint(f'file exists but filesize differs: target {os.path.getsize(archive_target)} vs. origin {os.path.getsize(archive_origin)}') - copy(archive_origin, archive_target) - else: - copy(archive_origin, archive_target) - - extract_archive(archive_target, extract_target, noarchive_ok=True) - - # concurrent processes might have deleted the file - if os.path.isfile(archive_target): - os.remove(archive_target) - - -def extract_archive(filename, target_folder=None, noarchive_ok=False): - from subprocess import run, PIPE - - if filename.endswith('.tgz') or filename.endswith('.tar'): - command = f'tar -xf {filename}' - command += f' -C {target_folder}' if target_folder is not None else '' - elif filename.endswith('.tar.gz'): - command = f'tar -xzf {filename}' - command += f' -C {target_folder}' if target_folder is not None else '' - elif filename.endswith('zip'): - command = f'unzip {filename}' - command += f' -d {target_folder}' if target_folder is not None else '' - else: - if noarchive_ok: - return - else: - raise ValueError(f'unsuppored file ending of {filename}') - - log.hint(command) - result = run(command.split(), stdout=PIPE, stderr=PIPE) - if result.returncode != 0: - print(result.stdout, result.stderr) - - -class AttributeDict(dict): - """ - An extended dictionary that allows access to elements as atttributes and counts - these accesses. This way, we know if some attributes were never used. 
- """ - - def __init__(self, *args, **kwargs): - from collections import Counter - super().__init__(*args, **kwargs) - self.__dict__['counter'] = Counter() - - def __getitem__(self, k): - self.__dict__['counter'][k] += 1 - return super().__getitem__(k) - - def __getattr__(self, k): - self.__dict__['counter'][k] += 1 - return super().get(k) - - def __setattr__(self, k, v): - return super().__setitem__(k, v) - - def __delattr__(self, k, v): - return super().__delitem__(k, v) - - def unused_keys(self, exceptions=()): - return [k for k in super().keys() if self.__dict__['counter'][k] == 0 and k not in exceptions] - - def assume_no_unused_keys(self, exceptions=()): - if len(self.unused_keys(exceptions=exceptions)) > 0: - log.warning('Unused keys:', self.unused_keys(exceptions=exceptions)) - - -def get_attribute(name): - import importlib - - if name is None: - raise ValueError('The provided attribute is None') - - name_split = name.split('.') - mod = importlib.import_module('.'.join(name_split[:-1])) - return getattr(mod, name_split[-1]) - - - -def filter_args(input_args, default_args): - - updated_args = {k: input_args[k] if k in input_args else v for k, v in default_args.items()} - used_args = {k: v for k, v in input_args.items() if k in default_args} - unused_args = {k: v for k, v in input_args.items() if k not in default_args} - - return AttributeDict(updated_args), AttributeDict(used_args), AttributeDict(unused_args) - - -def load_model(checkpoint_id, weights_file=None, strict=True, model_args='from_config', with_config=False): - - config = json.load(open(join('logs', checkpoint_id, 'config.json'))) - - if model_args != 'from_config' and type(model_args) != dict: - raise ValueError('model_args must either be "from_config" or a dictionary of values') - - model_cls = get_attribute(config['model']) - - # load model - if model_args == 'from_config': - _, model_args, _ = filter_args(config, inspect.signature(model_cls).parameters) - - model = model_cls(**model_args) - - if weights_file is None: - weights_file = realpath(join('logs', checkpoint_id, 'weights.pth')) - else: - weights_file = realpath(join('logs', checkpoint_id, weights_file)) - - if isfile(weights_file): - weights = torch.load(weights_file) - for _, w in weights.items(): - assert not torch.any(torch.isnan(w)), 'weights contain NaNs' - model.load_state_dict(weights, strict=strict) - else: - raise FileNotFoundError(f'model checkpoint {weights_file} was not found') - - if with_config: - return model, config - - return model - - -class TrainingLogger(object): - - def __init__(self, model, log_dir, config=None, *args): - super().__init__() - self.model = model - self.base_path = join(f'logs/{log_dir}') if log_dir is not None else None - - os.makedirs('logs/', exist_ok=True) - os.makedirs(self.base_path, exist_ok=True) - - if config is not None: - json.dump(config, open(join(self.base_path, 'config.json'), 'w')) - - def iter(self, i, **kwargs): - if i % 100 == 0 and 'loss' in kwargs: - loss = kwargs['loss'] - print(f'iteration {i}: loss {loss:.4f}') - - def save_weights(self, only_trainable=False, weight_file='weights.pth'): - if self.model is None: - raise AttributeError('You need to provide a model reference when initializing TrainingTracker to save weights.') - - weights_path = join(self.base_path, weight_file) - - weight_dict = self.model.state_dict() - - if only_trainable: - weight_dict = {n: weight_dict[n] for n, p in self.model.named_parameters() if p.requires_grad} - - torch.save(weight_dict, weights_path) - log.info(f'Saved 
weights to {weights_path}') - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """ automatically stop processes if used in a context manager """ - pass \ No newline at end of file diff --git a/spaces/mygyasir/Real-Time-Voice-Cloning/vocoder/models/fatchord_version.py b/spaces/mygyasir/Real-Time-Voice-Cloning/vocoder/models/fatchord_version.py deleted file mode 100644 index 70ef1e3f6b99f32cc4fa95f64acfa58268d71ad7..0000000000000000000000000000000000000000 --- a/spaces/mygyasir/Real-Time-Voice-Cloning/vocoder/models/fatchord_version.py +++ /dev/null @@ -1,434 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from vocoder.distribution import sample_from_discretized_mix_logistic -from vocoder.display import * -from vocoder.audio import * - - -class ResBlock(nn.Module): - def __init__(self, dims): - super().__init__() - self.conv1 = nn.Conv1d(dims, dims, kernel_size=1, bias=False) - self.conv2 = nn.Conv1d(dims, dims, kernel_size=1, bias=False) - self.batch_norm1 = nn.BatchNorm1d(dims) - self.batch_norm2 = nn.BatchNorm1d(dims) - - def forward(self, x): - residual = x - x = self.conv1(x) - x = self.batch_norm1(x) - x = F.relu(x) - x = self.conv2(x) - x = self.batch_norm2(x) - return x + residual - - -class MelResNet(nn.Module): - def __init__(self, res_blocks, in_dims, compute_dims, res_out_dims, pad): - super().__init__() - k_size = pad * 2 + 1 - self.conv_in = nn.Conv1d(in_dims, compute_dims, kernel_size=k_size, bias=False) - self.batch_norm = nn.BatchNorm1d(compute_dims) - self.layers = nn.ModuleList() - for i in range(res_blocks): - self.layers.append(ResBlock(compute_dims)) - self.conv_out = nn.Conv1d(compute_dims, res_out_dims, kernel_size=1) - - def forward(self, x): - x = self.conv_in(x) - x = self.batch_norm(x) - x = F.relu(x) - for f in self.layers: x = f(x) - x = self.conv_out(x) - return x - - -class Stretch2d(nn.Module): - def __init__(self, x_scale, y_scale): - super().__init__() - self.x_scale = x_scale - self.y_scale = y_scale - - def forward(self, x): - b, c, h, w = x.size() - x = x.unsqueeze(-1).unsqueeze(3) - x = x.repeat(1, 1, 1, self.y_scale, 1, self.x_scale) - return x.view(b, c, h * self.y_scale, w * self.x_scale) - - -class UpsampleNetwork(nn.Module): - def __init__(self, feat_dims, upsample_scales, compute_dims, - res_blocks, res_out_dims, pad): - super().__init__() - total_scale = np.cumproduct(upsample_scales)[-1] - self.indent = pad * total_scale - self.resnet = MelResNet(res_blocks, feat_dims, compute_dims, res_out_dims, pad) - self.resnet_stretch = Stretch2d(total_scale, 1) - self.up_layers = nn.ModuleList() - for scale in upsample_scales: - k_size = (1, scale * 2 + 1) - padding = (0, scale) - stretch = Stretch2d(scale, 1) - conv = nn.Conv2d(1, 1, kernel_size=k_size, padding=padding, bias=False) - conv.weight.data.fill_(1. 
/ k_size[1]) - self.up_layers.append(stretch) - self.up_layers.append(conv) - - def forward(self, m): - aux = self.resnet(m).unsqueeze(1) - aux = self.resnet_stretch(aux) - aux = aux.squeeze(1) - m = m.unsqueeze(1) - for f in self.up_layers: m = f(m) - m = m.squeeze(1)[:, :, self.indent:-self.indent] - return m.transpose(1, 2), aux.transpose(1, 2) - - -class WaveRNN(nn.Module): - def __init__(self, rnn_dims, fc_dims, bits, pad, upsample_factors, - feat_dims, compute_dims, res_out_dims, res_blocks, - hop_length, sample_rate, mode='RAW'): - super().__init__() - self.mode = mode - self.pad = pad - if self.mode == 'RAW' : - self.n_classes = 2 ** bits - elif self.mode == 'MOL' : - self.n_classes = 30 - else : - RuntimeError("Unknown model mode value - ", self.mode) - - self.rnn_dims = rnn_dims - self.aux_dims = res_out_dims // 4 - self.hop_length = hop_length - self.sample_rate = sample_rate - - self.upsample = UpsampleNetwork(feat_dims, upsample_factors, compute_dims, res_blocks, res_out_dims, pad) - self.I = nn.Linear(feat_dims + self.aux_dims + 1, rnn_dims) - self.rnn1 = nn.GRU(rnn_dims, rnn_dims, batch_first=True) - self.rnn2 = nn.GRU(rnn_dims + self.aux_dims, rnn_dims, batch_first=True) - self.fc1 = nn.Linear(rnn_dims + self.aux_dims, fc_dims) - self.fc2 = nn.Linear(fc_dims + self.aux_dims, fc_dims) - self.fc3 = nn.Linear(fc_dims, self.n_classes) - - self.step = nn.Parameter(torch.zeros(1).long(), requires_grad=False) - self.num_params() - - def forward(self, x, mels): - self.step += 1 - bsize = x.size(0) - if torch.cuda.is_available(): - h1 = torch.zeros(1, bsize, self.rnn_dims).cuda() - h2 = torch.zeros(1, bsize, self.rnn_dims).cuda() - else: - h1 = torch.zeros(1, bsize, self.rnn_dims).cpu() - h2 = torch.zeros(1, bsize, self.rnn_dims).cpu() - mels, aux = self.upsample(mels) - - aux_idx = [self.aux_dims * i for i in range(5)] - a1 = aux[:, :, aux_idx[0]:aux_idx[1]] - a2 = aux[:, :, aux_idx[1]:aux_idx[2]] - a3 = aux[:, :, aux_idx[2]:aux_idx[3]] - a4 = aux[:, :, aux_idx[3]:aux_idx[4]] - - x = torch.cat([x.unsqueeze(-1), mels, a1], dim=2) - x = self.I(x) - res = x - x, _ = self.rnn1(x, h1) - - x = x + res - res = x - x = torch.cat([x, a2], dim=2) - x, _ = self.rnn2(x, h2) - - x = x + res - x = torch.cat([x, a3], dim=2) - x = F.relu(self.fc1(x)) - - x = torch.cat([x, a4], dim=2) - x = F.relu(self.fc2(x)) - return self.fc3(x) - - def generate(self, mels, batched, target, overlap, mu_law, progress_callback=None): - mu_law = mu_law if self.mode == 'RAW' else False - progress_callback = progress_callback or self.gen_display - - self.eval() - output = [] - start = time.time() - rnn1 = self.get_gru_cell(self.rnn1) - rnn2 = self.get_gru_cell(self.rnn2) - - with torch.no_grad(): - if torch.cuda.is_available(): - mels = mels.cuda() - else: - mels = mels.cpu() - wave_len = (mels.size(-1) - 1) * self.hop_length - mels = self.pad_tensor(mels.transpose(1, 2), pad=self.pad, side='both') - mels, aux = self.upsample(mels.transpose(1, 2)) - - if batched: - mels = self.fold_with_overlap(mels, target, overlap) - aux = self.fold_with_overlap(aux, target, overlap) - - b_size, seq_len, _ = mels.size() - - if torch.cuda.is_available(): - h1 = torch.zeros(b_size, self.rnn_dims).cuda() - h2 = torch.zeros(b_size, self.rnn_dims).cuda() - x = torch.zeros(b_size, 1).cuda() - else: - h1 = torch.zeros(b_size, self.rnn_dims).cpu() - h2 = torch.zeros(b_size, self.rnn_dims).cpu() - x = torch.zeros(b_size, 1).cpu() - - d = self.aux_dims - aux_split = [aux[:, :, d * i:d * (i + 1)] for i in range(4)] - - for i in range(seq_len): 
- - m_t = mels[:, i, :] - - a1_t, a2_t, a3_t, a4_t = (a[:, i, :] for a in aux_split) - - x = torch.cat([x, m_t, a1_t], dim=1) - x = self.I(x) - h1 = rnn1(x, h1) - - x = x + h1 - inp = torch.cat([x, a2_t], dim=1) - h2 = rnn2(inp, h2) - - x = x + h2 - x = torch.cat([x, a3_t], dim=1) - x = F.relu(self.fc1(x)) - - x = torch.cat([x, a4_t], dim=1) - x = F.relu(self.fc2(x)) - - logits = self.fc3(x) - - if self.mode == 'MOL': - sample = sample_from_discretized_mix_logistic(logits.unsqueeze(0).transpose(1, 2)) - output.append(sample.view(-1)) - if torch.cuda.is_available(): - # x = torch.FloatTensor([[sample]]).cuda() - x = sample.transpose(0, 1).cuda() - else: - x = sample.transpose(0, 1) - - elif self.mode == 'RAW' : - posterior = F.softmax(logits, dim=1) - distrib = torch.distributions.Categorical(posterior) - - sample = 2 * distrib.sample().float() / (self.n_classes - 1.) - 1. - output.append(sample) - x = sample.unsqueeze(-1) - else: - raise RuntimeError("Unknown model mode value - ", self.mode) - - if i % 100 == 0: - gen_rate = (i + 1) / (time.time() - start) * b_size / 1000 - progress_callback(i, seq_len, b_size, gen_rate) - - output = torch.stack(output).transpose(0, 1) - output = output.cpu().numpy() - output = output.astype(np.float64) - - if batched: - output = self.xfade_and_unfold(output, target, overlap) - else: - output = output[0] - - if mu_law: - output = decode_mu_law(output, self.n_classes, False) - if hp.apply_preemphasis: - output = de_emphasis(output) - - # Fade-out at the end to avoid signal cutting out suddenly - fade_out = np.linspace(1, 0, 20 * self.hop_length) - output = output[:wave_len] - output[-20 * self.hop_length:] *= fade_out - - self.train() - - return output - - - def gen_display(self, i, seq_len, b_size, gen_rate): - pbar = progbar(i, seq_len) - msg = f'| {pbar} {i*b_size}/{seq_len*b_size} | Batch Size: {b_size} | Gen Rate: {gen_rate:.1f}kHz | ' - stream(msg) - - def get_gru_cell(self, gru): - gru_cell = nn.GRUCell(gru.input_size, gru.hidden_size) - gru_cell.weight_hh.data = gru.weight_hh_l0.data - gru_cell.weight_ih.data = gru.weight_ih_l0.data - gru_cell.bias_hh.data = gru.bias_hh_l0.data - gru_cell.bias_ih.data = gru.bias_ih_l0.data - return gru_cell - - def pad_tensor(self, x, pad, side='both'): - # NB - this is just a quick method i need right now - # i.e., it won't generalise to other shapes/dims - b, t, c = x.size() - total = t + 2 * pad if side == 'both' else t + pad - if torch.cuda.is_available(): - padded = torch.zeros(b, total, c).cuda() - else: - padded = torch.zeros(b, total, c).cpu() - if side == 'before' or side == 'both': - padded[:, pad:pad + t, :] = x - elif side == 'after': - padded[:, :t, :] = x - return padded - - def fold_with_overlap(self, x, target, overlap): - - ''' Fold the tensor with overlap for quick batched inference. - Overlap will be used for crossfading in xfade_and_unfold() - - Args: - x (tensor) : Upsampled conditioning features. - shape=(1, timesteps, features) - target (int) : Target timesteps for each index of batch - overlap (int) : Timesteps for both xfade and rnn warmup - - Return: - (tensor) : shape=(num_folds, target + 2 * overlap, features) - - Details: - x = [[h1, h2, ... 
hn]] - - Where each h is a vector of conditioning features - - Eg: target=2, overlap=1 with x.size(1)=10 - - folded = [[h1, h2, h3, h4], - [h4, h5, h6, h7], - [h7, h8, h9, h10]] - ''' - - _, total_len, features = x.size() - - # Calculate variables needed - num_folds = (total_len - overlap) // (target + overlap) - extended_len = num_folds * (overlap + target) + overlap - remaining = total_len - extended_len - - # Pad if some time steps poking out - if remaining != 0: - num_folds += 1 - padding = target + 2 * overlap - remaining - x = self.pad_tensor(x, padding, side='after') - - if torch.cuda.is_available(): - folded = torch.zeros(num_folds, target + 2 * overlap, features).cuda() - else: - folded = torch.zeros(num_folds, target + 2 * overlap, features).cpu() - - # Get the values for the folded tensor - for i in range(num_folds): - start = i * (target + overlap) - end = start + target + 2 * overlap - folded[i] = x[:, start:end, :] - - return folded - - def xfade_and_unfold(self, y, target, overlap): - - ''' Applies a crossfade and unfolds into a 1d array. - - Args: - y (ndarry) : Batched sequences of audio samples - shape=(num_folds, target + 2 * overlap) - dtype=np.float64 - overlap (int) : Timesteps for both xfade and rnn warmup - - Return: - (ndarry) : audio samples in a 1d array - shape=(total_len) - dtype=np.float64 - - Details: - y = [[seq1], - [seq2], - [seq3]] - - Apply a gain envelope at both ends of the sequences - - y = [[seq1_in, seq1_target, seq1_out], - [seq2_in, seq2_target, seq2_out], - [seq3_in, seq3_target, seq3_out]] - - Stagger and add up the groups of samples: - - [seq1_in, seq1_target, (seq1_out + seq2_in), seq2_target, ...] - - ''' - - num_folds, length = y.shape - target = length - 2 * overlap - total_len = num_folds * (target + overlap) + overlap - - # Need some silence for the rnn warmup - silence_len = overlap // 2 - fade_len = overlap - silence_len - silence = np.zeros((silence_len), dtype=np.float64) - - # Equal power crossfade - t = np.linspace(-1, 1, fade_len, dtype=np.float64) - fade_in = np.sqrt(0.5 * (1 + t)) - fade_out = np.sqrt(0.5 * (1 - t)) - - # Concat the silence to the fades - fade_in = np.concatenate([silence, fade_in]) - fade_out = np.concatenate([fade_out, silence]) - - # Apply the gain to the overlap samples - y[:, :overlap] *= fade_in - y[:, -overlap:] *= fade_out - - unfolded = np.zeros((total_len), dtype=np.float64) - - # Loop to add up all the samples - for i in range(num_folds): - start = i * (target + overlap) - end = start + target + 2 * overlap - unfolded[start:end] += y[i] - - return unfolded - - def get_step(self) : - return self.step.data.item() - - def checkpoint(self, model_dir, optimizer) : - k_steps = self.get_step() // 1000 - self.save(model_dir.joinpath("checkpoint_%dk_steps.pt" % k_steps), optimizer) - - def log(self, path, msg) : - with open(path, 'a') as f: - print(msg, file=f) - - def load(self, path, optimizer) : - checkpoint = torch.load(path) - if "optimizer_state" in checkpoint: - self.load_state_dict(checkpoint["model_state"]) - optimizer.load_state_dict(checkpoint["optimizer_state"]) - else: - # Backwards compatibility - self.load_state_dict(checkpoint) - - def save(self, path, optimizer) : - torch.save({ - "model_state": self.state_dict(), - "optimizer_state": optimizer.state_dict(), - }, path) - - def num_params(self, print_out=True): - parameters = filter(lambda p: p.requires_grad, self.parameters()) - parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000 - if print_out : - print('Trainable Parameters: 
%.3fM' % parameters) diff --git a/spaces/myrad01/Inpaint-Anything/third_party/lama/saicinpainting/training/visualizers/colors.py b/spaces/myrad01/Inpaint-Anything/third_party/lama/saicinpainting/training/visualizers/colors.py deleted file mode 100644 index 9e9e39182c58cb06a1c5e97a7e6c497cc3388ebe..0000000000000000000000000000000000000000 --- a/spaces/myrad01/Inpaint-Anything/third_party/lama/saicinpainting/training/visualizers/colors.py +++ /dev/null @@ -1,76 +0,0 @@ -import random -import colorsys - -import numpy as np -import matplotlib -matplotlib.use('agg') -import matplotlib.pyplot as plt -from matplotlib.colors import LinearSegmentedColormap - - -def generate_colors(nlabels, type='bright', first_color_black=False, last_color_black=True, verbose=False): - # https://stackoverflow.com/questions/14720331/how-to-generate-random-colors-in-matplotlib - """ - Creates a random colormap to be used together with matplotlib. Useful for segmentation tasks - :param nlabels: Number of labels (size of colormap) - :param type: 'bright' for strong colors, 'soft' for pastel colors - :param first_color_black: Option to use first color as black, True or False - :param last_color_black: Option to use last color as black, True or False - :param verbose: Prints the number of labels and shows the colormap. True or False - :return: colormap for matplotlib - """ - if type not in ('bright', 'soft'): - print ('Please choose "bright" or "soft" for type') - return - - if verbose: - print('Number of labels: ' + str(nlabels)) - - # Generate color map for bright colors, based on hsv - if type == 'bright': - randHSVcolors = [(np.random.uniform(low=0.0, high=1), - np.random.uniform(low=0.2, high=1), - np.random.uniform(low=0.9, high=1)) for i in range(nlabels)] - - # Convert HSV list to RGB - randRGBcolors = [] - for HSVcolor in randHSVcolors: - randRGBcolors.append(colorsys.hsv_to_rgb(HSVcolor[0], HSVcolor[1], HSVcolor[2])) - - if first_color_black: - randRGBcolors[0] = [0, 0, 0] - - if last_color_black: - randRGBcolors[-1] = [0, 0, 0] - - random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels) - - # Generate soft pastel colors, by limiting the RGB spectrum - if type == 'soft': - low = 0.6 - high = 0.95 - randRGBcolors = [(np.random.uniform(low=low, high=high), - np.random.uniform(low=low, high=high), - np.random.uniform(low=low, high=high)) for i in range(nlabels)] - - if first_color_black: - randRGBcolors[0] = [0, 0, 0] - - if last_color_black: - randRGBcolors[-1] = [0, 0, 0] - random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels) - - # Display colorbar - if verbose: - from matplotlib import colors, colorbar - from matplotlib import pyplot as plt - fig, ax = plt.subplots(1, 1, figsize=(15, 0.5)) - - bounds = np.linspace(0, nlabels, nlabels + 1) - norm = colors.BoundaryNorm(bounds, nlabels) - - cb = colorbar.ColorbarBase(ax, cmap=random_colormap, norm=norm, spacing='proportional', ticks=None, - boundaries=bounds, format='%1i', orientation=u'horizontal') - - return randRGBcolors, random_colormap - diff --git a/spaces/nateraw/test-pix2pix-load/app.py b/spaces/nateraw/test-pix2pix-load/app.py deleted file mode 100644 index 2e53854beb88e155ddb31694c2b30cbb6686ca88..0000000000000000000000000000000000000000 --- a/spaces/nateraw/test-pix2pix-load/app.py +++ /dev/null @@ -1,23 +0,0 @@ -import gradio as gr -from torchvision.transforms import Compose, Resize, ToTensor, Normalize -from PIL import Image -from torchvision.utils import save_image - -from 
huggan.pytorch.pix2pix.modeling_pix2pix import GeneratorUNet - -transform = Compose( - [ - Resize((256, 256), Image.BICUBIC), - ToTensor(), - Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), - ] -) -model = GeneratorUNet.from_pretrained('huggan/pix2pix-cityscapes') - -def predict_fn(img): - inp = transform(img).unsqueeze(0) - out = model(inp) - save_image(out, 'out.png', normalize=True) - return 'out.png' - -gr.Interface(predict_fn, inputs=gr.inputs.Image(type='pil'), outputs='image', examples=[['real.jpeg'], ['real2.jpeg']]).launch() \ No newline at end of file diff --git a/spaces/naver/SuperFeatures/how/__init__.py b/spaces/naver/SuperFeatures/how/__init__.py deleted file mode 100644 index bdbf651de9d2b87cc9767d9e0693cc1a980a9bd6..0000000000000000000000000000000000000000 --- a/spaces/naver/SuperFeatures/how/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -""" -Official Python implementation of HOW method for ECCV 2020 paper "Learning and aggregating deep -local descriptors for instance-level recognition" -""" diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Articad Crack Codes For Sonar LINK.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Articad Crack Codes For Sonar LINK.md deleted file mode 100644 index 4d95fcc3848e8eb8f82ce4ddee6307ffcda47236..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Articad Crack Codes For Sonar LINK.md +++ /dev/null @@ -1,57 +0,0 @@ -
          -

          How to Use Articad to Design Stunning Kitchens with Sonar

          - -

          Articad is a leading software for designing kitchens, bathrooms, bedrooms and other interiors. It is fast, easy to use and produces stunning 3D visuals that impress customers and help you win more sales. But did you know that you can also use Articad with Sonar, a powerful audio production software, to create realistic sound effects for your designs?

          -

          Articad Crack Codes For Sonar


          Download Ziphttps://urlcod.com/2uI9NF



          - -

          In this article, we will show you how to use Articad and Sonar together to enhance your kitchen design presentations with realistic sounds of cooking, water running, appliances working and more. You will learn how to:

          - -
            -
          • Export your Articad design as a video file
          • -
          • Import your video file into Sonar
          • -
          • Add sound effects from Sonar's library or record your own
          • -
          • Synchronize the sound effects with the video
          • -
          • Export your final video with sound
          • -
          - -

          By following these steps, you will be able to create more immersive and engaging presentations that showcase your design skills and make your customers feel like they are in their dream kitchen.

          - -

          Step 1: Export your Articad design as a video file

          - -

          The first step is to export your Articad design as a video file. This will allow you to import it into Sonar and add sound effects later. To do this, follow these steps:

          - -
            -
          1. Open your Articad design and go to the Presentation tab.
          2. -
          3. Select the Video option and choose the settings you want for your video. You can adjust the quality, resolution, frame rate, duration and camera path of your video.
          4. -
          5. Click on Export and choose a location and a name for your video file. Make sure you save it as an MP4 or AVI format.
          6. -
          7. Wait for the export process to finish. You can preview your video by clicking on Play.
          8. -
          - -

          Step 2: Import your video file into Sonar

          - -

          The next step is to import your video file into Sonar. Sonar is a professional audio production software that allows you to record, edit and mix soundtracks for your videos. To do this, follow these steps:

          - -
            -
          1. Open Sonar and create a new project.
          2. -
          3. Go to File > Import > Video File and select the video file you exported from Articad.
          4. -
          5. Sonar will ask you if you want to extract the audio from the video. Choose No, since we want to add our own sound effects.
          6. -
          7. Sonar will import your video and place it on a video track in the timeline. You can resize and move the video track as you wish.
          8. -
          - -

          Step 3: Add sound effects from Sonar's library or record your own

          - -

          The third step is to add sound effects from Sonar's library or record your own. Sonar has a large collection of sound effects that you can use for your kitchen design presentations. You can also record your own sounds using a microphone or an audio interface. To do this, follow these steps:

          -

          - -
            -
          1. To add sound effects from Sonar's library, go to Insert > Audio Track and create a new audio track.
          2. -
          3. Go to Browser > Media > Sound Effects and browse through the categories of sound effects available.
          4. -
          5. Drag and drop the sound effects you want onto the audio track. You can adjust the volume, pan, pitch and other parameters of each sound effect using the Inspector panel.
          6. -
          7. To record your own sounds, go to Insert > Audio Track and create a new audio track.
          8. -
          9. Go to Edit > Preferences > Audio > Devices and select your input device (microphone or audio interface).
          10. -
          11. Arm the audio track for recording by clicking on the red button next to its name.
          12. -
          13. Press R on your keyboard or click on Record on the transport panel to start recording.
          14. -
          15. Make the sounds you want using your voice, objects or appliances. You can record multiple sounds on the same track or create separate tracks for each sound.
          16. -
          17. Press Space

            81aa517590
            -
            -
            \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Dirt 2 Product Key For Windows Live Crack.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Dirt 2 Product Key For Windows Live Crack.md deleted file mode 100644 index c0757469a7b8e6061a1423c0d4677efe4a3ac758..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Dirt 2 Product Key For Windows Live Crack.md +++ /dev/null @@ -1,26 +0,0 @@ -
            -

            How to Play Dirt 2 on Windows 10 Without GFWL

            -

            Dirt 2 is one of the best racing games ever made, but unfortunately, it has a problem with Windows 10. The game requires Games for Windows Live (GFWL) to run, but GFWL is no longer supported by Microsoft and causes many issues for players. If you want to play Dirt 2 on Windows 10 without GFWL, you need to use a crack that bypasses the DRM and allows you to save your progress locally.

            -

            In this article, we will show you how to download and install the crack for Dirt 2 and enjoy this classic game on your modern PC.

            -

            Dirt 2 Product Key For Windows Live Crack


            Download File >>> https://urlcod.com/2uIauE



            -

            Step 1: Download Dirt 2

            -

            The first step is to download Dirt 2 from a reliable source. You can buy the game from Steam or other online stores, or use a physical copy if you have one. Make sure you don't launch the game yet or it will install GFWL automatically.

            -

            Step 2: Delete the GFWL Folder

            -

            The next step is to delete the GFWL folder from your Dirt 2 installation directory. The folder is located in the "redist" subfolder and contains the files that install GFWL on your system. You don't need them anymore, so you can safely delete them.

            -

            Step 3: Download and Install the Crack

            -

            The final step is to download and install the crack for Dirt 2 that replaces the original executable file with a modified one that removes the GFWL dependency. You can find the crack on various websites, such as MegaGames or GameCopyWorld. Make sure you download the version that matches your game version (v1.0 or v1.1) and your system architecture (32-bit or 64-bit).

            -

            Once you have downloaded the crack, unzip it and copy the xlive.dll file to your Dirt 2 folder, overwriting the existing one. This file will trick the game into thinking that GFWL is installed and working, but it will actually create a local profile for you to save your progress.

            -

            Step 4: Enjoy Dirt 2 on Windows 10

            -

            That's it! You can now launch Dirt 2 from your desktop or Steam library and play it without any problems. You can also access the online features of the game, such as multiplayer and leaderboards, if you have a valid product key for Windows Live. If you don't have one, you can still play offline with no issues.

            -

            -

            Dirt 2 is a fantastic game that deserves to be played by all racing fans. With this simple crack, you can enjoy it on Windows 10 without any hassle. Have fun!

            - -

            Step 5: Explore the Game Modes and Features

            -

            Dirt 2 offers a variety of game modes and features to keep you entertained. The main mode is the World Tour, where you travel around the globe and compete in different events, such as rally, rallycross, trailblazer, landrush, and raid. You can also unlock new cars, liveries, sponsors, and upgrades as you progress. The World Tour also features a dynamic weather system that affects the driving conditions and visibility.

            -

            If you want to play online, you can join or host multiplayer sessions with up to eight players. You can choose from different modes, such as Last Man Standing, Gatecrasher, Domination, and Transporter. You can also customize your car and profile, and chat with other players using voice or text.

            -

            If you want to practice your skills or challenge yourself, you can try the Time Trial mode, where you race against the clock or your own ghost. You can also access the Leaderboards and see how you rank against other players around the world.

            -

            Step 6: Enjoy the Graphics and Sound

            -

            Dirt 2 is a visually stunning game that showcases the power of the EGO engine. The game features realistic lighting, shadows, reflections, and particle effects. The environments are detailed and varied, with different terrains, vegetation, and landmarks. The cars are also modeled with high fidelity, with damage effects and dirt accumulation.

            -

            The sound design is also impressive, with authentic engine noises, tire screeches, and environmental sounds. The game also features a licensed soundtrack with songs from various genres, such as rock, hip-hop, and electronic. The game also has voice acting from real-life drivers and celebrities, such as Ken Block, Travis Pastrana, Dave Mirra, and Mohammed Ben Sulayem.

            81aa517590
            -
            -
            \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/MacCaption 6.5.1 (Full Crack) Fixed.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/MacCaption 6.5.1 (Full Crack) Fixed.md deleted file mode 100644 index 0ed6ed301aef8e9f83150cb54e96367623a52d89..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/MacCaption 6.5.1 (Full Crack) Fixed.md +++ /dev/null @@ -1,185 +0,0 @@ -
            -

            MacCaption 6.5.1 (Full Crack) - A Comprehensive Review

            |

            If you are looking for a reliable and professional software for captioning and subtitling your videos on Mac, you may have heard of MacCaption . MacCaption is a popular software that allows you to create, edit, preview, and export captions and subtitles in various formats and standards.

            -

            However, MacCaption is not a cheap software, and it requires a license to use it fully and legally . If you are on a budget or want to try it out before buying it, you may be tempted to download a cracked version of the software, such as MacCaption 6.5.1 (Full Crack).

            -

            MacCaption 6.5.1 (Full Crack)


            Download Filehttps://urlcod.com/2uI9Hr



            -

            But what is MacCaption 6.5.1 (Full Crack)? Is it safe and legal to use? What are its features and benefits? How can you download and install it? How can you use it effectively? In this article, we will answer all these questions and more.

            -

            What is MacCaption and why do you need it?

            -

            MacCaption is a powerful captioning and subtitling software for Mac users . It was developed by CPC , a company that specializes in closed captioning solutions for broadcast, web, DVD, Blu-ray, mobile, and live events.

            -

            MacCaption is a powerful captioning and subtitling software for Mac users

            -

            MacCaption allows you to create captions and subtitles for your videos in an easy and efficient way . You can import your video files or URLs from various sources, such as YouTube , Vimeo , Netflix , Hulu , Amazon Prime , Disney+ , HBO , etc.

            -

            You can also import your transcript files or use the Auto Time Stamp feature to automatically generate time codes for your captions . You can then edit your captions using the built-in text editor , which offers various tools such as spell check , find & replace , split & merge , etc.

            -

            -

            You can also preview your captions along with your video using the Preview Play button , which replaces the previous AutoSync feature . You can adjust the timing, position, and appearance of your captions using the Timeline and the Style Editor . You can also add effects such as fade in/out , karaoke , etc.

            -

            Finally, you can export your captions and subtitles in various formats and standards , such as SRT , SCC , WebVTT , STL , DFXP , etc. You can also export your captions as burned-in video files or as standalone files that can be uploaded to your video platform or player.

            -

            MacCaption offers various features and benefits for creating and editing captions

            -

            MacCaption is not just a simple captioning software. It also offers various features and benefits that make it a professional and versatile tool for captioning and subtitling . Some of these features and benefits are:

            -
              -
            • It supports multiple languages and character sets , including Latin, Cyrillic, Arabic, Chinese, Japanese, Korean, etc. You can also use the Translation feature to translate your captions into different languages using Google Translate or Microsoft Translator .
            • -
            • It allows you to create closed captions , open captions , or subtitles . Closed captions are hidden by default and can be turned on or off by the viewer. Open captions are always visible and cannot be turned off. Subtitles are similar to open captions but are usually used for foreign language translation.
            • -
            • It enables you to comply with various accessibility and quality standards and regulations , such as FCC , ADA , CVAA , WCAG , Netflix , Amazon , etc. You can also use the Quality Check feature to verify the accuracy, timing, spelling, grammar, and formatting of your captions.
            • -
            • It integrates with various video editing software and platforms , such as Adobe Premiere Pro , Final Cut Pro X , Avid Media Composer , YouTube , Facebook , etc. You can import or export your captions directly from or to these software and platforms without losing any quality or synchronization.
            • -
            • It allows you to work with different video formats and codecs , such as MP4 , MOV , AVI , MKV , H.264 , HEVC , etc. You can also convert your video files to different formats and resolutions using the Video Converter feature.
            • -
            -

            MacCaption supports different formats and standards for captioning and subtitling

            -

            MacCaption supports a wide range of formats and standards for captioning and subtitling . These formats and standards have different specifications, features, and limitations depending on the type of captioning (closed, open, or subtitle), the video platform or player, the language or region, etc. Some of the most common formats and standards supported by MacCaption are:

            - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Format | Description | Example
SRT | A simple text-based format that uses numbers, time codes, and text lines to indicate the start and end time of each caption. It is widely used for web videos and subtitles. | 1
            00:00:01,000 --> 00:00:03,000
            This is an example of SRT format.
SCC | A binary format that uses hexadecimal codes to represent the characters, colors, positions, and effects of each caption. It is mainly used for broadcast TV and DVD closed captions. | Scenarist_SCC V1.0

            00:00:01:00 94ae 94ae 9420 9420 9452 97a2 616e 2065 7861 6d70 6c65 206f 6620 5343 4320 666f 726d 6174 ae80 ae80

WebVTT | A text-based format that uses cues, identifiers, time codes, text lines, and optional settings to indicate the start and end time, style, position, alignment, etc. of each caption. It is mainly used for web videos and subtitles. | WEBVTT

            1
            00:00:01.000 --> 00:00:03.000 align:center
            This is an example of WebVTT format.
STL | A binary format that uses a header section and a data section to store information about the video file, the language, the frame rate, the number of captions, etc. It is mainly used for DVD subtitles. | $FontName = Arial
            $FontSize = 32
            $HorzAlign = Center
            $VertAlign = Bottom
            $XOffset = 0
            $YOffset = -2
            $Bold = FALSE
            $UnderLined = FALSE
            $Italic = FALSE
            $TextContrast = 15
            $Outline1Contrast = 15
            $Outline2Contrast = 15
            $BackgroundContrast = 0
            $ForceDisplay = FALSE
            $FadeIn = 0
            $FadeOut = 0

            00:00:01:00 , 00:00:03:00 , This is an example of STL format.
DFXP | An XML-based format that uses elements, attributes, and namespaces to define the structure, layout, timing, style, etc. of each caption. It is mainly used for web videos and subtitles.



            - - - -
            - - - - - - - - - - - - - - - - - - - - - - - - -
            -
            - - - - diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/4 Fotos 1 Palabra monedas infinitas APK El reto de encontrar la palabra oculta.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/4 Fotos 1 Palabra monedas infinitas APK El reto de encontrar la palabra oculta.md deleted file mode 100644 index 61bce8bf16e1dbed308dd867d27ec62332a0ae92..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/4 Fotos 1 Palabra monedas infinitas APK El reto de encontrar la palabra oculta.md +++ /dev/null @@ -1,139 +0,0 @@ -
            -

            4 fotos 1 palabra apk monedas infinitas: cómo descargar y jugar al juego más popular de adivinanzas

            -

            ¿Te gustan los juegos de palabras y de lógica? ¿Te divierte resolver acertijos con imágenes? ¿Quieres disfrutar de un pasatiempo entretenido y educativo en tu dispositivo Android? Entonces, te encantará conocer el juego 4 fotos 1 palabra, una aplicación que te reta a adivinar la palabra que se esconde detrás de cuatro fotografías. Y lo mejor de todo es que puedes descargar la versión modificada del juego, que te ofrece monedas infinitas para usar en las pistas y las ayudas. En este artículo, te contamos todo lo que necesitas saber sobre 4 fotos 1 palabra apk monedas infinitas: qué es, cómo descargarlo y cómo jugarlo.

            -

            ¿Qué es 4 fotos 1 palabra?

            -

            4 fotos 1 palabra es un juego de adivinanzas que se basa en el poder de asociación de las imágenes. El objetivo es simple: encontrar la palabra que tienen en común cuatro fotos que se muestran en la pantalla. Por ejemplo, si ves una imagen de un perro, una pata, una huella y un hueso, la palabra que buscas es "perro". Parece fácil, ¿verdad? Pero no te confíes, porque el juego se va complicando a medida que avanzas de nivel.

            -

            4 fotos 1 palabra apk monedas infinitas


            Download Zip: https://ssurll.com/2uNRnr



            -

            El concepto del juego

            -

            El juego fue creado por el estudio alemán LOTUM GmbH en el año 2013 y desde entonces ha sido un éxito mundial. Se estima que más de 250 millones de personas han descargado el juego en sus dispositivos móviles, tanto Android como iOS. El juego está disponible en varios idiomas, como español, inglés, francés, alemán, italiano, portugués y ruso. Además, el juego se actualiza constantemente con nuevos niveles y categorías para mantener el interés de los jugadores.

            -

            Los niveles y las categorías

            -

            El juego cuenta con más de 3000 niveles diferentes, que se agrupan en distintas categorías temáticas. Algunas de las categorías son: animales, comida, deportes, música, cine, arte, historia, geografía, ciencia y tecnología. Cada categoría tiene un color diferente para identificarla fácilmente. Los niveles se van desbloqueando a medida que vas acertando las palabras. Algunos niveles son más fáciles que otros, pero todos tienen algo en común: son muy adictivos.

            -

            Los beneficios de jugar

            -

            Jugar a 4 fotos 1 palabra no solo es divertido, sino también beneficioso para tu mente. Al jugar, ejercitas tu memoria, tu atención, tu concentración, tu vocabulario, tu ortografía y tu lógica. Además, aprendes cosas nuevas sobre diferentes temas y culturas. Y por si fuera poco, también te relajas y te distraes del estrés del día a día.

            -

            ¿Cómo descargar 4 fotos 1 palabra apk monedas infinitas?

            -

            Si quieres disfrutar de la versión modificada del juego, que te ofrece monedas infinitas para usar en las pistas y las ayudas, tienes que seguir unos pasos sencillos. Pero antes, debes asegurarte de que tu dispositivo Android cumpla con los requisitos mínimos para instalar el juego.

            -

            Los requisitos del dispositivo

            -

            Para descargar e instalar 4 fotos 1 palabra apk monedas infinitas, tu dispositivo Android debe tener al menos las siguientes características:

            -
              -
            • Versión de Android: 5.0 o superior
            • -
            • Espacio de almacenamiento: 60 MB libres
            • -
            • Conexión a internet: Wi-Fi o datos móviles
            • -
            • Permisos: acceso a la cámara, al micrófono, al almacenamiento y a la ubicación
            • -
            -

            Si tu dispositivo cumple con estos requisitos, puedes proceder a la descarga e instalación del juego.

            -

            4 fotos 1 palabra mod apk monedas ilimitadas
            -4 fotos 1 palabra hack apk monedas infinitas
            -4 fotos 1 palabra descargar apk monedas infinitas
            -4 fotos 1 palabra apk monedas infinitas android
            -4 fotos 1 palabra apk monedas infinitas gratis
            -4 fotos 1 palabra apk monedas infinitas ultima version
            -4 fotos 1 palabra apk monedas infinitas sin root
            -4 fotos 1 palabra apk monedas infinitas mega
            -4 fotos 1 palabra apk monedas infinitas mediafire
            -4 fotos 1 palabra apk monedas infinitas español
            -4 fotos 1 palabra apk monedas infinitas trucos
            -4 fotos 1 palabra apk monedas infinitas soluciones
            -4 fotos 1 palabra apk monedas infinitas nivel
            -4 fotos 1 palabra apk monedas infinitas online
            -4 fotos 1 palabra apk monedas infinitas juego
            -4 fotos 1 palabra apk monedas infinitas descargar gratis
            -4 fotos 1 palabra apk monedas infinitas full
            -4 fotos 1 palabra apk monedas infinitas premium
            -4 fotos 1 palabra apk monedas infinitas actualizado
            -4 fotos 1 palabra apk monedas infinitas facil
            -4 fotos 1 palabra apk monedas infinitas tutorial
            -4 fotos 1 palabra apk monedas infinitas youtube
            -4 fotos 1 palabra apk monedas infinitas video
            -4 fotos 1 palabra apk monedas infinitas sin internet
            -4 fotos 1 palabra apk monedas infinitas offline
            -como tener monedas infinitas en 4 fotos 1 palabra apk
            -como descargar 4 fotos 1 palabra con monedas infinitas apk

            -

            Los pasos para la instalación

            -

            Para descargar e instalar 4 fotos 1 palabra apk monedas infinitas, debes seguir estos pasos:

            -
              -
            1. Descarga el archivo apk del juego desde un sitio web confiable, como Sbenny.com o Gamecoins.codes. Puedes usar el buscador de tu navegador o hacer clic en los enlaces que te proporcionamos.
            2. -
            3. Una vez descargado el archivo apk, debes habilitar la opción de "Orígenes desconocidos" en los ajustes de seguridad de tu dispositivo. Esto te permitirá instalar aplicaciones que no provienen de la tienda oficial de Google Play.
            4. -
            5. Busca el archivo apk en la carpeta de descargas de tu dispositivo y haz clic en él para iniciar la instalación. Sigue las instrucciones que aparecen en la pantalla y espera a que se complete el proceso.
            6. -
            7. Cuando la instalación haya finalizado, podrás ver el icono del juego en el menú de aplicaciones de tu dispositivo. Haz clic en él para abrir el juego y disfrutar de las monedas infinitas.
            8. -
            -

            Las precauciones de seguridad

            -

            Aunque descargar e instalar 4 fotos 1 palabra apk monedas infinitas es un proceso fácil y rápido, debes tener en cuenta algunas precauciones de seguridad para evitar problemas o riesgos. Estas son algunas recomendaciones que te hacemos:

            -
              -
            • Antes de descargar el archivo apk, verifica que el sitio web sea seguro y confiable. Puedes leer los comentarios de otros usuarios o usar un antivirus para escanear el archivo.
            • -
            • No descargues el archivo apk desde enlaces sospechosos o publicidad engañosa. Pueden contener virus o malware que dañen tu dispositivo o roben tu información personal.
            • -
            • No compartas el archivo apk con otras personas o lo subas a otras plataformas. Puedes violar los derechos de autor del juego o exponerte a demandas legales.
            • -
            • No uses el juego para fines ilegales o inmorales. Respeta las normas y las políticas del juego y no hagas trampa o abuses de las monedas infinitas.
            • -
            -

            Siguiendo estas precauciones, podrás descargar e instalar 4 fotos 1 palabra apk monedas infinitas sin problemas y disfrutar del juego con seguridad.

            ¿Cómo jugar 4 fotos 1 palabra apk monedas infinitas?

            -

            Ahora que ya tienes instalado el juego en tu dispositivo, es hora de empezar a jugar y a divertirte. El juego es muy fácil de usar y de entender, pero te vamos a explicar algunos aspectos básicos que debes saber para jugar correctamente.

            -

            El modo de juego básico

            -

            El modo de juego básico consiste en ver cuatro fotos en la pantalla y escribir la palabra que las relaciona. Para ello, debes usar las letras que aparecen en la parte inferior de la pantalla. Puedes arrastrar las letras hasta el espacio vacío o hacer clic en ellas. Si aciertas la palabra, pasarás al siguiente nivel y ganarás monedas. Si te equivocas, perderás una vida y tendrás que intentarlo de nuevo.

            -

            Las opciones de ayuda y las monedas

            -

            Si te quedas atascado en algún nivel y no sabes la palabra, no te preocupes, porque tienes varias opciones de ayuda para resolverlo. Estas son las opciones que puedes usar:

            -
              -
            • Borrar letras: Esta opción te permite eliminar algunas de las letras que no forman parte de la palabra. Te costará 90 monedas usarla.
            • -
            • Revelar letra: Esta opción te permite revelar una de las letras de la palabra. Te costará 60 monedas usarla.
            • -
            • Resolver: Esta opción te permite resolver el nivel directamente y pasar al siguiente. Te costará 150 monedas usarla.
            • -
            -

            Como ves, estas opciones de ayuda te facilitan el juego, pero también tienen un costo en monedas. Por eso, es importante que administres bien tus monedas y las uses con moderación. Pero no te preocupes, porque al descargar 4 fotos 1 palabra apk monedas infinitas, tendrás acceso a un número ilimitado de monedas para usar en el juego. Así, podrás usar las opciones de ayuda siempre que quieras y sin límites.

            -

            Los trucos y consejos para avanzar

            -

            Aunque el juego es muy divertido y entretenido, también puede ser muy desafiante y frustrante en algunos niveles. Por eso, te vamos a dar algunos trucos y consejos para que puedas avanzar más rápido y fácilmente en el juego. Estos son algunos de ellos:

            -
              -
            • Mira bien las fotos: A veces, la clave está en los detalles. Fíjate bien en los colores, las formas, los objetos, las personas y los animales que aparecen en las fotos. Intenta encontrar el elemento común que las une.
            • -
            • Piensa en sinónimos: A veces, la palabra que buscas no es la más obvia o la más común. Piensa en otras formas de decir lo mismo o en palabras relacionadas con el tema. Por ejemplo, si ves una foto de un avión, otra de un pájaro, otra de una cometa y otra de un globo, la palabra puede ser "volar" o "aire".
            • -
            • Usa las pistas: Si no tienes ni idea de la palabra, usa las pistas que te ofrece el juego. Puedes borrar letras, revelar letras o resolver el nivel directamente. Recuerda que al descargar 4 fotos 1 palabra apk monedas infinitas, tendrás monedas ilimitadas para usar estas pistas.
            • -
            • Pide ayuda: Si ninguna de las anteriores opciones te funciona, puedes pedir ayuda a tus amigos o familiares. Puedes compartir el nivel en tus redes sociales o enviarlo por mensaje a alguien que creas que pueda saber la respuesta. Seguro que entre todos podéis encontrar la solución.
            • -
            -

            Siguiendo estos trucos y consejos, podrás jugar a 4 fotos 1 palabra apk monedas infinitas con más facilidad y diversión.

            -

            Conclusión

            -

            En este artículo, te hemos contado todo lo que necesitas saber sobre 4 fotos 1 palabra apk monedas infinitas: qué es, cómo descargarlo y cómo jugarlo. Esperamos que te haya gustado y que te animes a probar este juego tan popular y adictivo.

            -

            Resumen de los puntos principales

            -

            Estos son los puntos principales que hemos visto en este artículo:

            -
              -
            • 4 fotos 1 palabra es un juego de adivinanzas que te reta a encontrar la palabra que tienen en común cuatro fotos.
            • -
            • El juego tiene más de 3000 niveles y varias categorías temáticas para que nunca te aburras.
            • -
            • El juego es beneficioso para tu mente, ya que ejercitas tu memoria, tu atención, tu vocabulario y tu lógica.
            • -
            • Puedes descargar la versión modificada del juego, que te ofrece monedas infinitas para usar en las pistas y las ayudas.
            • -
            • Para descargar el juego, debes cumplir con los requisitos del dispositivo, seguir los pasos para la instalación y tener en cuenta las precauciones de seguridad.
            • -
            • Para jugar al juego, debes usar las letras que aparecen en la pantalla para escribir la palabra que relaciona las cuatro fotos. Si te quedas atascado, puedes usar las opciones de ayuda o pedir ayuda a tus amigos.
            • -
            -

            Llamada a la acción

            -

            ¿A qué esperas para descargar 4 fotos 1 palabra apk monedas infinitas y empezar a jugar? Es un juego muy divertido, educativo y adictivo que te hará pasar horas de entretenimiento. No te arrepentirás de probarlo. Descárgalo ahora y disfruta de las monedas infinitas. ¡Te aseguramos que no podrás parar de jugar!

            -

            Preguntas frecuentes

            -

            A continuación, te presentamos algunas preguntas frecuentes que pueden surgirte sobre 4 fotos 1 palabra apk monedas infinitas:

            -

            ¿Es legal descargar 4 fotos 1 palabra apk monedas infinitas?

            -

            Descargar 4 fotos 1 palabra apk monedas infinitas no es ilegal, pero tampoco es ético ni justo. Al descargar esta versión modificada del juego, estás violando los derechos de autor del creador original y perjudicando su trabajo. Además, estás alterando el equilibrio y la dificultad del juego, lo que puede restarle diversión y emoción. Por eso, te recomendamos que descargues la versión oficial del juego desde Google Play y que juegues de forma honesta y respetuosa.

            -

            ¿Es seguro descargar 4 fotos 1 palabra apk monedas infinitas?

            -

            Descargar 4 fotos 1 palabra apk monedas infinitas puede ser seguro si lo haces desde un sitio web confiable y sigues las precauciones de seguridad que te hemos indicado. Sin embargo, también puede ser riesgoso si lo haces desde un sitio web sospechoso o si no verificas el archivo apk antes de instalarlo. Puedes exponerte a virus o malware que dañen tu dispositivo o roben tu información personal. Por eso, te aconsejamos que seas cuidadoso y prudente al descargar e instalar el juego.

            -

            ¿Cómo puedo conseguir más monedas en 4 fotos 1 palabra?

            -

            Si quieres conseguir más monedas en 4 fotos 1 palabra, tienes varias opciones. La primera es acertar las palabras y pasar los niveles, lo que te dará una recompensa en monedas. La segunda es ver vídeos publicitarios o completar ofertas dentro del juego, lo que te dará una cantidad variable de monedas. La tercera es comprar monedas con dinero real dentro del juego, lo que te dará una cantidad fija de monedas. Y la cuarta es descargar 4 fotos 1 palabra apk monedas infinitas, lo que te dará un número ilimitado de monedas.

            -

            ¿Cómo puedo cambiar el idioma en 4 fotos 1 palabra?

            -

            Si quieres cambiar el idioma en 4 fotos 1 palabra, tienes que seguir estos pasos:

            -
              -
            1. Abre el juego y haz clic en el icono de ajustes que aparece en la parte superior derecha de la pantalla.
            2. -
            3. Haz clic en el icono de idioma que aparece en la parte inferior izquierda de la pantalla.
            4. -
            5. Selecciona el idioma que prefieras entre las opciones disponibles.
            6. -
            7. Haz clic en el botón de confirmar que aparece en la parte inferior derecha de la pantalla.
            8. -
            9. Disfruta del juego en el idioma que hayas elegido.
            10. -
            -


            ¿Qué hacer si el juego no funciona o se cierra?

            -

            Si el juego no funciona o se cierra de forma inesperada, puede deberse a varios motivos. Algunos de los más comunes son:

            -
              -
            • El juego no es compatible con tu dispositivo o tu versión de Android.
            • -
            • El juego no está actualizado a la última versión disponible.
            • -
            • El juego está dañado o corrupto por algún virus o malware.
            • -
            • El juego tiene algún error o bug que impide su correcto funcionamiento.
            • -
            • El juego tiene algún conflicto con otras aplicaciones o procesos que están ejecutándose en tu dispositivo.
            • -
            -

            Para solucionar estos problemas, puedes intentar algunas de estas soluciones:

            -
              -
            • Verifica que tu dispositivo cumpla con los requisitos mínimos para instalar y jugar al juego.
            • -
            • Verifica que el juego esté actualizado a la última versión disponible. Puedes hacerlo desde Google Play o desde el sitio web oficial del juego.
            • -
            • Verifica que el archivo apk que has descargado e instalado sea seguro y confiable. Puedes usar un antivirus para escanear el archivo o descargarlo de nuevo desde un sitio web diferente.
            • -
            • Verifica que el juego no tenga ningún error o bug que afecte su rendimiento. Puedes consultar las reseñas de otros usuarios o contactar con el soporte técnico del juego para reportar el problema.
            • -
            • Verifica que el juego no tenga ningún conflicto con otras aplicaciones o procesos que estén ejecutándose en tu dispositivo. Puedes cerrar las aplicaciones que no estés usando o reiniciar tu dispositivo para liberar memoria y recursos.
            • -
            -

            Si ninguna de estas soluciones te funciona, puedes desinstalar el juego y volver a instalarlo desde cero. O puedes optar por descargar la versión oficial del juego desde Google Play y jugar sin monedas infinitas.

            -
            -
            \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Gladiator True Story and Unleash Your Inner Warrior. Based on Real Events and Characters..md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Gladiator True Story and Unleash Your Inner Warrior. Based on Real Events and Characters..md deleted file mode 100644 index 10f208a3715cac5a22214ad85db8f59b3b8f7e52..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Gladiator True Story and Unleash Your Inner Warrior. Based on Real Events and Characters..md +++ /dev/null @@ -1,210 +0,0 @@ -
            -

            Gladiator True Story Download: How to Watch the Epic Historical Drama Online

            -

            Gladiator is one of the most popular and acclaimed movies of all time. It tells the story of Maximus, a Roman general who becomes a gladiator after being betrayed by Commodus, the son of Emperor Marcus Aurelius. Maximus seeks revenge on Commodus and tries to restore the true heir to the throne. But how much of this story is based on real history? And how can you watch this epic historical drama online legally and safely? In this article, we will answer these questions and more.

            -

            gladiator true story download


            DOWNLOAD: https://ssurll.com/2uNSx4



            -

            What is Gladiator and why is it popular?

            -

            Gladiator is a 2000 movie directed by Ridley Scott and starring Russell Crowe as Maximus, Joaquin Phoenix as Commodus, Connie Nielsen as Lucilla, Oliver Reed as Proximo, Richard Harris as Marcus Aurelius, Derek Jacobi as Gracchus, Djimon Hounsou as Juba, David Schofield as Falco, John Shrapnel as Gaius, Tomas Arana as Quintus, Ralf Moeller as Hagen, Spencer Treat Clark as Lucius, David Hemmings as Cassius, Tommy Flanagan as Cicero, Sven-Ole Thorsen as Tigris, Omid Djalili as Slave Trader, Nicholas McGaughey as Praetorian Officer, Chris Kell as Scribe, Tony Curran as Assassin #1, Mark Lewis as Assassin #2, John Quinn as Valerius, Alun Raglan as Praetorian Guard #1, David Bailie as Engineer, Chick Allan as German Leader, David Nicholls as Giant Man, Al Hunter Ashton as Rome Trainer #1, Billy Dowd as Narrator, Ray Calleja as Lucius' Attendant, Giannina Facio as Maximus' Wife, Giorgio Cantarini as Maximus' Son.

            -

            The plot and the cast of Gladiator

            -

            The movie begins in 180 AD, when Maximus leads the Roman army to victory against the Germanic tribes at Vindobona. He is a loyal and respected general, who is loved by the emperor Marcus Aurelius. The emperor reveals to Maximus that he wants him to succeed him as the protector of Rome, instead of his son Commodus, who is corrupt and power-hungry. However, Commodus finds out about his father's plan and kills him, then orders Maximus and his family to be executed. Maximus escapes, but arrives too late to save his wife and son, who are burned and crucified by Commodus' soldiers. Maximus is captured by slave traders and sold to Proximo, a former gladiator who trains him to fight in the arena. Maximus proves to be a skilled and charismatic fighter, who earns the nickname "Spaniard" and the admiration of the crowds. He also makes friends with other gladiators, such as Juba, a Numidian hunter, and Hagen, a Germanic warrior.

            -

            Meanwhile, Commodus returns to Rome and declares himself the new emperor, despite the opposition of the Senate and the people. He also tries to seduce his sister Lucilla, who is disgusted by his incestuous and tyrannical behavior. Lucilla is secretly in contact with Gracchus, a senator who leads a conspiracy to overthrow Commodus and restore democracy. They learn about Maximus' survival and identity, and plan to use him as a symbol of resistance. They arrange for Maximus to be transferred to Rome, where he will fight in the Colosseum during the games that Commodus organizes to celebrate his rule. Commodus hopes to win the favor of the masses by staging spectacular and bloody shows, but he is unaware that Maximus is among the gladiators.

            -

            In Rome, Maximus faces various opponents and challenges, such as wild animals, chariots, archers, and masked warriors. He also encounters Commodus in person, who recognizes him and tries to kill him secretly. However, Maximus survives and reveals his identity to the stunned audience, who cheer for him as "Maximus the Merciful". Commodus is enraged and fearful, but he cannot kill Maximus publicly without losing his popularity. He decides to keep him alive and humiliate him in the arena. He also increases his oppression of the Senate and the people, who grow more discontented and rebellious.

            -

            Maximus receives a wooden figurine of his family from Juba, who tells him that they are waiting for him in the afterlife. He also reunites with Lucilla, who offers him her help and sympathy. She tells him that her son Lucius is also a fan of his, and that he is in danger of being killed by Commodus if he discovers their plot. Maximus agrees to join the conspiracy and escape from Rome with Lucilla and Lucius. He also asks Proximo to help him by providing weapons and horses for him and his fellow gladiators.

            -

            gladiator true story android game
            -gladiator true story app on google play
            -gladiator true story gameloop pc
            -gladiator true story xform games
            -gladiator true story hack n slash
            -gladiator true story new swords
            -gladiator true story fire powerups
            -gladiator true story blood and gore
            -gladiator true story hidden skulls
            -gladiator true story trophies
            -gladiator true story ratings and reviews
            -gladiator true story bug fixes
            -gladiator true story visual overhaul
            -gladiator true story 64-bit support
            -gladiator true story offline mode
            -gladiator true story data safety
            -gladiator true story mature 17+
            -gladiator true story historical facts
            -gladiator true story ultimate fighters
            -gladiator true story champions
            -gladiator true story armor and weapons
            -gladiator true story bloody battle arena
            -gladiator true story extreme violence
            -gladiator true story swordfighting mercenary
            -gladiator true story dinosaur hugo
            -download gladiator true story apk
            -download gladiator true story mod apk
            -download gladiator true story latest version
            -download gladiator true story for free
            -download gladiator true story on pc with gameloop
            -download gladiator true story from play store
            -download gladiator true story update 2023
            -download gladiator true story full game
            -download gladiator true story cheats and tips
            -download gladiator true story gameplay video
            -the real history of the gladiators national geographic
            -the real history of the gladiators 2023 article
            -the real history of the gladiators research findings
            -the real history of the gladiators survival rate
            -the real history of the gladiators ring rules
            -the real history of the gladiators social status
            -the real history of the gladiators diet and health
            -the real history of the gladiators weapons and armor
            -the real history of the gladiators training and education
            -the real history of the gladiators types and classes

            -

            The awards and accolades that Gladiator received

            -

            Gladiator was a huge success both critically and commercially. It received rave reviews from critics and audiences alike, who praised its direction, acting, screenplay, cinematography, music, editing, production design, visual effects, sound effects, costume design, makeup, and stunts. It grossed over $460 million worldwide against a budget of $103 million, making it one of the highest-grossing movies of 2000. It also won numerous awards and honors, including:

            -
              -
            • Five Academy Awards: Best Picture, Best Actor (Russell Crowe), Best Costume Design (Janty Yates), Best Sound (Scott Millan, Bob Beemer, and Ken Weston), and Best Visual Effects (John Nelson, Neil Corbould, Tim Burke, and Rob Harvey)
            • -
            • Four BAFTA Awards: Best Film, Best Cinematography (John Mathieson), Best Editing (Pietro Scalia), and Best Production Design (Arthur Max)
            • -
            • Two Golden Globe Awards: Best Motion Picture – Drama and Best Original Score (Hans Zimmer and Lisa Gerrard)
            • -
            • Two Grammy Awards: Best Score Soundtrack Album for a Motion Picture, Television or Other Visual Media and Best Male Pop Vocal Performance ("Now We Are Free" by Hans Zimmer and Lisa Gerrard featuring Klaus Badelt and Yvonne S. Moriarty)
            • -
            • One MTV Movie Award: Best Action Sequence (The Battle of Carthage)
            • -
            • One Screen Actors Guild Award: Outstanding Performance by a Male Actor in a Leading Role (Russell Crowe)
            • -
            • One American Film Institute Award: AFI Movie of the Year
            • -
            -

            In addition, Gladiator was nominated for many other awards and honors, such as the British Academy Film Award for Best Actor (Russell Crowe), the Academy Award for Best Director (Ridley Scott), the Golden Globe Award for Best Director (Ridley Scott), the Golden Globe Award for Best Actor – Motion Picture Drama (Russell Crowe), the Screen Actors Guild Award for Outstanding Performance by a Cast in a Motion Picture, the American Film Institute Award for AFI's 100 Years...100 Heroes and Villains (Maximus as a hero and Commodus as a villain), and the American Film Institute Award for AFI's 100 Years...100 Movies – 10th Anniversary Edition.

            What is the true story behind Gladiator and how accurate is it?

            -

            Gladiator is not a documentary, but a fictional story inspired by real historical events and characters. The movie draws from various sources and influences, such as ancient Roman history, literature, art, and mythology, as well as modern movies, novels, and comics. The movie also takes some creative liberties and artistic license, such as changing some facts, adding some elements, or omitting some details. The movie aims to create a compelling and dramatic narrative, rather than a faithful and accurate representation of history.

            -

            The historical sources and inspirations for Gladiator

            -

            The movie is based on several historical figures and events from the late 2nd century AD, during the reigns of Marcus Aurelius and Commodus. Some of them are:

            -
              -
            • Marcus Aurelius: He was the last of the "Five Good Emperors" who ruled Rome from 161 to 180 AD. He was a philosopher, a stoic, and a writer of the Meditations. He fought against various enemies, such as the Parthians, the Marcomanni, the Quadi, and the Sarmatians. He died of natural causes in Vindobona (modern Vienna) in 180 AD. He was succeeded by his son Commodus, whom he had co-ruled with since 177 AD.
            • -
            • Commodus: He was the son of Marcus Aurelius and Faustina the Younger. He ruled Rome from 180 to 192 AD. He was a megalomaniac, a narcissist, and a tyrant. He neglected his duties as emperor and indulged in his pleasures and whims. He fought as a gladiator in the Colosseum, where he claimed to be Hercules reborn. He also renamed Rome as Colonia Commodiana (Colony of Commodus) and changed the names of the months, the legions, and the Senate after himself. He was assassinated by a conspiracy led by his mistress Marcia, his chamberlain Eclectus, and his praetorian prefect Quintus Aemilius Laetus.
            • -
            • The gladiator games: They were public spectacles that involved combat between armed men or animals in an arena. They originated from ancient funeral rites and became popular entertainment in Rome. They were sponsored by emperors, magistrates, or wealthy citizens to celebrate victories, festivals, or funerals. They were held in various venues, such as amphitheaters, circuses, stadiums, or theaters. The most famous venue was the Colosseum, which could hold up to 50,000 spectators. The gladiators were mostly slaves, prisoners of war, criminals, or volunteers. They were trained in special schools called ludus. They fought with various weapons and armor, such as swords, spears, shields, nets, tridents, daggers, helmets, and breastplates. They belonged to different types or classes of gladiators, such as murmillo, thraex, secutor, retiarius, hoplomachus, provocator, dimachaerus, and essedarius. They fought against each other or against wild animals, such as lions, tigers, bears, elephants, rhinoceroses, crocodiles, and ostriches. They could win fame, fortune, or freedom if they survived or impressed the crowd or the sponsor. They could also die or be killed if they lost or displeased them.
            • -
            -

            The movie also incorporates some fictional elements and creative liberties, such as:

            -
              -
            • Maximus: He is a fictional character created for the movie. He is loosely based on various historical figures, such as Narcissus (the wrestler who killed Commodus), Spartacus (the gladiator who led a slave revolt), Cincinnatus (the general who became a dictator and then returned to his farm), and Marcus Nonius Macrinus (a general and consul under Marcus Aurelius).
            • -
            • Proximo: He is also a fictional character created for the movie. He is loosely based on various historical figures, such as Gaius Appuleius Diocles (a famous chariot racer who became rich and retired), Marcus Attilius (a freeborn gladiator who fought in Pompeii), and Spiculus (a gladiator who was favored by Nero).
            • -
            • The slave rebellion: It is a fictional event created for the movie. It is loosely based on various historical events, such as the Third Servile War (the slave revolt led by Spartacus), the Year of the Four Emperors (the civil war that followed the death of Nero), and the Marcomannic Wars (the wars fought by Marcus Aurelius against the Germanic tribes).
            • -
            -

            The historical accuracy and inaccuracies of Gladiator

            -

            The movie depicts some aspects of ancient Rome accurately, such as:

            -
              -
            • The costumes: The movie used authentic materials and designs for the costumes of the characters, such as the tunics, the togas, the cloaks, the sandals, the armor, and the helmets. The movie also used accurate colors and symbols for the costumes, such as the purple for the emperor, the red for the soldiers, and the SPQR for the Senate and the People of Rome.
            • -
            • The architecture: The movie recreated some of the most iconic buildings and monuments of ancient Rome, such as the Colosseum, the Forum, the Pantheon, the Arch of Constantine, and the Aurelian Walls. The movie also used accurate models and measurements for the buildings and monuments, such as the height, the width, and the shape.
            • -
            • The politics: The movie portrayed some of the political issues and conflicts that existed in ancient Rome, such as the tension between the emperor and the Senate, the corruption and decadence of the elite, the oppression and poverty of the masses, and the threat of external enemies.
            • -
            -

            The movie also contains some errors and anachronisms, such as:

            -
              -
            • The timeline: The movie compressed or altered some of the chronological events that occurred in ancient Rome, such as the death of Marcus Aurelius (which happened in 180 AD, not 181 AD), the accession of Commodus (which happened peacefully, not violently), and the assassination of Commodus (which happened in 192 AD, not 180 AD).
            • -
            • The geography: The movie misplaced or invented some of the geographical locations that were involved in ancient Rome, such as Zucchabar (which was a city in North Africa, not Spain), Germania (which was a region in Central Europe, not a country), and Carthage (which was a city in North Africa, not Italy).
            • -
            • The battles: The movie exaggerated or fabricated some of the military aspects of ancient Rome, such as the size and the equipment of the Roman army (which was smaller and less advanced than shown in the movie), the tactics and the strategies of the Roman army (which were more disciplined and organized than shown in the movie), and the outcome and the impact of the Roman battles (which were more decisive and influential than shown in the movie).
            • -
            -

            How can you download or stream Gladiator online legally and safely?

            -

            Downloading or streaming movies online can be a convenient and enjoyable way to watch your favorite films. However, it can also be a risky and unethical activity if you do not do it properly. There are many websites and platforms that offer movies for download or streaming, but not all of them are authorized, reliable, or secure. Some of them may be illegal, harmful, or fraudulent. Therefore, you need to be careful and responsible when you choose to download or stream movies online.

            -

            The legal and ethical issues of downloading or streaming movies online

            -

            Downloading or streaming movies from unauthorized sources can be illegal, unethical, and risky for several reasons, such as:

            -
              -
            • It can violate the intellectual property rights of the creators and the distributors of the movies, who have invested time, money, and effort to produce and distribute them. By downloading or streaming movies without their permission or compensation, you are stealing their work and depriving them of their deserved income.
            • -
            • It can harm the quality and the diversity of the movies, as it reduces the incentive and the resources for the creators and the distributors to make and offer more movies. By downloading or streaming movies without paying for them, you are discouraging them from creating and providing more movies that you may enjoy.
            • -
            • It can expose you to various threats and dangers, such as viruses, malware, spyware, phishing, identity theft, fraud, scams, lawsuits, fines, or imprisonment. By downloading or streaming movies from untrustworthy sources, you are risking your device, your data, your money, your reputation, or your freedom.
            • -
            -

            Downloading or streaming movies from authorized sources can be legal, ethical, and safe for several reasons, such as:

            -
              -
            • It can respect the intellectual property rights of the creators and the distributors of the movies, who have given their consent and received their payment for their work. By downloading or streaming movies with their permission and compensation, you are supporting their work and rewarding them for their contribution.
            • -
            • It can enhance the quality and the diversity of the movies, as it increases the motivation and the resources for the creators and the distributors to make and offer more movies. By downloading or streaming movies by paying for them, you are encouraging them to create and provide more movies that you may enjoy.
            • -
            • It can protect you from various threats and dangers, such as viruses, malware, spyware, phishing, identity theft, fraud, scams, lawsuits, fines, or imprisonment. By downloading or streaming movies from trustworthy sources, you are securing your device, your data, your money, your reputation, and your freedom.
            • -
            -

            The best options to download or stream Gladiator online legally and safely

            -

            There are many websites and platforms that offer Gladiator for download or streaming legally and safely. However, not all of them are equally convenient, affordable, or accessible. Therefore, you need to compare and contrast them to find the best option for you. Here is a table that summarizes some of the most popular and reliable platforms that offer Gladiator for download or streaming:

            Platform | Features | Prices | Quality | Availability
            Amazon Prime Video | Offers Gladiator for rent or purchase - Allows offline viewing - Includes subtitles and audio descriptions - Supports multiple devices and screens - Provides customer service and technical support | Rent: $3.99 (SD) or $4.99 (HD) - Purchase: $9.99 (SD) or $12.99 (HD) - Free trial: 30 days - Subscription: $12.99 per month or $119 per year | SD: 480p - HD: 1080p - 4K: Not available | US: Available - UK: Available - Canada: Available - Australia: Available - Other countries: May vary
            Google Play Movies | Offers Gladiator for rent or purchase - Allows offline viewing - Includes subtitles and audio descriptions - Supports multiple devices and screens - Provides customer service and technical support | Rent: $3.99 (SD) or $4.99 (HD) - Purchase: $9.99 (SD) or $12.99 (HD) - Free trial: Not available - Subscription: Not available | SD: 480p - HD: 1080p - 4K: Not available | US: Available - UK: Available - Canada: Available - Australia: Available - Other countries: May vary
            YouTube | Offers Gladiator for rent or purchase - Allows offline viewing - Includes subtitles and audio descriptions - Supports multiple devices and screens - Provides customer service and technical support | Rent: $3.99 (SD) or $4.99 (HD) - Purchase: $9.99 (SD) or $12.99 (HD) - Free trial: Not available - Subscription: Not available | SD: 480p - HD: 1080p - 4K: Not available | US: Available - UK: Available - Canada: Available - Australia: Available - Other countries: May vary
            Vudu | Offers Gladiator for rent or purchase - Allows offline viewing - Includes subtitles and audio descriptions - Supports multiple devices and screens - Provides customer service and technical support | Rent: $3.99 (SD) or $4.99 (HD) - Purchase: $9.99 (SD) or $12.99 (HD) - Free trial: Not available - Subscription: Not available | SD: 480p - HD: 1080p - 4K: Not available | US: Available - UK: Not available - Canada: Not available - Australia: Not available - Other countries: Not available
            Microsoft Store | Offers Gladiator for rent or purchase - Allows offline viewing - Includes subtitles and audio descriptions - Supports multiple devices and screens - Provides customer service and technical support | Rent: $3.99 (SD) or $4.99 (HD) - Purchase: $9.99 (SD) or $12.99 (HD) - Free trial: Not available - Subscription: Not available | SD: 480p - HD: 1080p - 4K: Not available | US: Available - UK: Available - Canada: Available - Australia: Available - Other countries: May vary
            Redbox | Offers Gladiator for rent or purchase - Allows offline viewing - Includes subtitles and audio descriptions - Supports multiple devices and screens - Provides customer service and technical support | Rent: $3.99 (SD) or $4.99 (HD) - Purchase: $9.99 (SD) or $12.99 (HD) - Free trial: Not available - Subscription: Not available | SD: 480p - HD: 1080p - 4K: Not available | US: Available - UK: Not available - Canada: Not available - Australia: Not available - Other countries: Not available
            Apple TV | Offers Gladiator for rent or purchase - Allows offline viewing - Includes subtitles and audio descriptions - Supports multiple devices and screens - Provides customer service and technical support | Rent: $3.99 (SD) or $4.99 (HD) - Purchase: $9.99 (SD) or $12.99 (HD) - Free trial: 7 days - Subscription: $4.99 per month | SD: 480p - HD: 1080p - 4K: Available | US: Available - UK: Available - Canada: Available - Australia: Available - Other countries: May vary
            DIRECTV | Offers Gladiator for rent or purchase - Allows offline viewing - Includes subtitles and audio descriptions - Supports multiple devices and screens - Provides customer service and technical support | Rent: $3.99 (SD) or $4.99 (HD) - Purchase: $9.99 (SD) or $12.99 (HD) - Free trial: Not available - Subscription: Varies depending on the plan | SD: 480p - HD: 1080p - 4K: Not available | US: Available - UK: Not available - Canada: Not available - Australia: Not available - Other countries: Not available
            AMC on Demand | Offers Gladiator for rent or purchase - Allows offline viewing - Includes subtitles and audio descriptions - Supports multiple devices and screens - Provides customer service and technical support | Rent: $3.99 (SD) or $4.99 (HD) - Purchase: $9.99 (SD) or $12.99 (HD) - Free trial: Not available - Subscription: Varies depending on the plan | SD: 480p - HD: 1080p - 4K: Not available | US: Available - UK: Not available - Canada: Not available - Australia: Not available - Other countries: Not available
            Pluto TV | Offers Gladiator for free streaming - Does not allow offline viewing - Includes subtitles and audio descriptions - Supports multiple devices and screens - Provides customer service and technical support | Rent: Not available - Purchase: Not available - Free trial: Not available - Subscription: Not available | SD: 480p - HD: 720p - 4K: Not available | US: Available - UK: Available - Canada: Available - Australia: Available - Other countries: May vary
            -

            As you can see, there are many options to download or stream Gladiator online legally and safely. However, the best option for you may depend on your preferences, budget, and location. Therefore, you should do some research and comparison before you decide which platform to use. You should also check the availability and the quality of the movie on each platform, as they may change over time.

            -

            Conclusion

            -

            Gladiator is a movie that has captivated millions of viewers around the world with its thrilling and inspiring story of a Roman general who becomes a gladiator and fights for his freedom and revenge. The movie is based on real historical events and characters, but it also contains some fictional elements and creative liberties. The movie is not a documentary, but a fictional story inspired by history. The movie also offers a stunning and realistic portrayal of ancient Rome, but it also has some errors and anachronisms. The movie is not a history lesson, but a historical drama.

            -

            If you want to watch Gladiator online legally and safely, you have many options to choose from. You can rent or purchase the movie from various platforms, such as Amazon Prime Video, Google Play Movies, YouTube, Vudu, Microsoft Store, Redbox, Apple TV, DIRECTV, AMC on Demand, etc. You can also stream the movie for free from Pluto TV. However, you should be careful and responsible when you download or stream movies online, as not all sources are authorized, reliable, or secure. You should respect the intellectual property rights of the creators and the distributors of the movies, as well as protect yourself from various threats and dangers.

            -

            We hope that this article has helped you learn more about Gladiator and how to watch it online legally and safely. If you have any questions or comments, please feel free to share them in the comments section below. We would love to hear from you.

            -

            FAQs

            -

            Here are some of the most frequently asked questions about Gladiator:

            -

            Q: Who is the real hero of Gladiator?

            -

            A: The real hero of Gladiator is Maximus, the fictional character played by Russell Crowe. He is a brave, loyal, and honorable general who becomes a gladiator after being betrayed by Commodus, the son of Emperor Marcus Aurelius. He seeks revenge on Commodus and tries to restore the true heir to the throne. He also inspires and leads a slave rebellion against the tyranny of Commodus. He is a symbol of courage, justice, and freedom for the people of Rome.

            -

            Q: Is Gladiator based on a true story?

            -

            A: Gladiator is based on some true historical events and characters, but it also contains some fictional elements and creative liberties. The movie is not a documentary, but a fictional story inspired by history. The movie draws from various sources and influences, such as ancient Roman history, literature, art, and mythology, as well as modern movies, novels, and comics. The movie also takes some artistic license and changes some facts, adds some elements, or omits some details. The movie aims to create a compelling and dramatic narrative, rather than a faithful and accurate representation of history.

            -

            Q: How did Commodus die in real life?

            -

            A: Commodus died in 192 AD, after ruling Rome for 12 years. He was assassinated by a conspiracy led by his mistress Marcia, his chamberlain Eclectus, and his praetorian prefect Quintus Aemilius Laetus. They poisoned his food, but when he vomited it up, they sent his wrestling partner Narcissus to strangle him in his bath. His death marked the end of the Nerva–Antonine dynasty and the beginning of the Year of the Five Emperors.

            -

            Q: What does SPQR stand for?

            -

            A: SPQR stands for Senatus Populusque Romanus, which means "The Senate and the People of Rome" in Latin. It was the official motto of the Roman Republic and later the Roman Empire. It represented the political and social system of ancient Rome, where the Senate was the legislative and advisory body of the state, and the People was the collective term for the citizens of Rome. It also symbolized the unity and the sovereignty of Rome over its territories and provinces.

            -

            Q: What does "Now We Are Free" mean?

            -

            A: "Now We Are Free" is the title and the chorus of the song that plays at the end of Gladiator. It is sung by Lisa Gerrard in an invented language that has no meaning. However, it can be interpreted as a expression of Maximus' liberation from his earthly suffering and his reunion with his family in the afterlife. It can also be seen as a reflection of the emancipation of Rome from the oppression of Commodus and the restoration of democracy.

            -
            -
            \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free Fire Advance Server v66.04 APK - The Ultimate Guide for Android Users.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free Fire Advance Server v66.04 APK - The Ultimate Guide for Android Users.md deleted file mode 100644 index a1b2c4f189c51432b64b94c79ace8c5514c8fee8..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Free Fire Advance Server v66.04 APK - The Ultimate Guide for Android Users.md +++ /dev/null @@ -1,102 +0,0 @@ - -

            Free Fire Advance Server V66 04 APK Download for Android

            -

            Free Fire is one of the most popular battle royale games on mobile devices, with millions of players worldwide. The game is constantly updated with new features, modes, characters, weapons, and more. But what if you want to try out these new additions before they are officially released? Well, you can do that by downloading the Free Fire Advance Server APK for Android.

            -

            What is Free Fire Advance Server?

            -

            Free Fire Advance Server is a separate application that allows users to test the upcoming features and updates of Free Fire before they are launched in the global version. The developers use the Advance Server to gather feedback and suggestions from the players, as well as to identify and fix any bugs or glitches that may occur.

            -

            free fire advance server v66 04 apk download for android


            Download ✔✔✔ https://ssurll.com/2uNUj9



            -

            Features of Free Fire Advance Server

            -

            The Free Fire Advance Server has some exclusive features that are not available in the regular version of the game. Some of these features are:

            -
              -
            • A new character named Dasha, who has reduced recoil and fall damage.
            • -
            • A new pet named Rockie, who can reduce the cooldown time of active skills.
            • -
            • A new weapon called Vector Akimbo, which can fire two submachine guns at once.
            • -
            • A new mode called Training Grounds, where players can practice their skills and try out different weapons and items.
            • -
            • A new map called Bermuda Remastered, which has improved graphics and new locations.
            • -
            -

            How to download and install Free Fire Advance Server V66 04 APK

            -

            If you want to download and install the Free Fire Advance Server V66 04 APK for Android, you need to follow these steps:

            -
              -
            1. Visit the official website of Free Fire Advance Server.
            2. -
            3. Login with your Facebook account that is linked to your Free Fire account.
            4. -
            5. If you have registered for the Advance Server, you will see a download button on the homepage. Click on it to download the APK file.
            6. -
            7. If you have not registered for the Advance Server, you will need to fill out a form with your personal details and wait for approval from the developers. You will receive an email notification if you are selected.
            8. -
            9. After downloading the APK file, locate it on your device and tap on it to install it. You may need to enable the "Install from unknown sources" option in your settings.
            10. -
            11. Once the installation is complete, open the app and login with your Facebook account again.
            12. -
            13. Enjoy playing the Free Fire Advance Server and testing the new features.
            14. -
            -

            Benefits of using Free Fire Advance Server

            -

            There are many benefits of using the Free Fire Advance Server, such as:

            -

            Test new features and updates before official release

            -

            The main benefit of using the Free Fire Advance Server is that you can test the new features and updates before they are officially released in the global version. This way, you can get a sneak peek of what's coming next in the game and have a competitive edge over other players. You can also explore the new map, mode, character, pet, weapon, and more without spending any diamonds or coins.

            -

            Provide feedback and suggestions to developers

            -

            Another benefit of using the Free Fire Advance Server is that you can provide feedback and suggestions to the developers regarding the new features and updates. You can report any bugs or glitches that you encounter in the game, as well as share your opinions and ideas on how to improve the game. The developers will appreciate your input and may implement some of your suggestions in the future updates.Earn rewards and diamonds for reporting bugs -

            A third benefit of using the Free Fire Advance Server is that you can earn rewards and diamonds for reporting bugs in the game. The developers will reward you with diamonds for every valid bug report that you submit. You can use these diamonds to buy items and skins in the game. The more bugs you report, the more diamonds you earn. However, you need to be honest and accurate in your bug reports, as the developers will verify them before giving you the rewards.

            -

            Drawbacks of using Free Fire Advance Server

            -

            Despite the benefits, there are also some drawbacks of using the Free Fire Advance Server, such as:

            -

            Limited access and registration

            -

            One of the drawbacks of using the Free Fire Advance Server is that it has limited access and registration. The Advance Server is only available for a certain period of time before each update, and it can only accommodate a limited number of players. Therefore, not everyone can join the Advance Server, and you need to register and wait for approval from the developers. You also need to have a Facebook account that is linked to your Free Fire account to access the Advance Server.

            -

            Possible errors and glitches

            -

            Another drawback of using the Free Fire Advance Server is that it may have possible errors and glitches in the game. Since the Advance Server is a testing platform, it is not fully stable and optimized, and it may encounter some technical issues or bugs that affect the gameplay. For example, you may experience lag, crashes, freezes, or other problems while playing the game. You may also encounter some bugs or glitches that make the game unfair or unplayable.

            -


            -

            Data loss and compatibility issues

            -

            A third drawback of using the Free Fire Advance Server is that it may cause data loss and compatibility issues on your device. Since the Advance Server is a separate application from the regular version of Free Fire, it may not be compatible with your device or operating system, and it may require more storage space and battery power. Moreover, the data that you generate in the Advance Server may not be transferred or saved to your regular version of Free Fire, and you may lose your progress or items when you switch back to the global version.

            -

            Conclusion

            -

            In conclusion, the Free Fire Advance Server is a great way to test the new features and updates of Free Fire before they are officially released in the global version. You can enjoy playing with the new map, mode, character, pet, weapon, and more without spending any diamonds or coins. You can also provide feedback and suggestions to the developers and earn rewards and diamonds for reporting bugs. However, you need to be aware of the drawbacks of using the Advance Server, such as limited access and registration, possible errors and glitches, and data loss and compatibility issues. Therefore, you need to weigh the pros and cons before deciding whether to download and install the Free Fire Advance Server V66 04 APK for Android.

            -

            FAQs

            -
              -
            • Q: How can I register for the Free Fire Advance Server?
            • -
            • A: You can register for the Free Fire Advance Server by visiting its official website at [text] and logging in with your Facebook account that is linked to your Free Fire account. You will need to fill out a form with your personal details and wait for approval from the developers.
            • -
            • Q: How can I report bugs in the Free Fire Advance Server?
            • -
            • A: You can report bugs in the Free Fire Advance Server by tapping on the "Report" button on the top right corner of the screen. You will need to describe the bug in detail and attach a screenshot or video if possible. You will also need to provide your UID (user ID) and server name.
            • -
            • Q: How can I get diamonds in the Free Fire Advance Server?
            • -
            • A: You can get diamonds in the Free Fire Advance Server by reporting valid bugs in the game. The developers will reward you with diamonds for every bug report that you submit. You can use these diamonds to buy items and skins in the game.
            • -
            • Q: How can I switch back to the global version of Free Fire?
            • -
            • A: You can switch back to the global version of Free Fire by uninstalling or disabling the Free Fire Advance Server app on your device. You will then need to reinstall or enable the regular version of Free Fire from Google Play Store or App Store.
            • -
            • Q: How can I update the Free Fire Advance Server app?
            • -
            • A: You can update the Free Fire Advance Server app by visiting its official website at [text] and downloading the latest version of the APK file. You will then need to install it on your device and log in with your Facebook account again.


              197e85843d
              -
              -
              \ No newline at end of file diff --git a/spaces/sklkd93/CodeFormer/CodeFormer/basicsr/metrics/__init__.py b/spaces/sklkd93/CodeFormer/CodeFormer/basicsr/metrics/__init__.py deleted file mode 100644 index 19d55cc8321f124c918d78465b053aef67f13a33..0000000000000000000000000000000000000000 --- a/spaces/sklkd93/CodeFormer/CodeFormer/basicsr/metrics/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -from copy import deepcopy - -from basicsr.utils.registry import METRIC_REGISTRY -from .psnr_ssim import calculate_psnr, calculate_ssim - -__all__ = ['calculate_psnr', 'calculate_ssim'] - - -def calculate_metric(data, opt): - """Calculate metric from data and options. - - Args: - opt (dict): Configuration. It must constain: - type (str): Model type. - """ - opt = deepcopy(opt) - metric_type = opt.pop('type') - metric = METRIC_REGISTRY.get(metric_type)(**data, **opt) - return metric diff --git a/spaces/sklkd93/CodeFormer/CodeFormer/facelib/detection/__init__.py b/spaces/sklkd93/CodeFormer/CodeFormer/facelib/detection/__init__.py deleted file mode 100644 index 296262d4e2e29eaa2afba7bda1f0399d77da24f6..0000000000000000000000000000000000000000 --- a/spaces/sklkd93/CodeFormer/CodeFormer/facelib/detection/__init__.py +++ /dev/null @@ -1,100 +0,0 @@ -import os -import torch -from torch import nn -from copy import deepcopy - -from facelib.utils import load_file_from_url -from facelib.utils import download_pretrained_models -from facelib.detection.yolov5face.models.common import Conv - -from .retinaface.retinaface import RetinaFace -from .yolov5face.face_detector import YoloDetector - - -def init_detection_model(model_name, half=False, device='cuda'): - if 'retinaface' in model_name: - model = init_retinaface_model(model_name, half, device) - elif 'YOLOv5' in model_name: - model = init_yolov5face_model(model_name, device) - else: - raise NotImplementedError(f'{model_name} is not implemented.') - - return model - - -def init_retinaface_model(model_name, half=False, device='cuda'): - if model_name == 'retinaface_resnet50': - model = RetinaFace(network_name='resnet50', half=half) - model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth' - elif model_name == 'retinaface_mobile0.25': - model = RetinaFace(network_name='mobile0.25', half=half) - model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_mobilenet0.25_Final.pth' - else: - raise NotImplementedError(f'{model_name} is not implemented.') - - model_path = load_file_from_url(url=model_url, model_dir='weights/facelib', progress=True, file_name=None) - load_net = torch.load(model_path, map_location=lambda storage, loc: storage) - # remove unnecessary 'module.' 
- for k, v in deepcopy(load_net).items(): - if k.startswith('module.'): - load_net[k[7:]] = v - load_net.pop(k) - model.load_state_dict(load_net, strict=True) - model.eval() - model = model.to(device) - - return model - - -def init_yolov5face_model(model_name, device='cuda'): - if model_name == 'YOLOv5l': - model = YoloDetector(config_name='facelib/detection/yolov5face/models/yolov5l.yaml', device=device) - model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/yolov5l-face.pth' - elif model_name == 'YOLOv5n': - model = YoloDetector(config_name='facelib/detection/yolov5face/models/yolov5n.yaml', device=device) - model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/yolov5n-face.pth' - else: - raise NotImplementedError(f'{model_name} is not implemented.') - - model_path = load_file_from_url(url=model_url, model_dir='weights/facelib', progress=True, file_name=None) - load_net = torch.load(model_path, map_location=lambda storage, loc: storage) - model.detector.load_state_dict(load_net, strict=True) - model.detector.eval() - model.detector = model.detector.to(device).float() - - for m in model.detector.modules(): - if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: - m.inplace = True # pytorch 1.7.0 compatibility - elif isinstance(m, Conv): - m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility - - return model - - -# Download from Google Drive -# def init_yolov5face_model(model_name, device='cuda'): -# if model_name == 'YOLOv5l': -# model = YoloDetector(config_name='facelib/detection/yolov5face/models/yolov5l.yaml', device=device) -# f_id = {'yolov5l-face.pth': '131578zMA6B2x8VQHyHfa6GEPtulMCNzV'} -# elif model_name == 'YOLOv5n': -# model = YoloDetector(config_name='facelib/detection/yolov5face/models/yolov5n.yaml', device=device) -# f_id = {'yolov5n-face.pth': '1fhcpFvWZqghpGXjYPIne2sw1Fy4yhw6o'} -# else: -# raise NotImplementedError(f'{model_name} is not implemented.') - -# model_path = os.path.join('weights/facelib', list(f_id.keys())[0]) -# if not os.path.exists(model_path): -# download_pretrained_models(file_ids=f_id, save_path_root='weights/facelib') - -# load_net = torch.load(model_path, map_location=lambda storage, loc: storage) -# model.detector.load_state_dict(load_net, strict=True) -# model.detector.eval() -# model.detector = model.detector.to(device).float() - -# for m in model.detector.modules(): -# if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: -# m.inplace = True # pytorch 1.7.0 compatibility -# elif isinstance(m, Conv): -# m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility - -# return model \ No newline at end of file diff --git a/spaces/skylarx2x/openai-reverse-proxy/README.md b/spaces/skylarx2x/openai-reverse-proxy/README.md deleted file mode 100644 index c920483af8dc2c02d7e58e278387ede5ebebf719..0000000000000000000000000000000000000000 --- a/spaces/skylarx2x/openai-reverse-proxy/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Openai Reverse Proxy -emoji: 📚 -colorFrom: green -colorTo: pink -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/sonoisa/irasuto_search/README.md b/spaces/sonoisa/irasuto_search/README.md deleted file mode 100644 index de16ceca4ae906cb248ed70e10098253466c2376..0000000000000000000000000000000000000000 --- a/spaces/sonoisa/irasuto_search/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Irasuto_search -emoji: 🏃 -colorFrom: 
green -colorTo: yellow -sdk: streamlit -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/sradc/visual-content-search-over-videos/pipeline/get_video_ids.py b/spaces/sradc/visual-content-search-over-videos/pipeline/get_video_ids.py deleted file mode 100644 index 67773398cd04233640fc2f0e1625b1b1be504b34..0000000000000000000000000000000000000000 --- a/spaces/sradc/visual-content-search-over-videos/pipeline/get_video_ids.py +++ /dev/null @@ -1,79 +0,0 @@ -import hashlib -import logging -import os -from pathlib import Path -from typing import Final, Optional - -import youtube_dl - -from pipeline.download_videos import DATA_DIR - -logging.basicConfig( - level=logging.INFO, - format="%(asctime)s - %(levelname)s - %(message)s", - datefmt="%Y-%m-%d %H:%M:%S", -) - -PLAYLIST_URLS = [ - "https://www.youtube.com/playlist?list=PL6Lt9p1lIRZ311J9ZHuzkR5A3xesae2pk", # 570, Alternative rock of the 2000s (2000-2009) - "https://www.youtube.com/playlist?list=PLMC9KNkIncKtGvr2kFRuXBVmBev6cAJ2u", # 250, Best Pop Music Videos - Top Pop Hits Playlist - "https://www.youtube.com/playlist?list=PLmXxqSJJq-yXrCPGIT2gn8b34JjOrl4Xf", # 184, 80s Music Hits | Best 80s Music Playlist - "https://www.youtube.com/playlist?list=PL7DA3D097D6FDBC02", # 150, 90's Hits - Greatest 1990's Music Hits (Best 90’s Songs Playlist) - "https://www.youtube.com/playlist?list=PLeDakahyfrO-4kuBioL5ZAoy4j6aCnzWy", # 100, Best Music Videos of All Time - "https://www.youtube.com/playlist?list=PLMC9KNkIncKtPzgY-5rmhvj7fax8fdxoj", # 200, Pop Music Playlist - Timeless Pop Songs (Updated Weekly 2023) - "https://www.youtube.com/playlist?list=PLkqz3S84Tw-RfPS9HHi3MRmrinOBKxIr8", # 82, Top POP Hits 2022 – Biggest Pop Music Videos - Vevo - "https://www.youtube.com/playlist?list=PLyORnIW1xT6wqvszJbCdLdSjylYMf3sNZ", # 100, Top 100 Music Videos 2023 - Best Music Videos 2023 - "https://www.youtube.com/playlist?list=PL1Mmsa-U48mea1oIN-Eus78giJANx4D9W", # 119, 90s Music Videos - "https://www.youtube.com/playlist?list=PLurPBtLcqJqcg3r-HOhR3LZ0aDxpI15Fa", # 100, 100 Best Music Videos Of The Decade: 2010 - 2019 - "https://www.youtube.com/playlist?list=PLCQCtoOJpI_A5oktQImEdDBJ50BqHXujj", # 495, MTV Classic 2000's music videos (US Version) -] -URL_FILE: Final[Optional[str]] = os.environ.get("URL_FILE") -OUTPUT_DIR: Final[str] = DATA_DIR / "ids" - - -def get_all_video_ids(channel_url: str) -> list[str]: - """Get all video IDs from a YouTube channel or playlist URL. - - Args: - channel_url (str): URL of the YouTube channel or playlist. - - Returns: - list[str]: List of video IDs. - - Notes: - If you want the videos from a channel, make sure to pass the `/videos` endpoint of the channel. 
- """ - ydl_opts = { - "ignoreerrors": True, - "extract_flat": "in_playlist", - "dump_single_json": True, - "quiet": True, - } - - with youtube_dl.YoutubeDL(ydl_opts) as ydl: - playlist_info = ydl.extract_info(channel_url, download=False) - video_ids = [video["id"] for video in playlist_info["entries"] if "id" in video] - - return video_ids - - -def process_youtube_url(url: str): - logging.info(f"Processing {url}") - ids = get_all_video_ids(url) - OUTPUT_DIR.mkdir(parents=True, exist_ok=True) - (OUTPUT_DIR / ".gitignore").write_text("*") - output = "\n".join(ids) - output_path = OUTPUT_DIR / f"{hashlib.md5(output.encode()).hexdigest()}.txt" - logging.info(f"Writing {len(ids)} video IDs to {output_path}") - with output_path.open(mode="w") as f: - f.write(output) - - -def main(): - logging.info(f"Processing {len(PLAYLIST_URLS)} URLs") - for url in PLAYLIST_URLS: - process_youtube_url(url) - - -if __name__ == "__main__": - main() diff --git a/spaces/srush/minichain/agent.py b/spaces/srush/minichain/agent.py deleted file mode 100644 index 9fb67178778c23f31ffb3ce451634b67bf4702b9..0000000000000000000000000000000000000000 --- a/spaces/srush/minichain/agent.py +++ /dev/null @@ -1,82 +0,0 @@ -# + tags=["hide_inp"] - -desc = """ -### Agent - -Chain that executes different tools based on model decisions. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/bash.ipynb) - -(Adapted from LangChain ) -""" -# - - -# $ - -from minichain import Id, prompt, OpenAI, show, transform, Mock, Break -from gradio_tools.tools import StableDiffusionTool, ImageCaptioningTool, ImageToMusicTool - - -# class ImageCaptioningTool: -# def run(self, inp): -# return "This is a picture of a smiling huggingface logo." - -# description = "Image Captioning" - -tools = [StableDiffusionTool(), ImageCaptioningTool()] - - -@prompt(OpenAI(stop=["Observation:"]), - template_file="agent.pmpt.tpl") -def agent(model, query, history): - return model(dict(tools=[(str(tool.__class__.__name__), tool.description) - for tool in tools], - input=query, - agent_scratchpad=history - )) -@transform() -def tool_parse(out): - lines = out.split("\n") - if lines[0].split("?")[-1].strip() == "Yes": - tool = lines[1].split(":", 1)[-1].strip() - command = lines[2].split(":", 1)[-1].strip() - return tool, command - else: - return Break() - -@prompt(tools) -def tool_use(model, usage): - selector, command = usage - for i, tool in enumerate(tools): - if selector == tool.__class__.__name__: - return model(command, tool_num=i) - return ("",) - -@transform() -def append(history, new, observation): - return history + "\n" + new + "Observation: " + observation - -def run(query): - history = "" - observations = [] - for i in range(3): - select_input = agent(query, history) - observations.append(tool_use(tool_parse(select_input))) - history = append(history, select_input, observations[i]) - - return observations[-1] - -# $ - -gradio = show(run, - subprompts=[agent, tool_use] * 3, - examples=[ - "I would please like a photo of a dog riding a skateboard. 
" - "Please caption this image and create a song for it.", - 'Use an image generator tool to draw a cat.', - 'Caption the image https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.png from the internet'], - out_type="markdown", - description=desc, - show_advanced=False - ) -if __name__ == "__main__": - gradio.queue().launch() - diff --git a/spaces/stomexserde/gpt4-ui/Examples/BEST Download Film Petualangan Sherina Full Gratis.md b/spaces/stomexserde/gpt4-ui/Examples/BEST Download Film Petualangan Sherina Full Gratis.md deleted file mode 100644 index 026b228d76fe9eb9720b35eb127e791cc5a82b4d..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/BEST Download Film Petualangan Sherina Full Gratis.md +++ /dev/null @@ -1,17 +0,0 @@ - -

              Download the Film Petualangan Sherina in Full for Free: An Entertaining Musical Adventure Film

              -

              Petualangan Sherina is a musical adventure film released in 2000. It stars Sherina Munaf and Derby Romero as two children who are kidnapped by criminals who want to seize farmland. The film was directed by Riri Riza and produced by Mira Lesmana, with songs written by Elfa Secioria and Jujur Prananto.

              -

              Petualangan Sherina tells the story of Sherina (Sherina Munaf), a young singer who has to move to Bandung with her family because her father, Darmawan (Mathias Muchus), takes a job as an agricultural engineer. At her new school, Sherina meets Sadam (Derby Romero), the son of her father's employer, Ardiwilaga (Didi Petet). At first they are rivals, but they become friends while on holiday at Ardiwilaga's house.

              -

              Download the Film Petualangan Sherina in Full for Free


              Download Zip ->->->-> https://urlgoal.com/2uI8QC



              -

              However, their holiday is disrupted by Pak Raden (Butet Kertaradjasa) and his henchmen, who have been hired by Kertarajasa (Djaduk Ferianto), a property businessman who wants to take over Ardiwilaga's farmland. Pak Raden and his gang kidnap Sherina and Sadam and take them to a hut in the middle of the forest. Sherina and Sadam must try to escape from the kidnappers while facing various obstacles and dangers in the forest.

              -

              Petualangan Sherina is an entertaining and inspiring film. It features exciting and funny action from Sherina and Sadam, as well as catchy and touching songs. The film also conveys positive messages about friendship, courage, honesty, and love of nature. It is suitable for viewers of all ages, especially children and families.

              -

              If you want to watch or download the full Petualangan Sherina film for free, you can visit the following sites:

              -
                -
              • Archive.org: This site provides Petualangan Sherina in WEBRip format at 720p resolution[^1^]. You can watch or download the film for free and legally.
              • -
              • KMS2S1: This site provides Petualangan Sherina in Bluray format at 480p, 720p, or 1080p resolution[^2^]. You can download the film via Google Drive or Openload.
              • -
              • JalanTikus: This site provides links for streaming Petualangan Sherina[^3^]. You can watch the film via the Indox21 or LK21 sites.
              • -
              • Indox21: This site provides Petualangan Sherina in HDRip format at 360p, 480p, or 720p resolution[^4^]. You can watch or download the film for free.
              • -
              -

              Enjoy watching the full Petualangan Sherina film for free!

              cec2833e83
              -
              -
              \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Beowulf Tamil Dubbed Movie WORK Free 71.md b/spaces/stomexserde/gpt4-ui/Examples/Beowulf Tamil Dubbed Movie WORK Free 71.md deleted file mode 100644 index 08f23acec6b207b154831c83c5ec92a6b919d3c4..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Beowulf Tamil Dubbed Movie WORK Free 71.md +++ /dev/null @@ -1,16 +0,0 @@ -
              -

              Beowulf: A Tamil Dubbed Movie Review

              -

              Beowulf is a 2007 animated fantasy film based on the Old English epic poem of the same name. The film was directed by Robert Zemeckis and features the voices of Ray Winstone, Anthony Hopkins, Angelina Jolie, John Malkovich, and Crispin Glover. The film tells the story of Beowulf, a hero who battles the monster Grendel and his mother, as well as a dragon in his later years.

              -

              beowulf tamil dubbed movie free 71


              Download - https://urlgoal.com/2uIaEl



              -

              The film was released in a Tamil-dubbed version in 2008 and was available for free online streaming on YouTube[^2^] [^3^]. The film received mixed reviews from critics and audiences, who praised the animation and action sequences but criticized the deviations from the original poem and the uncanny-valley effect of the characters. The film was also controversial for its sexualized portrayal of Grendel's mother, who appears as a seductive humanoid creature.

              -

              The Tamil dubbed version of Beowulf is a good choice for fans of fantasy and adventure genres, who enjoy watching epic battles and mythical creatures. The film has a runtime of 71 minutes and is rated PG-13 for intense sequences of violence including disturbing images, some sexual material and nudity.

              - -

              The film uses a technique called performance capture, which involves recording the actors' movements and expressions and then animating them digitally. The film was one of the first to use this technique extensively, and was praised for its realistic and detailed animation. However, some critics and viewers felt that the characters looked unnatural and creepy, especially in close-up shots. The film also used 3D technology to enhance the visual effects and create a more immersive experience for the audience.

              -

              -

              The film deviates from the original poem in several ways, such as adding new characters, changing the relationships between existing ones, and altering the outcome of some events. For example, in the film, Beowulf marries Wealhtheow after becoming king, but later takes a young mistress named Ursula. He also has an affair with Grendel's mother, who gives birth to a dragon that Beowulf later fights. Some of these changes were made to create more drama and conflict, while others were made to appeal to a modern audience.

              -

              The film was nominated for several awards, including two Golden Globes for Best Animated Feature Film and Best Original Song. It also won the Saturn Award for Best Animated Film and the Annie Award for Best Writing in an Animated Feature Production. The film was a commercial success, grossing over $196 million worldwide against a budget of $150 million. The film has a cult following among fans of the poem and the genre, who appreciate its creative interpretation and stunning visuals.

              - -

              The film has also inspired some academic and literary works, such as essays, books, and comics. For example, the book Beowulf on Film: Adaptations and Variations, edited by Nickolas Haydock and Edward Risden, analyzes the film and other cinematic adaptations of the poem. The comic book Beowulf: A Different Shade of Gray, written by Brian Augustyn and illustrated by Dub, retells the film's story from Grendel's perspective. The film has also been compared and contrasted with other adaptations of the poem, such as the 2005 live-action film Beowulf & Grendel and the 1999 animated film Beowulf.

              -

              The Tamil dubbed version of Beowulf is a unique and entertaining way to experience the classic story of the hero and the monster. The film offers a thrilling and visually stunning adventure that will appeal to fans of fantasy and animation. The film is also a fascinating example of how a centuries-old poem can be reimagined and reinvented for a contemporary audience.

              e93f5a0c3f
              -
              -
              \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Big Dummys Guide To The As400 PDF.epub.md b/spaces/stomexserde/gpt4-ui/Examples/Big Dummys Guide To The As400 PDF.epub.md deleted file mode 100644 index 00b4270a5a461eea66e840b43b8f8e39e29259b8..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Big Dummys Guide To The As400 PDF.epub.md +++ /dev/null @@ -1,33 +0,0 @@ -
              -

              Big Dummy's Guide To The As400 PDF.epub: A Review

              -

              If you are looking for a comprehensive and easy-to-follow guide to the IBM AS/400, a family of midrange computers from IBM that run the OS/400 operating system[^2^], you might want to check out Big Dummy's Guide To The As400 PDF.epub. This document, which is available for free on Scribd[^1^], covers the basics of the AS/400 system, such as its architecture, commands, programming languages, database, security, and more. It also provides tips and tricks for using the AS/400 effectively and efficiently.

              -

              Big Dummy's Guide To The As400 PDF.epub


              Download File ►►► https://urlgoal.com/2uI8Tk



              -

              Big Dummy's Guide To The As400 PDF.epub is written in a clear and humorous style that makes it easy to understand and follow. The author uses examples and screenshots to illustrate the concepts and procedures. The document is divided into three parts: Part 1 introduces the AS/400 system and its features; Part 2 explains how to work with the AS/400 system; and Part 3 covers some advanced topics such as performance tuning, backup and recovery, and troubleshooting. Each part has several chapters that cover specific topics in detail.

              -

              Whether you are a beginner or an experienced user of the AS/400 system, you will find Big Dummy's Guide To The As400 PDF.epub useful and informative. It is a handy reference that you can access anytime and anywhere. You can download it from Scribd[^1^] or read it online for free. However, you will need an account to access the full document.

              - -

              In this article, we will review some of the main features and benefits of the AS/400 system and how Big Dummy's Guide To The As400 PDF.epub can help you learn and master them.

              -

              -

              What is the AS/400 system?

              -

              The AS/400 system, also known as the IBM iSeries or IBM System i, is a family of midrange computers from IBM that was launched in 1988. It is designed to provide a reliable, secure, and scalable platform for business applications. The AS/400 system runs the OS/400 operating system, which is based on the IBM System/38 and System/36 architectures. The OS/400 operating system supports multiple programming languages, such as RPG, COBOL, C, Java, and SQL. It also has a built-in relational database management system (DB2) and an integrated web server (IBM HTTP Server).
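
              Since the guide describes DB2 and SQL only in prose, here is a rough, hypothetical sketch of how an external program might query the AS/400's built-in database over ODBC. The driver name, host, credentials, library, and table below are placeholder assumptions, not details taken from the book.

```python
# Hypothetical sketch: querying DB2 on an AS/400 (IBM i) over ODBC with pyodbc.
# Assumes the IBM i Access ODBC driver is installed; the system name, user,
# password, and the MYLIB.ORDERS table are made-up placeholders.
import pyodbc

conn = pyodbc.connect(
    "DRIVER={IBM i Access ODBC Driver};"
    "SYSTEM=my-as400.example.com;"  # AS/400 host name or IP (placeholder)
    "UID=MYUSER;PWD=MYPASS;"
)
cursor = conn.cursor()
cursor.execute(
    "SELECT ORDER_ID, CUSTOMER, TOTAL "
    "FROM MYLIB.ORDERS "
    "FETCH FIRST 10 ROWS ONLY"
)
for row in cursor.fetchall():
    print(row.ORDER_ID, row.CUSTOMER, row.TOTAL)
conn.close()
```

              The same data could also be reached from the system's native interfaces (such as interactive SQL or embedded SQL in RPG); the ODBC route is simply one common way to query DB2 from another platform.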

              -

              The AS/400 system has several advantages over other platforms, such as:

              -
                -
              • It is easy to use and manage. The AS/400 system has a graphical user interface (GUI) and a command-line interface (CLI) that allow users to interact with the system. The AS/400 system also has a menu-driven interface that simplifies the administration tasks. The AS/400 system can be accessed remotely via a terminal emulator or a web browser.
              • -
              • It is highly reliable and secure. The AS/400 system has a fault-tolerant design that ensures high availability and performance. The AS/400 system also has a robust security system that protects the data and resources from unauthorized access. The AS/400 system supports encryption, authentication, authorization, auditing, and backup and recovery features.
              • -
              • It is scalable and flexible. The AS/400 system can handle large volumes of data and transactions with ease. The AS/400 system also supports multiple processors, memory, disk drives, and network adapters that can be added or removed without affecting the operation of the system. The AS/400 system can run multiple operating systems simultaneously, such as OS/400, AIX, Linux, and Windows.
              • -
              -

              How can Big Dummy's Guide To The As400 PDF.epub help you?

              -

              Big Dummy's Guide To The As400 PDF.epub is a comprehensive and easy-to-follow guide to the AS/400 system that covers the basics and beyond. It is suitable for beginners who want to learn the fundamentals of the AS/400 system, as well as experienced users who want to refresh their knowledge or explore new features. Big Dummy's Guide To The As400 PDF.epub can help you:

              -
                -
              • Understand the architecture and components of the AS/400 system.
              • -
              • Learn how to use the GUI, CLI, and menu interfaces of the AS/400 system.
              • -
              • Master the common commands and functions of the AS/400 system.
              • -
              • Work with the programming languages and tools supported by the AS/400 system.
              • -
              • Create and manage databases and files on the AS/400 system.
              • -
              • Configure and maintain the security and network settings of the AS/400 system.
              • -
              • Optimize the performance and efficiency of the AS/400 system.
              • -
              • Backup and restore data and programs on the AS/400 system.
              • -
              • Troubleshoot and resolve common problems on the AS/400 system.
              • -
              -

              Big Dummy's Guide To The As400 PDF.epub is written in a clear and humorous style that makes it easy to understand and follow. The author uses examples and screenshots to illustrate the concepts and procedures. The document is divided into three parts: Part 1 introduces the AS/400 system and its features; Part 2 explains how to work with the AS/400 system; and Part 3 covers some advanced topics such as performance tuning, backup and recovery, and troubleshooting. Each part has several chapters that cover specific topics in detail.

              7b8c122e87
              -
              -
              \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/F.E.A.R. 2 Project Origin Reborn Ultimate Edition-GOG Unlimited Gems High Quality.md b/spaces/stomexserde/gpt4-ui/Examples/F.E.A.R. 2 Project Origin Reborn Ultimate Edition-GOG Unlimited Gems High Quality.md deleted file mode 100644 index 76748354c087dbefa05e9a536d216a80051fffff..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/F.E.A.R. 2 Project Origin Reborn Ultimate Edition-GOG Unlimited Gems High Quality.md +++ /dev/null @@ -1,23 +0,0 @@ -
              -

              F.E.A.R. 2: Project Origin Reborn Ultimate Edition-GOG Unlimited Gems: How to Get the Best Deal on This Horror Shooter

              -

              If you are a fan of horror games, you might have heard of F.E.A.R. 2: Project Origin, a first-person shooter that follows a special forces team as they try to stop a supernatural threat unleashed by a secret military project. The game features intense combat, slow-motion abilities, and a gripping story that will keep you on the edge of your seat.

              -

              But did you know that there is an ultimate edition of the game that includes not only the base game, but also its expansion pack, F.E.A.R. 2: Reborn? This DLC adds four new single-player levels that let you experience the aftermath of the main game from a different perspective: that of a Replica soldier who is being contacted by a mysterious voice.

              -

              F.E.A.R. 2: Project Origin Reborn Ultimate Edition-GOG Unlimited Gems


              Download https://urlgoal.com/2uIbwr



              -

              The ultimate edition also comes with some goodies, such as wallpapers, avatars, and manuals in different languages. And the best part is that you can get it for a very low price on GOG.com, a digital platform that offers DRM-free games and great customer service.

              -

              GOG.com is currently selling F.E.A.R. 2: Project Origin Reborn Ultimate Edition-GOG for only $14.99, which is a 50% discount from its original price of $29.99. But wait, there's more! If you use the code ZMANTIS at checkout, you can get an extra 10% off, bringing the price down to $13.49. That's a steal for such a high-quality game!

              -

              But hurry up, because this offer is only valid until April 30th, 2023. Don't miss this chance to get unlimited gems in F.E.A.R. 2: Project Origin Reborn Ultimate Edition-GOG and enjoy one of the best horror shooters ever made!

              - -

              But don't just take our word for it. F.E.A.R. 2: Project Origin Reborn Ultimate Edition-GOG has received rave reviews from critics and players alike, who praised its thrilling gameplay, immersive atmosphere, and stunning graphics. The game has a score of 3.9 out of 5 on GOG.com, based on over 1,000 user ratings. Here are some of the comments from satisfied customers:

              -
              -

              "One of the best horror games I've ever played. The story is engaging, the combat is satisfying, and the scares are genuine. The Reborn DLC is a nice addition that adds more variety and challenge. Highly recommended!" - John

              -
              -
              -

              "F.E.A.R. 2 is a masterpiece of horror and action. The game is full of memorable moments, from the creepy school level to the epic mech battles. The Reborn DLC is short but sweet, and gives you a different perspective on the events of the main game. A must-play for fans of the genre." - Lisa

              -
              -
              -

              "I love this game. It has everything I want in a shooter: great gunplay, slow-motion effects, environmental destruction, and a gripping story. The Reborn DLC is a nice bonus that adds more replay value and fun. If you like horror games, you owe it to yourself to play this." - Mike

              -

              -
              -

              So what are you waiting for? Grab your copy of F.E.A.R. 2: Project Origin Reborn Ultimate Edition-GOG Unlimited Gems today and experience the ultimate horror shooter for yourself. But be warned: this game is not for the faint of heart. You will face your fears as you confront Alma Wade, the most terrifying enemy ever created.

              81aa517590
              -
              -
              \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Hard Reset Dslide 70333.md b/spaces/stomexserde/gpt4-ui/Examples/Hard Reset Dslide 70333.md deleted file mode 100644 index 6eb1c93a2b4fd6ddcb6d2f935decb71357e7d682..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Hard Reset Dslide 70333.md +++ /dev/null @@ -1,34 +0,0 @@ -
              -

              How to Hard Reset Dslide 70333 Tablet

              -

              If you have a Dslide 70333 tablet and you want to restore it to its factory settings, you may need to perform a hard reset. A hard reset is a process that erases all the data and settings on your device and returns it to its original state. This can be useful if you are experiencing problems with your tablet, such as slowness, freezing, or errors. It can also be helpful if you want to sell or give away your tablet and you want to delete your personal information.

              -

              However, before you proceed with a hard reset, you should be aware of some important points:

              -

              Hard reset dslide 70333


              Download Zip https://urlgoal.com/2uIc6y



              -
                -
              • A hard reset will delete everything on your tablet, including your apps, files, photos, videos, contacts, messages, and accounts. Make sure you back up any data that you want to keep before you start.
              • -
              • A hard reset will also remove any customizations that you have made to your tablet, such as wallpapers, ringtones, widgets, etc. You will need to set up your tablet again after the reset.
              • -
              • A hard reset may not fix all the issues that you are facing with your tablet. Some problems may be caused by hardware defects or software bugs that require professional repair or update.
              • -
              -

              There are two methods to perform a hard reset on your Dslide 70333 tablet: using the settings menu or using the recovery mode. Here are the steps for each method:

              -

              Method 1: Using the Settings Menu

              -

              This method is recommended if you can access your tablet normally and you know your unlock pattern or password. Follow these steps:

              -
                -
              1. On your tablet, go to Settings and tap on Backup & reset.
              2. -
              3. Tap on Factory data reset and then tap on Reset tablet.
              4. -
              5. If prompted, enter your unlock pattern or password and tap on Continue.
              6. -
              7. Tap on Erase everything to confirm the reset.
              8. -
              9. Your tablet will reboot and start the reset process. Wait for it to finish and then follow the on-screen instructions to set up your tablet again.
              10. -
              -

              Method 2: Using the Recovery Mode

              -

              This method is useful if you cannot access your tablet normally or you have forgotten your unlock pattern or password. Follow these steps:

              -
                -
              1. Turn off your tablet completely by pressing and holding the Power button for a few seconds.
              2. -
              3. Press and hold the Volume Up and Power buttons together until you see the Dslide logo on the screen.
              4. -
              5. Release the buttons and wait for the recovery mode menu to appear.
              6. -
              7. Use the Volume Up and Volume Down buttons to navigate through the menu and highlight wipe data/factory reset. Press the Power button to select it.
              8. -
              9. Select Yes -- delete all user data and press the Power button to confirm.
              10. -
              11. Your tablet will start the reset process. Wait for it to finish and then select reboot system now. Press the Power button to restart your tablet.
              12. -
              13. Your tablet will boot up and ask you to set it up again. Follow the on-screen instructions to do so.
              14. -
              -

              Congratulations! You have successfully performed a hard reset on your Dslide 70333 tablet. You can now enjoy using your device as if it were new.

              7b8c122e87
              -
              -
              \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Harmony Navigator 2 Keygen Torrent TOP.md b/spaces/stomexserde/gpt4-ui/Examples/Harmony Navigator 2 Keygen Torrent TOP.md deleted file mode 100644 index 4abbf295564c8d46fc98cee74d9e7862daee0197..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Harmony Navigator 2 Keygen Torrent TOP.md +++ /dev/null @@ -1,15 +0,0 @@ -
              -

              Harmony Navigator 2: A Powerful Tool for Music Prototyping

              -

              Harmony Navigator 2 is a software application that helps you create and explore harmonic progressions, melodies, and arrangements. It is designed for musicians, composers, songwriters, and producers who want to experiment with different musical ideas and styles.

              -

              Harmony Navigator 2 Keygen Torrent


              Download https://urlgoal.com/2uIar7



              -

              Harmony Navigator 2 can generate chord changes, arpeggios, patterns, and voicings based on your input. You can also edit and customize your own progressions using the embedded progression editor. Harmony Navigator 2 supports MIDI output, so you can connect it to your favorite DAW or synthesizer and hear your music come to life.

              -

              Harmony Navigator 2 is available in two editions: Light Edition (LE) and Advanced Edition (AE). The LE version offers basic features for creating and playing harmonic progressions. The AE version adds more advanced features such as pattern sequencing, modulation, inversion, voice leading, and more.

              -

              If you are looking for a way to download Harmony Navigator 2 for free, you might be tempted to search for a keygen or a torrent file. However, this is not a safe or legal option. Keygens and torrents can contain viruses, malware, spyware, or other harmful programs that can damage your computer or steal your personal information. Moreover, downloading Harmony Navigator 2 without paying for it is a violation of the software license agreement and a form of piracy.

              -

              The best way to get Harmony Navigator 2 is to purchase it from the official website of Cognitone, the developer of the software. By doing so, you will get a legitimate copy of the software that is free of any risks or errors. You will also get access to updates, support, and tutorials. You can choose between a one-time payment or a monthly subscription plan that suits your budget and needs.

              -

              Harmony Navigator 2 is a powerful tool for music prototyping that can help you unleash your creativity and explore new musical possibilities. If you are interested in trying it out, you can download a free trial version from the Cognitone website and see for yourself what Harmony Navigator 2 can do for you.


              -

              Harmony Navigator 2 is not only a tool for creating harmonic progressions, but also a tool for learning and understanding music theory. Harmony Navigator 2 can help you analyze the structure and function of chords, scales, modes, and keys. You can also use Harmony Navigator 2 to study the harmonic patterns and styles of different genres and artists. Harmony Navigator 2 can show you how to apply music theory concepts to your own compositions and improvisations.

              -

              Harmony Navigator 2 is also a tool for collaborating and sharing your musical ideas with others. Harmony Navigator 2 can export your progressions and patterns as MIDI files, audio files, or sheet music. You can also import MIDI files from other sources and edit them in Harmony Navigator 2. You can use Harmony Navigator 2 to communicate your musical vision to other musicians, singers, or producers. You can also use Harmony Navigator 2 to get feedback and inspiration from other users in the online community.
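
              As a rough illustration of what can be done with such an exported file — this is not a Cognitone feature, just a generic sketch that assumes the third-party mido library and a made-up file name — a few lines of Python can list the notes in a progression that was exported as MIDI:

```python
# Minimal sketch: inspecting a MIDI file exported from a progression editor.
# Assumes the mido library is installed; "progression.mid" is a placeholder name.
import mido

midi_file = mido.MidiFile("progression.mid")
for track in midi_file.tracks:
    for msg in track:
        # note_on messages with velocity > 0 mark where each note starts
        if msg.type == "note_on" and msg.velocity > 0:
            print(f"note={msg.note} velocity={msg.velocity} delta_ticks={msg.time}")
```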

              -

              -

              Harmony Navigator 2 is a tool that can enhance your musical skills and creativity. Whether you are a beginner or a professional, Harmony Navigator 2 can help you discover new ways of making music. Harmony Navigator 2 is more than just a software application; it is a musical companion that can guide you on your musical journey.

              e93f5a0c3f
              -
              -
              \ No newline at end of file diff --git a/spaces/sub314xxl/MusicGen-Continuation/audiocraft/modules/activations.py b/spaces/sub314xxl/MusicGen-Continuation/audiocraft/modules/activations.py deleted file mode 100644 index 8bd6f2917a56d72db56555d0ff54b2311bc21778..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MusicGen-Continuation/audiocraft/modules/activations.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import torch.nn as nn -from torch import Tensor -from typing import Union, Callable - - -class CustomGLU(nn.Module): - """Custom Gated Linear Unit activation. - Applies a modified gated linear unit :math:`a * f(b)` where :math:`a` is the first half - of the input matrices, :math:`b` is the second half, and :math:`f` is a provided activation - function (i.e. sigmoid, swish, etc.). - - Args: - activation (nn.Module): The custom activation to apply in the Gated Linear Unit - dim (int): the dimension on which to split the input. Default: -1 - - Shape: - - Input: :math:`(\ast_1, N, \ast_2)` where `*` means, any number of additional - dimensions - - Output: :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2` - - Examples:: - >>> m = CustomGLU(nn.Sigmoid()) - >>> input = torch.randn(4, 2) - >>> output = m(input) - """ - def __init__(self, activation: nn.Module, dim: int = -1): - super(CustomGLU, self).__init__() - self.dim = dim - self.activation = activation - - def forward(self, x: Tensor): - assert x.shape[self.dim] % 2 == 0 # M = N / 2 - a, b = torch.chunk(x, 2, dim=self.dim) - return a * self.activation(b) - - -class SwiGLU(CustomGLU): - """SiLU Gated Linear Unit activation. - Applies SiLU Gated Linear Unit :math:`a * SiLU(b)` where :math:`a` is - the first half of the input matrices, :math:`b` is the second half. - - Args: - dim (int): the dimension on which to split the input. Default: -1 - """ - def __init__(self, dim: int = -1): - super(SwiGLU, self).__init__(nn.SiLU(), dim) - - -class GeGLU(CustomGLU): - """GeLU Gated Linear Unit activation. - Applies GeLU Gated Linear Unit :math:`a * GELU(b)` where :math:`a` is - the first half of the input matrices, :math:`b` is the second half. - - Args: - dim (int): the dimension on which to split the input. Default: -1 - """ - def __init__(self, dim: int = -1): - super(GeGLU, self).__init__(nn.GELU(), dim) - - -class ReGLU(CustomGLU): - """ReLU Gated Linear Unit activation. - Applies ReLU Gated Linear Unit :math:`a * ReLU(b)` where :math:`a` is - the first half of the input matrices, :math:`b` is the second half. - - Args: - dim (int): the dimension on which to split the input. Default: -1 - """ - def __init__(self, dim: int = -1): - super(ReGLU, self).__init__(nn.ReLU(), dim) - - -def get_activation_fn( - activation: Union[str, Callable[[Tensor], Tensor]] -) -> Union[str, Callable[[Tensor], Tensor]]: - """Helper function to map an activation string to the activation class. - If the supplied activation is not a string that is recognized, the activation is passed back. 
- - Args: - activation (Union[str, Callable[[Tensor], Tensor]]): Activation to check - """ - if isinstance(activation, str): - if activation == "reglu": - return ReGLU() - elif activation == "geglu": - return GeGLU() - elif activation == "swiglu": - return SwiGLU() - return activation diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/cnn/__init__.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/cnn/__init__.py deleted file mode 100644 index 7246c897430f0cc7ce12719ad8608824fc734446..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmcv/cnn/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .alexnet import AlexNet -# yapf: disable -from .bricks import (ACTIVATION_LAYERS, CONV_LAYERS, NORM_LAYERS, - PADDING_LAYERS, PLUGIN_LAYERS, UPSAMPLE_LAYERS, - ContextBlock, Conv2d, Conv3d, ConvAWS2d, ConvModule, - ConvTranspose2d, ConvTranspose3d, ConvWS2d, - DepthwiseSeparableConvModule, GeneralizedAttention, - HSigmoid, HSwish, Linear, MaxPool2d, MaxPool3d, - NonLocal1d, NonLocal2d, NonLocal3d, Scale, Swish, - build_activation_layer, build_conv_layer, - build_norm_layer, build_padding_layer, build_plugin_layer, - build_upsample_layer, conv_ws_2d, is_norm) -from .builder import MODELS, build_model_from_cfg -# yapf: enable -from .resnet import ResNet, make_res_layer -from .utils import (INITIALIZERS, Caffe2XavierInit, ConstantInit, KaimingInit, - NormalInit, PretrainedInit, TruncNormalInit, UniformInit, - XavierInit, bias_init_with_prob, caffe2_xavier_init, - constant_init, fuse_conv_bn, get_model_complexity_info, - initialize, kaiming_init, normal_init, trunc_normal_init, - uniform_init, xavier_init) -from .vgg import VGG, make_vgg_layer - -__all__ = [ - 'AlexNet', 'VGG', 'make_vgg_layer', 'ResNet', 'make_res_layer', - 'constant_init', 'xavier_init', 'normal_init', 'trunc_normal_init', - 'uniform_init', 'kaiming_init', 'caffe2_xavier_init', - 'bias_init_with_prob', 'ConvModule', 'build_activation_layer', - 'build_conv_layer', 'build_norm_layer', 'build_padding_layer', - 'build_upsample_layer', 'build_plugin_layer', 'is_norm', 'NonLocal1d', - 'NonLocal2d', 'NonLocal3d', 'ContextBlock', 'HSigmoid', 'Swish', 'HSwish', - 'GeneralizedAttention', 'ACTIVATION_LAYERS', 'CONV_LAYERS', 'NORM_LAYERS', - 'PADDING_LAYERS', 'UPSAMPLE_LAYERS', 'PLUGIN_LAYERS', 'Scale', - 'get_model_complexity_info', 'conv_ws_2d', 'ConvAWS2d', 'ConvWS2d', - 'fuse_conv_bn', 'DepthwiseSeparableConvModule', 'Linear', 'Conv2d', - 'ConvTranspose2d', 'MaxPool2d', 'ConvTranspose3d', 'MaxPool3d', 'Conv3d', - 'initialize', 'INITIALIZERS', 'ConstantInit', 'XavierInit', 'NormalInit', - 'TruncNormalInit', 'UniformInit', 'KaimingInit', 'PretrainedInit', - 'Caffe2XavierInit', 'MODELS', 'build_model_from_cfg' -] diff --git a/spaces/szukevin/VISOR-GPT/train/tencentpretrain/utils/__init__.py b/spaces/szukevin/VISOR-GPT/train/tencentpretrain/utils/__init__.py deleted file mode 100644 index 5e97e691c00a0d3d8518354f31e362c22091408e..0000000000000000000000000000000000000000 --- a/spaces/szukevin/VISOR-GPT/train/tencentpretrain/utils/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -from tencentpretrain.utils.dataset import * -from tencentpretrain.utils.dataloader import * -from tencentpretrain.utils.act_fun import * -from tencentpretrain.utils.optimizers import * -from tencentpretrain.utils.adversarial import * - - -str2tokenizer = {"char": CharTokenizer, "space": SpaceTokenizer, "bert": BertTokenizer, - "bpe": 
BPETokenizer, "xlmroberta": XLMRobertaTokenizer, "image": ImageTokenizer, - "text_image": TextImageTokenizer, "virtual": VirtualTokenizer} -str2dataset = {"bert": BertDataset, "lm": LmDataset, "mlm": MlmDataset, - "bilm": BilmDataset, "albert": AlbertDataset, "mt": MtDataset, - "t5": T5Dataset, "gsg": GsgDataset, "bart": BartDataset, - "cls": ClsDataset, "prefixlm": PrefixlmDataset, "cls_mlm": ClsMlmDataset, - "vit": VitDataset, "vilt": ViltDataset, "clip": ClipDataset, "s2t": S2tDataset, - "beit":BeitDataset, "dalle": DalleDataset} -str2dataloader = {"bert": BertDataloader, "lm": LmDataloader, "mlm": MlmDataloader, - "bilm": BilmDataloader, "albert": AlbertDataloader, "mt": MtDataloader, - "t5": T5Dataloader, "gsg": GsgDataloader, "bart": BartDataloader, - "cls": ClsDataloader, "prefixlm": PrefixlmDataloader, "cls_mlm": ClsMlmDataloader, - "vit": VitDataloader, "vilt": ViltDataloader, "clip": ClipDataloader, "s2t": S2tDataloader, - "beit":BeitDataloader, "dalle": DalleDataloader} - -str2act = {"gelu": gelu, "gelu_fast": gelu_fast, "relu": relu, "silu": silu, "linear": linear} - -str2optimizer = {"adamw": AdamW, "adafactor": Adafactor} - -str2scheduler = {"linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, - "cosine_with_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, - "polynomial": get_polynomial_decay_schedule_with_warmup, - "constant": get_constant_schedule, "constant_with_warmup": get_constant_schedule_with_warmup, - "inverse_sqrt": get_inverse_square_root_schedule_with_warmup, "tri_stage": get_tri_stage_schedule} - -str2adv = {"fgm": FGM, "pgd": PGD} - -__all__ = ["CharTokenizer", "SpaceTokenizer", "BertTokenizer", "BPETokenizer", "XLMRobertaTokenizer", - "ImageTokenizer", "TextImageTokenizer", "str2tokenizer", - "BertDataset", "LmDataset", "MlmDataset", "BilmDataset", - "AlbertDataset", "MtDataset", "T5Dataset", "GsgDataset", - "BartDataset", "ClsDataset", "PrefixlmDataset", "ClsMlmDataset", - "VitDataset", "ViltDataset", "ClipDataset", "BeitDataset", "str2dataset", - "BertDataloader", "LmDataloader", "MlmDataloader", "BilmDataloader", - "AlbertDataloader", "MtDataloader", "T5Dataloader", "GsgDataloader", - "BartDataloader", "ClsDataloader", "PrefixlmDataloader", "ClsMlmDataloader", - "VitDataloader", "ViltDataloader", "ClipDataloader", "BeitDataloader", "str2dataloader", - "gelu", "gelu_fast", "relu", "silu", "linear", "str2act", - "AdamW", "Adafactor", "str2optimizer", - "get_linear_schedule_with_warmup", "get_cosine_schedule_with_warmup", - "get_cosine_with_hard_restarts_schedule_with_warmup", - "get_polynomial_decay_schedule_with_warmup", - "get_constant_schedule", "get_constant_schedule_with_warmup", "str2scheduler", - "FGM", "PGD", "str2adv"] diff --git a/spaces/tang155/bingo/src/components/chat-image.tsx b/spaces/tang155/bingo/src/components/chat-image.tsx deleted file mode 100644 index 05ecc9771eada27a0f2d160bb01cba170d37bb09..0000000000000000000000000000000000000000 --- a/spaces/tang155/bingo/src/components/chat-image.tsx +++ /dev/null @@ -1,170 +0,0 @@ -import { - useEffect, - useState, - useCallback, - ChangeEvent, - ClipboardEvent, - MouseEventHandler, - FormEvent, - useRef -} from "react" -import Image from 'next/image' -import PasteIcon from '@/assets/images/paste.svg' -import UploadIcon from '@/assets/images/upload.svg' -import CameraIcon from '@/assets/images/camera.svg' -import { useBing } from '@/lib/hooks/use-bing' -import { cn } from '@/lib/utils' - -interface ChatImageProps extends Pick, 'uploadImage'> {} - -const 
preventDefault: MouseEventHandler = (event) => { - event.nativeEvent.stopImmediatePropagation() -} - -const toBase64 = (file: File): Promise => new Promise((resolve, reject) => { - const reader = new FileReader() - reader.readAsDataURL(file) - reader.onload = () => resolve(reader.result as string) - reader.onerror = reject -}) - -export function ChatImage({ children, uploadImage }: React.PropsWithChildren) { - const videoRef = useRef(null) - const canvasRef = useRef(null) - const mediaStream = useRef() - const [panel, setPanel] = useState('none') - - const upload = useCallback((url: string) => { - if (url) { - uploadImage(url) - } - setPanel('none') - }, [panel]) - - const onUpload = useCallback(async (event: ChangeEvent) => { - const file = event.target.files?.[0] - if (file) { - const fileDataUrl = await toBase64(file) - if (fileDataUrl) { - upload(fileDataUrl) - } - } - }, []) - - const onPaste = useCallback((event: ClipboardEvent) => { - const pasteUrl = event.clipboardData.getData('text') ?? '' - upload(pasteUrl) - }, []) - - const onEnter = useCallback((event: FormEvent) => { - event.preventDefault() - event.stopPropagation() - // @ts-ignore - const inputUrl = event.target.elements.image.value - if (inputUrl) { - upload(inputUrl) - } - }, []) - - const openVideo: MouseEventHandler = async (event) => { - event.stopPropagation() - setPanel('camera-mode') - } - - const onCapture = () => { - if (canvasRef.current && videoRef.current) { - const canvas = canvasRef.current - canvas.width = videoRef.current!.videoWidth - canvas.height = videoRef.current!.videoHeight - canvas.getContext('2d')?.drawImage(videoRef.current, 0, 0, canvas.width, canvas.height) - const cameraUrl = canvas.toDataURL('image/jpeg') - upload(cameraUrl) - } - } - - useEffect(() => { - const handleBlur = () => { - if (panel !== 'none') { - setPanel('none') - } - } - document.addEventListener('click', handleBlur) - return () => { - document.removeEventListener('click', handleBlur) - } - }, [panel]) - - useEffect(() => { - if (panel === 'camera-mode') { - navigator.mediaDevices.getUserMedia({ video: true, audio: false }) - .then(videoStream => { - mediaStream.current = videoStream - if (videoRef.current) { - videoRef.current.srcObject = videoStream - } - }) - } else { - if (mediaStream.current) { - mediaStream.current.getTracks().forEach(function(track) { - track.stop() - }) - mediaStream.current = undefined - } - } - }, [panel]) - - return ( -
              -
              panel === 'none' ? setPanel('normal') : setPanel('none')}>{children}
              -
              -
              -
              -

              添加图像

              -
              -
              - paste -
              - e.stopPropagation()} - /> -
              -
              -
              - - -
              -
              - {panel === 'camera-mode' &&
              -
              -
              -
              -
              -
              -
              -
              } -
              -
              - ) -} diff --git a/spaces/tang155/bingo/src/components/chat-list.tsx b/spaces/tang155/bingo/src/components/chat-list.tsx deleted file mode 100644 index 624a78ef0d7be0f1192cf02a81e2e9cf214cb193..0000000000000000000000000000000000000000 --- a/spaces/tang155/bingo/src/components/chat-list.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import React from 'react' - -import { Separator } from '@/components/ui/separator' -import { ChatMessage } from '@/components/chat-message' -import { ChatMessageModel } from '@/lib/bots/bing/types' - -export interface ChatList { - messages: ChatMessageModel[] -} - -export function ChatList({ messages }: ChatList) { - if (!messages.length) { - return null - } - - return ( -
              - {messages.map((message, index) => ( - - - {index < messages.length - 1 && ( - - )} - - ))} -
              - ) -} diff --git a/spaces/tang155/bingo/src/lib/bots/bing/index.ts b/spaces/tang155/bingo/src/lib/bots/bing/index.ts deleted file mode 100644 index 2c4afae01a345b8415935228566cb30d695e768d..0000000000000000000000000000000000000000 --- a/spaces/tang155/bingo/src/lib/bots/bing/index.ts +++ /dev/null @@ -1,421 +0,0 @@ -import { fetch, WebSocket, debug } from '@/lib/isomorphic' -import WebSocketAsPromised from 'websocket-as-promised' -import { - SendMessageParams, - BingConversationStyle, - ConversationResponse, - ChatResponseMessage, - ConversationInfo, - InvocationEventType, - ChatError, - ErrorCode, - ChatUpdateCompleteResponse, - ImageInfo, - KBlobResponse -} from './types' - -import { convertMessageToMarkdown, websocketUtils, streamAsyncIterable } from './utils' -import { WatchDog, createChunkDecoder } from '@/lib/utils' - -type Params = SendMessageParams<{ bingConversationStyle: BingConversationStyle }> - -const OPTIONS_SETS = [ - 'nlu_direct_response_filter', - 'deepleo', - 'disable_emoji_spoken_text', - 'responsible_ai_policy_235', - 'enablemm', - 'iycapbing', - 'iyxapbing', - 'objopinion', - 'rweasgv2', - 'dagslnv1', - 'dv3sugg', - 'autosave', - 'iyoloxap', - 'iyoloneutral', - 'clgalileo', - 'gencontentv3', -] - -export class BingWebBot { - protected conversationContext?: ConversationInfo - protected cookie: string - protected ua: string - protected endpoint = '' - private lastText = '' - private asyncTasks: Array> = [] - - constructor(opts: { - cookie: string - ua: string - bingConversationStyle?: BingConversationStyle - conversationContext?: ConversationInfo - }) { - const { cookie, ua, conversationContext } = opts - this.cookie = cookie?.includes(';') ? cookie : `_EDGE_V=1; _U=${cookie}` - this.ua = ua - this.conversationContext = conversationContext - } - - static buildChatRequest(conversation: ConversationInfo) { - const optionsSets = OPTIONS_SETS - if (conversation.conversationStyle === BingConversationStyle.Precise) { - optionsSets.push('h3precise') - } else if (conversation.conversationStyle === BingConversationStyle.Creative) { - optionsSets.push('h3imaginative') - } - return { - arguments: [ - { - source: 'cib', - optionsSets, - allowedMessageTypes: [ - 'Chat', - 'InternalSearchQuery', - 'Disengaged', - 'InternalLoaderMessage', - 'SemanticSerp', - 'GenerateContentQuery', - 'SearchQuery', - ], - sliceIds: [ - 'winmuid1tf', - 'anssupfor_c', - 'imgchatgptv2', - 'tts2cf', - 'contansperf', - 'mlchatpc8500w', - 'mlchatpc2', - 'ctrlworkpay', - 'winshortmsgtf', - 'cibctrl', - 'sydtransctrl', - 'sydconfigoptc', - '0705trt4', - '517opinion', - '628ajcopus0', - '330uaugs0', - '529rwea', - '0626snptrcs0', - '424dagslnv1', - ], - isStartOfSession: conversation.invocationId === 0, - message: { - author: 'user', - inputMethod: 'Keyboard', - text: conversation.prompt, - imageUrl: conversation.imageUrl, - messageType: 'Chat', - }, - conversationId: conversation.conversationId, - conversationSignature: conversation.conversationSignature, - participant: { id: conversation.clientId }, - }, - ], - invocationId: conversation.invocationId.toString(), - target: 'chat', - type: InvocationEventType.StreamInvocation, - } - } - - async createConversation(): Promise { - const headers = { - 'Accept-Encoding': 'gzip, deflate, br, zsdch', - 'User-Agent': this.ua, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - cookie: this.cookie, - } - - let resp: ConversationResponse | undefined - try { - const response = await fetch(this.endpoint + 
'/api/create', { method: 'POST', headers, redirect: 'error', mode: 'cors', credentials: 'include' }) - if (response.status === 404) { - throw new ChatError('Not Found', ErrorCode.NOTFOUND_ERROR) - } - resp = await response.json() as ConversationResponse - } catch (err) { - console.error('create conversation error', err) - } - - if (!resp?.result) { - throw new ChatError('Invalid response', ErrorCode.UNKOWN_ERROR) - } - - const { value, message } = resp.result || {} - if (value !== 'Success') { - const errorMsg = `${value}: ${message}` - if (value === 'UnauthorizedRequest') { - throw new ChatError(errorMsg, ErrorCode.BING_UNAUTHORIZED) - } - if (value === 'Forbidden') { - throw new ChatError(errorMsg, ErrorCode.BING_FORBIDDEN) - } - throw new ChatError(errorMsg, ErrorCode.UNKOWN_ERROR) - } - return resp - } - - private async createContext(conversationStyle: BingConversationStyle) { - if (!this.conversationContext) { - const conversation = await this.createConversation() - this.conversationContext = { - conversationId: conversation.conversationId, - conversationSignature: conversation.conversationSignature, - clientId: conversation.clientId, - invocationId: 0, - conversationStyle, - prompt: '', - } - } - return this.conversationContext - } - - async sendMessage(params: Params) { - try { - await this.createContext(params.options.bingConversationStyle) - Object.assign(this.conversationContext!, { prompt: params.prompt, imageUrl: params.imageUrl }) - return this.sydneyProxy(params) - } catch (error) { - params.onEvent({ - type: 'ERROR', - error: error instanceof ChatError ? error : new ChatError('Catch Error', ErrorCode.UNKOWN_ERROR), - }) - } - } - - private async sydneyProxy(params: Params) { - const abortController = new AbortController() - const response = await fetch(this.endpoint + '/api/sydney', { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, - signal: abortController.signal, - body: JSON.stringify(this.conversationContext!) 
- }) - if (response.status !== 200) { - params.onEvent({ - type: 'ERROR', - error: new ChatError( - 'Unknown error', - ErrorCode.UNKOWN_ERROR, - ), - }) - } - params.signal?.addEventListener('abort', () => { - abortController.abort() - }) - - const textDecoder = createChunkDecoder() - for await (const chunk of streamAsyncIterable(response.body!)) { - this.parseEvents(params, websocketUtils.unpackMessage(textDecoder(chunk))) - } - } - - async sendWs() { - const wsConfig: ConstructorParameters[1] = { - packMessage: websocketUtils.packMessage, - unpackMessage: websocketUtils.unpackMessage, - createWebSocket: (url) => new WebSocket(url, { - headers: { - 'accept-language': 'zh-CN,zh;q=0.9', - 'cache-control': 'no-cache', - 'User-Agent': this.ua, - pragma: 'no-cache', - cookie: this.cookie, - } - }) - } - const wsp = new WebSocketAsPromised('wss://sydney.bing.com/sydney/ChatHub', wsConfig) - - wsp.open().then(() => { - wsp.sendPacked({ protocol: 'json', version: 1 }) - wsp.sendPacked({ type: 6 }) - wsp.sendPacked(BingWebBot.buildChatRequest(this.conversationContext!)) - }) - - return wsp - } - - private async useWs(params: Params) { - const wsp = await this.sendWs() - const watchDog = new WatchDog() - wsp.onUnpackedMessage.addListener((events) => { - watchDog.watch(() => { - wsp.sendPacked({ type: 6 }) - }) - this.parseEvents(params, events) - }) - - wsp.onClose.addListener(() => { - watchDog.reset() - params.onEvent({ type: 'DONE' }) - wsp.removeAllListeners() - }) - - params.signal?.addEventListener('abort', () => { - wsp.removeAllListeners() - wsp.close() - }) - } - - private async createImage(prompt: string, id: string) { - try { - const headers = { - 'Accept-Encoding': 'gzip, deflate, br, zsdch', - 'User-Agent': this.ua, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - cookie: this.cookie, - } - const query = new URLSearchParams({ - prompt, - id - }) - const response = await fetch(this.endpoint + '/api/image?' 
+ query.toString(), - { - method: 'POST', - headers, - mode: 'cors', - credentials: 'include' - }) - .then(res => res.text()) - if (response) { - this.lastText += '\n' + response - } - } catch (err) { - console.error('Create Image Error', err) - } - } - - private buildKnowledgeApiPayload(imageUrl: string, conversationStyle: BingConversationStyle) { - const imageInfo: ImageInfo = {} - let imageBase64: string | undefined = undefined - const knowledgeRequest = { - imageInfo, - knowledgeRequest: { - invokedSkills: [ - 'ImageById' - ], - subscriptionId: 'Bing.Chat.Multimodal', - invokedSkillsRequestData: { - enableFaceBlur: true - }, - convoData: { - convoid: this.conversationContext?.conversationId, - convotone: conversationStyle, - } - }, - } - - if (imageUrl.startsWith('data:image/')) { - imageBase64 = imageUrl.replace('data:image/', ''); - const partIndex = imageBase64.indexOf(',') - if (partIndex) { - imageBase64 = imageBase64.substring(partIndex + 1) - } - } else { - imageInfo.url = imageUrl - } - return { knowledgeRequest, imageBase64 } - } - - async uploadImage(imageUrl: string, conversationStyle: BingConversationStyle = BingConversationStyle.Creative): Promise { - if (!imageUrl) { - return - } - await this.createContext(conversationStyle) - const payload = this.buildKnowledgeApiPayload(imageUrl, conversationStyle) - - const response = await fetch(this.endpoint + '/api/kblob', - { - headers: { - 'Content-Type': 'application/json', - }, - method: 'POST', - mode: 'cors', - credentials: 'include', - body: JSON.stringify(payload), - }) - .then(res => res.json()) - .catch(e => { - console.log('Error', e) - }) - return response - } - - private async generateContent(message: ChatResponseMessage) { - if (message.contentType === 'IMAGE') { - this.asyncTasks.push(this.createImage(message.text, message.messageId)) - } - } - - private async parseEvents(params: Params, events: any) { - const conversation = this.conversationContext! - - events?.forEach(async (event: ChatUpdateCompleteResponse) => { - debug('bing event', event) - if (event.type === 3) { - await Promise.all(this.asyncTasks) - this.asyncTasks = [] - params.onEvent({ type: 'UPDATE_ANSWER', data: { text: this.lastText } }) - params.onEvent({ type: 'DONE' }) - conversation.invocationId = parseInt(event.invocationId, 10) + 1 - } else if (event.type === 1) { - const messages = event.arguments[0].messages - if (messages) { - const text = convertMessageToMarkdown(messages[0]) - this.lastText = text - params.onEvent({ type: 'UPDATE_ANSWER', data: { text, spokenText: messages[0].text, throttling: event.arguments[0].throttling } }) - } - } else if (event.type === 2) { - const messages = event.item.messages as ChatResponseMessage[] | undefined - if (!messages) { - params.onEvent({ - type: 'ERROR', - error: new ChatError( - event.item.result.error || 'Unknown error', - event.item.result.value === 'Throttled' ? ErrorCode.THROTTLE_LIMIT - : event.item.result.value === 'CaptchaChallenge' ? (this.conversationContext?.conversationId?.includes('BingProdUnAuthenticatedUsers') ? 
ErrorCode.BING_UNAUTHORIZED : ErrorCode.BING_CAPTCHA) - : ErrorCode.UNKOWN_ERROR - ), - }) - return - } - const limited = messages.some((message) => - message.contentOrigin === 'TurnLimiter' - || message.messageType === 'Disengaged' - ) - if (limited) { - params.onEvent({ - type: 'ERROR', - error: new ChatError( - 'Sorry, you have reached chat limit in this conversation.', - ErrorCode.CONVERSATION_LIMIT, - ), - }) - return - } - - const lastMessage = event.item.messages.at(-1) as ChatResponseMessage - const specialMessage = event.item.messages.find(message => message.author === 'bot' && message.contentType === 'IMAGE') - if (specialMessage) { - this.generateContent(specialMessage) - } - - if (lastMessage) { - const text = convertMessageToMarkdown(lastMessage) - this.lastText = text - params.onEvent({ - type: 'UPDATE_ANSWER', - data: { text, throttling: event.item.throttling, suggestedResponses: lastMessage.suggestedResponses, sourceAttributions: lastMessage.sourceAttributions }, - }) - } - } - }) - } - - resetConversation() { - this.conversationContext = undefined - } -} diff --git a/spaces/tecnolitas/MJ-prompt-generator/README.md b/spaces/tecnolitas/MJ-prompt-generator/README.md deleted file mode 100644 index 17a40a4d5e0ebf29f7081c6b945530f8d001b84a..0000000000000000000000000000000000000000 --- a/spaces/tecnolitas/MJ-prompt-generator/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Midjourney Prompt Generator -emoji: 📚 -colorFrom: indigo -colorTo: gray -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -license: unknown ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/terfces0erbo/CollegeProjectV2/Archmodels Vol 96 Torrent 12 [NEW].md b/spaces/terfces0erbo/CollegeProjectV2/Archmodels Vol 96 Torrent 12 [NEW].md deleted file mode 100644 index c1f7ba43e8a13aee55011d658aaba78bf81df3f0..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Archmodels Vol 96 Torrent 12 [NEW].md +++ /dev/null @@ -1,9 +0,0 @@ -
              -

Archmodels for Corona vol. 1 and Evermotion Archmodels vol. 1 (118) are also circulated as torrents; archive listings on evermotion.us cover downloads for both Windows 10 and Mac.

              -

              archmodels vol 96 torrent 12


              DOWNLOAD 🗸🗸🗸 https://bytlly.com/2uGm9r



              -

Evermotion Archmodels volumes (vol. 1 through vol. 123, including vol. 15 and vol. 77) are offered as free torrent downloads; most of the listings are reposts from 2013 blog archives.

              -

Evermotion Archmodels vols. 1-100 can likewise be found as free torrent downloads, including the 2013 releases around vols. 77-85; these listings are largely reposts from the same 2013 blog archives.

              -

Archmodels vol. 236 includes 36 professional, high-quality 3D models for architectural visualizations. The collection consists of interior props complete with textures and materials. Evermotion Archmodels vol. 77 is available as a separate download.

              -

              899543212b
              -
              -
              \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/CSI Safe 8.1.1.rar.md b/spaces/terfces0erbo/CollegeProjectV2/CSI Safe 8.1.1.rar.md deleted file mode 100644 index 347951572c22d40a0f3456fa8a67b03a6095fe0e..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/CSI Safe 8.1.1.rar.md +++ /dev/null @@ -1,48 +0,0 @@ -
              -

              How to Download and Install CSI Safe 8.1.1.rar for Free

              -

CSI Safe is powerful software for designing and analyzing concrete floor and foundation systems. It can handle complex geometries, multiple load cases, reinforcement design, punching shear checks, and more. CSI Safe 8.1.1.rar is the latest version of the software and comes with many new features and improvements.

              -

              CSI Safe 8.1.1.rar


              DOWNLOAD ✔✔✔ https://bytlly.com/2uGkvG



              -

In this article, we will show you how to download and install CSI Safe 8.1.1.rar for free on your computer. You will need a torrent client such as uTorrent or BitTorrent to download the file, and an archiver such as WinRAR or 7-Zip to extract it. Follow these steps to get started:

              -
                -
1. Go to https://www.torrentz2.eu/search?f=CSI+Safe+8.1.1.rar and choose one of the torrent links that have many seeders and leechers.
2. Open the link with your torrent client and start downloading the file.
3. Once the download is complete, locate the file in your downloads folder and right-click on it.
4. Select "Extract here" or "Extract to CSI Safe 8.1.1.rar" depending on your software.
5. You will see a folder named "CSI Safe 8.1.1" that contains the setup file and the crack file.
6. Run the setup file and follow the instructions to install the software on your computer.
7. Do not launch the software yet.
8. Copy the crack file from the folder and paste it into the installation directory of the software.
9. Replace the original file if prompted.
10. Now you can launch the software and enjoy its full features for free.
              -

              CSI Safe 8.1.1 screenshot

              -

CSI Safe 8.1.1.rar is powerful and versatile software for designing and analyzing concrete floor and foundation systems, and it can help you create safe and efficient structures with ease. However, please note that downloading and installing cracked software is illegal and may harm your computer or data. We do not recommend or endorse this method, and we are not responsible for any consequences that may arise from it. If you like the software, please support the developers by purchasing a legitimate license from their website: https://www.csiamerica.com/products/safe.

              -

              - -

              What's New in CSI Safe 8.1.1.rar?

              -

As the latest version, CSI Safe 8.1.1.rar comes with many new features and improvements. Some of the highlights are:

              -
                -
• A new option to design for punching shear using ACI 318-19 provisions.
• A new option to design for torsion using ACI 318-19 provisions.
• A new option to design for shear friction using ACI 318-19 provisions.
• A new option to design for two-way shear using ACI 318-19 provisions.
• A new option to design for crack control using ACI 318-19 provisions.
• A new option to design for minimum reinforcement using ACI 318-19 provisions.
• A new option to design for temperature and shrinkage effects using ACI 318-19 provisions.
• A new option to design for post-tensioning losses using ACI 318-19 provisions.
• A new option to design for long-term deflections using ACI 318-19 provisions.
• A new option to design for fire resistance using ACI 318-19 provisions.
• A new option to import and export models from and to Revit Structure.
• A new option to import and export models from and to ETABS.
• A new option to import and export models from and to SAP2000.
• A new option to import and export models from and to AutoCAD.
• A new option to import and export models from and to Excel.
• A new option to import and export models from and to DXF.
• A new option to import and export models from and to DWG.
• A new option to import and export models from and to STAAD.Pro.
• A new option to import and export models from and to Tekla Structures.
• A new option to import and export models from and to SketchUp.
              -

These are just some of the new features and improvements that CSI Safe 8.1.1.rar offers; for a complete list of changes, please refer to the release notes included in the download file. CSI Safe 8.1.1.rar is must-have software for any engineer or designer who works with concrete floor and foundation systems, and it can help you create safe and efficient structures with ease.

              d5da3c52bf
              -
              -
              \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/Grass Valley Edius 6.5 Free Download Full Version.md b/spaces/terfces0erbo/CollegeProjectV2/Grass Valley Edius 6.5 Free Download Full Version.md deleted file mode 100644 index 1b496ffd47cd66995e15b04b3709fbe0eeba04dd..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Grass Valley Edius 6.5 Free Download Full Version.md +++ /dev/null @@ -1,6 +0,0 @@ -

              grass valley edius 6.5 free download full version


              Download ✒ ✒ ✒ https://bytlly.com/2uGkWf



              - -Grass Valley's EDIUS 6 Editing Software is a fast and highly versatile multi-format editing program designed for use by both filmmakers and ... 4d29de3e1b
              -
              -
              -

              diff --git a/spaces/test12356/SUI-svc-3.0/add_speaker.py b/spaces/test12356/SUI-svc-3.0/add_speaker.py deleted file mode 100644 index fb6013dd8542efd62915ebdd445012ae7a4bdc28..0000000000000000000000000000000000000000 --- a/spaces/test12356/SUI-svc-3.0/add_speaker.py +++ /dev/null @@ -1,62 +0,0 @@ -import os -import argparse -from tqdm import tqdm -from random import shuffle -import json - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--train_list", type=str, default="./filelists/train.txt", help="path to train list") - parser.add_argument("--val_list", type=str, default="./filelists/val.txt", help="path to val list") - parser.add_argument("--test_list", type=str, default="./filelists/test.txt", help="path to test list") - parser.add_argument("--source_dir", type=str, default="./dataset/48k", help="path to source dir") - args = parser.parse_args() - - previous_config = json.load(open("configs/config.json", "rb")) - - train = [] - val = [] - test = [] - idx = 0 - spk_dict = previous_config["spk"] - spk_id = max([i for i in spk_dict.values()]) + 1 - for speaker in tqdm(os.listdir(args.source_dir)): - if speaker not in spk_dict.keys(): - spk_dict[speaker] = spk_id - spk_id += 1 - wavs = [os.path.join(args.source_dir, speaker, i)for i in os.listdir(os.path.join(args.source_dir, speaker))] - wavs = [i for i in wavs if i.endswith("wav")] - shuffle(wavs) - train += wavs[2:-10] - val += wavs[:2] - test += wavs[-10:] - - assert previous_config["model"]["n_speakers"] > len(spk_dict.keys()) - shuffle(train) - shuffle(val) - shuffle(test) - - print("Writing", args.train_list) - with open(args.train_list, "w") as f: - for fname in tqdm(train): - wavpath = fname - f.write(wavpath + "\n") - - print("Writing", args.val_list) - with open(args.val_list, "w") as f: - for fname in tqdm(val): - wavpath = fname - f.write(wavpath + "\n") - - print("Writing", args.test_list) - with open(args.test_list, "w") as f: - for fname in tqdm(test): - wavpath = fname - f.write(wavpath + "\n") - - previous_config["spk"] = spk_dict - - print("Writing configs/config.json") - with open("configs/config.json", "w") as f: - json.dump(previous_config, f, indent=2) diff --git a/spaces/teven-projects/calculator/Dockerfile b/spaces/teven-projects/calculator/Dockerfile deleted file mode 100644 index 51059d21dc49b9e28e8fcc3233a1364c55b83359..0000000000000000000000000000000000000000 --- a/spaces/teven-projects/calculator/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM python:3.7 - -WORKDIR /code - -COPY ./requirements.txt /code/requirements.txt - -RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt - -COPY . . - -CMD ["bokeh", "serve", "optimal_training", "--allow-websocket-origin=*"] diff --git a/spaces/tfwang/PITI-Synthesis/glide_text2im/losses.py b/spaces/tfwang/PITI-Synthesis/glide_text2im/losses.py deleted file mode 100644 index 251e42e4f36a31bb5e1aeda874b3a45d722000a2..0000000000000000000000000000000000000000 --- a/spaces/tfwang/PITI-Synthesis/glide_text2im/losses.py +++ /dev/null @@ -1,77 +0,0 @@ -""" -Helpers for various likelihood-based losses. These are ported from the original -Ho et al. diffusion models codebase: -https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py -""" - -import numpy as np - -import torch as th - - -def normal_kl(mean1, logvar1, mean2, logvar2): - """ - Compute the KL divergence between two gaussians. 
- - Shapes are automatically broadcasted, so batches can be compared to - scalars, among other use cases. - """ - tensor = None - for obj in (mean1, logvar1, mean2, logvar2): - if isinstance(obj, th.Tensor): - tensor = obj - break - assert tensor is not None, "at least one argument must be a Tensor" - - # Force variances to be Tensors. Broadcasting helps convert scalars to - # Tensors, but it does not work for th.exp(). - logvar1, logvar2 = [ - x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor) - for x in (logvar1, logvar2) - ] - - return 0.5 * ( - -1.0 - + logvar2 - - logvar1 - + th.exp(logvar1 - logvar2) - + ((mean1 - mean2) ** 2) * th.exp(-logvar2) - ) - - -def approx_standard_normal_cdf(x): - """ - A fast approximation of the cumulative distribution function of the - standard normal. - """ - return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3)))) - - -def discretized_gaussian_log_likelihood(x, *, means, log_scales): - """ - Compute the log-likelihood of a Gaussian distribution discretizing to a - given image. - - :param x: the target images. It is assumed that this was uint8 values, - rescaled to the range [-1, 1]. - :param means: the Gaussian mean Tensor. - :param log_scales: the Gaussian log stddev Tensor. - :return: a tensor like x of log probabilities (in nats). - """ - assert x.shape == means.shape == log_scales.shape - centered_x = x - means - inv_stdv = th.exp(-log_scales) - plus_in = inv_stdv * (centered_x + 1.0 / 255.0) - cdf_plus = approx_standard_normal_cdf(plus_in) - min_in = inv_stdv * (centered_x - 1.0 / 255.0) - cdf_min = approx_standard_normal_cdf(min_in) - log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12)) - log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12)) - cdf_delta = cdf_plus - cdf_min - log_probs = th.where( - x < -0.999, - log_cdf_plus, - th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))), - ) - assert log_probs.shape == x.shape - return log_probs diff --git a/spaces/thecho7/deepfake/training/pipelines/__init__.py b/spaces/thecho7/deepfake/training/pipelines/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/thelou1s/yamnet/test_mp3.py b/spaces/thelou1s/yamnet/test_mp3.py deleted file mode 100644 index ad07bcee426669a6042332e4d9444619dd80db7a..0000000000000000000000000000000000000000 --- a/spaces/thelou1s/yamnet/test_mp3.py +++ /dev/null @@ -1,51 +0,0 @@ - -# https://tfhub.dev/google/lite-model/yamnet/classification/tflite/1 - -import tensorflow as tf -import tensorflow_hub as hub -import numpy as np -import csv - -# import matplotlib.pyplot as plt -# from IPython.display import Audio -from scipy.io import wavfile -import scipy - -# import soundfile as sf -# import audio2numpy as a2n -import os - -import gradio as gr - -# import audio2numpy -# import numpy as np - -from pydub import AudioSegment -from matplotlib import pyplot as plt - - -# https://stackoverflow.com/questions/16634128/how-to-extract-the-raw-data-from-a-mp3-file-using-python -# This will open and read the audio file with pydub. Replace the file path with -# your own file. -audio_file = AudioSegment.from_file('miaow_16k.mp3') - -# Set up a list for us to dump PCM samples into, and create a 'data' variable -# so we don't need to type audio_file._data again -data = audio_file._data -pcm16_signed_integers = [] - -# This loop decodes the bytestring into PCM samples. 
-# The bytestring is a stream of little-endian encoded signed integers. -# This basically just cuts each two-byte sample out of the bytestring, converts -# it to an integer, and appends it to the list of samples. -for sample_index in range(len(data) // 2): - sample = int.from_bytes(data[sample_index * 2:sample_index * 2 + 2], 'little', signed=True) - pcm16_signed_integers.append(sample / 255) - -wav_data = np.array([x for x in pcm16_signed_integers]) -sample_rate = 16000 -if debug: print(f'pcm16_signed_integers: {len(pcm16_signed_integers)}') - -# Now plot the samples! -plt.plot(pcm16_signed_integers) -plt.show() \ No newline at end of file diff --git a/spaces/thu-coai/DA-Transformer/vis-network.min.js b/spaces/thu-coai/DA-Transformer/vis-network.min.js deleted file mode 100644 index 0e59f1131c51c07b70a50a1b224589901a99ac44..0000000000000000000000000000000000000000 --- a/spaces/thu-coai/DA-Transformer/vis-network.min.js +++ /dev/null @@ -1,34 +0,0 @@ -/** - * vis-network - * https://visjs.github.io/vis-network/ - * - * A dynamic, browser-based visualization library. - * - * @version 0.0.0-no-version - * @date 2023-04-22T23:20:23.530Z - * - * @copyright (c) 2011-2017 Almende B.V, http://almende.com - * @copyright (c) 2017-2019 visjs contributors, https://github.com/visjs - * - * @license - * vis.js is dual licensed under both - * - * 1. The Apache 2.0 License - * http://www.apache.org/licenses/LICENSE-2.0 - * - * and - * - * 2. The MIT License - * http://opensource.org/licenses/MIT - * - * vis.js may be distributed under either license. - */ -!function(g,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports):"function"==typeof define&&define.amd?define(["exports"],t):t((g="undefined"!=typeof globalThis?globalThis:g||self).vis=g.vis||{})}(this,(function(g){var t="undefined"!=typeof globalThis?globalThis:"undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:{};function e(g){return g&&g.__esModule&&Object.prototype.hasOwnProperty.call(g,"default")?g.default:g}var A={},C={get exports(){return A},set exports(g){A=g}},I=function(g){return g&&g.Math==Math&&g},i=I("object"==typeof globalThis&&globalThis)||I("object"==typeof window&&window)||I("object"==typeof self&&self)||I("object"==typeof t&&t)||function(){return this}()||Function("return this")(),o=function(g){try{return!!g()}catch(g){return!0}},n=!o((function(){var g=function(){}.bind();return"function"!=typeof g||g.hasOwnProperty("prototype")})),r=n,s=Function.prototype,a=s.apply,d=s.call,h="object"==typeof Reflect&&Reflect.apply||(r?d.bind(a):function(){return d.apply(a,arguments)}),l=n,c=Function.prototype,u=c.call,p=l&&c.bind.bind(u,u),f=l?p:function(g){return function(){return u.apply(g,arguments)}},v=f,y=v({}.toString),m=v("".slice),b=function(g){return m(y(g),8,-1)},w=b,x=f,k=function(g){if("Function"===w(g))return x(g)},E="object"==typeof document&&document.all,O={all:E,IS_HTMLDDA:void 0===E&&void 0!==E},T=O.all,D=O.IS_HTMLDDA?function(g){return"function"==typeof g||g===T}:function(g){return"function"==typeof g},N={},R=!o((function(){return 7!=Object.defineProperty({},1,{get:function(){return 7}})[1]})),P=n,M=Function.prototype.call,B=P?M.bind(M):function(){return M.apply(M,arguments)},z={},S={}.propertyIsEnumerable,Z=Object.getOwnPropertyDescriptor,F=Z&&!S.call({1:2},1);z.f=F?function(g){var t=Z(this,g);return!!t&&t.enumerable}:S;var 
G,j,L=function(g,t){return{enumerable:!(1&g),configurable:!(2&g),writable:!(4&g),value:t}},V=o,Y=b,W=Object,Q=f("".split),U=V((function(){return!W("z").propertyIsEnumerable(0)}))?function(g){return"String"==Y(g)?Q(g,""):W(g)}:W,_=function(g){return null==g},H=_,K=TypeError,X=function(g){if(H(g))throw K("Can't call method on "+g);return g},J=U,q=X,$=function(g){return J(q(g))},gg=D,tg=O.all,eg=O.IS_HTMLDDA?function(g){return"object"==typeof g?null!==g:gg(g)||g===tg}:function(g){return"object"==typeof g?null!==g:gg(g)},Ag={},Cg=Ag,Ig=i,ig=D,og=function(g){return ig(g)?g:void 0},ng=function(g,t){return arguments.length<2?og(Cg[g])||og(Ig[g]):Cg[g]&&Cg[g][t]||Ig[g]&&Ig[g][t]},rg=f({}.isPrototypeOf),sg="undefined"!=typeof navigator&&String(navigator.userAgent)||"",ag=i,dg=sg,hg=ag.process,lg=ag.Deno,cg=hg&&hg.versions||lg&&lg.version,ug=cg&&cg.v8;ug&&(j=(G=ug.split("."))[0]>0&&G[0]<4?1:+(G[0]+G[1])),!j&&dg&&(!(G=dg.match(/Edge\/(\d+)/))||G[1]>=74)&&(G=dg.match(/Chrome\/(\d+)/))&&(j=+G[1]);var pg=j,fg=pg,vg=o,yg=!!Object.getOwnPropertySymbols&&!vg((function(){var g=Symbol();return!String(g)||!(Object(g)instanceof Symbol)||!Symbol.sham&&fg&&fg<41})),mg=yg&&!Symbol.sham&&"symbol"==typeof Symbol.iterator,bg=ng,wg=D,xg=rg,kg=Object,Eg=mg?function(g){return"symbol"==typeof g}:function(g){var t=bg("Symbol");return wg(t)&&xg(t.prototype,kg(g))},Og=String,Tg=function(g){try{return Og(g)}catch(g){return"Object"}},Dg=D,Ng=Tg,Rg=TypeError,Pg=function(g){if(Dg(g))return g;throw Rg(Ng(g)+" is not a function")},Mg=Pg,Bg=_,zg=function(g,t){var e=g[t];return Bg(e)?void 0:Mg(e)},Sg=B,Zg=D,Fg=eg,Gg=TypeError,jg={},Lg={get exports(){return jg},set exports(g){jg=g}},Vg=i,Yg=Object.defineProperty,Wg=function(g,t){try{Yg(Vg,g,{value:t,configurable:!0,writable:!0})}catch(e){Vg[g]=t}return t},Qg="__core-js_shared__",Ug=i[Qg]||Wg(Qg,{}),_g=Ug;(Lg.exports=function(g,t){return _g[g]||(_g[g]=void 0!==t?t:{})})("versions",[]).push({version:"3.29.0",mode:"pure",copyright:"© 2014-2023 Denis Pushkarev (zloirock.ru)",license:"https://github.com/zloirock/core-js/blob/v3.29.0/LICENSE",source:"https://github.com/zloirock/core-js"});var Hg=X,Kg=Object,Xg=function(g){return Kg(Hg(g))},Jg=Xg,qg=f({}.hasOwnProperty),$g=Object.hasOwn||function(g,t){return qg(Jg(g),t)},gt=f,tt=0,et=Math.random(),At=gt(1..toString),Ct=function(g){return"Symbol("+(void 0===g?"":g)+")_"+At(++tt+et,36)},It=jg,it=$g,ot=Ct,nt=yg,rt=mg,st=i.Symbol,at=It("wks"),dt=rt?st.for||st:st&&st.withoutSetter||ot,ht=function(g){return it(at,g)||(at[g]=nt&&it(st,g)?st[g]:dt("Symbol."+g)),at[g]},lt=B,ct=eg,ut=Eg,pt=zg,ft=function(g,t){var e,A;if("string"===t&&Zg(e=g.toString)&&!Fg(A=Sg(e,g)))return A;if(Zg(e=g.valueOf)&&!Fg(A=Sg(e,g)))return A;if("string"!==t&&Zg(e=g.toString)&&!Fg(A=Sg(e,g)))return A;throw Gg("Can't convert object to primitive value")},vt=TypeError,yt=ht("toPrimitive"),mt=function(g,t){if(!ct(g)||ut(g))return g;var e,A=pt(g,yt);if(A){if(void 0===t&&(t="default"),e=lt(A,g,t),!ct(e)||ut(e))return e;throw vt("Can't convert object to primitive value")}return void 0===t&&(t="number"),ft(g,t)},bt=Eg,wt=function(g){var t=mt(g,"string");return bt(t)?t:t+""},xt=eg,kt=i.document,Et=xt(kt)&&xt(kt.createElement),Ot=function(g){return Et?kt.createElement(g):{}},Tt=Ot,Dt=!R&&!o((function(){return 7!=Object.defineProperty(Tt("div"),"a",{get:function(){return 7}}).a})),Nt=R,Rt=B,Pt=z,Mt=L,Bt=$,zt=wt,St=$g,Zt=Dt,Ft=Object.getOwnPropertyDescriptor;N.f=Nt?Ft:function(g,t){if(g=Bt(g),t=zt(t),Zt)try{return Ft(g,t)}catch(g){}if(St(g,t))return Mt(!Rt(Pt.f,g,t),g[t])};var 
Gt=o,jt=D,Lt=/#|\.prototype\./,Vt=function(g,t){var e=Wt[Yt(g)];return e==Ut||e!=Qt&&(jt(t)?Gt(t):!!t)},Yt=Vt.normalize=function(g){return String(g).replace(Lt,".").toLowerCase()},Wt=Vt.data={},Qt=Vt.NATIVE="N",Ut=Vt.POLYFILL="P",_t=Vt,Ht=Pg,Kt=n,Xt=k(k.bind),Jt=function(g,t){return Ht(g),void 0===t?g:Kt?Xt(g,t):function(){return g.apply(t,arguments)}},qt={},$t=R&&o((function(){return 42!=Object.defineProperty((function(){}),"prototype",{value:42,writable:!1}).prototype})),ge=eg,te=String,ee=TypeError,Ae=function(g){if(ge(g))return g;throw ee(te(g)+" is not an object")},Ce=R,Ie=Dt,ie=$t,oe=Ae,ne=wt,re=TypeError,se=Object.defineProperty,ae=Object.getOwnPropertyDescriptor,de="enumerable",he="configurable",le="writable";qt.f=Ce?ie?function(g,t,e){if(oe(g),t=ne(t),oe(e),"function"==typeof g&&"prototype"===t&&"value"in e&&le in e&&!e[le]){var A=ae(g,t);A&&A[le]&&(g[t]=e.value,e={configurable:he in e?e[he]:A[he],enumerable:de in e?e[de]:A[de],writable:!1})}return se(g,t,e)}:se:function(g,t,e){if(oe(g),t=ne(t),oe(e),Ie)try{return se(g,t,e)}catch(g){}if("get"in e||"set"in e)throw re("Accessors not supported");return"value"in e&&(g[t]=e.value),g};var ce=qt,ue=L,pe=R?function(g,t,e){return ce.f(g,t,ue(1,e))}:function(g,t,e){return g[t]=e,g},fe=i,ve=h,ye=k,me=D,be=N.f,we=_t,xe=Ag,ke=Jt,Ee=pe,Oe=$g,Te=function(g){var t=function(e,A,C){if(this instanceof t){switch(arguments.length){case 0:return new g;case 1:return new g(e);case 2:return new g(e,A)}return new g(e,A,C)}return ve(g,this,arguments)};return t.prototype=g.prototype,t},De=function(g,t){var e,A,C,I,i,o,n,r,s,a=g.target,d=g.global,h=g.stat,l=g.proto,c=d?fe:h?fe[a]:(fe[a]||{}).prototype,u=d?xe:xe[a]||Ee(xe,a,{})[a],p=u.prototype;for(I in t)A=!(e=we(d?I:a+(h?".":"#")+I,g.forced))&&c&&Oe(c,I),o=u[I],A&&(n=g.dontCallGetSet?(s=be(c,I))&&s.value:c[I]),i=A&&n?n:t[I],A&&typeof o==typeof i||(r=g.bind&&A?ke(i,fe):g.wrap&&A?Te(i):l&&me(i)?ye(i):i,(g.sham||i&&i.sham||o&&o.sham)&&Ee(r,"sham",!0),Ee(u,I,r),l&&(Oe(xe,C=a+"Prototype")||Ee(xe,C,{}),Ee(xe[C],I,i),g.real&&p&&(e||!p[I])&&Ee(p,I,i)))},Ne=Math.ceil,Re=Math.floor,Pe=Math.trunc||function(g){var t=+g;return(t>0?Re:Ne)(t)},Me=function(g){var t=+g;return t!=t||0===t?0:Pe(t)},Be=Me,ze=Math.max,Se=Math.min,Ze=function(g,t){var e=Be(g);return e<0?ze(e+t,0):Se(e,t)},Fe=Me,Ge=Math.min,je=function(g){return g>0?Ge(Fe(g),9007199254740991):0},Le=function(g){return je(g.length)},Ve=$,Ye=Ze,We=Le,Qe=function(g){return function(t,e,A){var C,I=Ve(t),i=We(I),o=Ye(A,i);if(g&&e!=e){for(;i>o;)if((C=I[o++])!=C)return!0}else for(;i>o;o++)if((g||o in I)&&I[o]===e)return g||o||0;return!g&&-1}},Ue={includes:Qe(!0),indexOf:Qe(!1)},_e={},He=$g,Ke=$,Xe=Ue.indexOf,Je=_e,qe=f([].push),$e=function(g,t){var e,A=Ke(g),C=0,I=[];for(e in A)!He(Je,e)&&He(A,e)&&qe(I,e);for(;t.length>C;)He(A,e=t[C++])&&(~Xe(I,e)||qe(I,e));return I},gA=["constructor","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","toLocaleString","toString","valueOf"],tA=$e,eA=gA,AA=Object.keys||function(g){return tA(g,eA)},CA={};CA.f=Object.getOwnPropertySymbols;var IA=R,iA=f,oA=B,nA=o,rA=AA,sA=CA,aA=z,dA=Xg,hA=U,lA=Object.assign,cA=Object.defineProperty,uA=iA([].concat),pA=!lA||nA((function(){if(IA&&1!==lA({b:1},lA(cA({},"a",{enumerable:!0,get:function(){cA(this,"b",{value:3,enumerable:!1})}}),{b:2})).b)return!0;var g={},t={},e=Symbol(),A="abcdefghijklmnopqrst";return g[e]=7,A.split("").forEach((function(g){t[g]=g})),7!=lA({},g)[e]||rA(lA({},t)).join("")!=A}))?function(g,t){for(var e=dA(g),A=arguments.length,C=1,I=sA.f,i=aA.f;A>C;)for(var 
o,n=hA(arguments[C++]),r=I?uA(rA(n),I(n)):rA(n),s=r.length,a=0;s>a;)o=r[a++],IA&&!oA(i,n,o)||(e[o]=n[o]);return e}:lA,fA=pA;De({target:"Object",stat:!0,arity:2,forced:Object.assign!==fA},{assign:fA});var vA=Ag.Object.assign;!function(g){g.exports=vA}(C);var yA=e(A),mA={},bA={get exports(){return mA},set exports(g){mA=g}},wA=f([].slice),xA=f,kA=Pg,EA=eg,OA=$g,TA=wA,DA=n,NA=Function,RA=xA([].concat),PA=xA([].join),MA={},BA=function(g,t,e){if(!OA(MA,t)){for(var A=[],C=0;C=.1;)(l=+I[a++%i])>s&&(l=s),h=Math.sqrt(l*l/(1+r*r)),t+=h=o<0?-h:h,e+=r*h,!0===d?g.lineTo(t,e):g.moveTo(t,e),s-=l,d=!d}var JA={circle:UA,dashedLine:XA,database:KA,diamond:function(g,t,e,A){g.beginPath(),g.lineTo(t,e+A),g.lineTo(t+A,e),g.lineTo(t,e-A),g.lineTo(t-A,e),g.closePath()},ellipse:HA,ellipse_vis:HA,hexagon:function(g,t,e,A){g.beginPath();var C=2*Math.PI/6;g.moveTo(t+A,e);for(var I=1;I<6;I++)g.lineTo(t+A*Math.cos(C*I),e+A*Math.sin(C*I));g.closePath()},roundRect:_A,square:function(g,t,e,A){g.beginPath(),g.rect(t-A,e-A,2*A,2*A),g.closePath()},star:function(g,t,e,A){g.beginPath(),e+=.1*(A*=.82);for(var C=0;C<10;C++){var I=C%2==0?1.3*A:.5*A;g.lineTo(t+I*Math.sin(2*C*Math.PI/10),e-I*Math.cos(2*C*Math.PI/10))}g.closePath()},triangle:function(g,t,e,A){g.beginPath(),e+=.275*(A*=1.15);var C=2*A,I=C/2,i=Math.sqrt(3)/6*C,o=Math.sqrt(C*C-I*I);g.moveTo(t,e-(o-i)),g.lineTo(t+I,e+i),g.lineTo(t-I,e+i),g.lineTo(t,e-(o-i)),g.closePath()},triangleDown:function(g,t,e,A){g.beginPath(),e-=.275*(A*=1.15);var C=2*A,I=C/2,i=Math.sqrt(3)/6*C,o=Math.sqrt(C*C-I*I);g.moveTo(t,e+(o-i)),g.lineTo(t+I,e-i),g.lineTo(t-I,e-i),g.lineTo(t,e+(o-i)),g.closePath()}};function qA(g,t){void 0===t&&(t={});var e=t.insertAt;if(g&&"undefined"!=typeof document){var A=document.head||document.getElementsByTagName("head")[0],C=document.createElement("style");C.type="text/css","top"===e&&A.firstChild?A.insertBefore(C,A.firstChild):A.appendChild(C),C.styleSheet?C.styleSheet.cssText=g:C.appendChild(document.createTextNode(g))}}qA(".vis-overlay{bottom:0;left:0;position:absolute;right:0;top:0;z-index:10}.vis-active{box-shadow:0 0 10px #86d5f8}");qA(".vis [class*=span]{min-height:0;width:auto}");qA('div.vis-color-picker{background-color:#fff;border-radius:15px;box-shadow:0 0 10px 0 rgba(0,0,0,.5);display:none;height:444px;left:30px;margin-left:30px;margin-top:-140px;padding:10px;position:absolute;top:0;width:310px;z-index:1}div.vis-color-picker div.vis-arrow{left:5px;position:absolute;top:147px}div.vis-color-picker div.vis-arrow:after,div.vis-color-picker div.vis-arrow:before{border:solid transparent;content:" ";height:0;pointer-events:none;position:absolute;right:100%;top:50%;width:0}div.vis-color-picker div.vis-arrow:after{border-color:hsla(0,0%,100%,0) #fff hsla(0,0%,100%,0) hsla(0,0%,100%,0);border-width:30px;margin-top:-30px}div.vis-color-picker div.vis-color{cursor:pointer;height:289px;position:absolute;width:289px}div.vis-color-picker div.vis-brightness{position:absolute;top:313px}div.vis-color-picker div.vis-opacity{position:absolute;top:350px}div.vis-color-picker div.vis-selector{background:#4c4c4c;background:-moz-linear-gradient(top,#4c4c4c 0,#595959 12%,#666 25%,#474747 39%,#2c2c2c 50%,#000 51%,#111 60%,#2b2b2b 76%,#1c1c1c 91%,#131313 100%);background:-webkit-gradient(linear,left top,left 
bottom,color-stop(0,#4c4c4c),color-stop(12%,#595959),color-stop(25%,#666),color-stop(39%,#474747),color-stop(50%,#2c2c2c),color-stop(51%,#000),color-stop(60%,#111),color-stop(76%,#2b2b2b),color-stop(91%,#1c1c1c),color-stop(100%,#131313));background:-webkit-linear-gradient(top,#4c4c4c,#595959 12%,#666 25%,#474747 39%,#2c2c2c 50%,#000 51%,#111 60%,#2b2b2b 76%,#1c1c1c 91%,#131313);background:-o-linear-gradient(top,#4c4c4c 0,#595959 12%,#666 25%,#474747 39%,#2c2c2c 50%,#000 51%,#111 60%,#2b2b2b 76%,#1c1c1c 91%,#131313 100%);background:-ms-linear-gradient(top,#4c4c4c 0,#595959 12%,#666 25%,#474747 39%,#2c2c2c 50%,#000 51%,#111 60%,#2b2b2b 76%,#1c1c1c 91%,#131313 100%);background:linear-gradient(180deg,#4c4c4c 0,#595959 12%,#666 25%,#474747 39%,#2c2c2c 50%,#000 51%,#111 60%,#2b2b2b 76%,#1c1c1c 91%,#131313);border:1px solid #fff;border-radius:15px;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr="#4c4c4c",endColorstr="#131313",GradientType=0);height:15px;left:137px;position:absolute;top:137px;width:15px}div.vis-color-picker div.vis-new-color{left:159px;padding-right:2px;text-align:right}div.vis-color-picker div.vis-initial-color,div.vis-color-picker div.vis-new-color{border:1px solid rgba(0,0,0,.1);border-radius:5px;color:rgba(0,0,0,.4);font-size:10px;height:20px;line-height:20px;position:absolute;top:380px;vertical-align:middle;width:140px}div.vis-color-picker div.vis-initial-color{left:10px;padding-left:2px;text-align:left}div.vis-color-picker div.vis-label{left:10px;position:absolute;width:300px}div.vis-color-picker div.vis-label.vis-brightness{top:300px}div.vis-color-picker div.vis-label.vis-opacity{top:338px}div.vis-color-picker div.vis-button{background-color:#f7f7f7;border:2px solid #d9d9d9;border-radius:10px;cursor:pointer;height:25px;line-height:25px;position:absolute;text-align:center;top:410px;vertical-align:middle;width:68px}div.vis-color-picker div.vis-button.vis-cancel{left:5px}div.vis-color-picker div.vis-button.vis-load{left:82px}div.vis-color-picker div.vis-button.vis-apply{left:159px}div.vis-color-picker div.vis-button.vis-save{left:236px}div.vis-color-picker input.vis-range{height:20px;width:290px}');qA('div.vis-configuration{display:block;float:left;font-size:12px;position:relative}div.vis-configuration-wrapper{display:block;width:700px}div.vis-configuration-wrapper:after{clear:both;content:"";display:block}div.vis-configuration.vis-config-option-container{background-color:#fff;border:2px solid #f7f8fa;border-radius:4px;display:block;left:10px;margin-top:20px;padding-left:5px;width:495px}div.vis-configuration.vis-config-button{background-color:#f7f8fa;border:2px solid #ceced0;border-radius:4px;cursor:pointer;display:block;height:25px;left:10px;line-height:25px;margin-bottom:30px;margin-top:20px;padding-left:5px;vertical-align:middle;width:495px}div.vis-configuration.vis-config-button.hover{background-color:#4588e6;border:2px solid 
#214373;color:#fff}div.vis-configuration.vis-config-item{display:block;float:left;height:25px;line-height:25px;vertical-align:middle;width:495px}div.vis-configuration.vis-config-item.vis-config-s2{background-color:#f7f8fa;border-radius:3px;left:10px;padding-left:5px}div.vis-configuration.vis-config-item.vis-config-s3{background-color:#e4e9f0;border-radius:3px;left:20px;padding-left:5px}div.vis-configuration.vis-config-item.vis-config-s4{background-color:#cfd8e6;border-radius:3px;left:30px;padding-left:5px}div.vis-configuration.vis-config-header{font-size:18px;font-weight:700}div.vis-configuration.vis-config-label{height:25px;line-height:25px;width:120px}div.vis-configuration.vis-config-label.vis-config-s3{width:110px}div.vis-configuration.vis-config-label.vis-config-s4{width:100px}div.vis-configuration.vis-config-colorBlock{border:1px solid #444;border-radius:2px;cursor:pointer;height:19px;margin:0;padding:0;top:1px;width:30px}input.vis-configuration.vis-config-checkbox{left:-5px}input.vis-configuration.vis-config-rangeinput{margin:0;padding:1px;pointer-events:none;position:relative;top:-5px;width:60px}input.vis-configuration.vis-config-range{-webkit-appearance:none;background-color:transparent;border:0 solid #fff;height:20px;width:300px}input.vis-configuration.vis-config-range::-webkit-slider-runnable-track{background:#dedede;background:-moz-linear-gradient(top,#dedede 0,#c8c8c8 99%);background:-webkit-gradient(linear,left top,left bottom,color-stop(0,#dedede),color-stop(99%,#c8c8c8));background:-webkit-linear-gradient(top,#dedede,#c8c8c8 99%);background:-o-linear-gradient(top,#dedede 0,#c8c8c8 99%);background:-ms-linear-gradient(top,#dedede 0,#c8c8c8 99%);background:linear-gradient(180deg,#dedede 0,#c8c8c8 99%);border:1px solid #999;border-radius:3px;box-shadow:0 0 3px 0 #aaa;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr="#dedede",endColorstr="#c8c8c8",GradientType=0);height:5px;width:300px}input.vis-configuration.vis-config-range::-webkit-slider-thumb{-webkit-appearance:none;background:#3876c2;background:-moz-linear-gradient(top,#3876c2 0,#385380 100%);background:-webkit-gradient(linear,left top,left bottom,color-stop(0,#3876c2),color-stop(100%,#385380));background:-webkit-linear-gradient(top,#3876c2,#385380);background:-o-linear-gradient(top,#3876c2 0,#385380 100%);background:-ms-linear-gradient(top,#3876c2 0,#385380 100%);background:linear-gradient(180deg,#3876c2 0,#385380);border:1px solid #14334b;border-radius:50%;box-shadow:0 0 1px 0 #111927;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr="#3876c2",endColorstr="#385380",GradientType=0);height:17px;margin-top:-7px;width:17px}input.vis-configuration.vis-config-range:focus{outline:none}input.vis-configuration.vis-config-range:focus::-webkit-slider-runnable-track{background:#9d9d9d;background:-moz-linear-gradient(top,#9d9d9d 0,#c8c8c8 99%);background:-webkit-gradient(linear,left top,left bottom,color-stop(0,#9d9d9d),color-stop(99%,#c8c8c8));background:-webkit-linear-gradient(top,#9d9d9d,#c8c8c8 99%);background:-o-linear-gradient(top,#9d9d9d 0,#c8c8c8 99%);background:-ms-linear-gradient(top,#9d9d9d 0,#c8c8c8 99%);background:linear-gradient(180deg,#9d9d9d 0,#c8c8c8 99%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr="#9d9d9d",endColorstr="#c8c8c8",GradientType=0)}input.vis-configuration.vis-config-range::-moz-range-track{background:#dedede;background:-moz-linear-gradient(top,#dedede 0,#c8c8c8 99%);background:-webkit-gradient(linear,left top,left 
bottom,color-stop(0,#dedede),color-stop(99%,#c8c8c8));background:-webkit-linear-gradient(top,#dedede,#c8c8c8 99%);background:-o-linear-gradient(top,#dedede 0,#c8c8c8 99%);background:-ms-linear-gradient(top,#dedede 0,#c8c8c8 99%);background:linear-gradient(180deg,#dedede 0,#c8c8c8 99%);border:1px solid #999;border-radius:3px;box-shadow:0 0 3px 0 #aaa;filter:progid:DXImageTransform.Microsoft.gradient(startColorstr="#dedede",endColorstr="#c8c8c8",GradientType=0);height:10px;width:300px}input.vis-configuration.vis-config-range::-moz-range-thumb{background:#385380;border:none;border-radius:50%;height:16px;width:16px}input.vis-configuration.vis-config-range:-moz-focusring{outline:1px solid #fff;outline-offset:-1px}input.vis-configuration.vis-config-range::-ms-track{background:transparent;border-color:transparent;border-width:6px 0;color:transparent;height:5px;width:300px}input.vis-configuration.vis-config-range::-ms-fill-lower{background:#777;border-radius:10px}input.vis-configuration.vis-config-range::-ms-fill-upper{background:#ddd;border-radius:10px}input.vis-configuration.vis-config-range::-ms-thumb{background:#385380;border:none;border-radius:50%;height:16px;width:16px}input.vis-configuration.vis-config-range:focus::-ms-fill-lower{background:#888}input.vis-configuration.vis-config-range:focus::-ms-fill-upper{background:#ccc}.vis-configuration-popup{background:rgba(57,76,89,.85);border:2px solid #f2faff;border-radius:4px;color:#fff;font-size:14px;height:30px;line-height:30px;position:absolute;text-align:center;-webkit-transition:opacity .3s ease-in-out;-moz-transition:opacity .3s ease-in-out;transition:opacity .3s ease-in-out;width:150px}.vis-configuration-popup:after,.vis-configuration-popup:before{border:solid transparent;content:" ";height:0;left:100%;pointer-events:none;position:absolute;top:50%;width:0}.vis-configuration-popup:after{border-color:rgba(136,183,213,0) rgba(136,183,213,0) rgba(136,183,213,0) rgba(57,76,89,.85);border-width:8px;margin-top:-8px}.vis-configuration-popup:before{border-color:rgba(194,225,245,0) rgba(194,225,245,0) rgba(194,225,245,0) #f2faff;border-width:12px;margin-top:-12px}');qA("div.vis-tooltip{background-color:#f5f4ed;border:1px solid #808074;-moz-border-radius:3px;-webkit-border-radius:3px;border-radius:3px;box-shadow:3px 3px 10px rgba(0,0,0,.2);color:#000;font-family:verdana;font-size:14px;padding:5px;pointer-events:none;position:absolute;visibility:hidden;white-space:nowrap;z-index:5}");var $A={};!function(g){function t(g){if(g)return function(g){for(var e in t.prototype)g[e]=t.prototype[e];return g}(g)}g.exports=t,t.prototype.on=t.prototype.addEventListener=function(g,t){return this._callbacks=this._callbacks||{},(this._callbacks["$"+g]=this._callbacks["$"+g]||[]).push(t),this},t.prototype.once=function(g,t){function e(){this.off(g,e),t.apply(this,arguments)}return e.fn=t,this.on(g,e),this},t.prototype.off=t.prototype.removeListener=t.prototype.removeAllListeners=t.prototype.removeEventListener=function(g,t){if(this._callbacks=this._callbacks||{},0==arguments.length)return this._callbacks={},this;var e,A=this._callbacks["$"+g];if(!A)return this;if(1==arguments.length)return delete this._callbacks["$"+g],this;for(var C=0;C=o?g?"":void 0:(A=wC(I,i))<55296||A>56319||i+1===o||(C=wC(I,i+1))<56320||C>57343?g?bC(I,i):A:g?xC(I,i,i+2):C-56320+(A-55296<<10)+65536}},EC={codeAt:kC(!1),charAt:kC(!0)},OC=D,TC=i.WeakMap,DC=OC(TC)&&/native code/.test(String(TC)),NC=Ct,RC=jg("keys"),PC=function(g){return 
RC[g]||(RC[g]=NC(g))},MC=DC,BC=i,zC=eg,SC=pe,ZC=$g,FC=Ug,GC=PC,jC=_e,LC="Object already initialized",VC=BC.TypeError,YC=BC.WeakMap;if(MC||FC.state){var WC=FC.state||(FC.state=new YC);WC.get=WC.get,WC.has=WC.has,WC.set=WC.set,CC=function(g,t){if(WC.has(g))throw VC(LC);return t.facade=g,WC.set(g,t),t},IC=function(g){return WC.get(g)||{}},iC=function(g){return WC.has(g)}}else{var QC=GC("state");jC[QC]=!0,CC=function(g,t){if(ZC(g,QC))throw VC(LC);return t.facade=g,SC(g,QC,t),t},IC=function(g){return ZC(g,QC)?g[QC]:{}},iC=function(g){return ZC(g,QC)}}var UC={set:CC,get:IC,has:iC,enforce:function(g){return iC(g)?IC(g):CC(g,{})},getterFor:function(g){return function(t){var e;if(!zC(t)||(e=IC(t)).type!==g)throw VC("Incompatible receiver, "+g+" required");return e}}},_C=R,HC=$g,KC=Function.prototype,XC=_C&&Object.getOwnPropertyDescriptor,JC=HC(KC,"name"),qC={EXISTS:JC,PROPER:JC&&"something"===function(){}.name,CONFIGURABLE:JC&&(!_C||_C&&XC(KC,"name").configurable)},$C={},gI=R,tI=$t,eI=qt,AI=Ae,CI=$,II=AA;$C.f=gI&&!tI?Object.defineProperties:function(g,t){AI(g);for(var e,A=CI(t),C=II(t),I=C.length,i=0;I>i;)eI.f(g,e=C[i++],A[e]);return g};var iI,oI=ng("document","documentElement"),nI=Ae,rI=$C,sI=gA,aI=_e,dI=oI,hI=Ot,lI="prototype",cI="script",uI=PC("IE_PROTO"),pI=function(){},fI=function(g){return"<"+cI+">"+g+""},vI=function(g){g.write(fI("")),g.close();var t=g.parentWindow.Object;return g=null,t},yI=function(){try{iI=new ActiveXObject("htmlfile")}catch(g){}var g,t,e;yI="undefined"!=typeof document?document.domain&&iI?vI(iI):(t=hI("iframe"),e="java"+cI+":",t.style.display="none",dI.appendChild(t),t.src=String(e),(g=t.contentWindow.document).open(),g.write(fI("document.F=Object")),g.close(),g.F):vI(iI);for(var A=sI.length;A--;)delete yI[lI][sI[A]];return yI()};aI[uI]=!0;var mI,bI,wI,xI=Object.create||function(g,t){var e;return null!==g?(pI[lI]=nI(g),e=new pI,pI[lI]=null,e[uI]=g):e=yI(),void 0===t?e:rI.f(e,t)},kI=!o((function(){function g(){}return g.prototype.constructor=null,Object.getPrototypeOf(new g)!==g.prototype})),EI=$g,OI=D,TI=Xg,DI=kI,NI=PC("IE_PROTO"),RI=Object,PI=RI.prototype,MI=DI?RI.getPrototypeOf:function(g){var t=TI(g);if(EI(t,NI))return t[NI];var e=t.constructor;return OI(e)&&t instanceof e?e.prototype:t instanceof RI?PI:null},BI=pe,zI=function(g,t,e,A){return A&&A.enumerable?g[t]=e:BI(g,t,e),g},SI=o,ZI=D,FI=eg,GI=xI,jI=MI,LI=zI,VI=ht("iterator"),YI=!1;[].keys&&("next"in(wI=[].keys())?(bI=jI(jI(wI)))!==Object.prototype&&(mI=bI):YI=!0);var WI=!FI(mI)||SI((function(){var g={};return mI[VI].call(g)!==g}));ZI((mI=WI?{}:GI(mI))[VI])||LI(mI,VI,(function(){return this}));var QI={IteratorPrototype:mI,BUGGY_SAFARI_ITERATORS:YI},UI=lC,_I=oC?{}.toString:function(){return"[object "+UI(this)+"]"},HI=oC,KI=qt.f,XI=pe,JI=$g,qI=_I,$I=ht("toStringTag"),gi=function(g,t,e,A){if(g){var C=e?g:g.prototype;JI(C,$I)||KI(C,$I,{configurable:!0,value:t}),A&&!HI&&XI(C,"toString",qI)}},ti={},ei=QI.IteratorPrototype,Ai=xI,Ci=L,Ii=gi,ii=ti,oi=function(){return this},ni=f,ri=Pg,si=D,ai=String,di=TypeError,hi=function(g,t,e){try{return ni(ri(Object.getOwnPropertyDescriptor(g,t)[e]))}catch(g){}},li=Ae,ci=function(g){if("object"==typeof g||si(g))return g;throw di("Can't set "+ai(g)+" as a prototype")},ui=Object.setPrototypeOf||("__proto__"in{}?function(){var g,t=!1,e={};try{(g=hi(Object.prototype,"__proto__","set"))(e,[]),t=e instanceof Array}catch(g){}return function(e,A){return li(e),ci(A),t?g(e,A):e.__proto__=A,e}}():void 0),pi=De,fi=B,vi=function(g,t,e,A){var C=t+" Iterator";return 
g.prototype=Ai(ei,{next:Ci(+!A,e)}),Ii(g,C,!1,!0),ii[C]=oi,g},yi=MI,mi=gi,bi=zI,wi=ti,xi=qC.PROPER,ki=QI.BUGGY_SAFARI_ITERATORS,Ei=ht("iterator"),Oi="keys",Ti="values",Di="entries",Ni=function(){return this},Ri=function(g,t,e,A,C,I,i){vi(e,t,A);var o,n,r,s=function(g){if(g===C&&c)return c;if(!ki&&g in h)return h[g];switch(g){case Oi:case Ti:case Di:return function(){return new e(this,g)}}return function(){return new e(this)}},a=t+" Iterator",d=!1,h=g.prototype,l=h[Ei]||h["@@iterator"]||C&&h[C],c=!ki&&l||s(C),u="Array"==t&&h.entries||l;if(u&&(o=yi(u.call(new g)))!==Object.prototype&&o.next&&(mi(o,a,!0,!0),wi[a]=Ni),xi&&C==Ti&&l&&l.name!==Ti&&(d=!0,c=function(){return fi(l,this)}),C)if(n={values:s(Ti),keys:I?c:s(Oi),entries:s(Di)},i)for(r in n)(ki||d||!(r in h))&&bi(h,r,n[r]);else pi({target:t,proto:!0,forced:ki||d},n);return i&&h[Ei]!==c&&bi(h,Ei,c,{name:C}),wi[t]=c,n},Pi=function(g,t){return{value:g,done:t}},Mi=EC.charAt,Bi=pC,zi=UC,Si=Ri,Zi=Pi,Fi="String Iterator",Gi=zi.set,ji=zi.getterFor(Fi);Si(String,"String",(function(g){Gi(this,{type:Fi,string:Bi(g),index:0})}),(function(){var g,t=ji(this),e=t.string,A=t.index;return A>=e.length?Zi(void 0,!0):(g=Mi(e,A),t.index+=g.length,Zi(g,!1))}));var Li=B,Vi=Ae,Yi=zg,Wi=function(g,t,e){var A,C;Vi(g);try{if(!(A=Yi(g,"return"))){if("throw"===t)throw e;return e}A=Li(A,g)}catch(g){C=!0,A=g}if("throw"===t)throw e;if(C)throw A;return Vi(A),e},Qi=Ae,Ui=Wi,_i=ti,Hi=ht("iterator"),Ki=Array.prototype,Xi=function(g){return void 0!==g&&(_i.Array===g||Ki[Hi]===g)},Ji=D,qi=Ug,$i=f(Function.toString);Ji(qi.inspectSource)||(qi.inspectSource=function(g){return $i(g)});var go=qi.inspectSource,to=f,eo=o,Ao=D,Co=lC,Io=go,io=function(){},oo=[],no=ng("Reflect","construct"),ro=/^\s*(?:class|function)\b/,so=to(ro.exec),ao=!ro.exec(io),ho=function(g){if(!Ao(g))return!1;try{return no(io,oo,g),!0}catch(g){return!1}},lo=function(g){if(!Ao(g))return!1;switch(Co(g)){case"AsyncFunction":case"GeneratorFunction":case"AsyncGeneratorFunction":return!1}try{return ao||!!so(ro,Io(g))}catch(g){return!0}};lo.sham=!0;var co=!no||eo((function(){var g;return ho(ho.call)||!ho(Object)||!ho((function(){g=!0}))||g}))?lo:ho,uo=wt,po=qt,fo=L,vo=function(g,t,e){var A=uo(t);A in g?po.f(g,A,fo(0,e)):g[A]=e},yo=lC,mo=zg,bo=_,wo=ti,xo=ht("iterator"),ko=function(g){if(!bo(g))return mo(g,xo)||mo(g,"@@iterator")||wo[yo(g)]},Eo=B,Oo=Pg,To=Ae,Do=Tg,No=ko,Ro=TypeError,Po=function(g,t){var e=arguments.length<2?No(g):t;if(Oo(e))return To(Eo(e,g));throw Ro(Do(g)+" is not iterable")},Mo=Jt,Bo=B,zo=Xg,So=function(g,t,e,A){try{return A?t(Qi(e)[0],e[1]):t(e)}catch(t){Ui(g,"throw",t)}},Zo=Xi,Fo=co,Go=Le,jo=vo,Lo=Po,Vo=ko,Yo=Array,Wo=ht("iterator"),Qo=!1;try{var Uo=0,_o={next:function(){return{done:!!Uo++}},return:function(){Qo=!0}};_o[Wo]=function(){return this},Array.from(_o,(function(){throw 2}))}catch(g){}var Ho=function(g,t){if(!t&&!Qo)return!1;var e=!1;try{var A={};A[Wo]=function(){return{next:function(){return{done:e=!0}}}},g(A)}catch(g){}return e},Ko=function(g){var t=zo(g),e=Fo(this),A=arguments.length,C=A>1?arguments[1]:void 0,I=void 0!==C;I&&(C=Mo(C,A>2?arguments[2]:void 0));var i,o,n,r,s,a,d=Vo(t),h=0;if(!d||this===Yo&&Zo(d))for(i=Go(t),o=e?new this(i):Yo(i);i>h;h++)a=I?C(t[h],h):t[h],jo(o,h,a);else for(s=(r=Lo(t,d)).next,o=e?new this:[];!(n=Bo(s,r)).done;h++)a=I?So(r,C,[n.value,h],!0):n.value,jo(o,h,a);return o.length=h,o};De({target:"Array",stat:!0,forced:!Ho((function(g){Array.from(g)}))},{from:Ko});var Xo=Ag.Array.from;!function(g){g.exports=Xo}(eC);var Jo=e(tC),qo={},$o={get exports(){return 
qo},set exports(g){qo=g}},gn={},tn={get exports(){return gn},set exports(g){gn=g}},en=$,An=ti,Cn=UC;qt.f;var In=Ri,on=Pi,nn="Array Iterator",rn=Cn.set,sn=Cn.getterFor(nn);In(Array,"Array",(function(g,t){rn(this,{type:nn,target:en(g),index:0,kind:t})}),(function(){var g=sn(this),t=g.target,e=g.kind,A=g.index++;return!t||A>=t.length?(g.target=void 0,on(void 0,!0)):on("keys"==e?A:"values"==e?t[A]:[A,t[A]],!1)}),"values"),An.Arguments=An.Array;var an=ko,dn={CSSRuleList:0,CSSStyleDeclaration:0,CSSValueList:0,ClientRectList:0,DOMRectList:0,DOMStringList:0,DOMTokenList:1,DataTransferItemList:0,FileList:0,HTMLAllCollection:0,HTMLCollection:0,HTMLFormElement:0,HTMLSelectElement:0,MediaList:0,MimeTypeArray:0,NamedNodeMap:0,NodeList:1,PaintRequestList:0,Plugin:0,PluginArray:0,SVGLengthList:0,SVGNumberList:0,SVGPathSegList:0,SVGPointList:0,SVGStringList:0,SVGTransformList:0,SourceBufferList:0,StyleSheetList:0,TextTrackCueList:0,TextTrackList:0,TouchList:0},hn=i,ln=lC,cn=pe,un=ti,pn=ht("toStringTag");for(var fn in dn){var vn=hn[fn],yn=vn&&vn.prototype;yn&&ln(yn)!==pn&&cn(yn,pn,fn),un[fn]=un.Array}var mn=an;!function(g){g.exports=mn}(tn),function(g){g.exports=gn}($o);var bn=e(qo),wn={},xn={get exports(){return wn},set exports(g){wn=g}},kn={},En=$e,On=gA.concat("length","prototype");kn.f=Object.getOwnPropertyNames||function(g){return En(g,On)};var Tn={},Dn=Ze,Nn=Le,Rn=vo,Pn=Array,Mn=Math.max,Bn=function(g,t,e){for(var A=Nn(g),C=Dn(t,A),I=Dn(void 0===e?A:e,A),i=Pn(Mn(I-C,0)),o=0;Cf;f++)if((o||f in c)&&(h=u(d=c[f],f,l),g))if(t)y[f]=h;else if(h)switch(g){case 3:return!0;case 5:return d;case 6:return f;case 2:lr(y,d)}else switch(g){case 4:return!1;case 7:lr(y,d)}return I?-1:A||C?C:y}},ur={forEach:cr(0),map:cr(1),filter:cr(2),some:cr(3),every:cr(4),find:cr(5),findIndex:cr(6),filterReject:cr(7)},pr=De,fr=i,vr=B,yr=f,mr=R,br=yg,wr=o,xr=$g,kr=rg,Er=Ae,Or=$,Tr=wt,Dr=pC,Nr=L,Rr=xI,Pr=AA,Mr=kn,Br=Tn,zr=CA,Sr=N,Zr=qt,Fr=$C,Gr=z,jr=zI,Lr=Ln,Vr=jg,Yr=_e,Wr=Ct,Qr=ht,Ur=Vn,_r=Hn,Hr=$n,Kr=gi,Xr=UC,Jr=ur.forEach,qr=PC("hidden"),$r="Symbol",gs="prototype",ts=Xr.set,es=Xr.getterFor($r),As=Object[gs],Cs=fr.Symbol,Is=Cs&&Cs[gs],is=fr.TypeError,os=fr.QObject,ns=Sr.f,rs=Zr.f,ss=Br.f,as=Gr.f,ds=yr([].push),hs=Vr("symbols"),ls=Vr("op-symbols"),cs=Vr("wks"),us=!os||!os[gs]||!os[gs].findChild,ps=mr&&wr((function(){return 7!=Rr(rs({},"a",{get:function(){return rs(this,"a",{value:7}).a}})).a}))?function(g,t,e){var A=ns(As,t);A&&delete As[t],rs(g,t,e),A&&g!==As&&rs(As,t,A)}:rs,fs=function(g,t){var e=hs[g]=Rr(Is);return ts(e,{type:$r,tag:g,description:t}),mr||(e.description=t),e},vs=function(g,t,e){g===As&&vs(ls,t,e),Er(g);var A=Tr(t);return Er(e),xr(hs,A)?(e.enumerable?(xr(g,qr)&&g[qr][A]&&(g[qr][A]=!1),e=Rr(e,{enumerable:Nr(0,!1)})):(xr(g,qr)||rs(g,qr,Nr(1,{})),g[qr][A]=!0),ps(g,A,e)):rs(g,A,e)},ys=function(g,t){Er(g);var e=Or(t),A=Pr(e).concat(xs(e));return Jr(A,(function(t){mr&&!vr(ms,e,t)||vs(g,t,e[t])})),g},ms=function(g){var t=Tr(g),e=vr(as,this,t);return!(this===As&&xr(hs,t)&&!xr(ls,t))&&(!(e||!xr(this,t)||!xr(hs,t)||xr(this,qr)&&this[qr][t])||e)},bs=function(g,t){var e=Or(g),A=Tr(t);if(e!==As||!xr(hs,A)||xr(ls,A)){var C=ns(e,A);return!C||!xr(hs,A)||xr(e,qr)&&e[qr][A]||(C.enumerable=!0),C}},ws=function(g){var t=ss(Or(g)),e=[];return Jr(t,(function(g){xr(hs,g)||xr(Yr,g)||ds(e,g)})),e},xs=function(g){var t=g===As,e=ss(t?ls:Or(g)),A=[];return Jr(e,(function(g){!xr(hs,g)||t&&!xr(As,g)||ds(A,hs[g])})),A};br||(Cs=function(){if(kr(Is,this))throw is("Symbol is not a constructor");var g=arguments.length&&void 
0!==arguments[0]?Dr(arguments[0]):void 0,t=Wr(g),e=function(g){this===As&&vr(e,ls,g),xr(this,qr)&&xr(this[qr],t)&&(this[qr][t]=!1),ps(this,t,Nr(1,g))};return mr&&us&&ps(As,t,{configurable:!0,set:e}),fs(t,g)},jr(Is=Cs[gs],"toString",(function(){return es(this).tag})),jr(Cs,"withoutSetter",(function(g){return fs(Wr(g),g)})),Gr.f=ms,Zr.f=vs,Fr.f=ys,Sr.f=bs,Mr.f=Br.f=ws,zr.f=xs,Ur.f=function(g){return fs(Qr(g),g)},mr&&Lr(Is,"description",{configurable:!0,get:function(){return es(this).description}})),pr({global:!0,constructor:!0,wrap:!0,forced:!br,sham:!br},{Symbol:Cs}),Jr(Pr(cs),(function(g){_r(g)})),pr({target:$r,stat:!0,forced:!br},{useSetter:function(){us=!0},useSimple:function(){us=!1}}),pr({target:"Object",stat:!0,forced:!br,sham:!mr},{create:function(g,t){return void 0===t?Rr(g):ys(Rr(g),t)},defineProperty:vs,defineProperties:ys,getOwnPropertyDescriptor:bs}),pr({target:"Object",stat:!0,forced:!br},{getOwnPropertyNames:ws}),Hr(),Kr(Cs,$r),Yr[qr]=!0;var ks=yg&&!!Symbol.for&&!!Symbol.keyFor,Es=De,Os=ng,Ts=$g,Ds=pC,Ns=jg,Rs=ks,Ps=Ns("string-to-symbol-registry"),Ms=Ns("symbol-to-string-registry");Es({target:"Symbol",stat:!0,forced:!Rs},{for:function(g){var t=Ds(g);if(Ts(Ps,t))return Ps[t];var e=Os("Symbol")(t);return Ps[t]=e,Ms[e]=t,e}});var Bs=De,zs=$g,Ss=Eg,Zs=Tg,Fs=ks,Gs=jg("symbol-to-string-registry");Bs({target:"Symbol",stat:!0,forced:!Fs},{keyFor:function(g){if(!Ss(g))throw TypeError(Zs(g)+" is not a symbol");if(zs(Gs,g))return Gs[g]}});var js=tr,Ls=D,Vs=b,Ys=pC,Ws=f([].push),Qs=De,Us=ng,_s=h,Hs=B,Ks=f,Xs=o,Js=D,qs=Eg,$s=wA,ga=function(g){if(Ls(g))return g;if(js(g)){for(var t=g.length,e=[],A=0;Ao;)void 0!==(e=C(A,t=I[o++]))&&Wa(i,t,e);return i}});var Qa=Ag.Object.getOwnPropertyDescriptors;!function(g){g.exports=Qa}(Ba);var Ua=e(Ma),_a={},Ha={get exports(){return _a},set exports(g){_a=g}},Ka={},Xa={get exports(){return Ka},set exports(g){Ka=g}},Ja=De,qa=R,$a=$C.f;Ja({target:"Object",stat:!0,forced:Object.defineProperties!==$a,sham:!qa},{defineProperties:$a});var gd=Ag.Object,td=Xa.exports=function(g,t){return gd.defineProperties(g,t)};gd.defineProperties.sham&&(td.sham=!0);var ed=Ka;!function(g){g.exports=ed}(Ha);var Ad=e(_a),Cd={},Id={get exports(){return Cd},set exports(g){Cd=g}},id={},od={get exports(){return id},set exports(g){id=g}},nd=De,rd=R,sd=qt.f;nd({target:"Object",stat:!0,forced:Object.defineProperty!==sd,sham:!rd},{defineProperty:sd});var ad=Ag.Object,dd=od.exports=function(g,t,e){return ad.defineProperty(g,t,e)};ad.defineProperty.sham&&(dd.sham=!0);var hd=id;!function(g){g.exports=hd}(Id);var ld=e(Cd);function cd(g,t){if(!(g instanceof t))throw new TypeError("Cannot call a class as a function")}var ud={},pd={get exports(){return ud},set exports(g){ud=g}},fd={},vd=hd;!function(g){g.exports=vd}({get exports(){return fd},set exports(g){fd=g}}),function(g){g.exports=fd}(pd);var yd=e(ud),md={},bd={get exports(){return md},set exports(g){md=g}},wd={},xd={get exports(){return wd},set exports(g){wd=g}},kd=TypeError,Ed=function(g){if(g>9007199254740991)throw kd("Maximum allowed index exceeded");return g},Od=o,Td=pg,Dd=ht("species"),Nd=function(g){return Td>=51||!Od((function(){var t=[];return(t.constructor={})[Dd]=function(){return{foo:1}},1!==t[g](Boolean).foo}))},Rd=De,Pd=o,Md=tr,Bd=eg,zd=Xg,Sd=Le,Zd=Ed,Fd=vo,Gd=nr,jd=Nd,Ld=pg,Vd=ht("isConcatSpreadable"),Yd=Ld>=51||!Pd((function(){var g=[];return g[Vd]=!1,g.concat()[0]!==g})),Wd=function(g){if(!Bd(g))return!1;var t=g[Vd];return void 
0!==t?!!t:Md(g)};Rd({target:"Array",proto:!0,arity:1,forced:!Yd||!jd("concat")},{concat:function(g){var t,e,A,C,I,i=zd(this),o=Gd(i,0),n=0;for(t=-1,A=arguments.length;tg.length)&&(t=g.length);for(var e=0,A=new Array(t);e1?arguments[1]:void 0)}});var Ul=FA("Array").map,_l=rg,Hl=Ul,Kl=Array.prototype,Xl=function(g){var t=g.map;return g===Kl||_l(Kl,g)&&t===Kl.map?Hl:t},Jl=Xl;!function(g){g.exports=Jl}(Wl);var ql=e(Yl),$l={},gc={get exports(){return $l},set exports(g){$l=g}},tc=Xg,ec=AA;De({target:"Object",stat:!0,forced:o((function(){ec(1)}))},{keys:function(g){return ec(tc(g))}});var Ac=Ag.Object.keys;!function(g){g.exports=Ac}(gc);var Cc=e($l),Ic={},ic={get exports(){return Ic},set exports(g){Ic=g}},oc=De,nc=Date,rc=f(nc.prototype.getTime);oc({target:"Date",stat:!0},{now:function(){return rc(new nc)}});var sc=Ag.Date.now;!function(g){g.exports=sc}(ic);var ac=e(Ic),dc={},hc={get exports(){return dc},set exports(g){dc=g}},lc=o,cc=function(g,t){var e=[][g];return!!e&&lc((function(){e.call(null,t||function(){return 1},1)}))},uc=ur.forEach,pc=cc("forEach")?[].forEach:function(g){return uc(this,g,arguments.length>1?arguments[1]:void 0)};De({target:"Array",proto:!0,forced:[].forEach!=pc},{forEach:pc});var fc=FA("Array").forEach,vc=lC,yc=$g,mc=rg,bc=fc,wc=Array.prototype,xc={DOMTokenList:!0,NodeList:!0},kc=function(g){var t=g.forEach;return g===wc||mc(wc,g)&&t===wc.forEach||yc(xc,vc(g))?bc:t};!function(g){g.exports=kc}(hc);var Ec=e(dc),Oc={},Tc={get exports(){return Oc},set exports(g){Oc=g}},Dc=De,Nc=tr,Rc=f([].reverse),Pc=[1,2];Dc({target:"Array",proto:!0,forced:String(Pc)===String(Pc.reverse())},{reverse:function(){return Nc(this)&&(this.length=this.length),Rc(this)}});var Mc=FA("Array").reverse,Bc=rg,zc=Mc,Sc=Array.prototype,Zc=function(g){var t=g.reverse;return g===Sc||Bc(Sc,g)&&t===Sc.reverse?zc:t},Fc=Zc;!function(g){g.exports=Fc}(Tc);var Gc=e(Oc),jc={},Lc={get exports(){return jc},set exports(g){jc=g}},Vc=R,Yc=tr,Wc=TypeError,Qc=Object.getOwnPropertyDescriptor,Uc=Vc&&!function(){if(void 0!==this)return!0;try{Object.defineProperty([],"length",{writable:!1}).length=1}catch(g){return g instanceof TypeError}}(),_c=Tg,Hc=TypeError,Kc=function(g,t){if(!delete g[t])throw Hc("Cannot delete property "+_c(t)+" of "+_c(g))},Xc=De,Jc=Xg,qc=Ze,$c=Me,gu=Le,tu=Uc?function(g,t){if(Yc(g)&&!Qc(g,"length").writable)throw Wc("Cannot set read only .length");return g.length=t}:function(g,t){return g.length=t},eu=Ed,Au=nr,Cu=vo,Iu=Kc,iu=Nd("splice"),ou=Math.max,nu=Math.min;Xc({target:"Array",proto:!0,forced:!iu},{splice:function(g,t){var e,A,C,I,i,o,n=Jc(this),r=gu(n),s=qc(g,r),a=arguments.length;for(0===a?e=A=0:1===a?(e=0,A=r-s):(e=a-2,A=nu(ou($c(t),0),r-s)),eu(r+e-A),C=Au(n,A),I=0;Ir-A+e;I--)Iu(n,I-1)}else if(e>A)for(I=r-A;I>s;I--)o=I+e-1,(i=I+A-1)in n?n[o]=n[i]:Iu(n,o);for(I=0;I1?arguments[1]:void 0)}});var vu=FA("Array").includes,yu=eg,mu=b,bu=ht("match"),wu=function(g){var t;return yu(g)&&(void 0!==(t=g[bu])?!!t:"RegExp"==mu(g))},xu=TypeError,ku=ht("match"),Eu=De,Ou=function(g){if(wu(g))throw xu("The method doesn't accept regular expressions");return g},Tu=X,Du=pC,Nu=function(g){var t=/./;try{"/./"[g](t)}catch(e){try{return t[ku]=!1,"/./"[g](t)}catch(g){}}return!1},Ru=f("".indexOf);Eu({target:"String",proto:!0,forced:!Nu("includes")},{includes:function(g){return!!~Ru(Du(Tu(this)),Du(Ou(g)),arguments.length>1?arguments[1]:void 0)}});var Pu=FA("String").includes,Mu=rg,Bu=vu,zu=Pu,Su=Array.prototype,Zu=String.prototype,Fu=function(g){var t=g.includes;return g===Su||Mu(Su,g)&&t===Su.includes?Bu:"string"==typeof 
g||g===Zu||Mu(Zu,g)&&t===Zu.includes?zu:t},Gu=Fu;!function(g){g.exports=Gu}(pu);var ju=e(uu),Lu={},Vu={get exports(){return Lu},set exports(g){Lu=g}},Yu=Xg,Wu=MI,Qu=kI;De({target:"Object",stat:!0,forced:o((function(){Wu(1)})),sham:!Qu},{getPrototypeOf:function(g){return Wu(Yu(g))}});var Uu=Ag.Object.getPrototypeOf;!function(g){g.exports=Uu}(Vu);var _u=e(Lu),Hu={},Ku={get exports(){return Hu},set exports(g){Hu=g}},Xu=ur.filter;De({target:"Array",proto:!0,forced:!Nd("filter")},{filter:function(g){return Xu(this,g,arguments.length>1?arguments[1]:void 0)}});var Ju=FA("Array").filter,qu=rg,$u=Ju,gp=Array.prototype,tp=function(g){var t=g.filter;return g===gp||qu(gp,g)&&t===gp.filter?$u:t},ep=tp;!function(g){g.exports=ep}(Ku);var Ap=e(Hu),Cp={},Ip={get exports(){return Cp},set exports(g){Cp=g}},ip=R,op=f,np=AA,rp=$,sp=op(z.f),ap=op([].push),dp=function(g){return function(t){for(var e,A=rp(t),C=np(A),I=C.length,i=0,o=[];I>i;)e=C[i++],ip&&!sp(A,e)||ap(o,g?[e,A[e]]:A[e]);return o}},hp={entries:dp(!0),values:dp(!1)}.values;De({target:"Object",stat:!0},{values:function(g){return hp(g)}});var lp=Ag.Object.values;!function(g){g.exports=lp}(Ip);var cp={},up={get exports(){return cp},set exports(g){cp=g}},pp="\t\n\v\f\r                \u2028\u2029\ufeff",fp=X,vp=pC,yp=pp,mp=f("".replace),bp=RegExp("^["+yp+"]+"),wp=RegExp("(^|[^"+yp+"])["+yp+"]+$"),xp=function(g){return function(t){var e=vp(fp(t));return 1&g&&(e=mp(e,bp,"")),2&g&&(e=mp(e,wp,"$1")),e}},kp={start:xp(1),end:xp(2),trim:xp(3)},Ep=i,Op=o,Tp=f,Dp=pC,Np=kp.trim,Rp=pp,Pp=Ep.parseInt,Mp=Ep.Symbol,Bp=Mp&&Mp.iterator,zp=/^[+-]?0x/i,Sp=Tp(zp.exec),Zp=8!==Pp(Rp+"08")||22!==Pp(Rp+"0x16")||Bp&&!Op((function(){Pp(Object(Bp))}))?function(g,t){var e=Np(Dp(g));return Pp(e,t>>>0||(Sp(zp,e)?16:10))}:Pp;De({global:!0,forced:parseInt!=Zp},{parseInt:Zp});var Fp=Ag.parseInt;!function(g){g.exports=Fp}(up);var Gp=e(cp),jp={},Lp={get exports(){return jp},set exports(g){jp=g}},Vp=De,Yp=Ue.indexOf,Wp=cc,Qp=k([].indexOf),Up=!!Qp&&1/Qp([1],1,-0)<0;Vp({target:"Array",proto:!0,forced:Up||!Wp("indexOf")},{indexOf:function(g){var t=arguments.length>1?arguments[1]:void 0;return Up?Qp(this,g,t)||0:Yp(this,g,t)}});var _p=FA("Array").indexOf,Hp=rg,Kp=_p,Xp=Array.prototype,Jp=function(g){var t=g.indexOf;return g===Xp||Hp(Xp,g)&&t===Xp.indexOf?Kp:t},qp=Jp;!function(g){g.exports=qp}(Lp);var $p=e(jp),gf={},tf={get exports(){return gf},set exports(g){gf=g}},ef=qC.PROPER,Af=o,Cf=pp,If=kp.trim;De({target:"String",proto:!0,forced:function(g){return Af((function(){return!!Cf[g]()||"​…᠎"!=="​…᠎"[g]()||ef&&Cf[g].name!==g}))}("trim")},{trim:function(){return If(this)}});var of=FA("String").trim,nf=rg,rf=of,sf=String.prototype,af=function(g){var t=g.trim;return"string"==typeof g||g===sf||nf(sf,g)&&t===sf.trim?rf:t},df=af;!function(g){g.exports=df}(tf);var hf={},lf={get exports(){return hf},set exports(g){hf=g}};De({target:"Object",stat:!0,sham:!R},{create:xI});var cf=Ag.Object,uf=function(g,t){return cf.create(g,t)};!function(g){g.exports=uf}(lf);var pf=e(hf),ff={},vf={get exports(){return ff},set exports(g){ff=g}},yf=Ag,mf=h;yf.JSON||(yf.JSON={stringify:JSON.stringify});var bf=function(g,t,e){return mf(yf.JSON.stringify,null,arguments)},wf=bf;!function(g){g.exports=wf}(vf);var xf=e(ff),kf={},Ef={get exports(){return kf},set exports(g){kf=g}},Of="function"==typeof Bun&&Bun&&"string"==typeof Bun.version,Tf=TypeError,Df=function(g,t){if(ge,i=Pf(A)?A:Zf(A),o=I?zf(arguments,e):[],n=I?function(){Rf(i,this,o)}:i;return 
t?g(n,C):g(n)}:g},jf=De,Lf=i,Vf=Gf(Lf.setInterval,!0);jf({global:!0,bind:!0,forced:Lf.setInterval!==Vf},{setInterval:Vf});var Yf=De,Wf=i,Qf=Gf(Wf.setTimeout,!0);Yf({global:!0,bind:!0,forced:Wf.setTimeout!==Qf},{setTimeout:Qf});var Uf=Ag.setTimeout;!function(g){g.exports=Uf}(Ef);var _f=e(kf),Hf={},Kf={get exports(){return Hf},set exports(g){Hf=g}},Xf=Xg,Jf=Ze,qf=Le,$f=function(g){for(var t=Xf(this),e=qf(t),A=arguments.length,C=Jf(A>1?arguments[1]:void 0,e),I=A>2?arguments[2]:void 0,i=void 0===I?e:Jf(I,e);i>C;)t[C++]=g;return t};De({target:"Array",proto:!0},{fill:$f});var gv=FA("Array").fill,tv=rg,ev=gv,Av=Array.prototype,Cv=function(g){var t=g.fill;return g===Av||tv(Av,g)&&t===Av.fill?ev:t},Iv=Cv;!function(g){g.exports=Iv}(Kf);var iv,ov=e(Hf); -/*! Hammer.JS - v2.0.17-rc - 2019-12-16 - * http://naver.github.io/egjs - * - * Forked By Naver egjs - * Copyright (c) hammerjs - * Licensed under the MIT license */ -function nv(){return nv=Object.assign||function(g){for(var t=1;t-1}var qv=function(){function g(g,t){this.manager=g,this.set(t)}var t=g.prototype;return t.set=function(g){g===bv&&(g=this.compute()),mv&&this.manager.element.style&&Tv[g]&&(this.manager.element.style[yv]=g),this.actions=g.toLowerCase().trim()},t.update=function(){this.set(this.manager.options.touchAction)},t.compute=function(){var g=[];return Kv(this.manager.recognizers,(function(t){Xv(t.options.enable,[t])&&(g=g.concat(t.getTouchAction()))})),function(g){if(Jv(g,kv))return kv;var t=Jv(g,Ev),e=Jv(g,Ov);return t&&e?kv:t||e?t?Ev:Ov:Jv(g,xv)?xv:wv}(g.join(" "))},t.preventDefaults=function(g){var t=g.srcEvent,e=g.offsetDirection;if(this.manager.session.prevented)t.preventDefault();else{var A=this.actions,C=Jv(A,kv)&&!Tv[kv],I=Jv(A,Ov)&&!Tv[Ov],i=Jv(A,Ev)&&!Tv[Ev];if(C){var o=1===g.pointers.length,n=g.distance<2,r=g.deltaTime<250;if(o&&n&&r)return}if(!i||!I)return C||I&&e&Wv||i&&e&Qv?this.preventSrc(t):void 0}},t.preventSrc=function(g){this.manager.session.prevented=!0,g.preventDefault()},g}();function $v(g,t){for(;g;){if(g===t)return!0;g=g.parentNode}return!1}function gy(g){var t=g.length;if(1===t)return{x:uv(g[0].clientX),y:uv(g[0].clientY)};for(var e=0,A=0,C=0;C=pv(t)?g<0?jv:Lv:t<0?Vv:Yv}function Iy(g,t,e){return{x:t/g||0,y:e/g||0}}function iy(g,t){var e=g.session,A=t.pointers,C=A.length;e.firstInput||(e.firstInput=ty(t)),C>1&&!e.firstMultiple?e.firstMultiple=ty(t):1===C&&(e.firstMultiple=!1);var I=e.firstInput,i=e.firstMultiple,o=i?i.center:I.center,n=t.center=gy(A);t.timeStamp=fv(),t.deltaTime=t.timeStamp-I.timeStamp,t.angle=Ay(o,n),t.distance=ey(o,n),function(g,t){var e=t.center,A=g.offsetDelta||{},C=g.prevDelta||{},I=g.prevInput||{};t.eventType!==zv&&I.eventType!==Zv||(C=g.prevDelta={x:I.deltaX||0,y:I.deltaY||0},A=g.offsetDelta={x:e.x,y:e.y}),t.deltaX=C.x+(e.x-A.x),t.deltaY=C.y+(e.y-A.y)}(e,t),t.offsetDirection=Cy(t.deltaX,t.deltaY);var r,s,a=Iy(t.deltaTime,t.deltaX,t.deltaY);t.overallVelocityX=a.x,t.overallVelocityY=a.y,t.overallVelocity=pv(a.x)>pv(a.y)?a.x:a.y,t.scale=i?(r=i.pointers,ey((s=A)[0],s[1],Hv)/ey(r[0],r[1],Hv)):1,t.rotation=i?function(g,t){return Ay(t[1],t[0],Hv)+Ay(g[1],g[0],Hv)}(i.pointers,A):0,t.maxPointers=e.prevInput?t.pointers.length>e.prevInput.maxPointers?t.pointers.length:e.prevInput.maxPointers:t.pointers.length,function(g,t){var e,A,C,I,i=g.lastInterval||t,o=t.timeStamp-i.timeStamp;if(t.eventType!==Fv&&(o>Bv||void 0===i.velocity)){var n=t.deltaX-i.deltaX,r=t.deltaY-i.deltaY,s=Iy(o,n,r);A=s.x,C=s.y,e=pv(s.x)>pv(s.y)?s.x:s.y,I=Cy(n,r),g.lastInterval=t}else 
e=i.velocity,A=i.velocityX,C=i.velocityY,I=i.direction;t.velocity=e,t.velocityX=A,t.velocityY=C,t.direction=I}(e,t);var d,h=g.element,l=t.srcEvent;$v(d=l.composedPath?l.composedPath()[0]:l.path?l.path[0]:l.target,h)&&(h=d),t.target=h}function oy(g,t,e){var A=e.pointers.length,C=e.changedPointers.length,I=t&zv&&A-C==0,i=t&(Zv|Fv)&&A-C==0;e.isFirst=!!I,e.isFinal=!!i,I&&(g.session={}),e.eventType=t,iy(g,e),g.emit("hammer.input",e),g.recognize(e),g.session.prevInput=e}function ny(g){return g.trim().split(/\s+/g)}function ry(g,t,e){Kv(ny(t),(function(t){g.addEventListener(t,e,!1)}))}function sy(g,t,e){Kv(ny(t),(function(t){g.removeEventListener(t,e,!1)}))}function ay(g){var t=g.ownerDocument||g;return t.defaultView||t.parentWindow||window}var dy=function(){function g(g,t){var e=this;this.manager=g,this.callback=t,this.element=g.element,this.target=g.options.inputTarget,this.domHandler=function(t){Xv(g.options.enable,[g])&&e.handler(t)},this.init()}var t=g.prototype;return t.handler=function(){},t.init=function(){this.evEl&&ry(this.element,this.evEl,this.domHandler),this.evTarget&&ry(this.target,this.evTarget,this.domHandler),this.evWin&&ry(ay(this.element),this.evWin,this.domHandler)},t.destroy=function(){this.evEl&&sy(this.element,this.evEl,this.domHandler),this.evTarget&&sy(this.target,this.evTarget,this.domHandler),this.evWin&&sy(ay(this.element),this.evWin,this.domHandler)},g}();function hy(g,t,e){if(g.indexOf&&!e)return g.indexOf(t);for(var A=0;Ae[t]})):A.sort()),A}var my={touchstart:zv,touchmove:Sv,touchend:Zv,touchcancel:Fv},by="touchstart touchmove touchend touchcancel",wy=function(g){function t(){var e;return t.prototype.evTarget=by,(e=g.apply(this,arguments)||this).targetIds={},e}return rv(t,g),t.prototype.handler=function(g){var t=my[g.type],e=xy.call(this,g,t);e&&this.callback(this.manager,t,{pointers:e[0],changedPointers:e[1],pointerType:Pv,srcEvent:g})},t}(dy);function xy(g,t){var e,A,C=vy(g.touches),I=this.targetIds;if(t&(zv|Sv)&&1===C.length)return I[C[0].identifier]=!0,[C,C];var i=vy(g.changedTouches),o=[],n=this.target;if(A=C.filter((function(g){return $v(g.target,n)})),t===zv)for(e=0;e-1&&A.splice(g,1)}),Dy)}}function Py(g,t){g&zv?(this.primaryTouch=t.changedPointers[0].identifier,Ry.call(this,t)):g&(Zv|Fv)&&Ry.call(this,t)}function My(g){for(var t=g.srcEvent.clientX,e=g.srcEvent.clientY,A=0;A-1&&this.requireFail.splice(t,1),this},t.hasRequireFailures=function(){return this.requireFail.length>0},t.canRecognizeWith=function(g){return!!this.simultaneous[g.id]},t.emit=function(g){var t=this,e=this.state;function A(e){t.manager.emit(e,g)}e=Gy&&A(t.options.event+Qy(e))},t.tryEmit=function(g){if(this.canEmit())return this.emit(g);this.state=Vy},t.canEmit=function(){for(var g=0;gt.threshold&&C&t.direction},e.attrTest=function(g){return Hy.prototype.attrTest.call(this,g)&&(this.state&Zy||!(this.state&Zy)&&this.directionTest(g))},e.emit=function(t){this.pX=t.deltaX,this.pY=t.deltaY;var e=Ky(t.direction);e&&(t.additionalEvent=this.options.event+e),g.prototype.emit.call(this,t)},t}(Hy),Jy=function(g){function t(t){return void 0===t&&(t={}),g.call(this,nv({event:"swipe",threshold:10,velocity:.3,direction:Wv|Qv,pointers:1},t))||this}rv(t,g);var e=t.prototype;return e.getTouchAction=function(){return Xy.prototype.getTouchAction.call(this)},e.attrTest=function(t){var e,A=this.options.direction;return 
A&(Wv|Qv)?e=t.overallVelocity:A&Wv?e=t.overallVelocityX:A&Qv&&(e=t.overallVelocityY),g.prototype.attrTest.call(this,t)&&A&t.offsetDirection&&t.distance>this.options.threshold&&t.maxPointers===this.options.pointers&&pv(e)>this.options.velocity&&t.eventType&Zv},e.emit=function(g){var t=Ky(g.offsetDirection);t&&this.manager.emit(this.options.event+t,g),this.manager.emit(this.options.event,g)},t}(Hy),qy=function(g){function t(t){return void 0===t&&(t={}),g.call(this,nv({event:"pinch",threshold:0,pointers:2},t))||this}rv(t,g);var e=t.prototype;return e.getTouchAction=function(){return[kv]},e.attrTest=function(t){return g.prototype.attrTest.call(this,t)&&(Math.abs(t.scale-1)>this.options.threshold||this.state&Zy)},e.emit=function(t){if(1!==t.scale){var e=t.scale<1?"in":"out";t.additionalEvent=this.options.event+e}g.prototype.emit.call(this,t)},t}(Hy),$y=function(g){function t(t){return void 0===t&&(t={}),g.call(this,nv({event:"rotate",threshold:0,pointers:2},t))||this}rv(t,g);var e=t.prototype;return e.getTouchAction=function(){return[kv]},e.attrTest=function(t){return g.prototype.attrTest.call(this,t)&&(Math.abs(t.rotation)>this.options.threshold||this.state&Zy)},t}(Hy),gm=function(g){function t(t){var e;return void 0===t&&(t={}),(e=g.call(this,nv({event:"press",pointers:1,time:251,threshold:9},t))||this)._timer=null,e._input=null,e}rv(t,g);var e=t.prototype;return e.getTouchAction=function(){return[wv]},e.process=function(g){var t=this,e=this.options,A=g.pointers.length===e.pointers,C=g.distancee.time;if(this._input=g,!C||!A||g.eventType&(Zv|Fv)&&!I)this.reset();else if(g.eventType&zv)this.reset(),this._timer=setTimeout((function(){t.state=jy,t.tryEmit()}),e.time);else if(g.eventType&Zv)return jy;return Vy},e.reset=function(){clearTimeout(this._timer)},e.emit=function(g){this.state===jy&&(g&&g.eventType&Zv?this.manager.emit(this.options.event+"up",g):(this._input.timeStamp=fv(),this.manager.emit(this.options.event,this._input)))},t}(Uy),tm={domEvents:!1,touchAction:bv,enable:!0,inputTarget:null,inputClass:null,cssProps:{userSelect:"none",touchSelect:"none",touchCallout:"none",contentZooming:"none",userDrag:"none",tapHighlightColor:"rgba(0,0,0,0)"}},em=[[$y,{enable:!1}],[qy,{enable:!1},["rotate"]],[Jy,{direction:Wv}],[Xy,{direction:Wv},["swipe"]],[_y],[_y,{event:"doubletap",taps:2},["tap"]],[gm]];function Am(g,t){var e,A=g.element;A.style&&(Kv(g.options.cssProps,(function(C,I){e=vv(A.style,I),t?(g.oldCssProps[e]=A.style[e],A.style[e]=C):A.style[e]=g.oldCssProps[e]||""})),t||(g.oldCssProps={}))}var Cm=function(){function g(g,t){var e,A=this;this.options=dv({},tm,t||{}),this.options.inputTarget=this.options.inputTarget||g,this.handlers={},this.session={},this.recognizers=[],this.oldCssProps={},this.element=g,this.input=new((e=this).options.inputClass||(Nv?fy:Rv?wy:Dv?By:Ty))(e,oy),this.touchAction=new qv(this,this.options.touchAction),Am(this,!0),Kv(this.options.recognizers,(function(g){var t=A.add(new g[0](g[1]));g[2]&&t.recognizeWith(g[2]),g[3]&&t.requireFailure(g[3])}),this)}var t=g.prototype;return t.set=function(g){return dv(this.options,g),g.touchAction&&this.touchAction.update(),g.inputTarget&&(this.input.destroy(),this.input.target=g.inputTarget,this.input.init()),this},t.stop=function(g){this.session.stopped=g?2:1},t.recognize=function(g){var t=this.session;if(!t.stopped){var e;this.touchAction.preventDefaults(g);var A=this.recognizers,C=t.curRecognizer;(!C||C&&C.state&jy)&&(t.curRecognizer=null,C=null);for(var I=0;I\s*\(/gm,"{anonymous}()@"):"Unknown Stack 
Trace",C=window.console&&(window.console.warn||window.console.log);return C&&C.call(window.console,A,e),g.apply(this,arguments)}}var am=sm((function(g,t,e){for(var A=Object.keys(t),C=0;C=g.length?{done:!0}:{done:!1,value:g[A++]}},e:function(g){throw g},f:C}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var I,i=!0,o=!1;return{s:function(){e=e.call(g)},n:function(){var g=e.next();return i=g.done,g},e:function(g){o=!0,I=g},f:function(){try{i||null==e.return||e.return()}finally{if(o)throw I}}}}function fm(g,t){(null==t||t>g.length)&&(t=g.length);for(var e=0,A=new Array(t);e2)return mm.apply(void 0,Bl(A=[ym(t[0],t[1])]).call(A,wl(Sl(t).call(t,2))));var C,I=t[0],i=t[1],o=pm(jl(i));try{for(o.s();!(C=o.n()).done;){var n=C.value;Object.prototype.propertyIsEnumerable.call(i,n)&&(i[n]===vm?delete I[n]:null===I[n]||null===i[n]||"object"!==kh(I[n])||"object"!==kh(i[n])||Vl(I[n])||Vl(i[n])?I[n]=bm(i[n]):I[n]=mm(I[n],i[n]))}}catch(g){o.e(g)}finally{o.f()}return I}function bm(g){return Vl(g)?ql(g).call(g,(function(g){return bm(g)})):"object"===kh(g)&&null!==g?mm({},g):g}function wm(g){for(var t=0,e=Cc(g);t>>0,g=(C*=g)>>>0,g+=4294967296*(C-=g)}return 2.3283064365386963e-10*(g>>>0)}}(),t=g(" "),e=g(" "),A=g(" "),C=0;C2&&void 0!==arguments[2]&&arguments[2];for(var A in g)if(void 0!==t[A])if(null===t[A]||"object"!==kh(t[A]))Bm(g,t,A,e);else{var C=g[A],I=t[A];Mm(C)&&Mm(I)&&zm(C,I,e)}}function Sm(g,t,e){var A=arguments.length>3&&void 0!==arguments[3]&&arguments[3];if(Vl(e))throw new TypeError("Arrays are not supported by deepExtend");for(var C=0;C3&&void 0!==arguments[3]&&arguments[3];if(Vl(e))throw new TypeError("Arrays are not supported by deepExtend");for(var C in e)if(Object.prototype.hasOwnProperty.call(e,C)&&!ju(g).call(g,C))if(e[C]&&e[C].constructor===Object)void 0===t[C]&&(t[C]={}),t[C].constructor===Object?Fm(t[C],e[C]):Bm(t,e,C,A);else if(Vl(e[C])){t[C]=[];for(var I=0;I2&&void 0!==arguments[2]&&arguments[2],A=arguments.length>3&&void 0!==arguments[3]&&arguments[3];for(var C in t)if(Object.prototype.hasOwnProperty.call(t,C)||!0===e)if("object"===kh(t[C])&&null!==t[C]&&_u(t[C])===Object.prototype)void 0===g[C]?g[C]=Fm({},t[C],e):"object"===kh(g[C])&&null!==g[C]&&_u(g[C])===Object.prototype?Fm(g[C],t[C],e):Bm(g,t,C,A);else if(Vl(t[C])){var I;g[C]=Sl(I=t[C]).call(I)}else Bm(g,t,C,A);return g}function Gm(g,t){var e;return Bl(e=[]).call(e,wl(g),[t])}function jm(g){return g.getBoundingClientRect().top}function Lm(g,t){if(Vl(g))for(var e=g.length,A=0;A3&&void 0!==arguments[3]?arguments[3]:{},C=function(g){return null!=g},I=function(g){return null!==g&&"object"===kh(g)};if(!I(g))throw new Error("Parameter mergeTarget must be an object");if(!I(t))throw new Error("Parameter options must be an object");if(!C(e))throw new Error("Parameter option must have a value");if(!I(A))throw new Error("Parameter globalOptions must be an object");var i=t[e],o=I(A)&&!function(g){for(var t in g)if(Object.prototype.hasOwnProperty.call(g,t))return!1;return!0}(A)?A[e]:void 0,n=o?o.enabled:void 0;if(void 0!==i){if("boolean"==typeof i)return I(g[e])||(g[e]={}),void(g[e].enabled=i);if(null===i&&!I(g[e])){if(!C(o))return;g[e]=pf(o)}if(I(i)){var r=!0;void 0!==i.enabled?r=i.enabled:void 0!==n&&(r=o.enabled),function(g,t,e){I(g[e])||(g[e]={});var A=t[e],C=g[e];for(var i in A)Object.prototype.hasOwnProperty.call(A,i)&&(C[i]=A[i])}(g,t,e),g[e].enabled=r}}}var $m={linear:function(g){return g},easeInQuad:function(g){return 
g*g},easeOutQuad:function(g){return g*(2-g)},easeInOutQuad:function(g){return g<.5?2*g*g:(4-2*g)*g-1},easeInCubic:function(g){return g*g*g},easeOutCubic:function(g){return--g*g*g+1},easeInOutCubic:function(g){return g<.5?4*g*g*g:(g-1)*(2*g-2)*(2*g-2)+1},easeInQuart:function(g){return g*g*g*g},easeOutQuart:function(g){return 1- --g*g*g*g},easeInOutQuart:function(g){return g<.5?8*g*g*g*g:1-8*--g*g*g*g},easeInQuint:function(g){return g*g*g*g*g},easeOutQuint:function(g){return 1+--g*g*g*g*g},easeInOutQuint:function(g){return g<.5?16*g*g*g*g*g:1+16*--g*g*g*g*g}};function gb(g,t){var e;Vl(t)||(t=[t]);var A,C=pm(g);try{for(C.s();!(A=C.n()).done;){var I=A.value;if(I){e=I[t[0]];for(var i=1;i0&&void 0!==arguments[0]?arguments[0]:1;cd(this,g),this.pixelRatio=t,this.generated=!1,this.centerCoordinates={x:144.5,y:144.5},this.r=289*.49,this.color={r:255,g:255,b:255,a:1},this.hueCircle=void 0,this.initialColor={r:255,g:255,b:255,a:1},this.previousColor=void 0,this.applied=!1,this.updateCallback=function(){},this.closeCallback=function(){},this._create()}return Bh(g,[{key:"insertTo",value:function(g){void 0!==this.hammer&&(this.hammer.destroy(),this.hammer=void 0),this.container=g,this.container.appendChild(this.frame),this._bindHammer(),this._setSize()}},{key:"setUpdateCallback",value:function(g){if("function"!=typeof g)throw new Error("Function attempted to set as colorPicker update callback is not a function.");this.updateCallback=g}},{key:"setCloseCallback",value:function(g){if("function"!=typeof g)throw new Error("Function attempted to set as colorPicker closing callback is not a function.");this.closeCallback=g}},{key:"_isColorString",value:function(g){if("string"==typeof g)return tb[g]}},{key:"setColor",value:function(g){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1];if("none"!==g){var e,A=this._isColorString(g);if(void 0!==A&&(g=A),!0===Pm(g)){if(!0===Xm(g)){var C=g.substr(4).substr(0,g.length-5).split(",");e={r:C[0],g:C[1],b:C[2],a:1}}else if(!0===function(g){return Nm.test(g)}(g)){var I=g.substr(5).substr(0,g.length-6).split(",");e={r:I[0],g:I[1],b:I[2],a:I[3]}}else if(!0===Km(g)){var i=Vm(g);e={r:i.r,g:i.g,b:i.b,a:1}}}else if(g instanceof Object&&void 0!==g.r&&void 0!==g.g&&void 0!==g.b){var o=void 0!==g.a?g.a:"1.0";e={r:g.r,g:g.g,b:g.b,a:o}}if(void 0===e)throw new Error("Unknown color passed to the colorPicker. Supported are strings: rgb, hex, rgba. Object: rgb ({r:r,g:g,b:b,[a:a]}). 
Supplied: "+xf(g));this._setColor(e,t)}}},{key:"show",value:function(){void 0!==this.closeCallback&&(this.closeCallback(),this.closeCallback=void 0),this.applied=!1,this.frame.style.display="block",this._generateHueCircle()}},{key:"_hide",value:function(){var g=this;!0===(!(arguments.length>0&&void 0!==arguments[0])||arguments[0])&&(this.previousColor=yA({},this.color)),!0===this.applied&&this.updateCallback(this.initialColor),this.frame.style.display="none",_f((function(){void 0!==g.closeCallback&&(g.closeCallback(),g.closeCallback=void 0)}),0)}},{key:"_save",value:function(){this.updateCallback(this.color),this.applied=!1,this._hide()}},{key:"_apply",value:function(){this.applied=!0,this.updateCallback(this.color),this._updatePicker(this.color)}},{key:"_loadLast",value:function(){void 0!==this.previousColor?this.setColor(this.previousColor,!1):alert("There is no last color to load...")}},{key:"_setColor",value:function(g){!0===(!(arguments.length>1&&void 0!==arguments[1])||arguments[1])&&(this.initialColor=yA({},g)),this.color=g;var t=Um(g.r,g.g,g.b),e=2*Math.PI,A=this.r*t.s,C=this.centerCoordinates.x+A*Math.sin(e*t.h),I=this.centerCoordinates.y+A*Math.cos(e*t.h);this.colorPickerSelector.style.left=C-.5*this.colorPickerSelector.clientWidth+"px",this.colorPickerSelector.style.top=I-.5*this.colorPickerSelector.clientHeight+"px",this._updatePicker(g)}},{key:"_setOpacity",value:function(g){this.color.a=g/100,this._updatePicker(this.color)}},{key:"_setBrightness",value:function(g){var t=Um(this.color.r,this.color.g,this.color.b);t.v=g/100;var e=_m(t.h,t.s,t.v);e.a=this.color.a,this.color=e,this._updatePicker()}},{key:"_updatePicker",value:function(){var g=arguments.length>0&&void 0!==arguments[0]?arguments[0]:this.color,t=Um(g.r,g.g,g.b),e=this.colorPickerCanvas.getContext("2d");void 0===this.pixelRation&&(this.pixelRatio=(window.devicePixelRatio||1)/(e.webkitBackingStorePixelRatio||e.mozBackingStorePixelRatio||e.msBackingStorePixelRatio||e.oBackingStorePixelRatio||e.backingStorePixelRatio||1)),e.setTransform(this.pixelRatio,0,0,this.pixelRatio,0,0);var A=this.colorPickerCanvas.clientWidth,C=this.colorPickerCanvas.clientHeight;e.clearRect(0,0,A,C),e.putImageData(this.hueCircle,0,0),e.fillStyle="rgba(0,0,0,"+(1-t.v)+")",e.circle(this.centerCoordinates.x,this.centerCoordinates.y,this.r),ov(e).call(e),this.brightnessRange.value=100*t.v,this.opacityRange.value=100*g.a,this.initialColorDiv.style.backgroundColor="rgba("+this.initialColor.r+","+this.initialColor.g+","+this.initialColor.b+","+this.initialColor.a+")",this.newColorDiv.style.backgroundColor="rgba("+this.color.r+","+this.color.g+","+this.color.b+","+this.color.a+")"}},{key:"_setSize",value:function(){this.colorPickerCanvas.style.width="100%",this.colorPickerCanvas.style.height="100%",this.colorPickerCanvas.width=289*this.pixelRatio,this.colorPickerCanvas.height=289*this.pixelRatio}},{key:"_create",value:function(){var g,t,e,A;if(this.frame=document.createElement("div"),this.frame.className="vis-color-picker",this.colorPickerDiv=document.createElement("div"),this.colorPickerSelector=document.createElement("div"),this.colorPickerSelector.className="vis-selector",this.colorPickerDiv.appendChild(this.colorPickerSelector),this.colorPickerCanvas=document.createElement("canvas"),this.colorPickerDiv.appendChild(this.colorPickerCanvas),this.colorPickerCanvas.getContext){var 
C=this.colorPickerCanvas.getContext("2d");this.pixelRatio=(window.devicePixelRatio||1)/(C.webkitBackingStorePixelRatio||C.mozBackingStorePixelRatio||C.msBackingStorePixelRatio||C.oBackingStorePixelRatio||C.backingStorePixelRatio||1),this.colorPickerCanvas.getContext("2d").setTransform(this.pixelRatio,0,0,this.pixelRatio,0,0)}else{var I=document.createElement("DIV");I.style.color="red",I.style.fontWeight="bold",I.style.padding="10px",I.innerText="Error: your browser does not support HTML canvas",this.colorPickerCanvas.appendChild(I)}this.colorPickerDiv.className="vis-color",this.opacityDiv=document.createElement("div"),this.opacityDiv.className="vis-opacity",this.brightnessDiv=document.createElement("div"),this.brightnessDiv.className="vis-brightness",this.arrowDiv=document.createElement("div"),this.arrowDiv.className="vis-arrow",this.opacityRange=document.createElement("input");try{this.opacityRange.type="range",this.opacityRange.min="0",this.opacityRange.max="100"}catch(g){}this.opacityRange.value="100",this.opacityRange.className="vis-range",this.brightnessRange=document.createElement("input");try{this.brightnessRange.type="range",this.brightnessRange.min="0",this.brightnessRange.max="100"}catch(g){}this.brightnessRange.value="100",this.brightnessRange.className="vis-range",this.opacityDiv.appendChild(this.opacityRange),this.brightnessDiv.appendChild(this.brightnessRange);var i=this;this.opacityRange.onchange=function(){i._setOpacity(this.value)},this.opacityRange.oninput=function(){i._setOpacity(this.value)},this.brightnessRange.onchange=function(){i._setBrightness(this.value)},this.brightnessRange.oninput=function(){i._setBrightness(this.value)},this.brightnessLabel=document.createElement("div"),this.brightnessLabel.className="vis-label vis-brightness",this.brightnessLabel.innerText="brightness:",this.opacityLabel=document.createElement("div"),this.opacityLabel.className="vis-label vis-opacity",this.opacityLabel.innerText="opacity:",this.newColorDiv=document.createElement("div"),this.newColorDiv.className="vis-new-color",this.newColorDiv.innerText="new",this.initialColorDiv=document.createElement("div"),this.initialColorDiv.className="vis-initial-color",this.initialColorDiv.innerText="initial",this.cancelButton=document.createElement("div"),this.cancelButton.className="vis-button vis-cancel",this.cancelButton.innerText="cancel",this.cancelButton.onclick=QA(g=this._hide).call(g,this,!1),this.applyButton=document.createElement("div"),this.applyButton.className="vis-button vis-apply",this.applyButton.innerText="apply",this.applyButton.onclick=QA(t=this._apply).call(t,this),this.saveButton=document.createElement("div"),this.saveButton.className="vis-button vis-save",this.saveButton.innerText="save",this.saveButton.onclick=QA(e=this._save).call(e,this),this.loadButton=document.createElement("div"),this.loadButton.className="vis-button vis-load",this.loadButton.innerText="load last",this.loadButton.onclick=QA(A=this._loadLast).call(A,this),this.frame.appendChild(this.colorPickerDiv),this.frame.appendChild(this.arrowDiv),this.frame.appendChild(this.brightnessLabel),this.frame.appendChild(this.brightnessDiv),this.frame.appendChild(this.opacityLabel),this.frame.appendChild(this.opacityDiv),this.frame.appendChild(this.newColorDiv),this.frame.appendChild(this.initialColorDiv),this.frame.appendChild(this.cancelButton),this.frame.appendChild(this.applyButton),this.frame.appendChild(this.saveButton),this.frame.appendChild(this.loadButton)}},{key:"_bindHammer",value:function(){var 
g=this;this.drag={},this.pinch={},this.hammer=new km(this.colorPickerCanvas),this.hammer.get("pinch").set({enable:!0}),this.hammer.on("hammer.input",(function(t){t.isFirst&&g._moveSelector(t)})),this.hammer.on("tap",(function(t){g._moveSelector(t)})),this.hammer.on("panstart",(function(t){g._moveSelector(t)})),this.hammer.on("panmove",(function(t){g._moveSelector(t)})),this.hammer.on("panend",(function(t){g._moveSelector(t)}))}},{key:"_generateHueCircle",value:function(){if(!1===this.generated){var g=this.colorPickerCanvas.getContext("2d");void 0===this.pixelRation&&(this.pixelRatio=(window.devicePixelRatio||1)/(g.webkitBackingStorePixelRatio||g.mozBackingStorePixelRatio||g.msBackingStorePixelRatio||g.oBackingStorePixelRatio||g.backingStorePixelRatio||1)),g.setTransform(this.pixelRatio,0,0,this.pixelRatio,0,0);var t,e,A,C,I=this.colorPickerCanvas.clientWidth,i=this.colorPickerCanvas.clientHeight;g.clearRect(0,0,I,i),this.centerCoordinates={x:.5*I,y:.5*i},this.r=.49*I;var o,n=2*Math.PI/360,r=1/this.r;for(A=0;A<360;A++)for(C=0;C3&&void 0!==arguments[3]?arguments[3]:1,I=arguments.length>4&&void 0!==arguments[4]?arguments[4]:function(){return!1};cd(this,g),this.parent=t,this.changedOptions=[],this.container=e,this.allowCreation=!1,this.hideOption=I,this.options={},this.initialized=!1,this.popupCounter=0,this.defaultOptions={enabled:!1,filter:!0,container:void 0,showButton:!0},yA(this.options,this.defaultOptions),this.configureOptions=A,this.moduleOptions={},this.domElements=[],this.popupDiv={},this.popupLimit=5,this.popupHistory={},this.colorPicker=new eb(C),this.wrapper=void 0}return Bh(g,[{key:"setOptions",value:function(g){if(void 0!==g){this.popupHistory={},this._removePopup();var t=!0;if("string"==typeof g)this.options.filter=g;else if(Vl(g))this.options.filter=g.join();else if("object"===kh(g)){if(null==g)throw new TypeError("options cannot be null");void 0!==g.container&&(this.options.container=g.container),void 0!==Ap(g)&&(this.options.filter=Ap(g)),void 0!==g.showButton&&(this.options.showButton=g.showButton),void 0!==g.enabled&&(t=g.enabled)}else"boolean"==typeof g?(this.options.filter=!0,t=g):"function"==typeof g&&(this.options.filter=g,t=!0);!1===Ap(this.options)&&(t=!1),this.options.enabled=t}this._clean()}},{key:"setModuleOptions",value:function(g){this.moduleOptions=g,!0===this.options.enabled&&(this._clean(),void 0!==this.options.container&&(this.container=this.options.container),this._create())}},{key:"_create",value:function(){this._clean(),this.changedOptions=[];var g=Ap(this.options),t=0,e=!1;for(var A in this.configureOptions)Object.prototype.hasOwnProperty.call(this.configureOptions,A)&&(this.allowCreation=!1,e=!1,"function"==typeof g?e=(e=g(A,[]))||this._handleObject(this.configureOptions[A],[A],!0):!0!==g&&-1===$p(g).call(g,A)||(e=!0),!1!==e&&(this.allowCreation=!0,t>0&&this._makeItem([]),this._makeHeader(A),this._handleObject(this.configureOptions[A],[A])),t++);this._makeButton(),this._push()}},{key:"_push",value:function(){this.wrapper=document.createElement("div"),this.wrapper.className="vis-configuration-wrapper",this.container.appendChild(this.wrapper);for(var g=0;g1?e-1:0),C=1;C2&&void 0!==arguments[2]&&arguments[2],A=document.createElement("div");if(A.className="vis-configuration vis-config-label vis-config-s"+t.length,!0===e){for(;A.firstChild;)A.removeChild(A.firstChild);A.appendChild(Ab("i","b",g))}else A.innerText=g+":";return A}},{key:"_makeDropdown",value:function(g,t,e){var A=document.createElement("select");A.className="vis-configuration 
vis-config-select";var C=0;void 0!==t&&-1!==$p(g).call(g,t)&&(C=$p(g).call(g,t));for(var I=0;II&&1!==I&&(o.max=Math.ceil(t*s),r=o.max,n="range increased"),o.value=t}else o.value=A;var a=document.createElement("input");a.className="vis-configuration vis-config-rangeinput",a.value=o.value;var d=this;o.onchange=function(){a.value=this.value,d._update(Number(this.value),e)},o.oninput=function(){a.value=this.value};var h=this._makeLabel(e[e.length-1],e),l=this._makeItem(e,h,o,a);""!==n&&this.popupHistory[l]!==r&&(this.popupHistory[l]=r,this._setupPopup(n,l))}},{key:"_makeButton",value:function(){var g=this;if(!0===this.options.showButton){var t=document.createElement("div");t.className="vis-configuration vis-config-button",t.innerText="generate options",t.onclick=function(){g._printOptions()},t.onmouseover=function(){t.className="vis-configuration vis-config-button hover"},t.onmouseout=function(){t.className="vis-configuration vis-config-button"},this.optionsContainer=document.createElement("div"),this.optionsContainer.className="vis-configuration vis-config-option-container",this.domElements.push(this.optionsContainer),this.domElements.push(t)}}},{key:"_setupPopup",value:function(g,t){var e=this;if(!0===this.initialized&&!0===this.allowCreation&&this.popupCounter1&&void 0!==arguments[1]?arguments[1]:[],e=arguments.length>2&&void 0!==arguments[2]&&arguments[2],A=!1,C=Ap(this.options),I=!1;for(var i in g)if(Object.prototype.hasOwnProperty.call(g,i)){A=!0;var o=g[i],n=Gm(t,i);if("function"==typeof C&&!1===(A=C(i,t))&&!Vl(o)&&"string"!=typeof o&&"boolean"!=typeof o&&o instanceof Object&&(this.allowCreation=!1,A=this._handleObject(o,n,!0),this.allowCreation=!1===e),!1!==A){I=!0;var r=this._getValue(n);if(Vl(o))this._handleArray(o,r,n);else if("string"==typeof o)this._makeTextInput(o,r,n);else if("boolean"==typeof o)this._makeCheckbox(o,r,n);else if(o instanceof Object){if(!this.hideOption(t,i,this.moduleOptions))if(void 0!==o.enabled){var s=Gm(n,"enabled"),a=this._getValue(s);if(!0===a){var d=this._makeLabel(i,n,!0);this._makeItem(n,d),I=this._handleObject(o,n)||I}else this._makeCheckbox(o,a,n)}else{var h=this._makeLabel(i,n,!0);this._makeItem(n,h),I=this._handleObject(o,n)||I}}else console.error("dont know how to handle",o,i,n)}}return I}},{key:"_handleArray",value:function(g,t,e){"string"==typeof g[0]&&"color"===g[0]?(this._makeColorField(g,t,e),g[1]!==t&&this.changedOptions.push({path:e,value:t})):"string"==typeof g[0]?(this._makeDropdown(g,t,e),g[0]!==t&&this.changedOptions.push({path:e,value:t})):"number"==typeof g[0]&&(this._makeRange(g,t,e),g[0]!==t&&this.changedOptions.push({path:e,value:Number(t)}))}},{key:"_update",value:function(g,t){var e=this._constructOptions(g,t);this.parent.body&&this.parent.body.emitter&&this.parent.body.emitter.emit&&this.parent.body.emitter.emit("configChange",e),this.initialized=!0,this.parent.setOptions(e)}},{key:"_constructOptions",value:function(g,t){var e=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},A=e;g="false"!==(g="true"===g||g)&&g;for(var C=0;CC-this.padding&&(o=!0),I=o?this.x-e:this.x,i=n?this.y-t:this.y}else(i=this.y-t)+t+this.padding>A&&(i=A-t-this.padding),iC&&(I=C-e-this.padding),Ii.distance?" in "+g.printLocation(I.path,t,"")+"Perhaps it was misplaced? Matching option found at: "+g.printLocation(i.path,i.closestMatch,""):I.distance<=8?'. Did you mean "'+I.closestMatch+'"?'+g.printLocation(I.path,t):". 
Did you mean one of these: "+g.print(Cc(e))+g.printLocation(A,t),console.error('%cUnknown option detected: "'+t+'"'+C,nb),ob=!0}},{key:"findInOptions",value:function(t,e,A){var C,I=arguments.length>3&&void 0!==arguments[3]&&arguments[3],i=1e9,o="",n=[],r=t.toLowerCase(),s=void 0;for(var a in e){var d=void 0;if(void 0!==e[a].__type__&&!0===I){var h=g.findInOptions(t,e[a],Gm(A,a));i>h.distance&&(o=h.closestMatch,n=h.path,i=h.distance,s=h.indexMatch)}else{var l;-1!==$p(l=a.toLowerCase()).call(l,r)&&(s=a),i>(d=g.levenshteinDistance(t,a))&&(o=a,n=Sl(C=A).call(C),i=d)}}return{closestMatch:o,path:n,distance:i,indexMatch:s}}},{key:"printLocation",value:function(g,t){for(var e="\n\n"+(arguments.length>2&&void 0!==arguments[2]?arguments[2]:"Problem value found at: \n")+"options = {\n",A=0;A":!0,"--":!0},mb="",bb=0,wb="",xb="",kb=vb.NULL;function Eb(){bb++,wb=mb.charAt(bb)}function Ob(){return mb.charAt(bb+1)}function Tb(g){var t=g.charCodeAt(0);return t<47?35===t||46===t:t<59?t>47:t<91?t>64:t<96?95===t:t<123&&t>96}function Db(g,t){if(g||(g={}),t)for(var e in t)t.hasOwnProperty(e)&&(g[e]=t[e]);return g}function Nb(g,t,e){for(var A=t.split("."),C=g;A.length;){var I=A.shift();A.length?(C[I]||(C[I]={}),C=C[I]):C[I]=e}}function Rb(g,t){for(var e,A,C=null,I=[g],i=g;i.parent;)I.push(i.parent),i=i.parent;if(i.nodes)for(e=0,A=i.nodes.length;e=0;e--){var o,n=I[e];n.nodes||(n.nodes=[]),-1===$p(o=n.nodes).call(o,C)&&n.nodes.push(C)}t.attr&&(C.attr=Db(C.attr,t.attr))}function Pb(g,t){if(g.edges||(g.edges=[]),g.edges.push(t),g.edge){var e=Db({},g.edge);t.attr=Db(e,t.attr)}}function Mb(g,t,e,A,C){var I={from:t,to:e,type:A};return g.edge&&(I.attr=Db({},g.edge)),I.attr=Db(I.attr||{},C),null!=C&&C.hasOwnProperty("arrows")&&null!=C.arrows&&(I.arrows={to:{enabled:!0,type:C.arrows.type}},C.arrows=null),I}function Bb(){for(kb=vb.NULL,xb="";" "===wb||"\t"===wb||"\n"===wb||"\r"===wb;)Eb();do{var g=!1;if("#"===wb){for(var t=bb-1;" "===mb.charAt(t)||"\t"===mb.charAt(t);)t--;if("\n"===mb.charAt(t)||""===mb.charAt(t)){for(;""!=wb&&"\n"!=wb;)Eb();g=!0}}if("/"===wb&&"/"===Ob()){for(;""!=wb&&"\n"!=wb;)Eb();g=!0}if("/"===wb&&"*"===Ob()){for(;""!=wb;){if("*"===wb&&"/"===Ob()){Eb(),Eb();break}Eb()}g=!0}for(;" "===wb||"\t"===wb||"\n"===wb||"\r"===wb;)Eb()}while(g);if(""!==wb){var e=wb+Ob();if(yb[e])return kb=vb.DELIMITER,xb=e,Eb(),void Eb();if(yb[wb])return kb=vb.DELIMITER,xb=wb,void Eb();if(Tb(wb)||"-"===wb){for(xb+=wb,Eb();Tb(wb);)xb+=wb,Eb();return"false"===xb?xb=!1:"true"===xb?xb=!0:isNaN(Number(xb))||(xb=Number(xb)),void(kb=vb.IDENTIFIER)}if('"'===wb){for(Eb();""!=wb&&('"'!=wb||'"'===wb&&'"'===Ob());)'"'===wb?(xb+=wb,Eb()):"\\"===wb&&"n"===Ob()?(xb+="\n",Eb()):xb+=wb,Eb();if('"'!=wb)throw jb('End of string " expected');return Eb(),void(kb=vb.IDENTIFIER)}for(kb=vb.UNKNOWN;""!=wb;)xb+=wb,Eb();throw new SyntaxError('Syntax error in part "'+Lb(xb,30)+'"')}kb=vb.DELIMITER}function zb(g){for(;""!==xb&&"}"!=xb;)Sb(g),";"===xb&&Bb()}function Sb(g){var t=Zb(g);if(t)Fb(g,t);else{var e=function(g){if("node"===xb)return Bb(),g.node=Gb(),"node";if("edge"===xb)return Bb(),g.edge=Gb(),"edge";if("graph"===xb)return Bb(),g.graph=Gb(),"graph";return null}(g);if(!e){if(kb!=vb.IDENTIFIER)throw jb("Identifier expected");var A=xb;if(Bb(),"="===xb){if(Bb(),kb!=vb.IDENTIFIER)throw jb("Identifier expected");g[A]=xb,Bb()}else!function(g,t){var e={id:t},A=Gb();A&&(e.attr=A);Rb(g,e),Fb(g,t)}(g,A)}}}function Zb(g){var 
t=null;if("subgraph"===xb&&((t={}).type="subgraph",Bb(),kb===vb.IDENTIFIER&&(t.id=xb,Bb())),"{"===xb){if(Bb(),t||(t={}),t.parent=g,t.node=g.node,t.edge=g.edge,t.graph=g.graph,zb(t),"}"!=xb)throw jb("Angle bracket } expected");Bb(),delete t.node,delete t.edge,delete t.graph,delete t.parent,g.subgraphs||(g.subgraphs=[]),g.subgraphs.push(t)}return t}function Fb(g,t){for(;"->"===xb||"--"===xb;){var e,A=xb;Bb();var C=Zb(g);if(C)e=C;else{if(kb!=vb.IDENTIFIER)throw jb("Identifier or subgraph expected");Rb(g,{id:e=xb}),Bb()}Pb(g,Mb(g,t,e,A,Gb())),t=e}}function Gb(){for(var g,t,e=null,A={dashed:!0,solid:!1,dotted:[1,5]},C={dot:"circle",box:"box",crow:"crow",curve:"curve",icurve:"inv_curve",normal:"triangle",inv:"inv_triangle",diamond:"diamond",tee:"bar",vee:"vee"},I=new Array,i=new Array;"["===xb;){for(Bb(),e={};""!==xb&&"]"!=xb;){if(kb!=vb.IDENTIFIER)throw jb("Attribute name expected");var o=xb;if(Bb(),"="!=xb)throw jb("Equal sign = expected");if(Bb(),kb!=vb.IDENTIFIER)throw jb("Attribute value expected");var n=xb;"style"===o&&(n=A[n]),"arrowhead"===o&&(o="arrows",n={to:{enabled:!0,type:C[n]}}),"arrowtail"===o&&(o="arrows",n={from:{enabled:!0,type:C[n]}}),I.push({attr:e,name:o,value:n}),i.push(o),Bb(),","==xb&&Bb()}if("]"!=xb)throw jb("Bracket ] expected");Bb()}if(ju(i).call(i,"dir")){var r={arrows:{}};for(g=0;g"===g.type&&(t.arrows="to"),t};Ec(C=e.edges).call(C,(function(g){var t,e,C,i,o,n,r;(t=g.from instanceof Object?g.from.nodes:{id:g.from},e=g.to instanceof Object?g.to.nodes:{id:g.to},g.from instanceof Object&&g.from.edges)&&Ec(C=g.from.edges).call(C,(function(g){var t=I(g);A.edges.push(t)}));(o=e,n=function(t,e){var C=Mb(A,t.id,e.id,g.type,g.attr),i=I(C);A.edges.push(i)},Vl(i=t)?Ec(i).call(i,(function(g){Vl(o)?Ec(o).call(o,(function(t){n(g,t)})):n(g,o)})):Vl(o)?Ec(o).call(o,(function(g){n(i,g)})):n(i,o),g.to instanceof Object&&g.to.edges)&&Ec(r=g.to.edges).call(r,(function(g){var t=I(g);A.edges.push(t)}))}))}return e.attr&&(A.options=e.attr),A}var Qb=Object.freeze({__proto__:null,DOTToGraph:Wb,parseDOT:ub});function Ub(g,t){var e,A={edges:{inheritColor:!1},nodes:{fixed:!1,parseColor:!1}};null!=t&&(null!=t.fixed&&(A.nodes.fixed=t.fixed),null!=t.parseColor&&(A.nodes.parseColor=t.parseColor),null!=t.inheritColor&&(A.edges.inheritColor=t.inheritColor));var C=g.edges,I=ql(C).call(C,(function(g){var t={from:g.source,id:g.id,to:g.target};return null!=g.attributes&&(t.attributes=g.attributes),null!=g.label&&(t.label=g.label),null!=g.attributes&&null!=g.attributes.title&&(t.title=g.attributes.title),"Directed"===g.type&&(t.arrows="to"),g.color&&!1===A.edges.inheritColor&&(t.color=g.color),t}));return{nodes:ql(e=g.nodes).call(e,(function(g){var t={id:g.id,fixed:A.nodes.fixed&&null!=g.x&&null!=g.y};return null!=g.attributes&&(t.attributes=g.attributes),null!=g.label&&(t.label=g.label),null!=g.size&&(t.size=g.size),null!=g.attributes&&null!=g.attributes.title&&(t.title=g.attributes.title),null!=g.title&&(t.title=g.title),null!=g.x&&(t.x=g.x),null!=g.y&&(t.y=g.y),null!=g.color&&(!0===A.nodes.parseColor?t.color=g.color:t.color={background:g.color,border:g.color,highlight:{background:g.color,border:g.color},hover:{background:g.color,border:g.color}}),t})),edges:I}}var 
_b=Object.freeze({__proto__:null,parseGephi:Ub}),Hb=Object.freeze({__proto__:null,cn:{addDescription:"单击空白处放置新节点。",addEdge:"添加连接线",addNode:"添加节点",back:"返回",close:"關閉",createEdgeError:"无法将连接线连接到群集。",del:"删除选定",deleteClusterError:"无法删除群集。",edgeDescription:"单击某个节点并将该连接线拖动到另一个节点以连接它们。",edit:"编辑",editClusterError:"无法编辑群集。",editEdge:"编辑连接线",editEdgeDescription:"单击控制节点并将它们拖到节点上连接。",editNode:"编辑节点"},cs:{addDescription:"Kluknutím do prázdného prostoru můžete přidat nový vrchol.",addEdge:"Přidat hranu",addNode:"Přidat vrchol",back:"Zpět",close:"Zavřít",createEdgeError:"Nelze připojit hranu ke shluku.",del:"Smazat výběr",deleteClusterError:"Nelze mazat shluky.",edgeDescription:"Přetažením z jednoho vrcholu do druhého můžete spojit tyto vrcholy novou hranou.",edit:"Upravit",editClusterError:"Nelze upravovat shluky.",editEdge:"Upravit hranu",editEdgeDescription:"Přetažením kontrolního vrcholu hrany ji můžete připojit k jinému vrcholu.",editNode:"Upravit vrchol"},de:{addDescription:"Klicke auf eine freie Stelle, um einen neuen Knoten zu plazieren.",addEdge:"Kante hinzufügen",addNode:"Knoten hinzufügen",back:"Zurück",close:"Schließen",createEdgeError:"Es ist nicht möglich, Kanten mit Clustern zu verbinden.",del:"Lösche Auswahl",deleteClusterError:"Cluster können nicht gelöscht werden.",edgeDescription:"Klicke auf einen Knoten und ziehe die Kante zu einem anderen Knoten, um diese zu verbinden.",edit:"Editieren",editClusterError:"Cluster können nicht editiert werden.",editEdge:"Kante editieren",editEdgeDescription:"Klicke auf die Verbindungspunkte und ziehe diese auf einen Knoten, um sie zu verbinden.",editNode:"Knoten editieren"},en:{addDescription:"Click in an empty space to place a new node.",addEdge:"Add Edge",addNode:"Add Node",back:"Back",close:"Close",createEdgeError:"Cannot link edges to a cluster.",del:"Delete selected",deleteClusterError:"Clusters cannot be deleted.",edgeDescription:"Click on a node and drag the edge to another node to connect them.",edit:"Edit",editClusterError:"Clusters cannot be edited.",editEdge:"Edit Edge",editEdgeDescription:"Click on the control points and drag them to a node to connect to it.",editNode:"Edit Node"},es:{addDescription:"Haga clic en un lugar vacío para colocar un nuevo nodo.",addEdge:"Añadir arista",addNode:"Añadir nodo",back:"Atrás",close:"Cerrar",createEdgeError:"No se puede conectar una arista a un grupo.",del:"Eliminar selección",deleteClusterError:"No es posible eliminar grupos.",edgeDescription:"Haga clic en un nodo y arrastre la arista hacia otro nodo para conectarlos.",edit:"Editar",editClusterError:"No es posible editar grupos.",editEdge:"Editar arista",editEdgeDescription:"Haga clic en un punto de control y arrastrelo a un nodo para conectarlo.",editNode:"Editar nodo"},fr:{addDescription:"Cliquez dans un endroit vide pour placer un nœud.",addEdge:"Ajouter un lien",addNode:"Ajouter un nœud",back:"Retour",close:"Fermer",createEdgeError:"Impossible de créer un lien vers un cluster.",del:"Effacer la sélection",deleteClusterError:"Les clusters ne peuvent pas être effacés.",edgeDescription:"Cliquez sur un nœud et glissez le lien vers un autre nœud pour les connecter.",edit:"Éditer",editClusterError:"Les clusters ne peuvent pas être édités.",editEdge:"Éditer le lien",editEdgeDescription:"Cliquez sur les points de contrôle et glissez-les pour connecter un nœud.",editNode:"Éditer le nœud"},it:{addDescription:"Clicca per aggiungere un nuovo nodo",addEdge:"Aggiungi un vertice",addNode:"Aggiungi un nodo",back:"Indietro",close:"Chiudere",createEdgeError:"Non 
si possono collegare vertici ad un cluster",del:"Cancella la selezione",deleteClusterError:"I cluster non possono essere cancellati",edgeDescription:"Clicca su un nodo e trascinalo ad un altro nodo per connetterli.",edit:"Modifica",editClusterError:"I clusters non possono essere modificati.",editEdge:"Modifica il vertice",editEdgeDescription:"Clicca sui Punti di controllo e trascinali ad un nodo per connetterli.",editNode:"Modifica il nodo"},nl:{addDescription:"Klik op een leeg gebied om een nieuwe node te maken.",addEdge:"Link toevoegen",addNode:"Node toevoegen",back:"Terug",close:"Sluiten",createEdgeError:"Kan geen link maken naar een cluster.",del:"Selectie verwijderen",deleteClusterError:"Clusters kunnen niet worden verwijderd.",edgeDescription:"Klik op een node en sleep de link naar een andere node om ze te verbinden.",edit:"Wijzigen",editClusterError:"Clusters kunnen niet worden aangepast.",editEdge:"Link wijzigen",editEdgeDescription:"Klik op de verbindingspunten en sleep ze naar een node om daarmee te verbinden.",editNode:"Node wijzigen"},pt:{addDescription:"Clique em um espaço em branco para adicionar um novo nó",addEdge:"Adicionar aresta",addNode:"Adicionar nó",back:"Voltar",close:"Fechar",createEdgeError:"Não foi possível linkar arestas a um cluster.",del:"Remover selecionado",deleteClusterError:"Clusters não puderam ser removidos.",edgeDescription:"Clique em um nó e arraste a aresta até outro nó para conectá-los",edit:"Editar",editClusterError:"Clusters não puderam ser editados.",editEdge:"Editar aresta",editEdgeDescription:"Clique nos pontos de controle e os arraste para um nó para conectá-los",editNode:"Editar nó"},ru:{addDescription:"Кликните в свободное место, чтобы добавить новый узел.",addEdge:"Добавить ребро",addNode:"Добавить узел",back:"Назад",close:"Закрывать",createEdgeError:"Невозможно соединить ребра в кластер.",del:"Удалить выбранное",deleteClusterError:"Кластеры не могут быть удалены",edgeDescription:"Кликните на узел и протяните ребро к другому узлу, чтобы соединить их.",edit:"Редактировать",editClusterError:"Кластеры недоступны для редактирования.",editEdge:"Редактировать ребро",editEdgeDescription:"Кликните на контрольные точки и перетащите их в узел, чтобы подключиться к нему.",editNode:"Редактировать узел"},uk:{addDescription:"Kлікніть на вільне місце, щоб додати новий вузол.",addEdge:"Додати край",addNode:"Додати вузол",back:"Назад",close:"Закрити",createEdgeError:"Не можливо об'єднати краї в групу.",del:"Видалити обране",deleteClusterError:"Групи не можуть бути видалені.",edgeDescription:"Клікніть на вузол і перетягніть край до іншого вузла, щоб їх з'єднати.",edit:"Редагувати",editClusterError:"Групи недоступні для редагування.",editEdge:"Редагувати край",editEdgeDescription:"Клікніть на контрольні точки і перетягніть їх у вузол, щоб підключитися до нього.",editNode:"Редагувати вузол"}});var Kb=function(){function g(){cd(this,g),this.NUM_ITERATIONS=4,this.image=new Image,this.canvas=document.createElement("canvas")}return Bh(g,[{key:"init",value:function(){if(!this.initialized()){this.src=this.image.src;var g=this.image.width,t=this.image.height;this.width=g,this.height=t;var e=Math.floor(t/2),A=Math.floor(t/4),C=Math.floor(t/8),I=Math.floor(t/16),i=Math.floor(g/2),o=Math.floor(g/4),n=Math.floor(g/8),r=Math.floor(g/16);this.canvas.width=3*o,this.canvas.height=e,this.coordinates=[[0,0,i,e],[i,0,o,A],[i,A,n,C],[5*n,A,r,I]],this._fillMipMap()}}},{key:"initialized",value:function(){return void 0!==this.coordinates}},{key:"_fillMipMap",value:function(){var 
g=this.canvas.getContext("2d"),t=this.coordinates[0];g.drawImage(this.image,t[0],t[1],t[2],t[3]);for(var e=1;e2){t*=.5;for(var i=0;t>2&&i=this.NUM_ITERATIONS&&(i=this.NUM_ITERATIONS-1);var o=this.coordinates[i];g.drawImage(this.canvas,o[0],o[1],o[2],o[3],e,A,C,I)}else g.drawImage(this.image,e,A,C,I)}}]),g}(),Xb=function(){function g(t){cd(this,g),this.images={},this.imageBroken={},this.callback=t}return Bh(g,[{key:"_tryloadBrokenUrl",value:function(g,t,e){void 0!==g&&void 0!==e&&(void 0!==t?(e.image.onerror=function(){console.error("Could not load brokenImage:",t)},e.image.src=t):console.warn("No broken url image defined"))}},{key:"_redrawWithImage",value:function(g){this.callback&&this.callback(g)}},{key:"load",value:function(g,t){var e=this,A=this.images[g];if(A)return A;var C=new Kb;return this.images[g]=C,C.image.onload=function(){e._fixImageCoordinates(C.image),C.init(),e._redrawWithImage(C)},C.image.onerror=function(){console.error("Could not load image:",g),e._tryloadBrokenUrl(g,t,C)},C.image.src=g,C}},{key:"_fixImageCoordinates",value:function(g){0===g.width&&(document.body.appendChild(g),g.width=g.offsetWidth,g.height=g.offsetHeight,document.body.removeChild(g))}}]),g}(),Jb={},qb={get exports(){return Jb},set exports(g){Jb=g}},$b={},gw={get exports(){return $b},set exports(g){$b=g}},tw=o((function(){if("function"==typeof ArrayBuffer){var g=new ArrayBuffer(8);Object.isExtensible(g)&&Object.defineProperty(g,"a",{value:8})}})),ew=o,Aw=eg,Cw=b,Iw=tw,iw=Object.isExtensible,ow=ew((function(){iw(1)}))||Iw?function(g){return!!Aw(g)&&((!Iw||"ArrayBuffer"!=Cw(g))&&(!iw||iw(g)))}:iw,nw=!o((function(){return Object.isExtensible(Object.preventExtensions({}))})),rw=De,sw=f,aw=_e,dw=eg,hw=$g,lw=qt.f,cw=kn,uw=Tn,pw=ow,fw=nw,vw=!1,yw=Ct("meta"),mw=0,bw=function(g){lw(g,yw,{value:{objectID:"O"+mw++,weakData:{}}})},ww=gw.exports={enable:function(){ww.enable=function(){},vw=!0;var g=cw.f,t=sw([].splice),e={};e[yw]=1,g(e).length&&(cw.f=function(e){for(var A=g(e),C=0,I=A.length;CI;I++)if((o=p(g[I]))&&Nw(Sw,o))return o;return new zw(!1)}A=Rw(g,C)}for(n=d?g.next:A.next;!(r=kw(n,A)).done;){try{o=p(r.value)}catch(g){Mw(A,"throw",g)}if("object"==typeof o&&o&&Nw(Sw,o))return o}return new zw(!1)},Fw=rg,Gw=TypeError,jw=function(g,t){if(Fw(t,g))return g;throw Gw("Incorrect invocation")},Lw=De,Vw=i,Yw=$b,Ww=o,Qw=pe,Uw=Zw,_w=jw,Hw=D,Kw=eg,Xw=gi,Jw=qt.f,qw=ur.forEach,$w=R,gx=UC.set,tx=UC.getterFor,ex=function(g,t,e){var A,C=-1!==g.indexOf("Map"),I=-1!==g.indexOf("Weak"),i=C?"set":"add",o=Vw[g],n=o&&o.prototype,r={};if($w&&Hw(o)&&(I||n.forEach&&!Ww((function(){(new o).entries().next()})))){var s=(A=t((function(t,e){gx(_w(t,s),{type:g,collection:new o}),null!=e&&Uw(e,t[i],{that:t,AS_ENTRIES:C})}))).prototype,a=tx(g);qw(["add","clear","delete","forEach","get","has","set","keys","values","entries"],(function(g){var t="add"==g||"set"==g;!(g in n)||I&&"clear"==g||Qw(s,g,(function(e,A){var C=a(this).collection;if(!t&&I&&!Kw(e))return"get"==g&&void 0;var i=C[g](0===e?0:e,A);return t?this:i}))})),I||Jw(s,"size",{configurable:!0,get:function(){return a(this).collection.size}})}else A=e.getConstructor(t,g,C,i),Yw.enable();return Xw(A,g,!1,!0),r[g]=A,Lw({global:!0,forced:!0},r),I||e.setStrong(A,g,C),A},Ax=zI,Cx=function(g,t,e){for(var A in t)e&&e.unsafe&&g[A]?g[A]=t[A]:Ax(g,A,t[A],e);return g},Ix=ng,ix=Ln,ox=R,nx=ht("species"),rx=function(g){var t=Ix(g);ox&&t&&!t[nx]&&ix(t,nx,{configurable:!0,get:function(){return 
this}})},sx=xI,ax=Ln,dx=Cx,hx=Jt,lx=jw,cx=_,ux=Zw,px=Ri,fx=Pi,vx=rx,yx=R,mx=$b.fastKey,bx=UC.set,wx=UC.getterFor,xx={getConstructor:function(g,t,e,A){var C=g((function(g,C){lx(g,I),bx(g,{type:t,index:sx(null),first:void 0,last:void 0,size:0}),yx||(g.size=0),cx(C)||ux(C,g[A],{that:g,AS_ENTRIES:e})})),I=C.prototype,i=wx(t),o=function(g,t,e){var A,C,I=i(g),o=n(g,t);return o?o.value=e:(I.last=o={index:C=mx(t,!0),key:t,value:e,previous:A=I.last,next:void 0,removed:!1},I.first||(I.first=o),A&&(A.next=o),yx?I.size++:g.size++,"F"!==C&&(I.index[C]=o)),g},n=function(g,t){var e,A=i(g),C=mx(t);if("F"!==C)return A.index[C];for(e=A.first;e;e=e.next)if(e.key==t)return e};return dx(I,{clear:function(){for(var g=i(this),t=g.index,e=g.first;e;)e.removed=!0,e.previous&&(e.previous=e.previous.next=void 0),delete t[e.index],e=e.next;g.first=g.last=void 0,yx?g.size=0:this.size=0},delete:function(g){var t=this,e=i(t),A=n(t,g);if(A){var C=A.next,I=A.previous;delete e.index[A.index],A.removed=!0,I&&(I.next=C),C&&(C.previous=I),e.first==A&&(e.first=C),e.last==A&&(e.last=I),yx?e.size--:t.size--}return!!A},forEach:function(g){for(var t,e=i(this),A=hx(g,arguments.length>1?arguments[1]:void 0);t=t?t.next:e.first;)for(A(t.value,t.key,this);t&&t.removed;)t=t.previous},has:function(g){return!!n(this,g)}}),dx(I,e?{get:function(g){var t=n(this,g);return t&&t.value},set:function(g,t){return o(this,0===g?0:g,t)}}:{add:function(g){return o(this,g=0===g?0:g,g)}}),yx&&ax(I,"size",{configurable:!0,get:function(){return i(this).size}}),C},setStrong:function(g,t,e){var A=t+" Iterator",C=wx(t),I=wx(A);px(g,t,(function(g,t){bx(this,{type:A,target:g,state:C(g),kind:t,last:void 0})}),(function(){for(var g=I(this),t=g.kind,e=g.last;e&&e.removed;)e=e.previous;return g.target&&(g.last=e=e?e.next:g.state.first)?fx("keys"==t?e.key:"values"==t?e.value:[e.key,e.value],!1):(g.target=void 0,fx(void 0,!0))}),e?"entries":"values",!e,!0),vx(t)}};ex("Map",(function(g){return function(){return g(this,arguments.length?arguments[0]:void 0)}}),xx);var kx=Ag.Map;!function(g){g.exports=kx}(qb);var Ex=e(Jb),Ox=function(){function 
g(){cd(this,g),this.clear(),this._defaultIndex=0,this._groupIndex=0,this._defaultGroups=[{border:"#2B7CE9",background:"#97C2FC",highlight:{border:"#2B7CE9",background:"#D2E5FF"},hover:{border:"#2B7CE9",background:"#D2E5FF"}},{border:"#FFA500",background:"#FFFF00",highlight:{border:"#FFA500",background:"#FFFFA3"},hover:{border:"#FFA500",background:"#FFFFA3"}},{border:"#FA0A10",background:"#FB7E81",highlight:{border:"#FA0A10",background:"#FFAFB1"},hover:{border:"#FA0A10",background:"#FFAFB1"}},{border:"#41A906",background:"#7BE141",highlight:{border:"#41A906",background:"#A1EC76"},hover:{border:"#41A906",background:"#A1EC76"}},{border:"#E129F0",background:"#EB7DF4",highlight:{border:"#E129F0",background:"#F0B3F5"},hover:{border:"#E129F0",background:"#F0B3F5"}},{border:"#7C29F0",background:"#AD85E4",highlight:{border:"#7C29F0",background:"#D3BDF0"},hover:{border:"#7C29F0",background:"#D3BDF0"}},{border:"#C37F00",background:"#FFA807",highlight:{border:"#C37F00",background:"#FFCA66"},hover:{border:"#C37F00",background:"#FFCA66"}},{border:"#4220FB",background:"#6E6EFD",highlight:{border:"#4220FB",background:"#9B9BFD"},hover:{border:"#4220FB",background:"#9B9BFD"}},{border:"#FD5A77",background:"#FFC0CB",highlight:{border:"#FD5A77",background:"#FFD1D9"},hover:{border:"#FD5A77",background:"#FFD1D9"}},{border:"#4AD63A",background:"#C2FABC",highlight:{border:"#4AD63A",background:"#E6FFE3"},hover:{border:"#4AD63A",background:"#E6FFE3"}},{border:"#990000",background:"#EE0000",highlight:{border:"#BB0000",background:"#FF3333"},hover:{border:"#BB0000",background:"#FF3333"}},{border:"#FF6000",background:"#FF6000",highlight:{border:"#FF6000",background:"#FF6000"},hover:{border:"#FF6000",background:"#FF6000"}},{border:"#97C2FC",background:"#2B7CE9",highlight:{border:"#D2E5FF",background:"#2B7CE9"},hover:{border:"#D2E5FF",background:"#2B7CE9"}},{border:"#399605",background:"#255C03",highlight:{border:"#399605",background:"#255C03"},hover:{border:"#399605",background:"#255C03"}},{border:"#B70054",background:"#FF007E",highlight:{border:"#B70054",background:"#FF007E"},hover:{border:"#B70054",background:"#FF007E"}},{border:"#AD85E4",background:"#7C29F0",highlight:{border:"#D3BDF0",background:"#7C29F0"},hover:{border:"#D3BDF0",background:"#7C29F0"}},{border:"#4557FA",background:"#000EA1",highlight:{border:"#6E6EFD",background:"#000EA1"},hover:{border:"#6E6EFD",background:"#000EA1"}},{border:"#FFC0CB",background:"#FD5A77",highlight:{border:"#FFD1D9",background:"#FD5A77"},hover:{border:"#FFD1D9",background:"#FD5A77"}},{border:"#C2FABC",background:"#74D66A",highlight:{border:"#E6FFE3",background:"#74D66A"},hover:{border:"#E6FFE3",background:"#74D66A"}},{border:"#EE0000",background:"#990000",highlight:{border:"#FF3333",background:"#BB0000"},hover:{border:"#FF3333",background:"#BB0000"}}],this.options={},this.defaultOptions={useDefaultGroups:!0},yA(this.options,this.defaultOptions)}return Bh(g,[{key:"setOptions",value:function(g){var t=["useDefaultGroups"];if(void 0!==g)for(var e in g)if(Object.prototype.hasOwnProperty.call(g,e)&&-1===$p(t).call(t,e)){var A=g[e];this.add(e,A)}}},{key:"clear",value:function(){this._groups=new Ex,this._groupNames=[]}},{key:"get",value:function(g){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1],e=this._groups.get(g);if(void 0===e&&t)if(!1===this.options.useDefaultGroups&&this._groupNames.length>0){var A=this._groupIndex%this._groupNames.length;++this._groupIndex,(e={}).color=this._groups.get(this._groupNames[A]),this._groups.set(g,e)}else{var 
C=this._defaultIndex%this._defaultGroups.length;this._defaultIndex++,(e={}).color=this._defaultGroups[C],this._groups.set(g,e)}return e}},{key:"add",value:function(g,t){return this._groups.has(g)||this._groupNames.push(g),this._groups.set(g,t),t}}]),g}(),Tx={},Dx={get exports(){return Tx},set exports(g){Tx=g}};De({target:"Number",stat:!0},{isNaN:function(g){return g!=g}});var Nx=Ag.Number.isNaN;!function(g){g.exports=Nx}(Dx);var Rx=e(Tx),Px={},Mx={get exports(){return Px},set exports(g){Px=g}},Bx=i.isFinite,zx=Number.isFinite||function(g){return"number"==typeof g&&Bx(g)};De({target:"Number",stat:!0},{isFinite:zx});var Sx=Ag.Number.isFinite;!function(g){g.exports=Sx}(Mx);var Zx=e(Px),Fx={},Gx={get exports(){return Fx},set exports(g){Fx=g}},jx=ur.some;De({target:"Array",proto:!0,forced:!cc("some")},{some:function(g){return jx(this,g,arguments.length>1?arguments[1]:void 0)}});var Lx=FA("Array").some,Vx=rg,Yx=Lx,Wx=Array.prototype,Qx=function(g){var t=g.some;return g===Wx||Vx(Wx,g)&&t===Wx.some?Yx:t},Ux=Qx;!function(g){g.exports=Ux}(Gx);var _x=e(Fx);function Hx(g){if(void 0===g)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return g}var Kx={},Xx={get exports(){return Kx},set exports(g){Kx=g}},Jx={},qx=uf;!function(g){g.exports=qx}({get exports(){return Jx},set exports(g){Jx=g}}),function(g){g.exports=Jx}(Xx);var $x=e(Kx),gk={},tk={get exports(){return gk},set exports(g){gk=g}},ek={},Ak={get exports(){return ek},set exports(g){ek=g}};De({target:"Object",stat:!0},{setPrototypeOf:ui});var Ck=Ag.Object.setPrototypeOf;!function(g){g.exports=Ck}(Ak),function(g){g.exports=ek}(tk);var Ik=e(gk),ik={},ok={get exports(){return ik},set exports(g){ik=g}},nk={},rk=WA;!function(g){g.exports=rk}({get exports(){return nk},set exports(g){nk=g}}),function(g){g.exports=nk}(ok);var sk=e(ik);function ak(g,t){var e;return ak=Ik?sk(e=Ik).call(e):function(g,t){return g.__proto__=t,g},ak(g,t)}function dk(g,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function");g.prototype=$x(t&&t.prototype,{constructor:{value:g,writable:!0,configurable:!0}}),yd(g,"prototype",{writable:!1}),t&&ak(g,t)}function hk(g,t){if(t&&("object"===kh(t)||"function"==typeof t))return t;if(void 0!==t)throw new TypeError("Derived constructors may only return object or undefined");return Hx(g)}var lk={},ck={get exports(){return lk},set exports(g){lk=g}},uk={},pk=Uu;!function(g){g.exports=pk}({get exports(){return uk},set exports(g){uk=g}}),function(g){g.exports=uk}(ck);var fk=e(lk);function vk(g){var t;return vk=Ik?sk(t=fk).call(t):function(g){return g.__proto__||fk(g)},vk(g)}var yk={},mk={get exports(){return yk},set exports(g){yk=g}},bk={};!function(g){var t=md,e=fh;function A(C){return g.exports=A="function"==typeof t&&"symbol"==typeof e?function(g){return typeof g}:function(g){return g&&"function"==typeof t&&g.constructor===t&&g!==t.prototype?"symbol":typeof g},g.exports.__esModule=!0,g.exports.default=g.exports,A(C)}g.exports=A,g.exports.__esModule=!0,g.exports.default=g.exports}({get exports(){return bk},set exports(g){bk=g}});var wk={},xk={get exports(){return wk},set exports(g){wk=g}},kk={},Ek=kc;!function(g){g.exports=Ek}({get exports(){return kk},set exports(g){kk=g}}),function(g){g.exports=kk}(xk);var Ok={},Tk={get exports(){return Ok},set exports(g){Ok=g}},Dk={},Nk={get exports(){return Dk},set exports(g){Dk=g}},Rk=$g,Pk=ja,Mk=N,Bk=qt,zk=eg,Sk=pe,Zk=Error,Fk=f("".replace),Gk=String(Zk("zxcasd").stack),jk=/\n\s*at 
[^:]*:[^\n]*/,Lk=jk.test(Gk),Vk=L,Yk=!o((function(){var g=Error("a");return!("stack"in g)||(Object.defineProperty(g,"stack",Vk(1,7)),7!==g.stack)})),Wk=pe,Qk=function(g,t){if(Lk&&"string"==typeof g&&!Zk.prepareStackTrace)for(;t--;)g=Fk(g,jk,"");return g},Uk=Yk,_k=Error.captureStackTrace,Hk=pC,Kk=De,Xk=rg,Jk=MI,qk=ui,$k=function(g,t,e){for(var A=Pk(t),C=Bk.f,I=Mk.f,i=0;i2&&AE(e,arguments[2]);var C=[];return IE(g,rE,{that:C}),tE(e,"errors",C),e};qk?qk(sE,nE):$k(sE,nE,{name:!0});var aE=sE.prototype=gE(nE.prototype,{constructor:eE(1,sE),message:eE(1,""),name:eE(1,"AggregateError")});Kk({global:!0,constructor:!0,arity:2},{AggregateError:sE});var dE,hE,lE,cE,uE="undefined"!=typeof process&&"process"==b(process),pE=co,fE=Tg,vE=TypeError,yE=function(g){if(pE(g))return g;throw vE(fE(g)+" is not a constructor")},mE=Ae,bE=yE,wE=_,xE=ht("species"),kE=function(g,t){var e,A=mE(g).constructor;return void 0===A||wE(e=mE(A)[xE])?t:bE(e)},EE=/(?:ipad|iphone|ipod).*applewebkit/i.test(sg),OE=i,TE=h,DE=Jt,NE=D,RE=$g,PE=o,ME=oI,BE=wA,zE=Ot,SE=Df,ZE=EE,FE=uE,GE=OE.setImmediate,jE=OE.clearImmediate,LE=OE.process,VE=OE.Dispatch,YE=OE.Function,WE=OE.MessageChannel,QE=OE.String,UE=0,_E={},HE="onreadystatechange";PE((function(){dE=OE.location}));var KE=function(g){if(RE(_E,g)){var t=_E[g];delete _E[g],t()}},XE=function(g){return function(){KE(g)}},JE=function(g){KE(g.data)},qE=function(g){OE.postMessage(QE(g),dE.protocol+"//"+dE.host)};GE&&jE||(GE=function(g){SE(arguments.length,1);var t=NE(g)?g:YE(g),e=BE(arguments,1);return _E[++UE]=function(){TE(t,void 0,e)},hE(UE),UE},jE=function(g){delete _E[g]},FE?hE=function(g){LE.nextTick(XE(g))}:VE&&VE.now?hE=function(g){VE.now(XE(g))}:WE&&!ZE?(cE=(lE=new WE).port2,lE.port1.onmessage=JE,hE=DE(cE.postMessage,cE)):OE.addEventListener&&NE(OE.postMessage)&&!OE.importScripts&&dE&&"file:"!==dE.protocol&&!PE(qE)?(hE=qE,OE.addEventListener("message",JE,!1)):hE=HE in zE("script")?function(g){ME.appendChild(zE("script"))[HE]=function(){ME.removeChild(this),KE(g)}}:function(g){setTimeout(XE(g),0)});var $E={set:GE,clear:jE},gO=function(){this.head=null,this.tail=null};gO.prototype={add:function(g){var t={item:g,next:null},e=this.tail;e?e.next=t:this.head=t,this.tail=t},get:function(){var g=this.head;if(g)return null===(this.head=g.next)&&(this.tail=null),g.item}};var tO,eO,AO,CO,IO,iO=gO,oO=/ipad|iphone|ipod/i.test(sg)&&"undefined"!=typeof Pebble,nO=/web0s(?!.*chrome)/i.test(sg),rO=i,sO=Jt,aO=N.f,dO=$E.set,hO=iO,lO=EE,cO=oO,uO=nO,pO=uE,fO=rO.MutationObserver||rO.WebKitMutationObserver,vO=rO.document,yO=rO.process,mO=rO.Promise,bO=aO(rO,"queueMicrotask"),wO=bO&&bO.value;if(!wO){var xO=new hO,kO=function(){var g,t;for(pO&&(g=yO.domain)&&g.exit();t=xO.get();)try{t()}catch(g){throw xO.head&&tO(),g}g&&g.enter()};lO||pO||uO||!fO||!vO?!cO&&mO&&mO.resolve?((CO=mO.resolve(void 0)).constructor=mO,IO=sO(CO.then,CO),tO=function(){IO(kO)}):pO?tO=function(){yO.nextTick(kO)}:(dO=sO(dO,rO),tO=function(){dO(kO)}):(eO=!0,AO=vO.createTextNode(""),new fO(kO).observe(AO,{characterData:!0}),tO=function(){AO.data=eO=!eO}),wO=function(g){xO.head||tO(),xO.add(g)}}var EO=wO,OO=function(g){try{return{error:!1,value:g()}}catch(g){return{error:!0,value:g}}},TO=i.Promise,DO="object"==typeof Deno&&Deno&&"object"==typeof Deno.version,NO=!DO&&!uE&&"object"==typeof window&&"object"==typeof document,RO=i,PO=TO,MO=D,BO=_t,zO=go,SO=ht,ZO=NO,FO=DO,GO=pg,jO=PO&&PO.prototype,LO=SO("species"),VO=!1,YO=MO(RO.PromiseRejectionEvent),WO=BO("Promise",(function(){var 
g=zO(PO),t=g!==String(PO);if(!t&&66===GO)return!0;if(!jO.catch||!jO.finally)return!0;if(!GO||GO<51||!/native code/.test(g)){var e=new PO((function(g){g(1)})),A=function(g){g((function(){}),(function(){}))};if((e.constructor={})[LO]=A,!(VO=e.then((function(){}))instanceof A))return!0}return!t&&(ZO||FO)&&!YO})),QO={CONSTRUCTOR:WO,REJECTION_EVENT:YO,SUBCLASSING:VO},UO={},_O=Pg,HO=TypeError,KO=function(g){var t,e;this.promise=new g((function(g,A){if(void 0!==t||void 0!==e)throw HO("Bad Promise constructor");t=g,e=A})),this.resolve=_O(t),this.reject=_O(e)};UO.f=function(g){return new KO(g)};var XO,JO,qO=De,$O=uE,gT=i,tT=B,eT=zI,AT=gi,CT=rx,IT=Pg,iT=D,oT=eg,nT=jw,rT=kE,sT=$E.set,aT=EO,dT=function(g,t){try{1==arguments.length?console.error(g):console.error(g,t)}catch(g){}},hT=OO,lT=iO,cT=UC,uT=TO,pT=UO,fT="Promise",vT=QO.CONSTRUCTOR,yT=QO.REJECTION_EVENT,mT=cT.getterFor(fT),bT=cT.set,wT=uT&&uT.prototype,xT=uT,kT=wT,ET=gT.TypeError,OT=gT.document,TT=gT.process,DT=pT.f,NT=DT,RT=!!(OT&&OT.createEvent&&gT.dispatchEvent),PT="unhandledrejection",MT=function(g){var t;return!(!oT(g)||!iT(t=g.then))&&t},BT=function(g,t){var e,A,C,I=t.value,i=1==t.state,o=i?g.ok:g.fail,n=g.resolve,r=g.reject,s=g.domain;try{o?(i||(2===t.rejection&>(t),t.rejection=1),!0===o?e=I:(s&&s.enter(),e=o(I),s&&(s.exit(),C=!0)),e===g.promise?r(ET("Promise-chain cycle")):(A=MT(e))?tT(A,e,n,r):n(e)):r(I)}catch(g){s&&!C&&s.exit(),r(g)}},zT=function(g,t){g.notified||(g.notified=!0,aT((function(){for(var e,A=g.reactions;e=A.get();)BT(e,g);g.notified=!1,t&&!g.rejection&&ZT(g)})))},ST=function(g,t,e){var A,C;RT?((A=OT.createEvent("Event")).promise=t,A.reason=e,A.initEvent(g,!1,!0),gT.dispatchEvent(A)):A={promise:t,reason:e},!yT&&(C=gT["on"+g])?C(A):g===PT&&dT("Unhandled promise rejection",e)},ZT=function(g){tT(sT,gT,(function(){var t,e=g.facade,A=g.value;if(FT(g)&&(t=hT((function(){$O?TT.emit("unhandledRejection",A,e):ST(PT,e,A)})),g.rejection=$O||FT(g)?2:1,t.error))throw t.value}))},FT=function(g){return 1!==g.rejection&&!g.parent},GT=function(g){tT(sT,gT,(function(){var t=g.facade;$O?TT.emit("rejectionHandled",t):ST("rejectionhandled",t,g.value)}))},jT=function(g,t,e){return function(A){g(t,A,e)}},LT=function(g,t,e){g.done||(g.done=!0,e&&(g=e),g.value=t,g.state=2,zT(g,!0))},VT=function(g,t,e){if(!g.done){g.done=!0,e&&(g=e);try{if(g.facade===t)throw ET("Promise can't be resolved itself");var A=MT(t);A?aT((function(){var e={done:!1};try{tT(A,t,jT(VT,e,g),jT(LT,e,g))}catch(t){LT(e,t,g)}})):(g.value=t,g.state=1,zT(g,!1))}catch(t){LT({done:!1},t,g)}}};vT&&(kT=(xT=function(g){nT(this,kT),IT(g),tT(XO,this);var t=mT(this);try{g(jT(VT,t),jT(LT,t))}catch(g){LT(t,g)}}).prototype,(XO=function(g){bT(this,{type:fT,done:!1,notified:!1,parent:!1,reactions:new lT,rejection:!1,state:0,value:void 0})}).prototype=eT(kT,"then",(function(g,t){var e=mT(this),A=DT(rT(this,xT));return e.parent=!0,A.ok=!iT(g)||g,A.fail=iT(t)&&t,A.domain=$O?TT.domain:void 0,0==e.state?e.reactions.add(A):aT((function(){BT(A,e)})),A.promise})),JO=function(){var g=new XO,t=mT(g);this.promise=g,this.resolve=jT(VT,t),this.reject=jT(LT,t)},pT.f=DT=function(g){return g===xT||undefined===g?new JO(g):NT(g)}),qO({global:!0,constructor:!0,wrap:!0,forced:vT},{Promise:xT}),AT(xT,fT,!1,!0),CT(fT);var YT=TO,WT=QO.CONSTRUCTOR||!Ho((function(g){YT.all(g).then(void 0,(function(){}))})),QT=B,UT=Pg,_T=UO,HT=OO,KT=Zw;De({target:"Promise",stat:!0,forced:WT},{all:function(g){var t=this,e=_T.f(t),A=e.resolve,C=e.reject,I=HT((function(){var e=UT(t.resolve),I=[],i=0,o=1;KT(g,(function(g){var 
n=i++,r=!1;o++,QT(e,t,g).then((function(g){r||(r=!0,I[n]=g,--o||A(I))}),C)})),--o||A(I)}));return I.error&&C(I.value),e.promise}});var XT=De,JT=QO.CONSTRUCTOR;TO&&TO.prototype,XT({target:"Promise",proto:!0,forced:JT,real:!0},{catch:function(g){return this.then(void 0,g)}});var qT=B,$T=Pg,gD=UO,tD=OO,eD=Zw;De({target:"Promise",stat:!0,forced:WT},{race:function(g){var t=this,e=gD.f(t),A=e.reject,C=tD((function(){var C=$T(t.resolve);eD(g,(function(g){qT(C,t,g).then(e.resolve,A)}))}));return C.error&&A(C.value),e.promise}});var AD=B,CD=UO;De({target:"Promise",stat:!0,forced:QO.CONSTRUCTOR},{reject:function(g){var t=CD.f(this);return AD(t.reject,void 0,g),t.promise}});var ID=Ae,iD=eg,oD=UO,nD=function(g,t){if(ID(g),iD(t)&&t.constructor===g)return t;var e=oD.f(g);return(0,e.resolve)(t),e.promise},rD=De,sD=TO,aD=QO.CONSTRUCTOR,dD=nD,hD=ng("Promise"),lD=!aD;rD({target:"Promise",stat:!0,forced:true},{resolve:function(g){return dD(lD&&this===hD?sD:this,g)}});var cD=B,uD=Pg,pD=UO,fD=OO,vD=Zw;De({target:"Promise",stat:!0,forced:WT},{allSettled:function(g){var t=this,e=pD.f(t),A=e.resolve,C=e.reject,I=fD((function(){var e=uD(t.resolve),C=[],I=0,i=1;vD(g,(function(g){var o=I++,n=!1;i++,cD(e,t,g).then((function(g){n||(n=!0,C[o]={status:"fulfilled",value:g},--i||A(C))}),(function(g){n||(n=!0,C[o]={status:"rejected",reason:g},--i||A(C))}))})),--i||A(C)}));return I.error&&C(I.value),e.promise}});var yD=B,mD=Pg,bD=ng,wD=UO,xD=OO,kD=Zw,ED="No one promise resolved";De({target:"Promise",stat:!0,forced:WT},{any:function(g){var t=this,e=bD("AggregateError"),A=wD.f(t),C=A.resolve,I=A.reject,i=xD((function(){var A=mD(t.resolve),i=[],o=0,n=1,r=!1;kD(g,(function(g){var s=o++,a=!1;n++,yD(A,t,g).then((function(g){a||r||(r=!0,C(g))}),(function(g){a||r||(a=!0,i[s]=g,--n||I(new e(i,ED)))}))})),--n||I(new e(i,ED))}));return i.error&&I(i.value),A.promise}});var OD=De,TD=TO,DD=o,ND=ng,RD=D,PD=kE,MD=nD,BD=TD&&TD.prototype;OD({target:"Promise",proto:!0,real:!0,forced:!!TD&&DD((function(){BD.finally.call({then:function(){}},(function(){}))}))},{finally:function(g){var t=PD(this,ND("Promise")),e=RD(g);return this.then(e?function(e){return MD(t,g()).then((function(){return e}))}:g,e?function(e){return MD(t,g()).then((function(){throw e}))}:g)}});var zD=Ag.Promise,SD=UO,ZD=OO;De({target:"Promise",stat:!0,forced:!0},{try:function(g){var t=SD.f(this),e=ZD(g);return(e.error?t.reject:t.resolve)(e.value),t.promise}});var FD=zD;!function(g){g.exports=FD}(Nk),function(g){g.exports=Dk}(Tk);var GD={},jD={get exports(){return GD},set exports(g){GD=g}},LD={},VD=Fc;!function(g){g.exports=VD}({get exports(){return LD},set exports(g){LD=g}}),function(g){g.exports=LD}(jD),function(g){var t=bk.default,e=ud,A=md,C=Kx,I=lk,i=wk,o=gk,n=Ok,r=GD,s=Yh;function a(){g.exports=a=function(){return d},g.exports.__esModule=!0,g.exports.default=g.exports;var d={},h=Object.prototype,l=h.hasOwnProperty,c=e||function(g,t,e){g[t]=e.value},u="function"==typeof A?A:{},p=u.iterator||"@@iterator",f=u.asyncIterator||"@@asyncIterator",v=u.toStringTag||"@@toStringTag";function y(g,t,A){return e(g,t,{value:A,enumerable:!0,configurable:!0,writable:!0}),g[t]}try{y({},"")}catch(g){y=function(g,t,e){return g[t]=e}}function m(g,t,e,A){var I=t&&t.prototype instanceof x?t:x,i=C(I.prototype),o=new S(A||[]);return c(i,"_invoke",{value:P(g,e,o)}),i}function b(g,t,e){try{return{type:"normal",arg:g.call(t,e)}}catch(g){return{type:"throw",arg:g}}}d.wrap=m;var w={};function x(){}function k(){}function E(){}var O={};y(O,p,(function(){return this}));var 
T=I&&I(I(Z([])));T&&T!==h&&l.call(T,p)&&(O=T);var D=E.prototype=x.prototype=C(O);function N(g){var t;i(t=["next","throw","return"]).call(t,(function(t){y(g,t,(function(g){return this._invoke(t,g)}))}))}function R(g,e){function A(C,I,i,o){var n=b(g[C],g,I);if("throw"!==n.type){var r=n.arg,s=r.value;return s&&"object"==t(s)&&l.call(s,"__await")?e.resolve(s.__await).then((function(g){A("next",g,i,o)}),(function(g){A("throw",g,i,o)})):e.resolve(s).then((function(g){r.value=g,i(r)}),(function(g){return A("throw",g,i,o)}))}o(n.arg)}var C;c(this,"_invoke",{value:function(g,t){function I(){return new e((function(e,C){A(g,t,e,C)}))}return C=C?C.then(I,I):I()}})}function P(g,t,e){var A="suspendedStart";return function(C,I){if("executing"===A)throw new Error("Generator is already running");if("completed"===A){if("throw"===C)throw I;return F()}for(e.method=C,e.arg=I;;){var i=e.delegate;if(i){var o=M(i,e);if(o){if(o===w)continue;return o}}if("next"===e.method)e.sent=e._sent=e.arg;else if("throw"===e.method){if("suspendedStart"===A)throw A="completed",e.arg;e.dispatchException(e.arg)}else"return"===e.method&&e.abrupt("return",e.arg);A="executing";var n=b(g,t,e);if("normal"===n.type){if(A=e.done?"completed":"suspendedYield",n.arg===w)continue;return{value:n.arg,done:e.done}}"throw"===n.type&&(A="completed",e.method="throw",e.arg=n.arg)}}}function M(g,t){var e=t.method,A=g.iterator[e];if(void 0===A)return t.delegate=null,"throw"===e&&g.iterator.return&&(t.method="return",t.arg=void 0,M(g,t),"throw"===t.method)||"return"!==e&&(t.method="throw",t.arg=new TypeError("The iterator does not provide a '"+e+"' method")),w;var C=b(A,g.iterator,t.arg);if("throw"===C.type)return t.method="throw",t.arg=C.arg,t.delegate=null,w;var I=C.arg;return I?I.done?(t[g.resultName]=I.value,t.next=g.nextLoc,"return"!==t.method&&(t.method="next",t.arg=void 0),t.delegate=null,w):I:(t.method="throw",t.arg=new TypeError("iterator result is not an object"),t.delegate=null,w)}function B(g){var t={tryLoc:g[0]};1 in g&&(t.catchLoc=g[1]),2 in g&&(t.finallyLoc=g[2],t.afterLoc=g[3]),this.tryEntries.push(t)}function z(g){var t=g.completion||{};t.type="normal",delete t.arg,g.completion=t}function S(g){this.tryEntries=[{tryLoc:"root"}],i(g).call(g,B,this),this.reset(!0)}function Z(g){if(g){var t=g[p];if(t)return t.call(g);if("function"==typeof g.next)return g;if(!isNaN(g.length)){var e=-1,A=function t(){for(;++e=0;--A){var C=this.tryEntries[A],I=C.completion;if("root"===C.tryLoc)return e("end");if(C.tryLoc<=this.prev){var i=l.call(C,"catchLoc"),o=l.call(C,"finallyLoc");if(i&&o){if(this.prev=0;--e){var A=this.tryEntries[e];if(A.tryLoc<=this.prev&&l.call(A,"finallyLoc")&&this.prev=0;--t){var e=this.tryEntries[t];if(e.finallyLoc===g)return this.complete(e.completion,e.afterLoc),z(e),w}},catch:function(g){for(var t=this.tryEntries.length-1;t>=0;--t){var e=this.tryEntries[t];if(e.tryLoc===g){var A=e.completion;if("throw"===A.type){var C=A.arg;z(e)}return C}}throw new Error("illegal catch attempt")},delegateYield:function(g,t,e){return this.delegate={iterator:Z(g),resultName:t,nextLoc:e},"next"===this.method&&(this.arg=void 0),w}},d}g.exports=a,g.exports.__esModule=!0,g.exports.default=g.exports}(mk);var YD=yk(),WD=YD;try{regeneratorRuntime=YD}catch(g){"object"==typeof globalThis?globalThis.regeneratorRuntime=YD:Function("r","regeneratorRuntime = r")(YD)}var QD={},UD={get exports(){return QD},set exports(g){QD=g}},_D=Pg,HD=Xg,KD=U,XD=Le,JD=TypeError,qD=function(g){return function(t,e,A,C){_D(e);var 
I=HD(t),i=KD(I),o=XD(I),n=g?o-1:0,r=g?-1:1;if(A<2)for(;;){if(n in i){C=i[n],n+=r;break}if(n+=r,g?n<0:o<=n)throw JD("Reduce of empty array with no initial value")}for(;g?n>=0:o>n;n+=r)n in i&&(C=e(C,i[n],n,I));return C}},$D={left:qD(!1),right:qD(!0)}.left;De({target:"Array",proto:!0,forced:!uE&&pg>79&&pg<83||!cc("reduce")},{reduce:function(g){var t=arguments.length;return $D(this,g,t,t>1?arguments[1]:void 0)}});var gN=FA("Array").reduce,tN=rg,eN=gN,AN=Array.prototype,CN=function(g){var t=g.reduce;return g===AN||tN(AN,g)&&t===AN.reduce?eN:t},IN=CN;!function(g){g.exports=IN}(UD);var iN=e(QD),oN={},nN={get exports(){return oN},set exports(g){oN=g}},rN=tr,sN=Le,aN=Ed,dN=Jt,hN=function(g,t,e,A,C,I,i,o){for(var n,r,s=C,a=0,d=!!i&&dN(i,o);a0&&rN(n)?(r=sN(n),s=hN(g,t,n,r,s,I-1)-1):(aN(s+1),g[s]=n),s++),a++;return s},lN=hN,cN=Pg,uN=Xg,pN=Le,fN=nr;De({target:"Array",proto:!0},{flatMap:function(g){var t,e=uN(this),A=pN(e);return cN(g),(t=fN(e,0)).length=lN(t,e,e,A,0,1,g,arguments.length>1?arguments[1]:void 0),t}});var vN=FA("Array").flatMap,yN=rg,mN=vN,bN=Array.prototype,wN=function(g){var t=g.flatMap;return g===bN||yN(bN,g)&&t===bN.flatMap?mN:t},xN=wN;!function(g){g.exports=xN}(nN);var kN=e(oN),EN={},ON={get exports(){return EN},set exports(g){EN=g}};ex("Set",(function(g){return function(){return g(this,arguments.length?arguments[0]:void 0)}}),xx);var TN=Ag.Set;!function(g){g.exports=TN}(ON);var DN=e(EN),NN={};!function(g){g.exports=bh}({get exports(){return NN},set exports(g){NN=g}});var RN=e(NN),PN={},MN={get exports(){return PN},set exports(g){PN=g}},BN={},zN=Po;!function(g){g.exports=zN}({get exports(){return BN},set exports(g){BN=g}}),function(g){g.exports=BN}(MN);var SN=e(PN),ZN={},FN={get exports(){return ZN},set exports(g){ZN=g}},GN=Bn,jN=Math.floor,LN=function(g,t){var e=g.length,A=jN(e/2);return e<8?VN(g,t):YN(g,LN(GN(g,0,A),t),LN(GN(g,A),t),t)},VN=function(g,t){for(var e,A,C=g.length,I=1;I0;)g[A]=g[--A];A!==I++&&(g[A]=e)}return g},YN=function(g,t,e,A){for(var C=t.length,I=e.length,i=0,o=0;i3)){if(oR)return!0;if(rR)return rR<603;var g,t,e,A,C="";for(g=65;g<76;g++){switch(t=String.fromCharCode(g),g){case 66:case 69:case 70:case 72:e=3;break;case 68:case 71:e=4;break;default:e=2}for(A=0;A<47;A++)sR.push({k:t+A,v:e})}for(sR.sort((function(g,t){return t.v-g.v})),A=0;AeR(e)?1:-1}}(g)),e=gR(C),A=0;A=g.length?{done:!0}:{done:!1,value:g[A++]}},e:function(g){throw g},f:C}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var I,i=!0,o=!1;return{s:function(){e=e.call(g)},n:function(){var g=e.next();return i=g.done,g},e:function(g){o=!0,I=g},f:function(){try{i||null==e.return||e.return()}finally{if(o)throw I}}}}function PP(g,t){(null==t||t>g.length)&&(t=g.length);for(var e=0,A=new Array(t);ethis.max&&this.flush(),null!=this._timeout&&(clearTimeout(this._timeout),this._timeout=null),this.queue.length>0&&"number"==typeof this.delay&&(this._timeout=_f((function(){g.flush()}),this.delay))}},{key:"flush",value:function(){var g,t;Ec(g=cu(t=this._queue).call(t,0)).call(g,(function(g){g.fn.apply(g.context||g.fn,g.args||[])}))}}],[{key:"extend",value:function(t,e){var A=new g(e);if(void 0!==t.flush)throw new Error("Target object already has a property flush");t.flush=function(){A.flush()};var C=[{name:"flush",original:void 0}];if(e&&e.replace)for(var I=0;IC&&(C=n,A=o)}return A}},{key:"min",value:function(g){var t=SN(this._pairs),e=t.next();if(e.done)return null;for(var 
A=e.value[1],C=g(e.value[1],e.value[0]);!(e=t.next()).done;){var I=bl(e.value,2),i=I[0],o=I[1],n=g(o,i);n1?e-1:0),C=1;CC?1:AC)&&(A=i,C=o)}}catch(g){I.e(g)}finally{I.f()}return A||null}},{key:"min",value:function(g){var t,e,A=null,C=null,I=RP(QR(t=this._data).call(t));try{for(I.s();!(e=I.n()).done;){var i=e.value,o=i[g];"number"==typeof o&&(null==C||ot.x&&g.topt.y}function cM(g){return"string"==typeof g&&""!==g}function uM(g,t,e,A){var C=A.x,I=A.y;if("function"==typeof A.distanceToBorder){var i=A.distanceToBorder(g,t),o=Math.sin(t)*i,n=Math.cos(t)*i;n===i?(C+=i,I=A.y):o===i?(C=A.x,I-=i):(C+=n,I-=o)}else A.shape.width>A.shape.height?(C=A.x+.5*A.shape.width,I=A.y-e):(C=A.x+e,I=A.y-.5*A.shape.height);return{x:C,y:I}}var pM=function(){function g(t){cd(this,g),this.measureText=t,this.current=0,this.width=0,this.height=0,this.lines=[]}return Bh(g,[{key:"_add",value:function(g,t){var e=arguments.length>2&&void 0!==arguments[2]?arguments[2]:"normal";void 0===this.lines[g]&&(this.lines[g]={width:0,height:0,blocks:[]});var A=t;void 0!==t&&""!==t||(A=" ");var C=this.measureText(A,e),I=yA({},QR(C));I.text=t,I.width=C.width,I.mod=e,void 0!==t&&""!==t||(I.width=0),this.lines[g].blocks.push(I),this.lines[g].width+=I.width}},{key:"curWidth",value:function(){var g=this.lines[this.current];return void 0===g?0:g.width}},{key:"append",value:function(g){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"normal";this._add(this.current,g,t)}},{key:"newLine",value:function(g){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"normal";this._add(this.current,g,t),this.current++}},{key:"determineLineHeights",value:function(){for(var g=0;gg&&(g=A.width),t+=A.height}this.width=g,this.height=t}},{key:"removeEmptyBlocks",value:function(){for(var g=[],t=0;t"://,""://,""://,"":/<\/b>/,"":/<\/i>/,"":/<\/code>/,"*":/\*/,_:/_/,"`":/`/,afterBold:/[^*]/,afterItal:/[^_]/,afterMono:/[^`]/},vM=function(){function g(t){cd(this,g),this.text=t,this.bold=!1,this.ital=!1,this.mono=!1,this.spacing=!1,this.position=0,this.buffer="",this.modStack=[],this.blocks=[]}return Bh(g,[{key:"mod",value:function(){return 0===this.modStack.length?"normal":this.modStack[0]}},{key:"modName",value:function(){return 0===this.modStack.length?"normal":"mono"===this.modStack[0]?"mono":this.bold&&this.ital?"boldital":this.bold?"bold":this.ital?"ital":void 0}},{key:"emitBlock",value:function(){this.spacing&&(this.add(" "),this.spacing=!1),this.buffer.length>0&&(this.blocks.push({text:this.buffer,mod:this.modName()}),this.buffer="")}},{key:"add",value:function(g){" "===g&&(this.spacing=!0),this.spacing&&(this.buffer+=" ",this.spacing=!1)," "!=g&&(this.buffer+=g)}},{key:"parseWS",value:function(g){return!!/[ \t]/.test(g)&&(this.mono?this.add(g):this.spacing=!0,!0)}},{key:"setTag",value:function(g){this.emitBlock(),this[g]=!0,this.modStack.unshift(g)}},{key:"unsetTag",value:function(g){this.emitBlock(),this[g]=!1,this.modStack.shift()}},{key:"parseStartTag",value:function(g,t){return!(this.mono||this[g]||!this.match(t))&&(this.setTag(g),!0)}},{key:"match",value:function(g){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1],e=bl(this.prepareRegExp(g),2),A=e[0],C=e[1],I=A.test(this.text.substr(this.position,C));return I&&t&&(this.position+=C-1),I}},{key:"parseEndTag",value:function(g,t,e){var A=this.mod()===g;return!(!(A="mono"===g?A&&this.mono:A&&!this.mono)||!this.match(t))&&(void 
0!==e?(this.position===this.text.length-1||this.match(e,!1))&&this.unsetTag(g):this.unsetTag(g),!0)}},{key:"replace",value:function(g,t){return!!this.match(g)&&(this.add(t),this.position+=length-1,!0)}},{key:"prepareRegExp",value:function(g){var t,e;if(g instanceof RegExp)e=g,t=1;else{var A=fM[g];e=void 0!==A?A:new RegExp(g),t=g.length}return[e,t]}}]),g}(),yM=function(){function g(t,e,A,C){var I=this;cd(this,g),this.ctx=t,this.parent=e,this.selected=A,this.hover=C;this.lines=new pM((function(g,e){if(void 0===g)return 0;var i=I.parent.getFormattingValues(t,A,C,e),o=0;""!==g&&(o=I.ctx.measureText(g).width);return{width:o,values:i}}))}return Bh(g,[{key:"process",value:function(g){if(!cM(g))return this.lines.finalize();var t=this.parent.fontOptions;g=(g=g.replace(/\r\n/g,"\n")).replace(/\r/g,"\n");var e=String(g).split("\n"),A=e.length;if(t.multi)for(var C=0;C0)for(var i=0;i0)for(var d=0;d")||t.parseStartTag("ital","")||t.parseStartTag("mono","")||t.parseEndTag("bold","")||t.parseEndTag("ital","")||t.parseEndTag("mono",""))||e(A)||t.add(A),t.position++}return t.emitBlock(),t.blocks}},{key:"splitMarkdownBlocks",value:function(g){for(var t=this,e=new vM(g),A=!0,C=function(g){return!!/\\/.test(g)&&(e.positionthis.parent.fontOptions.maxWdt}},{key:"getLongestFit",value:function(g){for(var t="",e=0;e1&&void 0!==arguments[1]?arguments[1]:"normal",e=arguments.length>2&&void 0!==arguments[2]&&arguments[2];this.parent.getFormattingValues(this.ctx,this.selected,this.hover,t);for(var A=(g=(g=g.replace(/^( +)/g,"$1\r")).replace(/([^\r][^ ]*)( +)/g,"$1\r$2\r")).split("\r");A.length>0;){var C=this.getLongestFit(A);if(0===C){var I=A[0],i=this.getLongestFitWord(I);this.lines.newLine(Sl(I).call(I,0,i),t),A[0]=Sl(I).call(I,i)}else{var o=C;" "===A[C-1]?C--:" "===A[o]&&o++;var n=Sl(A).call(A,0,C).join("");C==A.length&&e?this.lines.append(n,t):this.lines.newLine(n,t),A=Sl(A).call(A,o)}}}}]),g}(),mM=["bold","ital","boldital","mono"],bM=function(){function g(t,e){var A=arguments.length>2&&void 0!==arguments[2]&&arguments[2];cd(this,g),this.body=t,this.pointToSelf=!1,this.baseSize=void 0,this.fontOptions={},this.setOptions(e),this.size={top:0,left:0,width:0,height:0,yLine:0},this.isEdgeLabel=A}return Bh(g,[{key:"setOptions",value:function(g){if(this.elementOptions=g,this.initFontOptions(g.font),cM(g.label)?this.labelDirty=!0:g.label=void 0,void 0!==g.font&&null!==g.font)if("string"==typeof g.font)this.baseSize=this.fontOptions.size;else if("object"===kh(g.font)){var t=g.font.size;void 0!==t&&(this.baseSize=t)}}},{key:"initFontOptions",value:function(t){var e=this;Lm(mM,(function(g){e.fontOptions[g]={}})),g.parseFontString(this.fontOptions,t)?this.fontOptions.vadjust=0:Lm(t,(function(g,t){null!=g&&"object"!==kh(g)&&(e.fontOptions[t]=g)}))}},{key:"constrain",value:function(g){var t={constrainWidth:!1,maxWdt:-1,minWdt:-1,constrainHeight:!1,minHgt:-1,valign:"middle"},e=gb(g,"widthConstraint");if("number"==typeof e)t.maxWdt=Number(e),t.minWdt=Number(e);else if("object"===kh(e)){var A=gb(g,["widthConstraint","maximum"]);"number"==typeof A&&(t.maxWdt=Number(A));var C=gb(g,["widthConstraint","minimum"]);"number"==typeof C&&(t.minWdt=Number(C))}var I=gb(g,"heightConstraint");if("number"==typeof I)t.minHgt=Number(I);else if("object"===kh(I)){var i=gb(g,["heightConstraint","minimum"]);"number"==typeof i&&(t.minHgt=Number(i));var o=gb(g,["heightConstraint","valign"]);"string"==typeof o&&("top"!==o&&"bottom"!==o||(t.valign=o))}return 
t}},{key:"update",value:function(g,t){this.setOptions(g,!0),this.propagateFonts(t),Fm(this.fontOptions,this.constrain(t)),this.fontOptions.chooser=hM("label",t)}},{key:"adjustSizes",value:function(g){var t=g?g.right+g.left:0;this.fontOptions.constrainWidth&&(this.fontOptions.maxWdt-=t,this.fontOptions.minWdt-=t);var e=g?g.top+g.bottom:0;this.fontOptions.constrainHeight&&(this.fontOptions.minHgt-=e)}},{key:"addFontOptionsToPile",value:function(g,t){for(var e=0;e5&&void 0!==arguments[5]?arguments[5]:"middle";if(void 0!==this.elementOptions.label){var i=this.fontOptions.size*this.body.view.scale;this.elementOptions.label&&i=this.elementOptions.scaling.label.maxVisible&&(i=Number(this.elementOptions.scaling.label.maxVisible)/this.body.view.scale),this.calculateLabelSize(g,A,C,t,e,I),this._drawBackground(g),this._drawText(g,t,this.size.yLine,I,i))}}},{key:"_drawBackground",value:function(g){if(void 0!==this.fontOptions.background&&"none"!==this.fontOptions.background){g.fillStyle=this.fontOptions.background;var t=this.getSize();g.fillRect(t.left,t.top,t.width,t.height)}}},{key:"_drawText",value:function(g,t,e){var A=arguments.length>3&&void 0!==arguments[3]?arguments[3]:"middle",C=arguments.length>4?arguments[4]:void 0,I=bl(this._setAlignment(g,t,e,A),2);t=I[0],e=I[1],g.textAlign="left",t-=this.size.width/2,this.fontOptions.valign&&this.size.height>this.size.labelHeight&&("top"===this.fontOptions.valign&&(e-=(this.size.height-this.size.labelHeight)/2),"bottom"===this.fontOptions.valign&&(e+=(this.size.height-this.size.labelHeight)/2));for(var i=0;i0&&(g.lineWidth=s.strokeWidth,g.strokeStyle=h,g.lineJoin="round"),g.fillStyle=d,s.strokeWidth>0&&g.strokeText(s.text,t+n,e+s.vadjust),g.fillText(s.text,t+n,e+s.vadjust),n+=s.width}e+=o.height}}}},{key:"_setAlignment",value:function(g,t,e,A){if(this.isEdgeLabel&&"horizontal"!==this.fontOptions.align&&!1===this.pointToSelf){t=0,e=0;"top"===this.fontOptions.align?(g.textBaseline="alphabetic",e-=4):"bottom"===this.fontOptions.align?(g.textBaseline="hanging",e+=4):g.textBaseline="middle"}else g.textBaseline=A;return[t,e]}},{key:"_getColor",value:function(g,t,e){var A=g||"#000000",C=e||"#ffffff";if(t<=this.elementOptions.scaling.label.drawThreshold){var I=Math.max(0,Math.min(1,1-(this.elementOptions.scaling.label.drawThreshold-t)));A=Ym(A,I),C=Ym(C,I)}return[A,C]}},{key:"getTextSize",value:function(g){var t=arguments.length>1&&void 0!==arguments[1]&&arguments[1],e=arguments.length>2&&void 0!==arguments[2]&&arguments[2];return this._processLabel(g,t,e),{width:this.size.width,height:this.size.height,lineCount:this.lineCount}}},{key:"getSize",value:function(){var g=this.size.left,t=this.size.top-1;if(this.isEdgeLabel){var e=.5*-this.size.width;switch(this.fontOptions.align){case"middle":g=e,t=.5*-this.size.height;break;case"top":g=e,t=-(this.size.height+2);break;case"bottom":g=e,t=2}}return{left:g,top:t,width:this.size.width,height:this.size.height}}},{key:"calculateLabelSize",value:function(g,t,e){var A=arguments.length>3&&void 0!==arguments[3]?arguments[3]:0,C=arguments.length>4&&void 0!==arguments[4]?arguments[4]:0,I=arguments.length>5&&void 0!==arguments[5]?arguments[5]:"middle";this._processLabel(g,t,e),this.size.left=A-.5*this.size.width,this.size.top=C-.5*this.size.height,this.size.yLine=C+.5*(1-this.lineCount)*this.fontOptions.size,"hanging"===I&&(this.size.top+=.5*this.fontOptions.size,this.size.top+=4,this.size.yLine+=4)}},{key:"getFormattingValues",value:function(g,t,e,A){var C=function(g,t,e){return"normal"===t?"mod"===e?"":g[e]:void 
0!==g[t][e]?g[t][e]:g[e]},I={color:C(this.fontOptions,A,"color"),size:C(this.fontOptions,A,"size"),face:C(this.fontOptions,A,"face"),mod:C(this.fontOptions,A,"mod"),vadjust:C(this.fontOptions,A,"vadjust"),strokeWidth:this.fontOptions.strokeWidth,strokeColor:this.fontOptions.strokeColor};(t||e)&&("normal"===A&&!0===this.fontOptions.chooser&&this.elementOptions.labelHighlightBold?I.mod="bold":"function"==typeof this.fontOptions.chooser&&this.fontOptions.chooser(I,this.elementOptions.id,t,e));var i="";return void 0!==I.mod&&""!==I.mod&&(i+=I.mod+" "),i+=I.size+"px "+I.face,g.font=i.replace(/"/g,""),I.font=g.font,I.height=I.size,I}},{key:"differentState",value:function(g,t){return g!==this.selectedState||t!==this.hoverState}},{key:"_processLabelText",value:function(g,t,e,A){return new yM(g,this,t,e).process(A)}},{key:"_processLabel",value:function(g,t,e){if(!1!==this.labelDirty||this.differentState(t,e)){var A=this._processLabelText(g,t,e,this.elementOptions.label);this.fontOptions.minWdt>0&&A.width0&&A.height0&&(this.enableBorderDashes(g,t),g.stroke(),this.disableBorderDashes(g,t)),g.restore()}},{key:"performFill",value:function(g,t){g.save(),g.fillStyle=t.color,this.enableShadow(g,t),ov(g).call(g),this.disableShadow(g,t),g.restore(),this.performStroke(g,t)}},{key:"_addBoundingBoxMargin",value:function(g){this.boundingBox.left-=g,this.boundingBox.top-=g,this.boundingBox.bottom+=g,this.boundingBox.right+=g}},{key:"_updateBoundingBox",value:function(g,t,e,A,C){void 0!==e&&this.resize(e,A,C),this.left=g-this.width/2,this.top=t-this.height/2,this.boundingBox.left=this.left,this.boundingBox.top=this.top,this.boundingBox.bottom=this.top+this.height,this.boundingBox.right=this.left+this.width}},{key:"updateBoundingBox",value:function(g,t,e,A,C){this._updateBoundingBox(g,t,e,A,C)}},{key:"getDimensionsFromLabel",value:function(g,t,e){this.textSize=this.labelModule.getTextSize(g,t,e);var A=this.textSize.width,C=this.textSize.height;return 0===A&&(A=14,C=14),{width:A,height:C}}}]),g}();function xM(g){var t=function(){if("undefined"==typeof Reflect||!yP)return!1;if(yP.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(yP(Boolean,[],(function(){}))),!0}catch(g){return!1}}();return function(){var e,A=vk(g);if(t){var C=vk(this).constructor;e=yP(A,arguments,C)}else e=A.apply(this,arguments);return hk(this,e)}}var kM=function(g){dk(e,g);var t=xM(e);function e(g,A,C){var I;return cd(this,e),(I=t.call(this,g,A,C))._setMargins(C),I}return Bh(e,[{key:"resize",value:function(g){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:this.selected,e=arguments.length>2&&void 0!==arguments[2]?arguments[2]:this.hover;if(this.needsRefresh(t,e)){var A=this.getDimensionsFromLabel(g,t,e);this.width=A.width+this.margin.right+this.margin.left,this.height=A.height+this.margin.top+this.margin.bottom,this.radius=this.width/2}}},{key:"draw",value:function(g,t,e,A,C,I){this.resize(g,A,C),this.left=t-this.width/2,this.top=e-this.height/2,this.initContextForDraw(g,I),_A(g,this.left,this.top,this.width,this.height,I.borderRadius),this.performFill(g,I),this.updateBoundingBox(t,e,g,A,C),this.labelModule.draw(g,this.left+this.textSize.width/2+this.margin.left,this.top+this.textSize.height/2+this.margin.top,A,C)}},{key:"updateBoundingBox",value:function(g,t,e,A,C){this._updateBoundingBox(g,t,e,A,C);var I=this.options.shapeProperties.borderRadius;this._addBoundingBoxMargin(I)}},{key:"distanceToBorder",value:function(g,t){g&&this.resize(g);var e=this.options.borderWidth;return 
Math.min(Math.abs(this.width/2/Math.cos(t)),Math.abs(this.height/2/Math.sin(t)))+e}}]),e}(wM);function EM(g){var t=function(){if("undefined"==typeof Reflect||!yP)return!1;if(yP.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(yP(Boolean,[],(function(){}))),!0}catch(g){return!1}}();return function(){var e,A=vk(g);if(t){var C=vk(this).constructor;e=yP(A,arguments,C)}else e=A.apply(this,arguments);return hk(this,e)}}var OM=function(g){dk(e,g);var t=EM(e);function e(g,A,C){var I;return cd(this,e),(I=t.call(this,g,A,C)).labelOffset=0,I.selected=!1,I}return Bh(e,[{key:"setOptions",value:function(g,t,e){this.options=g,void 0===t&&void 0===e||this.setImages(t,e)}},{key:"setImages",value:function(g,t){t&&this.selected?(this.imageObj=t,this.imageObjAlt=g):(this.imageObj=g,this.imageObjAlt=t)}},{key:"switchImages",value:function(g){var t=g&&!this.selected||!g&&this.selected;if(this.selected=g,void 0!==this.imageObjAlt&&t){var e=this.imageObj;this.imageObj=this.imageObjAlt,this.imageObjAlt=e}}},{key:"_getImagePadding",value:function(){var g={top:0,right:0,bottom:0,left:0};if(this.options.imagePadding){var t=this.options.imagePadding;"object"==kh(t)?(g.top=t.top,g.right=t.right,g.bottom=t.bottom,g.left=t.left):(g.top=t,g.right=t,g.bottom=t,g.left=t)}return g}},{key:"_resizeImage",value:function(){var g,t;if(!1===this.options.shapeProperties.useImageSize){var e=1,A=1;this.imageObj.width&&this.imageObj.height&&(this.imageObj.width>this.imageObj.height?e=this.imageObj.width/this.imageObj.height:A=this.imageObj.height/this.imageObj.width),g=2*this.options.size*e,t=2*this.options.size*A}else{var C=this._getImagePadding();g=this.imageObj.width+C.left+C.right,t=this.imageObj.height+C.top+C.bottom}this.width=g,this.height=t,this.radius=.5*this.width}},{key:"_drawRawCircle",value:function(g,t,e,A){this.initContextForDraw(g,A),UA(g,t,e,A.size),this.performFill(g,A)}},{key:"_drawImageAtPosition",value:function(g,t){if(0!=this.imageObj.width){g.globalAlpha=void 0!==t.opacity?t.opacity:1,this.enableShadow(g,t);var e=1;!0===this.options.shapeProperties.interpolation&&(e=this.imageObj.width/this.width/this.body.view.scale);var A=this._getImagePadding(),C=this.left+A.left,I=this.top+A.top,i=this.width-A.left-A.right,o=this.height-A.top-A.bottom;this.imageObj.drawImageAtPosition(g,e,C,I,i,o),this.disableShadow(g,t)}}},{key:"_drawImageLabel",value:function(g,t,e,A,C){var I=0;if(void 0!==this.height){I=.5*this.height;var i=this.labelModule.getTextSize(g,A,C);i.lineCount>=1&&(I+=i.height/2)}var o=e+I;this.options.label&&(this.labelOffset=I),this.labelModule.draw(g,t,o,A,C,"hanging")}}]),e}(wM);function TM(g){var t=function(){if("undefined"==typeof Reflect||!yP)return!1;if(yP.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(yP(Boolean,[],(function(){}))),!0}catch(g){return!1}}();return function(){var e,A=vk(g);if(t){var C=vk(this).constructor;e=yP(A,arguments,C)}else e=A.apply(this,arguments);return hk(this,e)}}var DM=function(g){dk(e,g);var t=TM(e);function e(g,A,C){var I;return cd(this,e),(I=t.call(this,g,A,C))._setMargins(C),I}return Bh(e,[{key:"resize",value:function(g){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:this.selected,e=arguments.length>2&&void 0!==arguments[2]?arguments[2]:this.hover;if(this.needsRefresh(t,e)){var 
A=this.getDimensionsFromLabel(g,t,e),C=Math.max(A.width+this.margin.right+this.margin.left,A.height+this.margin.top+this.margin.bottom);this.options.size=C/2,this.width=C,this.height=C,this.radius=this.width/2}}},{key:"draw",value:function(g,t,e,A,C,I){this.resize(g,A,C),this.left=t-this.width/2,this.top=e-this.height/2,this._drawRawCircle(g,t,e,I),this.updateBoundingBox(t,e),this.labelModule.draw(g,this.left+this.textSize.width/2+this.margin.left,e,A,C)}},{key:"updateBoundingBox",value:function(g,t){this.boundingBox.top=t-this.options.size,this.boundingBox.left=g-this.options.size,this.boundingBox.right=g+this.options.size,this.boundingBox.bottom=t+this.options.size}},{key:"distanceToBorder",value:function(g){return g&&this.resize(g),.5*this.width}}]),e}(OM);function NM(g){var t=function(){if("undefined"==typeof Reflect||!yP)return!1;if(yP.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(yP(Boolean,[],(function(){}))),!0}catch(g){return!1}}();return function(){var e,A=vk(g);if(t){var C=vk(this).constructor;e=yP(A,arguments,C)}else e=A.apply(this,arguments);return hk(this,e)}}var RM=function(g){dk(e,g);var t=NM(e);function e(g,A,C,I,i){var o;return cd(this,e),(o=t.call(this,g,A,C)).setImages(I,i),o}return Bh(e,[{key:"resize",value:function(g){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:this.selected,e=arguments.length>2&&void 0!==arguments[2]?arguments[2]:this.hover;if(void 0===this.imageObj.src||void 0===this.imageObj.width||void 0===this.imageObj.height){var A=2*this.options.size;return this.width=A,this.height=A,void(this.radius=.5*this.width)}this.needsRefresh(t,e)&&this._resizeImage()}},{key:"draw",value:function(g,t,e,A,C,I){this.switchImages(A),this.resize();var i=t,o=e;"top-left"===this.options.shapeProperties.coordinateOrigin?(this.left=t,this.top=e,i+=this.width/2,o+=this.height/2):(this.left=t-this.width/2,this.top=e-this.height/2),this._drawRawCircle(g,i,o,I),g.save(),g.clip(),this._drawImageAtPosition(g,I),g.restore(),this._drawImageLabel(g,i,o,A,C),this.updateBoundingBox(t,e)}},{key:"updateBoundingBox",value:function(g,t){"top-left"===this.options.shapeProperties.coordinateOrigin?(this.boundingBox.top=t,this.boundingBox.left=g,this.boundingBox.right=g+2*this.options.size,this.boundingBox.bottom=t+2*this.options.size):(this.boundingBox.top=t-this.options.size,this.boundingBox.left=g-this.options.size,this.boundingBox.right=g+this.options.size,this.boundingBox.bottom=t+this.options.size),this.boundingBox.left=Math.min(this.boundingBox.left,this.labelModule.size.left),this.boundingBox.right=Math.max(this.boundingBox.right,this.labelModule.size.left+this.labelModule.size.width),this.boundingBox.bottom=Math.max(this.boundingBox.bottom,this.boundingBox.bottom+this.labelOffset)}},{key:"distanceToBorder",value:function(g){return g&&this.resize(g),.5*this.width}}]),e}(OM);function PM(g){var t=function(){if("undefined"==typeof Reflect||!yP)return!1;if(yP.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(yP(Boolean,[],(function(){}))),!0}catch(g){return!1}}();return function(){var e,A=vk(g);if(t){var C=vk(this).constructor;e=yP(A,arguments,C)}else e=A.apply(this,arguments);return hk(this,e)}}var MM=function(g){dk(e,g);var t=PM(e);function e(g,A,C){return cd(this,e),t.call(this,g,A,C)}return Bh(e,[{key:"resize",value:function(g){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:this.selected,e=arguments.length>2&&void 
0!==arguments[2]?arguments[2]:this.hover,A=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{size:this.options.size};if(this.needsRefresh(t,e)){var C,I;this.labelModule.getTextSize(g,t,e);var i=2*A.size;this.width=null!==(C=this.customSizeWidth)&&void 0!==C?C:i,this.height=null!==(I=this.customSizeHeight)&&void 0!==I?I:i,this.radius=.5*this.width}}},{key:"_drawShape",value:function(g,t,e,A,C,I,i,o){var n,r=this;return this.resize(g,I,i,o),this.left=A-this.width/2,this.top=C-this.height/2,this.initContextForDraw(g,o),(n=t,Object.prototype.hasOwnProperty.call(JA,n)?JA[n]:function(g){for(var t=arguments.length,e=new Array(t>1?t-1:0),A=1;A0&&(this.boundingBox.left=Math.min(this.boundingBox.left,this.labelModule.size.left),this.boundingBox.right=Math.max(this.boundingBox.right,this.labelModule.size.left+this.labelModule.size.width),this.boundingBox.bottom=Math.max(this.boundingBox.bottom,this.boundingBox.bottom+this.labelModule.size.height))}}]),e}(wM);function BM(g,t){var e=Cc(g);if(va){var A=va(g);t&&(A=Ap(A).call(A,(function(t){return Pa(g,t).enumerable}))),e.push.apply(e,A)}return e}function zM(g){for(var t=1;t1&&void 0!==arguments[1]?arguments[1]:this.selected,e=arguments.length>2&&void 0!==arguments[2]?arguments[2]:this.hover;if(this.needsRefresh(t,e)){var A=this.getDimensionsFromLabel(g,t,e);this.height=2*A.height,this.width=A.width+A.height,this.radius=.5*this.width}}},{key:"draw",value:function(g,t,e,A,C,I){this.resize(g,A,C),this.left=t-.5*this.width,this.top=e-.5*this.height,this.initContextForDraw(g,I),HA(g,this.left,this.top,this.width,this.height),this.performFill(g,I),this.updateBoundingBox(t,e,g,A,C),this.labelModule.draw(g,t,e,A,C)}},{key:"distanceToBorder",value:function(g,t){g&&this.resize(g);var e=.5*this.width,A=.5*this.height,C=Math.sin(t)*e,I=Math.cos(t)*A;return e*A/Math.sqrt(C*C+I*I)}}]),e}(wM);function UM(g){var t=function(){if("undefined"==typeof Reflect||!yP)return!1;if(yP.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(yP(Boolean,[],(function(){}))),!0}catch(g){return!1}}();return function(){var e,A=vk(g);if(t){var C=vk(this).constructor;e=yP(A,arguments,C)}else e=A.apply(this,arguments);return hk(this,e)}}var _M=function(g){dk(e,g);var t=UM(e);function e(g,A,C){var I;return cd(this,e),(I=t.call(this,g,A,C))._setMargins(C),I}return Bh(e,[{key:"resize",value:function(g,t,e){this.needsRefresh(t,e)&&(this.iconSize={width:Number(this.options.icon.size),height:Number(this.options.icon.size)},this.width=this.iconSize.width+this.margin.right+this.margin.left,this.height=this.iconSize.height+this.margin.top+this.margin.bottom,this.radius=.5*this.width)}},{key:"draw",value:function(g,t,e,A,C,I){var i=this;return this.resize(g,A,C),this.options.icon.size=this.options.icon.size||50,this.left=t-this.width/2,this.top=e-this.height/2,this._icon(g,t,e,A,C,I),{drawExternalLabel:function(){if(void 0!==i.options.label){i.labelModule.draw(g,i.left+i.iconSize.width/2+i.margin.left,e+i.height/2+5,A)}i.updateBoundingBox(t,e)}}}},{key:"updateBoundingBox",value:function(g,t){if(this.boundingBox.top=t-.5*this.options.icon.size,this.boundingBox.left=g-.5*this.options.icon.size,this.boundingBox.right=g+.5*this.options.icon.size,this.boundingBox.bottom=t+.5*this.options.icon.size,void 
0!==this.options.label&&this.labelModule.size.width>0){this.boundingBox.left=Math.min(this.boundingBox.left,this.labelModule.size.left),this.boundingBox.right=Math.max(this.boundingBox.right,this.labelModule.size.left+this.labelModule.size.width),this.boundingBox.bottom=Math.max(this.boundingBox.bottom,this.boundingBox.bottom+this.labelModule.size.height+5)}}},{key:"_icon",value:function(g,t,e,A,C,I){var i=Number(this.options.icon.size);void 0!==this.options.icon.code?(g.font=[null!=this.options.icon.weight?this.options.icon.weight:A?"bold":"",(null!=this.options.icon.weight&&A?5:0)+i+"px",this.options.icon.face].join(" "),g.fillStyle=this.options.icon.color||"black",g.textAlign="center",g.textBaseline="middle",this.enableShadow(g,I),g.fillText(this.options.icon.code,t,e),this.disableShadow(g,I)):console.error("When using the icon shape, you need to define the code in the icon options object. This can be done per node or globally.")}},{key:"distanceToBorder",value:function(g,t){return this._distanceToBorder(g,t)}}]),e}(wM);function HM(g){var t=function(){if("undefined"==typeof Reflect||!yP)return!1;if(yP.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(yP(Boolean,[],(function(){}))),!0}catch(g){return!1}}();return function(){var e,A=vk(g);if(t){var C=vk(this).constructor;e=yP(A,arguments,C)}else e=A.apply(this,arguments);return hk(this,e)}}var KM=function(g){dk(e,g);var t=HM(e);function e(g,A,C,I,i){var o;return cd(this,e),(o=t.call(this,g,A,C)).setImages(I,i),o}return Bh(e,[{key:"resize",value:function(g){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:this.selected,e=arguments.length>2&&void 0!==arguments[2]?arguments[2]:this.hover;if(void 0===this.imageObj.src||void 0===this.imageObj.width||void 0===this.imageObj.height){var A=2*this.options.size;return this.width=A,void(this.height=A)}this.needsRefresh(t,e)&&this._resizeImage()}},{key:"draw",value:function(g,t,e,A,C,I){g.save(),this.switchImages(A),this.resize();var i=t,o=e;if("top-left"===this.options.shapeProperties.coordinateOrigin?(this.left=t,this.top=e,i+=this.width/2,o+=this.height/2):(this.left=t-this.width/2,this.top=e-this.height/2),!0===this.options.shapeProperties.useBorderWithImage){var n=this.options.borderWidth,r=this.options.borderWidthSelected||2*this.options.borderWidth,s=(A?r:n)/this.body.view.scale;g.lineWidth=Math.min(this.width,s),g.beginPath();var a=A?this.options.color.highlight.border:C?this.options.color.hover.border:this.options.color.border,d=A?this.options.color.highlight.background:C?this.options.color.hover.background:this.options.color.background;void 0!==I.opacity&&(a=Ym(a,I.opacity),d=Ym(d,I.opacity)),g.strokeStyle=a,g.fillStyle=d,g.rect(this.left-.5*g.lineWidth,this.top-.5*g.lineWidth,this.width+g.lineWidth,this.height+g.lineWidth),ov(g).call(g),this.performStroke(g,I),g.closePath()}this._drawImageAtPosition(g,I),this._drawImageLabel(g,i,o,A,C),this.updateBoundingBox(t,e),g.restore()}},{key:"updateBoundingBox",value:function(g,t){this.resize(),"top-left"===this.options.shapeProperties.coordinateOrigin?(this.left=g,this.top=t):(this.left=g-this.width/2,this.top=t-this.height/2),this.boundingBox.left=this.left,this.boundingBox.top=this.top,this.boundingBox.bottom=this.top+this.height,this.boundingBox.right=this.left+this.width,void 
0!==this.options.label&&this.labelModule.size.width>0&&(this.boundingBox.left=Math.min(this.boundingBox.left,this.labelModule.size.left),this.boundingBox.right=Math.max(this.boundingBox.right,this.labelModule.size.left+this.labelModule.size.width),this.boundingBox.bottom=Math.max(this.boundingBox.bottom,this.boundingBox.bottom+this.labelOffset))}},{key:"distanceToBorder",value:function(g,t){return this._distanceToBorder(g,t)}}]),e}(OM);function XM(g){var t=function(){if("undefined"==typeof Reflect||!yP)return!1;if(yP.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(yP(Boolean,[],(function(){}))),!0}catch(g){return!1}}();return function(){var e,A=vk(g);if(t){var C=vk(this).constructor;e=yP(A,arguments,C)}else e=A.apply(this,arguments);return hk(this,e)}}var JM=function(g){dk(e,g);var t=XM(e);function e(g,A,C){return cd(this,e),t.call(this,g,A,C)}return Bh(e,[{key:"draw",value:function(g,t,e,A,C,I){return this._drawShape(g,"square",2,t,e,A,C,I)}},{key:"distanceToBorder",value:function(g,t){return this._distanceToBorder(g,t)}}]),e}(MM);function qM(g){var t=function(){if("undefined"==typeof Reflect||!yP)return!1;if(yP.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(yP(Boolean,[],(function(){}))),!0}catch(g){return!1}}();return function(){var e,A=vk(g);if(t){var C=vk(this).constructor;e=yP(A,arguments,C)}else e=A.apply(this,arguments);return hk(this,e)}}var $M=function(g){dk(e,g);var t=qM(e);function e(g,A,C){return cd(this,e),t.call(this,g,A,C)}return Bh(e,[{key:"draw",value:function(g,t,e,A,C,I){return this._drawShape(g,"hexagon",4,t,e,A,C,I)}},{key:"distanceToBorder",value:function(g,t){return this._distanceToBorder(g,t)}}]),e}(MM);function gB(g){var t=function(){if("undefined"==typeof Reflect||!yP)return!1;if(yP.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(yP(Boolean,[],(function(){}))),!0}catch(g){return!1}}();return function(){var e,A=vk(g);if(t){var C=vk(this).constructor;e=yP(A,arguments,C)}else e=A.apply(this,arguments);return hk(this,e)}}var tB=function(g){dk(e,g);var t=gB(e);function e(g,A,C){return cd(this,e),t.call(this,g,A,C)}return Bh(e,[{key:"draw",value:function(g,t,e,A,C,I){return this._drawShape(g,"star",4,t,e,A,C,I)}},{key:"distanceToBorder",value:function(g,t){return this._distanceToBorder(g,t)}}]),e}(MM);function eB(g){var t=function(){if("undefined"==typeof Reflect||!yP)return!1;if(yP.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(yP(Boolean,[],(function(){}))),!0}catch(g){return!1}}();return function(){var e,A=vk(g);if(t){var C=vk(this).constructor;e=yP(A,arguments,C)}else e=A.apply(this,arguments);return hk(this,e)}}var AB=function(g){dk(e,g);var t=eB(e);function e(g,A,C){var I;return cd(this,e),(I=t.call(this,g,A,C))._setMargins(C),I}return Bh(e,[{key:"resize",value:function(g,t,e){this.needsRefresh(t,e)&&(this.textSize=this.labelModule.getTextSize(g,t,e),this.width=this.textSize.width+this.margin.right+this.margin.left,this.height=this.textSize.height+this.margin.top+this.margin.bottom,this.radius=.5*this.width)}},{key:"draw",value:function(g,t,e,A,C,I){this.resize(g,A,C),this.left=t-this.width/2,this.top=e-this.height/2,this.enableShadow(g,I),this.labelModule.draw(g,this.left+this.textSize.width/2+this.margin.left,this.top+this.textSize.height/2+this.margin.top,A,C),this.disableShadow(g,I),this.updateBoundingBox(t,e,g,A,C)}},{key:"distanceToBorder",value:function(g,t){return 
this._distanceToBorder(g,t)}}]),e}(wM);function CB(g){var t=function(){if("undefined"==typeof Reflect||!yP)return!1;if(yP.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(yP(Boolean,[],(function(){}))),!0}catch(g){return!1}}();return function(){var e,A=vk(g);if(t){var C=vk(this).constructor;e=yP(A,arguments,C)}else e=A.apply(this,arguments);return hk(this,e)}}var IB=function(g){dk(e,g);var t=CB(e);function e(g,A,C){return cd(this,e),t.call(this,g,A,C)}return Bh(e,[{key:"draw",value:function(g,t,e,A,C,I){return this._drawShape(g,"triangle",3,t,e,A,C,I)}},{key:"distanceToBorder",value:function(g,t){return this._distanceToBorder(g,t)}}]),e}(MM);function iB(g){var t=function(){if("undefined"==typeof Reflect||!yP)return!1;if(yP.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(yP(Boolean,[],(function(){}))),!0}catch(g){return!1}}();return function(){var e,A=vk(g);if(t){var C=vk(this).constructor;e=yP(A,arguments,C)}else e=A.apply(this,arguments);return hk(this,e)}}var oB=function(g){dk(e,g);var t=iB(e);function e(g,A,C){return cd(this,e),t.call(this,g,A,C)}return Bh(e,[{key:"draw",value:function(g,t,e,A,C,I){return this._drawShape(g,"triangleDown",3,t,e,A,C,I)}},{key:"distanceToBorder",value:function(g,t){return this._distanceToBorder(g,t)}}]),e}(MM);function nB(g,t){var e=Cc(g);if(va){var A=va(g);t&&(A=Ap(A).call(A,(function(t){return Pa(g,t).enumerable}))),e.push.apply(e,A)}return e}function rB(g){for(var t=1;tg.left&&this.shape.topg.top}},{key:"isBoundingBoxOverlappingWith",value:function(g){return this.shape.boundingBox.leftg.left&&this.shape.boundingBox.topg.top}}],[{key:"checkOpacity",value:function(g){return 0<=g&&g<=1}},{key:"checkCoordinateOrigin",value:function(g){return void 0===g||"center"===g||"top-left"===g}},{key:"updateGroupOptions",value:function(t,e,A){var C;if(void 0!==A){var I=t.group;if(void 0!==e&&void 0!==e.group&&I!==e.group)throw new Error("updateGroupOptions: group values in options don't match.");if("number"==typeof I||"string"==typeof I&&""!=I){var i=A.get(I);void 0!==i.opacity&&void 0===e.opacity&&(g.checkOpacity(i.opacity)||(console.error("Invalid option for node opacity. Value must be between 0 and 1, found: "+i.opacity),i.opacity=void 0));var o=Ap(C=dM(e)).call(C,(function(g){return null!=e[g]}));o.push("font"),Zm(o,t,i),t.color=Qm(t.color)}}}},{key:"parseOptions",value:function(t,e){var A=arguments.length>2&&void 0!==arguments[2]&&arguments[2],C=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{},I=arguments.length>4?arguments[4]:void 0;if(Zm(["color","fixed","shadow"],t,e,A),g.checkMass(e),void 0!==t.opacity&&(g.checkOpacity(t.opacity)||(console.error("Invalid option for node opacity. Value must be between 0 and 1, found: "+t.opacity),t.opacity=void 0)),void 0!==e.opacity&&(g.checkOpacity(e.opacity)||(console.error("Invalid option for node opacity. 
Value must be between 0 and 1, found: "+e.opacity),e.opacity=void 0)),e.shapeProperties&&!g.checkCoordinateOrigin(e.shapeProperties.coordinateOrigin)&&console.error("Invalid option for node coordinateOrigin, found: "+e.shapeProperties.coordinateOrigin),qm(t,e,"shadow",C),void 0!==e.color&&null!==e.color){var i=Qm(e.color);zm(t.color,i)}else!0===A&&null===e.color&&(t.color=Jm(C.color));void 0!==e.fixed&&null!==e.fixed&&("boolean"==typeof e.fixed?(t.fixed.x=e.fixed,t.fixed.y=e.fixed):(void 0!==e.fixed.x&&"boolean"==typeof e.fixed.x&&(t.fixed.x=e.fixed.x),void 0!==e.fixed.y&&"boolean"==typeof e.fixed.y&&(t.fixed.y=e.fixed.y))),!0===A&&null===e.font&&(t.font=Jm(C.font)),g.updateGroupOptions(t,e,I),void 0!==e.scaling&&qm(t.scaling,e.scaling,"label",C.scaling)}},{key:"checkMass",value:function(g,t){if(void 0!==g.mass&&g.mass<=0){var e="";void 0!==t&&(e=" in node id: "+t),console.error("%cNegative or zero mass disallowed"+e+", setting mass to 1.",lb),g.mass=1}}}]),g}();function aB(g,t){var e=void 0!==kl&&bn(g)||g["@@iterator"];if(!e){if(Vl(g)||(e=function(g,t){var e;if(!g)return;if("string"==typeof g)return dB(g,t);var A=Sl(e=Object.prototype.toString.call(g)).call(e,8,-1);"Object"===A&&g.constructor&&(A=g.constructor.name);if("Map"===A||"Set"===A)return Jo(g);if("Arguments"===A||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(A))return dB(g,t)}(g))||t&&g&&"number"==typeof g.length){e&&(g=e);var A=0,C=function(){};return{s:C,n:function(){return A>=g.length?{done:!0}:{done:!1,value:g[A++]}},e:function(g){throw g},f:C}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var I,i=!0,o=!1;return{s:function(){e=e.call(g)},n:function(){var g=e.next();return i=g.done,g},e:function(g){o=!0,I=g},f:function(){try{i||null==e.return||e.return()}finally{if(o)throw I}}}}function dB(g,t){(null==t||t>g.length)&&(t=g.length);for(var e=0,A=new Array(t);e1?console.error("Invalid option for node opacity. 
Value must be between 0 and 1, found: "+g.opacity):this.options.opacity=g.opacity),void 0!==g.shape)for(var t in this.body.nodes)Object.prototype.hasOwnProperty.call(this.body.nodes,t)&&this.body.nodes[t].updateShape();if(void 0!==g.font||void 0!==g.widthConstraint||void 0!==g.heightConstraint)for(var e=0,A=Cc(this.body.nodes);e1&&void 0!==arguments[1]&&arguments[1],e=this.body.data.nodes;if(VP("id",g))this.body.data.nodes=g;else if(Vl(g))this.body.data.nodes=new GP,this.body.data.nodes.add(g);else{if(g)throw new TypeError("Array or DataSet expected");this.body.data.nodes=new GP}if(e&&Lm(this.nodesListeners,(function(g,t){e.off(t,g)})),this.body.nodes={},this.body.data.nodes){var A=this;Lm(this.nodesListeners,(function(g,t){A.body.data.nodes.on(t,g)}));var C=this.body.data.nodes.getIds();this.add(C,!0)}!1===t&&this.body.emitter.emit("_dataChanged")}},{key:"add",value:function(g){for(var t,e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],A=[],C=0;C1&&void 0!==arguments[1]?arguments[1]:sB)(g,this.body,this.images,this.groups,this.options,this.defaultOptions)}},{key:"refresh",value:function(){var g=this,t=arguments.length>0&&void 0!==arguments[0]&&arguments[0];Lm(this.body.nodes,(function(e,A){var C=g.body.data.nodes.get(A);void 0!==C&&(!0===t&&e.setOptions({x:null,y:null}),e.setOptions({fixed:!1}),e.setOptions(C))}))}},{key:"getPositions",value:function(g){var t={};if(void 0!==g){if(!0===Vl(g)){for(var e=0;e0?(A=e/o)*A:e;return o===1/0?1/0:o*FB(C)}});var GB=Ag.Math.hypot;!function(g){g.exports=GB}(BB);var jB=e(MB);function LB(g){var t=function(){if("undefined"==typeof Reflect||!yP)return!1;if(yP.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(yP(Boolean,[],(function(){}))),!0}catch(g){return!1}}();return function(){var e,A=vk(g);if(t){var C=vk(this).constructor;e=yP(A,arguments,C)}else e=A.apply(this,arguments);return hk(this,e)}}var VB=function(){function g(){cd(this,g)}return Bh(g,null,[{key:"transform",value:function(g,t){Vl(g)||(g=[g]);for(var e=t.point.x,A=t.point.y,C=t.angle,I=t.length,i=0;i4&&void 0!==arguments[4]?arguments[4]:this.getViaNode();g.strokeStyle=this.getColor(g,t),g.lineWidth=t.width,!1!==t.dashes?this._drawDashedLine(g,t,C):this._drawLine(g,t,C)}},{key:"_drawLine",value:function(g,t,e,A,C){if(this.from!=this.to)this._line(g,t,e,A,C);else{var I=bl(this._getCircleData(g),3),i=I[0],o=I[1],n=I[2];this._circle(g,t,i,o,n)}}},{key:"_drawDashedLine",value:function(g,t,e,A,C){g.lineCap="round";var I=Vl(t.dashes)?t.dashes:[5,5];if(void 0!==g.setLineDash){if(g.save(),g.setLineDash(I),g.lineDashOffset=0,this.from!=this.to)this._line(g,t,e);else{var i=bl(this._getCircleData(g),3),o=i[0],n=i[1],r=i[2];this._circle(g,t,o,n,r)}g.setLineDash([0]),g.lineDashOffset=0,g.restore()}else{if(this.from!=this.to)XA(g,this.from.x,this.from.y,this.to.x,this.to.y,I);else{var s=bl(this._getCircleData(g),3),a=s[0],d=s[1],h=s[2];this._circle(g,t,a,d,h)}this.enableShadow(g,t),g.stroke(),this.disableShadow(g,t)}}},{key:"findBorderPosition",value:function(g,t,e){return this.from!=this.to?this._findBorderPosition(g,t,e):this._findBorderPositionCircle(g,t,e)}},{key:"findBorderPositions",value:function(g){if(this.from!=this.to)return{from:this._findBorderPosition(this.from,g),to:this._findBorderPosition(this.to,g)};var 
t,e=bl(Sl(t=this._getCircleData(g)).call(t,0,2),2),A=e[0],C=e[1];return{from:this._findBorderPositionCircle(this.from,g,{x:A,y:C,low:.25,high:.6,direction:-1}),to:this._findBorderPositionCircle(this.from,g,{x:A,y:C,low:.6,high:.8,direction:1})}}},{key:"_getCircleData",value:function(g){var t=this.options.selfReference.size;void 0!==g&&void 0===this.from.shape.width&&this.from.shape.resize(g);var e=uM(g,this.options.selfReference.angle,t,this.from);return[e.x,e.y,t]}},{key:"_pointOnCircle",value:function(g,t,e,A){var C=2*A*Math.PI;return{x:g+e*Math.cos(C),y:t-e*Math.sin(C)}}},{key:"_findBorderPositionCircle",value:function(g,t,e){var A,C=e.x,I=e.y,i=e.low,o=e.high,n=e.direction,r=this.options.selfReference.size,s=.5*(i+o),a=0;!0===this.options.arrowStrikethrough&&(-1===n?a=this.options.endPointOffset.from:1===n&&(a=this.options.endPointOffset.to));var d=0;do{s=.5*(i+o),A=this._pointOnCircle(C,I,r,s);var h=Math.atan2(g.y-A.y,g.x-A.x),l=g.distanceToBorder(t,h)+a-Math.sqrt(Math.pow(A.x-g.x,2)+Math.pow(A.y-g.y,2));if(Math.abs(l)<.05)break;l>0?n>0?i=s:o=s:n>0?o=s:i=s,++d}while(i<=o&&d<10);return Az(Az({},A),{},{t:s})}},{key:"getLineWidth",value:function(g,t){return!0===g?Math.max(this.selectionWidth,.3/this._body.view.scale):!0===t?Math.max(this.hoverWidth,.3/this._body.view.scale):Math.max(this.options.width,.3/this._body.view.scale)}},{key:"getColor",value:function(g,t){if(!1!==t.inheritsColor){if("both"===t.inheritsColor&&this.from.id!==this.to.id){var e=g.createLinearGradient(this.from.x,this.from.y,this.to.x,this.to.y),A=this.from.options.color.highlight.border,C=this.to.options.color.highlight.border;return!1===this.from.selected&&!1===this.to.selected?(A=Ym(this.from.options.color.border,t.opacity),C=Ym(this.to.options.color.border,t.opacity)):!0===this.from.selected&&!1===this.to.selected?C=this.to.options.color.border:!1===this.from.selected&&!0===this.to.selected&&(A=this.from.options.color.border),e.addColorStop(0,A),e.addColorStop(1,C),e}return"to"===t.inheritsColor?Ym(this.to.options.color.border,t.opacity):Ym(this.from.options.color.border,t.opacity)}return Ym(t.color,t.opacity)}},{key:"_circle",value:function(g,t,e,A,C){this.enableShadow(g,t);var I=0,i=2*Math.PI;if(!this.options.selfReference.renderBehindTheNode){var o=this.options.selfReference.angle,n=this.options.selfReference.angle+Math.PI,r=this._findBorderPositionCircle(this.from,g,{x:e,y:A,low:o,high:n,direction:-1}),s=this._findBorderPositionCircle(this.from,g,{x:e,y:A,low:o,high:n,direction:1});I=Math.atan2(r.y-A,r.x-e),i=Math.atan2(s.y-A,s.x-e)}g.beginPath(),g.arc(e,A,C,I,i,!1),g.stroke(),this.disableShadow(g,t)}},{key:"getDistanceToEdge",value:function(g,t,e,A,C,I){if(this.from!=this.to)return this._getDistanceToEdge(g,t,e,A,C,I);var i=bl(this._getCircleData(void 0),3),o=i[0],n=i[1],r=i[2],s=o-C,a=n-I;return Math.abs(Math.sqrt(s*s+a*a)-r)}},{key:"_getDistanceToLine",value:function(g,t,e,A,C,I){var i=e-g,o=A-t,n=((C-g)*i+(I-t)*o)/(i*i+o*o);n>1?n=1:n<0&&(n=0);var r=g+n*i-C,s=t+n*o-I;return Math.sqrt(r*r+s*s)}},{key:"getArrowData",value:function(g,t,e,A,C,I){var i,o,n,r,s,a,d,h=I.width;"from"===t?(n=this.from,r=this.to,s=I.fromArrowScale<0,a=Math.abs(I.fromArrowScale),d=I.fromArrowType):"to"===t?(n=this.to,r=this.from,s=I.toArrowScale<0,a=Math.abs(I.toArrowScale),d=I.toArrowType):(n=this.to,r=this.from,s=I.middleArrowScale<0,a=Math.abs(I.middleArrowScale),d=I.middleArrowType);var l=15*a+3*h;if(n!=r){var c=l/jB(n.x-r.x,n.y-r.y);if("middle"!==t)if(!0===this.options.smooth.enabled){var 
u=this._findBorderPosition(n,g,{via:e}),p=this.getPoint(u.t+c*("from"===t?1:-1),e);i=Math.atan2(u.y-p.y,u.x-p.x),o=u}else i=Math.atan2(n.y-r.y,n.x-r.x),o=this._findBorderPosition(n,g);else{var f=(s?-c:c)/2,v=this.getPoint(.5+f,e),y=this.getPoint(.5-f,e);i=Math.atan2(v.y-y.y,v.x-y.x),o=this.getPoint(.5,e)}}else{var m=bl(this._getCircleData(g),3),b=m[0],w=m[1],x=m[2];if("from"===t){var k=this.options.selfReference.angle,E=this.options.selfReference.angle+Math.PI,O=this._findBorderPositionCircle(this.from,g,{x:b,y:w,low:k,high:E,direction:-1});i=-2*O.t*Math.PI+1.5*Math.PI+.1*Math.PI,o=O}else if("to"===t){var T=this.options.selfReference.angle,D=this.options.selfReference.angle+Math.PI,N=this._findBorderPositionCircle(this.from,g,{x:b,y:w,low:T,high:D,direction:1});i=-2*N.t*Math.PI+1.5*Math.PI-1.1*Math.PI,o=N}else{var R=this.options.selfReference.angle/(2*Math.PI);o=this._pointOnCircle(b,w,x,R),i=-2*R*Math.PI+1.5*Math.PI+.1*Math.PI}}return{point:o,core:{x:o.x-.9*l*Math.cos(i),y:o.y-.9*l*Math.sin(i)},angle:i,length:l,type:d}}},{key:"drawArrowHead",value:function(g,t,e,A,C){g.strokeStyle=this.getColor(g,t),g.fillStyle=g.strokeStyle,g.lineWidth=t.width,tz.draw(g,C)&&(this.enableShadow(g,t),ov(g).call(g),this.disableShadow(g,t))}},{key:"enableShadow",value:function(g,t){!0===t.shadow&&(g.shadowColor=t.shadowColor,g.shadowBlur=t.shadowSize,g.shadowOffsetX=t.shadowX,g.shadowOffsetY=t.shadowY)}},{key:"disableShadow",value:function(g,t){!0===t.shadow&&(g.shadowColor="rgba(0,0,0,0)",g.shadowBlur=0,g.shadowOffsetX=0,g.shadowOffsetY=0)}},{key:"drawBackground",value:function(g,t){if(!1!==t.background){var e={strokeStyle:g.strokeStyle,lineWidth:g.lineWidth,dashes:g.dashes};g.strokeStyle=t.backgroundColor,g.lineWidth=t.backgroundSize,this.setStrokeDashed(g,t.backgroundDashes),g.stroke(),g.strokeStyle=e.strokeStyle,g.lineWidth=e.lineWidth,g.dashes=e.dashes,this.setStrokeDashed(g,t.dashes)}}},{key:"setStrokeDashed",value:function(g,t){if(!1!==t)if(void 0!==g.setLineDash){var e=Vl(t)?t:[5,5];g.setLineDash(e)}else console.warn("setLineDash is not supported in this browser. The dashed stroke cannot be used.");else void 0!==g.setLineDash?g.setLineDash([]):console.warn("setLineDash is not supported in this browser. 
The dashed stroke cannot be used.")}}]),g}();function Iz(g,t){var e=Cc(g);if(va){var A=va(g);t&&(A=Ap(A).call(A,(function(t){return Pa(g,t).enumerable}))),e.push.apply(e,A)}return e}function iz(g){for(var t=1;t2&&void 0!==arguments[2]?arguments[2]:this._getViaCoordinates(),I=!1,i=1,o=0,n=this.to,r=this.options.endPointOffset?this.options.endPointOffset.to:0;g.id===this.from.id&&(n=this.from,I=!0,r=this.options.endPointOffset?this.options.endPointOffset.from:0),!1===this.options.arrowStrikethrough&&(r=0);var s=0;do{A=.5*(o+i),e=this.getPoint(A,C);var a=Math.atan2(n.y-e.y,n.x-e.x),d=n.distanceToBorder(t,a)+r-Math.sqrt(Math.pow(e.x-n.x,2)+Math.pow(e.y-n.y,2));if(Math.abs(d)<.2)break;d<0?!1===I?o=A:i=A:!1===I?i=A:o=A,++s}while(o<=i&&s<10);return iz(iz({},e),{},{t:A})}},{key:"_getDistanceToBezierEdge",value:function(g,t,e,A,C,I,i){var o,n,r,s,a,d=1e9,h=g,l=t;for(n=1;n<10;n++)r=.1*n,s=Math.pow(1-r,2)*g+2*r*(1-r)*i.x+Math.pow(r,2)*e,a=Math.pow(1-r,2)*t+2*r*(1-r)*i.y+Math.pow(r,2)*A,n>0&&(d=(o=this._getDistanceToLine(h,l,s,a,C,I))1&&void 0!==arguments[1]?arguments[1]:this.via;if(this.from===this.to){var e=bl(this._getCircleData(),3),A=e[0],C=e[1],I=e[2],i=2*Math.PI*(1-g);return{x:A+I*Math.sin(i),y:C+I-I*(1-Math.cos(i))}}return{x:Math.pow(1-g,2)*this.fromPoint.x+2*g*(1-g)*t.x+Math.pow(g,2)*this.toPoint.x,y:Math.pow(1-g,2)*this.fromPoint.y+2*g*(1-g)*t.y+Math.pow(g,2)*this.toPoint.y}}},{key:"_findBorderPosition",value:function(g,t){return this._findBorderPositionBezier(g,t,this.via)}},{key:"_getDistanceToEdge",value:function(g,t,e,A,C,I){return this._getDistanceToBezierEdge(g,t,e,A,C,I,this.via)}}]),e}(nz);function az(g){var t=function(){if("undefined"==typeof Reflect||!yP)return!1;if(yP.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(yP(Boolean,[],(function(){}))),!0}catch(g){return!1}}();return function(){var e,A=vk(g);if(t){var C=vk(this).constructor;e=yP(A,arguments,C)}else e=A.apply(this,arguments);return hk(this,e)}}var dz=function(g){dk(e,g);var t=az(e);function e(g,A,C){return cd(this,e),t.call(this,g,A,C)}return Bh(e,[{key:"_line",value:function(g,t,e){this._bezierCurve(g,t,e)}},{key:"getViaNode",value:function(){return this._getViaCoordinates()}},{key:"_getViaCoordinates",value:function(){var g,t,e=this.options.smooth.roundness,A=this.options.smooth.type,C=Math.abs(this.from.x-this.to.x),I=Math.abs(this.from.y-this.to.y);if("discrete"===A||"diagonalCross"===A){var i,o;i=o=C<=I?e*I:e*C,this.from.x>this.to.x&&(i=-i),this.from.y>=this.to.y&&(o=-o);var n=this.from.x+i,r=this.from.y+o;return"discrete"===A&&(C<=I?n=Cthis.to.x&&(g=-g),this.from.y>=this.to.y&&(t=-t);var y=this.from.x+g,m=this.from.y+t;return C<=I?y=this.from.x<=this.to.x?this.to.xy?this.to.x:y:m=this.from.y>=this.to.y?this.to.y>m?this.to.y:m:this.to.y2&&void 0!==arguments[2]?arguments[2]:{};return this._findBorderPositionBezier(g,t,e.via)}},{key:"_getDistanceToEdge",value:function(g,t,e,A,C,I){var i=arguments.length>6&&void 0!==arguments[6]?arguments[6]:this._getViaCoordinates();return this._getDistanceToBezierEdge(g,t,e,A,C,I,i)}},{key:"getPoint",value:function(g){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:this._getViaCoordinates(),e=g;return{x:Math.pow(1-e,2)*this.fromPoint.x+2*e*(1-e)*t.x+Math.pow(e,2)*this.toPoint.x,y:Math.pow(1-e,2)*this.fromPoint.y+2*e*(1-e)*t.y+Math.pow(e,2)*this.toPoint.y}}}]),e}(nz);function hz(g){var t=function(){if("undefined"==typeof Reflect||!yP)return!1;if(yP.sham)return!1;if("function"==typeof Proxy)return!0;try{return 
Boolean.prototype.valueOf.call(yP(Boolean,[],(function(){}))),!0}catch(g){return!1}}();return function(){var e,A=vk(g);if(t){var C=vk(this).constructor;e=yP(A,arguments,C)}else e=A.apply(this,arguments);return hk(this,e)}}var lz=function(g){dk(e,g);var t=hz(e);function e(g,A,C){return cd(this,e),t.call(this,g,A,C)}return Bh(e,[{key:"_getDistanceToBezierEdge2",value:function(g,t,e,A,C,I,i,o){for(var n=1e9,r=g,s=t,a=[0,0,0,0],d=1;d<10;d++){var h=.1*d;a[0]=Math.pow(1-h,3),a[1]=3*h*Math.pow(1-h,2),a[2]=3*Math.pow(h,2)*(1-h),a[3]=Math.pow(h,3);var l=a[0]*g+a[1]*i.x+a[2]*o.x+a[3]*e,c=a[0]*t+a[1]*i.y+a[2]*o.y+a[3]*A;if(d>0){var u=this._getDistanceToLine(r,s,l,c,C,I);n=uMath.abs(I)||!0===this.options.smooth.forceDirection||"horizontal"===this.options.smooth.forceDirection)&&"vertical"!==this.options.smooth.forceDirection?(t=this.from.y,A=this.to.y,g=this.from.x-i*C,e=this.to.x+i*C):(t=this.from.y-i*I,A=this.to.y+i*I,g=this.from.x,e=this.to.x),[{x:g,y:t},{x:e,y:A}]}},{key:"getViaNode",value:function(){return this._getViaCoordinates()}},{key:"_findBorderPosition",value:function(g,t){return this._findBorderPositionBezier(g,t)}},{key:"_getDistanceToEdge",value:function(g,t,e,A,C,I){var i=bl(arguments.length>6&&void 0!==arguments[6]?arguments[6]:this._getViaCoordinates(),2),o=i[0],n=i[1];return this._getDistanceToBezierEdge2(g,t,e,A,C,I,o,n)}},{key:"getPoint",value:function(g){var t=bl(arguments.length>1&&void 0!==arguments[1]?arguments[1]:this._getViaCoordinates(),2),e=t[0],A=t[1],C=g,I=[Math.pow(1-C,3),3*C*Math.pow(1-C,2),3*Math.pow(C,2)*(1-C),Math.pow(C,3)];return{x:I[0]*this.fromPoint.x+I[1]*e.x+I[2]*A.x+I[3]*this.toPoint.x,y:I[0]*this.fromPoint.y+I[1]*e.y+I[2]*A.y+I[3]*this.toPoint.y}}}]),e}(lz);function pz(g){var t=function(){if("undefined"==typeof Reflect||!yP)return!1;if(yP.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(yP(Boolean,[],(function(){}))),!0}catch(g){return!1}}();return function(){var e,A=vk(g);if(t){var C=vk(this).constructor;e=yP(A,arguments,C)}else e=A.apply(this,arguments);return hk(this,e)}}var fz=function(g){dk(e,g);var t=pz(e);function e(g,A,C){return cd(this,e),t.call(this,g,A,C)}return Bh(e,[{key:"_line",value:function(g,t){g.beginPath(),g.moveTo(this.fromPoint.x,this.fromPoint.y),g.lineTo(this.toPoint.x,this.toPoint.y),this.enableShadow(g,t),g.stroke(),this.disableShadow(g,t)}},{key:"getViaNode",value:function(){}},{key:"getPoint",value:function(g){return{x:(1-g)*this.fromPoint.x+g*this.toPoint.x,y:(1-g)*this.fromPoint.y+g*this.toPoint.y}}},{key:"_findBorderPosition",value:function(g,t){var e=this.to,A=this.from;g.id===this.from.id&&(e=this.from,A=this.to);var C=Math.atan2(e.y-A.y,e.x-A.x),I=e.x-A.x,i=e.y-A.y,o=Math.sqrt(I*I+i*i),n=(o-g.distanceToBorder(t,C))/o;return{x:(1-n)*A.x+n*e.x,y:(1-n)*A.y+n*e.y,t:0}}},{key:"_getDistanceToEdge",value:function(g,t,e,A,C,I){return this._getDistanceToLine(g,t,e,A,C,I)}}]),e}(Cz),vz=function(){function g(t,e,A,C,I){if(cd(this,g),void 0===e)throw new Error("No body provided");this.options=Jm(C),this.globalOptions=C,this.defaultOptions=I,this.body=e,this.imagelist=A,this.id=void 0,this.fromId=void 0,this.toId=void 0,this.selected=!1,this.hover=!1,this.labelDirty=!0,this.baseWidth=this.options.width,this.baseFontSize=this.options.font.size,this.from=void 0,this.to=void 0,this.edgeType=void 0,this.connected=!1,this.labelModule=new bM(this.body,this.options,!0),this.setOptions(t)}return Bh(g,[{key:"setOptions",value:function(t){if(t){var e=void 
0!==t.physics&&this.options.physics!==t.physics||void 0!==t.hidden&&(this.options.hidden||!1)!==(t.hidden||!1)||void 0!==t.from&&this.options.from!==t.from||void 0!==t.to&&this.options.to!==t.to;g.parseOptions(this.options,t,!0,this.globalOptions),void 0!==t.id&&(this.id=t.id),void 0!==t.from&&(this.fromId=t.from),void 0!==t.to&&(this.toId=t.to),void 0!==t.title&&(this.title=t.title),void 0!==t.value&&(t.value=AM(t.value));var A=[t,this.options,this.defaultOptions];return this.chooser=hM("edge",A),this.updateLabelModule(t),e=this.updateEdgeType()||e,this._setInteractionWidths(),this.connect(),e}}},{key:"getFormattingValues",value:function(){var g=!0===this.options.arrows.to||!0===this.options.arrows.to.enabled,t=!0===this.options.arrows.from||!0===this.options.arrows.from.enabled,e=!0===this.options.arrows.middle||!0===this.options.arrows.middle.enabled,A=this.options.color.inherit,C={toArrow:g,toArrowScale:this.options.arrows.to.scaleFactor,toArrowType:this.options.arrows.to.type,toArrowSrc:this.options.arrows.to.src,toArrowImageWidth:this.options.arrows.to.imageWidth,toArrowImageHeight:this.options.arrows.to.imageHeight,middleArrow:e,middleArrowScale:this.options.arrows.middle.scaleFactor,middleArrowType:this.options.arrows.middle.type,middleArrowSrc:this.options.arrows.middle.src,middleArrowImageWidth:this.options.arrows.middle.imageWidth,middleArrowImageHeight:this.options.arrows.middle.imageHeight,fromArrow:t,fromArrowScale:this.options.arrows.from.scaleFactor,fromArrowType:this.options.arrows.from.type,fromArrowSrc:this.options.arrows.from.src,fromArrowImageWidth:this.options.arrows.from.imageWidth,fromArrowImageHeight:this.options.arrows.from.imageHeight,arrowStrikethrough:this.options.arrowStrikethrough,color:A?void 0:this.options.color.color,inheritsColor:A,opacity:this.options.color.opacity,hidden:this.options.hidden,length:this.options.length,shadow:this.options.shadow.enabled,shadowColor:this.options.shadow.color,shadowSize:this.options.shadow.size,shadowX:this.options.shadow.x,shadowY:this.options.shadow.y,dashes:this.options.dashes,width:this.options.width,background:this.options.background.enabled,backgroundColor:this.options.background.color,backgroundSize:this.options.background.size,backgroundDashes:this.options.background.dashes};if(this.selected||this.hover)if(!0===this.chooser){if(this.selected){var I=this.options.selectionWidth;"function"==typeof I?C.width=I(C.width):"number"==typeof I&&(C.width+=I),C.width=Math.max(C.width,.3/this.body.view.scale),C.color=this.options.color.highlight,C.shadow=this.options.shadow.enabled}else if(this.hover){var i=this.options.hoverWidth;"function"==typeof i?C.width=i(C.width):"number"==typeof i&&(C.width+=i),C.width=Math.max(C.width,.3/this.body.view.scale),C.color=this.options.color.hover,C.shadow=this.options.shadow.enabled}}else"function"==typeof this.chooser&&(this.chooser(C,this.options.id,this.selected,this.hover),void 0!==C.color&&(C.inheritsColor=!1),!1===C.shadow&&(C.shadowColor===this.options.shadow.color&&C.shadowSize===this.options.shadow.size&&C.shadowX===this.options.shadow.x&&C.shadowY===this.options.shadow.y||(C.shadow=!0)));else C.shadow=this.options.shadow.enabled,C.width=Math.max(C.width,.3/this.body.view.scale);return C}},{key:"updateLabelModule",value:function(g){var t=[g,this.options,this.globalOptions,this.defaultOptions];this.labelModule.update(this.options,t),void 0!==this.labelModule.baseSize&&(this.baseFontSize=this.labelModule.baseSize)}},{key:"updateEdgeType",value:function(){var 
g=this.options.smooth,t=!1,e=!0;return void 0!==this.edgeType&&((this.edgeType instanceof sz&&!0===g.enabled&&"dynamic"===g.type||this.edgeType instanceof uz&&!0===g.enabled&&"cubicBezier"===g.type||this.edgeType instanceof dz&&!0===g.enabled&&"dynamic"!==g.type&&"cubicBezier"!==g.type||this.edgeType instanceof fz&&!1===g.type.enabled)&&(e=!1),!0===e&&(t=this.cleanup())),!0===e?!0===g.enabled?"dynamic"===g.type?(t=!0,this.edgeType=new sz(this.options,this.body,this.labelModule)):"cubicBezier"===g.type?this.edgeType=new uz(this.options,this.body,this.labelModule):this.edgeType=new dz(this.options,this.body,this.labelModule):this.edgeType=new fz(this.options,this.body,this.labelModule):this.edgeType.setOptions(this.options),t}},{key:"connect",value:function(){this.disconnect(),this.from=this.body.nodes[this.fromId]||void 0,this.to=this.body.nodes[this.toId]||void 0,this.connected=void 0!==this.from&&void 0!==this.to,!0===this.connected?(this.from.attachEdge(this),this.to.attachEdge(this)):(this.from&&this.from.detachEdge(this),this.to&&this.to.detachEdge(this)),this.edgeType.connect()}},{key:"disconnect",value:function(){this.from&&(this.from.detachEdge(this),this.from=void 0),this.to&&(this.to.detachEdge(this),this.to=void 0),this.connected=!1}},{key:"getTitle",value:function(){return this.title}},{key:"isSelected",value:function(){return this.selected}},{key:"getValue",value:function(){return this.options.value}},{key:"setValueRange",value:function(g,t,e){if(void 0!==this.options.value){var A=this.options.scaling.customScalingFunction(g,t,e,this.options.value),C=this.options.scaling.max-this.options.scaling.min;if(!0===this.options.scaling.label.enabled){var I=this.options.scaling.label.max-this.options.scaling.label.min;this.options.font.size=this.options.scaling.label.min+A*I}this.options.width=this.options.scaling.min+A*C}else this.options.width=this.baseWidth,this.options.font.size=this.baseFontSize;this._setInteractionWidths(),this.updateLabelModule()}},{key:"_setInteractionWidths",value:function(){"function"==typeof this.options.hoverWidth?this.edgeType.hoverWidth=this.options.hoverWidth(this.options.width):this.edgeType.hoverWidth=this.options.hoverWidth+this.options.width,"function"==typeof this.options.selectionWidth?this.edgeType.selectionWidth=this.options.selectionWidth(this.options.width):this.edgeType.selectionWidth=this.options.selectionWidth+this.options.width}},{key:"draw",value:function(g){var t=this.getFormattingValues();if(!t.hidden){var e=this.edgeType.getViaNode();this.edgeType.drawLine(g,t,this.selected,this.hover,e),this.drawLabel(g,e)}}},{key:"drawArrows",value:function(g){var t=this.getFormattingValues();if(!t.hidden){var 
e=this.edgeType.getViaNode(),A={};this.edgeType.fromPoint=this.edgeType.from,this.edgeType.toPoint=this.edgeType.to,t.fromArrow&&(A.from=this.edgeType.getArrowData(g,"from",e,this.selected,this.hover,t),!1===t.arrowStrikethrough&&(this.edgeType.fromPoint=A.from.core),t.fromArrowSrc&&(A.from.image=this.imagelist.load(t.fromArrowSrc)),t.fromArrowImageWidth&&(A.from.imageWidth=t.fromArrowImageWidth),t.fromArrowImageHeight&&(A.from.imageHeight=t.fromArrowImageHeight)),t.toArrow&&(A.to=this.edgeType.getArrowData(g,"to",e,this.selected,this.hover,t),!1===t.arrowStrikethrough&&(this.edgeType.toPoint=A.to.core),t.toArrowSrc&&(A.to.image=this.imagelist.load(t.toArrowSrc)),t.toArrowImageWidth&&(A.to.imageWidth=t.toArrowImageWidth),t.toArrowImageHeight&&(A.to.imageHeight=t.toArrowImageHeight)),t.middleArrow&&(A.middle=this.edgeType.getArrowData(g,"middle",e,this.selected,this.hover,t),t.middleArrowSrc&&(A.middle.image=this.imagelist.load(t.middleArrowSrc)),t.middleArrowImageWidth&&(A.middle.imageWidth=t.middleArrowImageWidth),t.middleArrowImageHeight&&(A.middle.imageHeight=t.middleArrowImageHeight)),t.fromArrow&&this.edgeType.drawArrowHead(g,t,this.selected,this.hover,A.from),t.middleArrow&&this.edgeType.drawArrowHead(g,t,this.selected,this.hover,A.middle),t.toArrow&&this.edgeType.drawArrowHead(g,t,this.selected,this.hover,A.to)}}},{key:"drawLabel",value:function(g,t){if(void 0!==this.options.label){var e,A=this.from,C=this.to;if(this.labelModule.differentState(this.selected,this.hover)&&this.labelModule.getTextSize(g,this.selected,this.hover),A.id!=C.id){this.labelModule.pointToSelf=!1,e=this.edgeType.getPoint(.5,t),g.save();var I=this._getRotation(g);0!=I.angle&&(g.translate(I.x,I.y),g.rotate(I.angle)),this.labelModule.draw(g,e.x,e.y,this.selected,this.hover),g.restore()}else{this.labelModule.pointToSelf=!0;var i=uM(g,this.options.selfReference.angle,this.options.selfReference.size,A);e=this._pointOnCircle(i.x,i.y,this.options.selfReference.size,this.options.selfReference.angle),this.labelModule.draw(g,e.x,e.y,this.selected,this.hover)}}}},{key:"getItemsOnPoint",value:function(g){var t=[];if(this.labelModule.visible()){var e=this._getRotation();lM(this.labelModule.getSize(),g,e)&&t.push({edgeId:this.id,labelId:0})}var A={left:g.x,top:g.y};return this.isOverlappingWith(A)&&t.push({edgeId:this.id}),t}},{key:"isOverlappingWith",value:function(g){if(this.connected){var t=this.from.x,e=this.from.y,A=this.to.x,C=this.to.y,I=g.left,i=g.top;return this.edgeType.getDistanceToEdge(t,e,A,C,I,i)<10}return!1}},{key:"_getRotation",value:function(g){var t=this.edgeType.getViaNode(),e=this.edgeType.getPoint(.5,t);void 0!==g&&this.labelModule.calculateLabelSize(g,this.selected,this.hover,e.x,e.y);var A={x:e.x,y:this.labelModule.size.yLine,angle:0};if(!this.labelModule.visible())return A;if("horizontal"===this.options.font.align)return A;var C=this.from.y-this.to.y,I=this.from.x-this.to.x,i=Math.atan2(C,I);return(i<-1&&I<0||i>0&&I<0)&&(i+=Math.PI),A.angle=i,A}},{key:"_pointOnCircle",value:function(g,t,e,A){return{x:g+e*Math.cos(A),y:t-e*Math.sin(A)}}},{key:"select",value:function(){this.selected=!0}},{key:"unselect",value:function(){this.selected=!1}},{key:"cleanup",value:function(){return this.edgeType.cleanup()}},{key:"remove",value:function(){this.cleanup(),this.disconnect(),delete this.body.edges[this.id]}},{key:"endPointsValid",value:function(){return void 0!==this.body.nodes[this.fromId]&&void 0!==this.body.nodes[this.toId]}}],[{key:"parseOptions",value:function(g,t){var e=arguments.length>2&&void 
0!==arguments[2]&&arguments[2],A=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{},C=arguments.length>4&&void 0!==arguments[4]&&arguments[4];if(Sm(["endPointOffset","arrowStrikethrough","id","from","hidden","hoverWidth","labelHighlightBold","length","line","opacity","physics","scaling","selectionWidth","selfReferenceSize","selfReference","to","title","value","width","font","chosen","widthConstraint"],g,t,e),void 0!==t.endPointOffset&&void 0!==t.endPointOffset.from&&(Zx(t.endPointOffset.from)?g.endPointOffset.from=t.endPointOffset.from:(g.endPointOffset.from=void 0!==A.endPointOffset.from?A.endPointOffset.from:0,console.error("endPointOffset.from is not a valid number"))),void 0!==t.endPointOffset&&void 0!==t.endPointOffset.to&&(Zx(t.endPointOffset.to)?g.endPointOffset.to=t.endPointOffset.to:(g.endPointOffset.to=void 0!==A.endPointOffset.to?A.endPointOffset.to:0,console.error("endPointOffset.to is not a valid number"))),cM(t.label)?g.label=t.label:cM(g.label)||(g.label=void 0),qm(g,t,"smooth",A),qm(g,t,"shadow",A),qm(g,t,"background",A),void 0!==t.dashes&&null!==t.dashes?g.dashes=t.dashes:!0===e&&null===t.dashes&&(g.dashes=pf(A.dashes)),void 0!==t.scaling&&null!==t.scaling?(void 0!==t.scaling.min&&(g.scaling.min=t.scaling.min),void 0!==t.scaling.max&&(g.scaling.max=t.scaling.max),qm(g.scaling,t.scaling,"label",A.scaling)):!0===e&&null===t.scaling&&(g.scaling=pf(A.scaling)),void 0!==t.arrows&&null!==t.arrows)if("string"==typeof t.arrows){var I=t.arrows.toLowerCase();g.arrows.to.enabled=-1!=$p(I).call(I,"to"),g.arrows.middle.enabled=-1!=$p(I).call(I,"middle"),g.arrows.from.enabled=-1!=$p(I).call(I,"from")}else{if("object"!==kh(t.arrows))throw new Error("The arrow newOptions can only be an object or a string. Refer to the documentation. You used:"+xf(t.arrows));qm(g.arrows,t.arrows,"to",A.arrows),qm(g.arrows,t.arrows,"middle",A.arrows),qm(g.arrows,t.arrows,"from",A.arrows)}else!0===e&&null===t.arrows&&(g.arrows=pf(A.arrows));if(void 0!==t.color&&null!==t.color){var i=Pm(t.color)?{color:t.color,highlight:t.color,hover:t.color,inherit:!1,opacity:1}:t.color,o=g.color;if(C)Fm(o,A.color,!1,e);else for(var n in o)Object.prototype.hasOwnProperty.call(o,n)&&delete o[n];if(Pm(o))o.color=o,o.highlight=o,o.hover=o,o.inherit=!1,void 0===i.opacity&&(o.opacity=1);else{var r=!1;void 0!==i.color&&(o.color=i.color,r=!0),void 0!==i.highlight&&(o.highlight=i.highlight,r=!0),void 0!==i.hover&&(o.hover=i.hover,r=!0),void 0!==i.inherit&&(o.inherit=i.inherit),void 0!==i.opacity&&(o.opacity=Math.min(1,Math.max(0,i.opacity))),!0===r?o.inherit=!1:void 0===o.inherit&&(o.inherit="from")}}else!0===e&&null===t.color&&(g.color=Jm(A.color));!0===e&&null===t.font&&(g.font=Jm(A.font)),Object.prototype.hasOwnProperty.call(t,"selfReferenceSize")&&(console.warn("The selfReferenceSize property has been deprecated. Please use selfReference property instead. 
The selfReference can be set like thise selfReference:{size:30, angle:Math.PI / 4}"),g.selfReference.size=t.selfReferenceSize)}}]),g}(),yz=function(){function g(t,e,A){var C,I=this;cd(this,g),this.body=t,this.images=e,this.groups=A,this.body.functions.createEdge=QA(C=this.create).call(C,this),this.edgesListeners={add:function(g,t){I.add(t.items)},update:function(g,t){I.update(t.items)},remove:function(g,t){I.remove(t.items)}},this.options={},this.defaultOptions={arrows:{to:{enabled:!1,scaleFactor:1,type:"arrow"},middle:{enabled:!1,scaleFactor:1,type:"arrow"},from:{enabled:!1,scaleFactor:1,type:"arrow"}},endPointOffset:{from:0,to:0},arrowStrikethrough:!0,color:{color:"#848484",highlight:"#848484",hover:"#848484",inherit:"from",opacity:1},dashes:!1,font:{color:"#343434",size:14,face:"arial",background:"none",strokeWidth:2,strokeColor:"#ffffff",align:"horizontal",multi:!1,vadjust:0,bold:{mod:"bold"},boldital:{mod:"bold italic"},ital:{mod:"italic"},mono:{mod:"",size:15,face:"courier new",vadjust:2}},hidden:!1,hoverWidth:1.5,label:void 0,labelHighlightBold:!0,length:void 0,physics:!0,scaling:{min:1,max:15,label:{enabled:!0,min:14,max:30,maxVisible:30,drawThreshold:5},customScalingFunction:function(g,t,e,A){if(t===g)return.5;var C=1/(t-g);return Math.max(0,(A-g)*C)}},selectionWidth:1.5,selfReference:{size:20,angle:Math.PI/4,renderBehindTheNode:!0},shadow:{enabled:!1,color:"rgba(0,0,0,0.5)",size:10,x:5,y:5},background:{enabled:!1,color:"rgba(111,111,111,1)",size:10,dashes:!1},smooth:{enabled:!0,type:"dynamic",forceDirection:"none",roundness:.5},title:void 0,width:1,value:void 0},Fm(this.options,this.defaultOptions),this.bindEventListeners()}return Bh(g,[{key:"bindEventListeners",value:function(){var g,t,e=this;this.body.emitter.on("_forceDisableDynamicCurves",(function(g){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1];"dynamic"===g&&(g="continuous");var A=!1;for(var C in e.body.edges)if(Object.prototype.hasOwnProperty.call(e.body.edges,C)){var I=e.body.edges[C],i=e.body.data.edges.get(C);if(null!=i){var o=i.smooth;void 0!==o&&!0===o.enabled&&"dynamic"===o.type&&(void 0===g?I.setOptions({smooth:!1}):I.setOptions({smooth:{type:g}}),A=!0)}}!0===t&&!0===A&&e.body.emitter.emit("_dataChanged")})),this.body.emitter.on("_dataUpdated",(function(){e.reconnectEdges()})),this.body.emitter.on("refreshEdges",QA(g=this.refresh).call(g,this)),this.body.emitter.on("refresh",QA(t=this.refresh).call(t,this)),this.body.emitter.on("destroy",(function(){Lm(e.edgesListeners,(function(g,t){e.body.data.edges&&e.body.data.edges.off(t,g)})),delete e.body.functions.createEdge,delete e.edgesListeners.add,delete e.edgesListeners.update,delete e.edgesListeners.remove,delete e.edgesListeners}))}},{key:"setOptions",value:function(g){if(void 0!==g){vz.parseOptions(this.options,g,!0,this.defaultOptions,!0);var t=!1;if(void 0!==g.smooth)for(var e in this.body.edges)Object.prototype.hasOwnProperty.call(this.body.edges,e)&&(t=this.body.edges[e].updateEdgeType()||t);if(void 0!==g.font)for(var A in this.body.edges)Object.prototype.hasOwnProperty.call(this.body.edges,A)&&this.body.edges[A].updateLabelModule();void 0===g.hidden&&void 0===g.physics&&!0!==t||this.body.emitter.emit("_dataChanged")}}},{key:"setData",value:function(g){var t=this,e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],A=this.body.data.edges;if(VP("id",g))this.body.data.edges=g;else if(Vl(g))this.body.data.edges=new GP,this.body.data.edges.add(g);else{if(g)throw new TypeError("Array or DataSet expected");this.body.data.edges=new 
GP}if(A&&Lm(this.edgesListeners,(function(g,t){A.off(t,g)})),this.body.edges={},this.body.data.edges){Lm(this.edgesListeners,(function(g,e){t.body.data.edges.on(e,g)}));var C=this.body.data.edges.getIds();this.add(C,!0)}this.body.emitter.emit("_adjustEdgesForHierarchicalLayout"),!1===e&&this.body.emitter.emit("_dataChanged")}},{key:"add",value:function(g){for(var t=arguments.length>1&&void 0!==arguments[1]&&arguments[1],e=this.body.edges,A=this.body.data.edges,C=0;C1&&void 0!==arguments[1])||arguments[1];if(0!==g.length){var e=this.body.edges;Lm(g,(function(g){var t=e[g];void 0!==t&&t.remove()})),t&&this.body.emitter.emit("_dataChanged")}}},{key:"refresh",value:function(){var g=this;Lm(this.body.edges,(function(t,e){var A=g.body.data.edges.get(e);void 0!==A&&t.setOptions(A)}))}},{key:"create",value:function(g){return new vz(g,this.body,this.images,this.options,this.defaultOptions)}},{key:"reconnectEdges",value:function(){var g,t=this.body.nodes,e=this.body.edges;for(g in t)Object.prototype.hasOwnProperty.call(t,g)&&(t[g].edges=[]);for(g in e)if(Object.prototype.hasOwnProperty.call(e,g)){var A=e[g];A.from=null,A.to=null,A.connect()}}},{key:"getConnectedNodes",value:function(g){var t=[];if(void 0!==this.body.edges[g]){var e=this.body.edges[g];void 0!==e.fromId&&t.push(e.fromId),void 0!==e.toId&&t.push(e.toId)}return t}},{key:"_updateState",value:function(){this._addMissingEdges(),this._removeInvalidEdges()}},{key:"_removeInvalidEdges",value:function(){var g=this,t=[];Lm(this.body.edges,(function(e,A){var C=g.body.nodes[e.toId],I=g.body.nodes[e.fromId];void 0!==C&&!0===C.isCluster||void 0!==I&&!0===I.isCluster||void 0!==C&&void 0!==I||t.push(A)})),this.remove(t,!1)}},{key:"_addMissingEdges",value:function(){var g=this.body.data.edges;if(null!=g){var t=this.body.edges,e=[];Ec(g).call(g,(function(g,A){void 0===t[A]&&e.push(A)})),this.add(e,!0)}}}]),g}(),mz=function(){function g(t,e,A){cd(this,g),this.body=t,this.physicsBody=e,this.barnesHutTree,this.setOptions(A),this._rng=xm("BARNES HUT SOLVER")}return Bh(g,[{key:"setOptions",value:function(g){this.options=g,this.thetaInversed=1/this.options.theta,this.overlapAvoidanceFactor=1-Math.max(0,Math.min(1,this.options.avoidOverlap))}},{key:"solve",value:function(){if(0!==this.options.gravitationalConstant&&this.physicsBody.physicsNodeIndices.length>0){var g,t=this.body.nodes,e=this.physicsBody.physicsNodeIndices,A=e.length,C=this._formBarnesHutTree(t,e);this.barnesHutTree=C;for(var I=0;I0&&this._getForceContributions(C.root,g)}}},{key:"_getForceContributions",value:function(g,t){this._getForceContribution(g.children.NW,t),this._getForceContribution(g.children.NE,t),this._getForceContribution(g.children.SW,t),this._getForceContribution(g.children.SE,t)}},{key:"_getForceContribution",value:function(g,t){if(g.childrenCount>0){var e=g.centerOfMass.x-t.x,A=g.centerOfMass.y-t.y,C=Math.sqrt(e*e+A*A);C*g.calcSize>this.thetaInversed?this._calculateForces(C,e,A,t,g):4===g.childrenCount?this._getForceContributions(g,t):g.children.data.id!=t.id&&this._calculateForces(C,e,A,t,g)}}},{key:"_calculateForces",value:function(g,t,e,A,C){0===g&&(t=g=.1),this.overlapAvoidanceFactor<1&&A.shape.radius&&(g=Math.max(.1+this.overlapAvoidanceFactor*A.shape.radius,g-A.shape.radius));var I=this.options.gravitationalConstant*C.mass*A.options.mass/Math.pow(g,3),i=t*I,o=e*I;this.physicsBody.forces[A.id].x+=i,this.physicsBody.forces[A.id].y+=o}},{key:"_formBarnesHutTree",value:function(g,t){for(var 
e,A=t.length,C=g[t[0]].x,I=g[t[0]].y,i=g[t[0]].x,o=g[t[0]].y,n=1;n0&&(si&&(i=s),ao&&(o=a))}var d=Math.abs(i-C)-Math.abs(o-I);d>0?(I-=.5*d,o+=.5*d):(C+=.5*d,i-=.5*d);var h=Math.max(1e-5,Math.abs(i-C)),l=.5*h,c=.5*(C+i),u=.5*(I+o),p={root:{centerOfMass:{x:0,y:0},mass:0,range:{minX:c-l,maxX:c+l,minY:u-l,maxY:u+l},size:h,calcSize:1/h,children:{data:null},maxWidth:0,level:0,childrenCount:4}};this._splitBranch(p.root);for(var f=0;f0&&this._placeInTree(p.root,e);return p}},{key:"_updateBranchMass",value:function(g,t){var e=g.centerOfMass,A=g.mass+t.options.mass,C=1/A;e.x=e.x*g.mass+t.x*t.options.mass,e.x*=C,e.y=e.y*g.mass+t.y*t.options.mass,e.y*=C,g.mass=A;var I=Math.max(Math.max(t.height,t.radius),t.width);g.maxWidth=g.maxWidtht.x?C.maxY>t.y?"NW":"SW":C.maxY>t.y?"NE":"SE",this._placeInRegion(g,t,A)}},{key:"_placeInRegion",value:function(g,t,e){var A=g.children[e];switch(A.childrenCount){case 0:A.children.data=t,A.childrenCount=1,this._updateBranchMass(A,t);break;case 1:A.children.data.x===t.x&&A.children.data.y===t.y?(t.x+=this._rng(),t.y+=this._rng()):(this._splitBranch(A),this._placeInTree(A,t));break;case 4:this._placeInTree(A,t)}}},{key:"_splitBranch",value:function(g){var t=null;1===g.childrenCount&&(t=g.children.data,g.mass=0,g.centerOfMass.x=0,g.centerOfMass.y=0),g.childrenCount=4,g.children.data=null,this._insertRegion(g,"NW"),this._insertRegion(g,"NE"),this._insertRegion(g,"SW"),this._insertRegion(g,"SE"),null!=t&&this._placeInTree(g,t)}},{key:"_insertRegion",value:function(g,t){var e,A,C,I,i=.5*g.size;switch(t){case"NW":e=g.range.minX,A=g.range.minX+i,C=g.range.minY,I=g.range.minY+i;break;case"NE":e=g.range.minX+i,A=g.range.maxX,C=g.range.minY,I=g.range.minY+i;break;case"SW":e=g.range.minX,A=g.range.minX+i,C=g.range.minY+i,I=g.range.maxY;break;case"SE":e=g.range.minX+i,A=g.range.maxX,C=g.range.minY+i,I=g.range.maxY}g.children[t]={centerOfMass:{x:0,y:0},mass:0,range:{minX:e,maxX:A,minY:C,maxY:I},size:.5*g.size,calcSize:2*g.calcSize,children:{data:null},maxWidth:0,level:g.level+1,childrenCount:0}}},{key:"_debug",value:function(g,t){void 0!==this.barnesHutTree&&(g.lineWidth=1,this._drawBranch(this.barnesHutTree.root,g,t))}},{key:"_drawBranch",value:function(g,t,e){void 0===e&&(e="#FF0000"),4===g.childrenCount&&(this._drawBranch(g.children.NW,t),this._drawBranch(g.children.NE,t),this._drawBranch(g.children.SE,t),this._drawBranch(g.children.SW,t)),t.strokeStyle=e,t.beginPath(),t.moveTo(g.range.minX,g.range.minY),t.lineTo(g.range.maxX,g.range.minY),t.stroke(),t.beginPath(),t.moveTo(g.range.maxX,g.range.minY),t.lineTo(g.range.maxX,g.range.maxY),t.stroke(),t.beginPath(),t.moveTo(g.range.maxX,g.range.maxY),t.lineTo(g.range.minX,g.range.maxY),t.stroke(),t.beginPath(),t.moveTo(g.range.minX,g.range.maxY),t.lineTo(g.range.minX,g.range.minY),t.stroke()}}]),g}(),bz=function(){function g(t,e,A){cd(this,g),this._rng=xm("REPULSION SOLVER"),this.body=t,this.physicsBody=e,this.setOptions(A)}return Bh(g,[{key:"setOptions",value:function(g){this.options=g}},{key:"solve",value:function(){for(var g,t,e,A,C,I,i,o,n=this.body.nodes,r=this.physicsBody.physicsNodeIndices,s=this.physicsBody.forces,a=this.options.nodeDistance,d=-2/3/a,h=0;h0){var I=C.edges.length+1,i=this.options.centralGravity*I*C.options.mass;A[C.id].x=t*i,A[C.id].y=e*i}}}]),e}(Ez),Rz=function(){function 
g(t){cd(this,g),this.body=t,this.physicsBody={physicsNodeIndices:[],physicsEdgeIndices:[],forces:{},velocities:{}},this.physicsEnabled=!0,this.simulationInterval=1e3/60,this.requiresTimeout=!0,this.previousStates={},this.referenceState={},this.freezeCache={},this.renderTimer=void 0,this.adaptiveTimestep=!1,this.adaptiveTimestepEnabled=!1,this.adaptiveCounter=0,this.adaptiveInterval=3,this.stabilized=!1,this.startedStabilization=!1,this.stabilizationIterations=0,this.ready=!1,this.options={},this.defaultOptions={enabled:!0,barnesHut:{theta:.5,gravitationalConstant:-2e3,centralGravity:.3,springLength:95,springConstant:.04,damping:.09,avoidOverlap:0},forceAtlas2Based:{theta:.5,gravitationalConstant:-50,centralGravity:.01,springConstant:.08,springLength:100,damping:.4,avoidOverlap:0},repulsion:{centralGravity:.2,springLength:200,springConstant:.05,nodeDistance:100,damping:.09,avoidOverlap:0},hierarchicalRepulsion:{centralGravity:0,springLength:100,springConstant:.01,nodeDistance:120,damping:.09},maxVelocity:50,minVelocity:.75,solver:"barnesHut",stabilization:{enabled:!0,iterations:1e3,updateInterval:50,onlyDynamicEdges:!1,fit:!0},timestep:.5,adaptiveTimestep:!0,wind:{x:0,y:0}},yA(this.options,this.defaultOptions),this.timestep=.5,this.layoutFailed=!1,this.bindEventListeners()}return Bh(g,[{key:"bindEventListeners",value:function(){var g=this;this.body.emitter.on("initPhysics",(function(){g.initPhysics()})),this.body.emitter.on("_layoutFailed",(function(){g.layoutFailed=!0})),this.body.emitter.on("resetPhysics",(function(){g.stopSimulation(),g.ready=!1})),this.body.emitter.on("disablePhysics",(function(){g.physicsEnabled=!1,g.stopSimulation()})),this.body.emitter.on("restorePhysics",(function(){g.setOptions(g.options),!0===g.ready&&g.startSimulation()})),this.body.emitter.on("startSimulation",(function(){!0===g.ready&&g.startSimulation()})),this.body.emitter.on("stopSimulation",(function(){g.stopSimulation()})),this.body.emitter.on("destroy",(function(){g.stopSimulation(!1),g.body.emitter.off()})),this.body.emitter.on("_dataChanged",(function(){g.updatePhysicsData()}))}},{key:"setOptions",value:function(g){if(void 0!==g)if(!1===g)this.options.enabled=!1,this.physicsEnabled=!1,this.stopSimulation();else if(!0===g)this.options.enabled=!0,this.physicsEnabled=!0,this.startSimulation();else{this.physicsEnabled=!0,Zm(["stabilization"],this.options,g),qm(this.options,g,"stabilization"),void 0===g.enabled&&(this.options.enabled=!0),!1===this.options.enabled&&(this.physicsEnabled=!1,this.stopSimulation());var t=this.options.wind;t&&(("number"!=typeof t.x||Rx(t.x))&&(t.x=0),("number"!=typeof t.y||Rx(t.y))&&(t.y=0)),this.timestep=this.options.timestep}this.init()}},{key:"init",value:function(){var g;"forceAtlas2Based"===this.options.solver?(g=this.options.forceAtlas2Based,this.nodesSolver=new Tz(this.body,this.physicsBody,g),this.edgesSolver=new xz(this.body,this.physicsBody,g),this.gravitySolver=new Nz(this.body,this.physicsBody,g)):"repulsion"===this.options.solver?(g=this.options.repulsion,this.nodesSolver=new bz(this.body,this.physicsBody,g),this.edgesSolver=new xz(this.body,this.physicsBody,g),this.gravitySolver=new Ez(this.body,this.physicsBody,g)):"hierarchicalRepulsion"===this.options.solver?(g=this.options.hierarchicalRepulsion,this.nodesSolver=new wz(this.body,this.physicsBody,g),this.edgesSolver=new kz(this.body,this.physicsBody,g),this.gravitySolver=new Ez(this.body,this.physicsBody,g)):(g=this.options.barnesHut,this.nodesSolver=new mz(this.body,this.physicsBody,g),this.edgesSolver=new 
xz(this.body,this.physicsBody,g),this.gravitySolver=new Ez(this.body,this.physicsBody,g)),this.modelOptions=g}},{key:"initPhysics",value:function(){!0===this.physicsEnabled&&!0===this.options.enabled?!0===this.options.stabilization.enabled?this.stabilize():(this.stabilized=!1,this.ready=!0,this.body.emitter.emit("fit",{},this.layoutFailed),this.startSimulation()):(this.ready=!0,this.body.emitter.emit("fit"))}},{key:"startSimulation",value:function(){var g;!0===this.physicsEnabled&&!0===this.options.enabled?(this.stabilized=!1,this.adaptiveTimestep=!1,this.body.emitter.emit("_resizeNodes"),void 0===this.viewFunction&&(this.viewFunction=QA(g=this.simulationStep).call(g,this),this.body.emitter.on("initRedraw",this.viewFunction),this.body.emitter.emit("_startRendering"))):this.body.emitter.emit("_redraw")}},{key:"stopSimulation",value:function(){var g=!(arguments.length>0&&void 0!==arguments[0])||arguments[0];this.stabilized=!0,!0===g&&this._emitStabilized(),void 0!==this.viewFunction&&(this.body.emitter.off("initRedraw",this.viewFunction),this.viewFunction=void 0,!0===g&&this.body.emitter.emit("_stopRendering"))}},{key:"simulationStep",value:function(){var g=ac();this.physicsTick(),(ac()-g<.4*this.simulationInterval||!0===this.runDoubleSpeed)&&!1===this.stabilized&&(this.physicsTick(),this.runDoubleSpeed=!0),!0===this.stabilized&&this.stopSimulation()}},{key:"_emitStabilized",value:function(){var g=this,t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:this.stabilizationIterations;(this.stabilizationIterations>1||!0===this.startedStabilization)&&_f((function(){g.body.emitter.emit("stabilized",{iterations:t}),g.startedStabilization=!1,g.stabilizationIterations=0}),0)}},{key:"physicsStep",value:function(){this.gravitySolver.solve(),this.nodesSolver.solve(),this.edgesSolver.solve(),this.moveNodes()}},{key:"adjustTimeStep",value:function(){!0===this._evaluateStepQuality()?this.timestep=1.2*this.timestep:this.timestep/1.2.3))return!1;return!0}},{key:"moveNodes",value:function(){for(var g=this.physicsBody.physicsNodeIndices,t=0,e=0,A=0;AA&&(g=g>0?A:-A),g}},{key:"_performStep",value:function(g){var t=this.body.nodes[g],e=this.physicsBody.forces[g];this.options.wind&&(e.x+=this.options.wind.x,e.y+=this.options.wind.y);var A=this.physicsBody.velocities[g];return this.previousStates[g]={x:t.x,y:t.y,vx:A.x,vy:A.y},!1===t.options.fixed.x?(A.x=this.calculateComponentVelocity(A.x,e.x,t.options.mass),t.x+=A.x*this.timestep):(e.x=0,A.x=0),!1===t.options.fixed.y?(A.y=this.calculateComponentVelocity(A.y,e.y,t.options.mass),t.y+=A.y*this.timestep):(e.y=0,A.y=0),Math.sqrt(Math.pow(A.x,2)+Math.pow(A.y,2))}},{key:"_freezeNodes",value:function(){var g=this.body.nodes;for(var t in g)if(Object.prototype.hasOwnProperty.call(g,t)&&g[t].x&&g[t].y){var e=g[t].options.fixed;this.freezeCache[t]={x:e.x,y:e.y},e.x=!0,e.y=!0}}},{key:"_restoreFrozenNodes",value:function(){var g=this.body.nodes;for(var t in g)Object.prototype.hasOwnProperty.call(g,t)&&void 0!==this.freezeCache[t]&&(g[t].options.fixed.x=this.freezeCache[t].x,g[t].options.fixed.y=this.freezeCache[t].y);this.freezeCache={}}},{key:"stabilize",value:function(){var g=this,t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:this.options.stabilization.iterations;"number"!=typeof t&&(t=this.options.stabilization.iterations,console.error("The stabilize method needs a numeric amount of iterations. 
Switching to default: ",t)),0!==this.physicsBody.physicsNodeIndices.length?(this.adaptiveTimestep=this.options.adaptiveTimestep,this.body.emitter.emit("_resizeNodes"),this.stopSimulation(),this.stabilized=!1,this.body.emitter.emit("_blockRedraw"),this.targetIterations=t,!0===this.options.stabilization.onlyDynamicEdges&&this._freezeNodes(),this.stabilizationIterations=0,_f((function(){return g._stabilizationBatch()}),0)):this.ready=!0}},{key:"_startStabilizing",value:function(){return!0!==this.startedStabilization&&(this.body.emitter.emit("startStabilizing"),this.startedStabilization=!0,!0)}},{key:"_stabilizationBatch",value:function(){var g=this,t=function(){return!1===g.stabilized&&g.stabilizationIterations1&&void 0!==arguments[1]?arguments[1]:[],A=1e9,C=-1e9,I=1e9,i=-1e9;if(e.length>0)for(var o=0;o(t=g[e[o]]).shape.boundingBox.left&&(I=t.shape.boundingBox.left),it.shape.boundingBox.top&&(A=t.shape.boundingBox.top),C1&&void 0!==arguments[1]?arguments[1]:[],A=1e9,C=-1e9,I=1e9,i=-1e9;if(e.length>0)for(var o=0;o(t=g[e[o]]).x&&(I=t.x),it.y&&(A=t.y),C=g&&e.push(C.id)}for(var I=0;I0&&void 0!==arguments[0]?arguments[0]:{},e=!(arguments.length>1&&void 0!==arguments[1])||arguments[1];if(void 0===t.joinCondition)throw new Error("Cannot call clusterByNodeData without a joinCondition function in the options.");t=this._checkOptions(t);var A={},C={};Lm(this.body.nodes,(function(e,I){e.options&&!0===t.joinCondition(e.options)&&(A[I]=e,Lm(e.edges,(function(t){void 0===g.clusteredEdges[t.id]&&(C[t.id]=t)})))})),this._cluster(A,C,t,e)}},{key:"clusterByEdgeCount",value:function(g,t){var e=this,A=!(arguments.length>2&&void 0!==arguments[2])||arguments[2];t=this._checkOptions(t);for(var C,I,i,o=[],n={},r=function(){var A={},r={},a=e.body.nodeIndices[s],d=e.body.nodes[a];if(void 0===n[a]){i=0,I=[];for(var h=0;h0&&Cc(r).length>0&&!0===c){var f=function(){for(var g=0;g1&&void 0!==arguments[1])||arguments[1];this.clusterByEdgeCount(1,g,t)}},{key:"clusterBridges",value:function(g){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1];this.clusterByEdgeCount(2,g,t)}},{key:"clusterByConnection",value:function(g,t){var e,A=!(arguments.length>2&&void 0!==arguments[2])||arguments[2];if(void 0===g)throw new Error("No nodeId supplied to clusterByConnection!");if(void 0===this.body.nodes[g])throw new Error("The nodeId given to clusterByConnection does not exist!");var C=this.body.nodes[g];void 0===(t=this._checkOptions(t,C)).clusterNodeProperties.x&&(t.clusterNodeProperties.x=C.x),void 0===t.clusterNodeProperties.y&&(t.clusterNodeProperties.y=C.y),void 0===t.clusterNodeProperties.fixed&&(t.clusterNodeProperties.fixed={},t.clusterNodeProperties.fixed.x=C.options.fixed.x,t.clusterNodeProperties.fixed.y=C.options.fixed.y);var I={},i={},o=C.id,n=Pz.cloneOptions(C);I[o]=C;for(var r=0;r-1&&(i[p.id]=p)}this._cluster(I,i,t,A)}},{key:"_createClusterEdges",value:function(g,t,e,A){for(var C,I,i,o,n,r,s=Cc(g),a=[],d=0;d0&&void 0!==arguments[0]?arguments[0]:{};return void 0===g.clusterEdgeProperties&&(g.clusterEdgeProperties={}),void 0===g.clusterNodeProperties&&(g.clusterNodeProperties={}),g}},{key:"_cluster",value:function(g,t,e){var A=!(arguments.length>3&&void 0!==arguments[3])||arguments[3],C=[];for(var I in g)Object.prototype.hasOwnProperty.call(g,I)&&void 0!==this.clusteredNodes[I]&&C.push(I);for(var i=0;iC?t.x:C,I=t.yi?t.y:i;return{x:.5*(A+C),y:.5*(I+i)}}},{key:"openCluster",value:function(g,t){var e=!(arguments.length>2&&void 0!==arguments[2])||arguments[2];if(void 0===g)throw new Error("No clusterNodeId supplied 
to openCluster.");var A=this.body.nodes[g];if(void 0===A)throw new Error("The clusterNodeId supplied to openCluster does not exist.");if(!0!==A.isCluster||void 0===A.containedNodes||void 0===A.containedEdges)throw new Error("The node:"+g+" is not a valid cluster.");var C=this.findNode(g),I=$p(C).call(C,g)-1;if(I>=0){var i=C[I];return this.body.nodes[i]._openChildCluster(g),delete this.body.nodes[g],void(!0===e&&this.body.emitter.emit("_dataChanged"))}var o=A.containedNodes,n=A.containedEdges;if(void 0!==t&&void 0!==t.releaseFunction&&"function"==typeof t.releaseFunction){var r={},s={x:A.x,y:A.y};for(var a in o)if(Object.prototype.hasOwnProperty.call(o,a)){var d=this.body.nodes[a];r[a]={x:d.x,y:d.y}}var h=t.releaseFunction(s,r);for(var l in o)if(Object.prototype.hasOwnProperty.call(o,l)){var c=this.body.nodes[l];void 0!==h[l]&&(c.x=void 0===h[l].x?A.x:h[l].x,c.y=void 0===h[l].y?A.y:h[l].y)}}else Lm(o,(function(g){!1===g.options.fixed.x&&(g.x=A.x),!1===g.options.fixed.y&&(g.y=A.y)}));for(var u in o)if(Object.prototype.hasOwnProperty.call(o,u)){var p=this.body.nodes[u];p.vx=A.vx,p.vy=A.vy,p.setOptions({physics:!0}),delete this.clusteredNodes[u]}for(var f=[],v=0;v0&&C<100;){var I=t.pop();if(void 0!==I){var i=this.body.edges[I];if(void 0!==i){C++;var o=i.clusteringEdgeReplacingIds;if(void 0===o)A.push(I);else for(var n=0;nA&&(A=I.edges.length),g+=I.edges.length,t+=Math.pow(I.edges.length,2),e+=1}g/=e;var i=(t/=e)-Math.pow(g,2),o=Math.sqrt(i),n=Math.floor(g+2*o);return n>A&&(n=A),n}},{key:"_createClusteredEdge",value:function(g,t,e,A,C){var I=Pz.cloneOptions(e,"edge");Fm(I,A),I.from=g,I.to=t,I.id="clusterEdge:"+OP(),void 0!==C&&Fm(I,C);var i=this.body.functions.createEdge(I);return i.clusteringEdgeReplacingIds=[e.id],i.connect(),this.body.edges[i.id]=i,i}},{key:"_clusterEdges",value:function(g,t,e,A){if(t instanceof vz){var C=t,I={};I[C.id]=C,t=I}if(g instanceof sB){var i=g,o={};o[i.id]=i,g=o}if(null==e)throw new Error("_clusterEdges: parameter clusterNode required");for(var n in void 0===A&&(A=e.clusterEdgeProperties),this._createClusterEdges(g,t,e,A),t)if(Object.prototype.hasOwnProperty.call(t,n)&&void 0!==this.body.edges[n]){var r=this.body.edges[n];this._backupEdgeOptions(r),r.setOptions({physics:!1})}for(var s in g)Object.prototype.hasOwnProperty.call(g,s)&&(this.clusteredNodes[s]={clusterId:e.id,node:this.body.nodes[s]},this.body.nodes[s].setOptions({physics:!1}))}},{key:"_getClusterNodeForNode",value:function(g){if(void 0!==g){var t=this.clusteredNodes[g];if(void 0!==t){var e=t.clusterId;if(void 0!==e)return this.body.nodes[e]}}}},{key:"_filter",value:function(g,t){var e=[];return Lm(g,(function(g){t(g)&&e.push(g)})),e}},{key:"_updateState",value:function(){var g,t=this,e=[],A={},C=function(g){Lm(t.body.nodes,(function(t){!0===t.isCluster&&g(t)}))};for(g in this.clusteredNodes){if(Object.prototype.hasOwnProperty.call(this.clusteredNodes,g))void 0===this.body.nodes[g]&&e.push(g)}C((function(g){for(var t=0;t0}g.endPointsValid()&&C||(A[e]=e)})),C((function(g){Lm(A,(function(e){delete g.containedEdges[e],Lm(g.edges,(function(C,I){C.id!==e?C.clusteringEdgeReplacingIds=t._filter(C.clusteringEdgeReplacingIds,(function(g){return!A[g]})):g.edges[I]=null})),g.edges=t._filter(g.edges,(function(g){return null!==g}))}))})),Lm(A,(function(g){delete t.clusteredEdges[g]})),Lm(A,(function(g){delete t.body.edges[g]})),Lm(Cc(this.body.edges),(function(g){var e=t.body.edges[g],A=t._isClusteredNode(e.fromId)||t._isClusteredNode(e.toId);if(A!==t._isClusteredEdge(e.id))if(A){var 
C=t._getClusterNodeForNode(e.fromId);void 0!==C&&t._clusterEdges(t.body.nodes[e.fromId],e,C);var I=t._getClusterNodeForNode(e.toId);void 0!==I&&t._clusterEdges(t.body.nodes[e.toId],e,I)}else delete t._clusterEdges[g],t._restoreEdge(e)}));for(var i=!1,o=!0,n=function(){var g=[];C((function(t){var e=Cc(t.containedNodes).length,A=!0===t.options.allowSingleNodeCluster;(A&&e<1||!A&&e<2)&&g.push(t.id)}));for(var e=0;e0,i=i||o};o;)n();i&&this._updateState()}},{key:"_isClusteredNode",value:function(g){return void 0!==this.clusteredNodes[g]}},{key:"_isClusteredEdge",value:function(g){return void 0!==this.clusteredEdges[g]}}]),g}();var Sz=function(){function g(t,e){var A;cd(this,g),void 0!==window&&(A=window.requestAnimationFrame||window.mozRequestAnimationFrame||window.webkitRequestAnimationFrame||window.msRequestAnimationFrame),window.requestAnimationFrame=void 0===A?function(g){g()}:A,this.body=t,this.canvas=e,this.redrawRequested=!1,this.renderTimer=void 0,this.requiresTimeout=!0,this.renderingActive=!1,this.renderRequests=0,this.allowRedraw=!0,this.dragging=!1,this.zooming=!1,this.options={},this.defaultOptions={hideEdgesOnDrag:!1,hideEdgesOnZoom:!1,hideNodesOnDrag:!1},yA(this.options,this.defaultOptions),this._determineBrowserMethod(),this.bindEventListeners()}return Bh(g,[{key:"bindEventListeners",value:function(){var g,t=this;this.body.emitter.on("dragStart",(function(){t.dragging=!0})),this.body.emitter.on("dragEnd",(function(){t.dragging=!1})),this.body.emitter.on("zoom",(function(){t.zooming=!0,window.clearTimeout(t.zoomTimeoutId),t.zoomTimeoutId=_f((function(){var g;t.zooming=!1,QA(g=t._requestRedraw).call(g,t)()}),250)})),this.body.emitter.on("_resizeNodes",(function(){t._resizeNodes()})),this.body.emitter.on("_redraw",(function(){!1===t.renderingActive&&t._redraw()})),this.body.emitter.on("_blockRedraw",(function(){t.allowRedraw=!1})),this.body.emitter.on("_allowRedraw",(function(){t.allowRedraw=!0,t.redrawRequested=!1})),this.body.emitter.on("_requestRedraw",QA(g=this._requestRedraw).call(g,this)),this.body.emitter.on("_startRendering",(function(){t.renderRequests+=1,t.renderingActive=!0,t._startRendering()})),this.body.emitter.on("_stopRendering",(function(){t.renderRequests-=1,t.renderingActive=t.renderRequests>0,t.renderTimer=void 0})),this.body.emitter.on("destroy",(function(){t.renderRequests=0,t.allowRedraw=!1,t.renderingActive=!1,!0===t.requiresTimeout?clearTimeout(t.renderTimer):window.cancelAnimationFrame(t.renderTimer),t.body.emitter.off()}))}},{key:"setOptions",value:function(g){if(void 0!==g){Sm(["hideEdgesOnDrag","hideEdgesOnZoom","hideNodesOnDrag"],this.options,g)}}},{key:"_requestNextFrame",value:function(g,t){if("undefined"!=typeof window){var e,A=window;return!0===this.requiresTimeout?e=_f(g,t):A.requestAnimationFrame&&(e=A.requestAnimationFrame(g)),e}}},{key:"_startRendering",value:function(){var g;!0===this.renderingActive&&(void 0===this.renderTimer&&(this.renderTimer=this._requestNextFrame(QA(g=this._renderStep).call(g,this),this.simulationInterval)))}},{key:"_renderStep",value:function(){!0===this.renderingActive&&(this.renderTimer=void 0,!0===this.requiresTimeout&&this._startRendering(),this._redraw(),!1===this.requiresTimeout&&this._startRendering())}},{key:"redraw",value:function(){this.body.emitter.emit("setSize"),this._redraw()}},{key:"_requestRedraw",value:function(){var 
g=this;!0!==this.redrawRequested&&!1===this.renderingActive&&!0===this.allowRedraw&&(this.redrawRequested=!0,this._requestNextFrame((function(){g._redraw(!1)}),0))}},{key:"_redraw",value:function(){var g=arguments.length>0&&void 0!==arguments[0]&&arguments[0];if(!0===this.allowRedraw){this.body.emitter.emit("initRedraw"),this.redrawRequested=!1;var t={drawExternalLabels:null};0!==this.canvas.frame.canvas.width&&0!==this.canvas.frame.canvas.height||this.canvas.setSize(),this.canvas.setTransform();var e=this.canvas.getContext(),A=this.canvas.frame.canvas.clientWidth,C=this.canvas.frame.canvas.clientHeight;if(e.clearRect(0,0,A,C),0===this.canvas.frame.clientWidth)return;if(e.save(),e.translate(this.body.view.translation.x,this.body.view.translation.y),e.scale(this.body.view.scale,this.body.view.scale),e.beginPath(),this.body.emitter.emit("beforeDrawing",e),e.closePath(),!1===g&&(!1===this.dragging||!0===this.dragging&&!1===this.options.hideEdgesOnDrag)&&(!1===this.zooming||!0===this.zooming&&!1===this.options.hideEdgesOnZoom)&&this._drawEdges(e),!1===this.dragging||!0===this.dragging&&!1===this.options.hideNodesOnDrag){var I=this._drawNodes(e,g).drawExternalLabels;t.drawExternalLabels=I}!1===g&&(!1===this.dragging||!0===this.dragging&&!1===this.options.hideEdgesOnDrag)&&(!1===this.zooming||!0===this.zooming&&!1===this.options.hideEdgesOnZoom)&&this._drawArrows(e),null!=t.drawExternalLabels&&t.drawExternalLabels(),!1===g&&this._drawSelectionBox(e),e.beginPath(),this.body.emitter.emit("afterDrawing",e),e.closePath(),e.restore(),!0===g&&e.clearRect(0,0,A,C)}}},{key:"_resizeNodes",value:function(){this.canvas.setTransform();var g=this.canvas.getContext();g.save(),g.translate(this.body.view.translation.x,this.body.view.translation.y),g.scale(this.body.view.scale,this.body.view.scale);var t,e=this.body.nodes;for(var A in e)Object.prototype.hasOwnProperty.call(e,A)&&((t=e[A]).resize(g),t.updateBoundingBox(g,t.selected));g.restore()}},{key:"_drawNodes",value:function(g){for(var t,e,A=arguments.length>1&&void 0!==arguments[1]&&arguments[1],C=this.body.nodes,I=this.body.nodeIndices,i=[],o=[],n=this.canvas.DOMtoCanvas({x:-20,y:-20}),r=this.canvas.DOMtoCanvas({x:this.canvas.frame.canvas.clientWidth+20,y:this.canvas.frame.canvas.clientHeight+20}),s={top:n.y,left:n.x,bottom:r.y,right:r.x},a=[],d=0;d0&&void 0!==arguments[0]?arguments[0]:this.pixelRatio;!0===this.initialized&&(this.cameraState.previousWidth=this.frame.canvas.width/g,this.cameraState.previousHeight=this.frame.canvas.height/g,this.cameraState.scale=this.body.view.scale,this.cameraState.position=this.DOMtoCanvas({x:.5*this.frame.canvas.width/g,y:.5*this.frame.canvas.height/g}))}},{key:"_setCameraState",value:function(){if(void 0!==this.cameraState.scale&&0!==this.frame.canvas.clientWidth&&0!==this.frame.canvas.clientHeight&&0!==this.pixelRatio&&this.cameraState.previousWidth>0&&this.cameraState.previousHeight>0){var g=this.frame.canvas.width/this.pixelRatio/this.cameraState.previousWidth,t=this.frame.canvas.height/this.pixelRatio/this.cameraState.previousHeight,e=this.cameraState.scale;1!=g&&1!=t?e=.5*this.cameraState.scale*(g+t):1!=g?e=this.cameraState.scale*g:1!=t&&(e=this.cameraState.scale*t),this.body.view.scale=e;var 
A=this.DOMtoCanvas({x:.5*this.frame.canvas.clientWidth,y:.5*this.frame.canvas.clientHeight}),C={x:A.x-this.cameraState.position.x,y:A.y-this.cameraState.position.y};this.body.view.translation.x+=C.x*this.body.view.scale,this.body.view.translation.y+=C.y*this.body.view.scale}}},{key:"_prepareValue",value:function(g){if("number"==typeof g)return g+"px";if("string"==typeof g){if(-1!==$p(g).call(g,"%")||-1!==$p(g).call(g,"px"))return g;if(-1===$p(g).call(g,"%"))return g+"px"}throw new Error("Could not use the value supplied for width or height:"+g)}},{key:"_create",value:function(){for(;this.body.container.hasChildNodes();)this.body.container.removeChild(this.body.container.firstChild);if(this.frame=document.createElement("div"),this.frame.className="vis-network",this.frame.style.position="relative",this.frame.style.overflow="hidden",this.frame.tabIndex=0,this.frame.canvas=document.createElement("canvas"),this.frame.canvas.style.position="relative",this.frame.appendChild(this.frame.canvas),this.frame.canvas.getContext)this._setPixelRatio(),this.setTransform();else{var g=document.createElement("DIV");g.style.color="red",g.style.fontWeight="bold",g.style.padding="10px",g.innerText="Error: your browser does not support HTML canvas",this.frame.canvas.appendChild(g)}this.body.container.appendChild(this.frame),this.body.view.scale=1,this.body.view.translation={x:.5*this.frame.canvas.clientWidth,y:.5*this.frame.canvas.clientHeight},this._bindHammer()}},{key:"_bindHammer",value:function(){var g=this;void 0!==this.hammer&&this.hammer.destroy(),this.drag={},this.pinch={},this.hammer=new db(this.frame.canvas),this.hammer.get("pinch").set({enable:!0}),this.hammer.get("pan").set({threshold:5,direction:db.DIRECTION_ALL}),jz(this.hammer,(function(t){g.body.eventListeners.onTouch(t)})),this.hammer.on("tap",(function(t){g.body.eventListeners.onTap(t)})),this.hammer.on("doubletap",(function(t){g.body.eventListeners.onDoubleTap(t)})),this.hammer.on("press",(function(t){g.body.eventListeners.onHold(t)})),this.hammer.on("panstart",(function(t){g.body.eventListeners.onDragStart(t)})),this.hammer.on("panmove",(function(t){g.body.eventListeners.onDrag(t)})),this.hammer.on("panend",(function(t){g.body.eventListeners.onDragEnd(t)})),this.hammer.on("pinch",(function(t){g.body.eventListeners.onPinch(t)})),this.frame.canvas.addEventListener("wheel",(function(t){g.body.eventListeners.onMouseWheel(t)})),this.frame.canvas.addEventListener("mousemove",(function(t){g.body.eventListeners.onMouseMove(t)})),this.frame.canvas.addEventListener("contextmenu",(function(t){g.body.eventListeners.onContext(t)})),this.hammerFrame=new db(this.frame),Lz(this.hammerFrame,(function(t){g.body.eventListeners.onRelease(t)}))}},{key:"setSize",value:function(){var g=arguments.length>0&&void 0!==arguments[0]?arguments[0]:this.options.width,t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:this.options.height;g=this._prepareValue(g),t=this._prepareValue(t);var 
e=!1,A=this.frame.canvas.width,C=this.frame.canvas.height,I=this.pixelRatio;if(this._setPixelRatio(),g!=this.options.width||t!=this.options.height||this.frame.style.width!=g||this.frame.style.height!=t)this._getCameraState(I),this.frame.style.width=g,this.frame.style.height=t,this.frame.canvas.style.width="100%",this.frame.canvas.style.height="100%",this.frame.canvas.width=Math.round(this.frame.canvas.clientWidth*this.pixelRatio),this.frame.canvas.height=Math.round(this.frame.canvas.clientHeight*this.pixelRatio),this.options.width=g,this.options.height=t,this.canvasViewCenter={x:.5*this.frame.clientWidth,y:.5*this.frame.clientHeight},e=!0;else{var i=Math.round(this.frame.canvas.clientWidth*this.pixelRatio),o=Math.round(this.frame.canvas.clientHeight*this.pixelRatio);this.frame.canvas.width===i&&this.frame.canvas.height===o||this._getCameraState(I),this.frame.canvas.width!==i&&(this.frame.canvas.width=i,e=!0),this.frame.canvas.height!==o&&(this.frame.canvas.height=o,e=!0)}return!0===e&&(this.body.emitter.emit("resize",{width:Math.round(this.frame.canvas.width/this.pixelRatio),height:Math.round(this.frame.canvas.height/this.pixelRatio),oldWidth:Math.round(A/this.pixelRatio),oldHeight:Math.round(C/this.pixelRatio)}),this._setCameraState()),this.initialized=!0,e}},{key:"getContext",value:function(){return this.frame.canvas.getContext("2d")}},{key:"_determinePixelRatio",value:function(){var g=this.getContext();if(void 0===g)throw new Error("Could not get canvax context");var t=1;return"undefined"!=typeof window&&(t=window.devicePixelRatio||1),t/(g.webkitBackingStorePixelRatio||g.mozBackingStorePixelRatio||g.msBackingStorePixelRatio||g.oBackingStorePixelRatio||g.backingStorePixelRatio||1)}},{key:"_setPixelRatio",value:function(){this.pixelRatio=this._determinePixelRatio()}},{key:"setTransform",value:function(){var g=this.getContext();if(void 0===g)throw new Error("Could not get canvax context");g.setTransform(this.pixelRatio,0,0,this.pixelRatio,0,0)}},{key:"_XconvertDOMtoCanvas",value:function(g){return(g-this.body.view.translation.x)/this.body.view.scale}},{key:"_XconvertCanvasToDOM",value:function(g){return g*this.body.view.scale+this.body.view.translation.x}},{key:"_YconvertDOMtoCanvas",value:function(g){return(g-this.body.view.translation.y)/this.body.view.scale}},{key:"_YconvertCanvasToDOM",value:function(g){return g*this.body.view.scale+this.body.view.translation.y}},{key:"canvasToDOM",value:function(g){return{x:this._XconvertCanvasToDOM(g.x),y:this._YconvertCanvasToDOM(g.y)}}},{key:"DOMtoCanvas",value:function(g){return{x:this._XconvertDOMtoCanvas(g.x),y:this._YconvertDOMtoCanvas(g.y)}}}]),g}();var Yz=function(){function g(t,e){var A,C,I=this;cd(this,g),this.body=t,this.canvas=e,this.animationSpeed=1/this.renderRefreshRate,this.animationEasingFunction="easeInOutQuint",this.easingTime=0,this.sourceScale=0,this.targetScale=0,this.sourceTranslation=0,this.targetTranslation=0,this.lockedOnNodeId=void 0,this.lockedOnNodeOffset=void 0,this.touchTime=0,this.viewFunction=void 0,this.body.emitter.on("fit",QA(A=this.fit).call(A,this)),this.body.emitter.on("animationFinished",(function(){I.body.emitter.emit("_stopRendering")})),this.body.emitter.on("unlockNode",QA(C=this.releaseNode).call(C,this))}return Bh(g,[{key:"setOptions",value:function(){var g=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};this.options=g}},{key:"fit",value:function(g){var t=arguments.length>1&&void 0!==arguments[1]&&arguments[1];g=function(g,t){var 
e=yA({nodes:t,minZoomLevel:Number.MIN_VALUE,maxZoomLevel:1},null!=g?g:{});if(!Vl(e.nodes))throw new TypeError("Nodes has to be an array of ids.");if(0===e.nodes.length&&(e.nodes=t),!("number"==typeof e.minZoomLevel&&e.minZoomLevel>0))throw new TypeError("Min zoom level has to be a number higher than zero.");if(!("number"==typeof e.maxZoomLevel&&e.minZoomLevel<=e.maxZoomLevel))throw new TypeError("Max zoom level has to be a number higher than min zoom level.");return e}(g,this.body.nodeIndices);var e,A,C=this.canvas.frame.canvas.clientWidth,I=this.canvas.frame.canvas.clientHeight;if(0===C||0===I)A=1,e=Pz.getRange(this.body.nodes,g.nodes);else if(!0===t){var i=0;for(var o in this.body.nodes){if(Object.prototype.hasOwnProperty.call(this.body.nodes,o))!0===this.body.nodes[o].predefinedPosition&&(i+=1)}if(i>.5*this.body.nodeIndices.length)return void this.fit(g,!1);e=Pz.getRange(this.body.nodes,g.nodes),A=12.662/(this.body.nodeIndices.length+7.4147)+.0964822,A*=Math.min(C/600,I/600)}else{this.body.emitter.emit("_resizeNodes"),e=Pz.getRange(this.body.nodes,g.nodes);var n=C/(1.1*Math.abs(e.maxX-e.minX)),r=I/(1.1*Math.abs(e.maxY-e.minY));A=n<=r?n:r}A>g.maxZoomLevel?A=g.maxZoomLevel:A1&&void 0!==arguments[1]?arguments[1]:{};if(void 0!==this.body.nodes[g]){var e={x:this.body.nodes[g].x,y:this.body.nodes[g].y};t.position=e,t.lockedOnNode=g,this.moveTo(t)}else console.error("Node: "+g+" cannot be found.")}},{key:"moveTo",value:function(g){if(void 0!==g){if(null!=g.offset){if(null!=g.offset.x){if(g.offset.x=+g.offset.x,!Zx(g.offset.x))throw new TypeError('The option "offset.x" has to be a finite number.')}else g.offset.x=0;if(null!=g.offset.y){if(g.offset.y=+g.offset.y,!Zx(g.offset.y))throw new TypeError('The option "offset.y" has to be a finite number.')}else g.offset.x=0}else g.offset={x:0,y:0};if(null!=g.position){if(null!=g.position.x){if(g.position.x=+g.position.x,!Zx(g.position.x))throw new TypeError('The option "position.x" has to be a finite number.')}else g.position.x=0;if(null!=g.position.y){if(g.position.y=+g.position.y,!Zx(g.position.y))throw new TypeError('The option "position.y" has to be a finite number.')}else g.position.x=0}else g.position=this.getViewPosition();if(null!=g.scale){if(g.scale=+g.scale,!(g.scale>0))throw new TypeError('The option "scale" has to be a number greater than zero.')}else g.scale=this.body.view.scale;void 0===g.animation&&(g.animation={duration:0}),!1===g.animation&&(g.animation={duration:0}),!0===g.animation&&(g.animation={}),void 0===g.animation.duration&&(g.animation.duration=1e3),void 0===g.animation.easingFunction&&(g.animation.easingFunction="easeInOutQuad"),this.animateView(g)}else g={}}},{key:"animateView",value:function(g){if(void 0!==g){this.animationEasingFunction=g.animation.easingFunction,this.releaseNode(),!0===g.locked&&(this.lockedOnNodeId=g.lockedOnNode,this.lockedOnNodeOffset=g.offset),0!=this.easingTime&&this._transitionRedraw(!0),this.sourceScale=this.body.view.scale,this.sourceTranslation=this.body.view.translation,this.targetScale=g.scale,this.body.view.scale=this.targetScale;var t,e,A=this.canvas.DOMtoCanvas({x:.5*this.canvas.frame.canvas.clientWidth,y:.5*this.canvas.frame.canvas.clientHeight}),C=A.x-g.position.x,I=A.y-g.position.y;if(this.targetTranslation={x:this.sourceTranslation.x+C*this.targetScale+g.offset.x,y:this.sourceTranslation.y+I*this.targetScale+g.offset.y},0===g.animation.duration)if(null!=this.lockedOnNodeId)this.viewFunction=QA(t=this._lockedRedraw).call(t,this),this.body.emitter.on("initRedraw",this.viewFunction);else 
this.body.view.scale=this.targetScale,this.body.view.translation=this.targetTranslation,this.body.emitter.emit("_requestRedraw");else this.animationSpeed=1/(60*g.animation.duration*.001)||1/60,this.animationEasingFunction=g.animation.easingFunction,this.viewFunction=QA(e=this._transitionRedraw).call(e,this),this.body.emitter.on("initRedraw",this.viewFunction),this.body.emitter.emit("_startRendering")}}},{key:"_lockedRedraw",value:function(){var g=this.body.nodes[this.lockedOnNodeId].x,t=this.body.nodes[this.lockedOnNodeId].y,e=this.canvas.DOMtoCanvas({x:.5*this.canvas.frame.canvas.clientWidth,y:.5*this.canvas.frame.canvas.clientHeight}),A=e.x-g,C=e.y-t,I=this.body.view.translation,i={x:I.x+A*this.body.view.scale+this.lockedOnNodeOffset.x,y:I.y+C*this.body.view.scale+this.lockedOnNodeOffset.y};this.body.view.translation=i}},{key:"releaseNode",value:function(){void 0!==this.lockedOnNodeId&&void 0!==this.viewFunction&&(this.body.emitter.off("initRedraw",this.viewFunction),this.lockedOnNodeId=void 0,this.lockedOnNodeOffset=void 0)}},{key:"_transitionRedraw",value:function(){var g=arguments.length>0&&void 0!==arguments[0]&&arguments[0];this.easingTime+=this.animationSpeed,this.easingTime=!0===g?1:this.easingTime;var t=$m[this.animationEasingFunction](this.easingTime);if(this.body.view.scale=this.sourceScale+(this.targetScale-this.sourceScale)*t,this.body.view.translation={x:this.sourceTranslation.x+(this.targetTranslation.x-this.sourceTranslation.x)*t,y:this.sourceTranslation.y+(this.targetTranslation.y-this.sourceTranslation.y)*t},this.easingTime>=1){var e;if(this.body.emitter.off("initRedraw",this.viewFunction),this.easingTime=0,null!=this.lockedOnNodeId)this.viewFunction=QA(e=this._lockedRedraw).call(e,this),this.body.emitter.on("initRedraw",this.viewFunction);this.body.emitter.emit("animationFinished")}}},{key:"getScale",value:function(){return this.body.view.scale}},{key:"getViewPosition",value:function(){return this.canvas.DOMtoCanvas({x:.5*this.canvas.frame.canvas.clientWidth,y:.5*this.canvas.frame.canvas.clientHeight})}}]),g}();function Wz(g){var t,e=g&&g.preventDefault||!1,A=g&&g.container||window,C={},I={keydown:{},keyup:{}},i={};for(t=97;t<=122;t++)i[String.fromCharCode(t)]={code:t-97+65,shift:!1};for(t=65;t<=90;t++)i[String.fromCharCode(t)]={code:t,shift:!0};for(t=0;t<=9;t++)i[""+t]={code:48+t,shift:!1};for(t=1;t<=12;t++)i["F"+t]={code:111+t,shift:!1};for(t=0;t<=9;t++)i["num"+t]={code:96+t,shift:!1};i["num*"]={code:106,shift:!1},i["num+"]={code:107,shift:!1},i["num-"]={code:109,shift:!1},i["num/"]={code:111,shift:!1},i["num."]={code:110,shift:!1},i.left={code:37,shift:!1},i.up={code:38,shift:!1},i.right={code:39,shift:!1},i.down={code:40,shift:!1},i.space={code:32,shift:!1},i.enter={code:13,shift:!1},i.shift={code:16,shift:void 0},i.esc={code:27,shift:!1},i.backspace={code:8,shift:!1},i.tab={code:9,shift:!1},i.ctrl={code:17,shift:!1},i.alt={code:18,shift:!1},i.delete={code:46,shift:!1},i.pageup={code:33,shift:!1},i.pagedown={code:34,shift:!1},i["="]={code:187,shift:!1},i["-"]={code:189,shift:!1},i["]"]={code:221,shift:!1},i["["]={code:219,shift:!1};var o=function(g){r(g,"keydown")},n=function(g){r(g,"keyup")},r=function(g,t){if(void 0!==I[t][g.keyCode]){for(var A=I[t][g.keyCode],C=0;C700&&(this.body.emitter.emit("fit",{duration:700}),this.touchTime=(new Date).valueOf())}},{key:"_stopMovement",value:function(){for(var g in 
this.boundFunctions)Object.prototype.hasOwnProperty.call(this.boundFunctions,g)&&(this.body.emitter.off("initRedraw",this.boundFunctions[g]),this.body.emitter.emit("_stopRendering"));this.boundFunctions={}}},{key:"_moveUp",value:function(){this.body.view.translation.y+=this.options.keyboard.speed.y}},{key:"_moveDown",value:function(){this.body.view.translation.y-=this.options.keyboard.speed.y}},{key:"_moveLeft",value:function(){this.body.view.translation.x+=this.options.keyboard.speed.x}},{key:"_moveRight",value:function(){this.body.view.translation.x-=this.options.keyboard.speed.x}},{key:"_zoomIn",value:function(){var g=this.body.view.scale,t=this.body.view.scale*(1+this.options.keyboard.speed.zoom),e=this.body.view.translation,A=t/g,C=(1-A)*this.canvas.canvasViewCenter.x+e.x*A,I=(1-A)*this.canvas.canvasViewCenter.y+e.y*A;this.body.view.scale=t,this.body.view.translation={x:C,y:I},this.body.emitter.emit("zoom",{direction:"+",scale:this.body.view.scale,pointer:null})}},{key:"_zoomOut",value:function(){var g=this.body.view.scale,t=this.body.view.scale/(1+this.options.keyboard.speed.zoom),e=this.body.view.translation,A=t/g,C=(1-A)*this.canvas.canvasViewCenter.x+e.x*A,I=(1-A)*this.canvas.canvasViewCenter.y+e.y*A;this.body.view.scale=t,this.body.view.translation={x:C,y:I},this.body.emitter.emit("zoom",{direction:"-",scale:this.body.view.scale,pointer:null})}},{key:"configureKeyboardBindings",value:function(){var g,t,e,A,C,I,i,o,n,r,s,a,d,h,l,c,u,p,f,v,y,m,b,w,x=this;(void 0!==this.keycharm&&this.keycharm.destroy(),!0===this.options.keyboard.enabled)&&(!0===this.options.keyboard.bindToWindow?this.keycharm=Wz({container:window,preventDefault:!0}):this.keycharm=Wz({container:this.canvas.frame,preventDefault:!0}),this.keycharm.reset(),!0===this.activated&&(QA(g=this.keycharm).call(g,"up",(function(){x.bindToRedraw("_moveUp")}),"keydown"),QA(t=this.keycharm).call(t,"down",(function(){x.bindToRedraw("_moveDown")}),"keydown"),QA(e=this.keycharm).call(e,"left",(function(){x.bindToRedraw("_moveLeft")}),"keydown"),QA(A=this.keycharm).call(A,"right",(function(){x.bindToRedraw("_moveRight")}),"keydown"),QA(C=this.keycharm).call(C,"=",(function(){x.bindToRedraw("_zoomIn")}),"keydown"),QA(I=this.keycharm).call(I,"num+",(function(){x.bindToRedraw("_zoomIn")}),"keydown"),QA(i=this.keycharm).call(i,"num-",(function(){x.bindToRedraw("_zoomOut")}),"keydown"),QA(o=this.keycharm).call(o,"-",(function(){x.bindToRedraw("_zoomOut")}),"keydown"),QA(n=this.keycharm).call(n,"[",(function(){x.bindToRedraw("_zoomOut")}),"keydown"),QA(r=this.keycharm).call(r,"]",(function(){x.bindToRedraw("_zoomIn")}),"keydown"),QA(s=this.keycharm).call(s,"pageup",(function(){x.bindToRedraw("_zoomIn")}),"keydown"),QA(a=this.keycharm).call(a,"pagedown",(function(){x.bindToRedraw("_zoomOut")}),"keydown"),QA(d=this.keycharm).call(d,"up",(function(){x.unbindFromRedraw("_moveUp")}),"keyup"),QA(h=this.keycharm).call(h,"down",(function(){x.unbindFromRedraw("_moveDown")}),"keyup"),QA(l=this.keycharm).call(l,"left",(function(){x.unbindFromRedraw("_moveLeft")}),"keyup"),QA(c=this.keycharm).call(c,"right",(function(){x.unbindFromRedraw("_moveRight")}),"keyup"),QA(u=this.keycharm).call(u,"=",(function(){x.unbindFromRedraw("_zoomIn")}),"keyup"),QA(p=this.keycharm).call(p,"num+",(function(){x.unbindFromRedraw("_zoomIn")}),"keyup"),QA(f=this.keycharm).call(f,"num-",(function(){x.unbindFromRedraw("_zoomOut")}),"keyup"),QA(v=this.keycharm).call(v,"-",(function(){x.unbindFromRedraw("_zoomOut")}),"keyup"),QA(y=this.keycharm).call(y,"[",(function(){x.unbindFro
mRedraw("_zoomOut")}),"keyup"),QA(m=this.keycharm).call(m,"]",(function(){x.unbindFromRedraw("_zoomIn")}),"keyup"),QA(b=this.keycharm).call(b,"pageup",(function(){x.unbindFromRedraw("_zoomIn")}),"keyup"),QA(w=this.keycharm).call(w,"pagedown",(function(){x.unbindFromRedraw("_zoomOut")}),"keyup")))}}]),g}();function Uz(g,t){var e=void 0!==kl&&bn(g)||g["@@iterator"];if(!e){if(Vl(g)||(e=function(g,t){var e;if(!g)return;if("string"==typeof g)return _z(g,t);var A=Sl(e=Object.prototype.toString.call(g)).call(e,8,-1);"Object"===A&&g.constructor&&(A=g.constructor.name);if("Map"===A||"Set"===A)return Jo(g);if("Arguments"===A||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(A))return _z(g,t)}(g))||t&&g&&"number"==typeof g.length){e&&(g=e);var A=0,C=function(){};return{s:C,n:function(){return A>=g.length?{done:!0}:{done:!1,value:g[A++]}},e:function(g){throw g},f:C}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var I,i=!0,o=!1;return{s:function(){e=e.call(g)},n:function(){var g=e.next();return i=g.done,g},e:function(g){o=!0,I=g},f:function(){try{i||null==e.return||e.return()}finally{if(o)throw I}}}}function _z(g,t){(null==t||t>g.length)&&(t=g.length);for(var e=0,A=new Array(t);e50&&(this.drag.pointer=this.getPointer(g.center),this.drag.pinched=!1,this.pinch.scale=this.body.view.scale,this.touchTime=(new Date).valueOf())}},{key:"onTap",value:function(g){var t=this.getPointer(g.center),e=this.selectionHandler.options.multiselect&&(g.changedPointers[0].ctrlKey||g.changedPointers[0].metaKey);this.checkSelectionChanges(t,e),this.selectionHandler.commitAndEmit(t,g),this.selectionHandler.generateClickEvent("click",g,t)}},{key:"onDoubleTap",value:function(g){var t=this.getPointer(g.center);this.selectionHandler.generateClickEvent("doubleClick",g,t)}},{key:"onHold",value:function(g){var t=this.getPointer(g.center),e=this.selectionHandler.options.multiselect;this.checkSelectionChanges(t,e),this.selectionHandler.commitAndEmit(t,g),this.selectionHandler.generateClickEvent("click",g,t),this.selectionHandler.generateClickEvent("hold",g,t)}},{key:"onRelease",value:function(g){if((new Date).valueOf()-this.touchTime>10){var t=this.getPointer(g.center);this.selectionHandler.generateClickEvent("release",g,t),this.touchTime=(new Date).valueOf()}}},{key:"onContext",value:function(g){var t=this.getPointer({x:g.clientX,y:g.clientY});this.selectionHandler.generateClickEvent("oncontext",g,t)}},{key:"checkSelectionChanges",value:function(g){!0===(arguments.length>1&&void 0!==arguments[1]&&arguments[1])?this.selectionHandler.selectAdditionalOnPoint(g):this.selectionHandler.selectOnPoint(g)}},{key:"_determineDifference",value:function(g,t){var e=function(g,t){for(var e=[],A=0;A=C.minX&&e.x<=C.maxX&&e.y>=C.minY&&e.y<=C.maxY}));Ec(I).call(I,(function(g){return t.selectionHandler.selectObject(t.body.nodes[g])}));var i=this.getPointer(g.center);this.selectionHandler.commitAndEmit(i,g),this.selectionHandler.generateClickEvent("dragEnd",g,this.getPointer(g.center),void 0,!0),this.body.emitter.emit("_requestRedraw")}else{var o=this.drag.selection;o&&o.length?(Ec(o).call(o,(function(g){g.node.options.fixed.x=g.xFixed,g.node.options.fixed.y=g.yFixed})),this.selectionHandler.generateClickEvent("dragEnd",g,this.getPointer(g.center)),this.body.emitter.emit("startSimulation")):(this.selectionHandler.generateClickEvent("dragEnd",g,this.getPointer(g.center),void 
0,!0),this.body.emitter.emit("_requestRedraw"))}}},{key:"onPinch",value:function(g){var t=this.getPointer(g.center);this.drag.pinched=!0,void 0===this.pinch.scale&&(this.pinch.scale=1);var e=this.pinch.scale*g.scale;this.zoom(e,t)}},{key:"zoom",value:function(g,t){if(!0===this.options.zoomView){var e=this.body.view.scale;g<1e-5&&(g=1e-5),g>10&&(g=10);var A=void 0;void 0!==this.drag&&!0===this.drag.dragging&&(A=this.canvas.DOMtoCanvas(this.drag.pointer));var C=this.body.view.translation,I=g/e,i=(1-I)*t.x+C.x*I,o=(1-I)*t.y+C.y*I;if(this.body.view.scale=g,this.body.view.translation={x:i,y:o},null!=A){var n=this.canvas.canvasToDOM(A);this.drag.pointer.x=n.x,this.drag.pointer.y=n.y}this.body.emitter.emit("_requestRedraw"),e0&&(this.popupObj=r[s[s.length-1]],I=!0)}if(void 0===this.popupObj&&!1===I){for(var d,h=this.body.edgeIndices,l=this.body.edges,c=[],u=0;u0&&(this.popupObj=l[c[c.length-1]],i="edge")}void 0!==this.popupObj?this.popupObj.id!==C&&(void 0===this.popup&&(this.popup=new hb(this.canvas.frame)),this.popup.popupTargetType=i,this.popup.popupTargetId=this.popupObj.id,this.popup.setPosition(g.x+3,g.y-5),this.popup.setText(this.popupObj.getTitle()),this.popup.show(),this.body.emitter.emit("showPopup",this.popupObj.id)):void 0!==this.popup&&(this.popup.hide(),this.body.emitter.emit("hidePopup"))}},{key:"_checkHidePopup",value:function(g){var t=this.selectionHandler._pointerToPositionObject(g),e=!1;if("node"===this.popup.popupTargetType){if(void 0!==this.body.nodes[this.popup.popupTargetId]&&!0===(e=this.body.nodes[this.popup.popupTargetId].isOverlappingWith(t))){var A=this.selectionHandler.getNodeAt(g);e=void 0!==A&&A.id===this.popup.popupTargetId}}else void 0===this.selectionHandler.getNodeAt(g)&&void 0!==this.body.edges[this.popup.popupTargetId]&&(e=this.body.edges[this.popup.popupTargetId].isOverlappingWith(t));!1===e&&(this.popupObj=void 0,this.popup.hide(),this.body.emitter.emit("hidePopup"))}}]),g}(),Kz={},Xz={get exports(){return Kz},set exports(g){Kz=g}},Jz=f,qz=Cx,$z=$b.getWeakData,gS=jw,tS=Ae,eS=_,AS=eg,CS=Zw,IS=$g,iS=UC.set,oS=UC.getterFor,nS=ur.find,rS=ur.findIndex,sS=Jz([].splice),aS=0,dS=function(g){return g.frozen||(g.frozen=new hS)},hS=function(){this.entries=[]},lS=function(g,t){return nS(g.entries,(function(g){return g[0]===t}))};hS.prototype={get:function(g){var t=lS(this,g);if(t)return t[1]},has:function(g){return!!lS(this,g)},set:function(g,t){var e=lS(this,g);e?e[1]=t:this.entries.push([g,t])},delete:function(g){var t=rS(this.entries,(function(t){return t[0]===g}));return~t&&sS(this.entries,t,1),!!~t}};var cS,uS={getConstructor:function(g,t,e,A){var C=g((function(g,C){gS(g,I),iS(g,{type:t,id:aS++,frozen:void 0}),eS(C)||CS(C,g[A],{that:g,AS_ENTRIES:e})})),I=C.prototype,i=oS(t),o=function(g,t,e){var A=i(g),C=$z(tS(t),!0);return!0===C?dS(A).set(t,e):C[A.id]=e,g};return qz(I,{delete:function(g){var t=i(this);if(!AS(g))return!1;var e=$z(g);return!0===e?dS(t).delete(g):e&&IS(e,t.id)&&delete e[t.id]},has:function(g){var t=i(this);if(!AS(g))return!1;var e=$z(g);return!0===e?dS(t).has(g):e&&IS(e,t.id)}}),qz(I,e?{get:function(g){var t=i(this);if(AS(g)){var e=$z(g);return!0===e?dS(t).get(g):e?e[t.id]:void 0}},set:function(g,t){return o(this,g,t)}}:{add:function(g){return o(this,g,!0)}}),C}},pS=nw,fS=i,vS=f,yS=Cx,mS=$b,bS=ex,wS=uS,xS=eg,kS=UC.enforce,ES=o,OS=DC,TS=Object,DS=Array.isArray,NS=TS.isExtensible,RS=TS.isFrozen,PS=TS.isSealed,MS=TS.freeze,BS=TS.seal,zS={},SS={},ZS=!fS.ActiveXObject&&"ActiveXObject"in fS,FS=function(g){return function(){return 
g(this,arguments.length?arguments[0]:void 0)}},GS=bS("WeakMap",FS,wS),jS=GS.prototype,LS=vS(jS.set);if(OS)if(ZS){cS=wS.getConstructor(FS,"WeakMap",!0),mS.enable();var VS=vS(jS.delete),YS=vS(jS.has),WS=vS(jS.get);yS(jS,{delete:function(g){if(xS(g)&&!NS(g)){var t=kS(this);return t.frozen||(t.frozen=new cS),VS(this,g)||t.frozen.delete(g)}return VS(this,g)},has:function(g){if(xS(g)&&!NS(g)){var t=kS(this);return t.frozen||(t.frozen=new cS),YS(this,g)||t.frozen.has(g)}return YS(this,g)},get:function(g){if(xS(g)&&!NS(g)){var t=kS(this);return t.frozen||(t.frozen=new cS),YS(this,g)?WS(this,g):t.frozen.get(g)}return WS(this,g)},set:function(g,t){if(xS(g)&&!NS(g)){var e=kS(this);e.frozen||(e.frozen=new cS),YS(this,g)?LS(this,g,t):e.frozen.set(g,t)}else LS(this,g,t);return this}})}else pS&&ES((function(){var g=MS([]);return LS(new GS,g,1),!RS(g)}))&&yS(jS,{set:function(g,t){var e;return DS(g)&&(RS(g)?e=zS:PS(g)&&(e=SS)),LS(this,g,t),e==zS&&MS(g),e==SS&&BS(g),this}});var QS=Ag.WeakMap;!function(g){g.exports=QS}(Xz);var US,_S,HS,KS,XS,JS=e(Kz);function qS(g,t,e,A){if("a"===e&&!A)throw new TypeError("Private accessor was defined without a getter");if("function"==typeof t?g!==t||!A:!t.has(g))throw new TypeError("Cannot read private member from an object whose class did not declare it");return"m"===e?A:"a"===e?A.call(g):A?A.value:t.get(g)}function $S(g,t,e,A,C){if("m"===A)throw new TypeError("Private method is not writable");if("a"===A&&!C)throw new TypeError("Private accessor was defined without a setter");if("function"==typeof t?g!==t||!C:!t.has(g))throw new TypeError("Cannot write private member to an object whose class did not declare it");return"a"===A?C.call(g,e):C?C.value=e:t.set(g,e),e}function gZ(g,t){var e=void 0!==kl&&bn(g)||g["@@iterator"];if(!e){if(Vl(g)||(e=function(g,t){var e;if(!g)return;if("string"==typeof g)return tZ(g,t);var A=Sl(e=Object.prototype.toString.call(g)).call(e,8,-1);"Object"===A&&g.constructor&&(A=g.constructor.name);if("Map"===A||"Set"===A)return Jo(g);if("Arguments"===A||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(A))return tZ(g,t)}(g))||t&&g&&"number"==typeof g.length){e&&(g=e);var A=0,C=function(){};return{s:C,n:function(){return A>=g.length?{done:!0}:{done:!1,value:g[A++]}},e:function(g){throw g},f:C}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var I,i=!0,o=!1;return{s:function(){e=e.call(g)},n:function(){var g=e.next();return i=g.done,g},e:function(g){o=!0,I=g},f:function(){try{i||null==e.return||e.return()}finally{if(o)throw I}}}}function tZ(g,t){(null==t||t>g.length)&&(t=g.length);for(var e=0,A=new Array(t);e0&&void 0!==arguments[0]?arguments[0]:function(){};cd(this,g),HS.set(this,new AZ),KS.set(this,new AZ),XS.set(this,void 0),$S(this,XS,t,"f")}return Bh(g,[{key:"sizeNodes",get:function(){return qS(this,HS,"f").size}},{key:"sizeEdges",get:function(){return qS(this,KS,"f").size}},{key:"getNodes",value:function(){return qS(this,HS,"f").getSelection()}},{key:"getEdges",value:function(){return qS(this,KS,"f").getSelection()}},{key:"addNodes",value:function(){var g;(g=qS(this,HS,"f")).add.apply(g,arguments)}},{key:"addEdges",value:function(){var g;(g=qS(this,KS,"f")).add.apply(g,arguments)}},{key:"deleteNodes",value:function(g){qS(this,HS,"f").delete(g)}},{key:"deleteEdges",value:function(g){qS(this,KS,"f").delete(g)}},{key:"clear",value:function(){qS(this,HS,"f").clear(),qS(this,KS,"f").clear()}},{key:"commit",value:function(){for(var 
g,t,e={nodes:qS(this,HS,"f").commit(),edges:qS(this,KS,"f").commit()},A=arguments.length,C=new Array(A),I=0;I=g.length?{done:!0}:{done:!1,value:g[A++]}},e:function(g){throw g},f:C}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var I,i=!0,o=!1;return{s:function(){e=e.call(g)},n:function(){var g=e.next();return i=g.done,g},e:function(g){o=!0,I=g},f:function(){try{i||null==e.return||e.return()}finally{if(o)throw I}}}}function iZ(g,t){(null==t||t>g.length)&&(t=g.length);for(var e=0,A=new Array(t);e4&&void 0!==arguments[4]&&arguments[4],I=this._initBaseEvent(t,e);if(!0===C)I.nodes=[],I.edges=[];else{var i=this.getSelection();I.nodes=i.nodes,I.edges=i.edges}void 0!==A&&(I.previousSelection=A),"click"==g&&(I.items=this.getClickedItems(e)),void 0!==t.controlEdge&&(I.controlEdge=t.controlEdge),this.body.emitter.emit(g,I)}},{key:"selectObject",value:function(g){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:this.options.selectConnectedEdges;if(void 0!==g){if(g instanceof sB){var e;if(!0===t)(e=this._selectionAccumulator).addEdges.apply(e,wl(g.edges));this._selectionAccumulator.addNodes(g)}else this._selectionAccumulator.addEdges(g);return!0}return!1}},{key:"deselectObject",value:function(g){!0===g.isSelected()&&(g.selected=!1,this._removeFromSelection(g))}},{key:"_getAllNodesOverlappingWith",value:function(g){for(var t=[],e=this.body.nodes,A=0;A1&&void 0!==arguments[1])||arguments[1],e=this._pointerToPositionObject(g),A=this._getAllNodesOverlappingWith(e);return A.length>0?!0===t?this.body.nodes[A[A.length-1]]:A[A.length-1]:void 0}},{key:"_getEdgesOverlappingWith",value:function(g,t){for(var e=this.body.edges,A=0;A1&&void 0!==arguments[1])||arguments[1],e=this.canvas.DOMtoCanvas(g),A=10,C=null,I=this.body.edges,i=0;i0&&(this.generateClickEvent("deselectEdge",t,g,C),e=!0),A.nodes.deleted.length>0&&(this.generateClickEvent("deselectNode",t,g,C),e=!0),A.nodes.added.length>0&&(this.generateClickEvent("selectNode",t,g),e=!0),A.edges.added.length>0&&(this.generateClickEvent("selectEdge",t,g),e=!0),!0===e&&this.generateClickEvent("select",t,g)}},{key:"getSelection",value:function(){return{nodes:this.getSelectedNodeIds(),edges:this.getSelectedEdgeIds()}}},{key:"getSelectedNodes",value:function(){return this._selectionAccumulator.getNodes()}},{key:"getSelectedEdges",value:function(){return this._selectionAccumulator.getEdges()}},{key:"getSelectedNodeIds",value:function(){var g;return ql(g=this._selectionAccumulator.getNodes()).call(g,(function(g){return g.id}))}},{key:"getSelectedEdgeIds",value:function(){var g;return ql(g=this._selectionAccumulator.getEdges()).call(g,(function(g){return g.id}))}},{key:"setSelection",value:function(g){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};if(!g||!g.nodes&&!g.edges)throw new TypeError("Selection must be an object with nodes and/or edges properties");if((t.unselectAll||void 0===t.unselectAll)&&this.unselectAll(),g.nodes){var e,A=IZ(g.nodes);try{for(A.s();!(e=A.n()).done;){var C=e.value,I=this.body.nodes[C];if(!I)throw new RangeError('Node with id "'+C+'" not found');this.selectObject(I,t.highlightEdges)}}catch(g){A.e(g)}finally{A.f()}}if(g.edges){var i,o=IZ(g.edges);try{for(o.s();!(i=o.n()).done;){var n=i.value,r=this.body.edges[n];if(!r)throw new RangeError('Edge with id "'+n+'" not 
found');this.selectObject(r)}}catch(g){o.e(g)}finally{o.f()}}this.body.emitter.emit("_requestRedraw"),this._selectionAccumulator.commit()}},{key:"selectNodes",value:function(g){var t=!(arguments.length>1&&void 0!==arguments[1])||arguments[1];if(!g||void 0===g.length)throw"Selection must be an array with ids";this.setSelection({nodes:g},{highlightEdges:t})}},{key:"selectEdges",value:function(g){if(!g||void 0===g.length)throw"Selection must be an array with ids";this.setSelection({edges:g})}},{key:"updateSelection",value:function(){for(var g in this._selectionAccumulator.getNodes())Object.prototype.hasOwnProperty.call(this.body.nodes,g.id)||this._selectionAccumulator.deleteNodes(g);for(var t in this._selectionAccumulator.getEdges())Object.prototype.hasOwnProperty.call(this.body.edges,t.id)||this._selectionAccumulator.deleteEdges(t)}},{key:"getClickedItems",value:function(g){for(var t=this.canvas.DOMtoCanvas(g),e=[],A=this.body.nodeIndices,C=this.body.nodes,I=A.length-1;I>=0;I--){var i=C[A[I]].getItemsOnPoint(t);e.push.apply(e,i)}for(var o=this.body.edgeIndices,n=this.body.edges,r=o.length-1;r>=0;r--){var s=n[o[r]].getItemsOnPoint(t);e.push.apply(e,s)}return e}}]),g}(),nZ={},rZ={get exports(){return nZ},set exports(g){nZ=g}},sZ={};!function(g){!function(g){function t(g,t){if(!(g instanceof t))throw new TypeError("Cannot call a class as a function")}g.__esModule=!0,g.sort=c;var e=32,A=7,C=256,I=[1,10,100,1e3,1e4,1e5,1e6,1e7,1e8,1e9];function i(g){return g<1e5?g<100?g<10?0:1:g<1e4?g<1e3?2:3:4:g<1e7?g<1e6?5:6:g<1e9?g<1e8?7:8:9}function o(g,t){if(g===t)return 0;if(~~g===g&&~~t===t){if(0===g||0===t)return g=0)return-1;if(g>=0)return 1;g=-g,t=-t}var e=i(g),A=i(t),C=0;return eA&&(t*=I[e-A-1],g/=10,C=1),g===t?C:g=e;)t|=1&g,g>>=1;return g+t}function r(g,t,e,A){var C=t+1;if(C===e)return 1;if(A(g[C++],g[t])<0){for(;C=0;)C++;return C-t}function s(g,t,e){for(e--;t>>1;C(I,g[n])<0?o=n:i=n+1}var r=A-i;switch(r){case 3:g[i+3]=g[i+2];case 2:g[i+2]=g[i+1];case 1:g[i+1]=g[i];break;default:for(;r>0;)g[i+r]=g[i+r-1],r--}g[i]=I}}function d(g,t,e,A,C,I){var i=0,o=0,n=1;if(I(g,t[e+C])>0){for(o=A-C;n0;)i=n,(n=1+(n<<1))<=0&&(n=o);n>o&&(n=o),i+=C,n+=C}else{for(o=C+1;no&&(n=o);var r=i;i=C-n,n=C-r}for(i++;i>>1);I(g,t[e+s])>0?i=s+1:n=s}return n}function h(g,t,e,A,C,I){var i=0,o=0,n=1;if(I(g,t[e+C])<0){for(o=C+1;no&&(n=o);var r=i;i=C-n,n=C-r}else{for(o=A-C;n=0;)i=n,(n=1+(n<<1))<=0&&(n=o);n>o&&(n=o),i+=C,n+=C}for(i++;i>>1);I(g,t[e+s])<0?n=s:i=s+1}return n}var l=function(){function g(e,I){t(this,g),this.array=null,this.compare=null,this.minGallop=A,this.length=0,this.tmpStorageLength=C,this.stackLength=0,this.runStart=null,this.runLength=null,this.stackSize=0,this.array=e,this.compare=I,this.length=e.length,this.length<2*C&&(this.tmpStorageLength=this.length>>>1),this.tmp=new Array(this.tmpStorageLength),this.stackLength=this.length<120?5:this.length<1542?10:this.length<119151?19:40,this.runStart=new Array(this.stackLength),this.runLength=new Array(this.stackLength)}return g.prototype.pushRun=function(g,t){this.runStart[this.stackSize]=g,this.runLength[this.stackSize]=t,this.stackSize+=1},g.prototype.mergeRuns=function(){for(;this.stackSize>1;){var g=this.stackSize-2;if(g>=1&&this.runLength[g-1]<=this.runLength[g]+this.runLength[g+1]||g>=2&&this.runLength[g-2]<=this.runLength[g]+this.runLength[g-1])this.runLength[g-1]this.runLength[g+1])break;this.mergeAt(g)}},g.prototype.forceMergeRuns=function(){for(;this.stackSize>1;){var 
g=this.stackSize-2;g>0&&this.runLength[g-1]=A||u>=A);if(p)break;l<0&&(l=0),l+=2}if(this.minGallop=l,l<1&&(this.minGallop=1),1===t){for(n=0;n=0;n--)i[c+n]=i[l+n];if(0===t){v=!0;break}}if(i[a--]=o[s--],1==--C){v=!0;break}if(0!=(f=C-d(i[r],o,0,C,C-1,I))){for(C-=f,c=1+(a-=f),l=1+(s-=f),n=0;n=A||f>=A);if(v)break;u<0&&(u=0),u+=2}if(this.minGallop=u,u<1&&(this.minGallop=1),1===C){for(c=1+(a-=t),l=1+(r-=t),n=t-1;n>=0;n--)i[c+n]=i[l+n];i[a]=o[s]}else{if(0===C)throw new Error("mergeHigh preconditions were not respected");for(l=a-(C-1),n=0;n=0;n--)i[c+n]=i[l+n];i[a]=o[s]}else for(l=a-(C-1),n=0;nd&&(h=d),a(g,A,A+h,A+i,t),i=h}s.pushRun(A,i),s.mergeRuns(),I-=i,A+=i}while(0!==I);s.forceMergeRuns()}}}}(g)}(sZ),function(g){g.exports=sZ}(rZ);var aZ=e(nZ);function dZ(g){var t=function(){if("undefined"==typeof Reflect||!yP)return!1;if(yP.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(yP(Boolean,[],(function(){}))),!0}catch(g){return!1}}();return function(){var e,A=vk(g);if(t){var C=vk(this).constructor;e=yP(A,arguments,C)}else e=A.apply(this,arguments);return hk(this,e)}}var hZ=function(){function g(){cd(this,g)}return Bh(g,[{key:"abstract",value:function(){throw new Error("Can't instantiate abstract class!")}},{key:"fake_use",value:function(){}},{key:"curveType",value:function(){return this.abstract()}},{key:"getPosition",value:function(g){return this.fake_use(g),this.abstract()}},{key:"setPosition",value:function(g,t){var e=arguments.length>2&&void 0!==arguments[2]?arguments[2]:void 0;this.fake_use(g,t,e),this.abstract()}},{key:"getTreeSize",value:function(g){return this.fake_use(g),this.abstract()}},{key:"sort",value:function(g){this.fake_use(g),this.abstract()}},{key:"fix",value:function(g,t){this.fake_use(g,t),this.abstract()}},{key:"shift",value:function(g,t){this.fake_use(g,t),this.abstract()}}]),g}(),lZ=function(g){dk(e,g);var t=dZ(e);function e(g){var A;return cd(this,e),(A=t.call(this)).layout=g,A}return Bh(e,[{key:"curveType",value:function(){return"horizontal"}},{key:"getPosition",value:function(g){return g.x}},{key:"setPosition",value:function(g,t){var e=arguments.length>2&&void 0!==arguments[2]?arguments[2]:void 0;void 0!==e&&this.layout.hierarchical.addToOrdering(g,e),g.x=t}},{key:"getTreeSize",value:function(g){var t=this.layout.hierarchical.getTreeSize(this.layout.body.nodes,g);return{min:t.min_x,max:t.max_x}}},{key:"sort",value:function(g){nZ.sort(g,(function(g,t){return g.x-t.x}))}},{key:"fix",value:function(g,t){g.y=this.layout.options.hierarchical.levelSeparation*t,g.options.fixed.y=!0}},{key:"shift",value:function(g,t){this.layout.body.nodes[g].x+=t}}]),e}(hZ),cZ=function(g){dk(e,g);var t=dZ(e);function e(g){var A;return cd(this,e),(A=t.call(this)).layout=g,A}return Bh(e,[{key:"curveType",value:function(){return"vertical"}},{key:"getPosition",value:function(g){return g.y}},{key:"setPosition",value:function(g,t){var e=arguments.length>2&&void 0!==arguments[2]?arguments[2]:void 0;void 0!==e&&this.layout.hierarchical.addToOrdering(g,e),g.y=t}},{key:"getTreeSize",value:function(g){var t=this.layout.hierarchical.getTreeSize(this.layout.body.nodes,g);return{min:t.min_y,max:t.max_y}}},{key:"sort",value:function(g){nZ.sort(g,(function(g,t){return g.y-t.y}))}},{key:"fix",value:function(g,t){g.x=this.layout.options.hierarchical.levelSeparation*t,g.options.fixed.x=!0}},{key:"shift",value:function(g,t){this.layout.body.nodes[g].y+=t}}]),e}(hZ),uZ={},pZ={get exports(){return uZ},set 
exports(g){uZ=g}},fZ=ur.every;De({target:"Array",proto:!0,forced:!cc("every")},{every:function(g){return fZ(this,g,arguments.length>1?arguments[1]:void 0)}});var vZ=FA("Array").every,yZ=rg,mZ=vZ,bZ=Array.prototype,wZ=function(g){var t=g.every;return g===bZ||yZ(bZ,g)&&t===bZ.every?mZ:t},xZ=wZ;!function(g){g.exports=xZ}(pZ);var kZ=e(uZ);function EZ(g,t){var e=void 0!==kl&&bn(g)||g["@@iterator"];if(!e){if(Vl(g)||(e=function(g,t){var e;if(!g)return;if("string"==typeof g)return OZ(g,t);var A=Sl(e=Object.prototype.toString.call(g)).call(e,8,-1);"Object"===A&&g.constructor&&(A=g.constructor.name);if("Map"===A||"Set"===A)return Jo(g);if("Arguments"===A||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(A))return OZ(g,t)}(g))||t&&g&&"number"==typeof g.length){e&&(g=e);var A=0,C=function(){};return{s:C,n:function(){return A>=g.length?{done:!0}:{done:!1,value:g[A++]}},e:function(g){throw g},f:C}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var I,i=!0,o=!1;return{s:function(){e=e.call(g)},n:function(){var g=e.next();return i=g.done,g},e:function(g){o=!0,I=g},f:function(){try{i||null==e.return||e.return()}finally{if(o)throw I}}}}function OZ(g,t){(null==t||t>g.length)&&(t=g.length);for(var e=0,A=new Array(t);e=t[A])&&(t[A]=t[e]+1)})),t}function DZ(g,t,e,A){var C,I,i=pf(null),o=iN(C=wl(QR(A).call(A))).call(C,(function(g,t){return g+1+t.edges.length}),0),n=e+"Id",r="to"===e?1:-1,s=EZ(A);try{var a=function(){var C=bl(I.value,2),s=C[0],a=C[1];if(!A.has(s)||!g(a))return"continue";i[s]=0;for(var d,h=[a],l=0,c=function(){var g,C;if(!A.has(s))return"continue";var I=i[d.id]+r;if(Ec(g=Ap(C=d.edges).call(C,(function(g){return g.connected&&g.to!==g.from&&g[e]!==d&&A.has(g.toId)&&A.has(g.fromId)}))).call(g,(function(g){var A=g[n],C=i[A];(null==C||t(I,C))&&(i[A]=I,h.push(g[e]))})),l>o)return{v:{v:TZ(A,i)}};++l};d=h.pop();){var u=c();if("continue"!==u&&"object"===kh(u))return u.v}};for(s.s();!(I=s.n()).done;){var d=a();if("continue"!==d&&"object"===kh(d))return d.v}}catch(g){s.e(g)}finally{s.f()}return i}var NZ=function(){function g(){cd(this,g),this.childrenReference={},this.parentReference={},this.trees={},this.distributionOrdering={},this.levels={},this.distributionIndex={},this.isTree=!1,this.treeIndex=-1}return Bh(g,[{key:"addRelation",value:function(g,t){void 0===this.childrenReference[g]&&(this.childrenReference[g]=[]),this.childrenReference[g].push(t),void 0===this.parentReference[t]&&(this.parentReference[t]=[]),this.parentReference[t].push(g)}},{key:"checkIfTree",value:function(){for(var g in this.parentReference)if(this.parentReference[g].length>1)return void(this.isTree=!1);this.isTree=!0}},{key:"numTrees",value:function(){return this.treeIndex+1}},{key:"setTreeIndex",value:function(g,t){void 0!==t&&void 0===this.trees[g.id]&&(this.trees[g.id]=t,this.treeIndex=Math.max(t,this.treeIndex))}},{key:"ensureLevel",value:function(g){void 0===this.levels[g]&&(this.levels[g]=0)}},{key:"getMaxLevel",value:function(g){var t=this,e={};return function g(A){if(void 0!==e[A])return e[A];var C=t.levels[A];if(t.childrenReference[A]){var I=t.childrenReference[A];if(I.length>0)for(var i=0;i0&&(e.levelSeparation*=-1):e.levelSeparation<0&&(e.levelSeparation*=-1),this.setDirectionStrategy(),this.body.emitter.emit("_resetHierarchicalLayout"),this.adaptAllOptionsForHierarchicalLayout(t);if(!0===A)return this.body.emitter.emit("refresh"),Fm(t,this.optionsBackup)}return 
t}},{key:"_resetRNG",value:function(g){this.initialRandomSeed=g,this._rng=xm(this.initialRandomSeed)}},{key:"adaptAllOptionsForHierarchicalLayout",value:function(g){if(!0===this.options.hierarchical.enabled){var t=this.optionsBackup.physics;void 0===g.physics||!0===g.physics?(g.physics={enabled:void 0===t.enabled||t.enabled,solver:"hierarchicalRepulsion"},t.enabled=void 0===t.enabled||t.enabled,t.solver=t.solver||"barnesHut"):"object"===kh(g.physics)?(t.enabled=void 0===g.physics.enabled||g.physics.enabled,t.solver=g.physics.solver||"barnesHut",g.physics.solver="hierarchicalRepulsion"):!1!==g.physics&&(t.solver="barnesHut",g.physics={solver:"hierarchicalRepulsion"});var e=this.direction.curveType();if(void 0===g.edges)this.optionsBackup.edges={smooth:{enabled:!0,type:"dynamic"}},g.edges={smooth:!1};else if(void 0===g.edges.smooth)this.optionsBackup.edges={smooth:{enabled:!0,type:"dynamic"}},g.edges.smooth=!1;else if("boolean"==typeof g.edges.smooth)this.optionsBackup.edges={smooth:g.edges.smooth},g.edges.smooth={enabled:g.edges.smooth,type:e};else{var A=g.edges.smooth;void 0!==A.type&&"dynamic"!==A.type&&(e=A.type),this.optionsBackup.edges={smooth:{enabled:void 0===A.enabled||A.enabled,type:void 0===A.type?"dynamic":A.type,roundness:void 0===A.roundness?.5:A.roundness,forceDirection:void 0!==A.forceDirection&&A.forceDirection}},g.edges.smooth={enabled:void 0===A.enabled||A.enabled,type:e,roundness:void 0===A.roundness?.5:A.roundness,forceDirection:void 0!==A.forceDirection&&A.forceDirection}}this.body.emitter.emit("_forceDisableDynamicCurves",e)}return g}},{key:"positionInitially",value:function(g){if(!0!==this.options.hierarchical.enabled){this._resetRNG(this.initialRandomSeed);for(var t=g.length+50,e=0;eC){for(var i=g.length;g.length>C&&A<=10;){A+=1;var o=g.length;if(A%3==0?this.body.modules.clustering.clusterBridges(I):this.body.modules.clustering.clusterOutliers(I),o==g.length&&A%3!=0)return this._declusterAll(),this.body.emitter.emit("_layoutFailed"),void console.info("This network could not be positioned by this version of the improved layout algorithm. 
Please disable improvedLayout for better performance.")}this.body.modules.kamadaKawai.setOptions({springLength:Math.max(150,2*i)})}A>10&&console.info("The clustering didn't succeed within the amount of interations allowed, progressing with partial result."),this.body.modules.kamadaKawai.solve(g,this.body.edgeIndices,!0),this._shiftToCenter();for(var n=0;n0){var g,t,e=!1,A=!1;for(t in this.lastNodeOnLevel={},this.hierarchical=new NZ,this.body.nodes)Object.prototype.hasOwnProperty.call(this.body.nodes,t)&&(void 0!==(g=this.body.nodes[t]).options.level?(e=!0,this.hierarchical.levels[t]=g.options.level):A=!0);if(!0===A&&!0===e)throw new Error("To use the hierarchical layout, nodes require either no predefined levels or levels have to be defined for all nodes.");if(!0===A){var C=this.options.hierarchical.sortMethod;"hubsize"===C?this._determineLevelsByHubsize():"directed"===C?this._determineLevelsDirected():"custom"===C&&this._determineLevelsCustomCallback()}for(var I in this.body.nodes)Object.prototype.hasOwnProperty.call(this.body.nodes,I)&&this.hierarchical.ensureLevel(I);var i=this._getDistribution();this._generateMap(),this._placeNodesByHierarchy(i),this._condenseHierarchy(),this._shiftToCenter()}}},{key:"_condenseHierarchy",value:function(){var g=this,t=!1,e={},A=function(t,e){var A=g.hierarchical.trees;for(var C in A)Object.prototype.hasOwnProperty.call(A,C)&&A[C]===t&&g.direction.shift(C,e)},C=function(){for(var t=[],e=0;e0)for(var I=0;I1&&void 0!==arguments[1]?arguments[1]:1e9,A=1e9,C=1e9,I=1e9,i=-1e9;for(var o in t)if(Object.prototype.hasOwnProperty.call(t,o)){var n=g.body.nodes[o],r=g.hierarchical.levels[n.id],s=g.direction.getPosition(n),a=bl(g._getSpaceAroundNode(n,t),2),d=a[0],h=a[1];A=Math.min(d,A),C=Math.min(h,C),r<=e&&(I=Math.min(s,I),i=Math.max(s,i))}return[I,i,A,C]},o=function(t,e){var A=g.hierarchical.getMaxLevel(t.id),C=g.hierarchical.getMaxLevel(e.id);return Math.min(A,C)},n=function(t,e,A){for(var C=g.hierarchical,I=0;I1)for(var n=0;n2&&void 0!==arguments[2]&&arguments[2],n=g.direction.getPosition(e),r=g.direction.getPosition(A),s=Math.abs(r-n),a=g.options.hierarchical.nodeSpacing;if(s>a){var d={},h={};I(e,d),I(A,h);var l=o(e,A),c=i(d,l),u=i(h,l),p=c[1],f=u[0],v=u[2];if(Math.abs(p-f)>a){var y=p-f+a;y<-v+a&&(y=-v+a),y<0&&(g._shiftBlock(A.id,y),t=!0,!0===C&&g._centerParent(A))}}},s=function(A,C){for(var o=C.id,n=C.edges,r=g.hierarchical.levels[C.id],s=g.options.hierarchical.levelSeparation*g.options.hierarchical.levelSeparation,a={},d=[],h=0;h0?h=Math.min(d,a-g.options.hierarchical.nodeSpacing):d<0&&(h=-Math.min(-d,s-g.options.hierarchical.nodeSpacing)),0!=h&&(g._shiftBlock(C.id,h),t=!0)}(v),function(e){var A=g.direction.getPosition(C),I=bl(g._getSpaceAroundNode(C),2),i=I[0],o=I[1],n=e-A,r=A;n>0?r=Math.min(A+(o-g.options.hierarchical.nodeSpacing),e):n<0&&(r=Math.max(A-(i-g.options.hierarchical.nodeSpacing),e)),r!==A&&(g.direction.setPosition(C,r),t=!0)}(v=f(A,n))};!0===this.options.hierarchical.blockShifting&&(function(e){var A=g.hierarchical.getLevels();A=Gc(A).call(A);for(var C=0;C0&&Math.abs(a)0&&(n=this.direction.getPosition(A[I-1])+o),this.direction.setPosition(i,n,t),this._validatePositionAndContinue(i,t,n),C++}}}}},{key:"_placeBranchNodes",value:function(g,t){var e,A=this.hierarchical.childrenReference[g];if(void 0!==A){for(var C=[],I=0;It&&void 0===this.positionedNodes[o.id]))return;var r=this.options.hierarchical.nodeSpacing,s=void 
0;s=0===i?this.direction.getPosition(this.body.nodes[g]):this.direction.getPosition(C[i-1])+r,this.direction.setPosition(o,s,n),this._validatePositionAndContinue(o,n,s)}var a=this._getCenterPosition(C);this.direction.setPosition(this.body.nodes[g],a,t)}}},{key:"_validatePositionAndContinue",value:function(g,t,e){if(this.hierarchical.isTree){if(void 0!==this.lastNodeOnLevel[t]){var A=this.direction.getPosition(this.body.nodes[this.lastNodeOnLevel[t]]);if(e-Ag}),"from",g)}(e),this.hierarchical.setMinLevelToZero(this.body.nodes)}},{key:"_generateMap",value:function(){var g=this;this._crawlNetwork((function(t,e){g.hierarchical.levels[e.id]>g.hierarchical.levels[t.id]&&g.hierarchical.addRelation(t.id,e.id)})),this.hierarchical.checkIfTree()}},{key:"_crawlNetwork",value:function(){var g=this,t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:function(){},e=arguments.length>1?arguments[1]:void 0,A={},C=function e(C,I){if(void 0===A[C.id]){var i;g.hierarchical.setTreeIndex(C,I),A[C.id]=!0;for(var o=g._getActiveEdges(C),n=0;n=g.length?{done:!0}:{done:!1,value:g[A++]}},e:function(g){throw g},f:C}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var I,i=!0,o=!1;return{s:function(){e=e.call(g)},n:function(){var g=e.next();return i=g.done,g},e:function(g){o=!0,I=g},f:function(){try{i||null==e.return||e.return()}finally{if(o)throw I}}}}function MZ(g,t){(null==t||t>g.length)&&(t=g.length);for(var e=0,A=new Array(t);e0&&!1!==this.options.deleteNode||0===e&&!1!==this.options.deleteEdge)&&(!0===i&&this._createSeperator(4),this._createDeleteButton(I)),this._bindElementEvents(this.closeDiv,QA(g=this.toggleEditMode).call(g,this)),this._temporaryBindEvent("select",QA(t=this.showManipulatorToolbar).call(t,this))}this.body.emitter.emit("_redraw")}},{key:"addNodeMode",value:function(){var g;if(!0!==this.editMode&&this.enableEditMode(),this._clean(),this.inMode="addNode",!0===this.guiEnabled){var t,e=this.options.locales[this.options.locale];this.manipulationDOM={},this._createBackButton(e),this._createSeperator(),this._createDescription(e.addDescription||this.options.locales.en.addDescription),this._bindElementEvents(this.closeDiv,QA(t=this.toggleEditMode).call(t,this))}this._temporaryBindEvent("click",QA(g=this._performAddNode).call(g,this))}},{key:"editNode",value:function(){var g=this;!0!==this.editMode&&this.enableEditMode(),this._clean();var t=this.selectionHandler.getSelectedNodes()[0];if(void 0!==t){if(this.inMode="editNode","function"!=typeof this.options.editNode)throw new Error("No function has been configured to handle the editing of nodes.");if(!0!==t.isCluster){var e=Fm({},t.options,!1);if(e.x=t.x,e.y=t.y,2!==this.options.editNode.length)throw new Error("The function for edit does not support two arguments (data, callback)");this.options.editNode(e,(function(t){null!=t&&"editNode"===g.inMode&&g.body.data.nodes.getDataSet().update(t),g.showManipulatorToolbar()}))}else alert(this.options.locales[this.options.locale].editClusterError||this.options.locales.en.editClusterError)}else this.showManipulatorToolbar()}},{key:"addEdgeMode",value:function(){var g,t,e,A,C;if(!0!==this.editMode&&this.enableEditMode(),this._clean(),this.inMode="addEdge",!0===this.guiEnabled){var 
I,i=this.options.locales[this.options.locale];this.manipulationDOM={},this._createBackButton(i),this._createSeperator(),this._createDescription(i.edgeDescription||this.options.locales.en.edgeDescription),this._bindElementEvents(this.closeDiv,QA(I=this.toggleEditMode).call(I,this))}this._temporaryBindUI("onTouch",QA(g=this._handleConnect).call(g,this)),this._temporaryBindUI("onDragEnd",QA(t=this._finishConnect).call(t,this)),this._temporaryBindUI("onDrag",QA(e=this._dragControlNode).call(e,this)),this._temporaryBindUI("onRelease",QA(A=this._finishConnect).call(A,this)),this._temporaryBindUI("onDragStart",QA(C=this._dragStartEdge).call(C,this)),this._temporaryBindUI("onHold",(function(){}))}},{key:"editEdgeMode",value:function(){if(!0!==this.editMode&&this.enableEditMode(),this._clean(),this.inMode="editEdge","object"!==kh(this.options.editEdge)||"function"!=typeof this.options.editEdge.editWithoutDrag||(this.edgeBeingEditedId=this.selectionHandler.getSelectedEdgeIds()[0],void 0===this.edgeBeingEditedId)){if(!0===this.guiEnabled){var g,t=this.options.locales[this.options.locale];this.manipulationDOM={},this._createBackButton(t),this._createSeperator(),this._createDescription(t.editEdgeDescription||this.options.locales.en.editEdgeDescription),this._bindElementEvents(this.closeDiv,QA(g=this.toggleEditMode).call(g,this))}if(this.edgeBeingEditedId=this.selectionHandler.getSelectedEdgeIds()[0],void 0!==this.edgeBeingEditedId){var e,A,C,I,i=this.body.edges[this.edgeBeingEditedId],o=this._getNewTargetNode(i.from.x,i.from.y),n=this._getNewTargetNode(i.to.x,i.to.y);this.temporaryIds.nodes.push(o.id),this.temporaryIds.nodes.push(n.id),this.body.nodes[o.id]=o,this.body.nodeIndices.push(o.id),this.body.nodes[n.id]=n,this.body.nodeIndices.push(n.id),this._temporaryBindUI("onTouch",QA(e=this._controlNodeTouch).call(e,this)),this._temporaryBindUI("onTap",(function(){})),this._temporaryBindUI("onHold",(function(){})),this._temporaryBindUI("onDragStart",QA(A=this._controlNodeDragStart).call(A,this)),this._temporaryBindUI("onDrag",QA(C=this._controlNodeDrag).call(C,this)),this._temporaryBindUI("onDragEnd",QA(I=this._controlNodeDragEnd).call(I,this)),this._temporaryBindUI("onMouseMove",(function(){})),this._temporaryBindEvent("beforeDrawing",(function(g){var t=i.edgeType.findBorderPositions(g);!1===o.selected&&(o.x=t.from.x,o.y=t.from.y),!1===n.selected&&(n.x=t.to.x,n.y=t.to.y)})),this.body.emitter.emit("_redraw")}else this.showManipulatorToolbar()}else{var r=this.body.edges[this.edgeBeingEditedId];this._performEditEdge(r.from.id,r.to.id)}}},{key:"deleteSelected",value:function(){var g=this;!0!==this.editMode&&this.enableEditMode(),this._clean(),this.inMode="delete";var t=this.selectionHandler.getSelectedNodeIds(),e=this.selectionHandler.getSelectedEdgeIds(),A=void 0;if(t.length>0){for(var C=0;C0&&"function"==typeof this.options.deleteEdge&&(A=this.options.deleteEdge);if("function"==typeof A){var I={nodes:t,edges:e};if(2!==A.length)throw new Error("The function for delete does not support two arguments (data, callback)");A(I,(function(t){null!=t&&"delete"===g.inMode?(g.body.data.edges.getDataSet().remove(t.edges),g.body.data.nodes.getDataSet().remove(t.nodes),g.body.emitter.emit("startSimulation"),g.showManipulatorToolbar()):(g.body.emitter.emit("startSimulation"),g.showManipulatorToolbar())}))}else 
this.body.data.edges.getDataSet().remove(e),this.body.data.nodes.getDataSet().remove(t),this.body.emitter.emit("startSimulation"),this.showManipulatorToolbar()}},{key:"_setup",value:function(){!0===this.options.enabled?(this.guiEnabled=!0,this._createWrappers(),!1===this.editMode?this._createEditButton():this.showManipulatorToolbar()):(this._removeManipulationDOM(),this.guiEnabled=!1)}},{key:"_createWrappers",value:function(){var g,t;(void 0===this.manipulationDiv&&(this.manipulationDiv=document.createElement("div"),this.manipulationDiv.className="vis-manipulation",!0===this.editMode?this.manipulationDiv.style.display="block":this.manipulationDiv.style.display="none",this.canvas.frame.appendChild(this.manipulationDiv)),void 0===this.editModeDiv&&(this.editModeDiv=document.createElement("div"),this.editModeDiv.className="vis-edit-mode",!0===this.editMode?this.editModeDiv.style.display="none":this.editModeDiv.style.display="block",this.canvas.frame.appendChild(this.editModeDiv)),void 0===this.closeDiv)&&(this.closeDiv=document.createElement("button"),this.closeDiv.className="vis-close",this.closeDiv.setAttribute("aria-label",null!==(g=null===(t=this.options.locales[this.options.locale])||void 0===t?void 0:t.close)&&void 0!==g?g:this.options.locales.en.close),this.closeDiv.style.display=this.manipulationDiv.style.display,this.canvas.frame.appendChild(this.closeDiv))}},{key:"_getNewTargetNode",value:function(g,t){var e=Fm({},this.options.controlNodeStyle);e.id="targetNode"+OP(),e.hidden=!1,e.physics=!1,e.x=g,e.y=t;var A=this.body.functions.createNode(e);return A.shape.boundingBox={left:g,right:g,top:t,bottom:t},A}},{key:"_createEditButton",value:function(){var g;this._clean(),this.manipulationDOM={},Rm(this.editModeDiv);var t=this.options.locales[this.options.locale],e=this._createButton("editMode","vis-edit vis-edit-mode",t.edit||this.options.locales.en.edit);this.editModeDiv.appendChild(e),this._bindElementEvents(e,QA(g=this.toggleEditMode).call(g,this))}},{key:"_clean",value:function(){this.inMode=!1,!0===this.guiEnabled&&(Rm(this.editModeDiv),Rm(this.manipulationDiv),this._cleanupDOMEventListeners()),this._cleanupTemporaryNodesAndEdges(),this._unbindTemporaryUIs(),this._unbindTemporaryEvents(),this.body.emitter.emit("restorePhysics")}},{key:"_cleanupDOMEventListeners",value:function(){var g,t,e=PZ(cu(g=this._domEventListenerCleanupQueue).call(g,0));try{for(e.s();!(t=e.n()).done;){(0,t.value)()}}catch(g){e.e(g)}finally{e.f()}}},{key:"_removeManipulationDOM",value:function(){this._clean(),Rm(this.manipulationDiv),Rm(this.editModeDiv),Rm(this.closeDiv),this.manipulationDiv&&this.canvas.frame.removeChild(this.manipulationDiv),this.editModeDiv&&this.canvas.frame.removeChild(this.editModeDiv),this.closeDiv&&this.canvas.frame.removeChild(this.closeDiv),this.manipulationDiv=void 0,this.editModeDiv=void 0,this.closeDiv=void 0}},{key:"_createSeperator",value:function(){var g=arguments.length>0&&void 0!==arguments[0]?arguments[0]:1;this.manipulationDOM["seperatorLineDiv"+g]=document.createElement("div"),this.manipulationDOM["seperatorLineDiv"+g].className="vis-separator-line",this.manipulationDiv.appendChild(this.manipulationDOM["seperatorLineDiv"+g])}},{key:"_createAddNodeButton",value:function(g){var t,e=this._createButton("addNode","vis-add",g.addNode||this.options.locales.en.addNode);this.manipulationDiv.appendChild(e),this._bindElementEvents(e,QA(t=this.addNodeMode).call(t,this))}},{key:"_createAddEdgeButton",value:function(g){var 
t,e=this._createButton("addEdge","vis-connect",g.addEdge||this.options.locales.en.addEdge);this.manipulationDiv.appendChild(e),this._bindElementEvents(e,QA(t=this.addEdgeMode).call(t,this))}},{key:"_createEditNodeButton",value:function(g){var t,e=this._createButton("editNode","vis-edit",g.editNode||this.options.locales.en.editNode);this.manipulationDiv.appendChild(e),this._bindElementEvents(e,QA(t=this.editNode).call(t,this))}},{key:"_createEditEdgeButton",value:function(g){var t,e=this._createButton("editEdge","vis-edit",g.editEdge||this.options.locales.en.editEdge);this.manipulationDiv.appendChild(e),this._bindElementEvents(e,QA(t=this.editEdgeMode).call(t,this))}},{key:"_createDeleteButton",value:function(g){var t,e;e=this.options.rtl?"vis-delete-rtl":"vis-delete";var A=this._createButton("delete",e,g.del||this.options.locales.en.del);this.manipulationDiv.appendChild(A),this._bindElementEvents(A,QA(t=this.deleteSelected).call(t,this))}},{key:"_createBackButton",value:function(g){var t,e=this._createButton("back","vis-back",g.back||this.options.locales.en.back);this.manipulationDiv.appendChild(e),this._bindElementEvents(e,QA(t=this.showManipulatorToolbar).call(t,this))}},{key:"_createButton",value:function(g,t,e){var A=arguments.length>3&&void 0!==arguments[3]?arguments[3]:"vis-label";return this.manipulationDOM[g+"Div"]=document.createElement("button"),this.manipulationDOM[g+"Div"].className="vis-button "+t,this.manipulationDOM[g+"Label"]=document.createElement("div"),this.manipulationDOM[g+"Label"].className=A,this.manipulationDOM[g+"Label"].innerText=e,this.manipulationDOM[g+"Div"].appendChild(this.manipulationDOM[g+"Label"]),this.manipulationDOM[g+"Div"]}},{key:"_createDescription",value:function(g){this.manipulationDOM.descriptionLabel=document.createElement("div"),this.manipulationDOM.descriptionLabel.className="vis-none",this.manipulationDOM.descriptionLabel.innerText=g,this.manipulationDiv.appendChild(this.manipulationDOM.descriptionLabel)}},{key:"_temporaryBindEvent",value:function(g,t){this.temporaryEventFunctions.push({event:g,boundFunction:t}),this.body.emitter.on(g,t)}},{key:"_temporaryBindUI",value:function(g,t){if(void 0===this.body.eventListeners[g])throw new Error("This UI function does not exist. Typo? 
You tried: "+g+" possible are: "+xf(Cc(this.body.eventListeners)));this.temporaryUIFunctions[g]=this.body.eventListeners[g],this.body.eventListeners[g]=t}},{key:"_unbindTemporaryUIs",value:function(){for(var g in this.temporaryUIFunctions)Object.prototype.hasOwnProperty.call(this.temporaryUIFunctions,g)&&(this.body.eventListeners[g]=this.temporaryUIFunctions[g],delete this.temporaryUIFunctions[g]);this.temporaryUIFunctions={}}},{key:"_unbindTemporaryEvents",value:function(){for(var g=0;g=0;i--)if(C[i]!==this.selectedControlNode.id){I=this.body.nodes[C[i]];break}if(void 0!==I&&void 0!==this.selectedControlNode)if(!0===I.isCluster)alert(this.options.locales[this.options.locale].createEdgeError||this.options.locales.en.createEdgeError);else{var o=this.body.nodes[this.temporaryIds.nodes[0]];this.selectedControlNode.id===o.id?this._performEditEdge(I.id,A.to.id):this._performEditEdge(A.from.id,I.id)}else A.updateEdgeType(),this.body.emitter.emit("restorePhysics");this.body.emitter.emit("_redraw")}}},{key:"_handleConnect",value:function(g){if((new Date).valueOf()-this.touchTime>100){this.lastTouch=this.body.functions.getPointer(g.center),this.lastTouch.translation=yA({},this.body.view.translation),this.interactionHandler.drag.pointer=this.lastTouch,this.interactionHandler.drag.translation=this.lastTouch.translation;var t=this.lastTouch,e=this.selectionHandler.getNodeAt(t);if(void 0!==e)if(!0===e.isCluster)alert(this.options.locales[this.options.locale].createEdgeError||this.options.locales.en.createEdgeError);else{var A=this._getNewTargetNode(e.x,e.y);this.body.nodes[A.id]=A,this.body.nodeIndices.push(A.id);var C=this.body.functions.createEdge({id:"connectionEdge"+OP(),from:e.id,to:A.id,physics:!1,smooth:{enabled:!0,type:"continuous",roundness:.5}});this.body.edges[C.id]=C,this.body.edgeIndices.push(C.id),this.temporaryIds.nodes.push(A.id),this.temporaryIds.edges.push(C.id)}this.touchTime=(new Date).valueOf()}}},{key:"_dragControlNode",value:function(g){var t=this.body.functions.getPointer(g.center),e=this.selectionHandler._pointerToPositionObject(t),A=void 0;void 0!==this.temporaryIds.edges[0]&&(A=this.body.edges[this.temporaryIds.edges[0]].fromId);for(var C=this.selectionHandler._getAllNodesOverlappingWith(e),I=void 0,i=C.length-1;i>=0;i--){var o;if(-1===$p(o=this.temporaryIds.nodes).call(o,C[i])){I=this.body.nodes[C[i]];break}}if(g.controlEdge={from:A,to:I?I.id:void 0},this.selectionHandler.generateClickEvent("controlNodeDragging",g,t),void 0!==this.temporaryIds.nodes[0]){var n=this.body.nodes[this.temporaryIds.nodes[0]];n.x=this.canvas._XconvertDOMtoCanvas(t.x),n.y=this.canvas._YconvertDOMtoCanvas(t.y),this.body.emitter.emit("_redraw")}else this.interactionHandler.onDrag(g)}},{key:"_finishConnect",value:function(g){var t=this.body.functions.getPointer(g.center),e=this.selectionHandler._pointerToPositionObject(t),A=void 0;void 0!==this.temporaryIds.edges[0]&&(A=this.body.edges[this.temporaryIds.edges[0]].fromId);for(var C=this.selectionHandler._getAllNodesOverlappingWith(e),I=void 0,i=C.length-1;i>=0;i--){var o;if(-1===$p(o=this.temporaryIds.nodes).call(o,C[i])){I=this.body.nodes[C[i]];break}}this._cleanupTemporaryNodesAndEdges(),void 0!==I&&(!0===I.isCluster?alert(this.options.locales[this.options.locale].createEdgeError||this.options.locales.en.createEdgeError):void 0!==this.body.nodes[A]&&void 0!==this.body.nodes[I.id]&&this._performAddEdge(A,I.id)),g.controlEdge={from:A,to:I?I.id:void 
0},this.selectionHandler.generateClickEvent("controlNodeDragEnd",g,t),this.body.emitter.emit("_redraw")}},{key:"_dragStartEdge",value:function(g){var t=this.lastTouch;this.selectionHandler.generateClickEvent("dragStart",g,t,void 0,!0)}},{key:"_performAddNode",value:function(g){var t=this,e={id:OP(),x:g.pointer.canvas.x,y:g.pointer.canvas.y,label:"new"};if("function"==typeof this.options.addNode){if(2!==this.options.addNode.length)throw this.showManipulatorToolbar(),new Error("The function for add does not support two arguments (data,callback)");this.options.addNode(e,(function(g){null!=g&&"addNode"===t.inMode&&t.body.data.nodes.getDataSet().add(g),t.showManipulatorToolbar()}))}else this.body.data.nodes.getDataSet().add(e),this.showManipulatorToolbar()}},{key:"_performAddEdge",value:function(g,t){var e=this,A={from:g,to:t};if("function"==typeof this.options.addEdge){if(2!==this.options.addEdge.length)throw new Error("The function for connect does not support two arguments (data,callback)");this.options.addEdge(A,(function(g){null!=g&&"addEdge"===e.inMode&&(e.body.data.edges.getDataSet().add(g),e.selectionHandler.unselectAll(),e.showManipulatorToolbar())}))}else this.body.data.edges.getDataSet().add(A),this.selectionHandler.unselectAll(),this.showManipulatorToolbar()}},{key:"_performEditEdge",value:function(g,t){var e=this,A={id:this.edgeBeingEditedId,from:g,to:t,label:this.body.data.edges.get(this.edgeBeingEditedId).label},C=this.options.editEdge;if("object"===kh(C)&&(C=C.editWithoutDrag),"function"==typeof C){if(2!==C.length)throw new Error("The function for edit does not support two arguments (data, callback)");C(A,(function(g){null==g||"editEdge"!==e.inMode?(e.body.edges[A.id].updateEdgeType(),e.body.emitter.emit("_redraw"),e.showManipulatorToolbar()):(e.body.data.edges.getDataSet().update(g),e.selectionHandler.unselectAll(),e.showManipulatorToolbar())}))}else 
this.body.data.edges.getDataSet().update(A),this.selectionHandler.unselectAll(),this.showManipulatorToolbar()}}]),g}(),zZ="string",SZ="boolean",ZZ="number",FZ="array",GZ="object",jZ=["arrow","bar","box","circle","crow","curve","diamond","image","inv_curve","inv_triangle","triangle","vee"],LZ={borderWidth:{number:ZZ},borderWidthSelected:{number:ZZ,undefined:"undefined"},brokenImage:{string:zZ,undefined:"undefined"},chosen:{label:{boolean:SZ,function:"function"},node:{boolean:SZ,function:"function"},__type__:{object:GZ,boolean:SZ}},color:{border:{string:zZ},background:{string:zZ},highlight:{border:{string:zZ},background:{string:zZ},__type__:{object:GZ,string:zZ}},hover:{border:{string:zZ},background:{string:zZ},__type__:{object:GZ,string:zZ}},__type__:{object:GZ,string:zZ}},opacity:{number:ZZ,undefined:"undefined"},fixed:{x:{boolean:SZ},y:{boolean:SZ},__type__:{object:GZ,boolean:SZ}},font:{align:{string:zZ},color:{string:zZ},size:{number:ZZ},face:{string:zZ},background:{string:zZ},strokeWidth:{number:ZZ},strokeColor:{string:zZ},vadjust:{number:ZZ},multi:{boolean:SZ,string:zZ},bold:{color:{string:zZ},size:{number:ZZ},face:{string:zZ},mod:{string:zZ},vadjust:{number:ZZ},__type__:{object:GZ,string:zZ}},boldital:{color:{string:zZ},size:{number:ZZ},face:{string:zZ},mod:{string:zZ},vadjust:{number:ZZ},__type__:{object:GZ,string:zZ}},ital:{color:{string:zZ},size:{number:ZZ},face:{string:zZ},mod:{string:zZ},vadjust:{number:ZZ},__type__:{object:GZ,string:zZ}},mono:{color:{string:zZ},size:{number:ZZ},face:{string:zZ},mod:{string:zZ},vadjust:{number:ZZ},__type__:{object:GZ,string:zZ}},__type__:{object:GZ,string:zZ}},group:{string:zZ,number:ZZ,undefined:"undefined"},heightConstraint:{minimum:{number:ZZ},valign:{string:zZ},__type__:{object:GZ,boolean:SZ,number:ZZ}},hidden:{boolean:SZ},icon:{face:{string:zZ},code:{string:zZ},size:{number:ZZ},color:{string:zZ},weight:{string:zZ,number:ZZ},__type__:{object:GZ}},id:{string:zZ,number:ZZ},image:{selected:{string:zZ,undefined:"undefined"},unselected:{string:zZ,undefined:"undefined"},__type__:{object:GZ,string:zZ}},imagePadding:{top:{number:ZZ},right:{number:ZZ},bottom:{number:ZZ},left:{number:ZZ},__type__:{object:GZ,number:ZZ}},label:{string:zZ,undefined:"undefined"},labelHighlightBold:{boolean:SZ},level:{number:ZZ,undefined:"undefined"},margin:{top:{number:ZZ},right:{number:ZZ},bottom:{number:ZZ},left:{number:ZZ},__type__:{object:GZ,number:ZZ}},mass:{number:ZZ},physics:{boolean:SZ},scaling:{min:{number:ZZ},max:{number:ZZ},label:{enabled:{boolean:SZ},min:{number:ZZ},max:{number:ZZ},maxVisible:{number:ZZ},drawThreshold:{number:ZZ},__type__:{object:GZ,boolean:SZ}},customScalingFunction:{function:"function"},__type__:{object:GZ}},shadow:{enabled:{boolean:SZ},color:{string:zZ},size:{number:ZZ},x:{number:ZZ},y:{number:ZZ},__type__:{object:GZ,boolean:SZ}},shape:{string:["custom","ellipse","circle","database","box","text","image","circularImage","diamond","dot","star","triangle","triangleDown","square","icon","hexagon"]},ctxRenderer:{function:"function"},shapeProperties:{borderDashes:{boolean:SZ,array:FZ},borderRadius:{number:ZZ},interpolation:{boolean:SZ},useImageSize:{boolean:SZ},useBorderWithImage:{boolean:SZ},coordinateOrigin:{string:["center","top-left"]},__type__:{object:GZ}},size:{number:ZZ},title:{string:zZ,dom:"dom",undefined:"undefined"},value:{number:ZZ,undefined:"undefined"},widthConstraint:{minimum:{number:ZZ},maximum:{number:ZZ},__type__:{object:GZ,boolean:SZ,number:ZZ}},x:{number:ZZ},y:{number:ZZ},__type__:{object:GZ}},VZ={configure:{enabled:{boolean:SZ}
,filter:{boolean:SZ,string:zZ,array:FZ,function:"function"},container:{dom:"dom"},showButton:{boolean:SZ},__type__:{object:GZ,boolean:SZ,string:zZ,array:FZ,function:"function"}},edges:{arrows:{to:{enabled:{boolean:SZ},scaleFactor:{number:ZZ},type:{string:jZ},imageHeight:{number:ZZ},imageWidth:{number:ZZ},src:{string:zZ},__type__:{object:GZ,boolean:SZ}},middle:{enabled:{boolean:SZ},scaleFactor:{number:ZZ},type:{string:jZ},imageWidth:{number:ZZ},imageHeight:{number:ZZ},src:{string:zZ},__type__:{object:GZ,boolean:SZ}},from:{enabled:{boolean:SZ},scaleFactor:{number:ZZ},type:{string:jZ},imageWidth:{number:ZZ},imageHeight:{number:ZZ},src:{string:zZ},__type__:{object:GZ,boolean:SZ}},__type__:{string:["from","to","middle"],object:GZ}},endPointOffset:{from:{number:ZZ},to:{number:ZZ},__type__:{object:GZ,number:ZZ}},arrowStrikethrough:{boolean:SZ},background:{enabled:{boolean:SZ},color:{string:zZ},size:{number:ZZ},dashes:{boolean:SZ,array:FZ},__type__:{object:GZ,boolean:SZ}},chosen:{label:{boolean:SZ,function:"function"},edge:{boolean:SZ,function:"function"},__type__:{object:GZ,boolean:SZ}},color:{color:{string:zZ},highlight:{string:zZ},hover:{string:zZ},inherit:{string:["from","to","both"],boolean:SZ},opacity:{number:ZZ},__type__:{object:GZ,string:zZ}},dashes:{boolean:SZ,array:FZ},font:{color:{string:zZ},size:{number:ZZ},face:{string:zZ},background:{string:zZ},strokeWidth:{number:ZZ},strokeColor:{string:zZ},align:{string:["horizontal","top","middle","bottom"]},vadjust:{number:ZZ},multi:{boolean:SZ,string:zZ},bold:{color:{string:zZ},size:{number:ZZ},face:{string:zZ},mod:{string:zZ},vadjust:{number:ZZ},__type__:{object:GZ,string:zZ}},boldital:{color:{string:zZ},size:{number:ZZ},face:{string:zZ},mod:{string:zZ},vadjust:{number:ZZ},__type__:{object:GZ,string:zZ}},ital:{color:{string:zZ},size:{number:ZZ},face:{string:zZ},mod:{string:zZ},vadjust:{number:ZZ},__type__:{object:GZ,string:zZ}},mono:{color:{string:zZ},size:{number:ZZ},face:{string:zZ},mod:{string:zZ},vadjust:{number:ZZ},__type__:{object:GZ,string:zZ}},__type__:{object:GZ,string:zZ}},hidden:{boolean:SZ},hoverWidth:{function:"function",number:ZZ},label:{string:zZ,undefined:"undefined"},labelHighlightBold:{boolean:SZ},length:{number:ZZ,undefined:"undefined"},physics:{boolean:SZ},scaling:{min:{number:ZZ},max:{number:ZZ},label:{enabled:{boolean:SZ},min:{number:ZZ},max:{number:ZZ},maxVisible:{number:ZZ},drawThreshold:{number:ZZ},__type__:{object:GZ,boolean:SZ}},customScalingFunction:{function:"function"},__type__:{object:GZ}},selectionWidth:{function:"function",number:ZZ},selfReferenceSize:{number:ZZ},selfReference:{size:{number:ZZ},angle:{number:ZZ},renderBehindTheNode:{boolean:SZ},__type__:{object:GZ}},shadow:{enabled:{boolean:SZ},color:{string:zZ},size:{number:ZZ},x:{number:ZZ},y:{number:ZZ},__type__:{object:GZ,boolean:SZ}},smooth:{enabled:{boolean:SZ},type:{string:["dynamic","continuous","discrete","diagonalCross","straightCross","horizontal","vertical","curvedCW","curvedCCW","cubicBezier"]},roundness:{number:ZZ},forceDirection:{string:["horizontal","vertical","none"],boolean:SZ},__type__:{object:GZ,boolean:SZ}},title:{string:zZ,undefined:"undefined"},width:{number:ZZ},widthConstraint:{maximum:{number:ZZ},__type__:{object:GZ,boolean:SZ,number:ZZ}},value:{number:ZZ,undefined:"undefined"},__type__:{object:GZ}},groups:{useDefaultGroups:{boolean:SZ},__any__:LZ,__type__:{object:GZ}},interaction:{dragNodes:{boolean:SZ},dragView:{boolean:SZ},hideEdgesOnDrag:{boolean:SZ},hideEdgesOnZoom:{boolean:SZ},hideNodesOnDrag:{boolean:SZ},hover:{boolean:SZ},keyboard
:{enabled:{boolean:SZ},speed:{x:{number:ZZ},y:{number:ZZ},zoom:{number:ZZ},__type__:{object:GZ}},bindToWindow:{boolean:SZ},autoFocus:{boolean:SZ},__type__:{object:GZ,boolean:SZ}},multiselect:{boolean:SZ},navigationButtons:{boolean:SZ},selectable:{boolean:SZ},selectConnectedEdges:{boolean:SZ},hoverConnectedEdges:{boolean:SZ},tooltipDelay:{number:ZZ},zoomView:{boolean:SZ},zoomSpeed:{number:ZZ},__type__:{object:GZ}},layout:{randomSeed:{undefined:"undefined",number:ZZ,string:zZ},improvedLayout:{boolean:SZ},clusterThreshold:{number:ZZ},hierarchical:{enabled:{boolean:SZ},levelSeparation:{number:ZZ},nodeSpacing:{number:ZZ},treeSpacing:{number:ZZ},blockShifting:{boolean:SZ},edgeMinimization:{boolean:SZ},parentCentralization:{boolean:SZ},direction:{string:["UD","DU","LR","RL"]},sortMethod:{string:["hubsize","directed"]},shakeTowards:{string:["leaves","roots"]},__type__:{object:GZ,boolean:SZ}},__type__:{object:GZ}},manipulation:{enabled:{boolean:SZ},initiallyActive:{boolean:SZ},addNode:{boolean:SZ,function:"function"},addEdge:{boolean:SZ,function:"function"},editNode:{function:"function"},editEdge:{editWithoutDrag:{function:"function"},__type__:{object:GZ,boolean:SZ,function:"function"}},deleteNode:{boolean:SZ,function:"function"},deleteEdge:{boolean:SZ,function:"function"},controlNodeStyle:LZ,__type__:{object:GZ,boolean:SZ}},nodes:LZ,physics:{enabled:{boolean:SZ},barnesHut:{theta:{number:ZZ},gravitationalConstant:{number:ZZ},centralGravity:{number:ZZ},springLength:{number:ZZ},springConstant:{number:ZZ},damping:{number:ZZ},avoidOverlap:{number:ZZ},__type__:{object:GZ}},forceAtlas2Based:{theta:{number:ZZ},gravitationalConstant:{number:ZZ},centralGravity:{number:ZZ},springLength:{number:ZZ},springConstant:{number:ZZ},damping:{number:ZZ},avoidOverlap:{number:ZZ},__type__:{object:GZ}},repulsion:{centralGravity:{number:ZZ},springLength:{number:ZZ},springConstant:{number:ZZ},nodeDistance:{number:ZZ},damping:{number:ZZ},__type__:{object:GZ}},hierarchicalRepulsion:{centralGravity:{number:ZZ},springLength:{number:ZZ},springConstant:{number:ZZ},nodeDistance:{number:ZZ},damping:{number:ZZ},avoidOverlap:{number:ZZ},__type__:{object:GZ}},maxVelocity:{number:ZZ},minVelocity:{number:ZZ},solver:{string:["barnesHut","repulsion","hierarchicalRepulsion","forceAtlas2Based"]},stabilization:{enabled:{boolean:SZ},iterations:{number:ZZ},updateInterval:{number:ZZ},onlyDynamicEdges:{boolean:SZ},fit:{boolean:SZ},__type__:{object:GZ,boolean:SZ}},timestep:{number:ZZ},adaptiveTimestep:{boolean:SZ},wind:{x:{number:ZZ},y:{number:ZZ},__type__:{object:GZ}},__type__:{object:GZ,boolean:SZ}},autoResize:{boolean:SZ},clickToUse:{boolean:SZ},locale:{string:zZ},locales:{__any__:{any:"any"},__type__:{object:GZ}},height:{string:zZ},width:{string:zZ},__type__:{object:GZ}},YZ={nodes:{borderWidth:[1,0,10,1],borderWidthSelected:[2,0,10,1],color:{border:["color","#2B7CE9"],background:["color","#97C2FC"],highlight:{border:["color","#2B7CE9"],background:["color","#D2E5FF"]},hover:{border:["color","#2B7CE9"],background:["color","#D2E5FF"]}},opacity:[0,0,1,.1],fixed:{x:!1,y:!1},font:{color:["color","#343434"],size:[14,0,100,1],face:["arial","verdana","tahoma"],background:["color","none"],strokeWidth:[0,0,50,1],strokeColor:["color","#ffffff"]},hidden:!1,labelHighlightBold:!0,physics:!0,scaling:{min:[10,0,200,1],max:[30,0,200,1],label:{enabled:!1,min:[14,0,200,1],max:[30,0,200,1],maxVisible:[30,0,200,1],drawThreshold:[5,0,20,1]}},shadow:{enabled:!1,color:"rgba(0,0,0,0.5)",size:[10,0,20,1],x:[5,-30,30,1],y:[5,-30,30,1]},shape:["ellipse","box","circle","d
atabase","diamond","dot","square","star","text","triangle","triangleDown","hexagon"],shapeProperties:{borderDashes:!1,borderRadius:[6,0,20,1],interpolation:!0,useImageSize:!1},size:[25,0,200,1]},edges:{arrows:{to:{enabled:!1,scaleFactor:[1,0,3,.05],type:"arrow"},middle:{enabled:!1,scaleFactor:[1,0,3,.05],type:"arrow"},from:{enabled:!1,scaleFactor:[1,0,3,.05],type:"arrow"}},endPointOffset:{from:[0,-10,10,1],to:[0,-10,10,1]},arrowStrikethrough:!0,color:{color:["color","#848484"],highlight:["color","#848484"],hover:["color","#848484"],inherit:["from","to","both",!0,!1],opacity:[1,0,1,.05]},dashes:!1,font:{color:["color","#343434"],size:[14,0,100,1],face:["arial","verdana","tahoma"],background:["color","none"],strokeWidth:[2,0,50,1],strokeColor:["color","#ffffff"],align:["horizontal","top","middle","bottom"]},hidden:!1,hoverWidth:[1.5,0,5,.1],labelHighlightBold:!0,physics:!0,scaling:{min:[1,0,100,1],max:[15,0,100,1],label:{enabled:!0,min:[14,0,200,1],max:[30,0,200,1],maxVisible:[30,0,200,1],drawThreshold:[5,0,20,1]}},selectionWidth:[1.5,0,5,.1],selfReferenceSize:[20,0,200,1],selfReference:{size:[20,0,200,1],angle:[Math.PI/2,-6*Math.PI,6*Math.PI,Math.PI/8],renderBehindTheNode:!0},shadow:{enabled:!1,color:"rgba(0,0,0,0.5)",size:[10,0,20,1],x:[5,-30,30,1],y:[5,-30,30,1]},smooth:{enabled:!0,type:["dynamic","continuous","discrete","diagonalCross","straightCross","horizontal","vertical","curvedCW","curvedCCW","cubicBezier"],forceDirection:["horizontal","vertical","none"],roundness:[.5,0,1,.05]},width:[1,0,30,1]},layout:{hierarchical:{enabled:!1,levelSeparation:[150,20,500,5],nodeSpacing:[100,20,500,5],treeSpacing:[200,20,500,5],blockShifting:!0,edgeMinimization:!0,parentCentralization:!0,direction:["UD","DU","LR","RL"],sortMethod:["hubsize","directed"],shakeTowards:["leaves","roots"]}},interaction:{dragNodes:!0,dragView:!0,hideEdgesOnDrag:!1,hideEdgesOnZoom:!1,hideNodesOnDrag:!1,hover:!1,keyboard:{enabled:!1,speed:{x:[10,0,40,1],y:[10,0,40,1],zoom:[.02,0,.1,.005]},bindToWindow:!0,autoFocus:!0},multiselect:!1,navigationButtons:!1,selectable:!0,selectConnectedEdges:!0,hoverConnectedEdges:!0,tooltipDelay:[300,0,1e3,25],zoomView:!0,zoomSpeed:[1,.1,2,.1]},manipulation:{enabled:!1,initiallyActive:!1},physics:{enabled:!0,barnesHut:{theta:[.5,.1,1,.05],gravitationalConstant:[-2e3,-3e4,0,50],centralGravity:[.3,0,10,.05],springLength:[95,0,500,5],springConstant:[.04,0,1.2,.005],damping:[.09,0,1,.01],avoidOverlap:[0,0,1,.01]},forceAtlas2Based:{theta:[.5,.1,1,.05],gravitationalConstant:[-50,-500,0,1],centralGravity:[.01,0,1,.005],springLength:[95,0,500,5],springConstant:[.08,0,1.2,.005],damping:[.4,0,1,.01],avoidOverlap:[0,0,1,.01]},repulsion:{centralGravity:[.2,0,10,.05],springLength:[200,0,500,5],springConstant:[.05,0,1.2,.005],nodeDistance:[100,0,500,5],damping:[.09,0,1,.01]},hierarchicalRepulsion:{centralGravity:[.2,0,10,.05],springLength:[100,0,500,5],springConstant:[.01,0,1.2,.005],nodeDistance:[120,0,500,5],damping:[.09,0,1,.01],avoidOverlap:[0,0,1,.01]},maxVelocity:[50,0,150,1],minVelocity:[.1,.01,.5,.01],solver:["barnesHut","forceAtlas2Based","repulsion","hierarchicalRepulsion"],timestep:[.5,.01,1,.01],wind:{x:[0,-10,10,.1],y:[0,-10,10,.1]}}},WZ=function(g,t,e){var A;return!(!ju(g).call(g,"physics")||!ju(A=YZ.physics.solver).call(A,t)||e.physics.solver===t||"wind"===t)},QZ=Object.freeze({__proto__:null,allOptions:VZ,configuratorHideOption:WZ,configureOptions:YZ}),UZ=function(){function g(){cd(this,g)}return Bh(g,[{key:"getDistances",value:function(g,t,e){for(var A={},C=g.edges,I=0;I2&&void 
0!==arguments[2]&&arguments[2],A=this.distanceSolver.getDistances(this.body,g,t);this._createL_matrix(A),this._createK_matrix(A),this._createE_matrix();for(var C=0,I=Math.max(1e3,Math.min(10*this.body.nodeIndices.length,6e3)),i=1e9,o=0,n=0,r=0,s=0,a=0;i>.01&&C1&&a<5;){a+=1,this._moveNode(o,n,r);var h=bl(this._getEnergy(o),3);s=h[0],n=h[1],r=h[2]}}}},{key:"_getHighestEnergyNode",value:function(g){for(var t=this.body.nodeIndices,e=this.body.nodes,A=0,C=t[0],I=0,i=0,o=0;oHow to Download and Install Windows 7 64 Bit ISO -

              Windows 7 is one of the most popular operating systems that Microsoft has ever released. It offers a user-friendly interface, enhanced security features, and improved performance. If you want to upgrade your old computer or install Windows 7 on a new one, you will need a Windows 7 64 bit ISO file. This is a disk image file that contains all the installation files for Windows 7. You can download it from the official Microsoft website or from third-party sources. In this article, we will show you how to download and install Windows 7 64 bit ISO on your PC.


              windows 7 64 bit iso crack download


              Download Ziphttps://urlcod.com/2uK9or



Download Windows 7 64 Bit ISO

              There are two ways to download Windows 7 64 bit ISO: from the official Microsoft website or from third-party sources. The official website requires you to have a valid product key, which is a 25-digit code that you get when you purchase Windows 7. The third-party sources do not require a product key, but they may not be reliable or safe. We recommend that you use the official website if you have a product key, or use a trusted third-party source if you don't.

From the official Microsoft website

To download Windows 7 64 bit ISO from the official Microsoft website, follow these steps:

1. Go to https://www.microsoft.com/en-in/software-download/ and click on Windows 7.
2. Enter your product key and click Verify. If your product key is valid, you will be able to select the edition and language of Windows 7 that you want to download.
3. Click on Confirm and then choose whether you want to download the 32-bit or the 64-bit version of Windows 7. Click on the link to start the download.
4. Save the ISO file to your preferred location on your PC.

              From third-party sources

To download Windows 7 64 bit ISO from third-party sources, follow these steps:

1. Go to a reputable website that offers Windows 7 ISO downloads, such as https://www.techworm.net/2022/01/download-windows-7-iso-ultimate-professional-edition.html.
2. Select the edition and language of Windows 7 that you want to download. You can choose between Ultimate, Professional, and Home Premium editions.
3. Click on the link to start the download. You may need to complete some verification steps before the download begins.
4. Save the ISO file to your preferred location on your PC.
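
Since a third-party mirror is not vouched for by Microsoft, it is worth comparing the downloaded file's SHA-256 hash with a value published by a source you trust before using it. The snippet below is a minimal Python sketch; the ISO file name and the expected hash value are placeholders you would replace with your own.

```python
import hashlib

def sha256_of(path, chunk_size=1024 * 1024):
    """Return the SHA-256 hex digest of a file, reading it in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Placeholders: use your own file name and a hash published by a source you trust.
iso_path = "Win7_x64.iso"
expected = "paste-the-published-sha256-value-here"

actual = sha256_of(iso_path)
print("SHA-256:", actual)
print("Hashes match." if actual == expected.lower() else "Hashes differ - do not use this ISO.")
```

On Windows you can get the same digest without Python by running the built-in certutil -hashfile command on the ISO.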

              Create Windows 7 Bootable USB/DVD

After you have downloaded the Windows 7 64 bit ISO file, you need to create a bootable USB drive or DVD that you can use to install Windows 7 on your PC. You can use a free tool called Rufus to create a bootable disk. You will need an empty USB drive with at least 8 GB of space or a blank DVD disc.
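
If you are unsure whether your USB stick really has the required 8 GB, a quick capacity check is easy to script. This is only a convenience sketch; the drive path is an assumption and should be changed to wherever your drive is mounted.

```python
import shutil

# Assumption: the USB drive is mounted as E:\ on Windows (use e.g. "/media/usb" on Linux).
drive = "E:\\"

usage = shutil.disk_usage(drive)
total_gb = usage.total / (1024 ** 3)
print(f"{drive} capacity: {total_gb:.1f} GB, free: {usage.free / (1024 ** 3):.1f} GB")
print("Large enough for a Windows 7 ISO." if total_gb >= 8 else "Smaller than 8 GB - use a bigger drive.")
```

Rufus also shows the drive size in its Device list, so this check is purely optional.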

              Create a bootable USB drive

To create a bootable USB drive with Rufus, follow these steps:

1. Download Rufus from https://rufus.ie/ and run it on your PC.
2. Insert your USB drive into your PC and select it from the Device drop-down menu in Rufus.
3. Select Disk or ISO image from the Boot selection menu and click on Select.
4. Browse to the location where you saved the Windows 7 ISO file and open it.
5. Make sure that the Partition scheme is set to MBR and the Target system is set to BIOS or UEFI.
6. Click on Start and wait for Rufus to create the bootable USB drive.
7. Eject your USB drive when Rufus is done.

              Create a bootable DVD disc


              To create a bootable DVD disc with Rufus

              \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/How to Install and Run Checkra1n on Windows 10 A Simple and Effective Method.md b/spaces/tialenAdioni/chat-gpt-api/logs/How to Install and Run Checkra1n on Windows 10 A Simple and Effective Method.md deleted file mode 100644 index 9741bc4d8228060291048b791d877d1d7ee54fd7..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/How to Install and Run Checkra1n on Windows 10 A Simple and Effective Method.md +++ /dev/null @@ -1,45 +0,0 @@ - -

              How to Download Checkra1n on Windows 10: A Step-by-Step Guide

Checkra1n is a popular jailbreak tool that lets you jailbreak your iOS device and install custom apps and tweaks. However, it is only available for macOS and Linux operating systems. If you want to use Checkra1n with a Windows 10 PC, you will need to follow some extra steps. In this article, we will show you how to download and run Checkra1n on Windows 10 using a bootable USB drive.

What You Need

Before you start, make sure you have the following items:

              how to download checkra1n on windows 10


              Download >>> https://urlcod.com/2uK5A3



• A Windows 10 computer with an internet connection.
• A USB flash drive with at least 8 GB of storage space.
• An iOS device that is compatible with Checkra1n. You can check the list of supported devices on the official Checkra1n website (checkra.in).
• A USB cable to connect your iOS device to your computer.

              Step 1: Download Checkra1n ISO File

The first step is to download the Checkra1n ISO file, which is a bootable image of the Checkra1n jailbreak tool. You can download it from the official website. Choose the version that matches your iOS device and click on the download button. Save the file to your computer.

Step 2: Download Rufus

The next step is to download Rufus, which is a free and open-source tool that allows you to create bootable USB drives. You can download it from the official website at https://rufus.ie/. Choose the portable version and click on the download button. Save the file to your computer.

Step 3: Create Bootable USB Drive

              The third step is to create a bootable USB drive using Rufus and the Checkra1n ISO file. Follow these steps:

1. Insert your USB flash drive into your computer and make sure it is formatted as FAT32 or NTFS.
2. Open Rufus and select your USB drive from the Device dropdown menu.
3. Click on the Select button and browse to the location where you saved the Checkra1n ISO file. Choose the file and click on Open.
4. Make sure the Partition scheme is set to MBR and the Target system is set to BIOS or UEFI.
5. Click on the Start button and wait for Rufus to create the bootable USB drive.
6. When Rufus is done, close it and safely eject your USB drive from your computer.

              Step 4: Boot from USB Drive

The fourth step is to boot your Windows 10 computer from the USB drive that contains the Checkra1n jailbreak tool. Follow these steps:

1. Turn off your computer and plug in your USB drive.
2. Turn on your computer and press the key that opens the boot menu. This key may vary depending on your computer model, but it is usually F12, F10, F9, F8, or Esc.
3. Select your USB drive from the boot menu and press Enter.
4. You should see a black screen with a Checkra1n logo. Wait for it to load.

              Step 5: Jailbreak Your iOS Device

The final step is to jailbreak your iOS device using Checkra1n. Follow these steps:

1. Connect your iOS device to your computer using a USB cable.
2. On your iOS device, go to Settings > General > About and check your software version. Make sure it is compatible with Checkra1n; you can check the list of supported versions on the official Checkra1n website.
3. On your iOS device, go to Settings > General > Software Update and turn off Automatic Updates.
4. On your iOS device, go to Settings > Touch ID & Passcode and turn off Passcode if you have one.
5. On your iOS device, go to Settings > iCloud > Find My and turn off Find My iPhone if you have it enabled.

                \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Best of Okey Jakota The Nigerian Singer Who Captivated Millions with His Songs.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Best of Okey Jakota The Nigerian Singer Who Captivated Millions with His Songs.md deleted file mode 100644 index b37c93b2426a22507738c281c7ae326c0c110109..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Best of Okey Jakota The Nigerian Singer Who Captivated Millions with His Songs.md +++ /dev/null @@ -1,185 +0,0 @@ -

                Download Best of Okey Jakota: How to Enjoy the Legendary Igbo Highlife Music

If you are a fan of Igbo highlife music, you have probably heard of Okey Jakota, one of the living legends of the genre. His golden voice, his skillful use of instruments, and his catchy lyrics have made him a household name in Nigeria and beyond. In this article, we will show you how to download best of Okey Jakota for free online, and how to enjoy his music offline and online.

                download best of okey jakota


                DOWNLOADhttps://bltlly.com/2uOpYs



Who is Okey Jakota and Why You Should Listen to His Music

                Okey Jakota's Biography and Musical Career


                Okey Jakota is a Nigerian-born highlife multi-instrumentalist, songwriter, recording and performing Igbo artist. He was born in Okija town in Anambra State, Eastern Nigeria. He started his musical career at a young age, playing various instruments such as guitar, keyboard, saxophone, and flute. He joined several bands and groups before launching his solo career in the late 1990s. He has released several albums and singles, such as Uwa Adi Ka Eche, Igwe Okorakwu, Onyebuchi, Ndidi Bu Ije Uwa, and many more. He has also collaborated with other famous highlife artists such as Chief Osita Osadebe, Oliver De Coque, Bright Chimezie, and Prince Nico Mbarga.

Okey Jakota's Style and Influence

                Okey Jakota's style is a blend of traditional Igbo highlife music with modern influences such as gospel, reggae, jazz, and pop. He sings in Igbo language, but also incorporates English words and phrases. His songs are usually upbeat, lively, and danceable, but also convey deep messages about life, love, faith, culture, and society. He is known for his witty and humorous lyrics that often use proverbs, idioms, metaphors, and analogies. He is also known for his energetic and charismatic stage performance that engages the audience. He has influenced many younger highlife artists who admire his talent and versatility.

How to Download Best of Okey Jakota for Free Online

                The Benefits of Downloading Music Online

Downloading music online has many benefits over buying physical CDs or streaming music from services like Spotify or Apple Music. Some of the benefits are:

• You can own your music and listen to it anytime, anywhere, without an internet connection or subscription fees.
• You can choose the format and quality of your music files according to your preference and device compatibility.
• You can create your own playlists and albums according to your mood and taste (see the playlist sketch after this list).
• You can support your favorite artists directly by buying their music or donating to them.
• You can discover new music by searching through curated collections, specific genres, and trending music.
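
To illustrate the playlist point above, here is a small sketch that scans a folder of downloaded MP3 files and writes a plain M3U playlist that most players can open. The folder path is an assumption; point it at wherever you keep your Okey Jakota downloads.

```python
from pathlib import Path

# Assumption: your downloaded songs live in this folder; change it to your own path.
music_dir = Path("Music/Okey Jakota")
playlist = music_dir / "okey_jakota_best_of.m3u"

# Collect MP3 files in alphabetical order and write one file name per line (the plain M3U format).
tracks = sorted(music_dir.glob("*.mp3"))
playlist.write_text("\n".join(track.name for track in tracks), encoding="utf-8")

print(f"Wrote {len(tracks)} tracks to {playlist}")
```

Because the playlist sits in the same folder as the songs, the relative file names resolve correctly in most players.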

                The Best Websites to Download Okey Jakota's Songs

There are many websites that offer free downloads of Okey Jakota's songs, but not all of them are legal or safe. Some may contain viruses or malware that can harm your device or steal your personal information. Some may also have low-quality audio or incomplete files that can ruin your listening experience. To avoid these problems, we recommend using these three websites, which are legal, safe, and reliable:

                PraiseZion

PraiseZion is a website that specializes in Nigerian gospel music, but also features other genres such as highlife, afrobeat, and hip hop. You can find a collection of Okey Jakota's songs on this website, as well as other highlife legends. You can download the songs for free in MP3 format, or listen to them online. You can also read the lyrics, watch the videos, and leave comments. To download best of Okey Jakota from PraiseZion, follow these steps:

1. Go to https://praisezion.com/category/okey-jakota/
2. Scroll down and choose the song you want to download.
3. Click on the download button below the song title.
4. Wait for the download to start automatically, or click on the link that says "Download MP3 Here".
5. Save the file to your device and enjoy!

                YouTube


                YouTube is the most popular video-sharing platform in the world, where you can watch, upload, and share videos of various topics and categories. You can also find a lot of Okey Jakota's songs on YouTube, uploaded by his fans or official channels. You can watch the videos online, or download them for offline viewing. However, YouTube does not allow direct downloads of its videos, so you will need to use a third-party tool or website to do so. One of the best tools we recommend is 4K Video Downloader, which is free, fast, and easy to use. To download best of Okey Jakota from YouTube using 4K Video Downloader, follow these steps:


                download best of okey jakota mp3
                -download best of okey jakota songs
                -download best of okey jakota highlife
                -download best of okey jakota onyebuchi
                -download best of okey jakota uwa adi ka eche
                -download best of okey jakota asili ajoka
                -download best of okey jakota album
                -download best of okey jakota mixtape
                -download best of okey jakota audio
                -download best of okey jakota music
                -download best of okey jakota igbo highlife
                -download best of okey jakota praisezion
                -download best of okey jakota 2023
                -download best of okey jakota latest
                -download best of okey jakota live performance
                -download best of okey jakota video
                -download best of okey jakota youtube
                -download best of okey jakota zip file
                -download best of okey jakota free mp3
                -download best of okey jakota online
                -download best of okey jakota naija music
                -download best of okey jakota okija town
                -download best of okey jakota instrumental
                -download best of okey jakota lyrics
                -download best of okey jakota golden voice
                -download best of okey jakota full album
                -download best of okey jakota dj mix
                -download best of okey jakota 2022
                -download best of okey jakota old songs
                -download best of okey jakota new songs
                -download best of okey jakota mp4
                -download best of okey jakota naijaloaded
                -download best of okey jakota anambra state
                -download best of okey jakota song writer
                -download best of okey jakota biography
                -download best of okey jakota net worth
                -download best of okey jakota age
                -download best of okey jakota wife and children
                -download best of okey jakota awards and nominations
                -download best of okey jakota discography

1. Go to https://www.youtube.com/ and search for Okey Jakota's songs.
2. Select the video you want to download and copy its URL from the address bar.
3. Go to https://www.4kdownload.com/products/product-videodownloader and download and install 4K Video Downloader on your device.
4. Open 4K Video Downloader and click on the "Paste Link" button.
5. Select the format and quality you want for your video file.
6. Click on the "Download" button and wait for the process to finish.
7. Find the file on your device and enjoy!

                OKmusi


                OKmusi is a website that allows you to download music from various sources such as YouTube, SoundCloud, Spotify, and more. You can download music in MP3 or MP4 format, with high quality and fast speed. You can also search for music by keywords, artists, albums, or genres. You can find many Okey Jakota's songs on OKmusi, as well as other highlife artists. To download best of Okey Jakota from OKmusi, follow these steps:

1. Go to https://okmusi.com/
2. Type "Okey Jakota" in the search box and press enter.
3. Select the song you want to download from the results.
4. Click on the "Download" button next to the song title.
5. Select the format and quality you want for your music file.
6. Click on the "Download" button again and wait for the download to start.
7. Save the file to your device and enjoy!

                How to Enjoy Okey Jakota's Music Offline and Online

                -

                The Best Music Players and Apps for Okey Jakota's Music

                -

                Once you have downloaded best of Okey Jakota's music to your device, you will need a good music player or app to play them offline or online. There are many music players and apps available for different devices and platforms, but not all of them are compatible with Okey Jakota's music format or quality. Some of them may also have annoying ads or limited features that can ruin your listening experience. To avoid these problems, we recommend you to use these three music players and apps that are compatible, reliable, and user-friendly:


                VLC Media Player


                VLC Media Player is a free and open-source cross-platform multimedia player that can play almost any type of media file, including audio and video. It supports various formats such as MP3, MP4, WAV, FLAC, OGG, WMA, AAC, M4A, and more. It also has many features such as equalizer, playlist, subtitles, streaming, and more. It is compatible with Windows, Mac, Linux, Android, iOS, and other platforms. It is easy to use and has a simple and intuitive interface. To play Okey Jakota's music with VLC Media Player, follow these steps:

1. Download and install VLC Media Player on your device from https://www.videolan.org/vlc/index.html
2. Open VLC Media Player and click on the "Media" menu.
3. Select "Open File" and browse to the location of your Okey Jakota music file.
4. Click on the "Open" button and enjoy!
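
VLC can also be driven from a script, which is handy if you want to queue several downloaded files at once. The minimal sketch below uses the python-vlc bindings; the file path is an illustrative assumption.

```python
# Minimal sketch using the python-vlc bindings (pip install python-vlc).
# The file path below is an illustrative assumption.
import time
import vlc

player = vlc.MediaPlayer("okey_jakota/uwa_adi_ka_eche.mp3")  # load a local audio file
player.play()                                                # start playback

time.sleep(1)  # give VLC a moment to start
while player.is_playing():  # keep the script alive until the track finishes
    time.sleep(1)
```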

                Musicolet Music Player


                Musicolet Music Player is a free and lightweight music player app for Android devices that can play offline music without any ads or internet permission. It supports various formats such as MP3, WAV, OGG, FLAC, M4A, and more. It also has many features such as multiple queues, folder browsing, tag editor, lyrics support, sleep timer, widgets, and more. It is compatible with Android 4.1 and above. It is easy to use and has a simple and elegant interface. To play Okey Jakota's music with Musicolet Music Player, follow these steps:

1. Download and install Musicolet Music Player on your device from https://play.google.com/store/apps/details?id=in.krosbits.musicolet&hl=en&gl=US
2. Open Musicolet Music Player and grant it permission to access your device's storage.
3. Select the folder where you have stored your Okey Jakota music files.
4. Select the song you want to play and enjoy!

                iTunes


                iTunes is a free and popular media player and library app for Apple devices that can play online and offline music, podcasts, videos, and more. It supports various formats such as MP3, AAC, WAV, AIFF, Apple Lossless, and more. It also has many features such as iCloud Music Library, Apple Music, Genius, Radio, AirPlay, Home Sharing, and more. It is compatible with macOS, Windows, iOS, iPadOS, watchOS, tvOS, and HomePod. It is easy to use and has a sleek and modern interface. To play Okey Jakota's music with iTunes, follow these steps:

1. Download and install iTunes on your device from https://www.apple.com/itunes/
2. Open iTunes and click on the "File" menu.
3. Select "Add File to Library" or "Add Folder to Library", depending on your preference.
4. Browse to the location of your Okey Jakota music file or folder.
5. Click on the "Open" button and wait for iTunes to import your music.
6. Select the song you want to play from the "Music" section and enjoy!


                The Best Playlists and Albums of Okey Jakota's Music


                Okey Jakota has released many songs over the years, but some of them are more popular and loved by his fans than others. If you want to enjoy the best of Okey Jakota's music offline or online, you can create your own playlists or listen to his albums that contain his most famous songs. Here are some of the best playlists and albums of Okey Jakota's music that we recommend:


                Okey Jakota - Uwa Adi Ka Eche Playlist


                This playlist contains 10 songs from Okey Jakota's album Uwa Adi Ka Eche (Life Is Not Easy), which was released in 2005. This album is considered one of his best works, as it showcases his versatility and creativity in highlife music. The songs are full of wisdom, humor, and inspiration, and they are suitable for any occasion. Some of the songs in this playlist are:

• Uwa Adi Ka Eche (Life Is Not Easy)
• Onye Nwe Anyi (Our Lord)
• Ndidi Bu Ije Uwa (Patience Is the Journey of Life)
• Onyebuchi (God's Will)
• Igwe Okorakwu (The King of Okorakwu)

                You can listen to this playlist online on YouTube or download it from PraiseZion.


                Okey Jakota - Igbo Highlife Mix Playlist


                This playlist contains 20 songs from various albums and singles of Okey Jakota, mixed by DJ Blaze. This playlist is perfect for highlife lovers who want to enjoy Okey Jakota's music non-stop, without any interruption or gap. The songs are upbeat, lively, and danceable, and they showcase Okey Jakota's talent and charisma in highlife music. Some of the songs in this playlist are:

• Ogene Ndi Ara (The Bell of the Madmen)
• Chukwu Ebuka (God Is Great)
• Akachukwu Di Ya (God's Hand Is Upon You)
• Olu Oma Ndi Igbo (The Good News of the Igbo People)
• Okwesili Eze Group (The King's Counsel Group)

                You can listen to this playlist online on YouTube or download it from OKmusi.


                Okey Jakota - Best of Okey Jakota Album


                This album contains 15 songs from Okey Jakota's best and most popular songs, selected by his fans and critics. This album is ideal for highlife fans who want to have a collection of Okey Jakota's music in one place, without having to search for individual songs. The songs are diverse, rich, and meaningful, and they reflect Okey Jakota's style and influence in highlife music. Some of the songs in this album are:

• Nwa Baby (My Baby)
• Obodo Eji Eje Mba (The Land That Eats People)
• Ego Amaka (Money Is Good)
• Nke Onye Diri Ya (Whom It Belongs To)
• Ome Ife Jide Ofo (Do Justice with Truth)

                You can listen to this album online on iTunes or download it from PraiseZion.


                Conclusion


Okey Jakota is one of the legendary Igbo highlife artists who has made a mark in the Nigerian music industry and beyond. His music is a blend of traditional and modern influences, with catchy lyrics and melodies that appeal to a wide range of listeners. If you want to download the best of Okey Jakota for free online, you can use the websites recommended in this article, such as PraiseZion, YouTube, and OKmusi. You can enjoy his music offline or online with the music players and apps we have suggested, such as VLC Media Player, Musicolet Music Player, and iTunes, and you can listen to the playlists and albums we have listed, such as the Uwa Adi Ka Eche Playlist, the Igbo Highlife Mix Playlist, and the Best of Okey Jakota Album. We hope you have enjoyed this article and learned something new about Okey Jakota and his music.


                FAQs


                Here are some frequently asked questions about Okey Jakota and his music:


                Q: Where can I buy Okey Jakota's CDs or DVDs?


                A: You can buy Okey Jakota's CDs or DVDs from online stores such as Amazon, Jumia, Konga, or eBay. You can also buy them from physical stores such as music shops, supermarkets, or street vendors in Nigeria.


                Q: How can I contact Okey Jakota or book him for a show?


                A: You can contact Okey Jakota or book him for a show through his official social media accounts such as Facebook, Instagram, or Twitter. You can also contact his manager or record label through their phone numbers or email addresses.


                Q: What are some of the awards or recognitions that Okey Jakota has received?


                A: Okey Jakota has received many awards and recognitions for his music and contribution to the highlife genre. Some of them are:

• The Best Highlife Artist of the Year at the Nigerian Music Awards in 2006.
• The Best Highlife Album of the Year for Uwa Adi Ka Eche at the City People Entertainment Awards in 2007.
• The Lifetime Achievement Award for Highlife Music at the African Music Magazine Awards in 2014.
• The Igbo Cultural Ambassador Award at the Igbo Heritage Awards in 2018.

                Q: Who are some of the artists that Okey Jakota has inspired or mentored?


                A: Okey Jakota has inspired or mentored many artists who have followed his footsteps in highlife music. Some of them are:

• Flavour N'abania, a popular Nigerian singer, songwriter, and multi-instrumentalist known for his fusion of highlife, afrobeat, and hip hop music.
• Phyno, a famous Nigerian rapper, singer, songwriter, and record producer known for rapping in the Igbo language and for his collaborations with other highlife artists.
• Umu Obiligbo, a duo of Nigerian highlife singers and instrumentalists known for their modern rendition of traditional Igbo highlife music.

                Q: What are some of the challenges or controversies that Okey Jakota has faced in his musical career?


                A: Okey Jakota has faced some challenges or controversies in his musical career, such as:

• He was once accused of plagiarism by another highlife artist, Chief Morocco Maduka, who claimed that Okey Jakota copied his song Eze Ndi Eze (King of Kings) without his permission or credit.
• He was once involved in a legal dispute with his former record label, Premier Music, over the ownership and distribution of his music rights and royalties.
• He was once criticized by some fans and critics for changing his style and genre from highlife to gospel music, which they considered a betrayal of his roots and identity.

                \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Directx 11.1 Download Extra Quality Baixaki.md b/spaces/tioseFevbu/cartoon-converter/scripts/Directx 11.1 Download Extra Quality Baixaki.md deleted file mode 100644 index 31c1af52c1014fba74e61955b635a300f38d4da6..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Directx 11.1 Download Extra Quality Baixaki.md +++ /dev/null @@ -1,47 +0,0 @@ -

                How to Download DirectX 11.1 for Windows


DirectX is a collection of application programming interfaces (APIs) that enable high-performance graphics and sound for games and multimedia applications on Windows. DirectX 11.1 is the version of DirectX that ships with Windows 8 and is also available, in part, on Windows 7 through a platform update.


                If you want to download DirectX 11.1 for Windows, you have two options: you can either use the DirectX End-User Runtime Web Installer or the DirectX 11 Technology Update. Both options are available from the official Microsoft Download Center.



                Option 1: DirectX End-User Runtime Web Installer


                The DirectX End-User Runtime Web Installer is a small program that downloads and installs the required DirectX components for your system. This option is recommended if you are not sure which version of DirectX you need or if you want to update your existing DirectX installation.


                To use the DirectX End-User Runtime Web Installer, follow these steps:

1. Go to the DirectX End-User Runtime Web Installer page on the Microsoft Download Center and click on the Download button.
2. Save the file dxwebsetup.exe to your computer and run it.
3. Follow the instructions on the screen to complete the installation.
4. Restart your computer if prompted.

                Option 2: DirectX 11 Technology Update


                The DirectX 11 Technology Update is a standalone package that contains the DirectX 11.1 runtime for Windows 8 and Windows 7. This option is recommended if you want to install DirectX 11.1 specifically or if you have problems with the web installer.


                To use the DirectX 11 Technology Update, follow these steps:

1. Go to the package's download page on the Microsoft Download Center and click on the Download button.
2. Select your preferred language and click on Next.
3. Save the file dxsdk_jun2010.exe to your computer and run it.
4. Follow the instructions on the screen to extract and install the package.
5. Restart your computer if prompted.

                After installing DirectX 11.1, you can enjoy enhanced graphics and sound performance for your games and multimedia applications on Windows.
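
If you want to confirm which DirectX runtime your system reports after installation, you can run Microsoft's dxdiag tool and save its report to a text file. The short sketch below automates that from Python on Windows; the report file name is an illustrative assumption, and it assumes dxdiag writes a plain-text report readable with the system's default encoding.

```python
# Minimal sketch (Windows only): run dxdiag, save its report, and print the DirectX version line.
# The report file name is an illustrative assumption; dxdiag may take several seconds to finish.
import subprocess

report = "dxdiag_report.txt"
subprocess.run(["dxdiag", "/t", report], check=True)  # "/t <file>" writes the diagnostic report to a text file

with open(report, errors="ignore") as fh:
    for line in fh:
        if "DirectX Version" in line:
            print(line.strip())  # e.g. "DirectX Version: DirectX 11"
            break
```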


                What is DirectX 11.1?


DirectX 11.1 is a version of DirectX that was released in 2012. It introduces several new features and improvements to the DirectX API, such as:

• Tiled Resources: This feature allows games to use large textures with high resolution and detail without consuming too much memory or bandwidth.
• Direct3D 11.1 Device: This feature enables games to access the full capabilities of the graphics hardware and optimize performance and quality.
• Stereo 3D Rendering: This feature enables games to support stereoscopic 3D displays and glasses for a more immersive experience.
• WARP: This feature enables games to run on any Windows device, even if it does not have a dedicated graphics card, by using the CPU to render graphics.

                DirectX 11.1 is compatible with Windows 8 and Windows 7, but some features may require specific hardware or drivers to work properly.


                Why Download DirectX 11.1?


                Downloading DirectX 11.1 can benefit your system in several ways, such as:

• Improving the performance and quality of your games and multimedia applications.
• Fixing compatibility issues and errors with older or newer DirectX versions.
• Enabling new features and enhancements for your graphics and sound hardware.
• Keeping your system up to date with the latest technology and standards.

                Downloading DirectX 11.1 is free and easy, and it can make a big difference in your gaming and multimedia experience on Windows.

                \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Kai Po Che Full Movie Telugu Download Torrent HOT!.md b/spaces/tioseFevbu/cartoon-converter/scripts/Kai Po Che Full Movie Telugu Download Torrent HOT!.md deleted file mode 100644 index 9555045e55d5e916232717963cafd737890d0d3d..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Kai Po Che Full Movie Telugu Download Torrent HOT!.md +++ /dev/null @@ -1,25 +0,0 @@ - -

                Kai Po Che: A Drama About Friendship and Cricket


                Kai Po Che is a 2013 Hindi movie directed by Abhishek Kapoor and based on the novel The 3 Mistakes of My Life by Chetan Bhagat. The movie stars Sushant Singh Rajput, Rajkummar Rao and Amit Sadh as three friends who start a cricket academy and a business in Ahmedabad. The movie explores their friendship, their dreams and their challenges as they face personal and communal conflicts in the backdrop of the 2001 Gujarat earthquake and the 2002 Gujarat riots.


                The movie was critically acclaimed and commercially successful, earning several awards and nominations. It was also selected to be screened at the 63rd Berlin International Film Festival, where it received a standing ovation. The movie has a rating of 7.8 out of 10 on IMDb and 86% on Rotten Tomatoes.



                If you are looking for a way to watch Kai Po Che online, you might be tempted to download it from torrent sites. However, this is not a safe or legal option, as you might expose yourself to malware, viruses, legal action and fines. Moreover, you would be depriving the filmmakers and actors of their rightful earnings and recognition.


                Instead, you should opt for legal streaming platforms that offer Kai Po Che for rent or purchase. Some of these platforms are:

• Amazon Prime Video
• YouTube
• Google Play Movies
• iTunes
• Netflix

                By choosing these platforms, you can enjoy Kai Po Che in high quality and with subtitles, without any risk or hassle. You can also support the Indian film industry and encourage more quality content to be produced.


                Kai Po Che is a movie that will touch your heart with its story of friendship, passion and resilience. Don't miss this opportunity to watch it legally and safely online.


                Kai Po Che is not just a sports drama, but also a social commentary on the turbulent times that Gujarat witnessed in the early 2000s. The movie does not shy away from depicting the horrors of communal violence and its impact on the lives of ordinary people. The movie also shows how politics can corrupt and manipulate the youth, and how friendship can overcome the barriers of religion and ideology.


                The movie is based on a novel, but it does not follow it blindly. The director Abhishek Kapoor makes some changes to the plot and the characters, to make them more realistic and relatable. He also adds some original scenes and dialogues that enhance the emotional quotient of the movie. The movie has a crisp editing and a smooth narrative that keeps the audience engaged throughout.


                The movie also boasts of some excellent performances by the lead actors. Sushant Singh Rajput shines as Ishaan, the passionate cricketer who has a soft spot for Ali, the Muslim prodigy. Rajkummar Rao delivers a nuanced performance as Govind, the sensible and pragmatic businessman who falls in love with Ishaan's sister. Amit Sadh impresses as Omi, the conflicted and vulnerable friend who gets swayed by his uncle's political agenda. Amrita Puri is charming as Vidya, the spirited and supportive love interest of Govind.


                The movie also has a soulful soundtrack by Amit Trivedi, who creates some memorable songs that suit the mood and theme of the movie. The songs like Shubhaarambh, Manja and Meethi Boliyaan are catchy and uplifting, while the background score is subtle and effective.


                Kai Po Che is a movie that celebrates friendship, cricket and India. It is a movie that will make you laugh, cry and cheer. It is a movie that deserves to be watched by everyone.

                \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/universaldetector.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/universaldetector.py deleted file mode 100644 index 22fcf8290c1026d3ae35c6ae605a67b3f24c85e7..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/universaldetector.py +++ /dev/null @@ -1,328 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Universal charset detector code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 2001 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# Shy Shalom - original C code -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### -""" -Module containing the UniversalDetector detector class, which is the primary -class a user of ``chardet`` should use. - -:author: Mark Pilgrim (initial port to Python) -:author: Shy Shalom (original C code) -:author: Dan Blanchard (major refactoring for 3.0) -:author: Ian Cordasco -""" - - -import codecs -import logging -import re - -from .charsetgroupprober import CharSetGroupProber -from .enums import InputState, LanguageFilter, ProbingState -from .escprober import EscCharSetProber -from .latin1prober import Latin1Prober -from .mbcsgroupprober import MBCSGroupProber -from .sbcsgroupprober import SBCSGroupProber -from .utf1632prober import UTF1632Prober - - -class UniversalDetector: - """ - The ``UniversalDetector`` class underlies the ``chardet.detect`` function - and coordinates all of the different charset probers. - - To get a ``dict`` containing an encoding and its confidence, you can simply - run: - - .. 
code:: - - u = UniversalDetector() - u.feed(some_bytes) - u.close() - detected = u.result - - """ - - MINIMUM_THRESHOLD = 0.20 - HIGH_BYTE_DETECTOR = re.compile(b"[\x80-\xFF]") - ESC_DETECTOR = re.compile(b"(\033|~{)") - WIN_BYTE_DETECTOR = re.compile(b"[\x80-\x9F]") - ISO_WIN_MAP = { - "iso-8859-1": "Windows-1252", - "iso-8859-2": "Windows-1250", - "iso-8859-5": "Windows-1251", - "iso-8859-6": "Windows-1256", - "iso-8859-7": "Windows-1253", - "iso-8859-8": "Windows-1255", - "iso-8859-9": "Windows-1254", - "iso-8859-13": "Windows-1257", - } - - def __init__(self, lang_filter=LanguageFilter.ALL): - self._esc_charset_prober = None - self._utf1632_prober = None - self._charset_probers = [] - self.result = None - self.done = None - self._got_data = None - self._input_state = None - self._last_char = None - self.lang_filter = lang_filter - self.logger = logging.getLogger(__name__) - self._has_win_bytes = None - self.reset() - - @property - def input_state(self): - return self._input_state - - @property - def has_win_bytes(self): - return self._has_win_bytes - - @property - def charset_probers(self): - return self._charset_probers - - def reset(self): - """ - Reset the UniversalDetector and all of its probers back to their - initial states. This is called by ``__init__``, so you only need to - call this directly in between analyses of different documents. - """ - self.result = {"encoding": None, "confidence": 0.0, "language": None} - self.done = False - self._got_data = False - self._has_win_bytes = False - self._input_state = InputState.PURE_ASCII - self._last_char = b"" - if self._esc_charset_prober: - self._esc_charset_prober.reset() - if self._utf1632_prober: - self._utf1632_prober.reset() - for prober in self._charset_probers: - prober.reset() - - def feed(self, byte_str): - """ - Takes a chunk of a document and feeds it through all of the relevant - charset probers. - - After calling ``feed``, you can check the value of the ``done`` - attribute to see if you need to continue feeding the - ``UniversalDetector`` more data, or if it has made a prediction - (in the ``result`` attribute). - - .. note:: - You should always call ``close`` when you're done feeding in your - document if ``done`` is not already ``True``. 
- """ - if self.done: - return - - if not byte_str: - return - - if not isinstance(byte_str, bytearray): - byte_str = bytearray(byte_str) - - # First check for known BOMs, since these are guaranteed to be correct - if not self._got_data: - # If the data starts with BOM, we know it is UTF - if byte_str.startswith(codecs.BOM_UTF8): - # EF BB BF UTF-8 with BOM - self.result = { - "encoding": "UTF-8-SIG", - "confidence": 1.0, - "language": "", - } - elif byte_str.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)): - # FF FE 00 00 UTF-32, little-endian BOM - # 00 00 FE FF UTF-32, big-endian BOM - self.result = {"encoding": "UTF-32", "confidence": 1.0, "language": ""} - elif byte_str.startswith(b"\xFE\xFF\x00\x00"): - # FE FF 00 00 UCS-4, unusual octet order BOM (3412) - self.result = { - "encoding": "X-ISO-10646-UCS-4-3412", - "confidence": 1.0, - "language": "", - } - elif byte_str.startswith(b"\x00\x00\xFF\xFE"): - # 00 00 FF FE UCS-4, unusual octet order BOM (2143) - self.result = { - "encoding": "X-ISO-10646-UCS-4-2143", - "confidence": 1.0, - "language": "", - } - elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)): - # FF FE UTF-16, little endian BOM - # FE FF UTF-16, big endian BOM - self.result = {"encoding": "UTF-16", "confidence": 1.0, "language": ""} - - self._got_data = True - if self.result["encoding"] is not None: - self.done = True - return - - # If none of those matched and we've only see ASCII so far, check - # for high bytes and escape sequences - if self._input_state == InputState.PURE_ASCII: - if self.HIGH_BYTE_DETECTOR.search(byte_str): - self._input_state = InputState.HIGH_BYTE - elif ( - self._input_state == InputState.PURE_ASCII - and self.ESC_DETECTOR.search(self._last_char + byte_str) - ): - self._input_state = InputState.ESC_ASCII - - self._last_char = byte_str[-1:] - - # next we will look to see if it is appears to be either a UTF-16 or - # UTF-32 encoding - if not self._utf1632_prober: - self._utf1632_prober = UTF1632Prober() - - if self._utf1632_prober.state == ProbingState.DETECTING: - if self._utf1632_prober.feed(byte_str) == ProbingState.FOUND_IT: - self.result = { - "encoding": self._utf1632_prober.charset_name, - "confidence": self._utf1632_prober.get_confidence(), - "language": "", - } - self.done = True - return - - # If we've seen escape sequences, use the EscCharSetProber, which - # uses a simple state machine to check for known escape sequences in - # HZ and ISO-2022 encodings, since those are the only encodings that - # use such sequences. - if self._input_state == InputState.ESC_ASCII: - if not self._esc_charset_prober: - self._esc_charset_prober = EscCharSetProber(self.lang_filter) - if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT: - self.result = { - "encoding": self._esc_charset_prober.charset_name, - "confidence": self._esc_charset_prober.get_confidence(), - "language": self._esc_charset_prober.language, - } - self.done = True - # If we've seen high bytes (i.e., those with values greater than 127), - # we need to do more complicated checks using all our multi-byte and - # single-byte probers that are left. The single-byte probers - # use character bigram distributions to determine the encoding, whereas - # the multi-byte probers use a combination of character unigram and - # bigram distributions. 
- elif self._input_state == InputState.HIGH_BYTE: - if not self._charset_probers: - self._charset_probers = [MBCSGroupProber(self.lang_filter)] - # If we're checking non-CJK encodings, use single-byte prober - if self.lang_filter & LanguageFilter.NON_CJK: - self._charset_probers.append(SBCSGroupProber()) - self._charset_probers.append(Latin1Prober()) - for prober in self._charset_probers: - if prober.feed(byte_str) == ProbingState.FOUND_IT: - self.result = { - "encoding": prober.charset_name, - "confidence": prober.get_confidence(), - "language": prober.language, - } - self.done = True - break - if self.WIN_BYTE_DETECTOR.search(byte_str): - self._has_win_bytes = True - - def close(self): - """ - Stop analyzing the current document and come up with a final - prediction. - - :returns: The ``result`` attribute, a ``dict`` with the keys - `encoding`, `confidence`, and `language`. - """ - # Don't bother with checks if we're already done - if self.done: - return self.result - self.done = True - - if not self._got_data: - self.logger.debug("no data received!") - - # Default to ASCII if it is all we've seen so far - elif self._input_state == InputState.PURE_ASCII: - self.result = {"encoding": "ascii", "confidence": 1.0, "language": ""} - - # If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD - elif self._input_state == InputState.HIGH_BYTE: - prober_confidence = None - max_prober_confidence = 0.0 - max_prober = None - for prober in self._charset_probers: - if not prober: - continue - prober_confidence = prober.get_confidence() - if prober_confidence > max_prober_confidence: - max_prober_confidence = prober_confidence - max_prober = prober - if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD): - charset_name = max_prober.charset_name - lower_charset_name = max_prober.charset_name.lower() - confidence = max_prober.get_confidence() - # Use Windows encoding name instead of ISO-8859 if we saw any - # extra Windows-specific bytes - if lower_charset_name.startswith("iso-8859"): - if self._has_win_bytes: - charset_name = self.ISO_WIN_MAP.get( - lower_charset_name, charset_name - ) - self.result = { - "encoding": charset_name, - "confidence": confidence, - "language": max_prober.language, - } - - # Log all prober confidences if none met MINIMUM_THRESHOLD - if self.logger.getEffectiveLevel() <= logging.DEBUG: - if self.result["encoding"] is None: - self.logger.debug("no probers hit minimum threshold") - for group_prober in self._charset_probers: - if not group_prober: - continue - if isinstance(group_prober, CharSetGroupProber): - for prober in group_prober.probers: - self.logger.debug( - "%s %s confidence = %s", - prober.charset_name, - prober.language, - prober.get_confidence(), - ) - else: - self.logger.debug( - "%s %s confidence = %s", - group_prober.charset_name, - group_prober.language, - group_prober.get_confidence(), - ) - return self.result diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/colorama/__init__.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/colorama/__init__.py deleted file mode 100644 index 9138a8cc8f044a031d4acada4c1cf6ef33e81397..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/colorama/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
-from .initialise import init, deinit, reinit, colorama_text -from .ansi import Fore, Back, Style, Cursor -from .ansitowin32 import AnsiToWin32 - -__version__ = '0.4.5' diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/pygments/console.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/pygments/console.py deleted file mode 100644 index 2ada68e03b3c018e3ddbbf3356a48a1d580aa251..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/pygments/console.py +++ /dev/null @@ -1,70 +0,0 @@ -""" - pygments.console - ~~~~~~~~~~~~~~~~ - - Format colored console output. - - :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -esc = "\x1b[" - -codes = {} -codes[""] = "" -codes["reset"] = esc + "39;49;00m" - -codes["bold"] = esc + "01m" -codes["faint"] = esc + "02m" -codes["standout"] = esc + "03m" -codes["underline"] = esc + "04m" -codes["blink"] = esc + "05m" -codes["overline"] = esc + "06m" - -dark_colors = ["black", "red", "green", "yellow", "blue", - "magenta", "cyan", "gray"] -light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brightblue", - "brightmagenta", "brightcyan", "white"] - -x = 30 -for d, l in zip(dark_colors, light_colors): - codes[d] = esc + "%im" % x - codes[l] = esc + "%im" % (60 + x) - x += 1 - -del d, l, x - -codes["white"] = codes["bold"] - - -def reset_color(): - return codes["reset"] - - -def colorize(color_key, text): - return codes[color_key] + text + codes["reset"] - - -def ansiformat(attr, text): - """ - Format ``text`` with a color and/or some attributes:: - - color normal color - *color* bold color - _color_ underlined color - +color+ blinking color - """ - result = [] - if attr[:1] == attr[-1:] == '+': - result.append(codes['blink']) - attr = attr[1:-1] - if attr[:1] == attr[-1:] == '*': - result.append(codes['bold']) - attr = attr[1:-1] - if attr[:1] == attr[-1:] == '_': - result.append(codes['underline']) - attr = attr[1:-1] - result.append(codes[attr]) - result.append(text) - result.append(codes['reset']) - return ''.join(result) diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/syntax.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/syntax.py deleted file mode 100644 index dace718c1b5fab7b90ed5d77283a9f907b78b4e9..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/rich/syntax.py +++ /dev/null @@ -1,934 +0,0 @@ -import os.path -import platform -import re -import sys -import textwrap -from abc import ABC, abstractmethod -from typing import ( - Any, - Dict, - Iterable, - List, - NamedTuple, - Optional, - Sequence, - Set, - Tuple, - Type, - Union, -) - -from pip._vendor.pygments.lexer import Lexer -from pip._vendor.pygments.lexers import get_lexer_by_name, guess_lexer_for_filename -from pip._vendor.pygments.style import Style as PygmentsStyle -from pip._vendor.pygments.styles import get_style_by_name -from pip._vendor.pygments.token import ( - Comment, - Error, - Generic, - Keyword, - Name, - Number, - Operator, - String, - Token, - Whitespace, -) -from pip._vendor.pygments.util import ClassNotFound - -from pip._vendor.rich.containers import Lines -from pip._vendor.rich.padding import Padding, PaddingDimensions - -from ._loop import loop_first -from .color import 
Color, blend_rgb -from .console import Console, ConsoleOptions, JustifyMethod, RenderResult -from .jupyter import JupyterMixin -from .measure import Measurement -from .segment import Segment, Segments -from .style import Style, StyleType -from .text import Text - -TokenType = Tuple[str, ...] - -WINDOWS = platform.system() == "Windows" -DEFAULT_THEME = "monokai" - -# The following styles are based on https://github.com/pygments/pygments/blob/master/pygments/formatters/terminal.py -# A few modifications were made - -ANSI_LIGHT: Dict[TokenType, Style] = { - Token: Style(), - Whitespace: Style(color="white"), - Comment: Style(dim=True), - Comment.Preproc: Style(color="cyan"), - Keyword: Style(color="blue"), - Keyword.Type: Style(color="cyan"), - Operator.Word: Style(color="magenta"), - Name.Builtin: Style(color="cyan"), - Name.Function: Style(color="green"), - Name.Namespace: Style(color="cyan", underline=True), - Name.Class: Style(color="green", underline=True), - Name.Exception: Style(color="cyan"), - Name.Decorator: Style(color="magenta", bold=True), - Name.Variable: Style(color="red"), - Name.Constant: Style(color="red"), - Name.Attribute: Style(color="cyan"), - Name.Tag: Style(color="bright_blue"), - String: Style(color="yellow"), - Number: Style(color="blue"), - Generic.Deleted: Style(color="bright_red"), - Generic.Inserted: Style(color="green"), - Generic.Heading: Style(bold=True), - Generic.Subheading: Style(color="magenta", bold=True), - Generic.Prompt: Style(bold=True), - Generic.Error: Style(color="bright_red"), - Error: Style(color="red", underline=True), -} - -ANSI_DARK: Dict[TokenType, Style] = { - Token: Style(), - Whitespace: Style(color="bright_black"), - Comment: Style(dim=True), - Comment.Preproc: Style(color="bright_cyan"), - Keyword: Style(color="bright_blue"), - Keyword.Type: Style(color="bright_cyan"), - Operator.Word: Style(color="bright_magenta"), - Name.Builtin: Style(color="bright_cyan"), - Name.Function: Style(color="bright_green"), - Name.Namespace: Style(color="bright_cyan", underline=True), - Name.Class: Style(color="bright_green", underline=True), - Name.Exception: Style(color="bright_cyan"), - Name.Decorator: Style(color="bright_magenta", bold=True), - Name.Variable: Style(color="bright_red"), - Name.Constant: Style(color="bright_red"), - Name.Attribute: Style(color="bright_cyan"), - Name.Tag: Style(color="bright_blue"), - String: Style(color="yellow"), - Number: Style(color="bright_blue"), - Generic.Deleted: Style(color="bright_red"), - Generic.Inserted: Style(color="bright_green"), - Generic.Heading: Style(bold=True), - Generic.Subheading: Style(color="bright_magenta", bold=True), - Generic.Prompt: Style(bold=True), - Generic.Error: Style(color="bright_red"), - Error: Style(color="red", underline=True), -} - -RICH_SYNTAX_THEMES = {"ansi_light": ANSI_LIGHT, "ansi_dark": ANSI_DARK} -NUMBERS_COLUMN_DEFAULT_PADDING = 2 - - -class SyntaxTheme(ABC): - """Base class for a syntax theme.""" - - @abstractmethod - def get_style_for_token(self, token_type: TokenType) -> Style: - """Get a style for a given Pygments token.""" - raise NotImplementedError # pragma: no cover - - @abstractmethod - def get_background_style(self) -> Style: - """Get the background color.""" - raise NotImplementedError # pragma: no cover - - -class PygmentsSyntaxTheme(SyntaxTheme): - """Syntax theme that delegates to Pygments theme.""" - - def __init__(self, theme: Union[str, Type[PygmentsStyle]]) -> None: - self._style_cache: Dict[TokenType, Style] = {} - if isinstance(theme, str): - try: - 
self._pygments_style_class = get_style_by_name(theme) - except ClassNotFound: - self._pygments_style_class = get_style_by_name("default") - else: - self._pygments_style_class = theme - - self._background_color = self._pygments_style_class.background_color - self._background_style = Style(bgcolor=self._background_color) - - def get_style_for_token(self, token_type: TokenType) -> Style: - """Get a style from a Pygments class.""" - try: - return self._style_cache[token_type] - except KeyError: - try: - pygments_style = self._pygments_style_class.style_for_token(token_type) - except KeyError: - style = Style.null() - else: - color = pygments_style["color"] - bgcolor = pygments_style["bgcolor"] - style = Style( - color="#" + color if color else "#000000", - bgcolor="#" + bgcolor if bgcolor else self._background_color, - bold=pygments_style["bold"], - italic=pygments_style["italic"], - underline=pygments_style["underline"], - ) - self._style_cache[token_type] = style - return style - - def get_background_style(self) -> Style: - return self._background_style - - -class ANSISyntaxTheme(SyntaxTheme): - """Syntax theme to use standard colors.""" - - def __init__(self, style_map: Dict[TokenType, Style]) -> None: - self.style_map = style_map - self._missing_style = Style.null() - self._background_style = Style.null() - self._style_cache: Dict[TokenType, Style] = {} - - def get_style_for_token(self, token_type: TokenType) -> Style: - """Look up style in the style map.""" - try: - return self._style_cache[token_type] - except KeyError: - # Styles form a hierarchy - # We need to go from most to least specific - # e.g. ("foo", "bar", "baz") to ("foo", "bar") to ("foo",) - get_style = self.style_map.get - token = tuple(token_type) - style = self._missing_style - while token: - _style = get_style(token) - if _style is not None: - style = _style - break - token = token[:-1] - self._style_cache[token_type] = style - return style - - def get_background_style(self) -> Style: - return self._background_style - - -SyntaxPosition = Tuple[int, int] - - -class _SyntaxHighlightRange(NamedTuple): - """ - A range to highlight in a Syntax object. - `start` and `end` are 2-integers tuples, where the first integer is the line number - (starting from 1) and the second integer is the column index (starting from 0). - """ - - style: StyleType - start: SyntaxPosition - end: SyntaxPosition - - -class Syntax(JupyterMixin): - """Construct a Syntax object to render syntax highlighted code. - - Args: - code (str): Code to highlight. - lexer (Lexer | str): Lexer to use (see https://pygments.org/docs/lexers/) - theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "monokai". - dedent (bool, optional): Enable stripping of initial whitespace. Defaults to False. - line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False. - start_line (int, optional): Starting number for line numbers. Defaults to 1. - line_range (Tuple[int | None, int | None], optional): If given should be a tuple of the start and end line to render. - A value of None in the tuple indicates the range is open in that direction. - highlight_lines (Set[int]): A set of line numbers to highlight. - code_width: Width of code to render (not including line numbers), or ``None`` to use all available width. - tab_size (int, optional): Size of tabs. Defaults to 4. - word_wrap (bool, optional): Enable word wrapping. 
- background_color (str, optional): Optional background color, or None to use theme color. Defaults to None. - indent_guides (bool, optional): Show indent guides. Defaults to False. - padding (PaddingDimensions): Padding to apply around the syntax. Defaults to 0 (no padding). - """ - - _pygments_style_class: Type[PygmentsStyle] - _theme: SyntaxTheme - - @classmethod - def get_theme(cls, name: Union[str, SyntaxTheme]) -> SyntaxTheme: - """Get a syntax theme instance.""" - if isinstance(name, SyntaxTheme): - return name - theme: SyntaxTheme - if name in RICH_SYNTAX_THEMES: - theme = ANSISyntaxTheme(RICH_SYNTAX_THEMES[name]) - else: - theme = PygmentsSyntaxTheme(name) - return theme - - def __init__( - self, - code: str, - lexer: Union[Lexer, str], - *, - theme: Union[str, SyntaxTheme] = DEFAULT_THEME, - dedent: bool = False, - line_numbers: bool = False, - start_line: int = 1, - line_range: Optional[Tuple[Optional[int], Optional[int]]] = None, - highlight_lines: Optional[Set[int]] = None, - code_width: Optional[int] = None, - tab_size: int = 4, - word_wrap: bool = False, - background_color: Optional[str] = None, - indent_guides: bool = False, - padding: PaddingDimensions = 0, - ) -> None: - self.code = code - self._lexer = lexer - self.dedent = dedent - self.line_numbers = line_numbers - self.start_line = start_line - self.line_range = line_range - self.highlight_lines = highlight_lines or set() - self.code_width = code_width - self.tab_size = tab_size - self.word_wrap = word_wrap - self.background_color = background_color - self.background_style = ( - Style(bgcolor=background_color) if background_color else Style() - ) - self.indent_guides = indent_guides - self.padding = padding - - self._theme = self.get_theme(theme) - self._stylized_ranges: List[_SyntaxHighlightRange] = [] - - @classmethod - def from_path( - cls, - path: str, - encoding: str = "utf-8", - lexer: Optional[Union[Lexer, str]] = None, - theme: Union[str, SyntaxTheme] = DEFAULT_THEME, - dedent: bool = False, - line_numbers: bool = False, - line_range: Optional[Tuple[int, int]] = None, - start_line: int = 1, - highlight_lines: Optional[Set[int]] = None, - code_width: Optional[int] = None, - tab_size: int = 4, - word_wrap: bool = False, - background_color: Optional[str] = None, - indent_guides: bool = False, - padding: PaddingDimensions = 0, - ) -> "Syntax": - """Construct a Syntax object from a file. - - Args: - path (str): Path to file to highlight. - encoding (str): Encoding of file. - lexer (str | Lexer, optional): Lexer to use. If None, lexer will be auto-detected from path/file content. - theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "emacs". - dedent (bool, optional): Enable stripping of initial whitespace. Defaults to True. - line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False. - start_line (int, optional): Starting number for line numbers. Defaults to 1. - line_range (Tuple[int, int], optional): If given should be a tuple of the start and end line to render. - highlight_lines (Set[int]): A set of line numbers to highlight. - code_width: Width of code to render (not including line numbers), or ``None`` to use all available width. - tab_size (int, optional): Size of tabs. Defaults to 4. - word_wrap (bool, optional): Enable word wrapping of code. - background_color (str, optional): Optional background color, or None to use theme color. Defaults to None. 
- indent_guides (bool, optional): Show indent guides. Defaults to False. - padding (PaddingDimensions): Padding to apply around the syntax. Defaults to 0 (no padding). - - Returns: - [Syntax]: A Syntax object that may be printed to the console - """ - with open(path, "rt", encoding=encoding) as code_file: - code = code_file.read() - - if not lexer: - lexer = cls.guess_lexer(path, code=code) - - return cls( - code, - lexer, - theme=theme, - dedent=dedent, - line_numbers=line_numbers, - line_range=line_range, - start_line=start_line, - highlight_lines=highlight_lines, - code_width=code_width, - tab_size=tab_size, - word_wrap=word_wrap, - background_color=background_color, - indent_guides=indent_guides, - padding=padding, - ) - - @classmethod - def guess_lexer(cls, path: str, code: Optional[str] = None) -> str: - """Guess the alias of the Pygments lexer to use based on a path and an optional string of code. - If code is supplied, it will use a combination of the code and the filename to determine the - best lexer to use. For example, if the file is ``index.html`` and the file contains Django - templating syntax, then "html+django" will be returned. If the file is ``index.html``, and no - templating language is used, the "html" lexer will be used. If no string of code - is supplied, the lexer will be chosen based on the file extension.. - - Args: - path (AnyStr): The path to the file containing the code you wish to know the lexer for. - code (str, optional): Optional string of code that will be used as a fallback if no lexer - is found for the supplied path. - - Returns: - str: The name of the Pygments lexer that best matches the supplied path/code. - """ - lexer: Optional[Lexer] = None - lexer_name = "default" - if code: - try: - lexer = guess_lexer_for_filename(path, code) - except ClassNotFound: - pass - - if not lexer: - try: - _, ext = os.path.splitext(path) - if ext: - extension = ext.lstrip(".").lower() - lexer = get_lexer_by_name(extension) - except ClassNotFound: - pass - - if lexer: - if lexer.aliases: - lexer_name = lexer.aliases[0] - else: - lexer_name = lexer.name - - return lexer_name - - def _get_base_style(self) -> Style: - """Get the base style.""" - default_style = self._theme.get_background_style() + self.background_style - return default_style - - def _get_token_color(self, token_type: TokenType) -> Optional[Color]: - """Get a color (if any) for the given token. - - Args: - token_type (TokenType): A token type tuple from Pygments. - - Returns: - Optional[Color]: Color from theme, or None for no color. - """ - style = self._theme.get_style_for_token(token_type) - return style.color - - @property - def lexer(self) -> Optional[Lexer]: - """The lexer for this syntax, or None if no lexer was found. - - Tries to find the lexer by name if a string was passed to the constructor. - """ - - if isinstance(self._lexer, Lexer): - return self._lexer - try: - return get_lexer_by_name( - self._lexer, - stripnl=False, - ensurenl=True, - tabsize=self.tab_size, - ) - except ClassNotFound: - return None - - def highlight( - self, - code: str, - line_range: Optional[Tuple[Optional[int], Optional[int]]] = None, - ) -> Text: - """Highlight code and return a Text instance. - - Args: - code (str): Code to highlight. - line_range(Tuple[int, int], optional): Optional line range to highlight. - - Returns: - Text: A text instance containing highlighted syntax. 
- """ - - base_style = self._get_base_style() - justify: JustifyMethod = ( - "default" if base_style.transparent_background else "left" - ) - - text = Text( - justify=justify, - style=base_style, - tab_size=self.tab_size, - no_wrap=not self.word_wrap, - ) - _get_theme_style = self._theme.get_style_for_token - - lexer = self.lexer - - if lexer is None: - text.append(code) - else: - if line_range: - # More complicated path to only stylize a portion of the code - # This speeds up further operations as there are less spans to process - line_start, line_end = line_range - - def line_tokenize() -> Iterable[Tuple[Any, str]]: - """Split tokens to one per line.""" - assert lexer # required to make MyPy happy - we know lexer is not None at this point - - for token_type, token in lexer.get_tokens(code): - while token: - line_token, new_line, token = token.partition("\n") - yield token_type, line_token + new_line - - def tokens_to_spans() -> Iterable[Tuple[str, Optional[Style]]]: - """Convert tokens to spans.""" - tokens = iter(line_tokenize()) - line_no = 0 - _line_start = line_start - 1 if line_start else 0 - - # Skip over tokens until line start - while line_no < _line_start: - _token_type, token = next(tokens) - yield (token, None) - if token.endswith("\n"): - line_no += 1 - # Generate spans until line end - for token_type, token in tokens: - yield (token, _get_theme_style(token_type)) - if token.endswith("\n"): - line_no += 1 - if line_end and line_no >= line_end: - break - - text.append_tokens(tokens_to_spans()) - - else: - text.append_tokens( - (token, _get_theme_style(token_type)) - for token_type, token in lexer.get_tokens(code) - ) - if self.background_color is not None: - text.stylize(f"on {self.background_color}") - - if self._stylized_ranges: - self._apply_stylized_ranges(text) - - return text - - def stylize_range( - self, style: StyleType, start: SyntaxPosition, end: SyntaxPosition - ) -> None: - """ - Adds a custom style on a part of the code, that will be applied to the syntax display when it's rendered. - Line numbers are 1-based, while column indexes are 0-based. - - Args: - style (StyleType): The style to apply. - start (Tuple[int, int]): The start of the range, in the form `[line number, column index]`. - end (Tuple[int, int]): The end of the range, in the form `[line number, column index]`. 
- """ - self._stylized_ranges.append(_SyntaxHighlightRange(style, start, end)) - - def _get_line_numbers_color(self, blend: float = 0.3) -> Color: - background_style = self._theme.get_background_style() + self.background_style - background_color = background_style.bgcolor - if background_color is None or background_color.is_system_defined: - return Color.default() - foreground_color = self._get_token_color(Token.Text) - if foreground_color is None or foreground_color.is_system_defined: - return foreground_color or Color.default() - new_color = blend_rgb( - background_color.get_truecolor(), - foreground_color.get_truecolor(), - cross_fade=blend, - ) - return Color.from_triplet(new_color) - - @property - def _numbers_column_width(self) -> int: - """Get the number of characters used to render the numbers column.""" - column_width = 0 - if self.line_numbers: - column_width = ( - len(str(self.start_line + self.code.count("\n"))) - + NUMBERS_COLUMN_DEFAULT_PADDING - ) - return column_width - - def _get_number_styles(self, console: Console) -> Tuple[Style, Style, Style]: - """Get background, number, and highlight styles for line numbers.""" - background_style = self._get_base_style() - if background_style.transparent_background: - return Style.null(), Style(dim=True), Style.null() - if console.color_system in ("256", "truecolor"): - number_style = Style.chain( - background_style, - self._theme.get_style_for_token(Token.Text), - Style(color=self._get_line_numbers_color()), - self.background_style, - ) - highlight_number_style = Style.chain( - background_style, - self._theme.get_style_for_token(Token.Text), - Style(bold=True, color=self._get_line_numbers_color(0.9)), - self.background_style, - ) - else: - number_style = background_style + Style(dim=True) - highlight_number_style = background_style + Style(dim=False) - return background_style, number_style, highlight_number_style - - def __rich_measure__( - self, console: "Console", options: "ConsoleOptions" - ) -> "Measurement": - _, right, _, left = Padding.unpack(self.padding) - if self.code_width is not None: - width = self.code_width + self._numbers_column_width + right + left - return Measurement(self._numbers_column_width, width) - return Measurement(self._numbers_column_width, options.max_width) - - def __rich_console__( - self, console: Console, options: ConsoleOptions - ) -> RenderResult: - segments = Segments(self._get_syntax(console, options)) - if self.padding: - yield Padding( - segments, style=self._theme.get_background_style(), pad=self.padding - ) - else: - yield segments - - def _get_syntax( - self, - console: Console, - options: ConsoleOptions, - ) -> Iterable[Segment]: - """ - Get the Segments for the Syntax object, excluding any vertical/horizontal padding - """ - transparent_background = self._get_base_style().transparent_background - code_width = ( - ( - (options.max_width - self._numbers_column_width - 1) - if self.line_numbers - else options.max_width - ) - if self.code_width is None - else self.code_width - ) - - ends_on_nl, processed_code = self._process_code(self.code) - text = self.highlight(processed_code, self.line_range) - - if not self.line_numbers and not self.word_wrap and not self.line_range: - if not ends_on_nl: - text.remove_suffix("\n") - # Simple case of just rendering text - style = ( - self._get_base_style() - + self._theme.get_style_for_token(Comment) - + Style(dim=True) - + self.background_style - ) - if self.indent_guides and not options.ascii_only: - text = text.with_indent_guides(self.tab_size, 
style=style) - text.overflow = "crop" - if style.transparent_background: - yield from console.render( - text, options=options.update(width=code_width) - ) - else: - syntax_lines = console.render_lines( - text, - options.update(width=code_width, height=None, justify="left"), - style=self.background_style, - pad=True, - new_lines=True, - ) - for syntax_line in syntax_lines: - yield from syntax_line - return - - start_line, end_line = self.line_range or (None, None) - line_offset = 0 - if start_line: - line_offset = max(0, start_line - 1) - lines: Union[List[Text], Lines] = text.split("\n", allow_blank=ends_on_nl) - if self.line_range: - lines = lines[line_offset:end_line] - - if self.indent_guides and not options.ascii_only: - style = ( - self._get_base_style() - + self._theme.get_style_for_token(Comment) - + Style(dim=True) - + self.background_style - ) - lines = ( - Text("\n") - .join(lines) - .with_indent_guides(self.tab_size, style=style) - .split("\n", allow_blank=True) - ) - - numbers_column_width = self._numbers_column_width - render_options = options.update(width=code_width) - - highlight_line = self.highlight_lines.__contains__ - _Segment = Segment - new_line = _Segment("\n") - - line_pointer = "> " if options.legacy_windows else "❱ " - - ( - background_style, - number_style, - highlight_number_style, - ) = self._get_number_styles(console) - - for line_no, line in enumerate(lines, self.start_line + line_offset): - if self.word_wrap: - wrapped_lines = console.render_lines( - line, - render_options.update(height=None, justify="left"), - style=background_style, - pad=not transparent_background, - ) - else: - segments = list(line.render(console, end="")) - if options.no_wrap: - wrapped_lines = [segments] - else: - wrapped_lines = [ - _Segment.adjust_line_length( - segments, - render_options.max_width, - style=background_style, - pad=not transparent_background, - ) - ] - - if self.line_numbers: - wrapped_line_left_pad = _Segment( - " " * numbers_column_width + " ", background_style - ) - for first, wrapped_line in loop_first(wrapped_lines): - if first: - line_column = str(line_no).rjust(numbers_column_width - 2) + " " - if highlight_line(line_no): - yield _Segment(line_pointer, Style(color="red")) - yield _Segment(line_column, highlight_number_style) - else: - yield _Segment(" ", highlight_number_style) - yield _Segment(line_column, number_style) - else: - yield wrapped_line_left_pad - yield from wrapped_line - yield new_line - else: - for wrapped_line in wrapped_lines: - yield from wrapped_line - yield new_line - - def _apply_stylized_ranges(self, text: Text) -> None: - """ - Apply stylized ranges to a text instance, - using the given code to determine the right portion to apply the style to. - - Args: - text (Text): Text instance to apply the style to. - """ - code = text.plain - newlines_offsets = [ - # Let's add outer boundaries at each side of the list: - 0, - # N.B. 
using "\n" here is much faster than using metacharacters such as "^" or "\Z": - *[ - match.start() + 1 - for match in re.finditer("\n", code, flags=re.MULTILINE) - ], - len(code) + 1, - ] - - for stylized_range in self._stylized_ranges: - start = _get_code_index_for_syntax_position( - newlines_offsets, stylized_range.start - ) - end = _get_code_index_for_syntax_position( - newlines_offsets, stylized_range.end - ) - if start is not None and end is not None: - text.stylize(stylized_range.style, start, end) - - def _process_code(self, code: str) -> Tuple[bool, str]: - """ - Applies various processing to a raw code string - (normalises it so it always ends with a line return, dedents it if necessary, etc.) - - Args: - code (str): The raw code string to process - - Returns: - Tuple[bool, str]: the boolean indicates whether the raw code ends with a line return, - while the string is the processed code. - """ - ends_on_nl = code.endswith("\n") - processed_code = code if ends_on_nl else code + "\n" - processed_code = ( - textwrap.dedent(processed_code) if self.dedent else processed_code - ) - processed_code = processed_code.expandtabs(self.tab_size) - return ends_on_nl, processed_code - - -def _get_code_index_for_syntax_position( - newlines_offsets: Sequence[int], position: SyntaxPosition -) -> Optional[int]: - """ - Returns the index of the code string for the given positions. - - Args: - newlines_offsets (Sequence[int]): The offset of each newline character found in the code snippet. - position (SyntaxPosition): The position to search for. - - Returns: - Optional[int]: The index of the code string for this position, or `None` - if the given position's line number is out of range (if it's the column that is out of range - we silently clamp its value so that it reaches the end of the line) - """ - lines_count = len(newlines_offsets) - - line_number, column_index = position - if line_number > lines_count or len(newlines_offsets) < (line_number + 1): - return None # `line_number` is out of range - line_index = line_number - 1 - line_length = newlines_offsets[line_index + 1] - newlines_offsets[line_index] - 1 - # If `column_index` is out of range: let's silently clamp it: - column_index = min(line_length, column_index) - return newlines_offsets[line_index] + column_index - - -if __name__ == "__main__": # pragma: no cover - - import argparse - import sys - - parser = argparse.ArgumentParser( - description="Render syntax to the console with Rich" - ) - parser.add_argument( - "path", - metavar="PATH", - help="path to file, or - for stdin", - ) - parser.add_argument( - "-c", - "--force-color", - dest="force_color", - action="store_true", - default=None, - help="force color for non-terminals", - ) - parser.add_argument( - "-i", - "--indent-guides", - dest="indent_guides", - action="store_true", - default=False, - help="display indent guides", - ) - parser.add_argument( - "-l", - "--line-numbers", - dest="line_numbers", - action="store_true", - help="render line numbers", - ) - parser.add_argument( - "-w", - "--width", - type=int, - dest="width", - default=None, - help="width of output (default will auto-detect)", - ) - parser.add_argument( - "-r", - "--wrap", - dest="word_wrap", - action="store_true", - default=False, - help="word wrap long lines", - ) - parser.add_argument( - "-s", - "--soft-wrap", - action="store_true", - dest="soft_wrap", - default=False, - help="enable soft wrapping mode", - ) - parser.add_argument( - "-t", "--theme", dest="theme", default="monokai", help="pygments theme" - ) - 
parser.add_argument( - "-b", - "--background-color", - dest="background_color", - default=None, - help="Override background color", - ) - parser.add_argument( - "-x", - "--lexer", - default=None, - dest="lexer_name", - help="Lexer name", - ) - parser.add_argument( - "-p", "--padding", type=int, default=0, dest="padding", help="Padding" - ) - parser.add_argument( - "--highlight-line", - type=int, - default=None, - dest="highlight_line", - help="The line number (not index!) to highlight", - ) - args = parser.parse_args() - - from pip._vendor.rich.console import Console - - console = Console(force_terminal=args.force_color, width=args.width) - - if args.path == "-": - code = sys.stdin.read() - syntax = Syntax( - code=code, - lexer=args.lexer_name, - line_numbers=args.line_numbers, - word_wrap=args.word_wrap, - theme=args.theme, - background_color=args.background_color, - indent_guides=args.indent_guides, - padding=args.padding, - highlight_lines={args.highlight_line}, - ) - else: - syntax = Syntax.from_path( - args.path, - lexer=args.lexer_name, - line_numbers=args.line_numbers, - word_wrap=args.word_wrap, - theme=args.theme, - background_color=args.background_color, - indent_guides=args.indent_guides, - padding=args.padding, - highlight_lines={args.highlight_line}, - ) - console.print(syntax, soft_wrap=args.soft_wrap) diff --git a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/utils/registry.py b/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/utils/registry.py deleted file mode 100644 index 1e8d91e7d29283b76f4a42c2137f5026d3528fd9..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/utils/registry.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. - - -def _register_generic(module_dict, module_name, module): - assert module_name not in module_dict - module_dict[module_name] = module - - -class Registry(dict): - ''' - A helper class for managing registering modules, it extends a dictionary - and provides a register functions. - - Eg. creeting a registry: - some_registry = Registry({"default": default_module}) - - There're two ways of registering new modules: - 1): normal way is just calling register function: - def foo(): - ... - some_registry.register("foo_module", foo) - 2): used as decorator when declaring the module: - @some_registry.register("foo_module") - @some_registry.register("foo_modeul_nickname") - def foo(): - ... 
- - Access of module is just like using a dictionary, eg: - f = some_registry["foo_modeul"] - ''' - def __init__(self, *args, **kwargs): - super(Registry, self).__init__(*args, **kwargs) - - def register(self, module_name, module=None): - # used as function call - if module is not None: - _register_generic(self, module_name, module) - return - - # used as decorator - def register_fn(fn): - _register_generic(self, module_name, fn) - return fn - - return register_fn \ No newline at end of file diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/reppoints/reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/reppoints/reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py deleted file mode 100644 index 0f56a46b3c002cdec630bb06df66a4fc9e7804a8..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/reppoints/reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py' -model = dict(bbox_head=dict(transform_method='minmax')) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/docs/tutorials/customize_dataset.md b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/docs/tutorials/customize_dataset.md deleted file mode 100644 index d1e956d4abae00a32359ee7136f3998caffc796a..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/docs/tutorials/customize_dataset.md +++ /dev/null @@ -1,487 +0,0 @@ -# Tutorial 2: Customize Datasets - -## Support new data format - -To support a new data format, you can either convert them to existing formats (COCO format or PASCAL format) or directly convert them to the middle format. You could also choose to convert them offline (before training by a script) or online (implement a new dataset and do the conversion at training). In MMDetection, we recommend to convert the data into COCO formats and do the conversion offline, thus you only need to modify the config's data annotation paths and classes after the conversion of your data. - -### Reorganize new data formats to existing format - -The simplest way is to convert your dataset to existing dataset formats (COCO or PASCAL VOC). - -The annotation json files in COCO format has the following necessary keys: - -```python -'images': [ - { - 'file_name': 'COCO_val2014_000000001268.jpg', - 'height': 427, - 'width': 640, - 'id': 1268 - }, - ... -], - -'annotations': [ - { - 'segmentation': [[192.81, - 247.09, - ... - 219.03, - 249.06]], # if you have mask labels - 'area': 1035.749, - 'iscrowd': 0, - 'image_id': 1268, - 'bbox': [192.81, 224.8, 74.73, 33.43], - 'category_id': 16, - 'id': 42986 - }, - ... -], - -'categories': [ - {'id': 0, 'name': 'car'}, - ] -``` - -There are three necessary keys in the json file: - -- `images`: contains a list of images with their informations like `file_name`, `height`, `width`, and `id`. -- `annotations`: contains the list of instance annotations. -- `categories`: contains the list of categories names and their ID. - -After the data pre-processing, there are two steps for users to train the customized new dataset with existing format (e.g. COCO format): - -1. Modify the config file for using the customized dataset. -2. Check the annotations of the customized dataset. - -Here we give an example to show the above two steps, which uses a customized dataset of 5 classes with COCO format to train an existing Cascade MaskRCNN R50 FPN detector. - -#### 1. 
Modify the config file for using the customized dataset - -There are two aspects involved in the modification of config file: - -1. The `data` field. Specifically, you need to explicitly add the `classes` fields in `data.train`, `data.val` and `data.test`. -2. The `num_classes` field in the `model` part. Explicitly over-write all the `num_classes` from default value (e.g. 80 in COCO) to your classes number. - -In `configs/my_custom_config.py`: - -```python - -# the new config inherits the base configs to highlight the necessary modification -_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py' - -# 1. dataset settings -dataset_type = 'CocoDataset' -classes = ('a', 'b', 'c', 'd', 'e') -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - # explicitly add your class names to the field `classes` - classes=classes, - ann_file='path/to/your/train/annotation_data', - img_prefix='path/to/your/train/image_data'), - val=dict( - type=dataset_type, - # explicitly add your class names to the field `classes` - classes=classes, - ann_file='path/to/your/val/annotation_data', - img_prefix='path/to/your/val/image_data'), - test=dict( - type=dataset_type, - # explicitly add your class names to the field `classes` - classes=classes, - ann_file='path/to/your/test/annotation_data', - img_prefix='path/to/your/test/image_data')) - -# 2. model settings - -# explicitly over-write all the `num_classes` field from default 80 to 5. -model = dict( - roi_head=dict( - bbox_head=[ - dict( - type='Shared2FCBBoxHead', - # explicitly over-write all the `num_classes` field from default 80 to 5. - num_classes=5), - dict( - type='Shared2FCBBoxHead', - # explicitly over-write all the `num_classes` field from default 80 to 5. - num_classes=5), - dict( - type='Shared2FCBBoxHead', - # explicitly over-write all the `num_classes` field from default 80 to 5. - num_classes=5)], - # explicitly over-write all the `num_classes` field from default 80 to 5. - mask_head=dict(num_classes=5))) -``` - -#### 2. Check the annotations of the customized dataset - -Assuming your customized dataset is COCO format, make sure you have the correct annotations in the customized dataset: - -1. The length for `categories` field in annotations should exactly equal the tuple length of `classes` fields in your config, meaning the number of classes (e.g. 5 in this example). -2. The `classes` fields in your config file should have exactly the same elements and the same order with the `name` in `categories` of annotations. MMDetection automatically maps the uncontinuous `id` in `categories` to the continuous label indices, so the string order of `name` in `categories` field affects the order of label indices. Meanwhile, the string order of `classes` in config affects the label text during visualization of predicted bounding boxes. -3. The `category_id` in `annotations` field should be valid, i.e., all values in `category_id` should belong to `id` in `categories`. - -Here is a valid example of annotations: - -```python - -'annotations': [ - { - 'segmentation': [[192.81, - 247.09, - ... - 219.03, - 249.06]], # if you have mask labels - 'area': 1035.749, - 'iscrowd': 0, - 'image_id': 1268, - 'bbox': [192.81, 224.8, 74.73, 33.43], - 'category_id': 16, - 'id': 42986 - }, - ... -], - -# MMDetection automatically maps the uncontinuous `id` to the continuous label indices. 
-'categories': [ - {'id': 1, 'name': 'a'}, {'id': 3, 'name': 'b'}, {'id': 4, 'name': 'c'}, {'id': 16, 'name': 'd'}, {'id': 17, 'name': 'e'}, - ] -``` - -We use this way to support CityScapes dataset. The script is in [cityscapes.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/dataset_converters/cityscapes.py) and we also provide the finetuning [configs](https://github.com/open-mmlab/mmdetection/blob/master/configs/cityscapes). - -**Note** - -1. For instance segmentation datasets, **MMDetection only supports evaluating mask AP of dataset in COCO format for now**. -2. It is recommanded to convert the data offline before training, thus you can still use `CocoDataset` and only need to modify the path of annotations and the training classes. - -### Reorganize new data format to middle format - -It is also fine if you do not want to convert the annotation format to COCO or PASCAL format. -Actually, we define a simple annotation format and all existing datasets are -processed to be compatible with it, either online or offline. - -The annotation of a dataset is a list of dict, each dict corresponds to an image. -There are 3 field `filename` (relative path), `width`, `height` for testing, -and an additional field `ann` for training. `ann` is also a dict containing at least 2 fields: -`bboxes` and `labels`, both of which are numpy arrays. Some datasets may provide -annotations like crowd/difficult/ignored bboxes, we use `bboxes_ignore` and `labels_ignore` -to cover them. - -Here is an example. - -```python - -[ - { - 'filename': 'a.jpg', - 'width': 1280, - 'height': 720, - 'ann': { - 'bboxes': (n, 4), - 'labels': (n, ), - 'bboxes_ignore': (k, 4), - 'labels_ignore': (k, ) (optional field) - } - }, - ... -] -``` - -There are two ways to work with custom datasets. - -- online conversion - - You can write a new Dataset class inherited from `CustomDataset`, and overwrite two methods - `load_annotations(self, ann_file)` and `get_ann_info(self, idx)`, - like [CocoDataset](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/coco.py) and [VOCDataset](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/voc.py). - -- offline conversion - - You can convert the annotation format to the expected format above and save it to - a pickle or json file, like [pascal_voc.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/dataset_converters/pascal_voc.py). - Then you can simply use `CustomDataset`. - -### An example of customized dataset - -Assume the annotation is in a new format in text files. -The bounding boxes annotations are stored in text file `annotation.txt` as the following - -``` -# -000001.jpg -1280 720 -2 -10 20 40 60 1 -20 40 50 60 2 -# -000002.jpg -1280 720 -3 -50 20 40 60 2 -20 40 30 45 2 -30 40 50 60 3 -``` - -We can create a new dataset in `mmdet/datasets/my_dataset.py` to load the data. 
- -```python -import mmcv -import numpy as np - -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class MyDataset(CustomDataset): - - CLASSES = ('person', 'bicycle', 'car', 'motorcycle') - - def load_annotations(self, ann_file): - ann_list = mmcv.list_from_file(ann_file) - - data_infos = [] - for i, ann_line in enumerate(ann_list): - if ann_line != '#': - continue - - img_shape = ann_list[i + 2].split(' ') - width = int(img_shape[0]) - height = int(img_shape[1]) - bbox_number = int(ann_list[i + 3]) - - anns = ann_line.split(' ') - bboxes = [] - labels = [] - for anns in ann_list[i + 4:i + 4 + bbox_number]: - bboxes.append([float(ann) for ann in anns[:4]]) - labels.append(int(anns[4])) - - data_infos.append( - dict( - filename=ann_list[i + 1], - width=width, - height=height, - ann=dict( - bboxes=np.array(bboxes).astype(np.float32), - labels=np.array(labels).astype(np.int64)) - )) - - return data_infos - - def get_ann_info(self, idx): - return self.data_infos[idx]['ann'] - -``` - -Then in the config, to use `MyDataset` you can modify the config as the following - -```python -dataset_A_train = dict( - type='MyDataset', - ann_file = 'image_list.txt', - pipeline=train_pipeline -) -``` - -## Customize datasets by dataset wrappers - -MMDetection also supports many dataset wrappers to mix the dataset or modify the dataset distribution for training. -Currently it supports to three dataset wrappers as below: - -- `RepeatDataset`: simply repeat the whole dataset. -- `ClassBalancedDataset`: repeat dataset in a class balanced manner. -- `ConcatDataset`: concat datasets. - -### Repeat dataset - -We use `RepeatDataset` as wrapper to repeat the dataset. For example, suppose the original dataset is `Dataset_A`, to repeat it, the config looks like the following - -```python -dataset_A_train = dict( - type='RepeatDataset', - times=N, - dataset=dict( # This is the original config of Dataset_A - type='Dataset_A', - ... - pipeline=train_pipeline - ) - ) -``` - -### Class balanced dataset - -We use `ClassBalancedDataset` as wrapper to repeat the dataset based on category -frequency. The dataset to repeat needs to instantiate function `self.get_cat_ids(idx)` -to support `ClassBalancedDataset`. -For example, to repeat `Dataset_A` with `oversample_thr=1e-3`, the config looks like the following - -```python -dataset_A_train = dict( - type='ClassBalancedDataset', - oversample_thr=1e-3, - dataset=dict( # This is the original config of Dataset_A - type='Dataset_A', - ... - pipeline=train_pipeline - ) - ) -``` - -You may refer to [source code](../../mmdet/datasets/dataset_wrappers.py) for details. - -### Concatenate dataset - -There are three ways to concatenate the dataset. - -1. If the datasets you want to concatenate are in the same type with different annotation files, you can concatenate the dataset configs like the following. - - ```python - dataset_A_train = dict( - type='Dataset_A', - ann_file = ['anno_file_1', 'anno_file_2'], - pipeline=train_pipeline - ) - ``` - - If the concatenated dataset is used for test or evaluation, this manner supports to evaluate each dataset separately. To test the concatenated datasets as a whole, you can set `separate_eval=False` as below. - - ```python - dataset_A_train = dict( - type='Dataset_A', - ann_file = ['anno_file_1', 'anno_file_2'], - separate_eval=False, - pipeline=train_pipeline - ) - ``` - -2. In case the dataset you want to concatenate is different, you can concatenate the dataset configs like the following. 
- - ```python - dataset_A_train = dict() - dataset_B_train = dict() - - data = dict( - imgs_per_gpu=2, - workers_per_gpu=2, - train = [ - dataset_A_train, - dataset_B_train - ], - val = dataset_A_val, - test = dataset_A_test - ) - ``` - - If the concatenated dataset is used for test or evaluation, this manner also supports to evaluate each dataset separately. - -3. We also support to define `ConcatDataset` explicitly as the following. - - ```python - dataset_A_val = dict() - dataset_B_val = dict() - - data = dict( - imgs_per_gpu=2, - workers_per_gpu=2, - train=dataset_A_train, - val=dict( - type='ConcatDataset', - datasets=[dataset_A_val, dataset_B_val], - separate_eval=False)) - ``` - - This manner allows users to evaluate all the datasets as a single one by setting `separate_eval=False`. - -**Note:** - -1. The option `separate_eval=False` assumes the datasets use `self.data_infos` during evaluation. Therefore, COCO datasets do not support this behavior since COCO datasets do not fully rely on `self.data_infos` for evaluation. Combining different types of datasets and evaluating them as a whole is not tested thus is not suggested. -2. Evaluating `ClassBalancedDataset` and `RepeatDataset` is not supported thus evaluating concatenated datasets of these types is also not supported. - -A more complex example that repeats `Dataset_A` and `Dataset_B` by N and M times, respectively, and then concatenates the repeated datasets is as the following. - -```python -dataset_A_train = dict( - type='RepeatDataset', - times=N, - dataset=dict( - type='Dataset_A', - ... - pipeline=train_pipeline - ) -) -dataset_A_val = dict( - ... - pipeline=test_pipeline -) -dataset_A_test = dict( - ... - pipeline=test_pipeline -) -dataset_B_train = dict( - type='RepeatDataset', - times=M, - dataset=dict( - type='Dataset_B', - ... - pipeline=train_pipeline - ) -) -data = dict( - imgs_per_gpu=2, - workers_per_gpu=2, - train = [ - dataset_A_train, - dataset_B_train - ], - val = dataset_A_val, - test = dataset_A_test -) - -``` - -## Modify Dataset Classes - -With existing dataset types, we can modify the class names of them to train subset of the annotations. -For example, if you want to train only three classes of the current dataset, -you can modify the classes of dataset. -The dataset will filter out the ground truth boxes of other classes automatically. - -```python -classes = ('person', 'bicycle', 'car') -data = dict( - train=dict(classes=classes), - val=dict(classes=classes), - test=dict(classes=classes)) -``` - -MMDetection V2.0 also supports to read the classes from a file, which is common in real applications. -For example, assume the `classes.txt` contains the name of classes as the following. - -``` -person -bicycle -car -``` - -Users can set the classes as a file path, the dataset will load it and convert it to a list automatically. - -```python -classes = 'path/to/classes.txt' -data = dict( - train=dict(classes=classes), - val=dict(classes=classes), - test=dict(classes=classes)) -``` - -**Note**: - -- Before MMDetection v2.5.0, the dataset will filter out the empty GT images automatically if the classes are set and there is no way to disable that through config. This is an undesirable behavior and introduces confusion because if the classes are not set, the dataset only filter the empty GT images when `filter_empty_gt=True` and `test_mode=False`. 
After MMDetection v2.5.0, we decouple the image filtering process and the classes modification, i.e., the dataset will only filter empty GT images when `filter_empty_gt=True` and `test_mode=False`, no matter whether the classes are set. Thus, setting the classes only influences the annotations of classes used for training and users could decide whether to filter empty GT images by themselves. -- Since the middle format only has box labels and does not contain the class names, when using `CustomDataset`, users cannot filter out the empty GT images through configs but only do this offline. -- Please remember to modify the `num_classes` in the head when specifying `classes` in dataset. We implemented [NumClassCheckHook](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/utils.py) to check whether the numbers are consistent since v2.9.0(after PR#4508). -- The features for setting dataset classes and dataset filtering will be refactored to be more user-friendly in the future (depends on the progress). diff --git a/spaces/totalbogus/prompthero-openjourney-v4/README.md b/spaces/totalbogus/prompthero-openjourney-v4/README.md deleted file mode 100644 index a3c0a081a649ddccf0fc1b77fc8c616a11203650..0000000000000000000000000000000000000000 --- a/spaces/totalbogus/prompthero-openjourney-v4/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Prompthero Openjourney V4 -emoji: 📊 -colorFrom: yellow -colorTo: green -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/trttung1610/musicgen/audiocraft/utils/__init__.py b/spaces/trttung1610/musicgen/audiocraft/utils/__init__.py deleted file mode 100644 index 75e25a0212f98e4a18d97c86c6cda225636a3215..0000000000000000000000000000000000000000 --- a/spaces/trttung1610/musicgen/audiocraft/utils/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
-"""Utilities.""" diff --git a/spaces/user238921933/stable-diffusion-webui/modules/sd_vae_approx.py b/spaces/user238921933/stable-diffusion-webui/modules/sd_vae_approx.py deleted file mode 100644 index ea4c4a3a72941c31a654a29ce90cf8d9c82ce674..0000000000000000000000000000000000000000 --- a/spaces/user238921933/stable-diffusion-webui/modules/sd_vae_approx.py +++ /dev/null @@ -1,58 +0,0 @@ -import os - -import torch -from torch import nn -from modules import devices, paths - -sd_vae_approx_model = None - - -class VAEApprox(nn.Module): - def __init__(self): - super(VAEApprox, self).__init__() - self.conv1 = nn.Conv2d(4, 8, (7, 7)) - self.conv2 = nn.Conv2d(8, 16, (5, 5)) - self.conv3 = nn.Conv2d(16, 32, (3, 3)) - self.conv4 = nn.Conv2d(32, 64, (3, 3)) - self.conv5 = nn.Conv2d(64, 32, (3, 3)) - self.conv6 = nn.Conv2d(32, 16, (3, 3)) - self.conv7 = nn.Conv2d(16, 8, (3, 3)) - self.conv8 = nn.Conv2d(8, 3, (3, 3)) - - def forward(self, x): - extra = 11 - x = nn.functional.interpolate(x, (x.shape[2] * 2, x.shape[3] * 2)) - x = nn.functional.pad(x, (extra, extra, extra, extra)) - - for layer in [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5, self.conv6, self.conv7, self.conv8, ]: - x = layer(x) - x = nn.functional.leaky_relu(x, 0.1) - - return x - - -def model(): - global sd_vae_approx_model - - if sd_vae_approx_model is None: - sd_vae_approx_model = VAEApprox() - sd_vae_approx_model.load_state_dict(torch.load(os.path.join(paths.models_path, "VAE-approx", "model.pt"), map_location='cpu' if devices.device.type != 'cuda' else None)) - sd_vae_approx_model.eval() - sd_vae_approx_model.to(devices.device, devices.dtype) - - return sd_vae_approx_model - - -def cheap_approximation(sample): - # https://discuss.huggingface.co/t/decoding-latents-to-rgb-without-upscaling/23204/2 - - coefs = torch.tensor([ - [0.298, 0.207, 0.208], - [0.187, 0.286, 0.173], - [-0.158, 0.189, 0.264], - [-0.184, -0.271, -0.473], - ]).to(sample.device) - - x_sample = torch.einsum("lxy,lr -> rxy", sample, coefs) - - return x_sample diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/hub/integrations.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/hub/integrations.md deleted file mode 100644 index 19480d27d8234ce677ded54d5e134bc8862bc0bf..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/hub/integrations.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -comments: true ---- - -# 🚧 Page Under Construction ⚒ - -This page is currently under construction!️ 👷Please check back later for updates. 😃🔜 diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/data/dataloaders/v5loader.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/data/dataloaders/v5loader.md deleted file mode 100644 index 55986951904b870c34e3d0343109e0ac56c1d880..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/docs/reference/yolo/data/dataloaders/v5loader.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -description: Efficiently load images and labels to models using Ultralytics YOLO's InfiniteDataLoader, LoadScreenshots, and LoadStreams. -keywords: YOLO, data loader, image classification, object detection, Ultralytics ---- - -## InfiniteDataLoader ---- -### ::: ultralytics.yolo.data.dataloaders.v5loader.InfiniteDataLoader -

- -## _RepeatSampler ---- -### ::: ultralytics.yolo.data.dataloaders.v5loader._RepeatSampler -
- -## LoadScreenshots ---- -### ::: ultralytics.yolo.data.dataloaders.v5loader.LoadScreenshots -
- -## LoadImages ---- -### ::: ultralytics.yolo.data.dataloaders.v5loader.LoadImages -
- -## LoadStreams ---- -### ::: ultralytics.yolo.data.dataloaders.v5loader.LoadStreams -
- -## LoadImagesAndLabels ---- -### ::: ultralytics.yolo.data.dataloaders.v5loader.LoadImagesAndLabels -
- -## ClassificationDataset ---- -### ::: ultralytics.yolo.data.dataloaders.v5loader.ClassificationDataset -
- -## get_hash ---- -### ::: ultralytics.yolo.data.dataloaders.v5loader.get_hash -
- -## exif_size ---- -### ::: ultralytics.yolo.data.dataloaders.v5loader.exif_size -
- -## exif_transpose ---- -### ::: ultralytics.yolo.data.dataloaders.v5loader.exif_transpose -
- -## seed_worker ---- -### ::: ultralytics.yolo.data.dataloaders.v5loader.seed_worker -
- -## create_dataloader ---- -### ::: ultralytics.yolo.data.dataloaders.v5loader.create_dataloader -
- -## img2label_paths ---- -### ::: ultralytics.yolo.data.dataloaders.v5loader.img2label_paths -
- -## flatten_recursive ---- -### ::: ultralytics.yolo.data.dataloaders.v5loader.flatten_recursive -
- -## extract_boxes ---- -### ::: ultralytics.yolo.data.dataloaders.v5loader.extract_boxes -
- -## autosplit ---- -### ::: ultralytics.yolo.data.dataloaders.v5loader.autosplit -
- -## verify_image_label ---- -### ::: ultralytics.yolo.data.dataloaders.v5loader.verify_image_label -
- -## create_classification_dataloader ---- -### ::: ultralytics.yolo.data.dataloaders.v5loader.create_classification_dataloader -

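The deleted reference page above only enumerates the v5loader classes and helpers. For orientation, here is a minimal usage sketch for `LoadImages`, the simplest of the documented loaders. It assumes the usual YOLOv5-style interface — a source path plus an `img_size` keyword, with iteration yielding the file path, the preprocessed image, the original frame, an optional video capture handle, and a log string; those details are assumptions and should be checked against the vendored module itself.

```python
# Minimal sketch only — the constructor kwarg and the yielded tuple layout
# follow the YOLOv5-style loader convention and are assumptions here.
from ultralytics.yolo.data.dataloaders.v5loader import LoadImages

dataset = LoadImages("path/to/images_or_video", img_size=640)

for path, img, im0, cap, s in dataset:
    # `img` is the preprocessed array handed to the model,
    # `im0` is the untouched original frame (useful for drawing results).
    print(path, img.shape, im0.shape)
```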
                diff --git a/spaces/vedet9/ipl/app.py b/spaces/vedet9/ipl/app.py deleted file mode 100644 index a8f387fe86f877740bc774c79cea3af6e90018b4..0000000000000000000000000000000000000000 --- a/spaces/vedet9/ipl/app.py +++ /dev/null @@ -1,78 +0,0 @@ -import streamlit as st -import pickle -import pandas as pd -import numpy as np - -pipe = pickle.load(open('pipe.pkl','rb')) - -teams=[ - 'Sunrisers Hyderabad', - 'Mumbai Indians', - 'Royal Challengers Bangalore', - 'Kolkata Knight Riders', - 'Rajasthan Royals', - 'Chennai Super Kings', - 'Kings XI Punjab', - -] - -e_cities=['Mumbai', - 'Mohali', - 'Kolkata', - 'Delhi', - 'Bangalore', - 'Chennai', - 'Chandigarh', - 'Jaipur', - 'Hyderabad', - 'Abu Dhabi', - 'Durban', - 'Pune', - 'Ahmedabad', - 'Centurion', - 'Dharamsala', - 'Port Elizabeth', - 'Visakhapatnam', - 'Ranchi', - 'Indore'] - -st.title('IPL 2023 Score Prediction') -col1,col2=st.columns(2) - -with col1: - batting_team=st.selectbox('Select batting team',sorted(teams)) - -with col2: - bowling_team=st.selectbox('Select bowling team',sorted(teams)) - -city=st.selectbox('Select city',sorted(e_cities)) - -col3,col4,col5=st.columns(3) - -with col3: - current_score= st.number_input('Current Score') - - -with col4: - overs_completed=st.number_input('Overs completed(over>5)') - -with col5: - wickets=st.slider('Wickets_out',0,10) - -last_five_over=st.number_input('Runs Scoed in last 5 Overs') - -if st.button('Predict Score'): - balls_left=120-(overs_completed*6) - wickets_left=10- wickets - crr=current_score/overs_completed - - input_data_f=pd.DataFrame( - {'batting_team':[batting_team],'bowling_team': [bowling_team],'city': city, 'current_score' : [current_score], - 'balls_left': [balls_left],'wickets_left': [wickets],'crr': [crr], 'last_five_over': [last_five_over] - - } - ) - result=pipe.predict(input_data_f) - st.text("Predicted Score "+ str(int(result[0]))) - - diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/models/backbones/uniformer.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/models/backbones/uniformer.py deleted file mode 100644 index 0c4bb88e4c928540cca9ab609988b916520f5b7a..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/models/backbones/uniformer.py +++ /dev/null @@ -1,422 +0,0 @@ -# -------------------------------------------------------- -# UniFormer -# Copyright (c) 2022 SenseTime X-Lab -# Licensed under The MIT License [see LICENSE for details] -# Written by Kunchang Li -# -------------------------------------------------------- - -from collections import OrderedDict -import math - -from functools import partial -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as checkpoint -import numpy as np -from timm.models.layers import DropPath, to_2tuple, trunc_normal_ - -from annotator.uniformer.mmcv_custom import load_checkpoint -from annotator.uniformer.mmseg.utils import get_root_logger -from ..builder import BACKBONES - - -class Mlp(nn.Module): - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = 
self.drop(x) - return x - - -class CMlp(nn.Module): - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Conv2d(in_features, hidden_features, 1) - self.act = act_layer() - self.fc2 = nn.Conv2d(hidden_features, out_features, 1) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -class CBlock(nn.Module): - def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): - super().__init__() - self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim) - self.norm1 = nn.BatchNorm2d(dim) - self.conv1 = nn.Conv2d(dim, dim, 1) - self.conv2 = nn.Conv2d(dim, dim, 1) - self.attn = nn.Conv2d(dim, dim, 5, padding=2, groups=dim) - # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here - self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() - self.norm2 = nn.BatchNorm2d(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - def forward(self, x): - x = x + self.pos_embed(x) - x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x))))) - x = x + self.drop_path(self.mlp(self.norm2(x))) - return x - - -class Attention(nn.Module): - def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): - super().__init__() - self.num_heads = num_heads - head_dim = dim // num_heads - # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights - self.scale = qk_scale or head_dim ** -0.5 - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - def forward(self, x): - B, N, C = x.shape - qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - attn = (q @ k.transpose(-2, -1)) * self.scale - attn = attn.softmax(dim=-1) - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class SABlock(nn.Module): - def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): - super().__init__() - self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim) - self.norm1 = norm_layer(dim) - self.attn = Attention( - dim, - num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, - attn_drop=attn_drop, proj_drop=drop) - # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here - self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - def forward(self, x): - x = x + self.pos_embed(x) - B, N, H, W = x.shape - x = x.flatten(2).transpose(1, 2) - x = x + self.drop_path(self.attn(self.norm1(x))) - x = x + self.drop_path(self.mlp(self.norm2(x))) - x = x.transpose(1, 2).reshape(B, N, H, W) - return x - - -def window_partition(x, window_size): - """ - Args: - x: (B, H, W, C) - window_size (int): window size - Returns: - windows: (num_windows*B, window_size, window_size, C) - """ - B, H, W, C = x.shape - x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows - - -def window_reverse(windows, window_size, H, W): - """ - Args: - windows: (num_windows*B, window_size, window_size, C) - window_size (int): Window size - H (int): Height of image - W (int): Width of image - Returns: - x: (B, H, W, C) - """ - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - -class SABlock_Windows(nn.Module): - def __init__(self, dim, num_heads, window_size=14, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): - super().__init__() - self.window_size=window_size - self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim) - self.norm1 = norm_layer(dim) - self.attn = Attention( - dim, - num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, - attn_drop=attn_drop, proj_drop=drop) - # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here - self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - def forward(self, x): - x = x + self.pos_embed(x) - x = x.permute(0, 2, 3, 1) - B, H, W, C = x.shape - shortcut = x - x = self.norm1(x) - - pad_l = pad_t = 0 - pad_r = (self.window_size - W % self.window_size) % self.window_size - pad_b = (self.window_size - H % self.window_size) % self.window_size - x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) - _, Hp, Wp, _ = x.shape - - x_windows = window_partition(x, self.window_size) # nW*B, window_size, window_size, C - x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C - - # W-MSA/SW-MSA - attn_windows = self.attn(x_windows) # nW*B, window_size*window_size, C - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) - x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C - - # reverse cyclic shift - if pad_r > 0 or pad_b > 0: - x = x[:, :H, :W, :].contiguous() - - x = shortcut + self.drop_path(x) - x = x + self.drop_path(self.mlp(self.norm2(x))) - x = x.permute(0, 3, 1, 2).reshape(B, C, H, W) - return x - - -class PatchEmbed(nn.Module): - """ Image to Patch Embedding - """ - def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): - super().__init__() - img_size = to_2tuple(img_size) - patch_size = to_2tuple(patch_size) - num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) - self.img_size = img_size - self.patch_size = patch_size - self.num_patches = num_patches - self.norm = nn.LayerNorm(embed_dim) - self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) - - def forward(self, x): - B, _, H, W = x.shape - x = self.proj(x) - B, _, H, W = x.shape - x = x.flatten(2).transpose(1, 2) - x = self.norm(x) - x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() - return x - - -@BACKBONES.register_module() -class UniFormer(nn.Module): - """ Vision Transformer - A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - - https://arxiv.org/abs/2010.11929 - """ - def __init__(self, layers=[3, 4, 8, 3], img_size=224, in_chans=3, num_classes=80, embed_dim=[64, 128, 320, 512], - head_dim=64, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, - drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6), - pretrained_path=None, use_checkpoint=False, checkpoint_num=[0, 0, 0, 0], - windows=False, hybrid=False, window_size=14): - """ - Args: - layer (list): number of block in each layer - img_size (int, tuple): input image size - in_chans (int): number of input channels - num_classes (int): number of classes for classification head - embed_dim (int): embedding dimension - head_dim (int): dimension of attention heads - mlp_ratio (int): ratio of mlp hidden dim to embedding dim - qkv_bias (bool): enable bias for qkv if True - qk_scale (float): override default qk scale of head_dim ** -0.5 if set - representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set - drop_rate (float): dropout rate - attn_drop_rate (float): attention dropout rate - drop_path_rate (float): stochastic depth rate - norm_layer (nn.Module): normalization layer - pretrained_path (str): path of pretrained model - use_checkpoint (bool): whether use checkpoint - checkpoint_num (list): 
index for using checkpoint in every stage - windows (bool): whether use window MHRA - hybrid (bool): whether use hybrid MHRA - window_size (int): size of window (>14) - """ - super().__init__() - self.num_classes = num_classes - self.use_checkpoint = use_checkpoint - self.checkpoint_num = checkpoint_num - self.windows = windows - print(f'Use Checkpoint: {self.use_checkpoint}') - print(f'Checkpoint Number: {self.checkpoint_num}') - self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models - norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) - - self.patch_embed1 = PatchEmbed( - img_size=img_size, patch_size=4, in_chans=in_chans, embed_dim=embed_dim[0]) - self.patch_embed2 = PatchEmbed( - img_size=img_size // 4, patch_size=2, in_chans=embed_dim[0], embed_dim=embed_dim[1]) - self.patch_embed3 = PatchEmbed( - img_size=img_size // 8, patch_size=2, in_chans=embed_dim[1], embed_dim=embed_dim[2]) - self.patch_embed4 = PatchEmbed( - img_size=img_size // 16, patch_size=2, in_chans=embed_dim[2], embed_dim=embed_dim[3]) - - self.pos_drop = nn.Dropout(p=drop_rate) - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(layers))] # stochastic depth decay rule - num_heads = [dim // head_dim for dim in embed_dim] - self.blocks1 = nn.ModuleList([ - CBlock( - dim=embed_dim[0], num_heads=num_heads[0], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer) - for i in range(layers[0])]) - self.norm1=norm_layer(embed_dim[0]) - self.blocks2 = nn.ModuleList([ - CBlock( - dim=embed_dim[1], num_heads=num_heads[1], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]], norm_layer=norm_layer) - for i in range(layers[1])]) - self.norm2 = norm_layer(embed_dim[1]) - if self.windows: - print('Use local window for all blocks in stage3') - self.blocks3 = nn.ModuleList([ - SABlock_Windows( - dim=embed_dim[2], num_heads=num_heads[2], window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer) - for i in range(layers[2])]) - elif hybrid: - print('Use hybrid window for blocks in stage3') - block3 = [] - for i in range(layers[2]): - if (i + 1) % 4 == 0: - block3.append(SABlock( - dim=embed_dim[2], num_heads=num_heads[2], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer)) - else: - block3.append(SABlock_Windows( - dim=embed_dim[2], num_heads=num_heads[2], window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer)) - self.blocks3 = nn.ModuleList(block3) - else: - print('Use global window for all blocks in stage3') - self.blocks3 = nn.ModuleList([ - SABlock( - dim=embed_dim[2], num_heads=num_heads[2], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer) - for i in range(layers[2])]) - self.norm3 = norm_layer(embed_dim[2]) - self.blocks4 = nn.ModuleList([ - SABlock( - dim=embed_dim[3], num_heads=num_heads[3], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, 
drop_path=dpr[i+layers[0]+layers[1]+layers[2]], norm_layer=norm_layer) - for i in range(layers[3])]) - self.norm4 = norm_layer(embed_dim[3]) - - # Representation layer - if representation_size: - self.num_features = representation_size - self.pre_logits = nn.Sequential(OrderedDict([ - ('fc', nn.Linear(embed_dim, representation_size)), - ('act', nn.Tanh()) - ])) - else: - self.pre_logits = nn.Identity() - - self.apply(self._init_weights) - self.init_weights(pretrained=pretrained_path) - - def init_weights(self, pretrained): - if isinstance(pretrained, str): - logger = get_root_logger() - load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger) - print(f'Load pretrained model from {pretrained}') - def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - @torch.jit.ignore - def no_weight_decay(self): - return {'pos_embed', 'cls_token'} - - def get_classifier(self): - return self.head - - def reset_classifier(self, num_classes, global_pool=''): - self.num_classes = num_classes - self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() - - def forward_features(self, x): - out = [] - x = self.patch_embed1(x) - x = self.pos_drop(x) - for i, blk in enumerate(self.blocks1): - if self.use_checkpoint and i < self.checkpoint_num[0]: - x = checkpoint.checkpoint(blk, x) - else: - x = blk(x) - x_out = self.norm1(x.permute(0, 2, 3, 1)) - out.append(x_out.permute(0, 3, 1, 2).contiguous()) - x = self.patch_embed2(x) - for i, blk in enumerate(self.blocks2): - if self.use_checkpoint and i < self.checkpoint_num[1]: - x = checkpoint.checkpoint(blk, x) - else: - x = blk(x) - x_out = self.norm2(x.permute(0, 2, 3, 1)) - out.append(x_out.permute(0, 3, 1, 2).contiguous()) - x = self.patch_embed3(x) - for i, blk in enumerate(self.blocks3): - if self.use_checkpoint and i < self.checkpoint_num[2]: - x = checkpoint.checkpoint(blk, x) - else: - x = blk(x) - x_out = self.norm3(x.permute(0, 2, 3, 1)) - out.append(x_out.permute(0, 3, 1, 2).contiguous()) - x = self.patch_embed4(x) - for i, blk in enumerate(self.blocks4): - if self.use_checkpoint and i < self.checkpoint_num[3]: - x = checkpoint.checkpoint(blk, x) - else: - x = blk(x) - x_out = self.norm4(x.permute(0, 2, 3, 1)) - out.append(x_out.permute(0, 3, 1, 2).contiguous()) - return tuple(out) - - def forward(self, x): - x = self.forward_features(x) - return x diff --git a/spaces/w1zrd/MusicGen/tests/modules/test_lstm.py b/spaces/w1zrd/MusicGen/tests/modules/test_lstm.py deleted file mode 100644 index 1248964c8191e19f27661f0974bef9cc967eb015..0000000000000000000000000000000000000000 --- a/spaces/w1zrd/MusicGen/tests/modules/test_lstm.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -import random -import torch - -from audiocraft.modules.lstm import StreamableLSTM - - -class TestStreamableLSTM: - - def test_lstm(self): - B, C, T = 4, 2, random.randint(1, 100) - - lstm = StreamableLSTM(C, 3, skip=False) - x = torch.randn(B, C, T) - y = lstm(x) - - print(y.shape) - assert y.shape == torch.Size([B, C, T]) - - def test_lstm_skip(self): - B, C, T = 4, 2, random.randint(1, 100) - - lstm = StreamableLSTM(C, 3, skip=True) - x = torch.randn(B, C, T) - y = lstm(x) - - assert y.shape == torch.Size([B, C, T]) diff --git a/spaces/wanghuoto/gogoai/src/lib/storage.ts b/spaces/wanghuoto/gogoai/src/lib/storage.ts deleted file mode 100644 index a5b7825c4f76a28c704da512ae39e8bb45addd09..0000000000000000000000000000000000000000 --- a/spaces/wanghuoto/gogoai/src/lib/storage.ts +++ /dev/null @@ -1,27 +0,0 @@ -import { getMany, set, del, clear } from 'idb-keyval'; - -export const Storage = { - async get(key: string | string[] | null): Promise { - if (key === null) return null; - if (typeof key === 'string') { - key = [key] - } - const returnData: Record = {} - const values = await getMany(key) - key.forEach((k, idx)=> { - returnData[k] = values[idx] - }) - return returnData; - }, - async set(object: any) { - for (let key of Object.keys(object)) { - await set(key, object[key]) - } - }, - async remove(key: string) { - return del(key); - }, - async clear() { - return clear(); - } -} diff --git a/spaces/wanghuoto/gogoai/tailwind.config.js b/spaces/wanghuoto/gogoai/tailwind.config.js deleted file mode 100644 index 03da3c3c45be6983b9f5ffa6df5f1fd0870e9636..0000000000000000000000000000000000000000 --- a/spaces/wanghuoto/gogoai/tailwind.config.js +++ /dev/null @@ -1,48 +0,0 @@ -/** @type {import('tailwindcss').Config} */ -module.exports = { - content: [ - './src/pages/**/*.{js,ts,jsx,tsx,mdx}', - './src/components/**/*.{js,ts,jsx,tsx,mdx}', - './src/app/**/*.{js,ts,jsx,tsx,mdx}', - './src/ui/**/*.{js,ts,jsx,tsx,mdx}', - ], - "darkMode": "class", - theme: { - extend: { - colors: { - 'primary-blue': 'rgb(var(--color-primary-blue) / )', - secondary: 'rgb(var(--color-secondary) / )', - 'primary-background': 'rgb(var(--primary-background) / )', - 'primary-text': 'rgb(var(--primary-text) / )', - 'secondary-text': 'rgb(var(--secondary-text) / )', - 'light-text': 'rgb(var(--light-text) / )', - 'primary-border': 'rgb(var(--primary-border) / )', - }, - keyframes: { - slideDownAndFade: { - from: { opacity: 0, transform: 'translateY(-2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideLeftAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - slideUpAndFade: { - from: { opacity: 0, transform: 'translateY(2px)' }, - to: { opacity: 1, transform: 'translateY(0)' }, - }, - slideRightAndFade: { - from: { opacity: 0, transform: 'translateX(2px)' }, - to: { opacity: 1, transform: 'translateX(0)' }, - }, - }, - animation: { - slideDownAndFade: 'slideDownAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideLeftAndFade: 'slideLeftAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideUpAndFade: 'slideUpAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - slideRightAndFade: 'slideRightAndFade 400ms cubic-bezier(0.16, 1, 0.3, 1)', - }, - }, - }, - plugins: [require('@headlessui/tailwindcss'), require('tailwind-scrollbar')], -} diff --git a/spaces/wby/human-photo-3dize/README.md b/spaces/wby/human-photo-3dize/README.md deleted file mode 100644 index 7af050706895821efdc17bc739a15fbffdf842eb..0000000000000000000000000000000000000000 --- 
a/spaces/wby/human-photo-3dize/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Human Photo 3dize -emoji: 💻 -colorFrom: indigo -colorTo: gray -sdk: gradio -sdk_version: 3.3.1 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/winnielin/mySecretBox/app.py b/spaces/winnielin/mySecretBox/app.py deleted file mode 100644 index bba9c67e8e39131dc608ac5b2bbb42d219b1a773..0000000000000000000000000000000000000000 --- a/spaces/winnielin/mySecretBox/app.py +++ /dev/null @@ -1,25 +0,0 @@ -import gradio as gr -import numpy as np -import matplotlib.pyplot as plt -import time - -Name={'Amy1113':'我的興趣是寫可以互動的小程式,希望未來成為一個充滿活力的數位內容創作者⭐️⭐️⭐️','王小明0419':'期中考將近,祝您考試順利!','Herry0720':'聽說你找到工作了,之後聚會再聽你分享心得','Emma0825':'最近心情好多了嗎?等你有空一起喝杯咖啡散散心吧!','wewe1213':'最近工作還好嗎?一起好好吃個飯~','Terry0427':'希望你一切安好!','Belly0427':'新年快樂!','Bear0613':'謝謝你陪我分享我的情緒與大小事,之後也要常聯絡喔!','蔡1215':'我從完全的Python新手很害怕寫程式,變成現在很喜歡自己寫小機器人,希望以後有機會從事程式相關的工作。'}; - -def recev(recevier,birthday): - glodKey = recevier + birthday - if glodKey in Name: - secret=Name[glodKey] - else: - secret='您的收件夾中目前沒有信件,祝您有美好的一天!' - return f"""致{recevier} : {secret}""" - -Secretmsg = gr.Interface(recev, - [ - gr.inputs.Textbox(placeholder="Enter sentence here..."), - gr.inputs.Textbox(placeholder="Enter sentence here..."), - - ], - "text", - ) -if __name__ == "__main__": - Secretmsg.launch(share=True) \ No newline at end of file diff --git a/spaces/xdecoder/Instruct-X-Decoder/utils/util.py b/spaces/xdecoder/Instruct-X-Decoder/utils/util.py deleted file mode 100644 index 868c090d4fca05263ee59b7f7e32ef04802674e0..0000000000000000000000000000000000000000 --- a/spaces/xdecoder/Instruct-X-Decoder/utils/util.py +++ /dev/null @@ -1,283 +0,0 @@ -# adopted from -# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py -# and -# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py -# and -# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py -# -# thanks! 
-import importlib - -import os -import math -import torch -import torch.nn as nn -import numpy as np -from einops import repeat - - -def instantiate_from_config(config): - if not "target" in config: - if config == '__is_first_stage__': - return None - elif config == "__is_unconditional__": - return None - raise KeyError("Expected key `target` to instantiate.") - return get_obj_from_str(config["target"])(**config.get("params", dict())) - - -def get_obj_from_str(string, reload=False): - module, cls = string.rsplit(".", 1) - if reload: - module_imp = importlib.import_module(module) - importlib.reload(module_imp) - return getattr(importlib.import_module(module, package=None), cls) - - -def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - if schedule == "linear": - betas = ( - torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2 - ) - - elif schedule == "cosine": - timesteps = ( - torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s - ) - alphas = timesteps / (1 + cosine_s) * np.pi / 2 - alphas = torch.cos(alphas).pow(2) - alphas = alphas / alphas[0] - betas = 1 - alphas[1:] / alphas[:-1] - betas = np.clip(betas, a_min=0, a_max=0.999) - - elif schedule == "sqrt_linear": - betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) - elif schedule == "sqrt": - betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5 - else: - raise ValueError(f"schedule '{schedule}' unknown.") - return betas.numpy() - - -def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True): - if ddim_discr_method == 'uniform': - c = num_ddpm_timesteps // num_ddim_timesteps - ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c))) - elif ddim_discr_method == 'quad': - ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int) - else: - raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"') - - # assert ddim_timesteps.shape[0] == num_ddim_timesteps - # add one to get the final alpha values right (the ones from first scale to data during sampling) - steps_out = ddim_timesteps + 1 - if verbose: - print(f'Selected timesteps for ddim sampler: {steps_out}') - return steps_out - - -def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True): - # select alphas for computing the variance schedule - alphas = alphacums[ddim_timesteps] - alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist()) - - # according the the formula provided in https://arxiv.org/abs/2010.02502 - sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)) - if verbose: - print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}') - print(f'For the chosen value of eta, which is {eta}, ' - f'this results in the following sigma_t schedule for ddim sampler {sigmas}') - return sigmas, alphas, alphas_prev - - -def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): - """ - Create a beta schedule that discretizes the given alpha_t_bar function, - which defines the cumulative product of (1-beta) over time from t = [0,1]. - :param num_diffusion_timesteps: the number of betas to produce. - :param alpha_bar: a lambda that takes an argument t from 0 to 1 and - produces the cumulative product of (1-beta) up to that - part of the diffusion process. 
- :param max_beta: the maximum beta to use; use values lower than 1 to - prevent singularities. - """ - betas = [] - for i in range(num_diffusion_timesteps): - t1 = i / num_diffusion_timesteps - t2 = (i + 1) / num_diffusion_timesteps - betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) - return np.array(betas) - - -def extract_into_tensor(a, t, x_shape): - b, *_ = t.shape - out = a.gather(-1, t) - return out.reshape(b, *((1,) * (len(x_shape) - 1))) - - -def checkpoint(func, inputs, params, flag): - """ - Evaluate a function without caching intermediate activations, allowing for - reduced memory at the expense of extra compute in the backward pass. - :param func: the function to evaluate. - :param inputs: the argument sequence to pass to `func`. - :param params: a sequence of parameters `func` depends on but does not - explicitly take as arguments. - :param flag: if False, disable gradient checkpointing. - """ - if flag: - args = tuple(inputs) + tuple(params) - return CheckpointFunction.apply(func, len(inputs), *args) - else: - return func(*inputs) - - -class CheckpointFunction(torch.autograd.Function): - @staticmethod - def forward(ctx, run_function, length, *args): - ctx.run_function = run_function - ctx.input_tensors = list(args[:length]) - ctx.input_params = list(args[length:]) - - with torch.no_grad(): - output_tensors = ctx.run_function(*ctx.input_tensors) - return output_tensors - - @staticmethod - def backward(ctx, *output_grads): - ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors] - with torch.enable_grad(): - # Fixes a bug where the first op in run_function modifies the - # Tensor storage in place, which is not allowed for detach()'d - # Tensors. - shallow_copies = [x.view_as(x) for x in ctx.input_tensors] - output_tensors = ctx.run_function(*shallow_copies) - input_grads = torch.autograd.grad( - output_tensors, - ctx.input_tensors + ctx.input_params, - output_grads, - allow_unused=True, - ) - del ctx.input_tensors - del ctx.input_params - del output_tensors - return (None, None) + input_grads - - -def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False): - """ - Create sinusoidal timestep embeddings. - :param timesteps: a 1-D Tensor of N indices, one per batch element. - These may be fractional. - :param dim: the dimension of the output. - :param max_period: controls the minimum frequency of the embeddings. - :return: an [N x dim] Tensor of positional embeddings. - """ - if not repeat_only: - half = dim // 2 - freqs = torch.exp( - -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half - ).to(device=timesteps.device) - args = timesteps[:, None].float() * freqs[None] - embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) - if dim % 2: - embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) - else: - embedding = repeat(timesteps, 'b -> b d', d=dim) - return embedding - - -def zero_module(module): - """ - Zero out the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().zero_() - return module - - -def scale_module(module, scale): - """ - Scale the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().mul_(scale) - return module - - -def mean_flat(tensor): - """ - Take the mean over all non-batch dimensions. - """ - return tensor.mean(dim=list(range(1, len(tensor.shape)))) - - -def normalization(channels): - """ - Make a standard normalization layer. 
- :param channels: number of input channels. - :return: an nn.Module for normalization. - """ - return GroupNorm32(32, channels) - - -# PyTorch 1.7 has SiLU, but we support PyTorch 1.5. -class SiLU(nn.Module): - def forward(self, x): - return x * torch.sigmoid(x) - - -class GroupNorm32(nn.GroupNorm): - def forward(self, x): - return super().forward(x.float()).type(x.dtype) - -def conv_nd(dims, *args, **kwargs): - """ - Create a 1D, 2D, or 3D convolution module. - """ - if dims == 1: - return nn.Conv1d(*args, **kwargs) - elif dims == 2: - return nn.Conv2d(*args, **kwargs) - elif dims == 3: - return nn.Conv3d(*args, **kwargs) - raise ValueError(f"unsupported dimensions: {dims}") - - -def linear(*args, **kwargs): - """ - Create a linear module. - """ - return nn.Linear(*args, **kwargs) - - -def avg_pool_nd(dims, *args, **kwargs): - """ - Create a 1D, 2D, or 3D average pooling module. - """ - if dims == 1: - return nn.AvgPool1d(*args, **kwargs) - elif dims == 2: - return nn.AvgPool2d(*args, **kwargs) - elif dims == 3: - return nn.AvgPool3d(*args, **kwargs) - raise ValueError(f"unsupported dimensions: {dims}") - - -class HybridConditioner(nn.Module): - - def __init__(self, c_concat_config, c_crossattn_config): - super().__init__() - self.concat_conditioner = instantiate_from_config(c_concat_config) - self.crossattn_conditioner = instantiate_from_config(c_crossattn_config) - - def forward(self, c_concat, c_crossattn): - c_concat = self.concat_conditioner(c_concat) - c_crossattn = self.crossattn_conditioner(c_crossattn) - return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]} - - -def noise_like(shape, device, repeat=False): - repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) - noise = lambda: torch.randn(shape, device=device) - return repeat_noise() if repeat else noise() \ No newline at end of file diff --git a/spaces/xfys/yolov5_tracking/val_utils/scripts/run_mots_challenge.py b/spaces/xfys/yolov5_tracking/val_utils/scripts/run_mots_challenge.py deleted file mode 100644 index b5170660e2113d0d23dc1beb72c82830c4111b35..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/val_utils/scripts/run_mots_challenge.py +++ /dev/null @@ -1,96 +0,0 @@ -""" run_mots.py - -Run example: -run_mots.py --USE_PARALLEL False --METRICS Hota --TRACKERS_TO_EVAL TrackRCNN - -Command Line Arguments: Defaults, # Comments - Eval arguments: - 'USE_PARALLEL': False, - 'NUM_PARALLEL_CORES': 8, - 'BREAK_ON_ERROR': True, - 'PRINT_RESULTS': True, - 'PRINT_ONLY_COMBINED': False, - 'PRINT_CONFIG': True, - 'TIME_PROGRESS': True, - 'OUTPUT_SUMMARY': True, - 'OUTPUT_DETAILED': True, - 'PLOT_CURVES': True, - Dataset arguments: - 'GT_FOLDER': os.path.join(code_path, 'data/gt/mot_challenge/'), # Location of GT data - 'TRACKERS_FOLDER': os.path.join(code_path, 'data/trackers/mot_challenge/'), # Trackers location - 'OUTPUT_FOLDER': None, # Where to save eval results (if None, same as TRACKERS_FOLDER) - 'TRACKERS_TO_EVAL': None, # Filenames of trackers to eval (if None, all in folder) - 'CLASSES_TO_EVAL': ['pedestrian'], # Valid: ['pedestrian'] - 'SPLIT_TO_EVAL': 'train', # Valid: 'train', 'test' - 'INPUT_AS_ZIP': False, # Whether tracker input files are zipped - 'PRINT_CONFIG': True, # Whether to print current config - 'TRACKER_SUB_FOLDER': 'data', # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER - 'OUTPUT_SUB_FOLDER': '', # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER - 'SEQMAP_FOLDER': None, # Where 
seqmaps are found (if None, GT_FOLDER/seqmaps) - 'SEQMAP_FILE': None, # Directly specify seqmap file (if none use seqmap_folder/MOTS-split_to_eval) - 'SEQ_INFO': None, # If not None, directly specify sequences to eval and their number of timesteps - 'GT_LOC_FORMAT': '{gt_folder}/{seq}/gt/gt.txt', # '{gt_folder}/{seq}/gt/gt.txt' - 'SKIP_SPLIT_FOL': False, # If False, data is in GT_FOLDER/MOTS-SPLIT_TO_EVAL/ and in - # TRACKERS_FOLDER/MOTS-SPLIT_TO_EVAL/tracker/ - # If True, then the middle 'MOTS-split' folder is skipped for both. - Metric arguments: - 'METRICS': ['HOTA','CLEAR', 'Identity', 'VACE', 'JAndF'] -""" - -import sys -import os -import argparse -from multiprocessing import freeze_support - -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) -import trackeval # noqa: E402 - -if __name__ == '__main__': - freeze_support() - - # Command line interface: - default_eval_config = trackeval.Evaluator.get_default_eval_config() - default_eval_config['DISPLAY_LESS_PROGRESS'] = False - default_dataset_config = trackeval.datasets.MOTSChallenge.get_default_dataset_config() - default_metrics_config = {'METRICS': ['HOTA', 'CLEAR', 'Identity']} - config = {**default_eval_config, **default_dataset_config, **default_metrics_config} # Merge default configs - parser = argparse.ArgumentParser() - for setting in config.keys(): - if type(config[setting]) == list or type(config[setting]) == type(None): - parser.add_argument("--" + setting, nargs='+') - else: - parser.add_argument("--" + setting) - args = parser.parse_args().__dict__ - for setting in args.keys(): - if args[setting] is not None: - if type(config[setting]) == type(True): - if args[setting] == 'True': - x = True - elif args[setting] == 'False': - x = False - else: - raise Exception('Command line parameter ' + setting + 'must be True or False') - elif type(config[setting]) == type(1): - x = int(args[setting]) - elif type(args[setting]) == type(None): - x = None - elif setting == 'SEQ_INFO': - x = dict(zip(args[setting], [None]*len(args[setting]))) - else: - x = args[setting] - config[setting] = x - eval_config = {k: v for k, v in config.items() if k in default_eval_config.keys()} - dataset_config = {k: v for k, v in config.items() if k in default_dataset_config.keys()} - metrics_config = {k: v for k, v in config.items() if k in default_metrics_config.keys()} - - # Run code - evaluator = trackeval.Evaluator(eval_config) - dataset_list = [trackeval.datasets.MOTSChallenge(dataset_config)] - metrics_list = [] - for metric in [trackeval.metrics.HOTA, trackeval.metrics.CLEAR, trackeval.metrics.Identity, trackeval.metrics.VACE, - trackeval.metrics.JAndF]: - if metric.get_name() in metrics_config['METRICS']: - metrics_list.append(metric()) - if len(metrics_list) == 0: - raise Exception('No metrics selected for evaluation') - evaluator.evaluate(dataset_list, metrics_list) diff --git a/spaces/xswu/HPSv2/src/training/data.py b/spaces/xswu/HPSv2/src/training/data.py deleted file mode 100644 index 9bbfa45d49a91d922cec2ebe2b08337ce8d95c6e..0000000000000000000000000000000000000000 --- a/spaces/xswu/HPSv2/src/training/data.py +++ /dev/null @@ -1,1131 +0,0 @@ -from __future__ import annotations -import ast -import copy -from curses import meta -from email.mime import image -import json -import logging -import math -import os -import random -import sys -import time -import io -import itertools -import braceexpand -from dataclasses import dataclass -from multiprocessing import Value -import pyarrow as pa - -import numpy as np 
-import pandas as pd -import functools -import torch -import torchvision.datasets as datasets -import torchvision.transforms.functional as TF -import torch.distributed as dist -import webdataset as wds -from PIL import Image -from torchvision.transforms import InterpolationMode -from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler, IterableDataset, get_worker_info -from torch.utils.data.distributed import DistributedSampler, Sampler -from webdataset.filters import _shuffle -from webdataset.tariterators import base_plus_ext, url_opener, tar_file_expander, valid_sample - -from open_clip import transform - -try: - import horovod.torch as hvd -except ImportError: - hvd = None - -try: - from petrel_client.client import Client -except ImportError as E: - "petrel_client.client cannot be imported" - pass - -def pil_loader(img_str): - buff = io.BytesIO(img_str) - return Image.open(buff).convert("RGB") - -@functools.lru_cache() -def _get_global_gloo_group(): - """ - Return a process group based on gloo backend, containing all the ranks - The result is cached. - """ - if dist.get_backend() == "nccl": - return dist.new_group(backend="gloo") - else: - return dist.group.WORLD - -def all_gather(data, group=None): - """ - Run all_gather on arbitrary picklable data (not necessarily tensors). - - Args: - data: any picklable object - group: a torch process group. By default, will use a group which - contains all ranks on gloo backend. - - Returns: - list[data]: list of data gathered from each rank - """ - if dist.get_world_size() == 1: - return [data] - if group is None: - group = _get_global_gloo_group() # use CPU group by default, to reduce GPU RAM usage. - world_size = dist.get_world_size(group) - if world_size == 1: - return [data] - - output = [None for _ in range(world_size)] - dist.all_gather_object(output, data, group=group) - return output - -def shared_random_seed(): - """ - Returns: - int: a random number that is the same across all workers. - If workers need a shared RNG, they can use this shared seed to - create one. - - All workers must call this function, otherwise it will deadlock. - """ - ints = np.random.randint(2**31) - all_ints = all_gather(ints) - return all_ints[0] - -class TrainingSampler(Sampler): - """ - In training, we only care about the "infinite stream" of training data. - So this sampler produces an infinite stream of indices and - all workers cooperate to correctly shuffle the indices and sample different indices. 
- - The samplers in each worker effectively produces `indices[worker_id::num_workers]` - where `indices` is an infinite stream of indices consisting of - `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True) - or `range(size) + range(size) + ...` (if shuffle is False) - """ - - def __init__(self, dataset, num_replicas=None, rank=None, local_rank=None, local_size=None, shuffle=True, seed = None): - if num_replicas is None: - if not dist.is_available(): - raise RuntimeError("Requires distributed package to be available") - num_replicas = dist.get_world_size() - if rank is None: - if not dist.is_available(): - raise RuntimeError("Requires distributed package to be available") - rank = dist.get_rank() - self.dataset = dataset - self.num_replicas = num_replicas - self.rank = rank - self.epoch = 0 - self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) -1 - self.total_size = len(dataset) - self.shuffle = shuffle - # self.dataset_repeat = dataset_repeat - if seed is None: - seed = shared_random_seed() - self.seed = int(seed) - - def __len__(self): - return self.num_samples - - def __iter__(self): - start = self.rank - yield from itertools.islice(self._infinite_indices(), start, None, self.num_replicas) - - def _infinite_indices(self): - g = torch.Generator() - g.manual_seed(self.seed) - while True: - if self.shuffle: - yield from torch.randperm(self.total_size, generator=g).tolist() - else: - yield from torch.arange(self.total_size).tolist() - -class TCSLoader(object): - - def __init__(self, time_limit=3): - conf_path = os.environ.get('CEPH_CONFIG', './petreloss.config') - self.client = Client(conf_path) - self.time_limit = time_limit - - def __call__(self, fn): - try: - img_value_str = self.client.get(fn) - img = pil_loader(img_value_str) - return img - except Exception as e: - print('Read image failed ({})'.format(fn)) - raise e - - -class CsvDataset(Dataset): - def __init__(self, input_filename, transforms, img_key, caption_key, sep="\t", tokenizer=None): - logging.debug(f'Loading csv data from {input_filename}.') - df = pd.read_csv(input_filename, sep=sep) - - self.images = df[img_key].tolist() - self.captions = df[caption_key].tolist() - self.transforms = transforms - logging.debug('Done loading data.') - - self.tokenize = tokenizer - - def __len__(self): - return len(self.captions) - - def __getitem__(self, idx): - images = self.transforms(Image.open(str(self.images[idx]))) - texts = self.tokenize([str(self.captions[idx])])[0] - return images, texts - - -class SharedEpoch: - def __init__(self, epoch: int = 0): - self.shared_epoch = Value('i', epoch) - - def set_value(self, epoch): - self.shared_epoch.value = epoch - - def get_value(self): - return self.shared_epoch.value - - -@dataclass -class DataInfo: - dataloader: DataLoader - data_type: str - sampler: DistributedSampler = None - shared_epoch: SharedEpoch = None - - def set_epoch(self, epoch): - if self.shared_epoch is not None: - self.shared_epoch.set_value(epoch) - if self.sampler is not None and isinstance(self.sampler, DistributedSampler): - self.sampler.set_epoch(epoch) - - -def expand_urls(urls, weights=None): - if weights is None: - expanded_urls = wds.shardlists.expand_urls(urls) - return expanded_urls, None - if isinstance(urls, str): - urllist = urls.split("::") - weights = weights.split('::') - assert len(weights) == len(urllist), f"Expected the number of data components ({len(urllist)}) and weights({len(weights)}) to match." 
- weights = [float(weight) for weight in weights] - all_urls, all_weights = [], [] - for url, weight in zip(urllist, weights): - expanded_url = list(braceexpand.braceexpand(url)) - expanded_weights = [weight for _ in expanded_url] - all_urls.extend(expanded_url) - all_weights.extend(expanded_weights) - return all_urls, all_weights - else: - all_urls = list(urls) - return all_urls, weights - - -def get_dataset_size(shards): - shards_list, _ = expand_urls(shards) - dir_path = os.path.dirname(shards_list[0]) - sizes_filename = os.path.join(dir_path, 'sizes.json') - len_filename = os.path.join(dir_path, '__len__') - if os.path.exists(sizes_filename): - sizes = json.load(open(sizes_filename, 'r')) - total_size = sum([int(sizes[os.path.basename(shard)]) for shard in shards_list]) - elif os.path.exists(len_filename): - # FIXME this used to be eval(open(...)) but that seemed rather unsafe - total_size = ast.literal_eval(open(len_filename, 'r').read()) - else: - total_size = None # num samples undefined - # some common dataset sizes (at time of authors last download) - # CC3M (train): 2905954 - # CC12M: 10968539 - # LAION-400M: 407332084 - # LAION-2B (english): 2170337258 - num_shards = len(shards_list) - return total_size, num_shards - - -def get_imagenet(args, preprocess_fns, split): - assert split in ["train", "val", "v2"] - is_train = split == "train" - preprocess_train, preprocess_val = preprocess_fns - - if split == "v2": - from imagenetv2_pytorch import ImageNetV2Dataset - dataset = ImageNetV2Dataset(location=args.imagenet_v2, transform=preprocess_val) - else: - if is_train: - data_path = args.imagenet_train - preprocess_fn = preprocess_train - else: - data_path = args.imagenet_val - preprocess_fn = preprocess_val - assert data_path - - dataset = datasets.ImageFolder(data_path, transform=preprocess_fn) - - if is_train: - idxs = np.zeros(len(dataset.targets)) - target_array = np.array(dataset.targets) - k = 50 - for c in range(1000): - m = target_array == c - n = len(idxs[m]) - arr = np.zeros(n) - arr[:k] = 1 - np.random.shuffle(arr) - idxs[m] = arr - - idxs = idxs.astype('int') - sampler = SubsetRandomSampler(np.where(idxs)[0]) - else: - sampler = None - - dataloader = torch.utils.data.DataLoader( - dataset, - batch_size=args.batch_size, - num_workers=args.workers, - sampler=sampler, - ) - - return DataInfo(dataloader=dataloader, sampler=sampler, data_type='classification') - - -def count_samples(dataloader): - os.environ["WDS_EPOCH"] = "0" - n_elements, n_batches = 0, 0 - for images, texts in dataloader: - n_batches += 1 - n_elements += len(images) - assert len(images) == len(texts) - return n_elements, n_batches - - -def filter_no_caption_or_no_image(sample): - has_caption = ('txt' in sample) - has_image = ('png' in sample or 'jpg' in sample or 'jpeg' in sample or 'webp' in sample) - return has_caption and has_image - - -def log_and_continue(exn): - """Call in an exception handler to ignore any exception, issue a warning, and continue.""" - logging.warning(f'Handling webdataset error ({repr(exn)}). Ignoring.') - return True - - -def group_by_keys_nothrow(data, keys=base_plus_ext, lcase=True, suffixes=None, handler=None): - """Return function over iterator that groups key, value pairs into samples. 
- - :param keys: function that splits the key into key and extension (base_plus_ext) - :param lcase: convert suffixes to lower case (Default value = True) - """ - current_sample = None - for filesample in data: - assert isinstance(filesample, dict) - fname, value = filesample["fname"], filesample["data"] - prefix, suffix = keys(fname) - if prefix is None: - continue - if lcase: - suffix = suffix.lower() - # FIXME webdataset version throws if suffix in current_sample, but we have a potential for - # this happening in the current LAION400m dataset if a tar ends with same prefix as the next - # begins, rare, but can happen since prefix aren't unique across tar files in that dataset - if current_sample is None or prefix != current_sample["__key__"] or suffix in current_sample: - if valid_sample(current_sample): - yield current_sample - current_sample = dict(__key__=prefix, __url__=filesample["__url__"]) - if suffixes is None or suffix in suffixes: - current_sample[suffix] = value - if valid_sample(current_sample): - yield current_sample - - -def tarfile_to_samples_nothrow(src, handler=log_and_continue): - # NOTE this is a re-impl of the webdataset impl with group_by_keys that doesn't throw - streams = url_opener(src, handler=handler) - files = tar_file_expander(streams, handler=handler) - samples = group_by_keys_nothrow(files, handler=handler) - return samples - - -def pytorch_worker_seed(increment=0): - """get dataloader worker seed from pytorch""" - worker_info = get_worker_info() - if worker_info is not None: - # favour using the seed already created for pytorch dataloader workers if it exists - seed = worker_info.seed - if increment: - # space out seed increments so they can't overlap across workers in different iterations - seed += increment * max(1, worker_info.num_workers) - return seed - # fallback to wds rank based seed - return wds.utils.pytorch_worker_seed() - - -_SHARD_SHUFFLE_SIZE = 2000 -_SHARD_SHUFFLE_INITIAL = 500 -_SAMPLE_SHUFFLE_SIZE = 5000 -_SAMPLE_SHUFFLE_INITIAL = 1000 - - -class detshuffle2(wds.PipelineStage): - def __init__( - self, - bufsize=1000, - initial=100, - seed=0, - epoch=-1, - ): - self.bufsize = bufsize - self.initial = initial - self.seed = seed - self.epoch = epoch - - def run(self, src): - if isinstance(self.epoch, SharedEpoch): - epoch = self.epoch.get_value() - else: - # NOTE: this is epoch tracking is problematic in a multiprocess (dataloader workers or train) - # situation as different workers may wrap at different times (or not at all). - self.epoch += 1 - epoch = self.epoch - rng = random.Random() - if self.seed < 0: - # If seed is negative, we use the worker's seed, this will be different across all nodes/workers - seed = pytorch_worker_seed(epoch) - else: - # This seed to be deterministic AND the same across all nodes/workers in each epoch - seed = self.seed + epoch - rng.seed(seed) - return _shuffle(src, self.bufsize, self.initial, rng) - - -class ResampledShards2(IterableDataset): - """An iterable dataset yielding a list of urls.""" - - def __init__( - self, - urls, - weights=None, - nshards=sys.maxsize, - worker_seed=None, - deterministic=False, - epoch=-1, - ): - """Sample shards from the shard list with replacement. 
- - :param urls: a list of URLs as a Python list or brace notation string - """ - super().__init__() - urls, weights = expand_urls(urls, weights) - self.urls = urls - self.weights = weights - if self.weights is not None: - assert len(self.urls) == len(self.weights), f"Number of urls {len(self.urls)} and weights {len(self.weights)} should match." - assert isinstance(self.urls[0], str) - self.nshards = nshards - self.rng = random.Random() - self.worker_seed = worker_seed - self.deterministic = deterministic - self.epoch = epoch - - def __iter__(self): - """Return an iterator over the shards.""" - if isinstance(self.epoch, SharedEpoch): - epoch = self.epoch.get_value() - else: - # NOTE: this is epoch tracking is problematic in a multiprocess (dataloader workers or train) - # situation as different workers may wrap at different times (or not at all). - self.epoch += 1 - epoch = self.epoch - if self.deterministic: - # reset seed w/ epoch if deterministic - if self.worker_seed is None: - # pytorch worker seed should be deterministic due to being init by arg.seed + rank + worker id - seed = pytorch_worker_seed(epoch) - else: - seed = self.worker_seed() + epoch - self.rng.seed(seed) - for _ in range(self.nshards): - if self.weights is None: - yield dict(url=self.rng.choice(self.urls)) - else: - yield dict(url=self.rng.choices(self.urls, weights=self.weights, k=1)[0]) - - -def get_wds_dataset(args, preprocess_img, is_train, epoch=0, floor=False, tokenizer=None): - input_shards = args.train_data if is_train else args.val_data - assert input_shards is not None - resampled = getattr(args, 'dataset_resampled', False) and is_train - - num_samples, num_shards = get_dataset_size(input_shards) - if not num_samples: - if is_train: - num_samples = args.train_num_samples - if not num_samples: - raise RuntimeError( - 'Currently, number of dataset samples must be specified for training dataset. ' - 'Please specify via `--train-num-samples` if no dataset length info present.') - else: - num_samples = args.val_num_samples or 0 # eval will just exhaust the iterator if not specified - - shared_epoch = SharedEpoch(epoch=epoch) # create a shared epoch store to sync epoch to dataloader worker proc - - if resampled: - pipeline = [ResampledShards2(input_shards, weights=args.train_data_upsampling_factors, deterministic=True, epoch=shared_epoch)] - else: - assert args.train_data_upsampling_factors is None, "--train_data_upsampling_factors is only supported when sampling with replacement (together with --dataset-resampled)." 
- pipeline = [wds.SimpleShardList(input_shards)] - - # at this point we have an iterator over all the shards - if is_train: - if not resampled: - pipeline.extend([ - detshuffle2( - bufsize=_SHARD_SHUFFLE_SIZE, - initial=_SHARD_SHUFFLE_INITIAL, - seed=args.seed, - epoch=shared_epoch, - ), - wds.split_by_node, - wds.split_by_worker, - ]) - pipeline.extend([ - # at this point, we have an iterator over the shards assigned to each worker at each node - tarfile_to_samples_nothrow, # wds.tarfile_to_samples(handler=log_and_continue), - wds.shuffle( - bufsize=_SAMPLE_SHUFFLE_SIZE, - initial=_SAMPLE_SHUFFLE_INITIAL, - ), - ]) - else: - pipeline.extend([ - wds.split_by_worker, - # at this point, we have an iterator over the shards assigned to each worker - wds.tarfile_to_samples(handler=log_and_continue), - ]) - pipeline.extend([ - wds.select(filter_no_caption_or_no_image), - wds.decode("pilrgb", handler=log_and_continue), - wds.rename(image="jpg;png;jpeg;webp", text="txt"), - wds.map_dict(image=preprocess_img, text=lambda text: tokenizer(text)[0]), - wds.to_tuple("image", "text"), - wds.batched(args.batch_size, partial=not is_train) - ]) - - dataset = wds.DataPipeline(*pipeline) - - if is_train: - if not resampled: - assert num_shards >= args.workers * args.world_size, 'number of shards must be >= total workers' - # roll over and repeat a few samples to get same number of full batches on each node - round_fn = math.floor if floor else math.ceil - global_batch_size = args.batch_size * args.world_size - num_batches = round_fn(num_samples / global_batch_size) - num_workers = max(1, args.workers) - num_worker_batches = round_fn(num_batches / num_workers) # per dataloader worker - num_batches = num_worker_batches * num_workers - num_samples = num_batches * global_batch_size - dataset = dataset.with_epoch(num_worker_batches) # each worker is iterating over this - else: - # last batches are partial, eval is done on single (master) node - num_batches = math.ceil(num_samples / args.batch_size) - - dataloader = wds.WebLoader( - dataset, - batch_size=None, - shuffle=False, - num_workers=args.workers, - persistent_workers=True, - ) - - # FIXME not clear which approach is better, with_epoch before vs after dataloader? 
- # hoping to resolve via https://github.com/webdataset/webdataset/issues/169 - # if is_train: - # # roll over and repeat a few samples to get same number of full batches on each node - # global_batch_size = args.batch_size * args.world_size - # num_batches = math.ceil(num_samples / global_batch_size) - # num_workers = max(1, args.workers) - # num_batches = math.ceil(num_batches / num_workers) * num_workers - # num_samples = num_batches * global_batch_size - # dataloader = dataloader.with_epoch(num_batches) - # else: - # # last batches are partial, eval is done on single (master) node - # num_batches = math.ceil(num_samples / args.batch_size) - - # add meta-data to dataloader instance for convenience - dataloader.num_batches = num_batches - dataloader.num_samples = num_samples - - return DataInfo(dataloader=dataloader, shared_epoch=shared_epoch, data_type='image-text') - - -def get_csv_dataset(args, preprocess_fn, is_train, epoch=0, tokenizer=None): - input_filename = args.train_data if is_train else args.val_data - assert input_filename - dataset = CsvDataset( - input_filename, - preprocess_fn, - img_key=args.csv_img_key, - caption_key=args.csv_caption_key, - sep=args.csv_separator, - tokenizer=tokenizer - ) - num_samples = len(dataset) - sampler = DistributedSampler(dataset) if args.distributed and is_train else None - shuffle = is_train and not args.distributed and sampler is None - - dataloader = DataLoader( - dataset, - batch_size=args.batch_size, - shuffle=shuffle, - num_workers=args.workers, - pin_memory=True, - sampler=sampler, - drop_last=is_train, - ) - dataloader.num_samples = num_samples - dataloader.num_batches = len(dataloader) - - return DataInfo(dataloader=dataloader, sampler=sampler, data_type='image-text') - - -class SyntheticDataset(Dataset): - - def __init__(self, transform=None, image_size=(224, 224), caption="Dummy caption", dataset_size=100, tokenizer=None): - self.transform = transform - self.image_size = image_size - self.caption = caption - self.image = Image.new('RGB', image_size) - self.dataset_size = dataset_size - - self.preprocess_txt = lambda text: tokenizer(text)[0] - - def __len__(self): - return self.dataset_size - - def __getitem__(self, idx): - if self.transform is not None: - image = self.transform(self.image) - return image, self.preprocess_txt(self.caption) - - -def get_synthetic_dataset(args, preprocess_fn, is_train, epoch=0, tokenizer=None): - image_size = preprocess_fn.transforms[0].size - dataset = SyntheticDataset( - transform=preprocess_fn, image_size=image_size, dataset_size=args.train_num_samples, tokenizer=tokenizer) - num_samples = len(dataset) - sampler = DistributedSampler(dataset) if args.distributed and is_train else None - shuffle = is_train and not args.distributed and sampler is None - - dataloader = DataLoader( - dataset, - batch_size=args.batch_size, - shuffle=shuffle, - num_workers=args.workers, - pin_memory=True, - sampler=sampler, - drop_last=is_train, - ) - dataloader.num_samples = num_samples - dataloader.num_batches = len(dataloader) - - return DataInfo(dataloader=dataloader, sampler=sampler, data_type='image-text') - -class PreferenceDataset(Dataset): - def __init__(self, meta_file, image_folder, transforms, tokenizer, extra_data=(None, None)): - extra_meta, extra_folder = extra_data - self.transforms = transforms - self.tokenizer = tokenizer - self.open_image = Image.open - if image_folder.startswith('s3://'): - loader = TCSLoader() - self.open_image = loader - if meta_file is not None: - with open(meta_file, 'r') as f: - 
self.table = pa.Table.from_pylist(json.load(f)) - self.image_folder = image_folder - else: - # self.captions = pa.array() - self.table = [] - if extra_meta: - with open(extra_meta, 'r') as f: - meta = json.load(f) - self.files = [t['files'] for t in meta] - self.extra_captions = [t['caption'] for t in meta] - self.extra_label = [t['human_preference'] for t in meta] - self.extra_image_folder = extra_folder - else: - self.extra_captions = [] - - def __len__(self): - return len(self.table) + len(self.extra_captions) - - def __getitem__(self, idx): - try: - if idx < len(self.table): - images = [self.transforms(self.open_image(os.path.join(self.image_folder, file_names))) for file_names in self.table.column('file_path')[idx].as_py()] - if not len(set([i.size() for i in images])) == 1: - return self.__getitem__((idx + 1) % len(self)) - label = self.table.column('pap_pref')[idx].as_py() - caption = self.tokenizer(self.table.column('prompt')[idx].as_py()) - else: - idx = idx - len(self.captions) - images = [self.transforms(self.open_image(os.path.join(self.extra_image_folder, f))) for f in self.files[idx]] - label = self.extra_label[idx] - caption = self.tokenizer(self.extra_captions[idx]) - if not len(set([i.size() for i in images])) == 1: - return self.__getitem__((idx + 1) % len(self)) - else: - return images, label, caption - except: - return self.__getitem__((idx + 1) % len(self)) - -class HPDDataset(Dataset): - def __init__(self, meta_file, image_folder, transforms, tokenizer, is_train=True): - self.transforms = transforms - self.tokenizer = tokenizer - self.open_image = Image.open - self.is_train = is_train - if image_folder.startswith('s3://'): - loader = TCSLoader() - self.open_image = loader - if meta_file is not None: - with open(meta_file, 'r') as f: - self.table = pa.Table.from_pylist(json.load(f)) - self.image_folder = image_folder - else: - self.table = [] - - def __len__(self): - return len(self.table) - - def __getitem__(self, idx): - try: - if self.is_train: - images = [self.transforms(self.open_image(os.path.join(self.image_folder, file_names))) for file_names in self.table.column('file_path')[idx].as_py()] - if not len(set([i.size() for i in images])) == 1: - return self.__getitem__((idx + 1) % len(self)) - label = self.table.column('human_preference')[idx].as_py() - caption = self.tokenizer(self.table.column('prompt')[idx].as_py()) - # num_per_prompt = self.table.column('num_per_prompt')[idx].as_py() - return images, label, caption - else: - images = [self.transforms(self.open_image(os.path.join(self.image_folder, file_names))) for file_names in self.table.column('file_path')[idx].as_py()] - if not len(set([i.size() for i in images])) == 1: - return self.__getitem__((idx + 1) % len(self)) - label = self.table.column('human_preference')[idx].as_py() - caption = self.tokenizer(self.table.column('prompt')[idx].as_py()) - return images, label, caption - except: - return self.__getitem__((idx + 1) % len(self)) - - -class RatingDataset(Dataset): - def __init__(self, meta_file, image_folder, transforms): - self.transforms = transforms - self.image_folder = image_folder - self.open_image = Image.open - self.max_size = 224 - if image_folder.startswith('s3://'): - loader = TCSLoader() - self.open_image = loader - with open(meta_file, 'r') as f: - self.table = pa.Table.from_pylist(json.load(f)) - - - def __len__(self): - return len(self.table) - - def __getitem__(self, idx): - try: - images = self.transforms(self.open_image(os.path.join(self.image_folder, 
self.table.column('path')[idx].as_py()))) - img_weight, img_height = images.shape[1:] - if img_weight != self.max_size or img_height != self.max_size: - return self.__getitem__((idx + 10) % len(self)) - label = self.table.column('rating')[idx].as_py() - return images, label - except: - return self.__getitem__((idx + 1) % len(self)) - -class RankingDataset(Dataset): - def __init__(self, meta_file, image_folder, transforms, tokenizer): - self.transforms = transforms - self.image_folder = image_folder - self.open_image = Image.open - if image_folder.startswith('s3://'): - loader = TCSLoader() - self.open_image = loader - self.tokenizer = tokenizer - - with open(meta_file, 'r') as f: - self.table = pa.Table.from_pylist(json.load(f)) - - - def __len__(self): - return len(self.table) - - def __getitem__(self, idx): - try: - images = [self.transforms(self.open_image(os.path.join(self.image_folder, file_names))) for file_names in self.table.column('image_path')[idx].as_py()] - label = self.table.column('rank')[idx].as_py() - caption = self.tokenizer(self.table.column('prompt')[idx].as_py()) - return images, label, caption - except: - return self.__getitem__((idx + 1) % len(self)) - -class RegionDataset(Dataset): - def __init__(self, meta_file, image_folder, transforms): - self.transforms = transforms - self.image_folder = image_folder - self.open_image = Image.open - - with open(meta_file,'r') as f: - self.table = pa.Table.from_pylist(json.load(f)) - - - - def __len__(self): - return len(self.table) - - def __getitem__(self, idx): - try: - img = self.open_image(os.path.join(self.image_folder, self.table.column('image_path')[idx].as_py())) - mask = self.open_image(os.path.join(self.image_folder, self.table.column('mask_path')[idx].as_py())) - img.putalpha(mask) - masked_image = self.transforms(img) - image = masked_image[:3] - mask = masked_image[3] - return image, mask - except: - return self.__getitem__((idx + 1) % len(self)) - -class ImageRewardDataset(Dataset): - def __init__(self, meta_file, image_folder,transforms, tokenizer): - self.transforms = transforms - self.image_folder = image_folder - self.open_image = Image.open - self.tokenizer = tokenizer - - with open(meta_file, 'r') as f: - self.table = pa.Table.from_pylist(json.load(f)) - - def __len__(self): - return len(self.table) - - def __getitem__(self, idx): - - images = [self.transforms(self.open_image(os.path.join(self.image_folder, file_names))) for file_names in self.table.column('generations')[idx].as_py()] - label = self.table.column('ranking')[idx].as_py() - caption = self.tokenizer(self.table.column('prompt')[idx].as_py()) - return images, label, caption - - -def set_env_vars(something): - os.environ['http_proxy'] = '' - os.environ['https_proxy'] = '' - -def collate_rating(batch): - images = [sample[0] for sample in batch] - labels = torch.tensor([sample[1] for sample in batch]) - images = torch.stack(images) - return images, labels - -def get_rating_dataset(args, preprocess_fn, is_train, epoch=0, tokenizer=None): - # only training data - assert is_train - dataset = RatingDataset(meta_file=args.train_data, - image_folder=args.train_folder, - transforms=preprocess_fn) - num_samples = len(dataset) - sampler = TrainingSampler(dataset) if args.distributed else None - shuffle = is_train and not args.distributed - - dataloader = DataLoader( - dataset, - batch_size=args.batch_size, - shuffle=shuffle, - num_workers=args.workers, - pin_memory=True, - sampler=sampler, - drop_last=is_train, - collate_fn=collate_rating, - 
worker_init_fn=set_env_vars, - persistent_workers=True, - ) - dataloader.num_samples = num_samples - dataloader.num_batches = len(dataloader) - - return DataInfo(dataloader=dataloader, sampler=sampler, data_type='rating') - -def collate_pref(batch): - images = [torch.stack(sample[0]) for sample in batch] - num_images = torch.tensor([g.size(0) for g in images]) - labels = torch.tensor([sample[1] for sample in batch]) - captions = torch.cat([sample[2] for sample in batch]) - images = torch.cat(images) - return images, num_images, labels, captions - -def get_preference_dataset(args, preprocess_fn, is_train, epoch=0, tokenizer=None, extra_val=False): - if is_train: - extra_data = (args.extra_train_data, args.extra_train_folder) - dataset = PreferenceDataset(meta_file=args.train_data if is_train else args.val_data, - image_folder=args.train_folder if is_train else args.val_folder, - transforms=preprocess_fn, tokenizer=tokenizer, extra_data=extra_data) - else: - if extra_val: - dataset = PreferenceDataset(meta_file=None, - image_folder=None, - transforms=preprocess_fn, tokenizer=tokenizer, extra_data=(args.extra_val_data, args.extra_val_folder)) - else: - dataset = PreferenceDataset(meta_file=args.val_data, - image_folder=args.val_folder, - transforms=preprocess_fn, tokenizer=tokenizer) - - num_samples = len(dataset) - sampler = TrainingSampler(dataset) if args.distributed and is_train else None - shuffle = is_train and not args.distributed and sampler is None - - dataloader = DataLoader( - dataset, - batch_size=args.batch_size, - shuffle=shuffle, - num_workers=args.workers, - pin_memory=True, - sampler=sampler, - drop_last=is_train, - collate_fn=collate_pref, - worker_init_fn=set_env_vars, - persistent_workers=True, - ) - dataloader.num_samples = num_samples - dataloader.num_batches = len(dataloader) - - return DataInfo(dataloader=dataloader, sampler=sampler, data_type='preference') - -def collate_HPD(batch): - image_1 = torch.stack([sample[0][0] for sample in batch]) - image_2 = torch.stack([sample[0][1] for sample in batch]) - label_1 = torch.tensor([sample[1][0] for sample in batch]) - label_2 = torch.tensor([sample[1][1] for sample in batch]) - labels = torch.cat([label_1, label_2], dim=0) - captions = torch.cat([sample[2] for sample in batch]) - images = torch.cat([image_1, image_2]) - return images, labels, captions - -def get_HPD_dataset(args, preprocess_fn, is_train, epoch=0, tokenizer=None): - dataset = HPDDataset(meta_file=args.train_data if is_train else args.val_data, - image_folder=args.train_folder if is_train else args.val_folder, - transforms=preprocess_fn, tokenizer=tokenizer, is_train=is_train) - - num_samples = len(dataset) - sampler = TrainingSampler(dataset) if args.distributed and is_train else None - shuffle = is_train and not args.distributed and sampler is None - - dataloader = DataLoader( - dataset, - batch_size=args.batch_size, - shuffle=shuffle, - num_workers=args.workers, - pin_memory=True, - sampler=sampler, - drop_last=is_train, - collate_fn=collate_HPD if is_train else collate_pref, - worker_init_fn=set_env_vars, - persistent_workers=True, - ) - dataloader.num_samples = num_samples - dataloader.num_batches = len(dataloader) - - return DataInfo(dataloader=dataloader, sampler=sampler, data_type='HPD' if is_train else 'preference') - -def get_ranking_dataset(args, preprocess_fn, is_train, epoch=0, tokenizer=None): - if is_train: - dataset = RankingDataset(meta_file=args.train_data, - image_folder=args.train_folder, transforms=preprocess_fn, tokenizer=tokenizer) - 
else: - dataset = RankingDataset(meta_file=args.val_data, - image_folder=args.val_folder, transforms=preprocess_fn, tokenizer=tokenizer) - - num_samples = len(dataset) - sampler = TrainingSampler(dataset) if args.distributed and is_train else None - shuffle = is_train and not args.distributed and sampler is None - - dataloader = DataLoader( - dataset, - batch_size=args.batch_size, - shuffle=shuffle, - num_workers=args.workers, - pin_memory=True, - sampler=sampler, - drop_last=is_train, - collate_fn=collate_rank, - ) - dataloader.num_samples = num_samples - dataloader.num_batches = len(dataloader) - return DataInfo(dataloader=dataloader, sampler=sampler, data_type='ranking') - -def get_regional_dataset(args, preprocess_fn, is_train, epoch=0, tokenizer=None): - if is_train: - dataset = RegionDataset( - meta_file=args.train_data, - image_folder=args.train_folder, - transforms=preprocess_fn - ) - else: - dataset = RegionDataset( - meta_file=args.val_data, - image_folder=args.val_folder, - transforms=preprocess_fn - ) - num_samples = len(dataset) - sampler = TrainingSampler(dataset) if args.distributed else None - shuffle = is_train and not args.distributed - - dataloader = DataLoader( - dataset, - batch_size=args.batch_size, - shuffle=shuffle, - num_workers=args.workers, - pin_memory=True, - sampler=sampler, - drop_last=is_train, - worker_init_fn=set_env_vars, - persistent_workers=True, - ) - dataloader.num_samples = num_samples - dataloader.num_batches = len(dataloader) - - return DataInfo(dataloader=dataloader, sampler=sampler, data_type='regional') - -def collate_rank(batch): - images = [torch.stack(sample[0]) for sample in batch] - num_images = torch.tensor([g.size(0) for g in images]) - labels = [torch.tensor(sample[1]) for sample in batch] - captions = torch.cat([sample[2] for sample in batch]) - images = torch.cat(images) - labels = torch.cat(labels) - return images, num_images, labels, captions - -def get_imagereward_dataset(args, preprocess_fn, is_train, epoch=0, tokenizer=None): - #only support evaluation - if not is_train: - dataset = ImageRewardDataset( - meta_file=args.val_data, - image_folder = args.val_folder, - transforms=preprocess_fn, - tokenizer=tokenizer - ) - num_samples = len(dataset) - sampler = TrainingSampler(dataset) if args.distributed and is_train else None - shuffle = is_train and not args.distributed - - dataloader = DataLoader( - dataset, - batch_size=args.batch_size, - shuffle=shuffle, - num_workers=args.workers, - pin_memory=True, - sampler=sampler, - drop_last=is_train, - worker_init_fn=set_env_vars, - collate_fn=collate_rank, - persistent_workers=True, - ) - dataloader.num_samples = num_samples - dataloader.num_batches = len(dataloader) - - return DataInfo(dataloader=dataloader, sampler=sampler, data_type='ImageReward') - -def get_dataset_fn(data_path, dataset_type): - if dataset_type == "webdataset": - return get_wds_dataset - elif dataset_type == "csv": - return get_csv_dataset - elif dataset_type == "synthetic": - return get_synthetic_dataset - elif dataset_type == "auto": - ext = data_path.split('.')[-1] - if ext in ['csv', 'tsv']: - return get_csv_dataset - elif ext in ['tar']: - return get_wds_dataset - else: - raise ValueError( - f"Tried to figure out dataset type, but failed for extension {ext}.") - elif dataset_type == "preference": - return get_preference_dataset - elif dataset_type == "rating": - return get_rating_dataset - elif dataset_type == 'ranking': - return get_ranking_dataset - elif dataset_type == 'regional': - return get_regional_dataset 
- elif dataset_type == 'ImageReward': - return get_imagereward_dataset - elif dataset_type == "HPD": - return get_HPD_dataset - else: - raise ValueError(f"Unsupported dataset type: {dataset_type}") - - -def get_data(args, preprocess_fns, epoch=0, tokenizer=None): - preprocess_train, preprocess_val = preprocess_fns - data = {} - - if args.train_data or args.dataset_type == "synthetic": - assert len(args.train_data) == len(args.dataset_type) == len(args.batch_size) == len(args.workers) == len(args.train_folder) == len(args.train_data_sample_ratio) == len(args.ignore_in_train) - for train_data, dataset_type, batch_size, workers, train_folder, train_data_sample_ratio, ignore in zip(args.train_data, args.dataset_type, args.batch_size, args.workers, args.train_folder, args.train_data_sample_ratio, args.ignore_in_train): - if ignore: - continue - if 'train' not in data: - data['train'] = [] - new_args = copy.deepcopy(args) - new_args.train_data = train_data - new_args.dataset_type = dataset_type - new_args.batch_size = batch_size - new_args.workers = workers - new_args.train_folder = train_folder - new_args.train_data_sample_ratio = train_data_sample_ratio - dataset = get_dataset_fn(new_args.train_data, new_args.dataset_type)( - new_args, preprocess_train, is_train=True, epoch=epoch, tokenizer=tokenizer) - data['train'].append(dataset) - - if args.val_data[0]: - assert len(args.val_data) == len(args.dataset_type) == len(args.batch_size) == len(args.workers) == len(args.val_folder) == len(args.ignore_in_val) - # data['val'] = [] - for val_data, dataset_type, batch_size, workers, val_folder ,ignore in zip(args.val_data, args.dataset_type, args.batch_size, args.workers, args.val_folder, args.ignore_in_val): - if ignore: - continue - if 'val' not in data: - data['val'] = [] - new_args = copy.deepcopy(args) - new_args.val_data = val_data - new_args.dataset_type = dataset_type - new_args.batch_size = batch_size - new_args.workers = workers - new_args.val_folder = val_folder - dataset = get_dataset_fn(new_args.val_data, new_args.dataset_type)( - new_args, preprocess_val, is_train=False, tokenizer=tokenizer) - data['val'].append(dataset) - - if args.extra_val_data: - assert False - data["extra_val"] = get_dataset_fn(args.val_data, args.dataset_type)( - args, preprocess_val, is_train=False, tokenizer=tokenizer, extra_val=True) - - if args.imagenet_val is not None: - data["imagenet-val"] = get_imagenet(args, preprocess_fns, "val") - - if args.imagenet_v2 is not None: - data["imagenet-v2"] = get_imagenet(args, preprocess_fns, "v2") - - return data diff --git a/spaces/ybelkada/interfacegan_pp/models/pggan_tf_official/metrics/sliced_wasserstein.py b/spaces/ybelkada/interfacegan_pp/models/pggan_tf_official/metrics/sliced_wasserstein.py deleted file mode 100644 index 0028897c3aeffe7eb8f63eb4b1f37c2329dc84cf..0000000000000000000000000000000000000000 --- a/spaces/ybelkada/interfacegan_pp/models/pggan_tf_official/metrics/sliced_wasserstein.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. 
- -import numpy as np -import scipy.ndimage - -#---------------------------------------------------------------------------- - -def get_descriptors_for_minibatch(minibatch, nhood_size, nhoods_per_image): - S = minibatch.shape # (minibatch, channel, height, width) - assert len(S) == 4 and S[1] == 3 - N = nhoods_per_image * S[0] - H = nhood_size // 2 - nhood, chan, x, y = np.ogrid[0:N, 0:3, -H:H+1, -H:H+1] - img = nhood // nhoods_per_image - x = x + np.random.randint(H, S[3] - H, size=(N, 1, 1, 1)) - y = y + np.random.randint(H, S[2] - H, size=(N, 1, 1, 1)) - idx = ((img * S[1] + chan) * S[2] + y) * S[3] + x - return minibatch.flat[idx] - -#---------------------------------------------------------------------------- - -def finalize_descriptors(desc): - if isinstance(desc, list): - desc = np.concatenate(desc, axis=0) - assert desc.ndim == 4 # (neighborhood, channel, height, width) - desc -= np.mean(desc, axis=(0, 2, 3), keepdims=True) - desc /= np.std(desc, axis=(0, 2, 3), keepdims=True) - desc = desc.reshape(desc.shape[0], -1) - return desc - -#---------------------------------------------------------------------------- - -def sliced_wasserstein(A, B, dir_repeats, dirs_per_repeat): - assert A.ndim == 2 and A.shape == B.shape # (neighborhood, descriptor_component) - results = [] - for repeat in range(dir_repeats): - dirs = np.random.randn(A.shape[1], dirs_per_repeat) # (descriptor_component, direction) - dirs /= np.sqrt(np.sum(np.square(dirs), axis=0, keepdims=True)) # normalize descriptor components for each direction - dirs = dirs.astype(np.float32) - projA = np.matmul(A, dirs) # (neighborhood, direction) - projB = np.matmul(B, dirs) - projA = np.sort(projA, axis=0) # sort neighborhood projections for each direction - projB = np.sort(projB, axis=0) - dists = np.abs(projA - projB) # pointwise wasserstein distances - results.append(np.mean(dists)) # average over neighborhoods and directions - return np.mean(results) # average over repeats - -#---------------------------------------------------------------------------- - -def downscale_minibatch(minibatch, lod): - if lod == 0: - return minibatch - t = minibatch.astype(np.float32) - for i in range(lod): - t = (t[:, :, 0::2, 0::2] + t[:, :, 0::2, 1::2] + t[:, :, 1::2, 0::2] + t[:, :, 1::2, 1::2]) * 0.25 - return np.round(t).clip(0, 255).astype(np.uint8) - -#---------------------------------------------------------------------------- - -gaussian_filter = np.float32([ - [1, 4, 6, 4, 1], - [4, 16, 24, 16, 4], - [6, 24, 36, 24, 6], - [4, 16, 24, 16, 4], - [1, 4, 6, 4, 1]]) / 256.0 - -def pyr_down(minibatch): # matches cv2.pyrDown() - assert minibatch.ndim == 4 - return scipy.ndimage.convolve(minibatch, gaussian_filter[np.newaxis, np.newaxis, :, :], mode='mirror')[:, :, ::2, ::2] - -def pyr_up(minibatch): # matches cv2.pyrUp() - assert minibatch.ndim == 4 - S = minibatch.shape - res = np.zeros((S[0], S[1], S[2] * 2, S[3] * 2), minibatch.dtype) - res[:, :, ::2, ::2] = minibatch - return scipy.ndimage.convolve(res, gaussian_filter[np.newaxis, np.newaxis, :, :] * 4.0, mode='mirror') - -def generate_laplacian_pyramid(minibatch, num_levels): - pyramid = [np.float32(minibatch)] - for i in range(1, num_levels): - pyramid.append(pyr_down(pyramid[-1])) - pyramid[-2] -= pyr_up(pyramid[-1]) - return pyramid - -def reconstruct_laplacian_pyramid(pyramid): - minibatch = pyramid[-1] - for level in pyramid[-2::-1]: - minibatch = pyr_up(minibatch) + level - return minibatch - -#---------------------------------------------------------------------------- - -class API: 
- def __init__(self, num_images, image_shape, image_dtype, minibatch_size): - self.nhood_size = 7 - self.nhoods_per_image = 128 - self.dir_repeats = 4 - self.dirs_per_repeat = 128 - self.resolutions = [] - res = image_shape[1] - while res >= 16: - self.resolutions.append(res) - res //= 2 - - def get_metric_names(self): - return ['SWDx1e3_%d' % res for res in self.resolutions] + ['SWDx1e3_avg'] - - def get_metric_formatting(self): - return ['%-13.4f'] * len(self.get_metric_names()) - - def begin(self, mode): - assert mode in ['warmup', 'reals', 'fakes'] - self.descriptors = [[] for res in self.resolutions] - - def feed(self, mode, minibatch): - for lod, level in enumerate(generate_laplacian_pyramid(minibatch, len(self.resolutions))): - desc = get_descriptors_for_minibatch(level, self.nhood_size, self.nhoods_per_image) - self.descriptors[lod].append(desc) - - def end(self, mode): - desc = [finalize_descriptors(d) for d in self.descriptors] - del self.descriptors - if mode in ['warmup', 'reals']: - self.desc_real = desc - dist = [sliced_wasserstein(dreal, dfake, self.dir_repeats, self.dirs_per_repeat) for dreal, dfake in zip(self.desc_real, desc)] - del desc - dist = [d * 1e3 for d in dist] # multiply by 10^3 - return dist + [np.mean(dist)] - -#---------------------------------------------------------------------------- diff --git a/spaces/yenniejun/tokenizers-languages/README.md b/spaces/yenniejun/tokenizers-languages/README.md deleted file mode 100644 index ed78b7fa4a277b614563ee962f24dc155e6f06cf..0000000000000000000000000000000000000000 --- a/spaces/yenniejun/tokenizers-languages/README.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: Tokenizers Languages -emoji: 🐠 -colorFrom: pink -colorTo: green -sdk: streamlit -sdk_version: 1.19.0 -app_file: app.py -pinned: false -license: cc ---- - - -# LLM Tokenizers in Multiple Languages - -This is the repo for the [HuggingFace Space](https://huggingface.co/spaces/yenniejun/tokenizers-languages) corresponding with the article, [All languages are NOT created (tokenized) equal](https://www.artfish.ai/p/all-languages-are-not-created-tokenized). - - -![Screenshot of the corresponding HuggingFace Space](img/screenshot_huggingface_space.png) - -The Space explores token length for various LLM tokenizers on many different languages. - -### Introduction to the project -Large language models such as ChatGPT process and generate text sequences by first splitting the text into smaller units called tokens. This process of tokenization is not uniform across languages, leading to disparities in the number of tokens produced for equivalent expressions in different languages. For example, a sentence in Burmese or Amharic may require 10x more tokens than a similar message in English. - -### Dataset -[MASSIVE](https://arxiv.org/abs/2204.08582) is a parallel dataset [introduced by Amazon](https://github.com/alexa/massive) consisting of 1 million realistic, parallel short texts translated across 52 languages and 18 domains. I used the dev split of the dataset, which consists of 2033 texts translated into each of the languages. The [dataset is available on HuggingFace](https://huggingface.co/datasets/AmazonScience/massive) and is licensed under the CC BY 4.0 license. 
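The token-count disparity the README describes is easy to measure directly. The following is a minimal sketch, not part of the Space's own code: it tokenizes the same short message in two languages with one multilingual Hugging Face tokenizer and compares the token counts. The checkpoint name and the example sentences are illustrative assumptions, not taken from the repository or the MASSIVE dataset.

from transformers import AutoTokenizer

# Illustrative checkpoint; any multilingual tokenizer shows the same effect.
tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")

# Hypothetical parallel utterances (the MASSIVE dev split provides 2033 real ones per language).
parallel_texts = {
    "en-US": "wake me up at nine am on friday",
    "es-ES": "despiértame a las nueve de la mañana el viernes",
}

for lang, text in parallel_texts.items():
    # Count subword tokens only, without [CLS]/[SEP] special tokens.
    num_tokens = len(tokenizer.encode(text, add_special_tokens=False))
    print(f"{lang}: {num_tokens} tokens")

Running this over all 52 MASSIVE languages, as the Space does, is just a loop over the dataset's language columns with the same per-sentence count.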
- - -![Word cloud of the word "hey" translated into 51 languages, from the Massive dataset](img/word_cloud_massive.png) - - diff --git a/spaces/yeqingmei123/face-test/e4e/datasets/__init__.py b/spaces/yeqingmei123/face-test/e4e/datasets/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tools/plain_train_net.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tools/plain_train_net.py deleted file mode 100644 index 4851a8398e128bdce1986feccf0f1cca4a12f704..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tools/plain_train_net.py +++ /dev/null @@ -1,223 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) Facebook, Inc. and its affiliates. -""" -Detectron2 training script with a plain training loop. - -This script reads a given config file and runs the training or evaluation. -It is an entry point that is able to train standard models in detectron2. - -In order to let one script support training of many models, -this script contains logic that are specific to these built-in models and therefore -may not be suitable for your own project. -For example, your research project perhaps only needs a single "evaluator". - -Therefore, we recommend you to use detectron2 as a library and take -this file as an example of how to use the library. -You may want to write your own script with your datasets and other customizations. - -Compared to "train_net.py", this script supports fewer default features. -It also includes fewer abstraction, therefore is easier to add custom logic. -""" - -import logging -import os -from collections import OrderedDict -import torch -from torch.nn.parallel import DistributedDataParallel - -import detectron2.utils.comm as comm -from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer -from detectron2.config import get_cfg -from detectron2.data import ( - MetadataCatalog, - build_detection_test_loader, - build_detection_train_loader, -) -from detectron2.engine import default_argument_parser, default_setup, default_writers, launch -from detectron2.evaluation import ( - CityscapesInstanceEvaluator, - CityscapesSemSegEvaluator, - COCOEvaluator, - COCOPanopticEvaluator, - DatasetEvaluators, - LVISEvaluator, - PascalVOCDetectionEvaluator, - SemSegEvaluator, - inference_on_dataset, - print_csv_format, -) -from detectron2.modeling import build_model -from detectron2.solver import build_lr_scheduler, build_optimizer -from detectron2.utils.events import EventStorage - -logger = logging.getLogger("detectron2") - - -def get_evaluator(cfg, dataset_name, output_folder=None): - """ - Create evaluator(s) for a given dataset. - This uses the special metadata "evaluator_type" associated with each builtin dataset. - For your own dataset, you can simply create an evaluator manually in your - script and do not have to worry about the hacky if-else logic here. 
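    For example, a user script can usually get by with just the following
    (illustrative sketch; "my_dataset_val" is a placeholder for your own
    registered dataset):

        evaluator = COCOEvaluator("my_dataset_val", output_dir="./output")
        val_loader = build_detection_test_loader(cfg, "my_dataset_val")
        print(inference_on_dataset(model, val_loader, evaluator))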
- """ - if output_folder is None: - output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") - evaluator_list = [] - evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type - if evaluator_type in ["sem_seg", "coco_panoptic_seg"]: - evaluator_list.append( - SemSegEvaluator( - dataset_name, - distributed=True, - output_dir=output_folder, - ) - ) - if evaluator_type in ["coco", "coco_panoptic_seg"]: - evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) - if evaluator_type == "coco_panoptic_seg": - evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) - if evaluator_type == "cityscapes_instance": - assert ( - torch.cuda.device_count() > comm.get_rank() - ), "CityscapesEvaluator currently do not work with multiple machines." - return CityscapesInstanceEvaluator(dataset_name) - if evaluator_type == "cityscapes_sem_seg": - assert ( - torch.cuda.device_count() > comm.get_rank() - ), "CityscapesEvaluator currently do not work with multiple machines." - return CityscapesSemSegEvaluator(dataset_name) - if evaluator_type == "pascal_voc": - return PascalVOCDetectionEvaluator(dataset_name) - if evaluator_type == "lvis": - return LVISEvaluator(dataset_name, cfg, True, output_folder) - if len(evaluator_list) == 0: - raise NotImplementedError( - "no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type) - ) - if len(evaluator_list) == 1: - return evaluator_list[0] - return DatasetEvaluators(evaluator_list) - - -def do_test(cfg, model): - results = OrderedDict() - for dataset_name in cfg.DATASETS.TEST: - data_loader = build_detection_test_loader(cfg, dataset_name) - evaluator = get_evaluator( - cfg, dataset_name, os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name) - ) - results_i = inference_on_dataset(model, data_loader, evaluator) - results[dataset_name] = results_i - if comm.is_main_process(): - logger.info("Evaluation results for {} in csv format:".format(dataset_name)) - print_csv_format(results_i) - if len(results) == 1: - results = list(results.values())[0] - return results - - -def do_train(cfg, model, resume=False): - model.train() - optimizer = build_optimizer(cfg, model) - scheduler = build_lr_scheduler(cfg, optimizer) - - checkpointer = DetectionCheckpointer( - model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler - ) - start_iter = ( - checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1 - ) - max_iter = cfg.SOLVER.MAX_ITER - - periodic_checkpointer = PeriodicCheckpointer( - checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter - ) - - writers = default_writers(cfg.OUTPUT_DIR, max_iter) if comm.is_main_process() else [] - - # compared to "train_net.py", we do not support accurate timing and - # precise BN here, because they are not trivial to implement in a small training loop - data_loader = build_detection_train_loader(cfg) - logger.info("Starting training from iteration {}".format(start_iter)) - with EventStorage(start_iter) as storage: - for data, iteration in zip(data_loader, range(start_iter, max_iter)): - storage.iter = iteration - - loss_dict = model(data) - losses = sum(loss_dict.values()) - assert torch.isfinite(losses).all(), loss_dict - - loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()} - losses_reduced = sum(loss for loss in loss_dict_reduced.values()) - if comm.is_main_process(): - storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced) - - optimizer.zero_grad() - losses.backward() - 
optimizer.step() - storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False) - scheduler.step() - - if ( - cfg.TEST.EVAL_PERIOD > 0 - and (iteration + 1) % cfg.TEST.EVAL_PERIOD == 0 - and iteration != max_iter - 1 - ): - do_test(cfg, model) - # Compared to "train_net.py", the test results are not dumped to EventStorage - comm.synchronize() - - if iteration - start_iter > 5 and ( - (iteration + 1) % 20 == 0 or iteration == max_iter - 1 - ): - for writer in writers: - writer.write() - periodic_checkpointer.step(iteration) - - -def setup(args): - """ - Create configs and perform basic setups. - """ - cfg = get_cfg() - cfg.merge_from_file(args.config_file) - cfg.merge_from_list(args.opts) - cfg.freeze() - default_setup( - cfg, args - ) # if you don't like any of the default setup, write your own setup code - return cfg - - -def main(args): - cfg = setup(args) - - model = build_model(cfg) - logger.info("Model:\n{}".format(model)) - if args.eval_only: - DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( - cfg.MODEL.WEIGHTS, resume=args.resume - ) - return do_test(cfg, model) - - distributed = comm.get_world_size() > 1 - if distributed: - model = DistributedDataParallel( - model, device_ids=[comm.get_local_rank()], broadcast_buffers=False - ) - - do_train(cfg, model, resume=args.resume) - return do_test(cfg, model) - - -if __name__ == "__main__": - args = default_argument_parser().parse_args() - print("Command Line Args:", args) - launch( - main, - args.num_gpus, - num_machines=args.num_machines, - machine_rank=args.machine_rank, - dist_url=args.dist_url, - args=(args,), - ) diff --git a/spaces/yousuf-e/yousuf-space-1/README.md b/spaces/yousuf-e/yousuf-space-1/README.md deleted file mode 100644 index c7ab71c4628df2cd75631d1df02456e675689e70..0000000000000000000000000000000000000000 --- a/spaces/yousuf-e/yousuf-space-1/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: AutoTrain Advanced -emoji: 🚀 -colorFrom: blue -colorTo: green -sdk: docker -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/yunfei0710/gpt-academic/docs/use_azure.md b/spaces/yunfei0710/gpt-academic/docs/use_azure.md deleted file mode 100644 index f1c27ef3eab1ac393e37efa2dec34e6733bc070d..0000000000000000000000000000000000000000 --- a/spaces/yunfei0710/gpt-academic/docs/use_azure.md +++ /dev/null @@ -1,152 +0,0 @@ -# 通过微软Azure云服务申请 Openai API - -由于Openai和微软的关系,现在是可以通过微软的Azure云计算服务直接访问openai的api,免去了注册和网络的问题。 - -快速入门的官方文档的链接是:[快速入门 - 开始通过 Azure OpenAI 服务使用 ChatGPT 和 GPT-4 - Azure OpenAI Service | Microsoft Learn](https://learn.microsoft.com/zh-cn/azure/cognitive-services/openai/chatgpt-quickstart?pivots=programming-language-python) - -# 申请API - -按文档中的“先决条件”的介绍,出了编程的环境以外,还需要以下三个条件: - -1.  Azure账号并创建订阅 - -2.  为订阅添加Azure OpenAI 服务 - -3.  
部署模型 - -## Azure账号并创建订阅 - -### Azure账号 - -创建Azure的账号时最好是有微软的账号,这样似乎更容易获得免费额度(第一个月的200美元,实测了一下,如果用一个刚注册的微软账号登录Azure的话,并没有这一个月的免费额度)。 - -创建Azure账号的网址是:[立即创建 Azure 免费帐户 | Microsoft Azure](https://azure.microsoft.com/zh-cn/free/) - -![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_944786_iH6AECuZ_tY0EaBd_1685327219?w=1327\&h=695\&type=image/png) - -打开网页后,点击 “免费开始使用” 会跳转到登录或注册页面,如果有微软的账户,直接登录即可,如果没有微软账户,那就需要到微软的网页再另行注册一个。 - -注意,Azure的页面和政策时不时会变化,已实际最新显示的为准就好。 - -### 创建订阅 - -注册好Azure后便可进入主页: - -![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_444847_tk-9S-pxOYuaLs_K_1685327675?w=1865\&h=969\&type=image/png) - -首先需要在订阅里进行添加操作,点开后即可进入订阅的页面: - -![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_612820_z_1AlaEgnJR-rUl0_1685327892?w=1865\&h=969\&type=image/png) - -第一次进来应该是空的,点添加即可创建新的订阅(可以是“免费”或者“即付即用”的订阅),其中订阅ID是后面申请Azure OpenAI需要使用的。 - -## 为订阅添加Azure OpenAI服务 - -之后回到首页,点Azure OpenAI即可进入OpenAI服务的页面(如果不显示的话,则在首页上方的搜索栏里搜索“openai”即可)。 - -![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_269759_nExkGcPC0EuAR5cp_1685328130?w=1865\&h=969\&type=image/png) - -不过现在这个服务还不能用。在使用前,还需要在这个网址申请一下: - -[Request Access to Azure OpenAI Service (microsoft.com)](https://customervoice.microsoft.com/Pages/ResponsePage.aspx?id=v4j5cvGGr0GRqy180BHbR7en2Ais5pxKtso_Pz4b1_xUOFA5Qk1UWDRBMjg0WFhPMkIzTzhKQ1dWNyQlQCN0PWcu) - -这里有二十来个问题,按照要求和自己的实际情况填写即可。 - -其中需要注意的是 - -1.  千万记得填对"订阅ID" - -2.  需要填一个公司邮箱(可以不是注册用的邮箱)和公司网址 - -之后,在回到上面那个页面,点创建,就会进入创建页面了: - -![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_72708_9d9JYhylPVz3dFWL_1685328372?w=824\&h=590\&type=image/png) - -需要填入“资源组”和“名称”,按照自己的需要填入即可。 - -完成后,在主页的“资源”里就可以看到刚才创建的“资源”了,点击进入后,就可以进行最后的部署了。 - -![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_871541_CGCnbgtV9Uk1Jccy_1685329861?w=1217\&h=628\&type=image/png) - -## 部署模型 - -进入资源页面后,在部署模型前,可以先点击“开发”,把密钥和终结点记下来。 - -![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_852567_dxCZOrkMlWDSLH0d_1685330736?w=856\&h=568\&type=image/png) - -之后,就可以去部署模型了,点击“部署”即可,会跳转到 Azure OpenAI Stuido 进行下面的操作: - -![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_169225_uWs1gMhpNbnwW4h2_1685329901?w=1865\&h=969\&type=image/png) - -进入 Azure OpenAi Studio 后,点击新建部署,会弹出如下对话框: - -![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_391255_iXUSZAzoud5qlxjJ_1685330224?w=656\&h=641\&type=image/png) - -在这里选 gpt-35-turbo 或需要的模型并按需要填入“部署名”即可完成模型的部署。 - -![](https://wdcdn.qpic.cn/MTY4ODg1Mjk4NzI5NTU1NQ_724099_vBaHcUilsm1EtPgK_1685330396?w=1869\&h=482\&type=image/png) - -这个部署名需要记下来。 - -到现在为止,申请操作就完成了,需要记下来的有下面几个东西: - -● 密钥(1或2都可以) - -● 终结点 - -● 部署名(不是模型名) - -# 修改 config.py - -``` -AZURE_ENDPOINT = "填入终结点" -AZURE_API_KEY = "填入azure openai api的密钥" -AZURE_API_VERSION = "2023-05-15" # 默认使用 2023-05-15 版本,无需修改 -AZURE_ENGINE = "填入部署名" - -``` -# API的使用 - -接下来就是具体怎么使用API了,还是可以参考官方文档:[快速入门 - 开始通过 Azure OpenAI 服务使用 ChatGPT 和 GPT-4 - Azure OpenAI Service | Microsoft Learn](https://learn.microsoft.com/zh-cn/azure/cognitive-services/openai/chatgpt-quickstart?pivots=programming-language-python) - -和openai自己的api调用有点类似,都需要安装openai库,不同的是调用方式 - -``` -import openai -openai.api_type = "azure" #固定格式,无需修改 -openai.api_base = os.getenv("AZURE_OPENAI_ENDPOINT") #这里填入“终结点” -openai.api_version = "2023-05-15" #固定格式,无需修改 -openai.api_key = os.getenv("AZURE_OPENAI_KEY") #这里填入“密钥1”或“密钥2” - -response = openai.ChatCompletion.create( - engine="gpt-35-turbo", #这里填入的不是模型名,是部署名 - messages=[ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": "Does Azure OpenAI support customer managed keys?"}, - {"role": "assistant", "content": "Yes, customer managed keys are supported by Azure 
OpenAI."}, - {"role": "user", "content": "Do other Azure Cognitive Services support this too?"} - ] -) - -print(response) -print(response['choices'][0]['message']['content']) - -``` - -需要注意的是: - -1.  engine那里填入的是部署名,不是模型名 - -2.  通过openai库获得的这个 response 和通过 request 库访问 url 获得的 response 不同,不需要 decode,已经是解析好的 json 了,直接根据键值读取即可。 - -更细节的使用方法,详见官方API文档。 - -# 关于费用 - -Azure OpenAI API 还是需要一些费用的(免费订阅只有1个月有效期),费用如下: - -![image.png](https://note.youdao.com/yws/res/18095/WEBRESOURCEeba0ab6d3127b79e143ef2d5627c0e44) - -具体可以可以看这个网址 :[Azure OpenAI 服务 - 定价| Microsoft Azure](https://azure.microsoft.com/zh-cn/pricing/details/cognitive-services/openai-service/?cdn=disable) - -并非网上说的什么“一年白嫖”,但注册方法以及网络问题都比直接使用openai的api要简单一些。 diff --git a/spaces/zhang-wei-jian/docker/node_modules/brace-expansion/index.js b/spaces/zhang-wei-jian/docker/node_modules/brace-expansion/index.js deleted file mode 100644 index 0478be81eabc2b140c2405999e46ba98214461eb..0000000000000000000000000000000000000000 --- a/spaces/zhang-wei-jian/docker/node_modules/brace-expansion/index.js +++ /dev/null @@ -1,201 +0,0 @@ -var concatMap = require('concat-map'); -var balanced = require('balanced-match'); - -module.exports = expandTop; - -var escSlash = '\0SLASH'+Math.random()+'\0'; -var escOpen = '\0OPEN'+Math.random()+'\0'; -var escClose = '\0CLOSE'+Math.random()+'\0'; -var escComma = '\0COMMA'+Math.random()+'\0'; -var escPeriod = '\0PERIOD'+Math.random()+'\0'; - -function numeric(str) { - return parseInt(str, 10) == str - ? parseInt(str, 10) - : str.charCodeAt(0); -} - -function escapeBraces(str) { - return str.split('\\\\').join(escSlash) - .split('\\{').join(escOpen) - .split('\\}').join(escClose) - .split('\\,').join(escComma) - .split('\\.').join(escPeriod); -} - -function unescapeBraces(str) { - return str.split(escSlash).join('\\') - .split(escOpen).join('{') - .split(escClose).join('}') - .split(escComma).join(',') - .split(escPeriod).join('.'); -} - - -// Basically just str.split(","), but handling cases -// where we have nested braced sections, which should be -// treated as individual members, like {a,{b,c},d} -function parseCommaParts(str) { - if (!str) - return ['']; - - var parts = []; - var m = balanced('{', '}', str); - - if (!m) - return str.split(','); - - var pre = m.pre; - var body = m.body; - var post = m.post; - var p = pre.split(','); - - p[p.length-1] += '{' + body + '}'; - var postParts = parseCommaParts(post); - if (post.length) { - p[p.length-1] += postParts.shift(); - p.push.apply(p, postParts); - } - - parts.push.apply(parts, p); - - return parts; -} - -function expandTop(str) { - if (!str) - return []; - - // I don't know why Bash 4.3 does this, but it does. - // Anything starting with {} will have the first two bytes preserved - // but *only* at the top level, so {},a}b will not expand to anything, - // but a{},b}c will be expanded to [a}c,abc]. 
- // One could argue that this is a bug in Bash, but since the goal of - // this module is to match Bash's rules, we escape a leading {} - if (str.substr(0, 2) === '{}') { - str = '\\{\\}' + str.substr(2); - } - - return expand(escapeBraces(str), true).map(unescapeBraces); -} - -function identity(e) { - return e; -} - -function embrace(str) { - return '{' + str + '}'; -} -function isPadded(el) { - return /^-?0\d/.test(el); -} - -function lte(i, y) { - return i <= y; -} -function gte(i, y) { - return i >= y; -} - -function expand(str, isTop) { - var expansions = []; - - var m = balanced('{', '}', str); - if (!m || /\$$/.test(m.pre)) return [str]; - - var isNumericSequence = /^-?\d+\.\.-?\d+(?:\.\.-?\d+)?$/.test(m.body); - var isAlphaSequence = /^[a-zA-Z]\.\.[a-zA-Z](?:\.\.-?\d+)?$/.test(m.body); - var isSequence = isNumericSequence || isAlphaSequence; - var isOptions = m.body.indexOf(',') >= 0; - if (!isSequence && !isOptions) { - // {a},b} - if (m.post.match(/,.*\}/)) { - str = m.pre + '{' + m.body + escClose + m.post; - return expand(str); - } - return [str]; - } - - var n; - if (isSequence) { - n = m.body.split(/\.\./); - } else { - n = parseCommaParts(m.body); - if (n.length === 1) { - // x{{a,b}}y ==> x{a}y x{b}y - n = expand(n[0], false).map(embrace); - if (n.length === 1) { - var post = m.post.length - ? expand(m.post, false) - : ['']; - return post.map(function(p) { - return m.pre + n[0] + p; - }); - } - } - } - - // at this point, n is the parts, and we know it's not a comma set - // with a single entry. - - // no need to expand pre, since it is guaranteed to be free of brace-sets - var pre = m.pre; - var post = m.post.length - ? expand(m.post, false) - : ['']; - - var N; - - if (isSequence) { - var x = numeric(n[0]); - var y = numeric(n[1]); - var width = Math.max(n[0].length, n[1].length) - var incr = n.length == 3 - ? 
Math.abs(numeric(n[2])) - : 1; - var test = lte; - var reverse = y < x; - if (reverse) { - incr *= -1; - test = gte; - } - var pad = n.some(isPadded); - - N = []; - - for (var i = x; test(i, y); i += incr) { - var c; - if (isAlphaSequence) { - c = String.fromCharCode(i); - if (c === '\\') - c = ''; - } else { - c = String(i); - if (pad) { - var need = width - c.length; - if (need > 0) { - var z = new Array(need + 1).join('0'); - if (i < 0) - c = '-' + z + c.slice(1); - else - c = z + c; - } - } - } - N.push(c); - } - } else { - N = concatMap(n, function(el) { return expand(el, false) }); - } - - for (var j = 0; j < N.length; j++) { - for (var k = 0; k < post.length; k++) { - var expansion = pre + N[j] + post[k]; - if (!isTop || isSequence || expansion) - expansions.push(expansion); - } - } - - return expansions; -} - diff --git a/spaces/zhang-wei-jian/docker/node_modules/on-finished/HISTORY.md b/spaces/zhang-wei-jian/docker/node_modules/on-finished/HISTORY.md deleted file mode 100644 index 1917595a714e11049ec9402d87aa625c68caa080..0000000000000000000000000000000000000000 --- a/spaces/zhang-wei-jian/docker/node_modules/on-finished/HISTORY.md +++ /dev/null @@ -1,98 +0,0 @@ -2.4.1 / 2022-02-22 -================== - - * Fix error on early async hooks implementations - -2.4.0 / 2022-02-21 -================== - - * Prevent loss of async hooks context - -2.3.0 / 2015-05-26 -================== - - * Add defined behavior for HTTP `CONNECT` requests - * Add defined behavior for HTTP `Upgrade` requests - * deps: ee-first@1.1.1 - -2.2.1 / 2015-04-22 -================== - - * Fix `isFinished(req)` when data buffered - -2.2.0 / 2014-12-22 -================== - - * Add message object to callback arguments - -2.1.1 / 2014-10-22 -================== - - * Fix handling of pipelined requests - -2.1.0 / 2014-08-16 -================== - - * Check if `socket` is detached - * Return `undefined` for `isFinished` if state unknown - -2.0.0 / 2014-08-16 -================== - - * Add `isFinished` function - * Move to `jshttp` organization - * Remove support for plain socket argument - * Rename to `on-finished` - * Support both `req` and `res` as arguments - * deps: ee-first@1.0.5 - -1.2.2 / 2014-06-10 -================== - - * Reduce listeners added to emitters - - avoids "event emitter leak" warnings when used multiple times on same request - -1.2.1 / 2014-06-08 -================== - - * Fix returned value when already finished - -1.2.0 / 2014-06-05 -================== - - * Call callback when called on already-finished socket - -1.1.4 / 2014-05-27 -================== - - * Support node.js 0.8 - -1.1.3 / 2014-04-30 -================== - - * Make sure errors passed as instanceof `Error` - -1.1.2 / 2014-04-18 -================== - - * Default the `socket` to passed-in object - -1.1.1 / 2014-01-16 -================== - - * Rename module to `finished` - -1.1.0 / 2013-12-25 -================== - - * Call callback when called on already-errored socket - -1.0.1 / 2013-12-20 -================== - - * Actually pass the error to the callback - -1.0.0 / 2013-12-20 -================== - - * Initial release diff --git a/spaces/zhangs2022/ChuanhuChatGPT/modules/config.py b/spaces/zhangs2022/ChuanhuChatGPT/modules/config.py deleted file mode 100644 index 2eee7730787df6a857de21dbb0cbefc42cb7273d..0000000000000000000000000000000000000000 --- a/spaces/zhangs2022/ChuanhuChatGPT/modules/config.py +++ /dev/null @@ -1,173 +0,0 @@ -from collections import defaultdict -from contextlib import contextmanager -import os -import logging -import 
sys -import commentjson as json - -from . import shared -from . import presets - - -__all__ = [ - "my_api_key", - "authflag", - "auth_list", - "dockerflag", - "retrieve_proxy", - "log_level", - "advance_docs", - "update_doc_config", - "multi_api_key", - "server_name", - "server_port", - "share", -] - -# 添加一个统一的config文件,避免文件过多造成的疑惑(优先级最低) -# 同时,也可以为后续支持自定义功能提供config的帮助 -if os.path.exists("config.json"): - with open("config.json", "r", encoding='utf-8') as f: - config = json.load(f) -else: - config = {} - -lang_config = config.get("language", "auto") -language = os.environ.get("LANGUAGE", lang_config) - -if os.path.exists("api_key.txt"): - logging.info("检测到api_key.txt文件,正在进行迁移...") - with open("api_key.txt", "r") as f: - config["openai_api_key"] = f.read().strip() - os.rename("api_key.txt", "api_key(deprecated).txt") - with open("config.json", "w", encoding='utf-8') as f: - json.dump(config, f, indent=4) - -if os.path.exists("auth.json"): - logging.info("检测到auth.json文件,正在进行迁移...") - auth_list = [] - with open("auth.json", "r", encoding='utf-8') as f: - auth = json.load(f) - for _ in auth: - if auth[_]["username"] and auth[_]["password"]: - auth_list.append((auth[_]["username"], auth[_]["password"])) - else: - logging.error("请检查auth.json文件中的用户名和密码!") - sys.exit(1) - config["users"] = auth_list - os.rename("auth.json", "auth(deprecated).json") - with open("config.json", "w", encoding='utf-8') as f: - json.dump(config, f, indent=4) - -## 处理docker if we are running in Docker -dockerflag = config.get("dockerflag", False) -if os.environ.get("dockerrun") == "yes": - dockerflag = True - -## 处理 api-key 以及 允许的用户列表 -my_api_key = config.get("openai_api_key", "") -my_api_key = os.environ.get("OPENAI_API_KEY", my_api_key) - -xmchat_api_key = config.get("xmchat_api_key", "") -if os.environ.get("XMCHAT_API_KEY", None) == None: - os.environ["XMCHAT_API_KEY"] = xmchat_api_key - -## 多账户机制 -multi_api_key = config.get("multi_api_key", False) # 是否开启多账户机制 -if multi_api_key: - api_key_list = config.get("api_key_list", []) - if len(api_key_list) == 0: - logging.error("多账号模式已开启,但api_key_list为空,请检查config.json") - sys.exit(1) - shared.state.set_api_key_queue(api_key_list) - -auth_list = config.get("users", []) # 实际上是使用者的列表 -authflag = len(auth_list) > 0 # 是否开启认证的状态值,改为判断auth_list长度 - -# 处理自定义的api_host,优先读环境变量的配置,如果存在则自动装配 -api_host = os.environ.get("api_host", config.get("api_host", "")) -if api_host: - shared.state.set_api_host(api_host) - -@contextmanager -def retrieve_openai_api(api_key = None): - old_api_key = os.environ.get("OPENAI_API_KEY", "") - if api_key is None: - os.environ["OPENAI_API_KEY"] = my_api_key - yield my_api_key - else: - os.environ["OPENAI_API_KEY"] = api_key - yield api_key - os.environ["OPENAI_API_KEY"] = old_api_key - -## 处理log -log_level = config.get("log_level", "INFO") -logging.basicConfig( - level=log_level, - format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s", -) - -## 处理代理: -http_proxy = config.get("http_proxy", "") -https_proxy = config.get("https_proxy", "") -http_proxy = os.environ.get("HTTP_PROXY", http_proxy) -https_proxy = os.environ.get("HTTPS_PROXY", https_proxy) - -# 重置系统变量,在不需要设置的时候不设置环境变量,以免引起全局代理报错 -os.environ["HTTP_PROXY"] = "" -os.environ["HTTPS_PROXY"] = "" - -local_embedding = config.get("local_embedding", False) # 是否使用本地embedding - -@contextmanager -def retrieve_proxy(proxy=None): - """ - 1, 如果proxy = NONE,设置环境变量,并返回最新设置的代理 - 2,如果proxy != NONE,更新当前的代理配置,但是不更新环境变量 - """ - global http_proxy, https_proxy - if proxy is not None: - http_proxy = proxy - 
https_proxy = proxy - yield http_proxy, https_proxy - else: - old_var = os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] - os.environ["HTTP_PROXY"] = http_proxy - os.environ["HTTPS_PROXY"] = https_proxy - yield http_proxy, https_proxy # return new proxy - - # return old proxy - os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] = old_var - - -## 处理advance docs -advance_docs = defaultdict(lambda: defaultdict(dict)) -advance_docs.update(config.get("advance_docs", {})) -def update_doc_config(two_column_pdf): - global advance_docs - advance_docs["pdf"]["two_column"] = two_column_pdf - - logging.info(f"更新后的文件参数为:{advance_docs}") - -## 处理gradio.launch参数 -server_name = config.get("server_name", None) -server_port = config.get("server_port", None) -if server_name is None: - if dockerflag: - server_name = "0.0.0.0" - else: - server_name = "127.0.0.1" -if server_port is None: - if dockerflag: - server_port = 7860 - -assert server_port is None or type(server_port) == int, "要求port设置为int类型" - -# 设置默认model -default_model = config.get("default_model", "") -try: - presets.DEFAULT_MODEL = presets.MODELS.index(default_model) -except ValueError: - pass - -share = config.get("share", False) diff --git a/spaces/zideliu/styledrop/taming/modules/util.py b/spaces/zideliu/styledrop/taming/modules/util.py deleted file mode 100644 index 9ee16385d8b1342a2d60a5f1aa5cadcfbe934bd8..0000000000000000000000000000000000000000 --- a/spaces/zideliu/styledrop/taming/modules/util.py +++ /dev/null @@ -1,130 +0,0 @@ -import torch -import torch.nn as nn - - -def count_params(model): - total_params = sum(p.numel() for p in model.parameters()) - return total_params - - -class ActNorm(nn.Module): - def __init__(self, num_features, logdet=False, affine=True, - allow_reverse_init=False): - assert affine - super().__init__() - self.logdet = logdet - self.loc = nn.Parameter(torch.zeros(1, num_features, 1, 1)) - self.scale = nn.Parameter(torch.ones(1, num_features, 1, 1)) - self.allow_reverse_init = allow_reverse_init - - self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8)) - - def initialize(self, input): - with torch.no_grad(): - flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1) - mean = ( - flatten.mean(1) - .unsqueeze(1) - .unsqueeze(2) - .unsqueeze(3) - .permute(1, 0, 2, 3) - ) - std = ( - flatten.std(1) - .unsqueeze(1) - .unsqueeze(2) - .unsqueeze(3) - .permute(1, 0, 2, 3) - ) - - self.loc.data.copy_(-mean) - self.scale.data.copy_(1 / (std + 1e-6)) - - def forward(self, input, reverse=False): - if reverse: - return self.reverse(input) - if len(input.shape) == 2: - input = input[:,:,None,None] - squeeze = True - else: - squeeze = False - - _, _, height, width = input.shape - - if self.training and self.initialized.item() == 0: - self.initialize(input) - self.initialized.fill_(1) - - h = self.scale * (input + self.loc) - - if squeeze: - h = h.squeeze(-1).squeeze(-1) - - if self.logdet: - log_abs = torch.log(torch.abs(self.scale)) - logdet = height*width*torch.sum(log_abs) - logdet = logdet * torch.ones(input.shape[0]).to(input) - return h, logdet - - return h - - def reverse(self, output): - if self.training and self.initialized.item() == 0: - if not self.allow_reverse_init: - raise RuntimeError( - "Initializing ActNorm in reverse direction is " - "disabled by default. Use allow_reverse_init=True to enable." 
- ) - else: - self.initialize(output) - self.initialized.fill_(1) - - if len(output.shape) == 2: - output = output[:,:,None,None] - squeeze = True - else: - squeeze = False - - h = output / self.scale - self.loc - - if squeeze: - h = h.squeeze(-1).squeeze(-1) - return h - - -class AbstractEncoder(nn.Module): - def __init__(self): - super().__init__() - - def encode(self, *args, **kwargs): - raise NotImplementedError - - -class Labelator(AbstractEncoder): - """Net2Net Interface for Class-Conditional Model""" - def __init__(self, n_classes, quantize_interface=True): - super().__init__() - self.n_classes = n_classes - self.quantize_interface = quantize_interface - - def encode(self, c): - c = c[:,None] - if self.quantize_interface: - return c, None, [None, None, c.long()] - return c - - -class SOSProvider(AbstractEncoder): - # for unconditional training - def __init__(self, sos_token, quantize_interface=True): - super().__init__() - self.sos_token = sos_token - self.quantize_interface = quantize_interface - - def encode(self, x): - # get batch size from data and replicate sos_token - c = torch.ones(x.shape[0], 1)*self.sos_token - c = c.long().to(x.device) - if self.quantize_interface: - return c, None, [None, None, c] - return c diff --git a/spaces/zideliu/styledrop/timm/utils/misc.py b/spaces/zideliu/styledrop/timm/utils/misc.py deleted file mode 100644 index 39c0097c60ed602547f832f1f8dafbe37f156064..0000000000000000000000000000000000000000 --- a/spaces/zideliu/styledrop/timm/utils/misc.py +++ /dev/null @@ -1,18 +0,0 @@ -""" Misc utils - -Hacked together by / Copyright 2020 Ross Wightman -""" -import re - - -def natural_key(string_): - """See http://www.codinghorror.com/blog/archives/001018.html""" - return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] - - -def add_bool_arg(parser, name, default=False, help=''): - dest_name = name.replace('-', '_') - group = parser.add_mutually_exclusive_group(required=False) - group.add_argument('--' + name, dest=dest_name, action='store_true', help=help) - group.add_argument('--no-' + name, dest=dest_name, action='store_false', help=help) - parser.set_defaults(**{dest_name: default}) diff --git a/spaces/zlc99/M4Singer/modules/parallel_wavegan/layers/residual_block.py b/spaces/zlc99/M4Singer/modules/parallel_wavegan/layers/residual_block.py deleted file mode 100644 index 7a267a86c1fa521c2824addf9dda304c43f1ff1f..0000000000000000000000000000000000000000 --- a/spaces/zlc99/M4Singer/modules/parallel_wavegan/layers/residual_block.py +++ /dev/null @@ -1,129 +0,0 @@ -# -*- coding: utf-8 -*- - -"""Residual block module in WaveNet. - -This code is modified from https://github.com/r9y9/wavenet_vocoder. 
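The block implements the standard WaveNet gated activation: the dilated-conv
output is split into (x_a, x_b), local conditioning is split into (c_a, c_b),
and the gate computes z = tanh(x_a + c_a) * sigmoid(x_b + c_b); 1x1
convolutions then project z into the residual and skip paths (see
ResidualBlock.forward below).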
- -""" - -import math - -import torch -import torch.nn.functional as F - - -class Conv1d(torch.nn.Conv1d): - """Conv1d module with customized initialization.""" - - def __init__(self, *args, **kwargs): - """Initialize Conv1d module.""" - super(Conv1d, self).__init__(*args, **kwargs) - - def reset_parameters(self): - """Reset parameters.""" - torch.nn.init.kaiming_normal_(self.weight, nonlinearity="relu") - if self.bias is not None: - torch.nn.init.constant_(self.bias, 0.0) - - -class Conv1d1x1(Conv1d): - """1x1 Conv1d with customized initialization.""" - - def __init__(self, in_channels, out_channels, bias): - """Initialize 1x1 Conv1d module.""" - super(Conv1d1x1, self).__init__(in_channels, out_channels, - kernel_size=1, padding=0, - dilation=1, bias=bias) - - -class ResidualBlock(torch.nn.Module): - """Residual block module in WaveNet.""" - - def __init__(self, - kernel_size=3, - residual_channels=64, - gate_channels=128, - skip_channels=64, - aux_channels=80, - dropout=0.0, - dilation=1, - bias=True, - use_causal_conv=False - ): - """Initialize ResidualBlock module. - - Args: - kernel_size (int): Kernel size of dilation convolution layer. - residual_channels (int): Number of channels for residual connection. - skip_channels (int): Number of channels for skip connection. - aux_channels (int): Local conditioning channels i.e. auxiliary input dimension. - dropout (float): Dropout probability. - dilation (int): Dilation factor. - bias (bool): Whether to add bias parameter in convolution layers. - use_causal_conv (bool): Whether to use use_causal_conv or non-use_causal_conv convolution. - - """ - super(ResidualBlock, self).__init__() - self.dropout = dropout - # no future time stamps available - if use_causal_conv: - padding = (kernel_size - 1) * dilation - else: - assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size." - padding = (kernel_size - 1) // 2 * dilation - self.use_causal_conv = use_causal_conv - - # dilation conv - self.conv = Conv1d(residual_channels, gate_channels, kernel_size, - padding=padding, dilation=dilation, bias=bias) - - # local conditioning - if aux_channels > 0: - self.conv1x1_aux = Conv1d1x1(aux_channels, gate_channels, bias=False) - else: - self.conv1x1_aux = None - - # conv output is split into two groups - gate_out_channels = gate_channels // 2 - self.conv1x1_out = Conv1d1x1(gate_out_channels, residual_channels, bias=bias) - self.conv1x1_skip = Conv1d1x1(gate_out_channels, skip_channels, bias=bias) - - def forward(self, x, c): - """Calculate forward propagation. - - Args: - x (Tensor): Input tensor (B, residual_channels, T). - c (Tensor): Local conditioning auxiliary tensor (B, aux_channels, T). - - Returns: - Tensor: Output tensor for residual connection (B, residual_channels, T). - Tensor: Output tensor for skip connection (B, skip_channels, T). 
- - """ - residual = x - x = F.dropout(x, p=self.dropout, training=self.training) - x = self.conv(x) - - # remove future time steps if use_causal_conv conv - x = x[:, :, :residual.size(-1)] if self.use_causal_conv else x - - # split into two part for gated activation - splitdim = 1 - xa, xb = x.split(x.size(splitdim) // 2, dim=splitdim) - - # local conditioning - if c is not None: - assert self.conv1x1_aux is not None - c = self.conv1x1_aux(c) - ca, cb = c.split(c.size(splitdim) // 2, dim=splitdim) - xa, xb = xa + ca, xb + cb - - x = torch.tanh(xa) * torch.sigmoid(xb) - - # for skip connection - s = self.conv1x1_skip(x) - - # for residual connection - x = (self.conv1x1_out(x) + residual) * math.sqrt(0.5) - - return x, s diff --git a/spaces/zomehwh/sovits-teio/modules/attentions.py b/spaces/zomehwh/sovits-teio/modules/attentions.py deleted file mode 100644 index f9c11ca4a3acb86bf1abc04d9dcfa82a4ed4061f..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/sovits-teio/modules/attentions.py +++ /dev/null @@ -1,349 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -import modules.commons as commons -import modules.modules as modules -from modules.modules import LayerNorm - - -class FFT(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers=1, kernel_size=1, p_dropout=0., - proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, - proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - x = x * x_mask - return x - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, 
hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not 
None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. 
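        # emb_rel_* stores embeddings for the 2*window_size+1 relative offsets.
        # For a sequence of length t, the table is zero-padded below whenever
        # t > window_size + 1 and exactly 2*t - 1 rows are then sliced out, so
        # each query position can index offsets -(t-1)..(t-1); offsets outside
        # the window fall on the zero padding.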
- pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/zxy666/bingo-chatai666/src/components/chat-suggestions.tsx b/spaces/zxy666/bingo-chatai666/src/components/chat-suggestions.tsx deleted file mode 100644 index 48aec7c84e4407c482acdfcc7857fb0f660d12d3..0000000000000000000000000000000000000000 --- a/spaces/zxy666/bingo-chatai666/src/components/chat-suggestions.tsx +++ /dev/null @@ -1,45 +0,0 @@ -import React, { useMemo } from 'react' -import Image from 'next/image' -import HelpIcon from '@/assets/images/help.svg' -import { SuggestedResponse } from '@/lib/bots/bing/types' -import { useBing } from '@/lib/hooks/use-bing' -import { atom, useAtom } from 'jotai' - -type Suggestions = SuggestedResponse[] -const helpSuggestions = ['为什么不回应某些主题', '告诉我更多关于必应的资迅', '必应如何使用 AI?'].map((text) => ({ text })) -const suggestionsAtom = atom([]) - -type ChatSuggestionsProps = React.ComponentProps<'div'> & Pick, 'setInput'> & { suggestions?: Suggestions } - -export function ChatSuggestions({ setInput, suggestions = [] }: ChatSuggestionsProps) { - const [currentSuggestions, setSuggestions] = useAtom(suggestionsAtom) - const toggleSuggestions = (() => { - if (currentSuggestions === helpSuggestions) { - setSuggestions(suggestions) - } else { - setSuggestions(helpSuggestions) - } - }) - - useMemo(() => { - setSuggestions(suggestions) - window.scrollBy(0, 2000) - }, [suggestions.length, setSuggestions]) - - return currentSuggestions?.length ? ( -
                -
                - - { - currentSuggestions.map(suggestion => ( - - )) - } -
                -
                - ) : null -}

    Create a personal avatar from just a single image using ROME.
    Paper | Project Page | Github